\begin{document}

\title{A Fourier approach to pathwise stochastic integration}
\maketitle

\begin{abstract} We develop a Fourier approach to rough path integration, based on the series decomposition of continuous functions in terms of Schauder functions. Our approach is rather elementary, the main ingredient being a simple commutator estimate, and it leads to recursive algorithms for the calculation of pathwise stochastic integrals, both of It\^o and of Stratonovich type. We apply it to solve stochastic differential equations in a pathwise manner. \end{abstract}

\tableofcontents

\section{Introduction}
The theory of rough paths~\cite{Lyons1998} has recently been extended to a multiparameter setting~\cite{Hairer2014Regularity,Gubinelli2012}. While \cite{Hairer2014Regularity} has a much wider range of applicability, both approaches allow us to solve many interesting SPDEs that were well out of reach with previously existing methods; for example the continuous parabolic Anderson model in dimension two~\cite{Hairer2014Regularity,Gubinelli2012}, the three-dimensional stochastic quantization equation~\cite{Hairer2014Regularity,Catellier2013}, the KPZ equation~\cite{Hairer2013KPZ,Gubinelli2014}, or the three-dimensional stochastic Navier-Stokes equation~\cite{Zhu2014,Zhu2014Discretization}. The methods developed in~\cite{Gubinelli2012} are based on harmonic analysis, on Littlewood-Paley decompositions of tempered distributions, and on a simple commutator lemma. This requires a non-negligible knowledge of Littlewood-Paley theory and Besov spaces, while at the same time the application to classical rough path SDEs is not quite straightforward. That is why here we develop the approach of~\cite{Gubinelli2012} in the slightly different language of Haar and Schauder functions, which allows us to communicate our basic ideas while requiring only very basic knowledge in analysis. Moreover, in the Haar--Schauder formulation the application to SDEs poses no additional technical challenges.

It is a classical result of Ciesielski~\cite{Ciesielski1960} that $C^\alpha := C^\alpha([0,1],\mathbb{R}^d)$, the space of $\alpha$--H\"older continuous functions on $[0,1]$ with values in $\mathbb{R}^d$, is isomorphic to $\ell^\infty(\mathbb{R}^d)$, the space of bounded sequences with values in $\mathbb{R}^d$. The isomorphism gives a Fourier decomposition of a H\"older-continuous function $f$ as \begin{align*} f = \sum_{p,m} \langle H_{pm}, \mathrm{d} f \rangle G_{pm}, \end{align*} where $(H_{pm})$ are the Haar functions and $(G_{pm})$ are the Schauder functions. Ciesielski proved that a continuous function $f$ is in $C^{\alpha}([0,1],\mathbb{R}^d)$ if and only if the coefficients $(\langle H_{pm}, \mathrm{d} f\rangle)_{p,m}$ decay rapidly enough. Following Ciesielski's work, similar isomorphisms have been developed for many Fourier and wavelet bases, showing that the regularity of a function is encoded in the decay of its coefficients in these bases; see for example Triebel~\cite{Triebel2006}. But to this day, the isomorphism based on Schauder functions plays a special role in stochastic analysis, because the coefficients in the Schauder basis have the pleasant property that they are just rescaled second order increments of $f$. So if $f$ is a stochastic process with known distribution, then the distribution of its coefficients in the Schauder basis is also known explicitly. A simple application is the L\'evy-Ciesielski construction of Brownian motion. An incomplete list of further applications will be given below.
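For the reader who likes to experiment, the following minimal Python sketch illustrates the L\'evy-Ciesielski construction just mentioned: since the Haar functions form an orthonormal basis of $L^2([0,1])$, summing independent standard Gaussian coefficients against the Schauder functions produces an approximation of a Brownian path. The sketch and its names (\texttt{schauder}, \texttt{levy\_ciesielski}, the truncation level \texttt{n\_levels}) are ours and serve only as an illustration; they are not notation used in the text.
\begin{verbatim}
import numpy as np

def schauder(p, m, t):
    # Schauder function G_{pm}, the primitive of the Haar function H_{pm}
    t0, t1, t2 = (m - 1) / 2**p, (2*m - 1) / 2**(p + 1), m / 2**p
    return 2**(p / 2) * (np.clip(t, t0, t1) - t0 - (np.clip(t, t1, t2) - t1))

def levy_ciesielski(n_levels=12, n_points=1025, seed=0):
    # Truncated expansion B ~ xi_00 * t + sum_{p < n_levels} sum_m xi_pm * G_pm(t)
    # with i.i.d. standard Gaussian coefficients xi_pm.
    rng = np.random.default_rng(seed)
    t = np.linspace(0.0, 1.0, n_points)
    b = rng.standard_normal() * t            # coefficient of G_00(t) = t
    for p in range(n_levels):
        for m in range(1, 2**p + 1):
            b += rng.standard_normal() * schauder(p, m, t)
    return t, b

t, b = levy_ciesielski()
print(b[-1])    # B(1) = xi_00, exactly standard Gaussian
\end{verbatim}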
Another convenient property of Schauder functions is that they are piecewise linear, and therefore their iterated integrals $\int_0^\cdot G_{pm}(s) \mathrm{d} G_{qn}(s)$ can be easily calculated. This makes them an ideal tool for our purpose of studying integrals. Indeed, given two continuous functions $f$ and $g$ on $[0,1]$, with values in $\mathcal{L}(\mathbb{R}^d, \mathbb{R}^n)$ (the space of linear maps from $\mathbb{R}^d$ to $\mathbb{R}^n$) and in $\mathbb{R}^d$ respectively, we can formally define \begin{align*} \int_0^t f(s) \mathrm{d} g(s) := \sum_{p,m}\sum_{q,n} \langle H_{pm}, \mathrm{d} f \rangle \langle H_{qn}, \mathrm{d} g \rangle \int_0^t G_{pm} (s) \mathrm{d} G_{qn}(s). \end{align*} In this paper we study under which conditions this formal definition can be made rigorous; a simple numerical illustration of the truncated series is sketched below. We start by observing that the integral defines a bounded bilinear operator from $C^\alpha \times C^\beta$ to $C^\beta$ if and only if $\alpha+\beta > 1$. Obviously, here we simply recover Young's integral~\cite{Young1936}. In our study of this integral, we identify different components: \begin{align*} \int_0^t f(s) \mathrm{d} g(s) = S(f,g)(t) + \pi_<(f,g)(t) + L(f,g)(t), \end{align*} where $S$ is the \emph{symmetric part}, $\pi_<$ the \emph{paraproduct}, and $L(f,g)$ the \emph{L\'evy area}. The operators $S$ and $\pi_<$ are defined for $f \in C^\alpha$ and $g \in C^\beta$ for arbitrary $\alpha,\beta>0$, and it is only the L\'evy area which requires $\alpha + \beta > 1$. Considering the regularity of the three operators, we have $S(f,g) \in C^{\alpha + \beta}$, $\pi_<(f,g) \in C^\beta$, and $L(f,g) \in C^{\alpha+\beta}$ whenever the latter is defined. Therefore, in the Young regime $\int_0^\cdot f(s) \mathrm{d} g(s) - \pi_<(f,g) \in C^{\alpha + \beta}$. We will also see that for sufficiently smooth functions $F$ we have $F(f) \in C^{\alpha}$ but $F(f) - \pi_<(\mathrm{D} F(f), f) \in C^{2\alpha}$. So both $\int_0^\cdot f(s) \mathrm{d} g(s)$ and $F(f)$ are given by a paraproduct plus a smoother remainder. This leads us to call a function $f \in C^\alpha$ \emph{paracontrolled} by $g$ if there exists a function $f^g \in C^\beta$ such that $f - \pi_<(f^g,g) \in C^{\alpha+\beta}$. Our aim is then to construct the L\'evy area $L(f,g)$ for $\alpha < 1/2$ and $f$ paracontrolled by $g$. If $\beta > 1/3$, then the term $L(f - \pi_<(f^g,g),g)$ is well defined, and it suffices to make sense of the term $L(\pi_<(f^g,g),g)$. This is achieved with the following commutator estimate: \begin{align*} \left\lVert L(\pi_<(f^g,g),g) - \int_0^\cdot f^g(s) \mathrm{d} L(g,g)(s)\right\rVert_{3\beta} \lesssim \lVert f^g \rVert_\beta \lVert g \rVert_\beta \lVert g \rVert_\beta. \end{align*} Therefore, the integral $\int_0^\cdot f(s)\mathrm{d} g(s)$ can be constructed for all $f$ that are paracontrolled by $g$, provided that $L(g,g)$ can be constructed. In other words, we have found an alternative formulation of Lyons'~\cite{Lyons1998} rough path integral, at least for H\"older continuous functions of H\"older exponent larger than $1/3$. Since we approximate $f$ and $g$ by functions of bounded variation, our integral is of Stratonovich type, that is, it satisfies the usual integration by parts rule. We also consider a non-anticipating It\^{o} type integral, which can essentially be reduced to the Stratonovich case with the help of the quadratic variation. The last remaining problem is then to construct the L\'evy area $L(g,g)$ for suitable stochastic processes $g$. We construct it for certain hypercontractive processes.
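As a hedged illustration of the formal series above (the helper name \texttt{truncated\_schauder\_integral} and the test functions are our own choices for this sketch): truncating both expansions at level $N$ replaces $f$ and $g$ by their piecewise linear interpolations on the dyadic grid of mesh $2^{-N-1}$, so the truncated double series reduces to a trapezoidal, and hence Stratonovich-type, Riemann sum.
\begin{verbatim}
import numpy as np

def truncated_schauder_integral(f, g, level):
    # Truncating the Schauder expansions of f and g at level N replaces both by
    # their linear interpolations on the dyadic grid of mesh 2^(-N-1); the double
    # series then collapses to the trapezoidal sum below (Stratonovich type).
    s = np.linspace(0.0, 1.0, 2**(level + 1) + 1)
    fs, gs = f(s), g(s)
    return np.sum(0.5 * (fs[:-1] + fs[1:]) * np.diff(gs))

f = np.sin
g = lambda t: t**2
exact = 2.0 * (np.sin(1.0) - np.cos(1.0))     # int_0^1 sin(t) d(t^2)
for level in (2, 4, 6, 8):
    print(level, truncated_schauder_integral(f, g, level) - exact)
\end{verbatim}
For smooth integrands the error decreases rapidly with the level; the point of the paper is to control such truncations when $f$ and $g$ are merely H\"older continuous.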
For continuous martingales that possess sufficiently many moments we give a construction of the It\^{o} iterated integrals that allows us to use them as integrators for our pathwise It\^o integral.
Below we give some pointers to the literature, and we introduce some basic notation which we will use throughout. In Section~\ref{s:preliminaries ciesielski} we recall some details on Ciesielski's isomorphism, and we give a short overview of rough paths and Young integration. In Section~\ref{s:paradifferential calculus} we develop a paradifferential calculus in terms of Schauder functions, and we examine the different components of Young's integral. In Section~\ref{s:schauder rough path integral} we construct the rough path integral based on Schauder functions. Section~\ref{s:pathwise ito} develops the pathwise It\^o integral. In Section~\ref{s:construction of levy area} we construct the L\'evy area for suitable stochastic processes. Finally, in Section~\ref{s:sde} we apply our integral to solve both It\^o type and Stratonovich type SDEs in a pathwise way.

\paragraph{Relevant literature}
Starting with the L\'evy-Ciesielski construction of Brownian motion, Schauder functions have been a very popular tool in stochastic analysis. They can be used to prove in a comparatively easy way that stochastic processes belong to Besov spaces; see for example Ciesielski, Kerkyacharian, and Roynette~\cite{Ciesielski1993}, Roynette~\cite{Roynette1993}, and Rosenbaum~\cite{Rosenbaum2009}. Baldi and Roynette~\cite{Baldi1992} have used Schauder functions to extend the large deviation principle for Brownian motion from the uniform to the H\"older topology; see also Ben Arous and Ledoux~\cite{BenArous1994} for the extension to diffusions, Eddahbi, N'zi, and Ouknine~\cite{Eddahbi1999} for the large deviation principle for diffusions in Besov spaces, and Andresen, Imkeller, and Perkowski~\cite{Andresen2013} for the large deviation principle for a Hilbert space valued Wiener process in H\"older topology. Ben Arous, Gr\u{a}dinaru, and Ledoux~\cite{BenArous1994a} use Schauder functions to extend the Stroock-Varadhan support theorem for diffusions from the uniform to the H\"older topology. Lyons and Zeitouni~\cite{Lyons1999} use Schauder functions to prove exponential moment bounds for Stratonovich iterated integrals of a Brownian motion conditioned to stay in a small ball. Gantert~\cite{Gantert1994} uses Schauder functions to associate to every sample path of the Brownian bridge a sequence of probability measures on path space, and goes on to show that for almost all sample paths these measures converge to the distribution of the Brownian bridge. This shows that the law of the Brownian bridge can be reconstructed from a single ``typical sample path''.
Concerning integrals based on Schauder functions, there are three important references: Roynette~\cite{Roynette1993} constructs a version of Young's integral on Besov spaces and shows that in the one-dimensional case the Stratonovich integral $\int_0^\cdot F(W_s) \mathrm{d} W_s$, where $W$ is a Brownian motion and $F \in C^2$, can be defined in a deterministic manner with the help of Schauder functions. Roynette also constructs more general Stratonovich integrals with the help of Schauder functions, but in that case only almost sure convergence is established, where the null set depends on the integrand, and the integral is not a deterministic operator.
Ciesielski, Kerkyacharian, and Roynette~\cite{Ciesielski1993} slightly extend the Young integral of~\cite{Roynette1993}, and simplify the proof by developing the integrand in the Haar basis and not in the Schauder basis. They also construct pathwise solutions to SDEs driven by fractional Brownian motions with Hurst index $H>1/2$. Kamont~\cite{Kamont1994} extends the approach of~\cite{Ciesielski1993} to define a multiparameter Young integral for functions in anisotropic Besov spaces. Ogawa~\cite{Ogawa1984, Ogawa1985} investigates an integral for anticipating integrands, which he calls \emph{noncausal}, starting from a Parseval type relation in which both the integrand and the Brownian motion serving as integrator are developed along a given complete orthonormal system of the space of square integrable functions on the underlying time interval. This concept is shown to be strongly related to Stratonovich type integrals (see Ogawa~\cite{Ogawa1985} and Nualart and Zakai~\cite{NualartZakai1989}), and it is used to develop a stochastic calculus on a Brownian basis with \emph{noncausal} SDEs (Ogawa~\cite{Ogawa2007}).
Rough paths were introduced by Lyons~\cite{Lyons1998}; see also~\cite{Lyons1995,Lyons1996,Lyons1997} for previous results. Lyons observed that solution flows to SDEs (or more generally ordinary differential equations (ODEs) driven by rough signals) can be defined in a pathwise, continuous way if paths are equipped with sufficiently many iterated integrals. More precisely, if a path has finite $p$--variation for some $p \ge 1$, then one needs to associate $\lfloor p\rfloor$ iterated integrals to it to obtain an object which can be taken as the driving signal in an ODE, such that the solution to the ODE depends continuously on that signal. Gubinelli~\cite{Gubinelli2004, Gubinelli2010} simplified the theory of rough paths by introducing the concept of controlled paths, on which we will strongly rely in what follows. Roughly speaking, a path $f$ is controlled by the reference path $g$ if the small scale fluctuations of $f$ ``look like those of $g$''. Good monographs on rough paths are~\cite{Lyons2002, Lyons2007, Friz2010, Friz2013}.

\paragraph{Notation and conventions.}
Throughout the paper, we use the notation $a \lesssim b$ if there exists a constant $c>0$, independent of the variables under consideration, such that $a \leqslant c \cdot b$, and we write $a \simeq b$ if $a \lesssim b$ and $b \lesssim a$. If we want to emphasize the dependence of $c$ on the variable $x$, then we write $a(x) \lesssim_{x} b(x)$. For a multi-index $\mu = ( \mu_{1} , \ldots , \mu_{d} ) \in \mathbb{N}^{d}$ we write $| \mu | = \mu_{1} + \ldots + \mu_{d}$ and $\partial^{\mu} = \partial^{| \mu |} / (\partial x_{1}^{\mu_{1}} \cdots \partial x_{d}^{\mu_{d}})$. We denote by $\mathrm{D} F$ or $F'$ the total derivative of $F$, and for $k \in \mathbb{N}$ we denote by $\mathrm{D}^{k} F$ the $k$-th order derivative of $F$. We also write $\partial_{x}$ for the partial derivative in direction $x$.

\section{Preliminaries}\label{s:preliminaries ciesielski}
\subsection{Ciesielski's isomorphism}\label{s:ciesielski}
Let us briefly recall Ciesielski's isomorphism between $C^\alpha([0,1],\mathbb{R}^d)$ and $\ell^\infty(\mathbb{R}^d)$.
The \emph{Haar functions} $(H_{pm}, p \in \mathbb{N}, 1 \le m \le 2^p)$ are defined as \begin{align*} H_{pm}(t) := \begin{cases} \sqrt{2^p}, & t \in \left[ \frac{m-1}{2^{p}}, \frac{2m-1}{2^{p+1}}\right),\\ -\sqrt{2^p}, & t \in \left[ \frac{2m-1}{2^{p+1}}, \frac{m}{2^{p}}\right), \\ 0, & \text{otherwise.} \end{cases} \end{align*} When completed by $H_{00} \equiv 1$, the Haar functions are an orthonormal basis of $L^2([0,1],\mathrm{d} t)$. For convenience of notation, we also define $H_{p0}\equiv 0$ for $p \ge 1$. The primitives of the Haar functions are called \emph{Schauder functions} and they are given by $G_{pm} (t) := \int_0^t H_{pm} (s) \mathrm{d} s$ for $t\in[0,1]$, $p\in \mathbb{N}$, $0 \le m \le 2^p$. More explicitly, $G_{00}(t) = t$ and for $p\in \mathbb{N}$, $1 \le m \le 2^p$ \begin{align*} G_{pm} (t) = \begin{cases} 2^{p/2}\left(t - \frac{m-1}{2^{p}}\right), & t \in \left[ \frac{m-1}{2^{p}}, \frac{2m-1}{2^{p+1}}\right),\\ - 2^{p/2}\left(t - \frac{m}{2^{p}} \right), & t \in \left[ \frac{2m-1}{2^{p+1}}, \frac{m}{2^{p}}\right),\\ 0, & \text{otherwise}. \end{cases} \end{align*} Since every $G_{pm}$ satisfies $G_{pm}(0) = 0$, we are only able to expand functions $f$ with $f(0)=0$ in terms of this family $(G_{pm})$. Therefore, we complete $(G_{pm})$ once more by defining $G_{-10}(t) := 1$ for all $t \in [0,1]$. To abbreviate notation, we define the times $t^i_{pm}$, $i = 0,1,2$, as \begin{align*} t_{pm}^0 := \frac{m-1}{2^p}, \quad t_{pm}^1 := \frac{2m-1}{2^{p+1}}, \quad t_{pm}^2 := \frac{m}{2^p}, \end{align*} for $p \in \mathbb{N}$ and $1 \le m \le 2^p$. Further, we set $t^0_{-10} := 0$, $t^1_{-10}:= 0$, $t^2_{-10}:=1$, and $t^0_{00}:=0$, $t^1_{00}:=1$, $t^2_{00}:=1$, as well as $t^i_{p0} := 0$ for $p \ge 1$ and $i = 0,1,2$. The definition of $t^i_{-10}$ and $t^i_{00}$ for $i\neq 1$ is rather arbitrary, but the definition for $i = 1$ simplifies for example the statement of Lemma~\ref{l:schauder functions give linear interpolation} below. For $f \in C([0,1],\mathbb{R}^d)$, $p\in \mathbb{N}$, and $1 \le m \le 2^p$, we write \begin{align*} \langle H_{pm}, \mathrm{d} f \rangle :=\,& 2^{\frac{p}{2}}\left[ \left(f\left(t^1_{pm}\right) - f\left(t^0_{pm}\right)\right) - \left( f\left(t^2_{pm}\right) - f\left(t^1_{pm}\right)\right)\right] \\ =\, & 2^{\frac{p}{2}}\left[ 2 f\left(t^1_{pm}\right) - f\left(t^0_{pm}\right) - f\left(t^2_{pm}\right)\right] \end{align*} and $\langle H_{00}, \mathrm{d} f \rangle := f(1) - f(0)$ as well as $\langle H_{-10}, \mathrm{d} f \rangle := f(0)$. Note that we only defined $G_{-10}$ and not $H_{-10}$. \begin{lem}\label{l:schauder functions give linear interpolation} For $f\colon[0,1]\rightarrow \mathbb{R}^d$, the function \[ f_k := \langle H_{-10}, \mathrm{d} f\rangle G_{-10} + \langle H_{00}, \mathrm{d} f \rangle G_{00} + \sum_{p=0}^k \sum_{m=1}^{2^p} \langle H_{pm}, \mathrm{d} f \rangle G_{pm} = \sum_{p=-1}^k \sum_{m=0}^{2^p} \langle H_{pm}, \mathrm{d} f \rangle G_{pm} \] is the linear interpolation of $f$ between the points $t^1_{-10}, t^1_{00}, t^1_{pm}$, $0 \le p \le k, 1 \le m \le 2^p$. If $f$ is continuous, then $(f_k)$ converges uniformly to $f$ as $k \rightarrow \infty$. \end{lem} Ciesielski~\cite{Ciesielski1960} observed that if $f$ is H\"older-continuous, then the series $(f_k)$ converges absolutely and the speed of convergence can be estimated in terms of the H\"older norm of $f$.
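The coefficients and the interpolations $f_k$ are straightforward to compute. The following Python sketch (our own illustration; the helper names \texttt{haar\_coefficient}, \texttt{schauder}, and \texttt{partial\_sum} are not notation from the text) evaluates $\langle H_{pm},\mathrm{d} f\rangle$ as a rescaled second order increment and checks numerically the uniform convergence of $f_k$ stated in Lemma~\ref{l:schauder functions give linear interpolation}.
\begin{verbatim}
import numpy as np

def haar_coefficient(f, p, m):
    # <H_pm, df> = 2^(p/2) * (2 f(t^1_pm) - f(t^0_pm) - f(t^2_pm))
    t0, t1, t2 = (m - 1) / 2**p, (2*m - 1) / 2**(p + 1), m / 2**p
    return 2**(p / 2) * (2 * f(t1) - f(t0) - f(t2))

def schauder(p, m, t):
    # G_pm, the tent function supported on [t^0_pm, t^2_pm]
    t0, t1, t2 = (m - 1) / 2**p, (2*m - 1) / 2**(p + 1), m / 2**p
    return 2**(p / 2) * (np.clip(t, t0, t1) - t0 - (np.clip(t, t1, t2) - t1))

def partial_sum(f, k, t):
    # f_k = f(0) G_{-10} + (f(1) - f(0)) G_{00} + sum_{p<=k} sum_m <H_pm, df> G_pm
    out = f(0.0) + (f(1.0) - f(0.0)) * t
    for p in range(k + 1):
        for m in range(1, 2**p + 1):
            out = out + haar_coefficient(f, p, m) * schauder(p, m, t)
    return out

f = lambda t: np.sin(3 * np.pi * t) + t
t = np.linspace(0.0, 1.0, 513)
for k in (2, 4, 6, 8):
    print(k, np.max(np.abs(partial_sum(f, k, t) - f(t))))   # uniform error -> 0
\end{verbatim}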
The norm $\lVert \cdot \rVert_{C^\alpha}$ is defined as \[ \lVert f \rVert_{C^\alpha} := \lVert f \rVert_\infty + \sup_{0\le s < t \le 1} \frac{|f_{s,t}|}{|t-s|^\alpha}, \] where we introduced the notation \[ f_{s,t} := f(t) - f(s). \] \begin{lem}[\cite{Ciesielski1960}]\label{l:ciesielski} Let $\alpha \in (0,1)$. A continuous function $f: [0,1] \rightarrow \mathbb{R}^d$ is in $C^\alpha$ if and only if $\sup_{p,m} 2^{p(\alpha - 1/2)} |\langle H_{pm}, \mathrm{d} f\rangle| < \infty$. In this case \begin{gather}\label{e:ciesielski isomorphism} \sup_{p,m} 2^{p(\alpha - 1/2)} |\langle H_{pm}, \mathrm{d} f\rangle| \simeq \lVert f \rVert_{C^\alpha} \text{ and} \\ \nonumber \lVert f - f_{N-1} \rVert_\infty = \Big\lVert \sum_{p = N}^\infty \sum_{m=0}^{2^p} \langle H_{pm}, \mathrm{d} f\rangle G_{pm} \Big\rVert_\infty \lesssim \lVert f \rVert_{C^\alpha} 2^{-\alpha N}. \end{gather} \end{lem} Before we continue, let us slightly change notation. We want to get rid of the factor $2^{-p/2}$ in \eqref{e:ciesielski isomorphism}, and therefore we define for $p \in \mathbb{N}$ and $0 \le m \le 2^p$ the rescaled functions \begin{align*} \chi_{pm} := 2^{\frac{p}{2}} H_{pm} \qquad \text{and} \qquad \varphi_{pm} := 2^{\frac{p}{2}} G_{pm}, \end{align*} as well as $\varphi_{-10} := G_{-10} \equiv 1$. Then we have for $p \in \mathbb{N}$ and $1 \le m \le 2^p$ \begin{align*} \lVert\varphi_{pm}\rVert_\infty = \varphi_{pm}(t^1_{pm}) = 2^{\frac{p}{2}} \int_{t^0_{pm}}^{t^1_{pm}} 2^{\frac{p}{2}} \mathrm{d} s = 2^p \left( \frac{2m-1}{2^{p+1}} - \frac{2m - 2}{2^{p+1}}\right) = \frac{1}{2}, \end{align*} so that $\lVert \varphi_{pm}\rVert_\infty \le 1$ for all $p,m$. The expansion of $f$ in terms of $(\varphi_{pm})$ is given by $f_k = \sum_{p=-1}^k \sum_{m=0}^{2^p} f_{pm} \varphi_{pm}$, where $f_{-10} := f(0)$, $f_{00} := f(1)-f(0)$, and for $p \in \mathbb{N}$ and $m \ge 1$ \begin{align*} f_{pm} := 2^{-p} \langle \chi_{pm}, \mathrm{d} f \rangle = 2 f\left(t^1_{pm}\right) - f\left(t^0_{pm}\right) - f\left(t^2_{pm}\right) = f_{t^0_{pm}, t^1_{pm}} - f_{t^1_{pm}, t^2_{pm}}. \end{align*} We write $\langle \chi_{pm}, \mathrm{d} f\rangle := 2^p f_{pm}$ for all values of $(p,m)$, despite not having defined $\chi_{-10}$. \begin{defn} For $\alpha > 0$ and $f \colon [0,1] \to \mathbb{R}^d$ the norm $\lVert \cdot \rVert_{\alpha}$ is defined as \[ \lVert f \rVert_\alpha := \sup_{p,m} 2^{p\alpha} |f_{pm}|, \] and we write \begin{align*} \mathcal{C}^\alpha := \mathcal{C}^\alpha(\mathbb{R}^d) := \left\{f \in C( [0,1], \mathbb{R}^d): \lVert f \rVert_\alpha < \infty\right\}. \end{align*} \end{defn} The space $\mathcal{C}^\alpha$ is isomorphic to $\ell^\infty(\mathbb{R}^d)$, in particular it is a Banach space. For $\alpha \in (0,1)$, Ciesielski's isomorphism (Lemma~\ref{l:ciesielski}) states that $\mathcal{C}^\alpha = C^\alpha([0,1],\mathbb{R}^d)$. Moreover, it can be shown that $\mathcal{C}^1$ is the Zygmund space of continuous functions $f$ satisfying $|2f(x) - f(x+h) - f(x-h)| \lesssim h$. But for $\alpha > 1$, there is no reasonable identification of $\mathcal{C}^{\alpha}$ with a classical function space. For example if $\alpha \in (1,2)$, the space $C^{\alpha}([0,1], \mathbb{R}^d)$ consists of all continuously differentiable functions $f$ with $(\alpha-1)$--H\"older continuous derivative $\mathrm{D} f$. Since the tent-shaped functions $\varphi_{pm}$ are not continuously differentiable, even an $f$ with a finite Schauder expansion is generally not in $C^{\alpha}$.
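To see the characterization at work, the following short sketch (again our own illustration with made-up helper names) computes the coefficients $f_{pm}$ from samples of $f$ and evaluates $\sup_{p,m} 2^{p\alpha}|f_{pm}|$ up to a finite generation; for $f(t)=\sqrt{t}$, which is exactly $1/2$--H\"older continuous on $[0,1]$, the quantity stays bounded for $\alpha = 1/2$ and grows with the truncation level for $\alpha > 1/2$. The coefficients $f_{-10}$ and $f_{00}$ are ignored since they do not affect the finiteness of the norm.
\begin{verbatim}
import numpy as np

def coefficients(f, level):
    # f_pm = 2 f(t^1_pm) - f(t^0_pm) - f(t^2_pm) for the generations p = 0..level
    coeffs = {}
    for p in range(level + 1):
        m = np.arange(1, 2**p + 1)
        t0, t1, t2 = (m - 1) / 2**p, (2*m - 1) / 2**(p + 1), m / 2**p
        coeffs[p] = 2 * f(t1) - f(t0) - f(t2)
    return coeffs

def sequence_norm(f, alpha, level=14):
    # sup_{p,m} 2^(p*alpha) |f_pm| over the first `level` generations
    return max(2**(p * alpha) * np.max(np.abs(c))
               for p, c in coefficients(f, level).items())

f = np.sqrt                      # exactly 1/2-Hoelder continuous on [0,1]
print(sequence_norm(f, 0.5))     # stays bounded as `level` grows
print(sequence_norm(f, 0.6))     # grows like 2^(0.1 * level): f is not in C^0.6
\end{verbatim}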
The a priori requirement of $f$ being continuous can be relaxed, but not much. Since the coefficients $(f_{pm})$ evaluate the function $f$ only in countably many points, a general $f$ will not be uniquely determined by its expansion. But for example it would suffice to assume that $f$ is c\`adl\`ag.

\paragraph{Littlewood-Paley notation.} We will employ notation inspired by Littlewood-Paley theory. For $p \ge -1$ and $f \in C([0,1])$ we define \begin{align*} \Delta_p f := \sum_{m=0}^{2^p} f_{pm} \varphi_{pm} \qquad \text{and} \qquad S_p f := \sum_{q \le p} \Delta_q f. \end{align*} We will occasionally refer to $(\Delta_p f)$ as the Schauder blocks of $f$. Note that \[ \mathcal{C}^\alpha = \{f \in C([0,1],\mathbb{R}^d): \lVert (2^{p\alpha} \lVert \Delta_p f \rVert_\infty)_p \rVert_{\ell^\infty} < \infty\}. \]

\subsection{Young integration and rough paths} \label{s:rough paths}
Here we present the main concepts of Young integration and of rough path theory. The results presented in this section will not be applied in the remainder of this paper, but we feel that it could be useful for the reader to be familiar with the basic concepts of rough paths, since they are the main inspiration for the constructions developed below. Young's integral~\cite{Young1936} allows us to define $\int f \mathrm{d} g$ for $f \in C^\alpha$, $g \in C^\beta$, and $\alpha + \beta > 1$. More precisely, let $f \in C^\alpha$ and $g \in C^\beta$ be given, let $t \in [0,1]$, and let $\pi = \{t_0, \dots, t_N\}$ be a partition of $[0,t]$, i.e. $0=t_0 < t_1 < \dots < t_N=t$. Then it can be shown that the Riemann sums \begin{align*} \sum_{t_k \in \pi} f(t_k) (g(t_{k+1})-g(t_k)) := \sum_{k=0}^{N-1} f(t_k) (g(t_{k+1})-g(t_k)) \end{align*} converge as the mesh size $\max_{k=0,\dots, N-1} |t_{k+1}-t_k|$ tends to zero, and that the limit does not depend on the approximating sequence of partitions. We denote the limit by $\int_0^t f(s) \mathrm{d} g(s)$, and we define $\int_s^t f(r) \mathrm{d} g(r) := \int_0^t f(r) \mathrm{d} g(r) - \int_0^s f(r) \mathrm{d} g(r)$. The function $t \mapsto \int_0^t f(s) \mathrm{d} g(s)$ is uniquely characterized by the fact that \begin{align*} \left| \int_s^t f(r) \mathrm{d} g(r) - f(s) (g(t)-g(s)) \right| \lesssim |t-s|^{\alpha + \beta} \lVert f \rVert_\alpha \lVert g \rVert_\beta \end{align*} for all $s,t \in [0,1]$. The condition $\alpha + \beta > 1$ is sharp, in the sense that there exist $f, g \in C^{1/2}$ and a sequence of partitions $(\pi_n)_{n \in \mathbb{N}}$ with mesh size going to zero, for which the Riemann sums $\sum_{t_k \in \pi_n} f(t_k) (g(t_{k+1})-g(t_k))$ do not converge as $n$ tends to $\infty$. The condition $\alpha + \beta > 1$ excludes one of the most important examples: we would like to take $g$ as a sample path of Brownian motion, and $f = F(g)$. Lyons' theory of rough paths~\cite{Lyons1998} overcomes this restriction by stipulating the ``existence'' of basic integrals and by defining a large class of related integrals as their functionals. Here we present the approach of Gubinelli~\cite{Gubinelli2004}. Let $\alpha \in (1/3,1)$ and assume that we are given two functions $v,w \in C^\alpha$, as well as an associated ``Riemann integral'' $I^{v,w}_{s,t} = \int_s^t v(r) \mathrm{d} w(r)$ that satisfies the estimate \begin{align}\label{e:area estimate} |\Phi^{v,w}_{s,t}|:=|I^{v,w}_{s,t} - v(s) w_{s,t}| \lesssim |t-s|^{2\alpha}. \end{align} The remainder $\Phi^{v,w}$ is often (incorrectly) called the \emph{area} of $v$ and $w$.
This name has its origin in the fact that its antisymmetric part $1/2(\Phi^{v,w}_{s,t} - \Phi^{w,v}_{s,t})$ corresponds to the algebraic area spanned by the curve $((v(r), w(r)): r \in [s,t])$ in the plane $\mathbb{R}^2$. If $\alpha \le 1/2$, then the integral $I^{v,w}$ cannot be constructed using Young's theory of integration, and also $I^{v,w}$ is not uniquely characterized by \eqref{e:area estimate}. But let us assume nonetheless that we are given such an integral $I^{v,w}$ satisfying \eqref{e:area estimate}. A function $f \in C^\alpha$ is \emph{controlled} by $v \in C^\alpha$ if there exists $f^v \in C^\alpha$ such that for all $s,t \in [0,1]$ \begin{align}\label{e:controlled} |f_{s,t} - f^v_s v_{s,t}| \lesssim |t-s|^{2\alpha}. \end{align} \begin{prop}[\cite{Gubinelli2004}, Theorem 1]\label{p:Gubinelli rough paths} Let $\alpha > 1/3$, let $v,w \in C^\alpha$, and let $I^{v,w}$ satisfy \eqref{e:area estimate}. Let $f$ and $g$ be controlled by $v$ and $w$ respectively, with derivatives $f^v$ and $g^w$. Then there exists a unique function $I(f,g) = \int_0^\cdot f(s) \mathrm{d} g(s)$ that satisfies for all $s,t \in [0,1]$ \begin{align*} |I(f,g)_{s,t} - f(s) g_{s,t} - f^v(s) g^w(s) \Phi^{v,w}_{s,t}| \lesssim |t-s|^{3\alpha}. \end{align*} If $(\pi_n)$ is a sequence of partitions of $[0,t]$, with mesh size going to zero, then \begin{align*} I(f,g)(t) = \lim_{n \rightarrow \infty} \sum_{t_k \in \pi_n} \left( f(t_k) g_{t_k, t_{k+1}} + f^v_{t_k} g^w_{t_k} \Phi^{v,w}_{t_k, t_{k+1}}\right). \end{align*} \end{prop} The integral $I(f,g)$ coincides with the Riemann-Stieltjes integral and with the Young integral, whenever these are defined. Moreover, the integral map is self-consistent, in the sense that if we consider $v$ and $w$ as controlled by themselves, with derivatives $v^v = w^w \equiv 1$, then $I(v,w) = I^{v,w}$. The only remaining problem is the construction of the integral $I^{v,w}$. This is usually achieved with probabilistic arguments. If $v$ and $w$ are Brownian motions, then we can for example use It\^{o} or Stratonovich integration to define $I^{v,w}$. Already in this simple example we see that the integral $I^{v,w}$ is not unique if $v$ and $w$ are outside of the Young regime. It is possible to go below $\alpha = 1/3$ by stipulating the existence of higher order iterated integrals. For details see~\cite{Gubinelli2010} or any book on rough paths, such as~\cite{Lyons2002,Lyons2007,Friz2010,Friz2013}.

\section{Paradifferential calculus and Young integration}\label{s:paradifferential calculus}
In this section we develop the basic tools that will be required for our rough path integral in terms of Schauder functions, and we study Young's integral and its different components.
\subsection{Paradifferential calculus with Schauder functions}
Here we introduce a ``paradifferential calculus'' in terms of Schauder functions. Paradifferential calculus is usually formulated in terms of Littlewood-Paley blocks and was initiated by Bony~\cite{Bony1981}. For a gentle introduction see~\cite{Bahouri2011}. We will need to study the regularity of $\sum_{p,m} u_{pm} \varphi_{pm}$, where $u_{pm}$ are functions and not constant coefficients. For this purpose we define the following space of sequences of functions.
\begin{defn} If $(u_{pm}: p \ge -1, 0\le m\le2^p)$ is a family of affine functions of the form $u_{pm}: [t^0_{pm}, t^2_{pm}] \rightarrow \mathbb{R}^d$, we set for $\alpha > 0$ \begin{align*} \lVert (u_{pm})\rVert_{\mathcal{A}^\alpha} := \sup_{p,m} 2^{p\alpha} \lVert u_{pm}\rVert_\infty, \end{align*} where it is understood that $\lVert u_{pm} \rVert_\infty := \max_{t \in [t^0_{pm}, t^2_{pm}]} |u_{pm}(t)|$. The space $\mathcal{A}^\alpha := \mathcal{A}^\alpha(\mathbb{R}^d)$ is then defined as \[ \mathcal{A}^\alpha := \left\{(u_{pm})_{p \ge -1, 0\le m\le2^p}: u_{pm}\in C([t^0_{pm}, t^2_{pm}], \mathbb{R}^d) \text{ is affine and } \lVert (u_{pm})\rVert_{\mathcal{A}^\alpha}<\infty \right\}. \] \end{defn} In Appendix~\ref{a:schauder with affine coefficients} we prove the following regularity estimate: \begin{lem}\label{l:upm hoelder} Let $\alpha \in (0,2)$ and let $(u_{pm})\in \mathcal{A}^\alpha$. Then $\sum_{p,m} u_{pm} \varphi_{pm} \in \mathcal{C}^\alpha$, and \begin{align*} \Bigl\lVert \sum_{p,m} u_{pm} \varphi_{pm}\Bigr\rVert_\alpha \lesssim \lVert (u_{pm}) \rVert_{\mathcal{A}^\alpha}. \end{align*} \end{lem} Let us introduce a paraproduct in terms of Schauder functions. \begin{lem}\label{l:paraproduct definition} Let $\beta \in (0,2)$, let $v \in C([0,1], \mathcal{L}(\mathbb{R}^d,\mathbb{R}^n))$, and $w \in \mathcal{C}^\beta(\mathbb{R}^d)$. Then \begin{align}\label{e:paraproduct definition} \pi_<(v,w) := \sum_{p=0}^\infty S_{p-1} v \Delta_p w \in \mathcal{C}^\beta(\mathbb{R}^n) \hspace{10pt} \text{and} \hspace{10pt} \lVert \pi_<(v,w) \rVert_\beta \lesssim \lVert v \rVert_\infty \lVert w \rVert_\beta. \end{align} \end{lem} \begin{proof} We have $\pi_<(v,w) = \sum_{p,m} u_{pm} \varphi_{pm}$ with $u_{pm} = (S_{p-1} v)|_{[t^0_{pm},t^2_{pm}]} w_{pm}$. For every $(p,m)$, the function $(S_{p-1} v)|_{[t^0_{pm},t^2_{pm}]}$ is the linear interpolation of $v$ between $t^0_{pm}$ and $t^2_{pm}$. As $\lVert (S_{p-1} v)|_{[t^0_{pm},t^2_{pm}]} w_{pm} \rVert_\infty \le 2^{-p\beta}\lVert v \rVert_\infty \lVert w \rVert_\beta$, the statement follows from Lemma~\ref{l:upm hoelder}. \end{proof} \begin{rmk} If $v \in \mathcal{C}^\alpha(\mathbb{R})$ and $w \in \mathcal{C}^\beta(\mathbb{R})$, we can decompose the product $vw$ into three components, $vw = \pi_<(v,w) + \pi_>(v,w) + \pi_\circ(v,w)$, where $\pi_>(v,w) := \pi_<(w,v)$ and $\pi_\circ(v,w):= \sum_p \Delta_p v \Delta_p w$, and we have the estimates \begin{align*} \lVert \pi_>(v,w) \rVert_\alpha \lesssim \lVert v \rVert_\alpha \lVert w \rVert_\infty, \qquad \text{and}\qquad \lVert \pi_\circ(v,w) \rVert_{\alpha+\beta} \lesssim \lVert v \rVert_\alpha \lVert w \rVert_\beta \end{align*} whenever $\alpha+\beta \in (0,2)$. However, we will not use this. \end{rmk} The paraproduct allows us to ``paralinearize'' nonlinear functions. We allow for a smoother perturbation, which will come in handy when constructing global-in-time solutions to SDEs. \begin{prop}\label{p:paralinearization} Let $\alpha \in (0,1/2)$, $\beta \in (0,\alpha]$, let $v \in \mathcal{C}^\alpha(\mathbb{R}^d)$, $w \in \mathcal{C}^{\alpha+\beta}$, and $F \in C^{1+\beta/\alpha}_b(\mathbb{R}^d,\mathbb{R})$. Then \begin{equation}\label{e:paralinearization estimate} \lVert F(v+w) - \pi_<(\mathrm{D} F(v+w),v) \rVert_{\alpha + \beta} \lesssim \lVert F \rVert_{C^{1+\beta/\alpha}_b} (1 + \lVert v \rVert_\alpha)^{1+\beta/\alpha} (1 + \lVert w \rVert_{\alpha+\beta}).
\end{equation} If $F \in C^{2+\beta/\alpha}_b$, then $F(v) - \pi_<(\mathrm{D} F(v),v)$ depends on $v$ in a locally Lipschitz continuous way: \begin{align}\label{e:paralinearization lipschitz} \nonumber &\lVert F(v) - \pi_<(\mathrm{D} F(v),v) - (F(u) - \pi_<(\mathrm{D} F(u),u)) \rVert_{\alpha + \beta} \\ &\hspace{160pt} \lesssim \lVert F \rVert_{C^{2+\beta/\alpha}_b} (1 + \lVert v \rVert_\alpha + \lVert u \rVert_\alpha)^{1+\beta/\alpha} \lVert v - u\rVert_{\alpha}. \end{align} \end{prop} \begin{proof} First note that $\lVert F(v+w) \rVert_\infty \le \lVert F \rVert_\infty$, which implies the required estimate for $(p,m) = (-1,0)$ and $(p,m) = (0,0)$. For all other values of $(p,m)$ we apply a Taylor expansion: \begin{align*} (F(v+w))_{pm} = \mathrm{D} F(v(t^1_{pm}) + w(t^1_{pm}))v_{pm} + R_{pm}, \end{align*} where $|R_{pm}| \lesssim 2^{- p (\alpha+\beta)} \lVert F\rVert_{C^{1+\beta/\alpha}_b} (\lVert v \rVert_\alpha^{1+\beta/\alpha} + \lVert w \rVert_{\alpha+\beta})$. Subtracting $\pi_<(\mathrm{D} F(v+w),v)$ gives \begin{align*} &F(v+w) - \pi_<(\mathrm{D} F(v+w),v) \\ &\hspace{60pt}= \sum_{pm} [\mathrm{D} F(v(t^1_{pm}) + w(t^1_{pm})) - (S_{p-1} \mathrm{D} F(v+w))|_{[t^0_{pm}, t^2_{pm}]}] v_{pm} \varphi_{pm} + R. \end{align*} Now $(S_{p-1} \mathrm{D} F(v+w))|_{[t^0_{pm}, t^2_{pm}]}$ is the linear interpolation of $\mathrm{D} F(v+w)$ between $t^0_{pm}$ and $t^2_{pm}$, so according to Lemma~\ref{l:upm hoelder} it suffices to note that \begin{align*} &\lVert [\mathrm{D} F(v(t^1_{pm})+ w(t^1_{pm})) - (S_{p-1} \mathrm{D} F(v+w))|_{[t^0_{pm}, t^2_{pm}]}] v_{pm}\rVert_{\infty} \\ &\hspace{50pt} \lesssim 2^{-p\beta} \lVert \mathrm{D} F(v+w) \rVert_{C^\beta} 2^{-p\alpha} \lVert v \rVert_\alpha \lesssim 2^{-p(\alpha+\beta)} \lVert F \rVert_{C^{1+\beta/\alpha}_b} (1+\lVert v \rVert_\alpha + \lVert w \rVert_\alpha)^{\beta/\alpha} \lVert v \rVert_\alpha. \end{align*} The local Lipschitz continuity is shown in the same way. \end{proof} \begin{rmk} Since $v$ takes its values in a compact set, it actually suffices to have $F \in C^{1+\beta/\alpha}$ without assuming boundedness. Of course, then the estimates in Proposition~\ref{p:paralinearization} have to be adapted. \end{rmk} \begin{rmk}\label{r:gubinelli controlled implies our controlled} The same proof shows that if $f$ is controlled by $v$ in the sense of Section~\ref{s:rough paths}, i.e. $f_{s,t} = f^v(s) v_{s,t} + R_{s,t}$ with $f^v \in \mathcal{C}^\alpha$ and $|R_{s,t}|\le \lVert R\rVert_{2\alpha} |t-s|^{2\alpha}$, then $f - \pi_<(f^v,v) \in \mathcal{C}^{2\alpha}$. \end{rmk}

\subsection{Young's integral and its different components}\label{s:young}
In this section we construct Young's integral using the Schauder expansion. If $v \in \mathcal{C}^\alpha$ and $w \in \mathcal{C}^\beta$, then we formally define \begin{align*} \int_0^\cdot v(s) \mathrm{d} w(s) := \sum_{p,m} \sum_{q,n} v_{pm} w_{qn} \int_0^\cdot \varphi_{pm}(s) \mathrm{d} \varphi_{qn}(s) = \sum_{p,q} \int_0^\cdot \Delta_p v(s) \mathrm{d} \Delta_q w(s). \end{align*} We show that this definition makes sense provided that $\alpha+\beta>1$, and we identify three components of the integral that behave quite differently. This will be our starting point towards an extension of the integral beyond the Young regime. In a first step, let us calculate the iterated integrals of Schauder functions. \begin{lem}\label{l:iterated schauder integrals1} Let $p > q \ge 0$.
Then \begin{align}\label{e:iterated schauder integral p>q} \int_0^1 \varphi_{pm}(s) \mathrm{d} \varphi_{qn}(s) = 2^{-p - 2} \chi_{qn}(t^0_{pm}) \end{align} for all $m,n$. If $p = q$, then $\int_0^1 \varphi_{pm}(s) \mathrm{d} \varphi_{pn}(s) = 0$, except if $p = q = 0$, in which case the integral is bounded by 1. If $0 \le p < q$, then for all $(m,n)$ we have \begin{align}\label{e:iterated schauder integral q<p} \int_0^1 \varphi_{pm}(s) \mathrm{d} \varphi_{qn}(s) = - 2^{-q - 2} \chi_{pm}\left(t^0_{qn}\right). \end{align} If $p=-1$, then the integral is bounded by 1. \end{lem} \begin{proof} The cases $p = q$ and $p=-1$ are easy, so let $p > q \ge 0$. Since $\chi_{qn} \equiv \chi_{qn}(t^0_{pm})$ on the support of $\varphi_{pm}$, we have \begin{align*} \int_0^1 \varphi_{pm}(s) \mathrm{d} \varphi_{qn}(s) = \chi_{qn}(t^0_{pm}) \int_0^1 \varphi_{pm}(s) \mathrm{d} s = \chi_{qn}(t^0_{pm}) 2^{-p-2}. \end{align*} If $0 \le p < q$, then integration by parts and \eqref{e:iterated schauder integral p>q} imply \eqref{e:iterated schauder integral q<p}. \end{proof} Next we estimate the coefficients of iterated integrals in the Schauder basis. \begin{lem}\label{l:schauder coefficients of iterated integrals} Let $i,p\ge -1$, $q \ge 0$, $0\le j \le 2^i$, $0\le m \le 2^p$, $0\le n \le 2^q$. Then \begin{align}\label{e:schauder coefficients of iterated integrals good} 2^{-i} \Big|\Big\langle \chi_{ij}, \mathrm{d}\Big(\int_0^\cdot\varphi_{pm} \chi_{qn}\mathrm{d} s\Big)\Big\rangle\Big| \le 2^{-2(i \vee p \vee q) + p + q}, \end{align} except if $p<q=i$. In this case we only have the worse estimate \begin{align}\label{e:schauder coefficients of iterated integrals bad} 2^{-i} \Big|\Big\langle \chi_{ij}, \mathrm{d}\Big(\int_0^\cdot\varphi_{pm} \chi_{qn}\mathrm{d} s\Big)\Big\rangle\Big| \le 1. \end{align} \end{lem} \begin{proof} We have $\langle \chi_{-10}, \mathrm{d}(\int_0^\cdot \varphi_{pm} \chi_{qn}\mathrm{d} s)\rangle = 0$ for all $(p,m)$ and $(q,n)$. So let $i \ge 0$. If $i < p \vee q$, then $\chi_{ij}$ is constant on the support of $\varphi_{pm}\chi_{qn}$, and therefore Lemma~\ref{l:iterated schauder integrals1} gives \[ 2^{-i} \left|\langle \chi_{ij},\varphi_{pm} \chi_{qn}\rangle\right| \le \left|\langle \varphi_{pm}, \chi_{qn}\rangle\right| \le 2^{ p + q -2(p\vee q)} = 2^{-2(i \vee p \vee q) + p + q}. \] Now let $i > q$. Then $\chi_{qn}$ is constant on the support of $\chi_{ij}$, and therefore another application of Lemma~\ref{l:iterated schauder integrals1} implies that \[ 2^{-i} \left|\langle \chi_{ij}, \varphi_{pm}\chi_{qn}\rangle\right| \le 2^{-i} 2^q 2^{p+i -2(p\vee i)} = 2^{-2(i \vee p \vee q) + p + q}. \] The only remaining case is $i=q \ge p$, in which \[ 2^{-i} \left|\langle \chi_{ij},\varphi_{pm} \chi_{qn}\rangle\right| \le 2^{i} \int_{t^0_{ij}}^{t^2_{ij}} \varphi_{pm}(s) \mathrm{d} s \le \lVert \varphi_{pm} \rVert_\infty \le 1. \] \end{proof} \begin{cor}\label{c:schauder blocks} Let $i, p\ge -1$ and $q \ge 0$. Let $v \in C([0,1],\mathcal{L}(\mathbb{R}^d,\mathbb{R}^n))$ and $w \in C([0,1],\mathbb{R}^d)$. Then \begin{align}\label{e:schauder blocks good} \Big\lVert \Delta_i\Big(\int_0^\cdot \Delta_p v(s) \mathrm{d} \Delta_q w(s)\Big)\Big\rVert_\infty \lesssim 2^{-(i\vee p\vee q) - i+p+q} \lVert \Delta_p v \rVert_\infty \lVert \Delta_q w \rVert_\infty, \end{align} except if $i=q>p$.
In this case we only have the worse estimate \begin{align}\label{e:schauder blocks bad} \Big \lVert \Delta_i\Big(\int_0^\cdot \Delta_p v (s) \mathrm{d} \Delta_q w(s)\Big)\Big\rVert_\infty \lesssim \lVert \Delta_p v \rVert_\infty \lVert \Delta_q w \rVert_\infty. \end{align} \end{cor} \begin{proof} The case $i = -1$ is easy, so let $i \ge 0$. We have \begin{align*} \Delta_i\Big(\int_0^\cdot \Delta_p v(s) \mathrm{d} \Delta_q w(s)\Big) = \sum_{j,m,n} v_{pm} w_{qn} \langle 2^{-i} \chi_{ij}, \varphi_{pm} \chi_{qn}\rangle \varphi_{ij}. \end{align*} For fixed $j$, there are at most $2^{(i\vee p\vee q) - i}$ non-vanishing terms in the double sum. Hence, we obtain from Lemma~\ref{l:schauder coefficients of iterated integrals} that \begin{align*} \Big\lVert \sum_{m,n} v_{pm} w_{qn} \langle 2^{-i} \chi_{ij}, \varphi_{pm} \chi_{qn}\rangle \varphi_{ij}\Big\rVert_\infty & \lesssim 2^{(i\vee p\vee q) - i} \lVert \Delta_p v \rVert_\infty \lVert \Delta_q w \rVert_\infty (2^{-2(i\vee p \vee q) + p + q} + \mathbf{1}_{i=q>p}) \\ & = (2^{-(i\vee p\vee q) - i + p + q} + \mathbf{1}_{i=q>p}) \lVert \Delta_p v \rVert_\infty \lVert \Delta_q w \rVert_\infty. \end{align*} \end{proof} \begin{cor}\label{c:schauder blocks product} Let $i,p,q \ge -1$. Let $v \in C([0,1],\mathcal{L}(\mathbb{R}^d,\mathbb{R}^n))$ and $w \in C([0,1],\mathbb{R}^d)$. Then for $p \vee q \le i$ we have \begin{align}\label{e:schauder blocks product good} \left\lVert \Delta_i\left(\Delta_p v \Delta_q w\right)\right\rVert_\infty \lesssim 2^{-(i\vee p\vee q) - i+p+q} \lVert \Delta_p v \rVert_\infty \lVert \Delta_q w \rVert_\infty, \end{align} except if $i=q>p$ or $i=p>q$, in which case we only have the worse estimate \begin{align}\label{e:schauder blocks product bad} \left \lVert \Delta_i(\Delta_p v \Delta_q w)\right\rVert_\infty \lesssim \lVert \Delta_p v \rVert_\infty \lVert \Delta_q w \rVert_\infty. \end{align} If $p > i$ or $q>i$, then $\Delta_i(\Delta_p v \Delta_q w) \equiv 0$. \end{cor} \begin{proof} The case $p=-1$ or $q=-1$ is easy. Otherwise we apply integration by parts and note that the estimates \eqref{e:schauder blocks good} and \eqref{e:schauder blocks bad} are symmetric in $p$ and $q$. If for example $p>i$, then $\Delta_p v(t^k_{ij}) = 0$ for all $k,j$, which implies that $\Delta_i (\Delta_p v \Delta_q w) = 0$. \end{proof} The estimates \eqref{e:schauder blocks good} and \eqref{e:schauder blocks bad} allow us to identify different components of the integral $\int_0^\cdot v(s) \mathrm{d} w(s)$. More precisely, \eqref{e:schauder blocks bad} indicates that the series $\sum_{p<q} \int_0^\cdot \Delta_p v(s) \mathrm{d} \Delta_q w(s)$ is rougher than the remainder $\sum_{p \ge q} \int_0^\cdot \Delta_p v(s) \mathrm{d} \Delta_q w(s)$. Integration by parts gives \[ \sum_{p<q} \int_0^\cdot \Delta_p v(s) \mathrm{d} \Delta_q w(s) = \pi_<(v,w) - \sum_{p<q} \sum_{m,n} v_{pm} w_{qn} \int_0^\cdot \varphi_{qn}(s) \mathrm{d} \varphi_{pm}(s). \] This motivates us to decompose the integral into three components, namely \begin{align*} \sum_{p,q} \int_0^\cdot \Delta_p v(s) \mathrm{d} \Delta_q w(s) = L(v,w) + S(v,w) + \pi_<(v,w).
\end{align*} Here $L$ is defined as the antisymmetric \emph{L\'evy area} (we will justify the name below by showing that $L$ is closely related to the L\'evy area of certain dyadic martingales): \begin{align*} L(v,w) :=\,& \sum_{p>q} \sum_{m,n} (v_{pm} w_{qn} - v_{qn} w_{pm}) \int_0^\cdot \varphi_{pm} \mathrm{d} \varphi_{qn}\\ =\,& \sum_{p} \left(\int_0^\cdot \Delta_p v \mathrm{d} S_{p-1} w - \int_0^\cdot \mathrm{d} (S_{p-1} v) \Delta_{p} w\right). \end{align*} The \emph{symmetric part} $S$ is defined as \begin{align*} S(v,w) :=\, & \sum_{m,n \le 1} v_{0m} w_{0n} \int_0^\cdot \varphi_{0m} \mathrm{d} \varphi_{0n} + \sum_{p\ge 1} \sum_m v_{pm} w_{pm} \int_0^\cdot \varphi_{pm} \mathrm{d} \varphi_{pm} \\ =\,& \sum_{m,n\le 1} v_{0m} w_{0n} \int_0^\cdot \varphi_{0m} \mathrm{d} \varphi_{0n} + \frac{1}{2} \sum_{p\ge 1} \Delta_p v \Delta_p w, \end{align*} and $\pi_<$ is the paraproduct defined in \eqref{e:paraproduct definition}. As we observed in Lemma~\ref{l:paraproduct definition}, $\pi_<(v,w)$ is always well defined, and it inherits the regularity of $w$. Let us study $S$ and $L$. \begin{lem}\label{l:Levy area regularity} Let $\alpha, \beta \in (0,1)$ be such that $\alpha + \beta > 1$. Then $L$ is a bounded bilinear operator from $\mathcal{C}^\alpha \times \mathcal{C}^\beta$ to $\mathcal{C}^{\alpha+\beta}$. \end{lem} \begin{proof} We only argue for $\sum_{p} \int_0^\cdot \Delta_p v \mathrm{d} S_{p-1} w$, the term $- \int_0^\cdot \mathrm{d} (S_{p-1} v) \Delta_{p} w$ can be treated with the same arguments. Corollary~\ref{c:schauder blocks} (more precisely \eqref{e:schauder blocks good}) implies that \begin{align*} &\Big\lVert \sum_p \Delta_i \Big(\int_0^\cdot \Delta_p v \mathrm{d} S_{p-1} w\Big) \Big\rVert_\infty \\ &\hspace{50pt} \le \sum_{p\le i} \sum_{q<p} \Big\lVert \Delta_i \Big(\int_0^\cdot \Delta_p v \mathrm{d} \Delta_q w \Big)\Big\rVert_\infty + \sum_{p> i} \sum_{q<p} \Big\lVert \Delta_i \Big( \int_0^\cdot \Delta_p v \mathrm{d} \Delta_q w \Big)\Big\rVert_\infty\\ &\hspace{50pt} \le \bigg(\sum_{p\le i} \sum_{q<p} 2^{-2i + p + q} 2^{-p\alpha}\lVert v \rVert_\alpha 2^{-q\beta} \lVert w \rVert_\beta + \sum_{p> i} \sum_{q<p} 2^{- i + q} 2^{-p\alpha} \lVert v \rVert_\alpha 2^{-q\beta} \lVert w \rVert_\beta \bigg) \\ &\hspace{50pt} \lesssim_{\alpha + \beta} 2^{-i(\alpha+\beta)} \lVert v \rVert_\alpha \lVert w \rVert_\beta, \end{align*} where we used $\alpha < 1$ and $\beta < 1$, and for the second series we also used that $\alpha+\beta>1$. \end{proof} Unlike the L\'evy area $L$, the symmetric part $S$ is always well defined. It is also smooth. \begin{lem}\label{l:symmetric part} Let $\alpha,\beta \in (0,1)$. Then $S$ is a bounded bilinear operator from $\mathcal{C}^\alpha \times \mathcal{C}^\beta$ to $\mathcal{C}^{\alpha+\beta}$. \end{lem} \begin{proof} This is shown using the same arguments as in the proof of Lemma~\ref{l:Levy area regularity}. \end{proof} In conclusion, the integral consists of three components. The L\'evy area $L(v,w)$ is only defined if $\alpha + \beta>1$, but then it is smooth. The symmetric part $S(v,w)$ is always defined and smooth. And the paraproduct $\pi_<(v,w)$ is always defined, but it is rougher than the other components.
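For the reader who wants to see the decomposition in action, here is a small Python sketch (our own illustration; the helper names \texttt{S}, \texttt{D}, \texttt{I} and the test functions are ad hoc and not notation from the text). It builds the truncated Schauder blocks of two explicit functions, computes $L$, $S$ and $\pi_<$ at this truncation level, and checks that their sum agrees with the truncated integral $\int_0^\cdot S_N v \,\mathrm{d} (S_N w)$ up to rounding errors.
\begin{verbatim}
import numpy as np

N = 8                                    # truncation level (an arbitrary choice)
n = 2**(N + 1)                           # fine dyadic grid of mesh 2^(-N-1)
t = np.linspace(0.0, 1.0, n + 1)
v = np.sin(2 * np.pi * t) + t            # two explicit continuous functions
w = np.cos(3 * np.pi * t)

def S(f, p):
    # Partial sum S_p f: linear interpolation through the dyadic points j/2^(p+1)
    # (by the lemma on linear interpolation); S_{-1} f is the constant f(0).
    if p == -1:
        return np.full_like(f, f[0])
    step = n // 2**(p + 1)
    return np.interp(t, t[::step], f[::step])

def D(f, p):                             # Schauder block Delta_p f, for p >= 0
    return S(f, p) - S(f, p - 1)

def I(u, g):
    # int_0^. u dg, exact for u, g piecewise linear with kinks on the fine grid
    return np.concatenate([[0.0], np.cumsum(0.5 * (u[:-1] + u[1:]) * np.diff(g))])

para = sum(S(v, q - 1) * D(w, q) for q in range(N + 1))
sym  = sum(I(D(v, p), D(w, p)) for p in range(N + 1))
levy = sum(I(D(v, p), S(w, p - 1)) - I(D(w, p), S(v, p - 1)) for p in range(N + 1))

total = I(S(v, N), S(w, N))              # truncated integral int_0^. S_N v d(S_N w)
print(np.max(np.abs(levy + sym + para - total)))   # of the order of machine precision
\end{verbatim}
All integrals of piecewise linear functions with kinks on the fine dyadic grid are computed exactly by the trapezoidal rule, so the identity holds up to machine precision at every truncation level.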
To summarize: \begin{thm}[Young's integral]\label{t:young integral} Let $\alpha, \beta \in (0,1)$ be such that $\alpha + \beta > 1$, and let $v \in \mathcal{C}^\alpha$ and $w \in \mathcal{C}^\beta$. Then the integral \begin{align*} I(v,\mathrm{d} w) := \sum_{p,q} \int_0^\cdot \Delta_p v \mathrm{d} \Delta_q w = L(v,w) + S(v,w) + \pi_<(v,w) \in \mathcal{C}^\beta \end{align*} satisfies $\lVert I(v,\mathrm{d} w) \rVert_\beta \lesssim \lVert v \rVert_\alpha \lVert w \rVert_\beta$ and \begin{align}\label{e:Young controlled} \lVert I(v,\mathrm{d} w) - \pi_<(v,w) \rVert_{\alpha+\beta} \lesssim \lVert v \rVert_\alpha \lVert w \rVert_\beta. \end{align} \end{thm}

\subsubsection*{L\'evy area and dyadic martingales}
Here we show that the L\'evy area $L(v,w)(1)$ can be expressed in terms of the L\'evy area of suitable dyadic martingales. To simplify notation, we assume that $v(0) = w(0) = 0$, so that we do not have to bother with the components $v_{-10}$ and $w_{-10}$. We define a filtration $(\mathcal{F}_n)_{n \ge 0}$ on $[0,1]$ by setting \begin{align*} \mathcal{F}_n = \sigma(\chi_{p m}: 0 \le p \le n, 0 \le m \le 2^p), \end{align*} we set $\mathcal{F} = \bigvee_n \mathcal{F}_n$, and we consider the Lebesgue measure on $([0,1], \mathcal{F})$. On this space, the process $M_n = \sum_{p=0}^n \sum_{m=0}^{2^p} \chi_{pm}$, $n \in \mathbb{N}$, is a martingale. For any continuous function $v:[0,1] \rightarrow \mathbb{R}$ with $v(0) = 0$, the process \begin{align*} M^v_n = \sum_{p=0}^n \sum_{m=0}^{2^p} \langle 2^{-p} \chi_{pm}, \mathrm{d} v\rangle \chi_{pm} = \sum_{p=0}^n \sum_{m=0}^{2^p} v_{pm} \chi_{pm} = \partial_t S_n v, \end{align*} $n \in \mathbb{N}$, is a martingale transform of $M$, and therefore a martingale as well. Since it will be convenient later, we also define $\mathcal{F}_{-1} = \{\emptyset, [0,1]\}$ and $M_{-1}^v = 0$ for every $v$. Assume now that $v$ and $w$ are continuous real-valued functions with $v(0) = w(0) = 0$, and that the L\'evy area $L(v,w)(1)$ exists. Then it is given by \begin{align*} L(v,w)(1) & = \sum_{p=0}^\infty \sum_{q=0}^{p-1} \sum_{m,n} (v_{pm} w_{qn} - v_{qn} w_{pm}) \int_0^1 \varphi_{pm}(s) \chi_{qn}(s) \mathrm{d} s \\ & = \sum_{p=0}^\infty \sum_{q=0}^{p-1} \sum_{m,n} (v_{pm} w_{qn} - v_{qn} w_{pm}) 2^p \int_0^1 \chi_{qn}(s) 1_{[t^0_{pm}, t^2_{pm})}(s) \mathrm{d} s \langle \varphi_{pm},1 \rangle \\ & = \sum_{p=0}^\infty \sum_{q=0}^{p-1} \sum_{m,n} (v_{pm} w_{qn} - v_{qn} w_{pm}) 2^{-p} \int_0^1 \chi_{qn}(s) \chi_{pm}^2(s) \mathrm{d} s 2^{-p-2}\\ & = \sum_{p=0}^\infty \sum_{q=0}^{p-1} 2^{-2p-2} \int_0^1 \sum_{m,n} \sum_{m'} (v_{pm} w_{qn} - v_{qn} w_{pm}) \chi_{qn}(s) \chi_{pm}(s) \chi_{pm'}(s) \mathrm{d} s, \end{align*} where in the last step we used that $\chi_{pm}$ and $\chi_{pm'}$ have disjoint support for $m \neq m'$. The $p$--th \emph{Rademacher function} (or \emph{square wave}) is defined for $p \ge 1$ as \begin{align*} r_p(t) := \sum_{m'=1}^{2^p} 2^{-p} \chi_{pm'}(t). \end{align*} The martingale associated to the Rademacher functions is given by $R_0 := 0$ and $R_p := \sum_{k=1}^p r_k$ for $p \ge 1$. Let us write $\Delta M^v_p = M^v_p - M^v_{p-1}$ and similarly for $M^w$ and $R$ and all other discrete time processes that arise. This notation somewhat clashes with the expression $\Delta_p v$ for the dyadic blocks of $v$, but we will only use it in the following lines, where we do not directly work with dyadic blocks.
The quadratic covariation of two dyadic martingales is defined as $[M, N]_n := \sum_{k=0}^n \Delta M_k \Delta N_k$, and the discrete time stochastic integral is defined as $(M\cdot N)_n := \sum_{k=0}^n M_{k-1} \Delta N_k$. Writing $E(\cdot)$ for the integral $\int_0^1 \cdot \mathrm{d} s$, we obtain \begin{align*} L(v,w)(1) & = \sum_{p=0}^\infty \sum_{q=0}^{p-1} 2^{-p-2} E\left(\Delta M^v_p \Delta M^w_q \Delta R_p - \Delta M^v_q \Delta M^w_p \Delta R_p \right) \\ & = \sum_{p=0}^\infty 2^{-p-2} E\left(\left(M^w_{p-1} \Delta M^v_p - M^v_{p-1} \Delta M^w_p \right) \Delta R_p \right)\\ & = \sum_{p=0}^\infty 2^{-p-2} E\left(\Delta \left[M^w \cdot M^v - M^v \cdot M^w, R\right]_p \right). \end{align*} Hence, $L(v,w)(1)$ is closely related to the L\'evy area $1/2(M^w \cdot M^v - M^v \cdot M^w)$ of the dyadic martingale $(M^v, M^w)$.

\section{Paracontrolled paths and pathwise integration beyond Young}\label{s:schauder rough path integral}
In this section we construct a rough path integral in terms of Schauder functions.
\subsection{Paracontrolled paths}
We observed in Section~\ref{s:paradifferential calculus} that for $w \in \mathcal{C}^\alpha$ and $F \in C^{1+\beta/\alpha}_b$ we have $F(w) - \pi_<(\mathrm{D} F(w),w) \in \mathcal{C}^{\alpha+\beta}$. In Section~\ref{s:young} we observed that if $v \in \mathcal{C}^\alpha$, $w\in \mathcal{C}^\beta$ and $\alpha + \beta > 1$, then the Young integral $I(v,\mathrm{d} w)$ satisfies $I(v,\mathrm{d} w) - \pi_<(v,w) \in \mathcal{C}^{\alpha + \beta}$. Hence, in both cases the function under consideration can be written as $\pi_<(f^w,w)$ for a suitable $f^w$, plus a smooth remainder. We make this our definition of paracontrolled paths: \begin{defn} Let $\alpha > 0$ and $v \in \mathcal{C}^\alpha(\mathbb{R}^d)$. For $\beta \in (0,\alpha]$ we define \[ \mathcal{D}^{\beta}_v := \mathcal{D}^\beta_v(\mathbb{R}^n) := \left\{(f,f^v) \in \mathcal{C}^\alpha(\mathbb{R}^n) \times \mathcal{C}^\beta(\mathcal{L}(\mathbb{R}^d,\mathbb{R}^n)): f^\sharp = f - \pi_<(f^v,v)\in \mathcal{C}^{\alpha+\beta}(\mathbb{R}^n)\right\}. \] If $(f,f^v) \in \mathcal{D}^\beta_v$, then $f$ is called \emph{paracontrolled} by $v$. The function $f^v$ is called the \emph{derivative} of $f$ with respect to $v$. Abusing notation, we write $f \in \mathcal{D}^\beta_v$ when it is clear from the context what the derivative $f^v$ is supposed to be. We equip $\mathcal{D}^\beta_v$ with the norm \begin{align*} \lVert f \rVert_{v,\beta} := \lVert f^v \rVert_\beta + \lVert f^\sharp\rVert_{\alpha+\beta}. \end{align*} If $\tilde v \in \mathcal{C}^\alpha$ and $(\tilde f, \tilde f^{\tilde v}) \in \mathcal{D}^\beta_{\tilde v}$, then we also write \[ d_{\mathcal{D}^\beta}(f,\tilde f) := \lVert f^v - \tilde f^{\tilde v} \rVert_\beta + \lVert f^\sharp - \tilde f^\sharp \rVert_{\alpha+\beta}. \] \end{defn} \begin{ex} Let $\alpha \in (0,1)$ and $v \in \mathcal{C}^\alpha$. Then Proposition~\ref{p:paralinearization} shows that $F(v) \in \mathcal{D}^\beta_v$ for every $F \in C^{1+\beta/\alpha}_b$, with derivative $\mathrm{D} F(v)$. \end{ex} \begin{ex} Let $\alpha + \beta >1$ and $v\in \mathcal{C}^\alpha$, $w \in \mathcal{C}^\beta$. Then by~\eqref{e:Young controlled}, the Young integral $I(v,\mathrm{d} w)$ is in $\mathcal{D}^\alpha_w$, with derivative $v$.
\end{ex} \begin{ex}\label{ex:controlled old vs new} If $\alpha + \beta < 1$ and $v \in \mathcal{C}^\alpha$, then $(f,f^v) \in \mathcal{D}^\beta_v$ if and only if $|f_{s,t} - f^v_s v_{s,t}| \lesssim |t-s|^{\alpha+\beta}$ and in that case \[ \lVert f^v\rVert_\infty + \sup_{s\neq t} \frac{| f^v_{s,t} |}{|t-s|^\beta} + \sup_{s \neq t}\frac{|f_{s,t} - f^v_s v_{s,t}|}{|t-s|^{\alpha+\beta}} \lesssim \|f\|_{v,\beta}(1+\lVert v \rVert_\alpha). \] Indeed we have $|f^v_s v_{s,t} - \pi_<(f^v,v)_{s,t}| \lesssim |t-s|^{\alpha+\beta} \lVert f^v\rVert_\beta \lVert v \rVert_\alpha$, which can be shown using similar arguments as for Lemma~B.2 in~\cite{Gubinelli2012}. In other words, for $\alpha \in (0,1/2)$ the space $\mathcal{D}^\alpha_v$ coincides with the space of controlled paths defined in Section~\ref{s:rough paths}. \end{ex} The following commutator estimate, the analog of Theorem~2.3 of~\cite{Bony1981} in our setting, will be useful for establishing some stability properties of~$\mathcal{D}^\beta_v$. \begin{lem}\label{l:commutator 2} Let $\alpha, \beta \in (0,1)$, and let $u\in C([0,1],\mathcal{L}(\mathbb{R}^n;\mathbb{R}^m))$, $v\in \mathcal{C}^\alpha(\mathcal{L}(\mathbb{R}^d;\mathbb{R}^n))$, and $w \in \mathcal{C}^\beta(\mathbb{R}^d)$. Then \begin{align*} \lVert \pi_<(u, \pi_<(v,w)) - \pi_<(uv, w)\rVert_{\alpha + \beta} \lesssim \lVert u\rVert_\infty \lVert v \rVert_\alpha \lVert w \rVert_\beta. \end{align*} \end{lem} \begin{proof} We have \begin{align*} \pi_<(u, \pi_<(v,w)) - \pi_<(uv, w) & = \sum_{p,m} (S_{p-1}u (\pi_<(v,w))_{pm} - S_{p-1}(uv) w_{pm})\varphi_{pm} \end{align*} and $[S_{p-1}u (\pi_<(v,w))_{pm} - S_{p-1}(uv) w_{pm}]|_{[t^0_{pm},t^2_{pm}]}$ is affine. By Lemma \ref{l:upm hoelder} it suffices to control $\lVert[S_{p-1}u (\pi_<(v,w))_{pm} - S_{p-1}(uv) w_{pm}]|_{[t^0_{pm},t^2_{pm}]}\rVert_\infty$. The cases $(p,m) = (-1,0)$ and $(p,m) = (0,0)$ are easy, so let $p \ge 0$ and $m \ge 1$. For $r<q<p$ we denote by $m_q$ and $m_r$ the unique indices in generations $q$ and $r$ for which $\chi_{pm} \varphi_{q m_q} \not \equiv 0$ and $\chi_{pm} \varphi_{r m_r} \not \equiv 0$, respectively. We apply Lemma \ref{l:schauder coefficients of iterated integrals} to obtain for $q<p$ \begin{align*} |(S_{q-1}v \Delta_q w)_{pm}| & = \Big|\sum_{r<q} v_{rm_r} w_{qm_q} 2^{-p} \langle \chi_{pm}, \mathrm{d}(\varphi_{rm_r} \varphi_{qm_q})\rangle \Big|\\ & = \Big|\sum_{r<q} v_{rm_r} w_{qm_q} 2^{-p} \langle \chi_{pm}, \chi_{rm_r} \varphi_{qm_q} + \varphi_{rm_r} \chi_{qm_q} \rangle\Big| \\ & \le \lVert v \rVert_\alpha \lVert w \rVert_\beta \sum_{r<q} 2^{-r\alpha} 2^{-q\beta} 2^{-p}2^{-2p+r+p+q} \lesssim 2^{-2p+q(2-\alpha-\beta)} \lVert v \rVert_\alpha \lVert w \rVert_\beta. \end{align*} Hence \[ \Big\lVert \Big( S_{p-1}u \sum_{q<p} (S_{q-1} v \Delta_q w)_{pm} \Big) \Big|_{[t^0_{pm},t^2_{pm}]}\Big\rVert_{\infty} \lesssim \lVert u \rVert_\infty \lVert v \rVert_\alpha \lVert w \rVert_\beta 2^{-p(\alpha+\beta)}. \] If $p<q$, then $\Delta_q w(t^k_{pm}) = 0$ for all $k$ and $m$, and therefore $(S_{q-1}v \Delta_q w)_{pm}=0$, so that it only remains to bound $\|[S_{p-1}u (S_{p-1}v \Delta_p w)_{pm} - S_{p-1}(uv) w_{pm}]|_{[t^0_{pm}, t^2_{pm}]}\|_\infty$. We have $\Delta_p w(t^0_{pm}) = \Delta_p w(t^2_{pm})=0$ and $\Delta_p w(t^1_{pm}) = w_{pm}/2$.
On $[t^0_{pm}, t^2_{pm}]$, the function $S_{p-1} v$ is given by the linear interpolation of $v(t^0_{pm})$ and $v(t^2_{pm})$, and therefore $(S_{p-1}v \Delta_p w)_{pm} = \frac{1}{2}(v(t^0_{pm}) + v(t^2_{pm}))w_{pm}$, leading to \begin{align*} &\lVert [S_{p-1}u (S_{p-1}v \Delta_p w)_{pm} - S_{p-1}(uv) w_{pm}]|_{[t^0_{pm},t^2_{pm}]}\rVert_{\infty}\\ &\hspace{80pt} \le |w_{pm}|\times \Big\lVert\Big[ \Big(u(t^0_{pm})+\frac{\cdot-t^0_{pm}}{t^2_{pm}-t^0_{pm}}u_{t^0_{pm},t^2_{pm}}\Big)\frac{v(t^0_{pm}) + v(t^2_{pm})}{2} \\ &\hspace{160pt} - \Big((uv)(t^0_{pm}) + \frac{\cdot-t^0_{pm}}{t^2_{pm}-t^0_{pm}}(uv)_{t^0_{pm},t^2_{pm}}\Big)\Big]\Big|_{[t^0_{pm},t^2_{pm}]}\Big\rVert_{\infty} \\ &\hspace{80pt} \lesssim \lVert u\rVert_{\infty} \lVert v \rVert_\alpha \lVert w \rVert_\beta 2^{-p(\alpha+\beta)}, \end{align*} where the last step follows by rebracketing. \end{proof} As a consequence, we can show that paracontrolled paths are stable under the application of smooth functions. \begin{cor}\label{c:controlled under smooth} Let $\alpha \in (0,1)$, $\beta \in (0,\alpha]$, $v \in \mathcal{C}^\alpha$, and $f \in \mathcal{D}^\beta_v$ with derivative $f^v$. Let $F \in C^{1+\beta/\alpha}_b$. Then $F(f) \in \mathcal{D}^\beta_v$ with derivative $\mathrm{D} F(f) f^v$, and \begin{align*} \lVert F(f)\rVert_{v,\beta} \lesssim \lVert F\rVert_{C^{1+\beta/\alpha}_b} (1 + \lVert v \rVert_\alpha)^{1+\beta/\alpha} (1+\lVert f \rVert_{v,\beta}) (1 + \lVert f^v \rVert_\infty)^{1+\beta/\alpha}. \end{align*} Moreover, there exists a polynomial $P$ which satisfies for all $F \in C^{2+\beta/\alpha}_b$, $\tilde v \in \mathcal{C}^\alpha$, $\tilde f \in \mathcal{D}^\beta_{\tilde v}$, and \[ M = \max\{\lVert v \rVert_\alpha, \lVert \tilde v \rVert_\alpha, \lVert f \rVert_{v,\beta}, \lVert \tilde f \rVert_{\tilde v,\beta}\} \] the bound \[ d_{\mathcal{D}^\beta}(F(f), F(\tilde f)) \le P(M) \lVert F\rVert_{C^{2+\beta/\alpha}_b}(d_{\mathcal{D}^\beta}(f,\tilde f) + \lVert v - \tilde v\rVert_\alpha). \] \end{cor} \begin{proof} The estimate for $\|\mathrm{D} F(f) f^v\|_\beta$ is straightforward. For the remainder we apply Proposition~\ref{p:paralinearization} and Lemma~\ref{l:commutator 2} to obtain \begin{align*} \|F(f)^\sharp\|_{\alpha+\beta} & \le \|F(f) - \pi_<(\mathrm{D} F(f), f)\|_{\alpha+\beta} + \|\pi_<(\mathrm{D} F(f), f^\sharp)\|_{\alpha+\beta} \\ &\quad + \|\pi_<(\mathrm{D} F(f), \pi_<(f^v,v)) - \pi_<(\mathrm{D} F(f) f^v, v) \|_{\alpha+\beta} \\ &\lesssim \lVert F \rVert_{C^{1+\beta/\alpha}_b} (1 + \lVert \pi_<(f^v,v)\rVert_\alpha)^{1+\beta/\alpha}(1 + \lVert f^\sharp \rVert_{\alpha+\beta}) \\ &\quad + \lVert F \rVert_{C^1_b} \lVert f \rVert_{v,\beta} + \lVert F \rVert_{C^1_b} \lVert f^v \rVert_{\beta} \lVert v \rVert_\alpha \\ &\lesssim \lVert F\rVert_{C^{1+\beta/\alpha}_b} (1+\lVert f^v \rVert_{\infty})^{1+\beta/\alpha} (1 + \lVert v \rVert_\alpha)^{1+\beta/\alpha}(1+\lVert f \rVert_{v,\beta}). \end{align*} The difference $F(f) - F(\tilde f)$ is treated in the same way. \end{proof} When solving differential equations it will be crucial to have a bound which is linear in $\lVert f \rVert_{v,\beta}$. The superlinear dependence on $\lVert f^v \rVert_\infty$ will not pose any problem as we will always have $f^v = F(\tilde f)$ for some suitable $\tilde f$, so that for bounded $F$ we get $\lVert F(f)\rVert_{v,\beta} \lesssim_{F,v} 1 + \lVert f \rVert_{v,\beta}$.
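The following sketch (our own numerical experiment; the function names and parameters are ad hoc assumptions, not objects from the text) illustrates the gain of regularity behind Proposition~\ref{p:paralinearization} and Corollary~\ref{c:controlled under smooth}: for a path $v$ of H\"older type $\alpha$ and a smooth $F$, the Schauder coefficients of $F(v)$ decay roughly like $2^{-p\alpha}$, while those of the remainder $F(v) - \pi_<(\mathrm{D} F(v),v)$ decay roughly like $2^{-2p\alpha}$.
\begin{verbatim}
import numpy as np

N = 12
n = 2**(N + 1)
t = np.linspace(0.0, 1.0, n + 1)
alpha = 0.5
# A Weierstrass-type path that behaves like a C^alpha function on dyadic scales.
v = sum(2**(-k * alpha) * np.cos(2**k * np.pi * t) for k in range(12))

def S(f, p):                  # partial sum S_p f: linear dyadic interpolation
    if p == -1:
        return np.full_like(f, f[0])
    step = n // 2**(p + 1)
    return np.interp(t, t[::step], f[::step])

def D(f, p):                  # Schauder block Delta_p f, for p >= 0
    return S(f, p) - S(f, p - 1)

def coef(f, p):               # max_m |f_pm| at generation p, from the samples
    g = f[::n // 2**(p + 1)]
    return np.max(np.abs(2 * g[1:-1:2] - g[:-2:2] - g[2::2]))

F, dF = np.tanh, lambda x: 1.0 / np.cosh(x)**2
para = sum(S(dF(v), p - 1) * D(v, p) for p in range(N + 1))   # pi_<(DF(v), v)
sharp = F(v) - para
for p in range(3, 10):
    # the second column should decay roughly twice as fast (on a log_2 scale)
    print(p, coef(F(v), p), coef(sharp, p))
\end{verbatim}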
\subsection{A basic commutator estimate} Here we prove the commutator estimate which will be the main ingredient in the construction of the integral $I(f,\mathrm{d} g)$, where $f$ is paracontrolled by $v$ and $g$ is paracontrolled by $w$, and where we assume that the integral $I(v,\mathrm{d} w)$ exists. \begin{prop}\label{p:commutator 1} Let $\alpha, \beta, \gamma \in (0,1)$, and assume that $\alpha+\beta+\gamma>1$ and $\beta+\gamma < 1$. Let $f\in \mathcal{C}^\alpha$, $v\in \mathcal{C}^\beta$, and $w \in \mathcal{C}^\gamma$. Then the ``commutator'' \begin{align}\label{e:commutator 1 def} C(f,v,w) &:= L(\pi_<(f, v),w) - I(f,\mathrm{d} L(v,w))\\ \nonumber & := \lim_{N\rightarrow \infty} [L(S_N(\pi_<(f, v)), S_N w) - I(f,\mathrm{d} L(S_N v,S_N w))] \\ \nonumber &\, = \lim_{N\rightarrow \infty} \sum_{p\le N} \sum_{q<p} \Biggl[ \int_0^\cdot \mathcal{D}elta_p (\pi_<(f,v))(s) \mathrm{d} \mathcal{D}elta_{q} w(s) - \int_0^\cdot \mathrm{d} (\mathcal{D}elta_{q} (\pi_<(f,v)))(s) \mathcal{D}elta_{p} w(s) \\ \nonumber &\hspace{90pt} - \Bigl(\int_0^\cdot f(s) \mathcal{D}elta_p v(s) \mathrm{d} \mathcal{D}elta_{q} w(s) - \int_0^\cdot f(s) \mathrm{d} (\mathcal{D}elta_{q} v)(s) \mathcal{D}elta_{p} w(s)\Bigr) \Biggr] \end{align} converges in $\mathcal{C}^{\alpha+\beta+\gamma-\mathrm{var}epsilon}$ for all $\mathrm{var}epsilon > 0$. Moreover, \begin{align*} \lVert C(f,v,w)\rVert_{\alpha + \beta + \gamma} \lesssim \lVert f\rVert_\alpha \lVert v \rVert_\beta \lVert w \rVert_\gamma. \end{align*} \end{prop} \begin{proof} We only argue for the first difference in~\eqref{e:commutator 1 def}, i.e. for \begin{align}\label{e:commutator 1 pr1} X_N := \sum_{p\le N} \sum_{q<p} \left[ \int_0^\cdot \mathcal{D}elta_p (\pi_<(f,v))(s) \mathrm{d} \mathcal{D}elta_{q} w(s) - \int_0^\cdot f(s) \mathcal{D}elta_p v(s) \mathrm{d} \mathcal{D}elta_{q} w(s) \right]. \end{align} The second difference can be handled using the same arguments. First we prove that $(X_N)$ converges uniformly, then we show that $\lVert X_N \rVert_{\alpha + \beta + \gamma}$ stays uniformly bounded. This will imply the desired result, since bounded sets in $\mathcal{C}^{\alpha+\beta+\gamma}$ are relatively compact in $\mathcal{C}^{\alpha+\beta+\gamma-\mathrm{var}epsilon}$. To prove uniform convergence, note that \begin{align}\label{e:commutator 1 pr2}\nonumber X_N - X_{N-1} & = \sum_{q<N}\left[ \int_0^\cdot \mathcal{D}elta_N (\pi_<(f,v))(s) \mathrm{d} \mathcal{D}elta_{q} w(s) - \int_0^\cdot f(s) \mathcal{D}elta_N v(s) \mathrm{d} \mathcal{D}elta_{q} w(s) \right]\\ \nonumber & = \sum_{q<N} \Biggl[ \sum_{j\le N} \sum_{i<j} \int_0^\cdot \mathcal{D}elta_N (\mathcal{D}elta_i f \mathcal{D}elta_j v)(s) \mathrm{d} \mathcal{D}elta_{q} w(s)\\ &\hspace{40pt} - \sum_{j \ge N} \sum_{i\le j} \int_0^\cdot \mathcal{D}elta_j (\mathcal{D}elta_i f \mathcal{D}elta_N v)(s) \mathrm{d} \mathcal{D}elta_{q} w(s) \Biggr], \end{align} where for the second term it is possible to take the infinite sum over $j$ outside of the integral because $\sum_j \mathcal{D}elta_j g$ converges uniformly to $g$ and because $\mathcal{D}elta_q w$ is a finite variation path. We also used that $\mathcal{D}elta_N (\mathcal{D}elta_i f \mathcal{D}elta_j v)=0$ whenever $i > N$ or $j > N$. Only very few terms in \eqref{e:commutator 1 pr2} cancel. Nonetheless these cancellations are crucial, since they eliminate most terms for which we only have the worse estimate \eqref{e:schauder blocks product bad} in Corollary~\ref{c:schauder blocks product}. 
We obtain \begin{align}\label{e:commutator 1 pr3}\nonumber X_N - X_{N-1}& = \sum_{q<N} \sum_{j<N} \sum_{i<j} \int_0^\cdot \mathcal{D}elta_N (\mathcal{D}elta_i f \mathcal{D}elta_j v)(s) \mathrm{d} \mathcal{D}elta_{q} w(s) - \sum_{q<N} \int_0^\cdot \mathcal{D}elta_N (\mathcal{D}elta_N f \mathcal{D}elta_N v)(s) \mathrm{d} \mathcal{D}elta_{q} w(s) \\ \nonumber &\hspace{20pt} - \sum_{q<N} \sum_{j > N} \sum_{i<j} \int_0^\cdot \mathcal{D}elta_j (\mathcal{D}elta_i f \mathcal{D}elta_N v)(s) \mathrm{d} \mathcal{D}elta_{q} w(s) \\ &\hspace{20pt} - \sum_{q<N} \sum_{j > N} \int_0^\cdot \mathcal{D}elta_j (\mathcal{D}elta_j f \mathcal{D}elta_N v)(s) \mathrm{d} \mathcal{D}elta_{q} w(s). \end{align} Note that $\lVert \partial_t \mathcal{D}elta_q w\rVert_\infty \lesssim 2^q \lVert \mathcal{D}elta_q w \rVert_\infty$. Hence, an application of Corollary~\ref{c:schauder blocks product}, where we use \eqref{e:schauder blocks product good} for the first three terms and \eqref{e:schauder blocks product bad} for the fourth term, yields \begin{align}\label{e:commutator 1 pr convergence speed} \nonumber \lVert X_N - X_{N-1} \rVert_\infty &\lesssim \lVert f \rVert_\alpha \lVert v \rVert_\beta \lVert w\rVert_\gamma \Biggl[ \sum_{q<N} \sum_{j<N} \sum_{i<j} 2^{-2N + i + j} 2^{-i\alpha} 2^{-j\beta}2^{q(1-\gamma)} \\ \nonumber &\qquad + \sum_{q<N} 2^{-N(\alpha + \beta)} 2^{q(1-\gamma)} + \sum_{q<N} \sum_{j > N} \sum_{i<j} 2^{-2j+i+N} 2^{-i\alpha} 2^{-N\beta} 2^{q(1-\gamma)} \\ \nonumber &\qquad + \sum_{q<N} \sum_{j > N} 2^{-j\alpha} 2^{-N\beta} 2^{q(1-\gamma)}\Biggr] \\ &\lesssim \lVert f \rVert_\alpha \lVert v \rVert_\beta \lVert w\rVert_\gamma 2^{-N(\alpha + \beta + \gamma - 1)}, \end{align} where in the last step we used $\alpha, \beta, \gamma < 1$. Since $\alpha + \beta + \gamma > 1$, this gives us the uniform convergence of $(X_N)$. Next let us show that $\lVert X_N \rVert_{\alpha + \beta + \gamma} \lesssim \lVert f \rVert_\alpha \lVert v \rVert_\beta \lVert w\rVert_\gamma$ for all $N$. Similarly to \eqref{e:commutator 1 pr3} we obtain for $n \in \mathbb{N}$ \begin{align*} \mathcal{D}elta_n X_N &= \sum_{p\le N} \sum_{q<p}\mathcal{D}elta_n \Biggl[ \sum_{j<p} \sum_{i<j} \int_0^\cdot \mathcal{D}elta_p (\mathcal{D}elta_i f \mathcal{D}elta_j v)(s) \mathrm{d} \mathcal{D}elta_{q} w(s) - \int_0^\cdot \mathcal{D}elta_p (\mathcal{D}elta_p f \mathcal{D}elta_p v)(s) \mathrm{d} \mathcal{D}elta_{q} w(s)\\ &\hspace{80pt}- \sum_{j>p} \sum_{i\le j} \int_0^\cdot \mathcal{D}elta_j (\mathcal{D}elta_i f \mathcal{D}elta_p v)(s) \mathrm{d} \mathcal{D}elta_{q} w(s) \Biggr], \end{align*} and therefore by Corollary~\ref{c:schauder blocks} \begin{align*} \lVert \mathcal{D}elta_n X_N\rVert_\infty &\lesssim \sum_{p} \sum_{q<p} \Biggl[ \sum_{j<p} \sum_{i<j} 2^{-(n\vee p) - n + p + q} \lVert \mathcal{D}elta_p (\mathcal{D}elta_i f \mathcal{D}elta_j v)\rVert_\infty \lVert \mathcal{D}elta_{q} w\rVert_\infty \\ &\hspace{60pt} + 2^{-(n\vee p) - n + p + q}\lVert \mathcal{D}elta_p (\mathcal{D}elta_p f \mathcal{D}elta_p v)\rVert_\infty \lVert \mathcal{D}elta_{q} w\rVert_\infty\\ &\hspace{60pt} + \sum_{j>p} \sum_{i\le j} 2^{-(n\vee j) - n + j + q} \lVert\mathcal{D}elta_j(\mathcal{D}elta_i f \mathcal{D}elta_p v)\rVert_\infty \lVert \mathcal{D}elta_{q} w\rVert_\infty \Biggr]. \end{align*} Now we apply Corollary~\ref{c:schauder blocks product}, where for the last term we distinguish the cases $i < j$ and $i = j$. 
Using that $1-\gamma > 0$, we get \begin{align*} \lVert \mathcal{D}elta_n X_N\rVert_\infty & \lesssim \lVert f \rVert_\alpha \lVert v \rVert_\beta \lVert w\rVert_\gamma \sum_p 2^{p(1-\gamma)} \Biggl[ \sum_{j<p} \sum_{i<j} 2^{-(n\vee p) - n + p} 2^{-2p} 2^{i(1-\alpha)} 2^{j(1-\beta)} \\ &\hspace{150pt} + 2^{-(n\vee p) - n + p} 2^{-p\alpha} 2^{-p\beta}\\ &\hspace{150pt} + \sum_{j>p} \sum_{i < j} 2^{-(n\vee j) - n + j} 2^{-2j + i(1-\alpha) + p (1-\beta)}\\ &\hspace{150pt} + \sum_{j>p} 2^{-(n\vee j) - n + j} 2^{-j\alpha - p \beta} \Biggr]\\ &\lesssim \lVert f \rVert_\alpha \lVert v \rVert_\beta \lVert w\rVert_\gamma 2^{-n(\alpha+\beta+\gamma)}, \end{align*} where we used both that $\alpha+\beta+\gamma>1$ and that $\beta+\gamma<1$. \end{proof} \begin{rmk} If $\beta + \gamma = 1$, we can apply Proposition~\ref{p:commutator 1} with $\beta - \mathrm{var}epsilon$ to obtain that $C(f,v,w) \in \mathcal{C}^{\alpha + \beta + \gamma - \mathrm{var}epsilon}$ for every sufficiently small $\mathrm{var}epsilon > 0$. If $\beta + \gamma > 1$, then we are in the Young setting and there is no need to introduce the commutator. \end{rmk} For later reference, we collect the following result from the proof of Proposition~\ref{p:commutator 1}: \begin{lem}\label{l:commutator speed of convergence} Let $\alpha, \beta, \gamma, f, v, w$ be as in Proposition~\ref{p:commutator 1}. Then \[ \lVert C(f,v,w) - \bigl[L(S_N(\pi_<(f, v)), S_N w) - I(f,\mathrm{d} L(S_N v,S_N w))\bigr]\rVert_\infty \lesssim 2^{-N(\alpha + \beta + \gamma - 1)} \lVert f \rVert_\alpha \lVert v \rVert_\beta \lVert w\rVert_\gamma. \] \end{lem} \begin{proof} Sum the estimate~\eqref{e:commutator 1 pr convergence speed} over all generations larger than $N$. \end{proof} \subsection{Pathwise integration for paracontrolled paths}\label{s:schauder rough path} In this section we apply the commutator estimate to construct the rough path integral under the assumption that the L\'evy area exists for a given reference path. \begin{thm}\label{t:rough path integral} Let $\alpha \in (1/3,1)$, $\beta \in (0,\alpha]$ and assume that $2\alpha+\beta>1$ as well as $\alpha+\beta \neq 1$. Let $v \in \mathcal{C}^\alpha(\mathbb{R}^d)$ and assume that the L\'evy area \begin{align*} L(v,v) := \lim_{N \rightarrow \infty}\bigl( L(S_N v^k, S_N v^\ell) \bigr)_{1\le k \le d, 1\le \ell \le d} \end{align*} converges uniformly and that $\sup_N \lVert L(S_N v, S_N v) \rVert_{2\alpha} < \infty$. Let $f \in \mathcal{D}^\beta_v(\mathcal{L}(\mathbb{R}^d, \mathbb{R}^m))$. Then $I(S_N f, \mathrm{d} S_N v)$ converges in $\mathcal{C}^{\alpha - \mathrm{var}epsilon}$ for all $\mathrm{var}epsilon > 0$. Denoting the limit by $I(f,\mathrm{d} v)$, we have \begin{align*} \lVert I(f,\mathrm{d} v)\rVert_\alpha \lesssim \lVert f \rVert_{v,\beta} \bigl(\lVert v \rVert_\alpha + \lVert v \rVert_\alpha^2 + \lVert L(v,v) \rVert_{2\alpha}\bigr). \end{align*} Moreover, $I(f,\mathrm{d} v) \in \mathcal{D}^{\alpha}_v$ with derivative $f$ and \begin{align*} \lVert I(f,\mathrm{d} v) \rVert_{v,\alpha} \lesssim \lVert f \rVert_{v,\beta} \bigl(1 + \lVert v \rVert_\alpha^2 + \lVert L(v,v) \rVert_{2\alpha}\bigr). \end{align*} \end{thm} \begin{proof} If $\alpha+\beta >1$, everything follows from the Young case, Theorem~\ref{t:young integral}, so let $\alpha+\beta<1$. We decompose \begin{align*} I(S_N f, \mathrm{d} S_N v) & = S(S_N f, S_N v) + \pi_<(S_N f, S_N v) + L(S_N f^\sharp, S_N v) \\ &\quad + [L(S_N \pi_<(f^v,v), S_N v) - I(f^v, \mathrm{d} L(S_N v,S_N v))] + I(f^v, \mathrm{d} L(S_N v,S_N v)). 
\end{align*} Convergence then follows from Proposition~\ref{p:commutator 1} and Theorem~\ref{t:young integral}. The limit is given by \[ I(f,\mathrm{d} v) = S(f,v) + \pi_<(f,v) + L(f^\sharp, v) + C(f^v,v,v) + I(f^v, \mathrm{d} L(v,v)), \] from where we easily deduce the claimed bounds. \end{proof} \begin{rmk}\label{r:locality of integral} Since $I(f,\mathrm{d} v) = \lim_{N\to \infty} \int_0^\cdot S_N f \mathrm{d} S_N v$, the integral is a local operator in the sense that $I(f,\mathrm{d} v)$ is constant on every interval $[s,t]$ for which $f|_{[s,t]}=0$. In particular we can estimate $I(f,\mathrm{d} v)|_{[0,t]}$ using only $f|_{[0,t]}$ and $f^v|_{[0,t]}$. \end{rmk} For fixed $v$ and $L(v,v)$, the map $f \mapsto I(f,\mathrm{d} v)$ is linear and bounded from $\mathcal{D}^\beta_v$ to $\mathcal{D}^\alpha_v$, and this is what we will need to solve differential equations driven by $v$. But we can also estimate the speed of convergence of $I(S_N f, \mathrm{d} S_N v)$ to $I(f, \mathrm{d} v)$, measured in uniform distance rather than in $\mathcal{C}^\alpha$: \begin{cor}\label{c:rough path speed of convergence} Let $\alpha \in (1/3,1/2]$ and let $\beta,v,f$ be as in Theorem~\ref{t:rough path integral}. Then we have for all $\mathrm{var}epsilon \in (0, 2\alpha + \beta-1)$ \begin{align*} \lVert I(S_N f, \mathrm{d} S_N v) - I(f,\mathrm{d} v)\rVert_\infty &\lesssim_\mathrm{var}epsilon 2^{-N(2\alpha + \beta - 1 - \mathrm{var}epsilon)} \lVert f\rVert_{v,\beta} \bigl(\lVert v \rVert_\alpha + \lVert v \rVert_\alpha^2 \bigr)\\ &\qquad +\lVert f^v \rVert_\beta \lVert L(S_N v, S_N v) - L(v,v)\rVert_{2\alpha-\mathrm{var}epsilon}. \end{align*} \end{cor} \begin{proof} We decompose $I(S_N f, \mathrm{d} S_N v)$ as described in the proof of Theorem~\ref{t:rough path integral}. This gives us for example the term \[ \| \pi_<(S_N f - f, S_N v) + \pi_<(f, S_N v - v)\|_\infty \lesssim_\mathrm{var}epsilon \| S_N f - f\|_\infty \| v \|_\alpha + \| f \|_\infty \|S_N v - v\|_\mathrm{var}epsilon \] for all $\mathrm{var}epsilon > 0$. From here it is easy to see that \[ \| \pi_<(S_N f - f, S_N v) + \pi_<(f, S_N v - v)\|_\infty \lesssim 2^{-N(\alpha-\mathrm{var}epsilon)} \|f\|_\alpha \|v\|_\alpha \lesssim 2^{-N(\alpha-\mathrm{var}epsilon)} \|f\|_{v,\beta} (\|v\|_\alpha + \|v\|_\alpha^2). \] But now $\beta\le \alpha \le 1/2$ and therefore $\alpha \ge 2\alpha + \beta - 1$. Let us treat one of the critical terms, say $L(S_N f^\sharp, S_N v) - L(f^\sharp, v)$. Since $2 \alpha + \beta - \mathrm{var}epsilon > 1$, we can apply Lemma~\ref{l:Levy area regularity} to obtain \begin{align*} &\lVert L(S_N f^\sharp, S_N v) - L(f^\sharp, v) \rVert_\infty \lesssim \lVert L(S_N f^\sharp - f^\sharp, S_N v)\rVert_{1+\mathrm{var}epsilon} + \lVert L(f^\sharp, S_N v - v)\rVert_{1+\mathrm{var}epsilon}\\ &\hspace{120pt}\lesssim_\mathrm{var}epsilon \lVert S_N f^\sharp - f^\sharp\rVert_{1+\mathrm{var}epsilon - \alpha} \lVert v \rVert_\alpha + \lVert f^\sharp\rVert_{\alpha+\beta} \lVert S_N v - v\rVert_{1+\mathrm{var}epsilon - \alpha-\beta} \\ &\hspace{120pt}\lesssim 2^{-N(\alpha + \beta - (1 + \mathrm{var}epsilon - \alpha))} \lVert f^\sharp\rVert_{\alpha+\beta} \lVert v \rVert_\alpha + 2^{-N(\alpha - (1+\mathrm{var}epsilon - \alpha-\beta))} \lVert f^\sharp\rVert_{\alpha+\beta} \lVert v \rVert_\alpha \\ &\hspace{120pt}\lesssim 2^{-N(2\alpha + \beta - 1 -\mathrm{var}epsilon)}\lVert f^\sharp \rVert_{\alpha+\beta} \lVert v \rVert_\alpha. 
\end{align*} Lemma~\ref{l:commutator speed of convergence} gives \begin{align*} \lVert L(S_N \pi_<(f^v,v), S_N v) - L(\pi_<(f^v,v),v) \rVert_\infty &\lesssim 2^{-N(2 \alpha + \beta - 1)} \lVert f^v \rVert_\beta \lVert v\rVert_\alpha^2\\ &\quad + \lVert I(f^v, \mathrm{d} L(S_N v, S_N v)) - I(f^v, \mathrm{d} L(v,v))\rVert_\infty. \end{align*} The second term on the right hand side can be estimated using the continuity of the Young integral, and the proof is complete. \end{proof} \begin{rmk} In Lemma~\ref{l:commutator speed of convergence} we saw that the rate of convergence of \[ L(S_N \pi_<(f^v,v),S_N v) - I(f^v, \mathrm{d} L(S_Nv, S_Nv)) - (L(\pi_<(f^v,v),v) - I(f^v, \mathrm{d} L(v,v))) \] is in fact $2^{-N(2\alpha+\beta - 1)}$ when measured in uniform distance, and not just $2^{-N(2\alpha +\beta- 1 -\mathrm{var}epsilon)}$. It is possible to show that this optimal rate is attained by the other terms as well, so that \begin{align*} \lVert I(S_N f, \mathrm{d} S_N v) - I(f,\mathrm{d} v)\rVert_\infty &\lesssim 2^{-N(2\alpha + \beta - 1)} \lVert f\rVert_{v,\beta} \bigl(\lVert v \rVert_\alpha + \lVert v \rVert_\alpha^2 \bigr)\\ &\qquad +\lVert f^v \rVert_\beta \lVert L(S_N v, S_N v) - L(v,v)\rVert_{2\alpha - \mathrm{var}epsilon}. \end{align*} Since this requires a rather lengthy calculation, we decided not to include the arguments here. \end{rmk} Since we approximate $f$ and $v$ by the piecewise smooth functions $S_N f$ and $S_N v$ when defining the integral $I(f,\mathrm{d} v)$, it is not surprising that we obtain a Stratonovich type integral: \begin{prop}\label{p:ibp stratonovich} Let $\alpha \in (1/3,1)$ and $v \in \mathcal{C}^\alpha(\mathbb{R}^d)$. Let $\mathrm{var}epsilon > 0$ be such that $(2+\mathrm{var}epsilon)\alpha > 1$ and let $F \in C^{2+\mathrm{var}epsilon}(\mathbb{R}^d,\mathbb{R})$. Then \begin{align*} F(v(t)) - F(v(0)) = I(\mathrm{D} F(v),\mathrm{d} v)(t) := \lim_{N\rightarrow \infty} I(S_N \mathrm{D} F(v), \mathrm{d} S_N v)(t) \end{align*} for all $t \in [0,1]$. \end{prop} \begin{proof} The function $S_N v$ is Lipschitz continuous, so that integration by parts gives \begin{align*} F(S_N v(t)) - F(S_N v(0)) = I(\mathrm{D} F(S_N v), \mathrm{d} S_N v)(t). \end{align*} The left hand side converges to $F(v(t)) - F(v(0))$. It thus suffices to show that $I(S_N \mathrm{D} F(v)-\mathrm{D} F(S_N v), \mathrm{d} S_N v)$ converges to zero. By continuity of the Young integral, Theorem~\ref{t:young integral}, it suffices to show that $\lim_{N\rightarrow \infty} \lVert S_N \mathrm{D} F(v) - \mathrm{D} F(S_N v)\rVert_{\alpha(1+\mathrm{var}epsilon')} = 0$ for all $\mathrm{var}epsilon' < \mathrm{var}epsilon$. Recall that $S_N v$ is the linear interpolation of $v$ between the points $(t^1_{pm})$ for $p \le N$ and $0 \le m \le 2^p$, and therefore $\mathcal{D}elta_p \mathrm{D} F(S_Nv) = \mathcal{D}elta_p \mathrm{D} F(v) = \mathcal{D}elta_p S_N \mathrm{D} F(v)$ for all $p \le N$. For $p > N$ and $1 \le m \le 2^p$ we apply a first order Taylor expansion to both terms and use the $\mathrm{var}epsilon$--H\"older continuity of $\mathrm{D}^2 F$ to obtain \begin{align*} \left|[S_N \mathrm{D} F(v) - \mathrm{D} F(S_N v)]_{pm}\right| & \le C_F 2^{-p\alpha(1+\mathrm{var}epsilon)} \lVert S_N v \rVert_\alpha \end{align*} for a constant $C_F>0$. 
Therefore, we get for all $\mathrm{var}epsilon' \le \mathrm{var}epsilon$ \begin{align*} \lVert S_N \mathrm{D} F(v) - \mathrm{D} F(S_Nv)\rVert_{\alpha(1+ \mathrm{var}epsilon')} \lesssim_F 2^{-N\alpha(\mathrm{var}epsilon-\mathrm{var}epsilon')} \lVert v \rVert_\alpha, \end{align*} which completes the proof. \end{proof} \begin{rmk}\label{r:symmetric structure induces cancellations stratonovich} Note that here we did not need any assumption on the area $L(v,v)$. The reason is the cancellations that arise from the symmetric structure of the derivative of $\mathrm{D} F$, the Hessian of $F$. Proposition~\ref{p:ibp stratonovich} was previously obtained by Roynette~\cite{Roynette1993}, except that there $v$ is assumed to be one-dimensional and in the Besov space $B^{1/2}_{1,\infty}$. \end{rmk} \section{Pathwise It\^{o} integration}\label{s:pathwise ito} In the previous section we saw that our pathwise integral $I(f,\mathrm{d} v)$ is of Stratonovich type, i.e. it satisfies the usual integration by parts rule. But in applications it may be interesting to have an It\^{o} integral. Here we show that a slight modification of $I(f,\mathrm{d} v)$ allows us to treat non-anticipating It\^{o}-type integrals. A natural approximation of a non-anticipating integral is given for $k \in \mathbb{N}$ by \begin{align*} I^{\mathrm{It\hat{o}}}_k (f,\mathrm{d} v) (t) :=\, & \sum_{m=1}^{2^k} f(t^0_{km}) (v(t^2_{km}\wedge t) - v(t^0_{km}\wedge t))\\ =\, & \sum_{m=1}^{2^k} \sum_{p,q} \sum_{m',n} f_{pm'} v_{qn} \mathrm{var}phi_{pm'}(t^0_{km}) (\mathrm{var}phi_{qn}(t^2_{km}\wedge t) - \mathrm{var}phi_{qn}(t^0_{km}\wedge t)). \end{align*} Let us assume for the moment that $t=\ell 2^{-k}$ for some $0 \le \ell \le 2^k$. In that case we obtain for $p \ge k$ or $q \ge k$ that $\mathrm{var}phi_{pm'}(t^0_{km})(\mathrm{var}phi_{qn}(t^2_{km}\wedge t) - \mathrm{var}phi_{qn}(t^0_{km}\wedge t)) = 0$. For $p,q<k$, both $\mathrm{var}phi_{pm'}$ and $\mathrm{var}phi_{qn}$ are affine functions on $[t^0_{km}\wedge t, t^2_{km}\wedge t]$, and for affine $u$ and $w$ and $s<t$ we have \begin{align*} u(s)(w(t) - w(s)) = \int_s^t u(r) \mathrm{d} w(r) - \frac{1}{2} [u(t) - u(s)] [w(t) - w(s)]. \end{align*} Hence, we conclude that for $t=\ell 2^{-k}$ \begin{align}\label{e:ito via piecewise linear} I^{\mathrm{It\hat{o}}}_k (f,\mathrm{d} v)(t) = I(S_{k-1} f, \mathrm{d} S_{k-1} v)(t) - \frac{1}{2}[f,v]_k(t), \end{align} where $[f,v]_k$ is the $k$--th dyadic approximation of the quadratic covariation $[f,v]$, i.e. \begin{align*} [f,v]_k(t) := \sum_{m=1}^{2^k} [f(t^2_{km}\wedge t) - f(t^0_{km}\wedge t)][v(t^2_{km}\wedge t) - v(t^0_{km}\wedge t)]. \end{align*} From now on we study the right hand side of~\eqref{e:ito via piecewise linear} rather than $I^{\mathrm{It\hat{o}}}_k(f,\mathrm{d} v)$, which is justified by the following remark. \begin{rmk}\label{r:our ito vs nonanticipating riemann} Let $\alpha \in (0,1)$. If $f\in C([0,1])$ and $v\in \mathcal{C}^\alpha$, then \begin{align*} \Bigl\lVert I^{\mathrm{It\hat{o}}}_k(f,\mathrm{d} v) - \Bigl(I(S_{k-1} f,\mathrm{d} S_{k-1}v) - \frac{1}{2} [S_{k-1} f, S_{k-1}v]_k \Bigr) \Bigr\rVert_\infty \lesssim 2^{-k\alpha} \lVert f \rVert_\infty \lVert v \rVert_\alpha. \end{align*} This holds because both functions agree in all dyadic points of the form $m2^{-k}$, and because between those points the integrals can pick up mass of at most $\lVert f \rVert_\infty 2^{-k\alpha} \lVert v \rVert_\alpha$. 
\end{rmk} We write $[v,v] := ([v^i, v^j])_{1 \le i, j \le d}$ and $L(v,v) := (L(v^i, v^j))_{1\le i,j\le d}$, and similarly for all expressions of the same type. \begin{thm}\label{t:pathwise ito integral} Let $\alpha \in (0,1/2)$ and let $\beta\le \alpha$ be such that $2\alpha + \beta > 1$. Let $v\in \mathcal{C}^\alpha(\mathbb{R}^d)$ and $f \in \mathcal{D}^\beta_v(\mathcal{L}(\mathbb{R}^d;\mathbb{R}^n))$. Assume that $(L(S_k v, S_k v))$ converges uniformly, with uniformly bounded $\mathcal{C}^{2\alpha}$ norm. Also assume that $([v,v]_k)$ converges uniformly. Then $(I^{\mathrm{It\hat{o}}}_k(f,\mathrm{d} v))$ converges uniformly to a limit $I^{\mathrm{It\hat{o}}}(f,\mathrm{d} v) = I(f,\mathrm{d} v) - 1/2[f,v]$ which satisfies \begin{align*} \lVert I^{\mathrm{It\hat{o}}}(f,\mathrm{d} v)\rVert_\infty \lesssim \lVert f\rVert_{v,\beta} (\lVert v \rVert_\alpha + \lVert v \rVert_\alpha^2 + \lVert L(v,v) \rVert_{2\alpha} + \lVert[v,v]\rVert_\infty), \end{align*} and where the quadratic variation $[f,v]$ is given by \begin{equation}\label{e:quadratic variation controlled explicit} [f,v] = \int_0^\cdot f^{v}(s) \mathrm{d} [v,v](s) := \bigg( \sum_{j,\ell=1}^d\int_0^\cdot (f^{ij})^{v,\ell}(s) \mathrm{d} [v^j,v^\ell](s)\bigg)_{1\le i \le n}, \end{equation} where $(f^{ij})^{v,\ell}$ is the $\ell$--th component of the $v$--derivative of $f^{ij}$. For $\mathrm{var}epsilon \in (0,2\alpha+\beta-1)$ the speed of convergence can be estimated by \begin{align*} \big\lVert I^{\mathrm{It\hat{o}}}(f,\mathrm{d} v) - I^{\mathrm{It\hat{o}}}_k(f,\mathrm{d} v) \big\rVert_\infty & \lesssim_\mathrm{var}epsilon 2^{-k(2\alpha + \beta - 1 - \mathrm{var}epsilon)} \lVert f\rVert_{v,\beta} \bigl( \lVert v \rVert_\alpha + \lVert v \rVert_\alpha^2 \bigr)\\ &\quad +\lVert f^v \rVert_\beta \lVert L(S_{k-1} v, S_{k-1} v) - L(v,v)\rVert_{2\alpha} \\ &\quad + \lVert f^v \rVert_\infty \lVert [v,v]_k - [v,v]\rVert_{\infty}. \end{align*} \end{thm} \begin{proof} By Remark~\ref{r:our ito vs nonanticipating riemann}, it suffices to show our claims for $I(S_{k-1} f, \mathrm{d} S_{k-1} v) -1/2[f,v]_k$. The statements for the integral $I(S_{k-1} f, \mathrm{d} S_{k-1} v)$ follow from Theorem~\ref{t:rough path integral} and Corollary~\ref{c:rough path speed of convergence}. So let us concentrate on the quadratic variation $[f,v]_k$. Recall from Example~\ref{ex:controlled old vs new} that $f \in \mathcal{D}^\beta_v$ if and only if $R^f_{s,t} = f_{s,t} - f^v(s) v_{s,t}$ satisfies $|R^f_{s,t}| \lesssim |t-s|^{\alpha+\beta}$. Hence \begin{align*} [f,v]^i_k (t) & = \sum_m \big(f_{t^0_{km} \wedge t, t^2_{km} \wedge t} v_{t^0_{km} \wedge t, t^2_{km} \wedge t}\big)^i\\ & = \sum_m \big(R^f_{t^0_{km} \wedge t, t^2_{km} \wedge t} v_{t^0_{km} \wedge t, t^2_{km} \wedge t}\big)^i + \sum_{j,\ell=1}^d \sum_m (f^{ij})^{v,\ell}(t^0_{km} \wedge t) v^\ell_{t^0_{km} \wedge t, t^2_{km} \wedge t} v^j_{t^0_{km} \wedge t, t^2_{km} \wedge t}. \end{align*} It is easy to see that the first term on the right hand side is bounded by \[ \Big| \sum_m \big(R^f_{t^0_{km} \wedge t, t^2_{km} \wedge t} v_{t^0_{km} \wedge t, t^2_{km} \wedge t}\big)^i \Big| \lesssim 2^{-k(2\alpha+\beta-1)} \lVert f \rVert_{v,\beta}(\lVert v \rVert_\alpha + \lVert v \rVert_\alpha^2). \] For the second term, let us fix $\ell$ and $j$. Then the sum over $m$ is just the integral of $(f^{ij})^{v,\ell}$ with respect to the signed measure $\mu^k_t = \sum_{m} \delta_{t^0_{km}} v^j_{t^0_{km} \wedge t, t^2_{km} \wedge t} v^\ell_{t^0_{km} \wedge t, t^2_{km} \wedge t}$. 
Decomposing $\mu^k_t$ into a positive and negative part as \begin{align*} \mu^k_t & = \frac{1}{4} \Big[\sum_m \delta_{t^0_{km}} [(v^j+v^\ell)_{t^0_{km} \wedge t, t^2_{km} \wedge t}]^2 -\sum_m \delta_{t^0_{km}} [(v^j - v^\ell)_{t^0_{km} \wedge t, t^2_{km} \wedge t}]^2\Big] \end{align*} and similarly for $\mathrm{d} \mu_t = \mathrm{d} [v^j,v^\ell]_t$ we can estimate \begin{align*} &\Big| \int_0^1 (f^{ij})^{v,\ell}(s) \mu^k_t(\mathrm{d} s) - \int_0^1 (f^{ij})^{v,\ell}(s) \mu_t(\mathrm{d} s) \Big| \\ &\hspace{100pt} \lesssim \left\lVert f^v \right\rVert_\infty \left(\left\lVert [v^j+v^\ell]_k - [v^j + v^\ell]\right\rVert_\infty + \left\lVert [v^j-v^\ell]_k - [v^j - v^\ell]\right\rVert_\infty\right)\\ &\hspace{100pt} \lesssim \left\lVert f^v \right\rVert_\infty \lVert [v,v]_k - [v,v]\rVert_\infty, \end{align*} where we write $[u] := [u,u]$ and similarly for $[u]_k$. By assumption the right hand side converges to zero, from where we get the uniform convergence of $[f,v]_k$ to $[f,v]$. \end{proof} \begin{rmk} We calculate the pathwise It\^{o} integral $I^{\mathrm{It\hat{o}}}(f,\mathrm{d} v)$ as limit of nonanticipating Riemann sums involving only $f$ and $v$. The classical rough path integral, see Proposition~\ref{p:Gubinelli rough paths}, is obtained via ``compensated Riemann sums'' that depend explicitly on the derivative $f^v$ and the iterated integrals of $v$. For applications in mathematical finance, it is more convenient to have an integral that is the limit of nonanticipating Riemann sums, because this can be interpreted as capital process obtained from investing. \end{rmk} Note that $[v,v]$ is always a continuous function of bounded variation, but a priori it is not clear whether it is in $\mathcal{C}^{2\alpha}$. Under this additional assumption we have the following stronger result. \begin{cor}\label{c:pathwise ito with smooth quadratic variation} In addition to the conditions of Theorem~\ref{t:pathwise ito integral}, assume that also $[v,v] \in \mathcal{C}^{2\alpha}$. Then $I^{\mathrm{It\hat{o}}}(f,\mathrm{d} v) \in \mathcal{D}^\alpha_v$ with derivative $f$, and \begin{align*} \lVert I^{\mathrm{It\hat{o}}}(f,\mathrm{d} v) \rVert_{v,\alpha} \lesssim \lVert f \rVert_{v,\beta} \bigl(1 + \lVert v \rVert_\alpha^2+ \lVert L(v,v) \rVert_{2\alpha} + \lVert [v,v]\rVert_{2\alpha} \bigr). \end{align*} \end{cor} \begin{proof} This is a combination of Theorem~\ref{t:rough path integral} and the explicit representation \eqref{e:quadratic variation controlled explicit} together with the continuity of the Young integral, Theorem~\ref{t:young integral}. \end{proof} 
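To make the preceding statements concrete, the following Python sketch computes the level-$k$ nonanticipating Riemann sums $I^{\mathrm{It\hat{o}}}_k(v,\mathrm{d} v)(1)$ and the dyadic quadratic variation $[v,v]_k(1)$ directly from their definitions; the Gaussian random walk standing in for $v$ is an assumption made only for this illustration, and nothing in the text depends on it. As a sanity check it verifies the exact algebraic identity $v(1)^2 - v(0)^2 = 2 I^{\mathrm{It\hat{o}}}_k(v,\mathrm{d} v)(1) + [v,v]_k(1)$, which is the scalar case of the telescoping identity used in the proof of Lemma~\ref{l:ito implies quadratic variation} below.
\begin{verbatim}
import numpy as np

# Illustration only: a scalar "Brownian-like" sample path on the dyadic grid of
# generation N; the random sample is an assumption made for this example.
np.random.seed(1)
N = 14
n = 2**N
v = np.concatenate(([0.0], np.cumsum(np.random.randn(n) / np.sqrt(n))))

def ito_sum(f, w, k):
    # level-k nonanticipating Riemann sum I^Ito_k(f, dw)(1)
    step = 2**(N - k)
    left = np.arange(0, n, step)            # indices of the left endpoints t^0_{km}
    return np.sum(f[left] * (w[left + step] - w[left]))

def quad_var(f, w, k):
    # dyadic approximation [f, w]_k(1)
    step = 2**(N - k)
    left = np.arange(0, n, step)
    return np.sum((f[left + step] - f[left]) * (w[left + step] - w[left]))

for k in (6, 8, 10, 12):
    I_k, Q_k = ito_sum(v, v, k), quad_var(v, v, k)
    # exact identity at t = 1: v(1)^2 - v(0)^2 = 2 I^Ito_k(v, dv)(1) + [v, v]_k(1)
    print(k, I_k, Q_k, abs(v[-1]**2 - v[0]**2 - 2.0 * I_k - Q_k))
\end{verbatim}
In practice one would update such quantities recursively when refining from $k$ to $k+1$, which is the point of the discussion that follows.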
The term $I(S_{k-1}f,\mathrm{d} S_{k-1}v)$ has the pleasant property that if we want to refine our calculation by passing from $k$ to $k+1$, then we only have to add the additional term $I(S_{k-1}f, \mathrm{d} \mathcal{D}elta_k v) + I(\mathcal{D}elta_k f, \mathrm{d} S_k v)$. For the quadratic variation $[f,v]_k$ this is not exactly true. But $[f,v]_k(m2^{-k}) = [S_{k-1}f,S_{k-1}v]_k(m2^{-k})$ for $m=0,\dots, 2^k$, and there is a recursive way of calculating $[S_{k-1}f, S_{k-1}v]_k$: \begin{lem} Let $f,v \in C([0,1],\mathbb{R})$. Then \begin{align}\label{e:recursive quadratic variation} [S_k f,S_k v]_{k+1}(t) & = \frac{1}{2} [S_{k-1} f, S_{k-1}v]_k(t) + [S_{k-1} f, \mathcal{D}elta_k v]_{k+1}(t) + [\mathcal{D}elta_k f, S_k v]_{k+1}(t) + R_k(t) \end{align} for all $k\ge 1$ and all $t \in [0,1]$, where \begin{align*} R_k(t) := -\frac{1}{2} f_{\llcorner t^k \lrcorner,t} v_{\llcorner t^k \lrcorner,t} + f_{\llcorner t^k \lrcorner,\ulcorner t^{k+1}\urcorner \wedge t} v_{\llcorner t^k \lrcorner,\ulcorner t^{k+1}\urcorner \wedge t} + f_{\ulcorner t^{k+1}\urcorner \wedge t, t} v_{\ulcorner t^{k+1}\urcorner \wedge t,t} \end{align*} and $\llcorner t^k \lrcorner := \lfloor t 2^k \rfloor 2^{-k}$ and $\ulcorner t^{k}\urcorner := \llcorner t^k \lrcorner + 2^{-(k+1)}$. In particular, we obtain for $t=1$ that \begin{align}\label{e:cesaro formula quadratic variation} [f,v]_{k+1}(1) = \frac{1}{2}[f,v]_k(1) + \frac{1}{2} \sum_m f_{km} v_{km} = \frac{1}{2^{k+1}}\sum_{p\le k} \sum_m 2^{p} f_{pm} v_{pm}. \end{align} If moreover $\alpha \in (0,1)$ and $f,v \in \mathcal{C}^\alpha$, then $\lVert [S_{k-1} f, S_{k-1} v]_k - [f,v]_k \rVert_\infty \lesssim 2^{-2k\alpha} \lVert f \rVert_\alpha \lVert v \rVert_\alpha$. \end{lem} \begin{proof} Equation~\eqref{e:recursive quadratic variation} follows from a direct calculation using the fact that $S_{k-1} f$ and $S_{k-1} v$ are affine on every interval $[t^0_{k\ell},t^1_{k\ell}]$ respectively $[t^1_{k\ell},t^2_{k\ell}]$ for $1 \le \ell \le 2^k$. The formula for $[f,v]_{k+1}(1)$ follows from the fact that $[\mathcal{D}elta_p f, \mathcal{D}elta_q v]_{k+1}(1) = 0$ unless $p=q$, and that $[\mathcal{D}elta_k f, \mathcal{D}elta_k v]_{k+1}(1) = \frac{1}{2} \sum_m f_{km} v_{km}$. The estimate for $\lVert [S_{k-1} f, S_{k-1} v]_k - [f,v]_k \rVert_\infty$ holds because the two functions agree in all dyadic points $m 2^{-k}$. \end{proof} \begin{rmk} The Ces\`aro mean formula \eqref{e:cesaro formula quadratic variation} makes the study of existence of the quadratic variation accessible to ergodic theory. This was previously observed by Gantert~\cite{Gantert1994}. See also Gantert's thesis~\cite{Gantert1991}, Beispiel 3.29, where it is shown that ergodicity alone (of the distribution of $v$ with respect to suitable transformations on path space) is not sufficient to obtain convergence of $([v,v]_k(1))$ as $k$ tends to $\infty$. \end{rmk} It would be more natural to assume that for the controlling path $v$ the non-anticipating Riemann sums converge, rather than assuming that $(L(S_{k}v, S_k v))_k$ and $([v,v]_k)$ converge. This is indeed sufficient, as long as a uniform H\"older estimate is satisfied by the Riemann sums. We start by showing that the existence of the It\^{o} iterated integrals implies the existence of the quadratic variation. \begin{lem}\label{l:ito implies quadratic variation} Let $\alpha \in (0,1/2)$ and let $v \in \mathcal{C}^\alpha(\mathbb{R}^d)$. Assume that the non-anticipating Riemann sums $(I^{\mathrm{It\hat{o}}}_k(v,\mathrm{d} v))_k$ converge uniformly to $I^{\mathrm{It\hat{o}}}(v,\mathrm{d} v)$. Then also $([v,v]_k)_k$ converges uniformly to a limit $[v,v]$. 
If moreover \begin{align}\label{e:discrete hoelder} \nonumber &\sup_k \sup_{0 \le m < m' \le 2^k} \frac{|I^{\mathrm{It\hat{o}}}_k(v,\mathrm{d} v)(m' 2^{-k}) - I^{\mathrm{It\hat{o}}}_k(v,\mathrm{d} v)(m 2^{-k}) - v(m2^{-k}) (v(m'2^{-k}) - v(m2^{-k}))|}{|(m'-m)2^{-k}|^{2\alpha}}\\ &\hspace{20pt} = C < \infty, \end{align} then $[v,v] \in \mathcal{C}^{2\alpha}$ and $\lVert [v,v] \rVert_{2\alpha} \lesssim C + \lVert v \rVert_\alpha^2$. \end{lem} \begin{proof} Let $t \in [0,1]$ and $1 \le i,j \le d$. Then \begin{align*} & v^i(t) v^j(t) - v^i(0)v^j(0) = \sum_{m = 1}^{2^k} \left[v^i(t^2_{km}\wedge t) v^j(t^2_{km}\wedge t) - v^i(t^0_{km}\wedge t) v^j(t^0_{km}\wedge t)\right] \\ &\hspace{50pt} = \sum_{m = 1}^{2^k} \left[v^i(t^0_{km}) v^j_{t^0_{km}\wedge t, t^2_{km}\wedge t} + v^j(t^0_{km}) v^i_{t^0_{km}\wedge t, t^2_{km}\wedge t} + v^i_{t^0_{km}\wedge t, t^2_{km}\wedge t} v^j_{t^0_{km}\wedge t, t^2_{km}\wedge t}\right] \\ &\hspace{50pt} = I^{\mathrm{It\hat{o}}}_k(v^i,\mathrm{d} v^j)(t) + I^{\mathrm{It\hat{o}}}_k(v^j,\mathrm{d} v^i)(t) + [v^i,v^j]_k(t), \end{align*} which implies the convergence of $([v,v]_k)_k$ as $k$ tends to $\infty$. For $0\le s<t\le 1$ this gives \begin{align*} ([v^i,v^j]_k)_{s,t} & = \bigl(v^i v^j\bigr)_{s,t} - I^{\mathrm{It\hat{o}}}_k(v^i,\mathrm{d} v^j)_{s,t} - I^{\mathrm{It\hat{o}}}_k(v^j,\mathrm{d} v^i)_{s,t} \\ & = \left[v^i(s) v^j_{s,t} - I^{\mathrm{It\hat{o}}}_k(v^i,\mathrm{d} v^j)_{s,t}\right] + \left[v^j(s) v^i_{s,t} - I^{\mathrm{It\hat{o}}}_k(v^j,\mathrm{d} v^i)_{s,t}\right] + v^i_{s,t} v^j_{s,t}, \end{align*} At this point it is easy to estimate $\lVert [v,v]\rVert_{2\alpha}$, where we work with the classical H\"older norm and not the $\mathcal{C}^{2\alpha}$ norm. Indeed let $0 \le s < t \le 1$. Using the continuity of $[v,v]$, we can find $k$ and $s\le s_k = m_s 2^{-k}< m_t 2^{-k} = t_k \le t$ with $|[v,v]|_{s,s_k} + |[v,v]|_{t,t_k}\le \lVert v\rVert_\alpha^2 |t-s|^{2\alpha}$. Moreover, \[ |[v,v]|_{s_k,t_k} \le \Big(\sup_{\ell \ge k} \sup_{0 \le m < m' \le 2^\ell} \frac{|([v,v]_\ell)_{m2^{-\ell},m'2^{-\ell}}|}{|(m'-m)2^{-\ell}|^{2\alpha}} \Big)|t_k - s_k|^{2\alpha} \le (2C + \lVert v \rVert_{\alpha}^2) |t-s|^{2\alpha}. \] \end{proof} \begin{rmk} The ``coarse-grained H\"older condition''~\eqref{e:discrete hoelder} is from~\cite{Perkowski2013Pathwise} and has recently been discovered independently by~\cite{Kelly2014}. \end{rmk} Similarly convergence of $(I^{\mathrm{It\hat{o}}}_k(v,\mathrm{d} v))$ implies convergence of $(L(S_k v, S_k v))_k$: \begin{lem}\label{l:ito implies stratonovich} In the setting of Lemma~\ref{l:ito implies quadratic variation}, assume that~\eqref{e:discrete hoelder} holds. Then $L(S_k v, S_k v)$ converges uniformly as $k$ tends to $\infty$, and \begin{align*} \sup_k \lVert L(S_k v, S_k v)\rVert_{2\alpha} \lesssim C + \lVert v \rVert_\alpha^2. \end{align*} \end{lem} \begin{proof} Let $k \in \mathbb{N}$ and $0 \le m \le 2^k$, and write $t = m 2^{-k}$. Then we obtain from \eqref{e:ito via piecewise linear} that \begin{align}\label{e:ito implies stratonovich pr1} &L(S_{k-1} v, S_{k-1} v)(t) \\ \nonumber &\hspace{40pt} = I^{\mathrm{It\hat{o}}}_k(v,\mathrm{d} v)(t) + \frac{1}{2} [v,v]_k(t) - \pi_<(S_{k-1}v, S_{k-1}v)(t) - S(S_{k-1}v, S_{k-1}v)(t). \end{align} Let now $s,t \in [0,1]$. We first assume that there exists $m$ such that $t^0_{km} \le s < t \le t^2_{km}$. 
Then we use $\lVert \partial_t \mathcal{D}elta_q v \rVert_\infty \lesssim 2^{q(1-\alpha)} \lVert v \rVert_\alpha$ to obtain \begin{align}\label{e:ito implies stratonovich pr2} &|L(S_{k-1}v, S_{k-1}v)_{s,t}| \le \sum_{p<k}\sum_{q<p} \left| \int_s^t \mathcal{D}elta_p v(r) \mathrm{d} \mathcal{D}elta_q v(r) - \int_s^t \mathrm{d} \mathcal{D}elta_q v(r) \mathcal{D}elta_p v(r)\right| \\ \nonumber &\hspace{40pt} \lesssim \sum_{p<k} \sum_{q<p} |t-s| 2^{-p\alpha} 2^{q(1-\alpha)} \lVert v \rVert_\alpha^2 \lesssim |t-s| 2^{-k(2\alpha-1)} \lVert v \rVert_\alpha^2 \le |t-s|^{2\alpha} \lVert v \rVert_\alpha^2. \end{align} Combining \eqref{e:ito implies stratonovich pr1} and \eqref{e:ito implies stratonovich pr2}, we obtain the uniform convergence of $(L(S_{k-1} v,S_{k-1} v))$ from Lemma~\ref{l:ito implies quadratic variation} and from the continuity of $\pi_<$ and $S$. For $s$ and $t$ that do not lie in the same dyadic interval of generation $k$, let $\ulcorner s^k\urcorner = m_s 2^{-k}$ and $\llcorner t^k\lrcorner = m_t 2^{-k}$ be such that $\ulcorner s^k\urcorner - 2^{-k} < s \le \ulcorner s^k\urcorner$ and $\llcorner t^k\lrcorner \le t < \llcorner t^k\lrcorner + 2^{-k}$. In particular, $\ulcorner s^k\urcorner\le \llcorner t^k\lrcorner$. Moreover \begin{align*} |L(S_{k-1}v, S_{k-1}v)_{s,t}| &\le |L(S_{k-1}v, S_{k-1}v)_{s,\ulcorner s^k\urcorner}| + |L(S_{k-1}v, S_{k-1}v)_{\ulcorner s^k\urcorner,\llcorner t^k\lrcorner }| \\ &\hspace{20pt} + |L(S_{k-1}v, S_{k-1}v)_{\llcorner t^k \lrcorner,t}|. \end{align*} Using~\eqref{e:ito implies stratonovich pr2}, the first and third term on the right hand side can be estimated by $(|\ulcorner s^k\urcorner -s|^{2\alpha} + |t-\llcorner t^k \lrcorner|^{2\alpha})\lVert v \rVert_\alpha^2 \lesssim |t-s|^{2\alpha} \lVert v \rVert_\alpha^2$. For the middle term we apply \eqref{e:ito implies stratonovich pr1} to obtain \begin{align*} |L(S_{k-1}v, S_{k-1}v)_{\ulcorner s^k\urcorner,\llcorner t^k\lrcorner }| & \le \left|I^{\mathrm{It\hat{o}}}_k(v,\mathrm{d} v)_{\ulcorner s^k\urcorner,\llcorner t^k\lrcorner } - v(\ulcorner s^k\urcorner)(v(\llcorner t^k\lrcorner) - v(\ulcorner s^k\urcorner))\right| \\ &\hspace{20pt} + \left|v(\ulcorner s^k\urcorner)v_{\ulcorner s^k\urcorner,\llcorner t^k\lrcorner } - \pi_<(S_{k-1}v,S_{k-1}v)_{\ulcorner s^k\urcorner, \llcorner t^k\lrcorner}\right| \\ &\hspace{20pt} + \frac{1}{2} \left|([v,v]_k)_{\ulcorner s^k\urcorner,\llcorner t^k\lrcorner }\right| + \left| S(S_{k-1}v, S_{k-1}v)_{\ulcorner s^k\urcorner,\llcorner t^k\lrcorner }\right| \\ & \lesssim |\llcorner t^k\lrcorner - \ulcorner s^k\urcorner|^{2\alpha}\left( C + \lVert v \rVert_\alpha^2\right) \le |t-s|^{2\alpha} \left(C + \lVert v \rVert_\alpha^2\right), \end{align*} where Example~\ref{ex:controlled old vs new}, Lemma~\ref{l:ito implies quadratic variation}, and Lemma~\ref{l:symmetric part} have been used. \end{proof} It follows from the work of F\"ollmer that our pathwise It\^{o} integral satisfies It\^{o}'s formula: \begin{cor} Let $\alpha \in (1/3, 1/2)$ and $v\in \mathcal{C}^\alpha(\mathbb{R}^d)$. Assume that the non-anticipating Riemann sums $(I^{\mathrm{It\hat{o}}}_k(v,\mathrm{d} v))_k$ converge uniformly to $I^{\mathrm{It\hat{o}}}(v,\mathrm{d} v)$ and let $F \in C^2(\mathbb{R}^d,\mathbb{R})$. 
Then $(I^{\mathrm{It\hat{o}}}_k(\mathrm{D} F(v), \mathrm{d} v))_k$ converges to a limit $I^{\mathrm{It\hat{o}}}(\mathrm{D} F(v), \mathrm{d} v)$ that satisfies for all $t \in [0,1]$ \begin{align*} F(v(t)) - F(v(0)) = I^{\mathrm{It\hat{o}}}(\mathrm{D} F(v), \mathrm{d} v)(t) + \frac{1}{2}\int_0^t \sum_{k,\ell=1}^d \partial_{x_k} \partial_{x_\ell} F(v(s)) \mathrm{d} [v^k, v^\ell](s). \end{align*} \end{cor} \begin{proof} This is Remarque 1 of F\"ollmer~\cite{Follmer1979} in combination with Lemma~\ref{l:ito implies quadratic variation}. \end{proof} \section{Construction of the L\'evy area}\label{s:construction of levy area} To apply our theory, it remains to construct the L\'evy area respectively the pathwise It\^{o} integrals for suitable stochastic processes. In Section~\ref{ss:hypercontractive area} we construct the L\'evy area for hypercontractive stochastic processes whose covariance function satisfies a certain ``finite variation'' property. In Section~\ref{ss:pathwise ito area for martingales} we construct the pathwise It\^{o} iterated integrals for some continuous martingales. \subsection{Hypercontractive processes}\label{ss:hypercontractive area} Let $X\colon [0,1] \to \mathbb{R}^d$ be a centered continuous stochastic process, such that $X^i$ is independent of $X^j$ for $i \neq j$. We write $R$ for its covariance function, $R\colon [0,1]^2 \to \mathbb{R}^{d\times d}$ and $R(s,t) := (E(X^i_s X^j_t))_{1 \le i,j\le d}$. The increment of $R$ over a rectangle $[s,t] \times [u,v] \subseteq [0,1]^2$ is defined as \begin{align*} R_{[s,t] \times [u,v]} := R(t,v) + R(s,u) - R(s,v) - R(t,u) := (E(X^i_{s,t} X^j_{u,v}))_{1 \le i, j \le d}. \end{align*} Let us make the following two assumptions. \begin{itemize} \item[($\rho$--var)] There exists $C > 0$ such that for all $0 \le s < t \le 1$ and for every partition $s = t_0 < t_1 < \dots < t_n = t$ of $[s,t]$ we have \begin{align*} \sum_{i,j=1}^n | R_{[t_{i-1},t_i] \times [t_{j-1},t_j]}|^\rho \le C |t-s|. \end{align*} \item[(HC)] The process $X$ is hypercontractive, i.e. for every $m,n \in \mathbb{N}$ and every $r \ge 1$ there exists $C_{r,m,n} > 0$ such that for every polynomial $P: \mathbb{R}^n \rightarrow \mathbb{R}$ of degree $m$, for all $i_1, \dots, i_n \in \{1, \dots, d\}$, and for all $t_1, \dots, t_n \in [0,1]$ \begin{align*} E(|P(X^{i_1}_{t_1}, \dots, X^{i_n}_{t_n})|^{2r}) \le C_{r,m,n} E(|P(X^{i_1}_{t_1}, \dots, X^{i_n}_{t_n})|^{2})^r. \end{align*} \end{itemize} These conditions are taken from~\cite{Friz2010c}, where under even more general assumptions it is shown that it is possible to construct the iterated integrals $I(X, \mathrm{d} X)$, and that $I(X,\mathrm{d} X)$ is the limit of $(I(X^n, \mathrm{d} X^n))_{n \in \mathbb{N}}$ under a wide range of smooth approximations $(X^n)_n$ that converge to $X$. \begin{ex} Condition (HC) is satisfied by all Gaussian processes. More generally, it is satisfied by every process ``living in a fixed Gaussian chaos''; see~\cite{Janson1997}, Theorem~3.50. Slightly oversimplifying things, this is the case if $X$ is given by polynomials of fixed degree and iterated integrals of fixed order with respect to a Gaussian reference process. Prototypical examples of processes living in a fixed chaos are Hermite processes. 
They are defined for $H \in (1/2,1)$ and $k\in \mathbb{N}$, $k \ge 1$ as \begin{align*} Z^{k,H}_t = C(H,k) \int_{\mathbb{R}^k} \left(\int_0^t \prod_{i=1}^k (s - y_i)^{-\left(\frac{1}{2} + \frac{1-H}{k}\right)}_+\mathrm{d} s\right) \mathrm{d} B_{y_1} \dots \mathrm{d} B_{y_k}, \end{align*} where $(B_y)_{y \in \mathbb{R}}$ is a standard Brownian motion, and $C(H,k)$ is a normalization constant. In particular, $Z^{k,H}$ lives in the Wiener chaos of order $k$. The covariance of $Z^{k,H}$ is \begin{align*} E(Z^{k,H}_s Z^{k,H}_t) = \frac{1}{2} \left( t^{2H} + s^{2H} - |t-s|^{2H}\right). \end{align*} Since $Z^{1,H}$ is Gaussian, it is just the fractional Brownian motion with Hurst parameter $H$. For $k=2$ we obtain the Rosenblatt process. For further details about Hermite processes see~\cite{Peccati2011}. However, we should point out that it follows from Kolmogorov's continuity criterion that $Z^{k,H}$ is $\alpha$--H\"older continuous for every $\alpha < H$. Since $H \in (1/2,1)$, Hermite processes are amenable to Young integration, and it is trivial to construct $L(Z^{k,H}, Z^{k,H})$. \end{ex} \begin{ex} Condition ($\rho$--var) is satisfied by Brownian motion with $\rho = 1$. More generally it is satisfied by the fractional Brownian motion with Hurst index $H$, for which $\rho = 1/(2H)$. It is also satisfied by the fractional Brownian bridge with Hurst index $H$. A general criterion that implies condition ($\rho$--var) is the one of Coutin and Qian~\cite{Coutin2002}: If $E(|X^i_{s,t}|^2) \lesssim |t-s|^{2H}$ and $|E(X^i_{s,s+h} X^i_{t,t+h})| \lesssim |t-s|^{2H-2} h^2$ for $i = 1, \dots, d$, then ($\rho$--var) is satisfied for $\rho = 1/(2H)$. For details and further examples see~\cite{Friz2010}, Section 15.2. \end{ex} \begin{lem}\label{l:rho-var to dyadic generation} Assume that the stochastic process $X:[0,1]\rightarrow \mathbb{R}$ satisfies ($\rho$--var). Then we have for all $p \ge -1$ and for all $M,N \in\mathbb{N}$ with $M \le N \le 2^{p}$ that \begin{align}\label{e:rho-var to dyadic generation} \sum_{m_1,m_2=M}^N |E(X_{pm_1} X_{pm_2})|^\rho \lesssim (N-M+1)2^{-p}. \end{align} \end{lem} \begin{proof} The case $p\le 0$ is easy so let $p \ge 1$. It suffices to note that \begin{align*} E(X_{pm_1} X_{pm_2}) & = E\left((X_{t^0_{pm_1},t^1_{pm_1}} - X_{t^1_{pm_1},t^2_{pm_1}})(X_{t^0_{pm_2},t^1_{pm_2}} - X_{t^1_{pm_2},t^2_{pm_2}})\right) \\ & = \sum_{i_1, i_2 = 0,1} (-1)^{i_1 + i_2} R_{[t^{i_1}_{pm_1},t^{i_1+1}_{pm_1}]\times [t^{i_2}_{pm_2},t^{i_2+1}_{pm_2}]}, \end{align*} and that $\{t^i_{pm}: i=0,1,2, m = M, \dots, N\}$ partitions the interval $[(M-1) 2^{-p}, N 2^{-p}]$. \end{proof} \begin{lem}\label{l:generation moment estimate} Let $X,Y: [0,1] \rightarrow \mathbb{R}$ be independent, centered, continuous processes, both satisfying ($\rho$--var) for some $\rho \in [1,2]$. Then for all $i, p \ge -1$, $q<p$, and $0 \le j \le 2^i$ \begin{align*} E\Big[\Big|\sum_{m\le 2^p} \sum_{n\le 2^q} X_{pm} Y_{qn} \langle 2^{-i}\chi_{ij}, \mathrm{var}phi_{pm} \chi_{qn}\rangle\Big|^2\Big] \lesssim 2^{(p \vee i)(1/\rho - 4)} 2^{(q \vee i)(1-1/\rho)} 2^{-i} 2^{p(4-3/\rho)} 2^{q/\rho}. \end{align*} \end{lem} \begin{proof} Since $p > q$, for every $m$ there exists exactly one $n(m)$, such that $\mathrm{var}phi_{pm}\chi_{qn(m)}$ is not identically zero. 
Hence, we can apply the independence of $X$ and $Y$ to obtain \begin{align*} &E\Bigl[\Bigl|\sum_{m\le 2^p} \sum_{n\le 2^q} X_{pm} Y_{qn} \langle 2^{-i}\chi_{ij}, \mathrm{var}phi_{pm} \chi_{qn}\rangle\Bigr|^2\Bigr] \\ &\hspace{20pt} \le \sum_{m_1,m_2=0}^{2^p} \bigl|E(X_{pm_1}X_{pm_2})E(Y_{qn(m_1)}Y_{qn(m_2)}) \langle 2^{-i}\chi_{ij}, \mathrm{var}phi_{pm_1}\chi_{qn(m_1)}\rangle \langle 2^{-i}\chi_{ij}, \mathrm{var}phi_{pm_2}\chi_{qn(m_2)}\rangle\bigr|. \end{align*} Let us write $M_j := \{m: 0 \le m \le 2^p, \langle \chi_{ij}, \mathrm{var}phi_{pm}\chi_{qn(m)}\rangle\neq 0\}$. We also write $\rho'$ for the conjugate exponent of $\rho$, i.e. $1/\rho + 1/\rho' = 1$. H\"older's inequality and Lemma~\ref{l:schauder coefficients of iterated integrals} imply \begin{align*} &\sum_{m_1,m_2 \in M_j} \bigl|E(X_{pm_1}X_{pm_2})E(Y_{qn(m_1)}Y_{qn(m_2)}) \langle 2^{-i}\chi_{ij}, \mathrm{var}phi_{pm_1}\chi_{qn(m_1)}\rangle \langle 2^{-i}\chi_{ij}, \mathrm{var}phi_{pm_2}\chi_{qn(m_2)}\rangle\bigr| \\ &\hspace{20pt} \lesssim \Biggl(\sum_{m_1,m_2 \in M_j} \bigl|E(X_{pm_1}X_{pm_2})\bigr|^\rho\Biggr)^{1/\rho} \Biggl(\sum_{m_1,m_2 \in M_j}\bigl|E(Y_{qn(m_1)}Y_{qn(m_2)})\bigr|^{\rho'} \Biggr)^{1/\rho'} (2^{-2 (p \vee i) + p + q})^2. \end{align*} Now write $N_j$ for the set of $n$ for which $\chi_{ij} \chi_{qn}$ is not identically zero. For every $\bar{n} \in N_j$ there are $2^{p-q}$ numbers $m \in M_j$ with $n(m) = \bar{n}$. Hence \begin{align*} &\Bigl(\sum_{m_1,m_2 \in M_j}\bigl|E(Y_{qn(m_1)}Y_{qn(m_2)})\bigr|^{\rho'} \Bigr)^{1/\rho'} \\ &\hspace{60pt} \lesssim (2^{2(p-q)})^{1/\rho'} \bigg(\Big(\max_{n_1, n_2 \in N_j} \bigl|E(Y_{qn_1}Y_{qn_2})\bigr|\Big)^{\rho'-\rho} \sum_{n_1,n_2 \in N_j}\bigl|E(Y_{qn_1}Y_{qn_2})\bigr|^{\rho} \bigg)^{1/\rho'}, \end{align*} where we used that $\rho \in [1,2]$ and therefore $\rho' - \rho \ge 0$ (for $\rho'=\infty$ we interpret the right hand side as $\max_{n_1, n_2 \in N_j} |E(Y_{qn_1}Y_{qn_2})|$). Lemma~\ref{l:rho-var to dyadic generation} implies that $\bigl(\bigl|E(Y_{qn_1}Y_{qn_2})\bigr|^{\rho'-\rho}\bigr)^{1/\rho'} \lesssim 2^{-q(1/\rho - 1/\rho')}$. Similarly we apply Lemma~\ref{l:rho-var to dyadic generation} to the sum over $n_1, n_2$, and we obtain \begin{align*} & (2^{2(p-q)})^{1/\rho'} \biggl(\Big(\max_{n_1, n_2 \in N_j} \bigl|E(Y_{qn_1}Y_{qn_2})\bigr|\Big)^{\rho'-\rho} \sum_{n_1,n_2 \in N_j}\bigl|E(Y_{qn_1}Y_{qn_2})\bigr|^{\rho} \biggr)^{1/\rho'}\\ &\hspace{60pt} \lesssim (2^{2(p-q)})^{1/\rho'} 2^{-q(1/\rho - 1/\rho')} (|N_j| 2^{-q})^{1/\rho'} = 2^{(q \vee i)/ \rho'} 2^{-i/\rho'} 2^{2p/\rho'} 2^{q(-2/\rho'-1/\rho)} \\ &\hspace{60pt} = 2^{(q \vee i)(1-1/\rho)} 2^{i(1/\rho-1)} 2^{2p(1-1/\rho)} 2^{q(1/\rho-2)}, \end{align*} where we used that $|N_j| = 2^{(q \vee i) - i}$. Since $|M_j| = 2^{(p \vee i) - i}$, another application of Lemma~\ref{l:rho-var to dyadic generation} yields \begin{align*} \Bigl(\sum_{m_1,m_2 \in M_j} \bigl|E(X_{pm_1}X_{pm_2})\bigr|^\rho\Bigr)^{1/\rho} \lesssim 2^{(p \vee i) / \rho} 2^{-i / \rho} 2^{-p/\rho}. 
\end{align*} The result now follows by combining these estimates: \begin{align*} &E\Bigl[\Bigl|\sum_{m\le 2^p} \sum_{n\le 2^q} X_{pm} Y_{qn} \langle 2^{-i}\chi_{ij}, \mathrm{var}phi_{pm} \chi_{qn}\rangle\Bigr|^2\Bigr]\\ &\hspace{25pt} \lesssim \Bigl(\sum_{m_1,m_2 \in M_j} \bigl|E(X_{pm_1}X_{pm_2})\bigr|^\rho\Bigr)^{1/\rho} \Bigl(\sum_{m_1,m_2 \in M_j}\bigl|E(Y_{qn(m_1)}Y_{qn(m_2)})\bigr|^{\rho'} \Bigr)^{1/\rho'} (2^{-2 (p \vee i) + p + q})^2\\ &\hspace{25pt} \lesssim \big(2^{(p \vee i) / \rho} 2^{-i / \rho} 2^{-p/\rho}\big) \big(2^{(q \vee i)(1-1/\rho)} 2^{i(1/\rho-1)} 2^{2p(1-1/\rho)} 2^{q(1/\rho-2)}\big)\big(2^{-4 (p \vee i) + 2p + 2q} \big)\\ &\hspace{25pt} = 2^{(p \vee i)(1/\rho - 4)} 2^{(q \vee i)(1-1/\rho)} 2^{-i} 2^{p(4-3/\rho)} 2^{q/\rho}. \end{align*} \end{proof} \begin{thm}\label{t:existence of levy area} Let $X\colon [0,1] \to \mathbb{R}^d$ be a continuous, centered stochastic process with independent components, and assume that $X$ satisfies (HC) and ($\rho$--var) for some $\rho \in [1,2)$. Then for every $\alpha \in (0,1/\rho)$ almost surely \begin{align*} \sum_{N \ge 0} \left\lVert L(S_N X, S_N X) - L(S_{N-1} X, S_{N-1} X) \right\rVert_\alpha < \infty, \end{align*} and therefore $L(X,X) = \lim_{N \rightarrow \infty} L(S_N X,S_N X)$ is almost surely $\alpha$--H\"older continuous. \end{thm} \begin{proof} First note that $L$ is antisymmetric, and in particular the diagonal of the matrix $L(S_N X, S_N X)$ is constantly zero. For $k, \ell \in \{1, \dots, d\}$ with $k \neq \ell$ we have \begin{align*} & \lVert L(S_N X^k, S_N X^\ell) - L(S_{N-1} X^k, S_{N-1} X^\ell)\rVert_\alpha \\ &\hspace{20pt} = \Bigl\lVert\sum_{q<N} \sum_{m,n} (X^k_{Nm} X^\ell_{qn} - X^k_{qn}X^\ell_{Nm}) \int_0^\cdot \mathrm{var}phi_{N m}(s) \mathrm{d} \mathrm{var}phi_{qn}(s)\Bigr\rVert_\alpha \\ &\hspace{20pt} \le \sum_{q<N} \Bigl\lVert \sum_{m,n} X^k_{Nm} X^\ell_{qn} \int_0^\cdot \mathrm{var}phi_{N m}(s) \mathrm{d} \mathrm{var}phi_{qn}(s)\Bigr\rVert_\alpha + \sum_{q<N} \Bigl\lVert \sum_{m,n} X^\ell_{Nm} X^k_{qn} \int_0^\cdot \mathrm{var}phi_{N m}(s) \mathrm{d} \mathrm{var}phi_{qn}(s)\Bigr\rVert_\alpha \end{align*} Let us argue for the first term on the right hand side, the arguments for the second one being identical. Let $r \ge 1$. Using the hypercontractivity condition (HC), we obtain \begin{align*} &\sum_{i,N} \sum_{j\le2^i} \sum_{q<N} P\Bigl( \Bigl|\sum_{m,n} X^\ell_{Nm} X^k_{qn} \langle 2^{-i} \chi_{ij}, \mathrm{var}phi_{N m} \chi_{qn}\rangle\Bigr| > 2^{-i\alpha} 2^{-N/(2r)} 2^{-q/(2r)} \Bigr) \\ &\hspace{100pt} \le \sum_{i,N} \sum_{j\le 2^i} \sum_{q<N} E\Bigl( \Bigl|\sum_{m,n} X^\ell_{Nm} X^k_{qn} \langle 2^{-i} \chi_{ij}, \mathrm{var}phi_{N m} \chi_{qn}\rangle\Bigr|^{2r}\Bigr) 2^{ i\alpha 2 r} 2^{N + q}\\ &\hspace{100pt} \lesssim \sum_{i,N} \sum_{j\le2^i} \sum_{q<N} E\Bigl( \Bigl|\sum_{m,n} X^\ell_{Nm} X^k_{qn} \langle 2^{-i} \chi_{ij}, \mathrm{var}phi_{N m} \chi_{qn}\rangle\Bigr|^{2}\Bigr)^r 2^{ i\alpha 2 r} 2^{N + q}. 
\end{align*} Now we can apply Lemma~\ref{l:generation moment estimate} to bound this expression by \begin{align*} &\sum_{i,N} \sum_{j\le2^i} \sum_{q<N} \bigl( 2^{(N \vee i)(1/\rho - 4)} 2^{(q \vee i)(1-1/\rho)} 2^{-i} 2^{N(4-3/\rho)} 2^{q/\rho}\bigr)^r 2^{ i\alpha 2 r} 2^{N + q}\\ &\hspace{60pt} \lesssim \sum_{i} 2^i \sum_{N\le i} \sum_{q<N} 2^{ir(2\alpha - 4)} 2^{Nr(4-3/\rho + 1/r)} 2^{qr(1/\rho+1/r)} \\ &\hspace{80pt} + \sum_{i} 2^i \sum_{N > i} \sum_{q\le i} 2^{ir(2\alpha - 1/\rho)} 2^{Nr(1/r - 2/\rho)} 2^{qr(1/\rho+1/r)} \\ &\hspace{80pt} + \sum_{i} 2^i \sum_{N > i} \sum_{i<q<N} 2^{ir(2\alpha - 1)} 2^{Nr(1/r - 2/\rho)} 2^{qr(1+1/r)} \\ &\hspace{60pt} \lesssim \sum_{i} 2^{ir(2\alpha + 3/r - 2/\rho)} + \sum_{i} \sum_{N > i} 2^{ir(2\alpha + 2/r)} 2^{Nr(1/r - 2/\rho)}\\ &\hspace{80pt} + \sum_{i} \sum_{N > i} 2^{ir(2\alpha +1/r - 1)} 2^{Nr(1 + 2/r - 2/\rho)}. \end{align*} For $r \ge 1$ we have $1/r - 2/\rho < 0$, because $\rho < 2$. Therefore, the sum over $N$ in the second term on the right hand side converges. If now we choose $r > 1$ large enough so that $1 + 3/r - 2/\rho < 0$ (and then also $2\alpha + 3/r - 2/\rho < 0$), then all three series on the right hand side are finite. Hence, Borel-Cantelli implies the existence of $C(\omega) > 0$, such that for almost all $\omega \in \Omega$ and for all $N, i, j$ and $q<N$ \begin{align*} \Bigl|\sum_{m,n} X^\ell_{Nm}(\omega) X^k_{qn}(\omega) \langle 2^{-i} \chi_{ij}, \mathrm{var}phi_{N m} \chi_{qn}\rangle\Bigr| \le C(\omega) 2^{-i\alpha} 2^{-N/(2r)} 2^{-q/(2r)}. \end{align*} From here it is straightforward to see that for these $\omega$ we have \begin{align*} \sum_{N=0}^\infty \left\lVert L(S_N X(\omega), S_N X(\omega)) - L(S_{N-1} X(\omega), S_{N-1} X(\omega)) \right\rVert_\alpha < \infty. \end{align*} \end{proof} \subsection{Continuous martingales}\label{ss:pathwise ito area for martingales} Here we assume that $(X_t)_{t \in [0,1]}$ is a $d$--dimensional continuous martingale. Of course in that case it is no problem to construct the It\^{o} integral $I^{\mathrm{It\hat{o}}}(X,\mathrm{d} X)$. But to apply the results of Section~\ref{s:pathwise ito}, we still need the pathwise convergence of $I^{\mathrm{It\hat{o}}}_k(X,\mathrm{d} X)$ to $I^{\mathrm{It\hat{o}}}(X,\mathrm{d} X)$ and the uniform H\"older continuity of $I^{\mathrm{It\hat{o}}}_k(X,\mathrm{d} X)$ along the dyadics. Recall that for a $d$--dimensional semimartingale $X=(X^1, \dots, X^d)$, the quadratic variation is defined as $[ X] = ([ X^i, X^j])_{1 \le i,j \le d}$. We also write $X_s X_{s,t} := (X^i_s X^j_{s,t})_{1 \le i, j \le d}$ for $s,t \in [0,1]$. \begin{thm}\label{t:continuous martingale iterated integrals} Let $X=(X^1,\dots,X^d)$ be a $d$--dimensional continuous martingale. Assume that there exist $p \ge 2$ and $\beta > 0$, such that $p \beta > 7/2$, and such that \begin{align}\label{e:martingale area assumption} E(|[ X ]_{s,t}|^p) \lesssim |t-s|^{2p\beta} \end{align} for all $s,t \in [0,1]$. Then $I^{\mathrm{It\hat{o}}}_k(X,\mathrm{d} X)$ almost surely converges uniformly to $I^{\mathrm{It\hat{o}}}(X,\mathrm{d} X)$. Furthermore, for all $\alpha \in (0, \beta - 1/p)$ we have $X \in \mathcal{C}^\alpha$ and almost surely \begin{align}\label{e:uniform hoelder along dyadics for martingale} \sup_k \sup_{0 \le \ell < \ell' \le 2^k} \frac{|I^{\mathrm{It\hat{o}}}_k(X,\mathrm{d} X)_{\ell 2^{-k}, \ell' 2^{-k}} - X_{\ell2^{-k}} X_{\ell2^{-k}, \ell' 2^{-k}}|}{|(\ell'-\ell)2^{-k}|^{2\alpha}} < \infty. 
\end{align} \end{thm} \begin{proof} The H\"older continuity of $X$ follows from Kolmogorov's continuity criterion. Indeed, applying the Burkholder-Davis-Gundy inequality and \eqref{e:martingale area assumption} we have \begin{align*} E(|X_{s,t}|^{2p}) \lesssim \sum_{i=1}^d E(|X^i_{s,t}|^{2p}) \lesssim \sum_{i=1}^d E(|[ X^i]_{s,t}|^p) \lesssim E(|[ X ]_{s,t}|^p) \lesssim |t-s|^{2p\beta}, \end{align*} so that $X \in \mathcal{C}^{\alpha}$ for all $\alpha \in (0, \beta - 1/(2p))$ and in particular for all $\alpha \in (0, \beta - 1/p)$. Since we will need it below, let us also study the regularity of the It\^{o} integral $I^{\mathrm{It\hat{o}}}(X,\mathrm{d} X)$: A similar application of Burkholder-Davis-Gundy gives \begin{align*} E(|I^{\mathrm{It\hat{o}}}(X,\mathrm{d} X)_{s,t} - X_s X_{s,t}|^p)\lesssim E\Bigl( \Bigl| \int_s^t |X_r - X_s|^2 \mathrm{d}|[ X ]|_s \Bigr|^{\frac{p}{2}} \Bigr). \end{align*} We apply H\"older's inequality (here we need $p \ge 2$) to obtain \begin{equation*} E\Bigl( \Bigl| \int_s^t |X_r - X_s|^2 \mathrm{d}|[ X ]|_s \Bigr|^{\frac{p}{2}} \Bigr) \lesssim E\Bigl( |[ X ]|_{s,t}^{\frac{p}{2} - 1} \int_s^t |X_r - X_s|^p \mathrm{d}|[ X ]|_s \Bigr). \end{equation*} Now the inequalities by Cauchy-Schwarz and then by Burkholder-Davis-Gundy yield \begin{align*} E\Bigl( |[ X ]|_{s,t}^{\frac{p}{2} - 1} \int_s^t |X_r - X_s|^p \mathrm{d}|[ X ]|_s \Bigr) & \lesssim E\Bigl( |[ X ]|_{s,t}^{\frac{p}{2}} \sup_{r \in [s,t]} |X_r - X_s|^p \Bigr) \\ &\le \sqrt{E\Bigl(\sup_{r \in [s,t]}|X_r - X_s|^{2p}\Bigr)} \sqrt{E(|[ X ]|_{s,t}^p)}\\ &\lesssim E(|[ X ]_{s,t}|^p) \lesssim |t-s|^{2p\beta}. \end{align*} The Kolmogorov criterion for rough paths, Theorem 3.1 of~\cite{Friz2013}, now implies that \begin{align}\label{e:continuous martingale pr1} |I^{\mathrm{It\hat{o}}}(X,\mathrm{d} X)_{s,t} - X_s X_{s,t}| \lesssim |t-s|^{2 \alpha} \end{align} almost surely for all $\alpha \in (0, \beta - 1/p)$. Let us get to the convergence of $I^{\mathrm{It\hat{o}}}_k(X,\mathrm{d} X)$. As before, we have \begin{align*} & E(|I^{\mathrm{It\hat{o}}}(X,\mathrm{d} X)_{\ell2^{-k}, \ell'2^{-k}} - I^{\mathrm{It\hat{o}}}_k(X,\mathrm{d} X)_{\ell2^{-k}, \ell'2^{-k}}|^p) \\ &\hspace{60pt} = E\Bigl( \Bigl| \int_{\ell 2^{-k}}^{\ell' 2^{-k}} \sum_{m = \ell}^{\ell'-1} \mathbf{1}_{[m2^{-k}, (m+1)2^{-k})}(r) X_{m 2^{-k},r} \mathrm{d} X_s \Bigr|^p \Bigr) \\ &\hspace{60pt} \lesssim E\Bigl( |[ X]|_{\ell 2^{-k}, \ell' 2^{-k}}^{\frac{p}{2}-1} \int_{\ell 2^{-k}}^{\ell' 2^{-k}} \Bigl|\sum_{m = \ell}^{\ell'-1} \mathbf{1}_{[m2^{-k}, (m+1)2^{-k})}(r) |X_{m 2^{-k},r}|^2 \Bigr|^{\frac{p}{2}} \mathrm{d}|[ X ]|_s \Bigr). \end{align*} Since the terms in the sum all have disjoint support, we can pull the exponent $p/2$ into the sum, from where we conclude that \begin{align*} &E\Bigl( |[ X]|_{\ell 2^{-k}, \ell' 2^{-k}}^{\frac{p}{2}-1} \int_{\ell 2^{-k}}^{\ell' 2^{-k}} \sum_{m = \ell}^{\ell'-1} \mathbf{1}_{[m2^{-k}, (m+1)2^{-k})}(r) |X_{m 2^{-k},r}|^p \mathrm{d}|[ X ]|_s \Bigr)\\ &\hspace{70pt} \lesssim \sqrt{E\Bigl(\sup_{r \in [s,t]} \Bigl| \sum_{m = \ell}^{\ell'-1} \mathbf{1}_{[m2^{-k}, (m+1)2^{-k})}(r) |X_{m 2^{-k},r}|^p \Bigr|^2 \Bigr)} \sqrt{E( |[ X]|_{\ell 2^{-k}, \ell' 2^{-k}}^p)}\\ &\hspace{70pt} \lesssim \sqrt{\sum_{m=\ell}^{\ell'-1} E( |[ X]_{m 2^{-k},(m+1)2^{-k}}|^p)} \sqrt{E( |[ X]_{\ell 2^{-k}, \ell' 2^{-k}}|^p)}\\ &\hspace{70pt} \lesssim \sqrt{(\ell'-\ell) (2^{-k})^{2p\beta}} \sqrt{|(\ell'-\ell) 2^{-k}|^{2p\beta}} = (\ell'-\ell)^{\frac{1}{2} + p\beta} 2^{-k 2 p \beta}. 
\end{align*} Hence, we obtain for $\alpha \in \mathbb{R}$ that \begin{align*} &P\left(|I^{\mathrm{It\hat{o}}}(X,\mathrm{d} X)_{\ell2^{-k}, \ell'2^{-k}} - I^{\mathrm{It\hat{o}}}_k(X,\mathrm{d} X)_{\ell2^{-k}, \ell'2^{-k}}| > |(\ell'-\ell)2^{-k}|^{2\alpha}\right) \\ &\hspace{160pt} \lesssim \frac{(\ell'-\ell)^{\frac{1}{2} + p\beta} 2^{-k 2 p \beta}}{(\ell'-\ell)^{2p\alpha} 2^{-k 2 p \alpha}} = (\ell'-\ell)^{\frac{1}{2} + p\beta - 2p\alpha} 2^{-k2p (\beta - \alpha)}. \end{align*} If we set $\alpha = \beta - 1/(2p) - \mathrm{var}epsilon$, then $1/2 + p \beta - 2p\alpha = 3/2 - p \beta + 2p\mathrm{var}epsilon$. Now by assumption $p \beta > 7/2$ and therefore we can find $\alpha \in (0, \beta - 1/(2p))$ such that \begin{align}\label{e:continuous martingale pr2} 1/2 + p \beta - 2p\alpha < -2. \end{align} Estimating the double sum by a double integral, we easily see that for all $\gamma<-2$ \begin{align*} \sum_{\ell=1}^{2^k} \sum_{\ell'=\ell+1}^{2^k} (\ell'-\ell)^{\gamma} \lesssim 2^k. \end{align*} Therefore, we have for $\alpha \in (0, \beta - 1/(2p))$ satisfying \eqref{e:continuous martingale pr2} \begin{align*} &\sum_{\ell=1}^{2^k} \sum_{\ell'=\ell+1}^{2^k} P\left(|I^{\mathrm{It\hat{o}}}(X,\mathrm{d} X)_{\ell2^{-k}, \ell'2^{-k}} - I^{\mathrm{It\hat{o}}}_k(X,\mathrm{d} X)_{\ell2^{-k}, \ell'2^{-k}}| > |(\ell'-\ell)2^{-k}|^{2\alpha}\right) \\ &\hspace{50pt} \lesssim 2^k 2^{-k2p (\beta - \alpha)}. \end{align*} Since $\alpha < \beta - 1/(2p)$, this is summable in $k$, and therefore Borel-Cantelli implies that \begin{align}\label{e:continuous martingale pr3} \sup_k \sup_{0 \le \ell < \ell' \le 2^k} \frac{|I^{\mathrm{It\hat{o}}}(X,\mathrm{d} X)_{\ell 2^{-k}, \ell' 2^{-k}} - I^{\mathrm{It\hat{o}}}_k(X,\mathrm{d} X)_{\ell 2^{-k}, \ell' 2^{-k}} |}{|(\ell'-\ell)2^{-k}|^{2\alpha}} < \infty \end{align} almost surely. We only proved this for $\alpha$ close enough to $\beta - 1/(2p)$, but of course then it also holds for all $\alpha'\le\alpha$. The estimate \eqref{e:uniform hoelder along dyadics for martingale} now follows by combining \eqref{e:continuous martingale pr1} and \eqref{e:continuous martingale pr3}. The uniform convergence of $I^{\mathrm{It\hat{o}}}_k(X,\mathrm{d} X)$ to $I^{\mathrm{It\hat{o}}}(X,\mathrm{d} X)$ follows from \eqref{e:continuous martingale pr3} in combination with the H\"older continuity of $X$. \end{proof} \begin{ex} The conditions of Theorem~\ref{t:continuous martingale iterated integrals} are satisfied by all It\^{o} martingales of the form $X_t = X_0 + \int_0^t \sigma_s \mathrm{d} W_s$, as long as $\sigma$ satisfies $E(\sup_{s \in [0,1]} |\sigma_s|^{2p}) < \infty$ for some $p > 7$. In that case we can take $\beta = 1/2$ so that in particular $\beta - 1/p > 1/3$, which means that $X$ and $I^{\mathrm{It\hat{o}}}(X,\mathrm{d} X)$ are sufficiently regular to apply the results of Section~\ref{s:pathwise ito}. \end{ex} \section{Pathwise stochastic differential equations}\label{s:sde} We are now ready to solve SDEs of the form \begin{equation}\label{e:sde} \mathrm{d} y(t) = b(y(t)) \mathrm{d} t + \sigma(y(t)) \mathrm{d} v(t), \qquad y(0) = y_0, \end{equation} pathwise, where the ``stochastic'' integral $\mathrm{d} v$ will be interpreted as $I(\sigma(y), \mathrm{d} v)$ or $I^{\mathrm{It\hat{o}}}(\sigma(y), \mathrm{d} v)$. 
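Before we start, let us indicate how \eqref{e:sde} can be discretized along the dyadic grids used throughout the paper. The following Python sketch is only an illustration and is not the construction employed below (which is based on a fixed point argument in spaces of paracontrolled paths): it implements the forward Euler scheme associated with the nonanticipating Riemann sums of Section~\ref{s:pathwise ito}, for an assumed sampled path $v$ and for the hypothetical coefficients $b(y) = -y$ and $\sigma(y) = \sin(y)$.
\begin{verbatim}
import numpy as np

# Illustration only: forward Euler scheme on the dyadic grid of generation k,
# mirroring the nonanticipating (Ito-type) Riemann sums of Section 4. The sampled
# path v and the coefficients b, sigma are assumptions made for this example.
np.random.seed(2)
N = 14
n = 2**N
v = np.concatenate(([0.0], np.cumsum(np.random.randn(n) / np.sqrt(n))))

b = lambda y: -y              # hypothetical drift
sigma = lambda y: np.sin(y)   # hypothetical diffusion coefficient (bounded, smooth)

def euler_dyadic(y0, k):
    # y(t^2_{km}) = y(t^0_{km}) + b(y(t^0_{km})) 2^{-k} + sigma(y(t^0_{km})) v_{t^0_{km}, t^2_{km}}
    step = 2**(N - k)
    y = np.empty(2**k + 1)
    y[0] = y0
    for m in range(2**k):
        incr = v[(m + 1) * step] - v[m * step]
        y[m + 1] = y[m] + b(y[m]) * 2.0**(-k) + sigma(y[m]) * incr
    return y

for k in (8, 10, 12, 14):
    print(k, euler_dyadic(1.0, k)[-1])   # terminal values; one expects them to stabilize as k grows
\end{verbatim}
Heuristically, such left point schemes correspond to the It\^{o}-type interpretation $I^{\mathrm{It\hat{o}}}(\sigma(y),\mathrm{d} v)$ of the stochastic integral; we do not prove any convergence statement for this scheme here.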
Assume for example that $(v, L(v,v)) \in \mathcal{C}^\alpha \times \mathcal{C}^{2\alpha}$ for some $\alpha \in (1/3,1/2)$ is given, and that $b$ is Lipschitz continuous whereas $\sigma \in C^{1+\varepsilon}_b$ for some $\varepsilon$ with $2(\alpha+\varepsilon)>1$. Then Corollary~\ref{c:controlled under smooth} implies that $\sigma(y) \in \mathcal{D}^{\varepsilon\alpha}_v$ for every $y \in \mathcal{D}^\alpha_v$, and Theorem~\ref{t:rough path integral} then shows that $y_0 + \int_0^\cdot b(y(t)) \mathrm{d} t + I(\sigma(y),\mathrm{d} v) \in \mathcal{D}^\alpha_v$. Moreover, if we restrict ourselves to the set \[ \mathcal{M}_\sigma = \{ y \in \mathcal{D}^\alpha_v : \lVert y^v \rVert_\infty \le \lVert \sigma \rVert_\infty \}, \] then the map $\mathcal{M}_\sigma \ni (y,y^v) \mapsto \Gamma(y) = (y_0 + \int_0^\cdot b(y(t)) \mathrm{d} t + I(\sigma(y),\mathrm{d} v), \sigma(y)) \in \mathcal{M}_\sigma$ satisfies the bound \begin{align*} \lVert \Gamma(y) \rVert_{v,\alpha} & \lesssim |y_0| + |b(0)| + \lVert b \rVert_{\mathrm{Lip}} \lVert y \rVert_\infty + \lVert \sigma(y) \rVert_{v,\varepsilon\alpha} (\lVert v \rVert_\alpha + \lVert v \rVert_\alpha^2 + \lVert L(v,v)\rVert_{2\alpha}) + \lVert \sigma(y) \rVert_\alpha \\ & \lesssim |y_0| + |b(0)| + (1 + \lVert b \rVert_{\mathrm{Lip}})(1 + \lVert \sigma \rVert_{C^{1+\varepsilon}_b}^{2+\varepsilon})(1 + \lVert v \rVert_\alpha^2 + \lVert L(v,v)\rVert_{2\alpha})(1 + \lVert y \rVert_{v,\varepsilon\alpha}), \end{align*} where we wrote $\lVert b \rVert_{\mathrm{Lip}}$ for the Lipschitz norm of $b$. To pick up a small factor we apply a scaling argument. For $\lambda\in(0,1]$ we introduce the map $\Lambda_\lambda \colon \mathcal{C}^\beta \to \mathcal{C}^\beta$ defined by $\Lambda_\lambda f(t) = f(\lambda t)$. Then for $\lambda = 2^{-k}$ and on the interval $[0,\lambda]$ equation~\eqref{e:sde} is equivalent to \begin{equation}\label{e:sde rescaled} \mathrm{d} y^\lambda(t) = \lambda b(y^\lambda(t)) \mathrm{d} t + \lambda^\alpha \sigma(y^\lambda(t)) \mathrm{d} v^\lambda(t), \qquad y^\lambda(0) = y_0, \end{equation} where $y^\lambda = \Lambda_\lambda y$, $v^\lambda = \lambda^{-\alpha} \Lambda_\lambda v$. To see this, note that \[ \Lambda_\lambda I(f,\mathrm{d} v) = \lim_{N\to \infty} \int_0^{\lambda\cdot} S_N f (t) \partial_t S_N v (t) \mathrm{d} t = \lim_{N\to\infty} \int_0^{\cdot} (\Lambda_\lambda S_N f) (t) \partial_t (\Lambda_\lambda S_N v) (t) \mathrm{d} t. \] But now $\Lambda_{2^{-k}} S_N g = S_{N-k} \Lambda_\lambda g$ for all sufficiently large $N$, and therefore \[ \Lambda_\lambda I(f,\mathrm{d} v) = \lambda^\alpha I(\Lambda_\lambda f, \mathrm{d} v^\lambda). \] For the quadratic covariation we have \[ \Lambda_\lambda [f,v] = [\Lambda_\lambda f, \Lambda_\lambda v] = \lambda^\alpha [\Lambda_\lambda f, v^\lambda], \] from which we obtain~\eqref{e:sde rescaled} also in the It\^o case. In other words we can replace $b$ by $\lambda b$, $\sigma$ by $\lambda^\alpha \sigma$, and $v$ by $v^\lambda$. It now suffices to show that $v^\lambda$, $L(v^\lambda, v^\lambda)$, and $[v^\lambda, v^\lambda]$ are uniformly bounded in $\lambda$.
Since only increments of $v$ appear in~\eqref{e:sde} we may suppose $v(0) = 0$, in which case it is easy to see that $\lVert \Lambda_\lambda v \rVert_\alpha \lesssim \lambda^\alpha \lVert v \rVert_\alpha$ and $\lVert [v^\lambda, v^\lambda]\rVert_{2\alpha} \lesssim \lVert [v,v]\rVert_{2\alpha}$. As for the L\'evy area, we have \begin{align*} L(v^\lambda, v^\lambda) & = I(v^\lambda, \mathrm{d} v^\lambda) - \pi_<(v^\lambda, v^\lambda) - S(v^\lambda, v^\lambda) = \lambda^{-2\alpha} \Lambda_\lambda I(v,\mathrm{d} v) - \pi_<(v^\lambda, v^\lambda) - S(v^\lambda, v^\lambda) \\ & = \lambda^{-2\alpha} \big\{ \Lambda_\lambda L(v,v) + [\Lambda_\lambda \pi_<(v,v) - \pi_<(\Lambda_\lambda v,\Lambda_\lambda v)] + [\Lambda_\lambda S(v,v) - S(\Lambda_\lambda v, \Lambda_\lambda v)]\big\}, \end{align*} and therefore \[ \lVert L(v^\lambda, v^\lambda) \rVert_{2\alpha} \lesssim \lVert L(v,v) \rVert_{2\alpha} + \lVert S(v,v) \rVert_{2\alpha} + \lVert v \rVert_{\alpha}^2 + \lambda^{-2\alpha} \lVert \Lambda_\lambda \pi_<(v,v) - \pi_<(\Lambda_\lambda v,\Lambda_\lambda v) \rVert_{2\alpha}. \] But now \begin{align*} |\Lambda_\lambda \pi_<(v,v)_{s,t} - \pi_<(\Lambda_\lambda v,\Lambda_\lambda v)_{s,t}| & \le | \pi_<(v,v)_{\lambda s,\lambda t} - v(\lambda s) v_{\lambda s, \lambda t}| \\ &\quad + |\Lambda_\lambda v(s) (\Lambda_\lambda v)_{s,t} - \pi_<(\Lambda_\lambda v, \Lambda_\lambda v)_{s,t}| \\ &\lesssim \lVert v \rVert_\alpha^2 |\lambda(t-s)|^{2\alpha} + \lVert \Lambda_\lambda v \rVert_\alpha^2 |t-s|^{2\alpha} \\ &\lesssim \lambda^{2\alpha} \lVert v \rVert_\alpha^2 |(t-s)|^{2\alpha}. \end{align*} From here we obtain the uniform boundedness of $\lVert v^\lambda \rVert_{v^\lambda,\alpha}$ for small $\lambda$, depending only on $b,\sigma, v, L(v,v)$ and possibly $[v,v]$, but not on $y_0$. If $\sigma \in C^{2+\varepsilon}_b$, similar arguments give us a contraction for small $\lambda$, and therefore we obtain the existence and uniqueness of solutions to~\eqref{e:sde rescaled}. Since all operations involved depend on $(v,L(v,v),y_0)$ and possibly $[v,v]$ in a locally Lipschitz continuous way, $y^\lambda$ also depends locally Lipschitz continuously on this extended data. Then $y = \Lambda_{\lambda^{-1}} y^\lambda$ solves~\eqref{e:sde} on $[0,\lambda]$, and since $\lambda$ can be chosen independently of $y_0$, we obtain the global-in-time existence and uniqueness of a solution which depends locally Lipschitz continuously on $(v, L(v,v), y_0)$ and possibly $[v,v]$. \begin{thm}\label{t:sde} Let $\alpha \in (1/3, 1)$ and let $(v,L(v,v))$ satisfy the assumptions of Theorem~\ref{t:rough path integral}. Let $y_0 \in \mathbb{R}^d$ and $\varepsilon>0$ be such that $\alpha(2+\varepsilon) > 2$ and let $\sigma \in C^{2+\varepsilon}_b$ and $b$ be Lipschitz continuous. Then there exists a unique $y \in \mathcal{D}^\alpha_v$ such that \[ y = y_0 + \int_0^\cdot b(y(t)) \mathrm{d} t + I(\sigma(y), \mathrm{d} v). \] The solution $y$ depends locally Lipschitz continuously on $(v, L(v,v), y_0)$.
If furthermore $[v,v]$ satisfies the assumptions of Corollary~\ref{c:pathwise ito with smooth quadratic variation}, then there also exists a unique solution $x\in \mathcal{D}^\alpha_v$ to \begin{align*} x & = y_0 + \int_0^\cdot b(x(t)) \mathrm{d} t + I^{\mathrm{It\hat{o}}}(\sigma(x), \mathrm{d} v) \\ & = y_0 + \int_0^\cdot b(x(t)) \mathrm{d} t + I(\sigma(x), \mathrm{d} v) - \frac{1}{2} \int_0^\cdot \mathrm{D} \sigma(x(t)) \sigma(x(t)) \mathrm{d} [v,v]_t \end{align*} and $x$ depends locally Lipschitz continuously on $(v, L(v,v), [v,v], y_0)$. \end{thm} \begin{rmk} Since our integral is pathwise continuous, we can of course consider anticipating initial conditions and coefficients. Such problems arise naturally in the study of random dynamical systems; see for example~\cite{Imkeller1998,Arnold1999}. There are various approaches, for example filtration enlargements, Skorokhod integrals, or the noncausal Ogawa integral. While filtration enlargements are technically difficult, Skorokhod integrals have the disadvantage that in the anticipating case the integral is not always easy to interpret and can behave pathologically; see~\cite{Barlow1995}. With classical rough path theory these technical problems disappear. But then the integral is given as a limit of compensated Riemann sums (see Proposition~\ref{p:Gubinelli rough paths}). With our formulation of the integral it is clear that we can indeed consider usual Riemann sums. An approach to pathwise integration which allows one to define anticipating integrals without many technical difficulties while retaining a natural interpretation of the integral is the stochastic calculus via regularization of Russo and Vallois~\cite{Russo1993,Russo2007}. The integral notion studied by Ogawa~\cite{Ogawa1984, Ogawa1985} for anticipating stochastic integrals with respect to Brownian motion is based on Fourier expansions of integrand and integrator, and is therefore related to ours and to the Stratonovich integral (see Nualart, Zakai~\cite{NualartZakai1989}). Like the classical It\^o integral, it is interpreted in an $L^2$ limit sense, not a pathwise one. \end{rmk} \appendix \section{Regularity for Schauder expansions with affine coefficients}\label{a:schauder with affine coefficients} Here we study the regularity of series of Schauder functions that have affine functions as coefficients. First let us establish an auxiliary result. \begin{lem} Let $s < t$ and let $f:[s,t] \rightarrow \mathcal{L}(\mathbb{R}^d,\mathbb{R}^n)$ and $g:[s,t]\rightarrow \mathbb{R}^d$ be affine functions. Then for all $r \in (s,t)$ and for all $h>0$ with $r-h \in [s,t]$ and $r+h \in [s,t]$ we have \begin{align}\label{e:second order increments quadratic} |(fg)_{r-h,r} - (fg)_{r,r+h}| \le 8 |t-s|^{-2} h^2 \lVert f\rVert_\infty \lVert g \rVert_\infty. \end{align} \end{lem} \begin{proof} For $f(r) = a_1 + (r-s)b_1$ and $g(r) = a_2 + (r-s)b_2$ we have \[ |(fg)_{r-h,r} - (fg)_{r,r+h}| = | 2 f(r) g(r) - f(r-h) g(r-h) - f(r+h)g(r+h)| = 2 h^2 |b_1 b_2|. \] Now $f_{s,t} = b_1(t-s)$ so that $|b_1| \le 2 |t-s|^{-1} \lVert f \rVert_\infty$, and similarly for $b_2$. \end{proof} Now we are ready to prove the regularity estimate. \begin{lem}\label{l:upm hoelder appendix} Let $\alpha \in (0,2)$ and let $(u_{pm})\in \mathcal{A}^\alpha$. Then $\sum_{p,m} u_{pm} \varphi_{pm} \in \mathcal{C}^\alpha$ and \begin{align*} \Bigl\lVert \sum_{p,m} u_{pm} \varphi_{pm}\Bigr\rVert_\alpha \lesssim \lVert (u_{pm}) \rVert_{\mathcal{A}^\alpha}.
\end{align*} \end{lem} \begin{proof} We need to examine the coefficients $2^{-q} \langle \chi_{qn}, \mathrm{d}(\sum_{pm} u_{pm} \varphi_{pm})\rangle$. The cases $(q,n) = (-1,0)$ and $(q,n)=(0,0)$ are easy, so let $q \ge 0$ and $1 \le n \le 2^q$. If $p>q$, then $\varphi_{pm}(t^i_{qn}) = 0$ for $i=0,1,2$ and for all $m$, and therefore \begin{align*} 2^{-q}\Big\langle \chi_{qn}, \mathrm{d}\Big(\sum_{p,m} u_{pm} \varphi_{pm}\Big)\Big\rangle = 2^{-q}\sum_{p\le q} \sum_m \langle \chi_{qn}, \mathrm{d}(u_{pm} \varphi_{pm})\rangle. \end{align*} If $p < q$, there is at most one $m_0$ with $\langle \chi_{qn}, \mathrm{d}(u_{pm_0} \varphi_{pm_0})\rangle \neq 0$. The support of $\chi_{qn}$ is then contained in $[t^0_{pm_0},t^1_{pm_0}]$ or in $[t^1_{pm_0}, t^2_{pm_0}]$ and $u_{pm}$ and $\varphi_{pm}$ are affine on these intervals, so~\eqref{e:second order increments quadratic} yields \begin{align*} \sum_m |2^{-q}\langle \chi_{qn}, \mathrm{d} (u_{pm} \varphi_{pm})\rangle| & = \sum_m |(u_{pm} \varphi_{pm})_{t^0_{qn},t^1_{qn}} - (u_{pm} \varphi_{pm})_{t^1_{qn},t^2_{qn}}| \\ & \lesssim 2^{2p} 2^{-2q} \lVert u_{pm}\rVert_\infty \lVert \varphi_{pm} \rVert_\infty \lesssim 2^{p(2-\alpha)-2q} \lVert (u_{pm})\rVert_{\mathcal{A}^\alpha}. \end{align*} For $p=q$ we have $\varphi_{qn}(t^0_{qn}) = \varphi_{qn}(t^2_{qn}) = 0$ and $\varphi_{qn}(t^1_{qn}) = 1/2$, and thus \[ \sum_m |2^{-q}\langle \chi_{qn}, \mathrm{d}(u_{qm} \varphi_{qm})\rangle| = \left|(u_{qn} \varphi_{qn})_{t^0_{qn},t^1_{qn}}-(u_{qn} \varphi_{qn})_{t^1_{qn},t^2_{qn}}\right| = |u_{qn}(t^1_{qn})|\lesssim 2^{-\alpha q} \lVert (u_{pm})\rVert_{\mathcal{A}^\alpha}. \] Combining these estimates and using that $\alpha < 2$, we obtain \[ 2^{-q}\Big|\Big\langle \chi_{qn}, \mathrm{d}\Big(\sum_{pm} u_{pm} \varphi_{pm}\Big)\Big\rangle\Big| \lesssim \sum_{p\le q} 2^{p(2-\alpha)-2q} \lVert (u_{pm})\rVert_{\mathcal{A}^\alpha} \simeq 2^{-\alpha q} \lVert (u_{pm})\rVert_{\mathcal{A}^\alpha}, \] which completes the proof. \end{proof} {} \end{document}
\begin{document} \title{Formatting Instructions For NeurIPS 2022} \begin{abstract} The abstract paragraph should be indented \nicefrac{1}{2}~inch (3~picas) on both the left- and right-hand margins. Use 10~point type, with a vertical spacing (leading) of 11~points. The word \textbf{Abstract} must be centered, bold, and in point size 12. Two line spaces precede the abstract. The abstract must be limited to one paragraph. \end{abstract} \section{Introduction} \label{Sec:1} Deep reinforcement learning has achieved great success in a wide range of challenging environments such as Atari games \citep{mnih2013atari,bellemare2012ale,hessel2017rainbow} or continuous control tasks \citep{schulman2015trpo, schulman2017ppo}. However, in stark contrast with this trend, applying RL to real-world applications remains a great challenge. In most real-world settings, there could be variations in environmental factors, such as changing terrains in robotic tasks, fluctuating bandwidth in congestion control, and dynamic traffic patterns in autonomous driving. We refer to such environmental factors as \textit{situation} or \textit{context}. The changes in context are not negligible since the context usually has a substantial impact on transition and reward functions. When the context is fixed and known to us, the problem is stationary and easier to solve. However, in most realistic settings, the context is usually dynamic within an episode and unknown to us at test time. Therefore, detecting and adapting to variations in context is very important for RL agents to make a real impact in a wide range of real-world applications. \begin{figure} \caption{A typical trace of available network bandwidth. The trace can be approximately divided into several segments of different lengths $g^i$, for $i \in \{1,2,\cdots,6\}$.} \label{fluctuated_bandwidth} \end{figure} In real-world environments with varied unknown contexts, we find a typical evolving context pattern particularly interesting. The context $c$ usually stays the same for a stochastic period until it changes abruptly and unpredictably into another context value $c'$ which is sampled i.i.d. from some prior context distribution. Moreover, we usually do not have access to the context $c$ directly but instead have access to some noisy observation $x_t$ sampled from some distribution $p(x_t|c)$ only at training time \li{(e.g., as auxiliary information from the simulator)}. We take fluctuating bandwidth in congestion control as an example. In Figure \ref{fluctuated_bandwidth}, we show a typical trace of available network bandwidth. The fluctuating bandwidth is usually modeled by multiple non-overlapping segments \citep{akhtar2018oboe, zhang2001constancy}. Within each segment, the network condition is stationary, and thus the bandwidth approximately follows the same distribution. At the end of a segment, the network condition changes, and the bandwidth changes abruptly into another distribution. Here the context $c$ represents the parameters that determine the distribution of the fluctuating bandwidth (the network condition), while the observations $x_t$ represent the observed bandwidth, which follows the distribution $p(x_t|c)$ above. While the contexts $c$ are piecewise-stable, there could be slight variations in $x_t$ within each segment.
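To make this evolving pattern concrete, the following toy simulation is a minimal sketch of a piecewise-stable context trace with noisy observations, mimicking the bandwidth example above; the geometric segment-length prior, the uniform context prior, the Gaussian observation noise, and all names are illustrative choices of ours, not assumptions made in this paper:
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(0)

def simulate_piecewise_context(T=500, lam=0.02, obs_std=0.5):
    """Piecewise-stable context c with noisy observations x_t ~ p(x_t | c)."""
    c, x = np.empty(T), np.empty(T)
    t = 0
    while t < T:
        g = rng.geometric(lam)            # segment length, drawn once per segment
        c_seg = rng.uniform(1.0, 10.0)    # latent context (e.g. mean bandwidth)
        for _ in range(min(g, T - t)):
            c[t] = c_seg
            x[t] = c_seg + obs_std * rng.normal()   # noisy observation of the context
            t += 1
    return c, x

c, x = simulate_piecewise_context()
\end{verbatim}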
This piecewise-stable pattern of context dynamics is of particular interest to us since it can capture a wide range of stochastic context processes in real-world applications, such as changing terrains in robotic tasks and fluctuating bandwidth in congestion control. Since the change in context is abrupt and unpredictable, we cannot predict the future context in advance. The best we can do is detect and adapt to changes when they happen. Although adaptation to varied unknown contexts has been studied under non-stationary RL and meta RL, few existing works look into piecewise-stable context with abrupt changes within an episode. Most works on meta-RL as task inference~\citep{rakelly2019efficient,zintgraf2020varibad,zhao2020meld, poiani2021meta} and some works in non-stationary RL~\citep{chandak2020towards,chandak2020optimizing,xie2021deep} assume the context stays the same for the whole episode and infer the context based on the entire episode~(c.f. Figure \ref{fig:PGM}(a)), and therefore cannot quickly adapt to context changes within an episode. Other works on non-stationary RL assume intra-episode context changes and model $c_t$ at each time step, but few study the piecewise-stable context as we do. \cite{nagabandi2018deep} directly predicts the context and thus cannot capture the prior that the context tends to stay the same for a stochastic period. \cite{feng2022factored, ren2022reinforcement} model a Markovian discrete context at each time step~(c.f. Figure \ref{fig:PGM}(b)), and therefore fail to model the non-Markov property of the context and the prior over the context segment length in our setting. Compared with existing works, our setting is distinctive since the segment structure is latent to us~(c.f. Figure \ref{fig:PGM}(c)). Therefore, the unique challenge in our setting is that we need to infer the segment structure, which can be further leveraged to infer the belief context by only incorporating the relevant observed data in the current segment. \li{This paper studies how to infer segment structure to detect abrupt context changes and infer belief context accordingly to adapt to the piecewise-stable context in RL environments.} We first introduce the latent situational MDP, which models RL environments with the stochastic \textit{situation}/\textit{context} process~(Section \ref{Sec:2}). Then, we introduce how to infer the belief context from observed data. To address the challenge above, we propose to infer the context segment structure and the belief context jointly from observed data~(Section \ref{Sec:3-Inference}). Then we augment the state with the inferred belief context so that the RL agent can automatically trade off between acting optimally conditioned on the inferred context and gathering more information about the current context~(Section \ref{Sec:3-policy}). Finally, we combine the training objectives for RL and inference and present all the details of our proposed deep RL algorithm~(Section \ref{Sec:3-practical}). We evaluate our algorithm on a gridworld environment with dynamic goals and MuJoCo tasks \citep{todorov2012mujoco} with varied contexts. Experiments demonstrate that our algorithm can quickly detect and adapt to abrupt changes in piecewise-stable contexts and outperform existing methods~(Section \ref{experiment}). Our contributions can be listed as follows: \begin{itemize} \item We introduce latent situational MDP with piecewise-stable context, which can capture a wide range of real-world applications~(Section \ref{Sec:2}).
\item We propose SeCBAD, an adaptive deep RL method for non-stationary environments with piecewise-stable context. Our method can infer the context segment structure and the belief context accordingly from observed data, which can be leveraged to detect and adapt to context changes~(Section \ref{method}). \item Experiments on a gridworld environment and MuJoCo tasks with piecewise-stable context demonstrate that our method can quickly detect and adapt to abrupt context changes and outperform existing methods~(Section \ref{experiment}). \end{itemize} \section{Problem Formulation} \label{Sec:2} \begin{figure} \caption{Probabilistic Graphical Models~(PGMs) for different problem settings, with shaded circles for observable variables and white circles for latent variables. (a) One context: the context is assumed to remain unchanged for the whole episode. (b) Markovian context: the context is assumed to be Markovian at each time step. (c) Our setting: the context remains unchanged in each segment, but the segment structure, illustrated by the red segment, is unknown and needs to be inferred. We show two possible examples of PGMs corresponding to $G_t=2$~((c), left) and $G_t=1$~((c), right), where $G_t$ measures the length of the current segment up to time step $t$. } \label{fig:PGM} \end{figure} In this section, we define a \textit{latent situational Markov Decision Process (LS-MDP)} as a tuple $M=(\mathcal{S}, \mathcal{A}, \mathcal{C}, \mathcal{X}, G, T, R, \gamma)$, where $\mathcal{S}$ is the set of states, $\mathcal{A}$ is the set of actions, and $\gamma \in (0,1]$ is the discount factor. To formulate the variations in environmental factors, we introduce $\mathcal{C}$, the set of latent contexts, and $\mathcal{X}$, the set of observable contexts. $\mathcal{C}$ refers to the set of underlying hidden contexts that contain all necessary information but are unobservable to the agent, while $\mathcal{X}$ refers to the set of contexts with only partial information. $G$ refers to the segment length, which will be described in detail in the next paragraph. $\mathcal{X}$ is observable only during training and unobservable during deployment. In an example setting where a robot walks over changing terrain, $\mathcal{C}$ represents the terrain features containing perfect information, and $\mathcal{X}$ represents noisy and imperfect information, such as mechanical metrics of the current step from virtual sensors in a simulator, and thus is only accessible during training. In our setting, we focus on the case where the environmental changes are abrupt and irregular, \li{in contrast to the smoothly changing assumption on the context/task in existing works in non-stationary RL~\citep{chandak2020towards,chandak2020optimizing}.} To better model the generative process of the contexts, we introduce the segment length $G$. Each episode is composed of several stationary segments \li{with different segment lengths}. For ease of notation, we also introduce $G_t$, which measures the length of the current segment up to time step $t$. Then, the generative process can be described as follows: at the beginning of the $h$-th segment, the environment samples $G^h \sim p_G(G)$ and $c^h \sim p_c(c)$ from \li{prior distributions $p_G$ and $p_c$.} Then, in the next $G^h$ steps, the latent context $c^h$ remains unchanged.
For each time step $t$ in this segment, the current segment length accumulates as $G_t=G_{t-1} + 1$ (we define $G_t = 1$ for the first time step in this segment), and the current latent context satisfies $c_t = c^h$. The agent can observe $x_t \sim p(x_t | c_t)$ at each time step during training. The stationarity lasts until the end of the current segment; then the environment resamples $G^{h+1}$ and $c^{h+1}$, and the process repeats. \li{We show two examples of graphical models of our setting given different $G_t$ in Figure \ref{fig:PGM}(c)}. The transition function $T: p(s_{t+1} | s_t, a_t, c^h)$ and the reward function $R: p(r_t | s_t, a_t, c^h)$ are both conditioned on the current latent context $c^h$. Therefore, changes in $\mathcal{C}$ lead to changes in the transition and reward functions. \li{At test time, $\mathcal{X}$ is no longer accessible, so we need to infer context changes from observed transitions and rewards. } Since the latent context $\mathcal{C}$ remains unobservable and changes silently, the environment is no longer stationary for the agent. To act optimally, the agent has to keep track of the environment, recognize changes in time, and rapidly adapt to them.
\section{Methodology} \label{method} In this section, we present our \textit{Segmented Context Belief Augmented Deep~(SeCBAD)} RL method and elaborate on how SeCBAD solves the challenges discussed above. Our method consists of two main components: \begin{itemize} \item Joint inference of the belief distribution over the latent context and the segment structure from observed data. \item Policy optimization with inferred belief context under the belief MDP framework. \end{itemize} We first introduce the latent context inference part in Section \ref{Sec:3-Inference}, especially how to infer the segment structure jointly with the belief context and leverage the segment structure to remove irrelevant observed data. \li{After the belief context is approximated, it is then incorporated into the state as the input of the policy.} We detail the policy optimization part under the belief MDP framework in Section \ref{Sec:3-policy}. Finally, in Section \ref{Sec:3-practical}, we describe how these parts constitute a practical algorithm. \subsection{Joint Inference for Belief Context and Segment Structure} \label{Sec:3-Inference} In this part, we perform joint inference over the latent context $c_t$ and the current segment length $G_t$ from the observed trajectory $\tau_{1:t}$, so as to remove irrelevant data in $\tau_{1:t}$ for belief context inference. This can be formally expressed by the following equation: \begin{equation} \label{equation-1} p(c_t, G_t|\tau_{1:t}) = p(c^{t-G_t+1:t}|G_t, \tau_{t-G_t:t}) p(G_t|\tau_{1:t}) \end{equation} where $\tau_{t_0:t} = (s_{t_0}, a_{t_0}, r_{t_0}, \cdots, s_{t-1}, a_{t-1}, r_{t-1}, s_{t})$\footnote{This definition makes $\tau_{t-G_t:t}$ only contain observed data that belong to the current segment.}, and $c^{t-G_t+1:t}$ refers to the latent context for the whole segment from $t-G_t+1$ to $t$.
In Equation \ref{equation-1}, we separately estimate the posterior $p(c^{t-G_t+1:t}|G_t, \tau_{t-G_t:t})$ of the latent context given a known segment structure $G_t$, and the posterior of the segment length $p(G_t|\tau_{1:t})$. \subsubsection{Approximate Inference for Latent Context under Known Segment Structure} \label{Sec:3.1-1inferknownGt} In this part, we focus on estimating $p(c^{t-G_t+1:t}|G_t, \tau_{t-G_t:t})$, which is the posterior of the latent context under a known segment structure $G_t$. We use the variational inference framework to approximate the true posterior. To be specific, we use a posterior inference network $q_\phi$ to infer the belief context in the segment $[t-G_t+1:t]$: $q_\phi(c^{t-G_t+1:t}|G_t, \tau_{t-G_t:t})$. The variational lower bound for the log-likelihood of the current segment is given by: \begin{align} \label{Eq:elbo} &\quad \log p( \tau^X_{t-G_t:t} | a_{t-G_t:t-1}, s_{t-G_t}, G_t) \nonumber \\ &\geq \mathbb{E}_{ q_\phi(c^{t-G_t+1:t}|G_t, \tau_{t-G_t:t})} \left[ \log p_{\theta}(\tau^X_{t-G_t:t}|G_t, c^{t-G_t+1:t}, a_{t-G_t:t-1}, s_{t-G_t}) \right] \nonumber \\ &\quad\quad\quad\quad\quad\quad\quad\quad\quad -\mathbf{D}_{KL}(q_\phi (c^{t-G_t+1:t}|G_t, \tau_{t-G_t:t})\| p(c^{t-G_t+1:t})) \coloneqq \mathcal{J}_{Model}^t(G_t) \end{align} where $p_\theta$ denotes the decoder and $\tau^X_{t-G_t:t}= (\tau_{t-G_t:t}, x_{t-G_t+1:t})$. For the detailed derivation, please see Appendix \ref{Appendix:ELBO}. The reconstruction term of Equation \ref{Eq:elbo} can be factorized as: \begin{align} &\quad \log p_{\theta}(\tau^X_{t-G_t:t}|G_t, c^{t-G_t+1:t}, a_{t-G_t:t-1}, s_{t-G_t}) \nonumber \\ &= \sum_{i=t-G_t+1}^{t} \log p_{\theta}(x_{i}, s_{i}, r_{i-1}| G_t, c^{t-G_t+1:t}, s_{i-1}, a_{i-1}) \end{align} which is the sum of the log-probabilities of the transitions under contexts sampled from $q_\phi$. The term $\mathbf{D}_{KL}(q_\phi||p)$ is the KL-divergence between our variational posterior $q_\phi$ and the prior over the belief context. For the prior $p(c^{t-G_t+1:t})$, we use the previous posterior at time step $t-1$ if the context remains unchanged at time step $t$, or $\mathcal{N}(0, I)$ otherwise. Unlike most previous methods, which assume an invariant context within an episode or a Markovian context at each time step, our method 1) takes only observed data within the current segment $\tau_{t-G_t:t}$ as input for the encoder $q_\phi$, and 2) reconstructs only data within the current segment $\tau^X_{t-G_t:t}$ at the output of the decoder $p_{\theta}$. Our method is naturally motivated by the piecewise-stable assumption on context dynamics. By removing irrelevant data outside the current segment \pushi{that are generated by past, unassociated contexts}, our method can estimate the current latent context more accurately. \begin{figure} \caption{An overview of SeCBAD. (a) Encoder-decoder architecture for belief context inference given current segment length $G_t = g$: the encoder $q_\phi$ takes the recent $g$ steps in the current red segment as input, and infers the belief context $q_\phi(c^{t-g+1:t}|G_t=g, \tau_{t-g:t})$.} \label{Fig:algo} \end{figure} \subsubsection{Iterative Inference for the Segment Length} \label{Sec:3recursive_inference} In this part, we focus on estimating $p(G_t|\tau_{1:t})$, which is the posterior of the current segment length, given $p(c^h|\tau_{t-G_t:t})$, the posterior of the context under a given segment structure.
To compute this posterior distribution, we first compute the joint distribution $p(G_t, \tau_{1:t})$ recursively based on $p(G_{t-1}, \tau_{1:t-1})$ as follows\footnote{For notational simplicity, we omit the condition $G_t=i$ in the term $p(s_t, a_{t-1}, r_{t-1}|\tau_{t-i:t-1})$, i.e., $t-i$ is the start of the segment. }: \begin{align} \label{Eq:postG} p(G_t = i, \tau_{1:t}) &= \sum_{k=1}^{t-1} p(G_{t-1} = k, \tau_{1:t-1}) \cdot p(G_t = i| G_{t-1} = k) \cdot p(s_t, a_{t-1}, r_{t-1}|\tau_{t-i:t-1}) \end{align} The three major components in Equation \ref{Eq:postG} are, in turn, the previous joint distribution, the evolution prior, and the observation probability. The previous joint distribution is carried over recursively from the previous time step. The evolution prior $p(G_t=i|G_{t-1}=k)$ encodes the prior knowledge about the segment length $G_t$ given the segment length $G_{t-1}=k$ of the previous time step, where either $G_t=G_{t-1}+1$ or $G_t=1$ holds. The observation probability, the third term of Equation \ref{Eq:postG}, measures how likely the new observation is given the history of the segment. To be specific, we have \footnote{In Equation \ref{Eq:obs_prob}, $K = p(a_{t-1}|s_{t-1})$ is a constant with respect to $i$ and has no impact on the posterior $p(G_t=i|\tau_{1:t})$. } \begin{align} \label{Eq:obs_prob} p(s_t, a_{t-1}, r_{t-1}|\tau_{t-i:t-1}) &= K \mathbb{E}_{p(c^{t-i+1:t}|G_t=i, \tau_{t-i:t-1})} \Big[ p(s_t,r_{t-1}|s_{t-1}, a_{t-1}, c^{t-i+1:t}) \Big] \end{align} In Equation \ref{Eq:obs_prob}, the term $p(s_t,r_{t-1}|s_{t-1}, a_{t-1}, c^{t-i+1:t})$ estimates the data likelihood for the next state-reward pair, where $c$ is drawn from the posterior distribution given the data in the segment before time step $t$. To compute the RHS of Equation \ref{Eq:obs_prob}, we sample from $q_\phi(c^{t-i+1:t-1}|G_t=i, \tau_{t-i:t-1})$\footnote{When $i \geq 2$, we can use the belief $q_\phi(c^{t-i+1:t-1}|G_{t-1}=i-1, \tau_{t-i:t-1})$ to approximate the posterior $p(c^{t-i+1:t}|G_t=i, \tau_{t-i:t-1})$. For the case where $i=1$, the posterior distribution $p(c^{t-i+1:t}|G_t=i, \tau_{t-i:t-1})$ is actually the prior distribution $p_c(c)=\mathcal{N}(0,I)$, and we can sample $c$ from the prior distribution. } which is the belief context inferred from the whole history of the segment before time step $t$, and use the sampled $c$ and the decoder to compute the data likelihood. Given the joint distribution, the posterior distribution of $G_t$ can be derived as \begin{equation} \label{Eq:postG_norm} p(G_t = i | \tau_{1:t}) = \frac{p(G_t = i, \tau_{1:t})}{\sum_l p(G_t = l, \tau_{1:t})} \end{equation} We can also incorporate observable contexts $x$ in the observation probability to improve accuracy during training. See Appendix \ref{Appendix:obsx} for more details. \subsubsection{Belief Context as a Mixture Distribution} \label{Sec:3.1-jointinference} Given the inferred posterior of $G_t$ in Section \ref{Sec:3recursive_inference}, the belief of the latent context at time step $t$ can be derived as \begin{equation} \label{Eq:q-marginalize} b_t(c) = q_\phi(c_t|\tau_{1:t}) = \sum_{g_t} q_\phi(c^{t-g_t+1:t}|G_t=g_t,\tau_{t-g_t:t}) p(G_t=g_t|\tau_{1:t}) \end{equation} This mixture over $c^{t-g_t+1:t}$, which takes all possible segment structures into account, represents the current belief $b_t$ over the latent context $c$.
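For concreteness, the recursion in Equation \ref{Eq:postG} and the normalization in Equation \ref{Eq:postG_norm} can be implemented in a few lines. The sketch below (in Python; the argument \texttt{obs\_loglik} stands for the observation log-probability of Equation \ref{Eq:obs_prob}, which in SeCBAD is evaluated with the learned encoder $q_\phi$ and decoder $p_\theta$, and all names are ours) propagates the joint distribution over the segment length in log-space for numerical stability:
\begin{verbatim}
import numpy as np
from scipy.special import logsumexp

def update_segment_posterior(log_joint_prev, obs_loglik, log_p_grow, log_p_reset):
    """One step of the recursion for log p(G_t = i, tau_{1:t}), i = 1, ..., t (t >= 2).

    log_joint_prev : length t-1 array with log p(G_{t-1} = k, tau_{1:t-1}).
    obs_loglik     : length t array; obs_loglik[i-1] approximates
                     log p(s_t, a_{t-1}, r_{t-1} | tau_{t-i:t-1}) for segment length i.
    log_p_grow / log_p_reset : log prior of G_t = G_{t-1} + 1 and of G_t = 1.
    """
    t = len(log_joint_prev) + 1
    log_joint = np.empty(t)
    # a change point (G_t = 1) can follow any previous segment length k
    log_joint[0] = logsumexp(log_joint_prev + log_p_reset) + obs_loglik[0]
    # otherwise the segment grows: G_t = k + 1 keeps only the term with G_{t-1} = k
    log_joint[1:] = log_joint_prev + log_p_grow + obs_loglik[1:]
    # normalizing yields the posterior over the current segment length
    log_post = log_joint - logsumexp(log_joint)
    return log_joint, log_post
\end{verbatim}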
\subsection{Policy Optimization with Belief Context} \label{Sec:3-policy} Inspired by the belief MDP \citep{kaelbling1998planning}, the Bayes Adaptive MDP (BAMDP) \citep{duff2002optimal}, \li{and recent works on meta-RL as task inference \citep{zintgraf2020varibad}, } \li{we incorporate the inferred belief context into the augmented state.} At \li{each} time step $t$, the belief latent context is approximated via $q_\phi$ using Equation \ref{Eq:q-marginalize}. Therefore, we define the augmented state as $(s, b) \in \mathcal{S} \times \mathcal{B}$, where $\mathcal{S}$ is the same state space as in \li{LS-MDP} and $\mathcal{B}$ is the set of belief latent contexts. \li{Accordingly, we have the transition $T^{b}(s_{t+1}, r_t, b_{t+1} | s_t, a_t, b_t) = p(s_{t+1}, r_t|s_t, a_t, b_t) p(b_{t+1}|s_t, a_t, r_t, s_{t+1}, b_t)$ and the reward $R^{b}(r_{t}|s_t, b_t, a_t)$.} This definition brings advantages in the sense that the information gathering and exploitation tradeoff is no longer a problem under such augmented states, since the transition and reward functions are no longer conditioned on the exact $c$. Now, the policy is defined as $\pi(a|s, b)$, a mapping from the augmented state space to the action space, and the agent's objective is to maximize \begin{align} \label{Eq:RLloss} \li{ J_{RL} = \mathbb{E}_{s_0,b_0,\pi,T^{b}} \left[ \sum_{t=0}^{H} \gamma^t R^{b}(r_{t}|s_t, b_t, a_t) \right] .} \end{align} \subsection{Algorithm and Implementations of SeCBAD} \label{Sec:3-practical} In this section, we describe the overall algorithm and implementation details of SeCBAD. See Figure \ref{Fig:algo} for an overview of our framework. As shown in Figure \ref{Fig:algo}(a), we use a GRU \citep{cho2014learning} parameterized by $\phi$ as the recurrent encoder $q_\phi$, and distributions in the latent context space are assumed to be diagonal Gaussians with mean and variance parameterized by $q_\phi$. The decoder includes a transition model $p_\theta(s_{t+1}|s_t, a_t, c)$, a reward model $p_\theta(r_t|s_t, a_t, c)$, and an observable context model $p_\theta(x_t|c)$. \li{The outputs of all decoders are Gaussian distributions with means parameterized by feed-forward neural networks and fixed identity covariance.} Then, we estimate $p(G_t|\tau_{1:t})$ using $q_\phi$ and $p_\theta$ as described in Section \ref{Sec:3recursive_inference}. As shown in Figure \ref{Fig:algo}(b), we combine the belief contexts based on different segment lengths according to $p(G_t|\tau_{1:t})$ to get the belief $b_t(c) = q_\phi (c_t|\tau_{1:t})$, where one approach is to provide all $t$ means, covariances, and weights as policy input. For simplicity, we choose the $G_t^*$ with the highest probability under $p(G_t|\tau_{1:t})$ and use the corresponding $q_\phi(c^{t-G_t^*+1:t}|\tau_{t-G_t^*:t})$ as the belief. Empirically, we find this approximation leads to little performance loss. We build our RL algorithm on top of PPO \citep{schulman2017proximal} to learn the policy $\pi_{\psi}(a_t|s_t,b_t(c))$, where $\psi$ denotes the parameters of the actor and the critic. We use the objective described in Section \ref{Sec:3-policy} to optimize the policy. During deployment, $q_\phi$ and $p_\theta$ are fixed. We first compute the segment posterior $p(G_t|\tau_{1:t})$ using the encoder $q_\phi$ and the transition and reward model $p_\theta$. Then, we estimate the belief $b_t(c)$ and feed it into the policy as input. For more implementation details, please refer to Appendix \ref{Appendix:implementation}.
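As a small illustration of the deployment-time computation just described, the following sketch (Python; \texttt{encoder} is a placeholder for the learned GRU encoder $q_\phi$ returning the mean and log-variance of the Gaussian belief, and all names are our own) assembles the augmented state $(s_t, b_t)$ fed to the policy, using the most likely segment length $G_t^*$:
\begin{verbatim}
import numpy as np

def policy_input(s_t, traj, log_post_G, encoder):
    """Build the augmented state (s_t, b_t) from the segment-length posterior.

    traj       : list of per-step transition tuples, most recent last.
    log_post_G : log posterior over the current segment length G_t.
    encoder    : callable mapping the transitions of one segment to the
                 (mean, log-variance) of the Gaussian belief over the context.
    """
    g_star = int(np.argmax(log_post_G)) + 1   # most likely segment length G_t^*
    segment = traj[-g_star:]                  # keep only data from the current segment
    mean, logvar = encoder(segment)           # belief over c given G_t^* and the segment
    return np.concatenate([np.asarray(s_t, float), mean, logvar])
\end{verbatim}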
\input{src/4-analysis} \section{Related works} Our work is closely related to \textbf{non-stationary RL}, where the transition and reward functions may change over time. Most existing works on non-stationary RL focus on inter-episode non-stationarity~\citep{xie2021deep, chandak2020towards, chandak2020optimizing, xie2022robust, sodhani2021block, alegre2021minimum, poiani2021meta, al2017continuous}, some of which adopt the contextual MDP as their formulation \citep{hallak2015contextual}. Recently, some works have considered intra-episode non-stationarity~\citep{ren2022reinforcement, kamienny2020learning, kumar2021rma, nagabandi2018deep, feng2022factored}. \cite{ren2022reinforcement} assume the latent context to be finite and Markovian, while \cite{feng2022factored} assume the latent context is Markovian and the environment can be modeled as a factored MDP. In contrast to existing works on non-stationary MDPs, we assume a piecewise-stable context with abrupt changes within an episode, which is more realistic and can capture a wide range of real-world applications. To rapidly adapt to a dynamic context, the agent needs to continuously perform information-gathering behavior. \textbf{Bayesian RL} \citep{duff2002optimal, zintgraf2020varibad, fellows2021bayesian} is an elegant framework to optimally trade off exploration and exploitation in an unknown, stationary MDP. As a special type of \textbf{Belief MDP} \citep{kaelbling1998planning}, the Bayes Adaptive MDP (BAMDP) \citep{duff2002optimal} maintains a belief over the environment and uses this belief to augment the state. \li{Our model can be viewed as a special case of belief MDP, where we only maintain a belief over the latent context to trade off between information gathering and exploitation. } To accurately infer the belief context, we adopt \textbf{variational inference}, which has been used by many existing works in RL for task inference \citep{rakelly2019efficient, zhao2020meld, humplik2019meta, poiani2021meta, zintgraf2020varibad} or context inference \citep{xie2021deep, feng2022factored, ren2022reinforcement}. However, none of these methods suit our setting. \li{We perform joint inference over latent context and segment structure from observed data, so as to remove irrelevant data for more accurate belief context inference. } LS-MDP can also be viewed as a special case of \textbf{POMDP}. Recently, progress has been made in learning the latent dynamics model \citep{krishnan2015deep,karl2016deep,doerr2018probabilistic,buesing2018learning,ha2018world,han2019variational,hafner2019learning,hafner2019dream}. Theoretically, it is possible to act optimally using only recurrent neural networks (RNNs), as in \cite{hausknecht2015deep}, since the whole history is taken into consideration. However, it has been shown \citep{hafner2019learning} that introducing more structured information significantly enhances performance. In this paper, we \li{exploit the additional assumption of LS-MDP} to infer the context based on segment structure. The experiments show that SeCBAD can achieve better performance compared with \li{existing methods}.
\section{Conclusion} In this paper, we propose SeCBAD, a \textbf{Se}gmented \textbf{C}ontext \textbf{B}elief \textbf{A}ugmented \textbf{D}eep RL method to deal with piecewise-stable contexts in non-stationary environments. Piecewise-stable contexts are quite common in a wide range of real-world applications. Compared with existing methods, our method can automatically detect the segment structure, which reflects when the context changes abruptly. The detected segment structure can be further used to compute the context belief using only the relevant observed data. To the best of our knowledge, this is the first method that can model and leverage piecewise-stable context in reinforcement learning to help the agent adapt to environmental changes.
With the inferred belief context, our RL agent can quickly detect and adapt to abrupt changes in a gridworld environment and MuJoCo tasks with piecewise-stable context. For future work, we plan to leverage various deep learning techniques to improve SeCBAD, such as replacing the GRU encoder with a Transformer to better capture long-term dependencies in the input trajectory segment.
\answerTODO{} \item Did you discuss whether and how consent was obtained from people whose data you're using/curating? \answerTODO{} \item Did you discuss whether the data you are using/curating contains personally identifiable information or offensive content? \answerTODO{} \end{enumerate} \item If you used crowdsourcing or conducted research with human subjects... \begin{enumerate} \item Did you include the full text of instructions given to participants and screenshots, if applicable? \answerTODO{} \item Did you describe any potential participant risks, with links to Institutional Review Board (IRB) approvals, if applicable? \answerTODO{} \item Did you include the estimated hourly wage paid to participants and the total amount spent on participant compensation? \answerTODO{} \end{enumerate} \end{enumerate} \appendix \section{Appendix} \subsection{Bound Derivation} \label{Appendix:ELBO} The variational bound for our model can be derived using importance weighting and Jensen's inequality. We use $\tilde{c} \coloneqq c^{t-G_t+1:t}$ for simplicity: \begin{align*} & \log p(\tau^X_{t-G_t:t} | a_{t-G_t:t-1}, s_{t-G_t}, G_t) \\ &= \log \int p(\tau_{t-G_t:t}^X, \tilde{c}|a_{t-G_t:t-1}, s_{t-G_t}, G_t) \frac{q_\phi(\tilde{c}|G_t, \tau_{t-G_t:t})}{q_\phi(\tilde{c}|G_t, \tau_{t-G_t:t})} d \tilde{c} \\ &= \log \mathbb{E}_{q_\phi} \left[ \frac{p(\tau_{t-G_t:t}^X, \tilde{c}|a_{t-G_t:t-1}, s_{t-G_t}, G_t)}{q_\phi(\tilde{c}|G_t, \tau_{t-G_t:t})} \right] \\ &= \log \mathbb{E}_{q_\phi} \left[ \frac{p(\tau_{t-G_t:t}^X| \tilde{c},a_{t-G_t:t-1}, s_{t-G_t}, G_t)p(\tilde{c}|G_t)}{q_\phi(\tilde{c}|G_t, \tau_{t-G_t:t})} \right] \\ &\geq \mathbb{E}_{q_\phi} \left[ \log p(\tau_{t-G_t:t}^X| \tilde{c},a_{t-G_t:t-1}, s_{t-G_t}, G_t) \right] - \mathbf{D}_{KL}(q_\phi(\tilde{c}|G_t, \tau_{t-G_t:t}) \| p(\tilde{c}|G_t)) \\ &= \sum_{i=t-G_t+1}^{t} \mathbb{E}_{q_\phi} \left[ \log p(x_i|G_t,\tilde{c}) + \log p(s_i, r_{i-1}|G_t, \tilde{c}, s_{i-1}, a_{i-1}) \right] - \mathbf{D}_{KL} \left( q_\phi \| p(\tilde{c}|G_t) \right) \end{align*} \subsection{Algorithm Framework} In this Section, we provide the detailed training and evaluation algorithms of SeCBAD. The training algorithm is detailed in Algorithm \ref{algo-training}. The evaluation algorithm is detailed in Algorithm \ref{algo-eval}. \begin{algorithm}[h] \caption{SeCBAD Training Algorithm} \label{algo-training} \begin{algorithmic} \State {\bfseries Input:} buffer $\mathcal{B}$, imagination horizon $H$, interacting step $T$, batch size $B$, batch length $L$, number of trajetories $N$. \State Initialize buffer $\mathcal{B}$ with $S$ random seed episodes. \While{ not converged } \Comment{{\color{cyan}\emph{Parameter Optimization}}} \For {$c=1,\dots,C$} \State Draw $B$ data sequences $\{(s_t,a_t,r_t,x_t)\}_{t=k}^{k+L}$ from $\mathcal{B}$ \State Calculate $p(G_t|\tau_{1:t})$ using Equation (\ref{Eq:postG_norm}). \State Sample $g \sim p(G_t|\tau_{1:t})$. \State Infer belief state $q_\phi(c^{t-G_t+1:t}|G_t=g, \tau_{t-G_t:t})$. \State Sample $k \in \mathbb{N}$ from $[t-g+1, t]$. \Comment{{\color{cyan}\emph{Calculate the ELBO: sample 1 reconstruction step}}} \State Predict observable context: $p_\theta (x_k|c, G_t=g)$ \State Predict reward: $p_\theta(r_{k-1}|c, s_{k-1}, a_{k-1}, G_t=g)$, and state $p_\theta(s_{k}|c, s_{k-1}, a_{k-1}, G_t=g)$. \State Update $\phi,\theta$ using Equation (\ref{Eq:elbo}). \State Update $\psi$ using Equation (\ref{Eq:RLloss}). \EndFor \State Reset environment and get $s_1, x_1$. 
\For{$t=1,\dots,T$} \Comment{{\color{cyan}\emph{Data Collection}}}
\State Calculate $p(G_t|\tau_{1:t})$ using Equation (\ref{Eq:postG_norm}).
\State Calculate belief $b_t(c)$ using Equation (\ref{Eq:q-marginalize}).
\State Compute $a_t \sim \pi_\psi(a_t|s_t, b_t)$ with action model.
\State Add exploration noise to action.
\State Execute $a_t$ and get $x_{t+1}, s_{t+1}, r_t$.
\EndFor
\State Add experience to buffer $\mathcal{B} = \mathcal{B} \cup \{(s_t,a_t,r_t, x_t)_{t=1}^T\}$
\EndWhile
\end{algorithmic}
\end{algorithm}
\begin{algorithm}[h]
\caption{SeCBAD Evaluation Algorithm}
\label{algo-eval}
\begin{algorithmic}
\State {\bfseries Input:} interacting step $T$, encoder $q_\phi$, decoder $p_\theta$, policy $\pi_\psi$.
\State Reset environment and get $s_1$.
\For{$t=1,\dots,T$}
\State Calculate $p(G_t|\tau_{1:t})$ using Equation (\ref{Eq:postG_norm}).
\State Choose $g = \argmax p(G_t|\tau_{1:t})$.
\State Compute $b_t(c) = q_\phi(c^{t-G_t+1:t}|G_t=g, \tau_{t-G_t:t})$.
\State Compute $a_t \sim \pi_\psi(a_t|s_t, b_t)$ with action model.
\State Execute $a_t$ and get $x_{t+1}, s_{t+1}, r_t$.
\EndFor
\end{algorithmic}
\end{algorithm}
\subsection{Include $x$ in observation probability}
\label{Appendix:obsx}
In Section \ref{Sec:3recursive_inference}, we recursively estimate the posterior of the current segment length, $p(G_t|\tau_{1:t})$, which conditions the segment length on the observations $\tau_{1:t}$ so as to remain consistent with the evaluation setting. However, during training we can use more information to obtain a more accurate estimate. For instance, we can include the observable contexts $x$ in $\tau_{1:t}$, yielding $\tau_{1:t}^X$, and update the corresponding equations as follows:
$$ p(G_t = i, \tau_{1:t}^X) = \sum_{k=1}^{t-1} p(G_{t-1} = k, \tau_{1:t-1}^X) \cdot p(G_t = i| G_{t-1} = k) \cdot p(s_t, x_t, a_{t-1}, r_{t-1}|\tau_{t-i:t-1}^X) $$
The difference mainly lies in the observation probability term:
\begin{align*} & p(s_t, x_t, a_{t-1}, r_{t-1} | \tau_{t-i:t-1}^X) \\ &= K \mathbb{E}_{p} \left[ p(s_t, r_{t-1}|s_{t-1},a_{t-1},c^{t-i+1:t}) p(x_t|c^{t-i+1:t}) \right] \end{align*}
The observation probability term now estimates the data likelihood not only for the next state-reward pair, but also for the next observable context $x_t$. There are many other choices for which information to use during training. It is also possible to only incorporate the observable contexts $x$ instead of using $\tau_{1:t}^X$, for simplicity. We can still use the online inference method stated above, or we can use simpler, heuristic methods to derive the posterior. Since we can directly access $x$ from the simulator, it is possible to calculate the posterior in advance, which saves a lot of computation at training time.
\subsection{Discussion with Related Works}
\label{appendix:markov-context}
Recently, some works \citep{ren2022reinforcement, feng2022factored} assume intra-episode context changes and model the context $c_t$ at each time step, more specifically with a Markovian transition on latent contexts. Since the Markovian assumptions are made in the latent context space, these methods are theoretically able to capture non-Markovian patterns in the observable context space. However, as discussed in our paper, one of the key challenges in deploying RL to real-world applications is to adapt to variations of unknown environment contexts that stay stable for a stochastic period.
In contrast to general non-stationary environments, our problem setting assumes a piecewise stable context as a special problem structure, which can be further exploited to improve performance over methods for general non-stationary environments. Accordingly, we propose to jointly infer the segment structure as well as the segment latent context. By doing so, we avoid having to solve non-stationary environments in the general case. In our paper, we implement a one-step VariBAD to represent the methods that ``model context $c_t$ at each time step''. For each time step, we use the whole trajectory as input, just like VariBAD. However, instead of decoding the whole trajectory, we only make the agent decode the current time step.
\subsection{Bayesian Update Rule}
\label{app:bayes_update}
In Section \ref{Sec:4-1gridworld}, we use the Bayesian belief update rule to calculate the belief distribution. Let $K$ denote the number of grids; then $b(\theta) \in \mathbb{R}^K$ lies on the simplex, gives the probability of each grid being the goal location, and is therefore a categorical distribution. As for the belief update rule, we have
$$ b'(\theta) = b(\theta|s,a,s',r) \propto b(\theta) p(r | s, a, \theta) $$
In the above equation, we omit the transition term since the transition remains stationary across the episode. As for the reward term $p(r|s,a,\theta)$, it can be derived from the definition. For the $k$-th grid, suppose the observed state is the $k'$-th grid. Then, if $k = k'$, the reward term is $p(r=1|\theta = k, s=k', a) = p_1$ and $p(r=0|\theta = k, s=k', a) = 1 - p_1$. If $k \neq k'$, the reward term is $p(r=1|\theta = k, s=k', a) = p_0$ and $p(r=0|\theta = k, s=k', a) = 1 - p_0$. In this way, we can obtain the exact belief without approximation.
\subsection{Hyper parameters and Implementation Details}
\label{Appendix:implementation}
\paragraph{Network Architecture} For the Half-Cheetah environments, we use a GRU \citep{cho2014learning} with 128 units as the dynamics model of the encoder $q_\phi$. For each time step, $q_\phi$ receives an encoded state, action and reward as input. The encoded state, action and reward are the outputs of three single-layer fully connected networks of respective sizes $[16, 32, 16]$ with ReLU as the activation function. We assume the latent distributions are $5$-dimensional Gaussians with predicted mean and standard deviation. As for the transition model $p_\theta(s_{t+1}|s_t, a_t, c)$, reward model $p_\theta(r_t|s_t, a_t, c)$ and observable context model $p_\theta(x_t|c)$, the outputs of all the decoders are Gaussian distributions with means parameterized by fully connected networks (with hidden sizes $[64, 32]$ and ReLU activation) and fixed identity covariance. As for the Ant environments, we use 10-dimensional Gaussians as the latent distribution. The other settings are kept the same as in the Half-Cheetah environments. As for the policy training part, we adopt PPO \citep{schulman2017proximal} to learn the policy $\pi_{\psi}(a_t|s_t,b_t(c))$. The actor and critic are parameterized by fully connected networks of size $[128, 128]$ with Tanh as the activation function.
\paragraph{Training Details} The inputs of both the actor and the critic are $s_t$ and $b_t$. To parameterize the distribution $b_t$, we use its mean and standard deviation. For simplicity, we only take one possible segment length $g$ into consideration; according to our tests, this approximation does not cause an obvious performance drop.
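As a concrete illustration of how the belief enters the policy input, the following minimal sketch (hypothetical pseudocode of our own; function and variable names do not correspond to the released implementation) combines $s_t$ with the mean and standard deviation of $q_\phi$ for a single segment length $g$, covering both the training-time (sampled $g$) and evaluation-time (most probable $g$) variants described next.
\begin{verbatim}
# Minimal, hypothetical sketch -- names and shapes are illustrative assumptions.
import torch

def build_policy_input(s_t, tau, encoder, infer_posterior_G, training=True):
    """Return the actor/critic input [s_t, mean(b_t), std(b_t)]."""
    p_G = infer_posterior_G(tau)          # categorical posterior over lengths 1..t
    if training:
        g = int(torch.distributions.Categorical(p_G).sample()) + 1
    else:
        g = int(torch.argmax(p_G)) + 1    # most probable segment length
    mu, std = encoder(tau[-g:])           # q_phi(c | G_t = g, last g transitions)
    return torch.cat([s_t, mu, std], dim=-1)   # single-g approximation of b_t
\end{verbatim}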
During training, we randomly sample $g$ from the posterior, while during evaluation we use the most probable $g$. Then, we calculate the corresponding belief $b_t$ using $g$, and use its mean and standard deviation as the inputs. During training, we use the posterior $p(G_t|x_{1:t})$, which conditions only on the observable contexts, to simplify the computation. During evaluation, we instead use the posterior $p(G_t|\tau_{1:t})$ as stated in Section \ref{Sec:3recursive_inference}, since we can no longer access the training-only information $x$.
\paragraph{Hyper parameters} We train PPO with the Adam optimizer with learning rate 7e-4, max gradient norm 0.5, clip parameter 0.1, value loss coefficient 0.5, and entropy coefficient 0.01. {{We use different training schedules for the VAE part (encoder \& decoder) and the policy part: two Adam optimizers with different learning rates. The VAE optimizer uses a learning rate of 1e-3, and the policy optimizer uses a learning rate of 7e-4.}} For the full details of the hyper-parameter settings, please refer to the code repository that we will publish soon.
\paragraph{Environment Details} { We give detailed descriptions of the environments we used. } { The \textbf{Ant Direction} environment is built upon the well-known Ant Mujoco environment. For each environment step, we set a target direction in the 2D horizontal plane. The reward function is given as $v_f - 0.2 v_v$, where $v_f$ denotes the agent velocity along the target direction and $v_v$ denotes the agent velocity perpendicular to the target direction, which is always non-negative. Such a reward function pushes the agent to move faster along the target direction while penalizing the velocity component perpendicular to it. In the experiments, the target direction is chosen uniformly from $[0,2\pi]$. } { For \textbf{Ant Velocity}, we set a target velocity $\boldsymbol{v_t} = (v_{t,x}, v_{t,y})$ in the 2D horizontal plane. Denote the agent velocity as $\boldsymbol{v_a}=(v_{a,x},v_{a,y})$. We project the agent velocity $\boldsymbol{v_a}$ onto $\boldsymbol{v_t}$ to get the forward velocity component $v_f=\boldsymbol{v_t}\cdot \boldsymbol{v_a} / \lVert\boldsymbol{v_t}\rVert_2$. The velocity component perpendicular to the target velocity is given by $v_v=\sqrt{\lVert \boldsymbol{v_a}\rVert_2^2-v_f^2}$. If the forward velocity $v_f<\lVert \boldsymbol{v_t}\rVert_2$, we compute the reward as $v_f-0.3 v_v$, meaning that we expect the agent to move faster along the direction of the task velocity, but not along the perpendicular direction. Otherwise, when $v_f\ge\lVert \boldsymbol{v_t}\rVert_2$, we compute the reward as $-v_f + 2\lVert\boldsymbol{v_t}\rVert_2-0.3v_v$. Observe that if we ignore the term $-0.3v_v$ in the reward function, the largest reward is assigned when $v_f = \lVert \boldsymbol{v_t}\rVert_2$ and the reward decreases as $v_f$ deviates from $\lVert \boldsymbol{v_t}\rVert_2$, thus encouraging the agent to follow the target velocity. In the experiments, $v_{t,x}$ and $v_{t,y}$ are independently chosen from $\text{Uniform}(-3,3)$ for each segment. } { \textbf{Cheetah Direction}, \textbf{Cheetah Goal} and \textbf{Cheetah Velocity} are modified from the Half Cheetah Mujoco environment. The agent can only move along the $x$-axis. In the original version of Half Cheetah, the reward is given by the agent velocity along the positive $x$-axis. Denote the agent velocity as $v_a$ and the agent location as $x_a$.
In Cheetah Direction, we set the target direction to the positive or the negative $x$-axis, and the reward is $v_a$ if the target direction is the positive $x$-axis and $-v_a$ otherwise. We choose the positive and negative directions with equal probability. In Cheetah Goal, we set a target location $x_t$ on the $x$-axis in each environment step, and the reward function is $-|x_a-x_t|$, with $x_t$ sampled from $\text{Uniform}(-5,5)$. The agent location $x_a$ is also included in the observation space, which is not the case for the original Half Cheetah, Cheetah Direction and Cheetah Velocity. For Cheetah Velocity, we set a target velocity $v_t$ along the $x$-axis; the reward is $-|v_t-v_a|$, and $v_t$ is chosen from $\text{Uniform}(-5,5)$. }
\subsection{Extended Experiment Results}
\label{Appendix:expMujoco}
\subsubsection{A Case Study on Ant Direction}
\label{app:casestudy}
\begin{figure}
\caption{Behavior and the learned latent contexts of different algorithms on Ant Direction.}
\label{fig:app-behavior}
\end{figure}
{{ To better understand the superior performance of SeCBAD, we show the behavior of SeCBAD and the other baselines, together with the learned latent contexts, in Figure \ref{fig:app-behavior}. }} {The first row shows the agent's location through time. It can be seen that the agents trained with PPO RNN, VariBAD, and Factor MDP choose to stay around a fixed position shortly after starting, while SeCBAD successfully keeps the agent moving.} {In the second row, we show the agent's direction along with the target (task) direction. The orange curve shows the time-varying task direction, and the blue curve shows the agent's actual direction. Since the goal is to maximize the velocity along the task direction, we plot the velocity along the task direction in the third row. It can be concluded that SeCBAD is able to detect and adapt to the variations rapidly, and the agent is indeed moving along the task direction, while the other methods fail to detect and adapt to the non-stationary task in time. After detecting the task direction change, SeCBAD's velocity drops and then recovers as it changes direction, while the velocities of the other baselines are close to zero and the learned behaviors are undesirable.} {We provide some insights into these results. We plot the mean of the latent in the fourth row of Figure \ref{fig:app-behavior}. VariBAD \citep{zintgraf2020varibad} tries to learn a single context for the whole episode and trains the decoder by reconstructing the whole trajectory. This may make the agent learn the same contexts across the episode.
Therefore the learned latent mean is smooth and thus uninformative about the changing environment. Please refer to Appendix \ref{ablation:numberofsegment} for a detailed analysis. FANS-RL \citep{feng2022factored} assumes that the contexts evolve in a Markovian pattern. PPO-RNN \citep{hausknecht2015deep} treats the problem in its general form as a POMDP and leverages little information about the context pattern. It is hard for these two methods to learn a meaningful representation of the contexts, especially in complex environments like Ant Direction. However, as for SeCBAD, thanks to the joint inference framework and the segment decoder, the learned latent mean can change abruptly when the inferred segment structure changes, which informs the policy that the contexts have changed. The behavior in Figure \ref{fig:app-behavior} shows that the agent can learn to adapt to the variations rapidly with this sufficiently accurate belief. We provide more case studies on the other tasks from Section \ref{Sec:4-2mujoco} in Appendix \ref{ablation:casestudy2}. }
\subsubsection{Prior Analysis}
\label{app:prior}
\begin{figure}
\caption{Analysis on the choice of prior}
\label{fig:app-prior}
\end{figure}
In this section, we analyze the choice of the prior $p(G_t = k | G_{t-1}=i)$ on Ant Direction. The results are provided in Figure \ref{fig:app-prior}. We choose two different prior functions. For prior $1$, we let $p(G_t = 1 | G_{t-1}=i) = \frac{1}{80}$ for any $i$ and $p(G_t = i+1 | G_{t-1}=i) = \frac{79}{80}$. For prior $2$, we use a more accurate prior obtained by rolling out the generative process several times and approximating $p(G_t=k|G_{t-1}=i)$ from the simulated data. The performance under these two priors is shown in Figure \ref{fig:app-prior}.
\begin{figure}
\caption{The segment length posterior $p(G_t|\tau_{1:t})$.}
\label{fig:app-prior-gt}
\end{figure}
It can be concluded that there is little performance gap between the two prior functions. To gain more insight, we plot the probability $p(G_t|\tau_{1:t})$ in Figure \ref{fig:app-prior-gt}. The results show that the inferred posterior is very deterministic; the one with the approximate prior is slightly blurrier, but still close to deterministic. This result shows that the observation term dominates the prior term and our method is robust with respect to the choice of prior.
\subsubsection{Ablation study on the number of segments}
\label{ablation:numberofsegment}
\begin{figure}
\caption{Ablation study on the number of segments. We keep the segment length fixed and adjust the episode length $L\in\{100, 200, 300, 400, 500\}$.}
\label{fig:abl-trajlen}
\end{figure}
{{ In this section, we study how the number of segments affects the performance of SeCBAD and VariBAD \citep{zintgraf2020varibad}. As analyzed in Section \ref{Sec:4-2mujoco}, we hypothesize that methods that assume contexts stay the same within an episode, like \citet{zintgraf2020varibad}, may end up learning an averaged latent context. We argue that this is not related to the length of the segments but rather to the number of segments in an episode. During reconstruction, \citet{zintgraf2020varibad} uses the learned latent contexts at timestep $t$ to decode the transitions and rewards in the whole trajectory. Suppose there are $n$ segments in the episode; then the probability of reconstructing the correct transition and reward (i.e., those in the same segment) is only $\frac{1}{n}$. The noisy, even erroneous, training signal may worsen as $n$ grows, ending up with an averaged latent context for the whole episode.
}} {{ To look further into this hypothesis, we conduct an ablation study on the number of segments. We choose the Ant Direction environment and change the maximum episode length $L \in \{100, 200, 300, 400, 500\}$ while keeping the average segment length fixed. Then the expected number of segments $n$ varies accordingly. We plot the average reward per step instead of the accumulated reward to cancel out the effect of $L$ and study the effect of $n$. }} {{As illustrated in Figure \ref{fig:abl-trajlen}, for SeCBAD, $n$ has little effect on performance. However, for VariBAD, the performance deteriorates as $n$ grows. The results are consistent with the above analysis and show the significance of jointly inferring the segment structure and the context belief. }}
\subsubsection{Ablation study on using $p(G_t|\tau_{1:t})$}
\label{ablation:pgt}
\begin{figure}
\caption{Ablation study on how to use the inferred $p(G_t|\tau_{1:t})$.}
\label{fig:abl-pgt}
\end{figure}
{{ During the implementation, we face a choice on how to use the inferred segment structure $p(G_t|\tau_{1:t})$. Since there are $t$ possible values of $G_t$ at timestep $t$, it is hard to use the full distribution directly. One option is to use the $G_t$ with the highest probability. Another option is to sample $G_t \sim p$ to approximate the full distribution. In this section, we perform an ablation study on Ant Direction to compare the performance of the two options. We keep the other parameters fixed and only modify the way $G_t^*$ is used.}} As shown in Figure \ref{fig:abl-pgt}, {{ the performances of the two options are very close. Therefore, for stability, we choose the $G_t^*$ with the highest probability in the experiments. }}
\subsubsection{Ablation study on context robustness}
\label{ablation:robustness}
\begin{figure}
\caption{Ablation study on the robustness of SeCBAD to the noise within each segment. We change the standard deviation $\sigma \in \{0.01, 0.05, 0.1, 0.2\}$.}
\label{fig:abl-robust}
\end{figure}
{{ In many real-world applications, the contexts are generally piecewise stable. However, within each segment, the contexts might be slightly noisy. Therefore, it is important to test the robustness of the proposed algorithm against the noise within each segment. In this section, we conduct an ablation study on SeCBAD on Ant Direction by adjusting the standard deviation of the noise. }} {{ For each segment, we uniformly sample the mean of the contexts. Then, for each timestep within each segment, we assume the contexts follow a Gaussian distribution. We adjust the standard deviation of the Gaussian distribution to model different noise levels by setting $\sigma \in \{0.01, 0.05, 0.1, 0.2\}$ and keep the other parameters fixed. As illustrated in Figure \ref{fig:abl-robust}, SeCBAD is able to handle noise within each segment, since the performances under different $\sigma$ are very close. This further exhibits the ability of SeCBAD to handle many real-world applications. }}
\subsubsection{More visualizations}
\label{ablation:casestudy2}
{In this section, we provide case studies of SeCBAD and the other baselines for the remaining environments from Section \ref{Sec:4-2mujoco}. For the case study on Ant Direction, we refer to Appendix \ref{app:casestudy}. Given each environment, we visualize the model behavior after training for the same number of frames.
We use the same random seed for SeCBAD and the other three baselines; thus, these algorithms are tested on environments with the same trajectory context.}
\begin{figure}
\caption{A case study on Ant Velocity.}
\end{figure}
\begin{figure}
\caption{A case study on Half Cheetah Velocity.}
\end{figure}
\begin{figure}
\caption{A case study on Half Cheetah Direction.}
\end{figure}
\begin{figure}
\caption{A case study on Half Cheetah Velocity.}
\end{figure}
\subsection{Bandwidth Control Tasks for Real-Time Communication}
\label{app:rtcexp}
{{ To further illustrate that the proposed LS-MDP setting can boost the deployment of RL in many real-world applications, we test SeCBAD on a real-world bandwidth control task for real-time communications (RTC) \citep{alphartc} in this section.}} {{ The use of RTC applications, e.g., online audio/video calls and conferences, has increased greatly since the global pandemic. The most critical goal in RTC is to provide a high Quality of Experience (QoE) for users, including high audio/video bitrate, low end-to-end latency, no video freezes, and few bitrate switches. To achieve this, a bandwidth control module is needed, i.e., the RTC sender needs to decide the bitrate of the outgoing audio/video based on the network status towards the receiver. For example, it would decrease the bitrate when observing a high end-to-end delay; otherwise, the bitrate would be increased.}} {{ However, the ground-truth network conditions are always changing in multiple respects, such as the available capacity, the Round-Trip Time (RTT), and so on. The simple rule \textit{``decrease the bitrate when a high delay is observed''} becomes unreasonable when the ground-truth RTT increases. This means the system should rapidly detect and react to the network condition; otherwise, the users' QoE may suffer. }} { To test SeCBAD on the bandwidth control problem in RTC, we use AlphaRTC \citep{alphartc} as our simulator environment. In our reinforcement learning formulation, we use as the state $s_t$ a 7-tuple of current network statistics visible to the agent, consisting of the sending rate, short-term and long-term receiving rates, loss, and delay. The action $a_t$ is the estimated bandwidth. The reward function is formulated as $2R / C - (D - RTT/2) - L - 1 $, where $R$ is the receiving rate in the time step, $D$ is the average delay in the time step, $L$ is the packet loss rate, and $C$ and $RTT$ are the ground-truth capacity and average RTT of the network. The latent context $c_t$ here refers to the fluctuating network condition; in this section, we take $x_t$ to be the ground-truth bandwidth capacity $C$ and the RTT. The agents are trained in AlphaRTC \citep{alphartc}, which simulates real-time communication processes by specifying $C$ and $RTT$. All the network statistics are normalized to the $(0, 1)$ range.}
\begin{figure}
\caption{Experiment results on bandwidth control for RTC.}
\label{fig:exp-rtc}
\end{figure}
{ In this experiment, we compare our method with VariBAD \citep{zintgraf2020varibad}, FANS-RL \citep{feng2022factored}, vanilla PPO \citep{schulman2017ppo} and PPO-RNN \citep{hausknecht2015deep}. We also include oracle PPO scores obtained by incorporating the unobservable contexts into the observable states}. {{All the methods are trained for 10 million steps and the shaded area is across 3 random seeds.}} {As illustrated in Figure \ref{fig:exp-rtc}, SeCBAD achieves better performance than the other baselines and is very close to the oracle PPO score.
The performances of VariBAD \citep{zintgraf2020varibad} and FANS-RL \citep{feng2022factored} are better than that of PPO-RNN \citep{hausknecht2015deep}, but SeCBAD outperforms both of these methods. The results suggest that SeCBAD is able to detect and adapt to the varying contexts more rapidly, which enables more precise control. This indicates the importance of the joint inference structure. } {{ Figure \ref{fig:case-rtc} shows the detailed behavior of the different methods in one representative case. }} {{ The first row shows the agent's action (in blue) against the true available bandwidth (in orange). The second row shows the latency observed by the agent, which partially reflects the mixed effect of another context, the RTT. It can be observed that for SeCBAD, the agent is able to detect the change in bandwidth, adapt to it in time, and produce a stable policy within each segment. The drops in actions coincide with the observed increases in latency. After realizing that the actual capacity has not changed, the agent then increases the actions back to the optimal value. However, for the other baselines, the actions oscillate a lot, especially for FANS-RL \citep{feng2022factored}, which is not practical since it may lead to frequent bitrate switches. VariBAD \citep{zintgraf2020varibad} and PPO-RNN \citep{hausknecht2015deep} learn a rather smooth and conservative policy that leaves a large margin between the action and the actual capacity, which may waste network capacity. }}
\begin{figure}
\caption{Case study on bandwidth control for RTC.}
\label{fig:case-rtc}
\end{figure}
\end{document}
\begin{document} \title{Why geometric numerical integration?} \author{A. Iserles \& G.R.W. Quispel} \maketitle \thispagestyle{empty} \section{The purpose of GNI} Geometric numerical integration (GNI) emerged as a major thread in numerical mathematics some 25 years ago. Although it has had antecedents, in particular the concerted effort of the late Feng Kang and his group in Beijing to design structure-preserving methods, the importance of GNI has been recognised and its scope delineated only in the 1990s. But we are racing ahead of ourselves. At the beginning, like always in mathematics, there is the definition and the rationale of GNI. The rationale is that all-too-often mathematicians concerned with differential equations split into three groups that have little in common. Firstly, there are the applied mathematicians, the model builders, who formulate differential equations to describe physical reality. Secondly, there are those pure mathematicians investigating differential equations and unravelling their qualitative features. Finally, the numerical analysts who flesh out the numbers and the graphics on the bones of mathematical formulation. Such groups tended to operate in mostly separate spheres and, in particular, this has been true with regards to computation. Discretisation methods were designed (with huge creativity and insight) to produce rapidly and robustly numerical solutions that can be relied to carry overall small error. Yet, such methods have often carried no guarantee whatsoever to respect qualitative features of the underlying system, the very same features that had been obtained with such effort by pure and applied mathematicians. Qualitative features come basically in two flavours, the {\em dynamical\/} and the {\em geometric.\/} Dynamical features -- sensitivity with respect to initial conditions and other parameters, as well as the asymptotic behaviour -- have been recognised as important by numerical analysts for a long time, not least because they tend to impinge directly on accuracy. Thus, sensitivity with respect to initial conditions and perturbations comes under `conditioning' and the recovery of correct asymptotics under `stability', both subject to many decades of successful enquiry. Geometric attributes are invariants, constants of the flow. They are often formulated in the language of differential geometry (hence the name!) and mostly come in three varieties: {\em conservation laws,\/} e.g.\ Hamiltonian energy or angular momentum, which geometrically mean that the solution, rather than evolving in some large space $\BB{R}^d$, is restricted to a lower-dimensional manifold $\mathcal{M}$, {\em Lie point symmetries,\/} e.g.\ scaling invariance, which restrict the solution to the tangent bundle of some manifold, and quantities like {\em symplecticity\/} and {\em volume,\/} whose conservation corresponds to an evolution on the cotangent bundle of a manifold. {\em The design and implementation of numerical methods that respect geometric invariants is the business of GNI.\/} Since its emergence, GNI has become the new paradigm in numerical solution of ODEs, while making significant inroads into numerical PDEs. As often, yesterday's revolutionaries became the new establishment. This is an excellent moment to pause and take stock. Have all the major challenges been achieved, all peaks scaled, leaving just a tidying-up operation? 
Is there still any point to GNI as a separate activity or should it be considered a victim of its own success, its practitioners departing to fields anew -- including new areas of activity that have been fostered or enabled by GNI? These are difficult questions and we claim no special authority to answer them in an emphatic fashion. Yet, these are questions which, we believe, must be addressed. This short article is an attempt to foster a discussion. We commence with a brief survey of the main themes of GNI {\em circa\/} 2015. This is followed by a review of recent and ongoing developments, as well as of some new research directions that have emerged from GNI but have acquired a life of their own. \section{The story so far} \subsection{Symplectic integration} The early story of GNI is mostly the story of symplectic methods. A Hamiltonian system \begin{equation} \label{Hamiltonian} \dot{\MM{p}}=-\frac{\partial H(\MM{p},\MM{q})}{\partial\MM{q}},\qquad \dot{\MM{q}}=\frac{\partial H(\MM{p},\MM{q})}{\partial \MM{p}}, \end{equation} where $H:\BB{R}^{2d}\rightarrow\BB{R}$ is a {\em Hamiltonian energy,\/} plays a fundamental role in mechanics and is known to possess a long list of structural invariants, e.g.\ the conservation of the Hamiltonian energy. Yet, arguably its most important feature is the conservation of the {\em symplectic form\/} $\sum_{k=1}^d \D\MM{p}_k\wedge\D\MM{q}_k$ because symplecticity is equivalent to Hamiltonicity -- in other words, every solution of a Hamiltonian system is a symplectic flow and every symplectic flow is Hamiltonian with respect to an appropriate Hamiltonian energy \cite{hairer06gni}. The solution of Hamiltonian problems using symplectic methods has a long history, beautifully reviewed in \cite{hairer03gni}, but modern efforts can be traced to the work of Feng and his collaborators at the Chinese Academy of Sciences, who have used generating-function methods to solve Hamiltonian systems \cite{feng89ccd}. And then, virtually simultaneously, \citeasnoun{lasagni88crk}, \citeasnoun{sanzserna88rks} and \citeasnoun{suris88pss} proved that certain Runge--Kutta methods, including the well-known Gauss--Legendre methods, preserve symplecticity, and they presented an easy criterion for the symplecticity of Runge--Kutta methods. GNI came of age! Subsequent research into symplectic Runge--Kutta methods has branched out into a number of directions, each with its own important ramifications outside the Hamiltonian world: \begin{itemize} \item {\em Backward error analysis.\/} The idea of backward error analysis (\reflectbox{BEA}) can be traced to Wilkinson's research into linear algebra algorithms in the 1950s. Instead of asking ``what is the numerical error for our problem'', Wilkinson asked ``which nearby problem is solved {\em exactly\/} by our method?''. The difference between the original and the nearby problem can tell us a great deal about the nature of the error in a numerical algorithm. A generalisation of \reflectbox{BEA} to the field of differential equations is fraught with difficulties. Perhaps the first successful attempt to analyse Hamiltonian ODEs in this setting was by \citeasnoun{neishtadt84sms} and it was followed by many others, too numerous to list: an excellent exposition (as for many things GNI) is the monograph of \citeasnoun{hairer06gni}. The main technical tool is the B-series, an expansion of composite functions in terms of forests of rooted trees, originally pioneered by \citeasnoun{butcher63csr}.
(We mention in passing that the Hopf algebra structure of this {\em Butcher group\/} has recently been exploited by mathematical physicists to understand the renormalisation group \cite{connes99lqt} -- as the authors write, ``We regard Butcher's work on the classification of numerical integration methods as an impressive example that concrete problem-oriented work can lead to far-reaching conceptual results''.) It is possible to prove that, subject to very generous conditions, the solution of a Hamiltonian problem by a symplectic method, implemented with constant step size, is exponentially near to the {\em exact\/} solution of a nearby Hamiltonian problem for an exponentially long time. This leads to considerably greater numerical precision, as well as to the conservation on average (in a strict ergodic sense) of the Hamiltonian energy. B-series fall short in a highly oscillatory and multiscale setting, encountered frequently in practical Hamiltonian systems. The alternative in the \reflectbox{BEA} context is an expansion into {\em modulated Fourier series\/} \cite{hairer00lte}. \item {\em Composition and splitting.} Many Hamiltonians of interest can be partitioned into a sum of kinetic and potential energy, $H(\MM{p},\MM{q})=\MM{p}^\top M\MM{p}+V(\MM{q})$. It is often useful to take advantage of this in the design of symplectic methods. While conventional symplectic Runge--Kutta methods are implicit, hence expensive, {\em partitioned Runge--Kutta methods,\/} advancing separately in the `direction' of kinetic and potential energy, can be explicit and are in general much cheaper. While perhaps the most important method, the St\"ormer--Verlet scheme \cite{hairer03gni}, has been known for many years, modern theory has led to an entire menagerie of composite and partitioned methods \cite{sanzserna94nhp}. Splitting methods\footnote{Occasionally known in the PDE literature as {\em alternate direction methods.\/}} have been used in the numerical solution of PDEs since the 1950s. Thus, given the equation $u_t=\mathcal{L}_1(u)+\mathcal{L}_2(u)$, where the $\mathcal{L}_k$s are (perhaps nonlinear) operators, the idea is to approximate the solution in the form \begin{equation} \label{splitting} u(t+h)\approx {\mathrm e}^{\alpha_1 h\mathcal{L}_1} {\mathrm e}^{\beta_1 h\mathcal{L}_2} {\mathrm e}^{\alpha_2 h\mathcal{L}_1} \cdots {\mathrm e}^{\alpha_s h\mathcal{L}_1} {\mathrm e}^{\beta_s h\mathcal{L}_2}u(t), \end{equation} where $v(t_0+h)=:{\mathrm e}^{h \mathcal{L}_1}v(t_0)$ and $w(t_0+h)=:{\mathrm e}^{h \mathcal{L}_2}w(t_0)$ are, formally, the solutions of $\dot{v}=\mathcal{L}_1(v)$ and $\dot{w}=\mathcal{L}_2(w)$ respectively, with suitable boundary conditions. The underlying assumption is that the solutions of the latter two equations are either available explicitly or are easy to approximate, while the original equation is more difficult. Pride of place belongs to {\em palindromic compositions\/} of the form \begin{equation} \label{palindromic} {\mathrm e}^{\alpha_1 h\mathcal{L}_1} {\mathrm e}^{\beta_1 h\mathcal{L}_2} {\mathrm e}^{\alpha_2 h\mathcal{L}_1} \cdots {\mathrm e}^{\alpha_q h\mathcal{L}_1}{\mathrm e}^{\beta_q h\mathcal{L}_2}{\mathrm e}^{\alpha_q h\mathcal{L}_1} \cdots {\mathrm e}^{\alpha_2 h\mathcal{L}_1} {\mathrm e}^{\beta_1 h\mathcal{L}_2} {\mathrm e}^{\alpha_1 h\mathcal{L}_1}, \end{equation} invariant with respect to a reversal of the terms. They constitute a {\em time-symmetric map,\/} and this has a number of auspicious consequences. Firstly, they are always of an even order.
Secondly -- and this is crucial in the GNI context -- they respect both structural invariants whose integrators are closed under composition, i.e.\ form a group (for example integrators preserving volume, symmetries, or first integrals), and invariants whose integrators are closed under symmetric composition, i.e.\ form a symmetric space (for example integrators that are self-adjoint, or preserve reversing symmetries). A basic example of \R{palindromic} is the second-order {\em Strang composition\/} \begin{displaymath} {\mathrm e}^{\frac12 h\mathcal{L}_1} {\mathrm e}^{h\mathcal{L}_2}{\mathrm e}^{\frac12 h \mathcal{L}_1} ={\mathrm e}^{h(\mathcal{L}_1+\mathcal{L}_2)} +\O{h^3}. \end{displaymath} Its order -- and, for that matter, the order of any time-symmetric method -- can be boosted by the {\em Yoshida device\/} \cite{yoshida90cho}. Let $\Phi$ be a time-symmetric approximation to ${\mathrm e}^{t\mathcal{L}}$ of order $2P$, say. Then \begin{displaymath} \Phi((1+\alpha)h)\Phi(-(1+2\alpha)h)\Phi((1+\alpha)h),\qquad \mbox{where}\qquad \alpha=\frac{2^{1/(2P+1)}-1}{2-2^{1/(2P+1)}} \end{displaymath} is also time symmetric and of order $2P+2$. Successive applications of the Yoshida device make it possible to increase the order of the Strang composition arbitrarily, while retaining its structure-preserving features. This is but a single example of the huge world of splitting and composition methods, reviewed in \cite{mclachlan02sm}. \item {\em Exponential integrators.} Many `difficult' ODEs can be written in the form $\dot{\MM{y}}=A\MM{y}+\MM{b}(\MM{y})$ where the matrix $A$ is `larger' (in some sense) than $\MM{b}(\MM{y})$ -- for example, $A$ may be the Jacobian of the ODE (which may vary from step to step). Thus, it is to be expected that the `nastiness' of the ODE under scrutiny -- be it stiffness, Hamiltonicity or high oscillation -- is somehow `hardwired' into the matrix $A$. The exact solution of the ODE can be written in terms of the variation-of-constants formula, \begin{equation} \label{VoC} \MM{y}(t+h)={\mathrm e}^{hA}\MM{y}(t)+\int_0^h {\mathrm e}^{(h-\xi)A}\MM{b}(\MM{y}(t+\xi))\D\xi, \end{equation} except that, of course, the right-hand side includes the unknown function $\MM{y}$. Given the availability of very effective methods to compute the matrix exponential, we can exploit this to construct {\em exponential integrators,\/} explicit methods that often exhibit favourable stability and structure-preservation features. The simplest example, the {\em exponential Euler\/} method, freezes $\MM{y}$ within the integral in \R{VoC} at its known value at $t$, the outcome being the first-order method \begin{displaymath} \MM{y}_{n+1}={\mathrm e}^{hA}\MM{y}_n+A^{-1}({\mathrm e}^{hA}-I)\MM{b}(\MM{y}_n). \end{displaymath} The order can be boosted by observing that (in a loose sense which can be made much more precise) the integral above is discretised by the Euler method, which is a one-stage explicit Runge--Kutta scheme; discretising it instead by multistage schemes of this kind leads to higher-order methods \cite{hochbruck10ei}. Many Hamiltonian systems of interest can be formulated as second-order systems of the form $\ddot{\MM{y}}+\Omega^2\MM{y}=\MM{g}(\MM{y})$. Such systems feature prominently in the case of highly oscillatory mechanical systems, where $\Omega$ is positive definite and has some large eigenvalues. Variation of constants \R{VoC} now reads \begin{Eqnarray*} \left[\!
\begin{array}{c} \MM{y}(t+h)\\ \dot{\MM{y}}(t+h) \end{array} \!\right]&=& \left[ \begin{array}{cc} \cos(h\Omega) & \Omega^{-1}\sin(h\Omega)\\ -\Omega\sin(h\Omega) & \cos(h\Omega) \end{array} \right] \left[\! \begin{array}{c} \MM{y}(t)\\ \dot{\MM{y}}(t) \end{array} \!\right]\\ &&\mbox{}+\int_t^{t+h} \left[ \begin{array}{cc} \cos((h-\xi)\Omega) & \Omega^{-1}\sin((h-\xi)\Omega)\\ -\Omega\sin((h-\xi)\Omega) & \cos((h-\xi)\Omega) \end{array} \right] \! \left[ \begin{array}{c} \MM{0}\\ \MM{g}(\MM{y}(t+\xi)) \end{array} \right]\!\D\xi \end{Eqnarray*} and we can use either standard exponential integrators or exponential integrators designed directly for second-order systems, using Runge--Kutta--Nystr\"om methods on the nonlinear part \cite{wu13spa}. An important family of exponential integrators for second-order systems is that of {\em Gautschi-type methods\/} \begin{equation} \label{Gautschi} \MM{y}_{n+1}-2\MM{y}_n+\MM{y}_{n-1}=h^2\Psi(h\Omega) (\MM{g}_n-\Omega^2\MM{y}_n), \end{equation} which are of second order. Here $\Psi(x)=2(1-\cos x)/x^2$ while, in Gautschi's original method, $\MM{g}_n=\MM{g}(\MM{y}_n)$ \cite{hochbruck10ei}. Unfortunately, this choice results in resonances and a better one is $\MM{g}_n=\MM{g}(\Phi(h\Omega)\MM{y}_n)$, where the {\em filter\/} $\Phi$ eliminates resonances: $\Phi(0)=I$ and $\Phi(k\pi)=0$ for $k\in\BB{N}$. We refer to \cite{hochbruck10ei} for further discussion of such methods in the context of symplectic integration. \item {\em Variational integrators.} {\em Lagrangian formulation\/} recasts a large number of differential equations as extremal problems for nonlinear functionals. Thus, for example, instead of the Hamiltonian problem $M\ddot{\MM{q}}+\MM{\nabla} V(\MM{q})=\MM{0}$, where the matrix $M$ is positive definite, we may consider the equivalent variational formulation of extremising the action associated with the Lagrangian $L(\MM{q},\dot{\MM{q}})=\frac12 \dot{\MM{q}}^\top M\dot{\MM{q}}-V(\MM{q})$. With greater generality, Hamiltonian and Lagrangian formulations are connected via the familiar Euler--Lagrange equations and, given the Lagrangian $L$, the corresponding second-order system is \begin{displaymath} \frac{\partial L(\MM{q},\dot{\MM{q}})}{\partial\MM{q}}-\frac{\D}{\D t} \left[\frac{\partial L(\MM{q},\dot{\MM{q}})}{\partial \dot{\MM{q}}}\right]=\MM{0}. \end{displaymath} The rationale of variational integrators parallels that of the {\em Ritz method\/} in the theory of finite elements. We first reformulate the Hamiltonian problem as a Lagrangian one, project it to a finite-dimensional space, solve it there and transform back. The original symplectic structure is replaced by a finite-dimensional symplectic structure, hence the approach is by design symplectic \cite{marsden01dmv}. \end{itemize} \subsection{Lie-group methods} Let $\mathcal{G}$ be a Lie group and $\mathcal{M}$ a differentiable manifold. We say that $\Lambda:\mathcal{G}\times\mathcal{M}\rightarrow\mathcal{M}$ is a {\em group action\/} if\\[4pt] a.~$\Lambda(\iota,y)=y$ for all $y\in\mathcal{M}$ (where $\iota$ is the identity of $\mathcal{G}$) and \\[2pt] b.~$\Lambda(p,\Lambda(q,y))=\Lambda(p\cdot q,y)$ for all $p,q\in\mathcal{G}$ and $y\in\mathcal{M}$.\\[4pt] If, in addition, for every $x,y\in\mathcal{M}$ there exists $p\in\mathcal{G}$ such that $y=\Lambda(p,x)$, the action is said to be transitive and $\mathcal{M}$ is a {\em homogeneous space,\/} acted upon by $\mathcal{G}$.
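To make the definition concrete, here is a small numerical check -- our own illustration, not part of the original text -- that multiplication by orthogonal matrices, $\Lambda(p,y)=py$, satisfies the two axioms above and keeps vectors of unit length on the unit sphere; the role of such actions in Lie-group methods is described below.
\begin{verbatim}
# Toy check of the group-action axioms for SO(n) acting on the unit sphere.
import numpy as np

rng = np.random.default_rng(0)

def random_rotation(n):
    q, _ = np.linalg.qr(rng.standard_normal((n, n)))
    if np.linalg.det(q) < 0:
        q[:, 0] *= -1.0          # flip one column so that det(q) = +1
    return q

n = 4
p, q = random_rotation(n), random_rotation(n)
y = rng.standard_normal(n); y /= np.linalg.norm(y)    # a point on the sphere

Lam = lambda g, v: g @ v                               # the action Lambda(g, y) = g y
assert np.allclose(Lam(np.eye(n), y), y)               # axiom (a): identity acts trivially
assert np.allclose(Lam(p, Lam(q, y)), Lam(p @ q, y))   # axiom (b): compatibility with products
assert np.isclose(np.linalg.norm(Lam(p, y)), 1.0)      # the orbit stays on the sphere
\end{verbatim}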
Every Lie group acts upon itself, while the orthogonal group $\CC{O}(n)$ acts on the $(n-1)$-sphere by multiplication, $\Lambda(p,y)=py$. The orthogonal group also acts on the {\em isospectral manifold\/} of all symmetric matrices similar to a specific symmetric matrix by similarity, $\Lambda(p,y)=pyp^\top$. Given $1\leq m\leq n$, the {\em Grassmann manifold\/} $\BB{G}(n,m)$ of all $m$-dimensional subspaces of $\BB{R}^n$ is a homogeneous space acted upon by the special orthogonal group $\CC{SO}(n)$ -- more precisely, $\BB{G}(n,m)=\CC{SO}(n)/(\CC{SO}(m)\times\CC{SO}(n-m))$. Faced with a differential equation evolving in a homogeneous space, we can identify its flow with a group action: Given an initial condition $y_0\in\mathcal{M}$, instead of asking ``what is the value of $y$ at time $t>0$'' we might pose the equivalent question ``what is the group action that takes the solution from $y_0$ to $y(t)$?''. This is often a considerably more helpful formulation because a group action can be further related to an {\em algebra action.\/} Let $\GG{g}$ be the Lie algebra corresponding to the matrix group $\mathcal{G}$, i.e.\ the tangent space at $\iota\in\mathcal{G}$, and denote by $\GG{X}(\mathcal{M})$ the set of all Lipschitz vector fields over $\mathcal{M}$. Let $\lambda:\GG{g}\rightarrow\GG{X}(\mathcal{M})$ and $a:\BB{R}_+\times\mathcal{M}\rightarrow\GG{g}$ be both Lipschitz. In particular, we might consider \begin{displaymath} \lambda(a,y)=\frac{\D}{\D s} \Lambda(\rho(s,y),y)\,\rule[-6pt]{0.75pt}{18pt}_{\,s=0}, \end{displaymath} where $\Lambda$ is a group action and $\rho:\BB{R}_+\rightarrow\mathcal{G}$, $\rho(s,y(s))=\iota+a(s,y(s))s+\O{s^2}$ for small $|s|$. The equation $\dot{y}=\lambda(a(t,y),y)$, $y(0)=y_0\in\mathcal{M}$ represents an {\em algebra action\/} and its solution evolves in $\mathcal{M}$. Moreover, \begin{equation} \label{alg_action} y(t)=\Lambda(v(t),y_0)\qquad \mbox{where}\qquad \dot{v}=a(t,\Lambda(v,y_0))v,\quad v(0)=\iota\in\mathcal{G} \end{equation} is a {\em Lie-group equation.\/} Instead of solving the original ODE on $\mathcal{M}$, it is possible to solve \R{alg_action} and use the group action $\Lambda$ to advance the solution to the next step: this is the organising principle of most {\em Lie-group methods\/} \cite{iserles00lgm}. It works because a Lie-group equation can be solved in the underlying Lie algebra, which is a {\em linear space.\/} Consider an ODE\footnote{Or, for that matter, a PDE, except that formalities are somewhat more complicated.} $\dot{y}=f(y)$, $y(0)\in\mathcal{M}$, such that $f\in\GG{X}(\mathcal{M})$ -- the solution $y(t)$ evolves on the manifold. While conventional numerical methods are highly unlikely to stay in $\mathcal{M}$, this is not the case for Lie-group methods. We can travel safely between $\mathcal{M}$ and $\mathcal{G}$ using a group action. The traffic between $\mathcal{G}$ and $\GG{g}$ is slightly more complicated and we need to define a {\em trivialisation,\/} i.e.\ an invertible map that smoothly takes a neighbourhood of $0\in\GG{g}$ to a neighbourhood of $\iota\in\mathcal{G}$, mapping zero to the identity.
The most ubiquitous example of a trivialisation is the exponential map, which represents the solution of \R{alg_action} as $v(t)={\mathrm e}^{\omega(t)}$, where $\omega$ is the solution of the {\em dexpinv equation\/} \begin{equation} \label{dexpinv} \dot{\omega}=\sum_{m=0}^\infty \frac{\CC{B}_m}{m!} \CC{ad}^m_{\omega} a(t,{\mathrm e}^\omega),\qquad \omega(0)=0\in\GG{g} \end{equation} \cite{iserles00lgm}. Here the $\CC{B}_m$s are Bernoulli numbers, while $\CC{ad}^m_b$ is the {\em adjoint operator\/} in $\GG{g}$, \begin{displaymath} \CC{ad}_b^0 c=c,\qquad \CC{ad}_b^m c=[b,\CC{ad}_b^{m-1}c],\quad m\in\BB{N},\qquad b,c\in\GG{g}. \end{displaymath} Because $\GG{g}$ is closed under linear operations and commutation, solving \R{dexpinv} while respecting the Lie-algebraic structure is straightforward. Mapping back, first to $\mathcal{G}$ and finally to $\mathcal{M}$, we keep the numerical solution of $\dot{y}=f(y)$ on the manifold. Particularly effective is the use of explicit Runge--Kutta methods for \R{dexpinv}, the so-called Runge--Kutta--Munthe-Kaas (RKMK) methods \cite{munthekass98rkm}. To help us distinguish between conventional Runge--Kutta methods and RKMK, consider the three-stage, third-order method with the Butcher tableau\footnote{For traditional concepts such as Butcher tableaux, Runge--Kutta methods and B-series, the reader is referred to \cite{hairer93sod}.} \begin{equation} \label{RK3} \begin{array}{c|ccc} 0 & \\ \frac12 & \frac12\\[2pt] 1 & -1 & 2\\\hline & \frac16 & \frac23 & \frac16\rule{0pt}{13pt} \end{array}. \end{equation} Applied to the ODE $\dot{y}=f(t,y)$, $y(t_n)=y_n\in\mathcal{M}$, evolving on the manifold $\mathcal{M}\subset\BB{R}^d$, it becomes \begin{Eqnarray*} &&k_1=f(t_n,y_n),\\ && k_2=f(t_{n+\frac12},y_n+\Frac12 hk_1),\\ && k_3=f(t_{n+1},y_n-hk_1+2hk_2),\\ &&\Delta=h(\Frac16 k_1+\Frac23 k_2+\Frac16 k_3),\\[3pt] y_{n+1}&=&y_n+\Delta. \end{Eqnarray*} Since we operate in $\BB{R}^d$, there is absolutely no reason for $y_{n+1}$ to live in $\mathcal{M}$. However, once we implement \R{RK3} at the algebra level (truncating first the dexpinv equation \R{dexpinv}), \begin{Eqnarray*} &&k_1=a(t_n,\iota),\\ &&k_2=a(t_{n+\frac12},{\mathrm e}^{hk_1/2}),\\ &&k_3=a(t_{n+1},{\mathrm e}^{-hk_1+2hk_2}),\\ &&\Delta=h(\Frac16 k_1+\Frac23 k_2+\Frac16 k_3),\\[3pt] \omega_{n+1}&=&\Delta+\Frac16 h[\Delta,k_1]\\ y_{n+1}&=&\Lambda({\mathrm e}^{\omega_{n+1}},y_n), \end{Eqnarray*} the solution is guaranteed to stay in $\mathcal{M}$. An important special case of a Lie-group equation is the linear ODE $\dot{v}=a(t)v$, where $a:\BB{R}_+\rightarrow\GG{g}$. Although RKMK works perfectly well in the linear case, special methods do even better. Perhaps the most important is the {\em Magnus expansion\/} \cite{magnus54esd}, $v(t)={\mathrm e}^{\omega(t)}v(0)$, where \begin{Eqnarray} \nonumber \omega(t)&=& \int_0^t a(\xi)\D \xi -\frac12 \int_0^t\!\int_0^{\xi_1} [a(\xi_2),a(\xi_1)]\D\xi_2\D\xi_1 \\ \label{Magnus} &&\mbox{}+\frac14 \int_0^t \! \int_0^{\xi_1} \! \!\int_0^{\xi_2} [[a(\xi_3),a(\xi_2)],a(\xi_1)]\D\xi_3\D\xi_2\D\xi_1\\ \nonumber &&+\frac{1}{12} \int_0^t\!\int_0^{\xi_1}\!\!\int_0^{\xi_2}[a(\xi_3),[a(\xi_2),a(\xi_1)]]\D\xi_3\D\xi_2\D\xi_1+\cdots. \end{Eqnarray} We refer to \cite{iserles99sld,iserles00lgm,blanes09mes} for explicit means to derive expansion terms, efficient computation of the multivariate integrals that arise in this context, and many other implementation details.
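To illustrate the practical effect of working in the Lie algebra and mapping back through the exponential, here is a minimal sketch of our own (not taken from the references above). For the linear equation $\dot{v}=a(t)v$ with skew-symmetric $a(t)$, retaining only the first term of the Magnus expansion and approximating the integral by the midpoint rule gives the second-order ``exponential midpoint'' method $v_{n+1}={\mathrm e}^{ha(t_n+h/2)}v_n$, which stays on the orthogonal group to machine precision, whereas a classical method applied directly in the space of matrices drifts off it.
\begin{verbatim}
# Exponential midpoint rule (lowest-order Magnus) vs. explicit Euler
# for Y' = A(t) Y with A(t) skew-symmetric, so the exact flow is orthogonal.
import numpy as np
from scipy.linalg import expm

def A(t):
    return np.array([[0.0,          t,      -0.4],
                     [ -t,        0.0, np.sin(t)],
                     [0.4, -np.sin(t),       0.0]])

h, N = 0.05, 200
Y_lie, Y_euler = np.eye(3), np.eye(3)
for n in range(N):
    t = n * h
    Y_lie   = expm(h * A(t + h / 2)) @ Y_lie     # exponentiate an algebra element
    Y_euler = Y_euler + h * A(t) @ Y_euler       # classical step in the ambient space

defect = lambda Y: np.linalg.norm(Y.T @ Y - np.eye(3))
print("orthogonality defect, exponential midpoint:", defect(Y_lie))
print("orthogonality defect, explicit Euler:      ", defect(Y_euler))
\end{verbatim}
Here \texttt{expm} is the dense matrix exponential from SciPy; for large systems one would approximate the exponential differently, but the structural point -- the numerical solution remains on the group because an algebra element is exponentiated -- is unchanged.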
Magnus expansions are important in a number of settings where preservation of structure is not an issue, not least in the solution of linear stochastic ODEs \cite{lord08esi}. There are alternative means to expand the solution of \R{dexpinv} in the linear case, not least the {\em Fer expansion,\/} which has recently found an important application in the computation of Sturm--Liouville spectra \cite{ramos15nss}. Another approach to Lie-group equations uses {\em canonical coordinates of the second kind\/} \cite{owren01imb}. \subsection{Conservation of volume} An ODE $\dot{\MM{x}}=\MM{f}(\MM{x})$ is divergence-free if $\MM{\nabla} \cdot \MM{f}(\MM{x})=0$. The flows of divergence-free ODEs are volume-preserving (VP). Volume is important to preserve, as it leads to KAM tori and incompressibility and, most importantly, is a crucial ingredient for ergodicity. Unlike symplecticity, however, phase space volume can generically {\it not} be preserved by Runge--Kutta methods, or even by their generalisations, B-series methods. This was proved independently in \cite{chartier07pfi} and in \cite{iserles07bmc}. Since B-series methods cannot preserve volume, we need to look to other methods. There are essentially two known numerical integration methods that preserve phase space volume. The first volume-preserving method is based on splitting \cite{feng95vpa}. As an example, consider a 3D volume-preserving vector field: \begin{Eqnarray} \dot{x} &=& u(x,y,z) \nonumber \\ \label{3D} \dot{y} &=& v(x,y,z) \\ \dot{z} &=& w(x,y,z) \nonumber \end{Eqnarray} with \begin{displaymath} u_x + v_y + w_z = 0. \end{displaymath} We split this 3D VP vector field into two 2D VP vector fields as follows \begin{equation} \label{VP1} \begin{array}{lcl} \displaystyle \dot{x} = u(x,y,z), &\qquad\quad& \displaystyle \dot{x} = 0,\\[6pt] \displaystyle \dot{y} = -\int\! u_x(x,y,z)\D y, && \displaystyle \dot{y} = v(x,y,z) + \int\! u_x(x,y,z) \D y,\\[12pt] \displaystyle \dot{z} = 0; && \displaystyle \dot{z} = w(x,y,z). \end{array}\hspace*{20pt} \end{equation} The vector field on the left is divergence-free by construction, and since both vector fields add up to \R{3D}, it follows that the vector field on the right is also volume-preserving. Having split the original vector field into 2D VP vector fields, we need to find VP integrators for each of these 2D VP vector fields. But that is easy, since 2D VP vector fields are essentially equivalent to 2D Hamiltonian vector fields (with the extra dimension `frozen'), and all symplectic methods (e.g. symplectic Runge--Kutta methods) are volume-preserving for Hamiltonian vector fields. The above splitting method is easily generalised to $n$ dimensions, where one splits into $n-1$ 2D VP vector fields, and integrates each using a symplectic Runge--Kutta method. An alternative VP integration method was discovered independently by Shang and by Quispel \cite{shang94gfv,quispel95vpi}. We again illustrate this method in 3D. We will look for an integrator of the form \begin{Eqnarray} x_1 &=& g_1(x_1',x_2,x_3) \nonumber \\ \label{VPint} x_2' &=& g_2(x_1',x_2,x_3) \\ x_3' &=& g_3(x_1',x_2',x_3) \nonumber \end{Eqnarray} where (here and below) $x_i= x_i(nh)$, and $x_i'=x_i((n+1)h)$. The reason the form \R{VPint} is convenient is that any such map is VP iff \begin{equation}\label{VPintcon} \frac{\partial x_1}{\partial x_1'} = \frac{\partial x_2'}{\partial x_2}\frac{\partial x_3'}{\partial x_3}.
\end{equation}
To see how to construct a VP integrator of the form \R{VPint}, consider as an example the ODE
\begin{Eqnarray} \dot{x}_1 &=& x_2 + x_1^2 + x_3^3 \nonumber \\ \label{3D2} \dot{x}_2 &=& x_3 + x_1x_2 + x_1^4 \\ \dot{x}_3 &=& x_1 - 3x_1x_3 + x_2^5 \nonumber \end{Eqnarray}
It is easy to check that it is divergence-free. \noindent Now consistency requires that any integrator for \R{3D2} should satisfy
\begin{Eqnarray} x_1' &=& x_1 + h( x_2 + x_1^2 + x_3^3) + \O{h^2} \nonumber \\ \label{consis1} x_2' &=& x_2 + h(x_3 + x_1x_2 + x_1^4) + \O{h^2} \\ x_3' &=& x_3 + h(x_1 - 3x_1x_3 + x_2^5) + \O{h^2} \nonumber \end{Eqnarray}
and therefore \noindent
\begin{Eqnarray} x_1 &=& x_1' - h( x_2 + x_1'^2 + x_3^3) + \O{h^2} \\ \label{consis2} x_2' &=& x_2 + h(x_3 + x_1'x_2 + x_1'^4) + \O{h^2} \\ x_3' &=& x_3 + h(x_1' - 3x_1'x_3 + x_2'^5) + \O{h^2} \end{Eqnarray}
Since we are free to choose any consistent $g_2$ and $g_3$ in \R{VPint}, provided $g_1$ satisfies \R{VPintcon}, we choose the terms designated by $\O{h^2}$ in the equations for $x_2'$ and $x_3'$ above to be identically zero. Equation \R{VPintcon} then yields
\begin{equation}\label{VPegcon} \frac{\partial x_1}{\partial x_1'} = (1+hx_1')(1-3hx_1'). \end{equation}
This can easily be integrated to give
\begin{equation}\label{VPintconsol} x_1 = x_1' - hx_1'^2 - h^2x_1'^3 + k(x_2,x_3;h), \end{equation}
where the function $k$ denotes an integration constant that we can choose appropriately. The simplest VP integrator satisfying both the equation for $x_1$ above and \R{VPintconsol} is therefore:
\begin{Eqnarray} x_1 &=& x_1' - h( x_2 + x_1'^2 + x_3^3) -h^2x_1'^3 \nonumber \\ \label{egint} x_2' &=& x_2 + h(x_3 + x_1'x_2 + x_1'^4) \\ x_3' &=& x_3 + h(x_1' - 3x_1'x_3 + x_2'^5) \nonumber \end{Eqnarray}
A nice aspect of the integrator \R{egint} (and \R{VPint}) is that it is essentially only implicit in one variable. Once $x_1'$ is computed from the first (implicit) equation, the other two equations are essentially explicit. \noindent Of course, the method just described also generalises to any divergence-free ODE in any dimension.
\subsection{Preserving energy and other first integrals}
As mentioned, Hamiltonian systems exhibit two important geometric properties simultaneously: they conserve both the symplectic form and the energy. A famous no-go theorem by \citeasnoun{ge88lph} has shown that it is generically impossible to construct a geometric integrator that preserves both properties at once. One must therefore choose which one of these two to preserve in any given application. Particularly in low dimensions and if the energy surface is compact, there are often advantages in preserving the energy. An energy-preserving B-series method was discovered in \cite{quispel08nce}, cf.\ also \cite{mclachlan99gid}. For any ODE $\dot{\MM{x}} = \MM{f}(\MM{x})$, this so-called average vector field method is given by
\begin{equation}\label{defavf} \frac{\MM{x}'-\MM{x}}{h} = \int_{0}^{1}\MM{f}(\xi \MM{x}' + (1-\xi)\MM{x})\D\xi. \end{equation}
If the vector field $\MM{f}$ is Hamiltonian, i.e. if there exists a Hamiltonian function $H(\MM{x})$ and a constant skew-symmetric matrix $S$ such that $\MM{f}(\MM{x}) = S\nabla H(\MM{x})$, then it follows from \R{defavf} that energy is preserved, i.e. $H(\MM{x}')=H(\MM{x})$. While the B-series method \R{defavf} is energy-preserving for any Hamiltonian $H$, it can be shown that no Runge--Kutta method is energy-preserving for all $H$. For a given {\it polynomial} $H$, however, Runge--Kutta methods preserving that $H$ do exist \cite{iavtrig09hos}.
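Before explaining why such Runge--Kutta methods exist, we remark that the average vector field method \R{defavf} is easy to implement as it stands: one approximates the integral by quadrature and solves the resulting implicit relation, e.g.\ by fixed-point iteration. The following Python sketch illustrates this for a simple Hamiltonian of our own choosing; the Hamiltonian, the quadrature rule and the solver are illustrative assumptions, not part of the method.
\begin{verbatim}
import numpy as np

# illustrative Hamiltonian system: H(q,p) = p^2/2 + q^4/4, x = (q,p), f(x) = S grad H(x)
S = np.array([[0.0, 1.0], [-1.0, 0.0]])
H = lambda x: 0.5 * x[1]**2 + 0.25 * x[0]**4
grad_H = lambda x: np.array([x[0]**3, x[1]])
f = lambda x: S @ grad_H(x)

# Gauss-Legendre nodes and weights, mapped from [-1,1] to [0,1]
nodes, weights = np.polynomial.legendre.leggauss(4)
nodes, weights = 0.5 * (nodes + 1.0), 0.5 * weights

def avf_step(x, h, sweeps=50):
    # average vector field step: (x'-x)/h = int_0^1 f(xi*x' + (1-xi)*x) dxi,
    # with the integral replaced by quadrature and x' found by fixed-point iteration
    xp = x + h * f(x)                                  # explicit Euler predictor
    for _ in range(sweeps):
        avg = sum(w * f(c * xp + (1.0 - c) * x) for c, w in zip(nodes, weights))
        xp = x + h * avg
    return xp

x = np.array([1.0, 0.0])
e0 = H(x)
for _ in range(1000):
    x = avf_step(x, 0.1)
print(e0, H(x))   # the two energies agree up to quadrature and iteration error
\end{verbatim}
For the quartic Hamiltonian above the vector field is cubic, so the four-point quadrature evaluates the integral exactly and the energy error is governed solely by the tolerance of the fixed-point iteration.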
To see why such Runge--Kutta methods exist, note that the integral in \R{defavf} is one-dimensional. This means that e.g.\ for cubic vector fields (and hence for quartic Hamiltonians) an equivalent method is obtained by replacing the integral in \R{defavf} using Simpson's rule:
\begin{equation}\label{simp} \int_{0}^{1} g(\xi)\D\xi \approx \frac{1}{6}\left[g(0) + 4g(\Frac{1}{2}) + g(1)\right]\!, \end{equation}
yielding the Runge--Kutta method
\begin{equation}\label{RKsimp} \frac{\MM{x}'-\MM{x}}{h} = \frac{1}{6}\left[\MM{f}(\MM{x}) + 4\MM{f}\left(\frac{\MM{x}+\MM{x}'}{2}\right) + \MM{f}(\MM{x}')\right]\!, \end{equation}
preserving all quartic Hamiltonians. We note that \R{defavf} has second-order accuracy. Higher-order generalisations have been given in \cite{hairer10epv}. We note that the average vector field method has also been applied to a slew of semi-discretised PDEs in \cite{celledoni12per}. While energy is one of the most important constants of the motion in applications, many other types of first integrals do occur. We note here that all B-series methods preserve all linear first integrals, and that all symplectic B-series methods preserve all quadratic first integrals. So, for example, the implicit midpoint rule
\begin{displaymath} \frac{\MM{x}'-\MM{x}}{h} = \MM{f}\!\left(\frac{\MM{x}+\MM{x}'}{2} \right) \end{displaymath}
(which is symplectic) preserves all linear and quadratic first integrals. There are, however, many cases not covered by any of the above. How does one preserve a cubic first integral that is not energy? And what about Hamiltonian systems whose symplectic structure is not constant? It turns out that, generically, any ODE $\dot{\MM{x}} = \MM{f}(\MM{x})$ that preserves an integral $I(\MM{x})$ can be written in the form
\begin{equation}\label{gendeform} \dot{\MM{x}} = S(\MM{x})\MM{\nabla} I(\MM{x}), \end{equation}
where $S(\MM{x})$ is a skew-symmetric matrix\footnote{Note that in general $S(\MM{x})$ need not satisfy the so-called Jacobi identity.}. An integral-preserving discretisation of \R{gendeform} is given by
\begin{equation} \label{gendeintgrtr} \frac{\MM{x}'-\MM{x}}{h} = \bar{S}(\MM{x},\MM{x}') \bar{\nabla}I(\MM{x},\MM{x}'), \end{equation}
where $\bar{S}(\MM{x},\MM{x}')$ is any consistent approximation to $S(\MM{x})$ (e.g. $\bar{S}(\MM{x},\MM{x}')=S(\MM{x})$), and the {\em discrete gradient\/} $ \bar{\MM{\nabla}}I$ is defined by
\begin{equation} \label{dgdefn1} (\MM{x}'-\MM{x}) \cdot \bar{\MM{\nabla}}I(\MM{x},\MM{x}') = I(\MM{x}') - I(\MM{x}) \end{equation}
and
\begin{equation} \label{dgdefn2} \lim_{\Mm{x}' \rightarrow \Mm{x}} \bar{\MM{\nabla}}I(\MM{x},\MM{x}') = \MM{\nabla} I(\MM{x}). \end{equation}
There are many different discrete gradients that satisfy \R{dgdefn1} and \R{dgdefn2}. A particularly simple one is given by the Itoh--Abe discrete gradient, which, for example, in 3D reads
\begin{equation}\label{ItohAbe} \bar{\nabla}I(\MM{x},\MM{x}') = \left[ \begin{array}{c} \displaystyle \frac{I(x_1',x_2,x_3) - I(x_1,x_2,x_3)}{x_1'-x_1} \\[10pt] \displaystyle \frac{I(x_1',x_2',x_3) - I(x_1',x_2,x_3)}{x_2'-x_2} \\[10pt] \displaystyle \frac{I(x_1',x_2',x_3') - I(x_1',x_2',x_3)}{x_3'-x_3} \end{array} \right]\!. \end{equation}
Other examples of discrete gradients, as well as constructions of the skew-symmetric matrix $S(\MM{x})$ for a given vector field $\MM{f}$ and integral $I$, may be found in \cite{mclachlan99gid}. We note that the discrete gradient method can also be used for systems with any number of integrals.
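Before turning to systems with several integrals, here is a small Python sketch of the single-integral scheme \R{gendeintgrtr} with the Itoh--Abe discrete gradient \R{ItohAbe} in 3D. The integral $I$, the constant skew-symmetric choice $\bar{S}(\MM{x},\MM{x}')=S$ and the fixed-point solver are our own illustrative assumptions; the fallback to an ordinary difference quotient merely guards against vanishing increments in the divided differences.
\begin{verbatim}
import numpy as np

S = np.array([[0.0, 1.0, 1.0],          # constant skew-symmetric S_bar(x, x') = S
              [-1.0, 0.0, 1.0],
              [-1.0, -1.0, 0.0]])

def I(x):
    # an illustrative first integral (not from the text)
    return 0.5 * x[0]**2 + 0.5 * x[1]**2 + 0.25 * x[2]**4

def dquot(lo, hi, i):
    # divided difference of I along coordinate i; lo and hi differ only in that coordinate
    d = hi[i] - lo[i]
    if abs(d) < 1e-10:                   # removable singularity: use a plain difference quotient
        e = np.zeros(3); e[i] = 1e-6
        return (I(lo + e) - I(lo - e)) / 2e-6
    return (I(hi) - I(lo)) / d

def itoh_abe_gradient(x, xp):
    # the Itoh-Abe discrete gradient, written out for three variables
    p0 = np.array([x[0],  x[1],  x[2]])
    p1 = np.array([xp[0], x[1],  x[2]])
    p2 = np.array([xp[0], xp[1], x[2]])
    p3 = np.array([xp[0], xp[1], xp[2]])
    return np.array([dquot(p0, p1, 0), dquot(p1, p2, 1), dquot(p2, p3, 2)])

def dg_step(x, h, sweeps=50):
    # discrete-gradient step: (x' - x)/h = S * dgradI(x, x'), solved by fixed-point iteration
    xp = x.copy()
    for _ in range(sweeps):
        xp = x + h * S @ itoh_abe_gradient(x, xp)
    return xp

x = np.array([1.0, 0.5, 0.5])
i0 = I(x)
for _ in range(200):
    x = dg_step(x, 0.05)
print(i0, I(x))   # I is preserved up to the tolerance of the nonlinear solver
\end{verbatim}
The printed values of $I$ coincide up to solver tolerance, as guaranteed by the skew-symmetry of $\bar{S}$.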
For example, an ODE $\dot{\MM{x}}=\MM{f}(\MM{x})$ possessing two integrals $I(\MM{x})$ and $J(\MM{x})$ can be written as
\begin{equation}\label{ode2ints} \dot{x}_i = S_{ijk}(\MM{x}) \frac{\partial I(\MM{x})}{\partial x_j} \frac{\partial J(\MM{x})}{\partial x_k}, \end{equation}
where the summation convention over repeated indices is assumed and $S(\MM{x})$ is a completely antisymmetric tensor. A discretisation of \R{ode2ints} which preserves both $I$ and $J$ is given by
\begin{equation}\label{disc2ints} \frac{x_i' - x_i}{h} = \bar{S}_{ijk}(\MM{x},\MM{x}')\, \bar{\nabla}I(\MM{x},\MM{x}')\big|_{\,j}\, \bar{\nabla}J(\MM{x},\MM{x}')\big|_{\,k} \end{equation}
with $\bar{S}$ any completely skew approximation of $S$ and $\bar{\nabla}I$ and $\bar{\nabla}J$ discrete gradients as defined above.
\setcounter{equation}{0} \setcounter{figure}{0}
\section{Five recent stories of GNI}
The purpose of this section is not to present a totality of recent research into GNI, an undertaking that would have called for a substantially longer paper. Instead, we wish to highlight a small number of developments with which the authors are familiar and which provide a flavour of the very wide range of issues on the current GNI agenda.
\subsection{Highly oscillatory Hamiltonian systems}
High oscillation occurs in many Hamiltonian systems. Sometimes, e.g.\ in the integration of equations of celestial mechanics, the source of the problem is that we wish to compute the solution across a very large number of periods and the oscillation is an artefact of the time scale in which the solution has physical relevance. In other cases oscillation is implicit in the multiscale structure of the underlying problem. A case in point is provided by the (modified) {\em Fermi--Pasta--Ulam (FPU) equations,\/} describing a mechanical system consisting of alternating stiff harmonic and soft nonlinear springs. The stiff springs impart fast oscillation, while the soft springs generate slow transfer of energy across the system: good numerical integration must capture both! A good starting point (which includes the modified FPU system as a special case) is the second-order ODE
\begin{equation} \label{HiOscODE} \ddot{\MM{q}}+\Omega^2\MM{q}=\MM{g}(\MM{q}),\qquad t\geq0,\qquad \MM{q}(0)=\MM{u}_0,\quad \dot{\MM{q}}(0)=\MM{v}_0, \end{equation}
where $\MM{g}(\MM{q})=-\MM{\nabla}U(\MM{q})$ and
\begin{displaymath} \Omega= \left[ \begin{array}{cc} O & O\\ O & \omega I \end{array} \right]\!,\quad \omega\gg1,\qquad \MM{q}= \left[ \begin{array}{c} \MM{q}_0\\\MM{q}_1 \end{array} \right]\!,\qquad \MM{q}_0\in\BB{R}^{n_0},\;\;\MM{q}_1\in\BB{R}^{n_1}. \end{displaymath}
An important aspect of systems of the form \R{HiOscODE} is that the exact solution, in addition to preserving the total Hamiltonian energy
\begin{equation} \label{HamEn} H(\MM{p},\MM{q})=\frac12 (\|\MM{p}_1\|^2+\omega^2 \|\MM{q}_1\|^2)+\frac12 \|\MM{p}_0\|^2 +U(\MM{q}_0,\MM{q}_1), \end{equation}
where $\dot{\MM{q}}=\MM{p}$, also preserves the {\em oscillatory energy\/}
\begin{equation} \label{OscEn} I(\MM{p},\MM{q})=\frac12 \|\MM{p}_1\|^2+\frac{\omega^2}{2} \|\MM{q}_1\|^2 \end{equation}
for intervals of length $\O{\omega^N}$ for any $N\geq1$. This has been proved using {\em modulated Fourier expansions\/}
\begin{displaymath} \MM{q}(t)=\sum_{m=-\infty}^\infty {\mathrm e}^{{\mathrm i} m\omega t} \MM{z}_m(t).
\end{displaymath}
The solution of \R{HiOscODE} exhibits oscillations at frequency $\O{\omega}$ and this inhibits the efficiency of many symplectic methods, requiring a step size of $\O{\omega^{-1}}$, a situation akin to stiffness in more conventional ODEs. However, by their very structure, exponential integrators (and in particular Gautschi-type methods \R{Gautschi}) are particularly effective in integrating the linear part, which gives rise to high oscillation. The problem with Gautschi-type methods, though, might be the occurrence of resonances, and we need to be careful to avoid them, both in the choice of the right filter (cf.\ the discussion in Subsection~2.1) and in the choice of the step size $h$. Of course, one would like geometric numerical integrators applied to \R{HiOscODE} to exhibit favourable preservation properties with respect to both the total energy \R{HamEn} and the oscillatory energy \R{OscEn}. Applying modulated Fourier expansions to trigonometric and modified trigonometric integrators, this is indeed the case provided that the step size obeys the {\em non-resonance condition\/} with respect to the frequency $\omega$,
\begin{displaymath} |\sin(\Frac12 mh\omega)|\geq c h^{1/2},\qquad m=1,\ldots,N,\quad N\geq2, \end{displaymath}
cf.\ \citeasnoun{hairer09olt}. All this has been generalised to systems with multiple frequencies, with the Hamiltonian function
\begin{displaymath} H(\MM{p},\MM{q})=\overbrace{\frac12 \sum_{j=1}^s \left(\|\MM{p}_j\|^2+\omega_j^2\|\MM{q}_j\|^2\right)}^{\CC{oscillatory}}+\overbrace{\frac12\|\MM{p}_0\|^2+U(\MM{q})}^{\CC{slow}}, \end{displaymath}
where
\begin{displaymath} \MM{p}= \left[ \begin{array}{c} \MM{p}_0\\\MM{p}_1\\\vdots\\\MM{p}_s \end{array} \right]\!,\quad \MM{q}=\left[ \begin{array}{c} \MM{q}_0\\\MM{q}_1\\\vdots\\\MM{q}_s \end{array} \right]\!,\qquad 0<\min_{j=1,\ldots,s}\omega_j,\quad 1\ll \max_{j=1,\ldots,s}\omega_j \end{displaymath}
for both the exact solution \cite{gauckler13eso} and discretisations obtained using trigonometric and modified trigonometric integrators \cite{cohen15lta}. Further achievements and open problems in the challenging area of marrying symplectic integration and high oscillation are beautifully described in \cite{hairer14cgi}.
\subsection{Kahan's `unconventional' method}
A novel discretisation method for quadratic ODEs was introduced and studied in \cite{kahan93unm}. This new method discretised the vector field
\begin{equation}\label{kahan1} \dot{x}_i = \sum_{j,k}^{}a_{ijk}x_jx_k + \sum_{j}^{}b_{ij}x_j + c_i \end{equation}
as follows:
\begin{equation}\label{kahan2} \frac{x_i'-x_i}{h} = \sum_{j,k}^{}a_{ijk} \left(\frac{x_jx_k' + x_j'x_k}{2}\right) + \sum_{j}^{}b_{ij} \left( \frac{x_j + x_j'}{2} \right) + c_i. \end{equation}
Kahan called the method \R{kahan2} `unconventional', because it treats the quadratic terms differently from the linear terms. He also noted some nice features of \R{kahan2}, e.g. that it often seemed to be able to integrate through singularities.
\noindent \textbf{Properties of Kahan's method:}
\begin{enumerate} \item {\it Kahan's method is (the reduction of) a Runge--Kutta method.} \citeasnoun{celledoni13gpk} showed that \R{kahan2} is the reduction to quadratic vector fields of the Runge--Kutta method
\begin{equation}\label{kahan3} \frac{\MM{x}'-\MM{x}}{h} = 2 \MM{f}\left(\frac{\MM{x} + \MM{x}'}{2}\right) - \frac{1}{2} \MM{f}(\MM{x}) - \frac{1}{2} \MM{f}(\MM{x}'). \end{equation}
This explains {\em inter alia\/} why Kahan's method preserves all linear first integrals.
\item {\it Kahan's method preserves a modified energy and measure.} For any Hamiltonian vector field of the form \begin{equation}\label{hamode} \dot{\MM{x}} = \MM{f}(x) = S\MM{\nabla} H(\MM{x}), \end{equation} with cubic Hamiltonian $H(\MM{x})$ and constant symplectic (or Poisson) structure $S$, Kahan's method preserves a modified energy as well as a modified measure exactly \cite{celledoni13gpk}. The modified volume is \begin{equation}\label{modvol} \frac{\D x_1 \wedge \dots \wedge \D x_n}{\det \!\left( I - \frac{1}{2}hf'(\MM{x}) \right)}, \end{equation} while the modified energy is \begin{equation}\label{modenergy} \tilde{H}(\MM{x}) := H(\MM{x}) + \frac{1}{3}h \MM{\nabla} H(\MM{x})^\top \!\left(I - \frac{1}{2}hf'(\MM{x}) \right)^{-1} \!\MM{f}(\MM{x}). \end{equation} \item {\it Kahan's method preserves the integrability of many integrable systems of quadratic ODEs.} Beginning with the work of Hirota and Kimura, subsequently extended by Suris and collaborators \cite{petrera11ihk}, and by Quispel and collaborators \cite{celledoni13gpk,celledoni14ipk,vanderkamp14iss}, it was shown that Kahan's method preserves the complete integrability of a surprisingly large number of quadratic ODEs. \end{enumerate} \noindent Here we list some 2D vector fields whose integrability is preserved by Kahan's method: \begin{itemize} \item Quadratic Hamiltonian systems in 2D: \noindent The 9-parameter family \begin{equation}\label{9paramfam} \left[ \begin{array}{c} \dot{x} \\ \dot{y} \end{array} \right] = \left[ \begin{array}{c} bx^2 + 2cxy +dy^2 +fx + gy + i \\ -ax^2 - 2bxy - cy^2 - ex -fy -h \end{array} \right]\!; \end{equation} \item Suslov systems in 2D: \noindent The 9-parameter family \begin{equation}\label{suslov} \left[ \begin{array}{c} \dot{x} \\ \dot{y} \end{array} \right] = l(x,y) \left[ \begin{array}{cc} 0 & 1 \\ -1 & 0 \end{array} \right] \nabla H(x,y), \end{equation} where $l(x,y) = ax+by+c$; $H(x,y) = dx^2 + exy +fy^2 + gx + hy + i$; \item Reduced Nahm equations in 2D: \noindent Octahedral symmetry: \begin{equation}\label{nahm1} \left[\begin{array}{c} \dot{x} \\ \dot{y} \end{array} \right] = \left[ \begin{array}{c} 2x^2 - 12y^2 \\ -6x^2 - 4y^2 \end{array} \right]\!; \end{equation} Icosahedral symmetry: \begin{equation}\label{nahm2} \left[ \begin{array}{c} \dot{x} \\ \dot{y} \end{array} \right] = \left[\begin{array}{c} 2x^2 - y^2 \\ -10xy + y^2 \end{array} \right]\!. \end{equation} \end{itemize} The modified energy and measure for the Kahan discretisations of these 2D systems, as well as of many other (higher-dimensional) integrable quadratic vector fields are given in \cite{petrera11ihk,celledoni13gpk,celledoni14ipk}. Generalisations to higher degree polynomial equations using polarisation are presented in \cite{celledoni15dpv}. \subsection{Applications to celestial mechanics} GNI methods particularly come into their own when the integration time is large compared to typical periods of the system. Thus long-term integrations of e.g. solar-type systems and of particle accelerators typically need symplectic methods. In this subsection we focus on the former\footnote{A very readable early review of integrators for solar system dynamics is \cite{morbidelli02mis}, cf also \cite{morbidelli02mcm}}. One of the first symplectic integrations of the solar system was done in \cite{sussman92ces} where it was confirmed that the solar system has a positive Lyapunov exponent, and hence exhibits chaotic behaviour cf \cite{laskar03css}. 
More recently these methods have been improved and extended \cite{mclachlan95cmp,duncan98mts,laskar11nos,blanes15nfs}. Several symplectic integrators of high order were tested in \cite{farres13hps}, in order to determine the best splitting scheme for long-term studies of the solar system. These various methods have resulted in the fact that numerical algorithms for solar system dynamics are now so accurate that they can be used to define the geologic time scales in terms of the initial conditions and parameters of solar system models (or vice versa). \subsection{Symmetric Zassenhaus splitting and the equations of quantum mechanics} Equations of quantum mechanics in the semiclassical regime represent a double challenge of structure conservation and high oscillation. A good starting point is the linear Schr\"odinger equation \begin{equation} \label{LSE} \frac{\partial u}{\partial t}={\mathrm i}\varepsilon \frac{\partial^2 u}{\partial x^2}-{\mathrm i}\varepsilon^{-1} V(x)u \end{equation} (for simplicity we restrict our discourse to a single space dimension), given in $[-1,1]$ with periodic boundary conditions. Here $V$ is the potential energy of a quantum system, $|u(x,t)|^2$ is a position density of a particle and $0<\varepsilon\ll1$ represents the difference in mass between an electron and nuclei. It is imperative to preserve the unitarity of the solution operator (otherwise $|u(\,\cdot\,,t)|^2$ is no longer a probability function), but also deal with oscillation at a frequency of $\O{\varepsilon^{-1}}$. A conventional approach advances the solution using a palindromic splitting \R{palindromic}, but this is suboptimal for a number of reasons. Firstly, the number of splittings increases exponentially with order. Secondly, error constants are exceedingly large. Thirdly, quantifying the quality of approximation in terms of the step-size $h$ is misleading, because there are three small quantities at play: the step size $h$, $N^{-1}$ where $N$ is the number of degrees of freedom in space discretisation (typically either a spectral method or spectral collocation) and, finally, $\varepsilon>0$ which, originating in physics rather than being a numerical artefact, is the most important. We henceforth let $N=\O{\varepsilon^{-1}}$ (to resolve the high-frequency oscillations) and $h=\O{\varepsilon^\sigma}$ for some $\sigma>0$ -- obviously, the smaller $\sigma$, the larger the time step. \citeasnoun{bader14eas} have recently proposed an alternative approach to the splitting of \R{LSE}, of the form \begin{equation} \label{Zassenhaus} {\mathrm e}^{{\mathrm i} h (\varepsilon \partial_x^2-\varepsilon^{-1} V)}\approx {\mathrm e}^{\mathcal{R}_0}{\mathrm e}^{\mathcal{R}_1} \cdots {\mathrm e}^{\mathcal{R}_s}{\mathrm e}^{\mathcal{T}_{s+1}}{\mathrm e}^{\mathcal{R}_s} \cdots {\mathrm e}^{\mathcal{R}_1} {\mathrm e}^{\mathcal{R}_0} \end{equation} such that $\mathcal{R}_k=\O{\varepsilon^{\alpha_k}}$, $\mathcal{T}_{s+1}=\O{\varepsilon^{\alpha_{s+1}}}$, where $\alpha_0\leq \alpha_1<\alpha_2<\alpha_3<\cdots$ -- the {\em symmetric Zassenhaus splitting.\/} Here $\partial_x=\partial / \partial x$. The splitting \R{Zassenhaus} is derived at the level of differential operators (i.e., prior to space discretisation), applying the symmetric Baker--Campbell--Hausdorff formula to elements in the free Lie algebra spanned by $\partial_x^2$ and $V$. 
For $\sigma=1$, for example, this yields \begin{Eqnarray*} \mathcal{R}_0&=&-\Frac12\tau\varepsilon^{-1}V=\O{1},\\ \mathcal{R}_1&=&\Frac12\tau\varepsilon \partial_x^2=\O{1},\\ \mathcal{R}_2&=&\Frac{1}{24}\tau^3\varepsilon^{-1}(\partial_xV)^2+\Frac{1}{12} \tau^3\varepsilon \{(\partial_x^2V)\partial_x^2+\partial_x^2[(\partial_x^2V)\,\cdot\,]\}=\O{\varepsilon^2},\\ \mathcal{R}_3&=&-\Frac{1}{120}\tau^5\varepsilon^{-1}(\partial_x^2 V)(\partial_xV)^2 -\Frac{1}{24}\tau^3\varepsilon (\partial_x^4V) +\Frac{1}{240}\tau^5\varepsilon \left(7\{(\partial_x^2V)^2\partial_x^2 \right.\\ &&\mbox{}+\partial_x^2[(\partial_x^2V)^2\,\cdot\,] +\{(\partial_x^3V)(\partial_xV)\partial_x^2\left.\mbox{}+\partial_x^2[(\partial_x^3V)(\partial_xV)\,\cdot\,]\}\right)\\ && +\Frac{1}{120}\tau^5\varepsilon^{-3} \{(\partial_x^4V)\partial_x^4+\partial_x^4[(\partial_x^4V)\,\cdot\,]\}=\O{\varepsilon^4}, \end{Eqnarray*} where $\tau={\mathrm i} h$. Note that all the commutators, ubiquitous in the BCH formula, have disappeared: in general, the commutators in this free Lie algebra can be replaced by linear combinations of derivatives, with the remarkable property of {\em height reduction:\/} each commutator `kills' one derivative, e.g. \begin{displaymath} [V,\partial_x^2]=-(\partial^2_x V)-2(\partial_xV)\partial_x,\qquad [[V,\partial_x^2],\partial_x^2]=(\partial_x^4V)+4(\partial_x^3V)\partial_x+4(\partial_x^2V)\partial_x^2. \end{displaymath} Once we discretise with spectral collocation, $\mathcal{R}_0$ becomes a diagonal matrix and its exponential is trivial, while ${\mathrm e}^{\mathcal{R}_1}\MM{v}$ can be computed in two FFTs for any vector $\MM{v}$ because $\mathcal{R}_1$ is a Toeplitz circulant. Neither $\mathcal{R}_2$ nor $\mathcal{R}_3$ possess useful structure, except that they are {\em small!\/} Therefore we can approximate ${\mathrm e}^{\mathcal{R}_k}\MM{v}$ using the Krylov--Arnoldi process in just 3 and 2 iterations for $k=2$ and $k=3$, respectively, to attain an error of $\O{\varepsilon^6}$ \cite{bader14eas}. All this has been generalised to time-dependent potentials and is applicable to a wider range of quantum mechanics equations in the semiclassical regime. \setcounter{equation}{0} \setcounter{figure}{0} \section{Beyond GNI} Ideas in one area of mathematical endeavour often inspire work in another area. This is true not just because new mathematical research equips us with a range of innovative tools but because it provides insight that casts new light not just on the subject in question but elsewhere in the mathematical universe. GNI has thus contributed not just toward its own goal, better understanding of structure-preserving discretisation methods for differential equations, but in other, often unexpected, directions. \subsection{GNI meets abstract algebra} The traditional treatment of discretisation methods for differential equations was wholly analytic, using tools of functional analysis and approximation theory. (Lately, also tools from algebraic topology.) GNI has added an emphasis on geometry and this leads in a natural manner into concepts and tools from abstract algebra. As often in such mathematical dialogues, while GNI borrowed much of its conceptual background from abstract algebra, it has also contributed to the latter, not just with new applications but also new ideas. \begin{itemize} \item {\em B-series and beyond.\/} Consider numerical integration methods that associate to each vector field $\MM{f}$ a map $\MM{\psi}_h(\MM{f})$. 
A method $\MM{\psi}_h$ is called $g$-covariant\footnote{Also called equivariant.} if the following diagram commutes, \begin{center} \begin{picture}(250,135) \thicklines \put (-20,0) {$\tilde{\MM{x}}=\MM{\psi}_h(\MM{f})(\MM{x})$} \put (50,2) {\vector(1,0){140}} \put (200,0) {$\tilde{\MM{y}}=\MM{\psi}_h(\tilde{\MM{f}})(\MM{y})$} \put (15,110) {\vector(0,-1){95}} \put (225,110) {\vector(0,-1){95}} \put (-5,118) {$\dot{\MM{x}}=\MM{f}(\MM{x})$} \put (47,120) {\vector(1,0){148}} \put (210,118) {$\dot{\MM{y}}=\tilde{\MM{f}}(\MM{y})$} \put (100,8) {$\MM{x}=\MM{g}(\MM{y})$} \put (100,126) {$\MM{x}=\MM{g}(\MM{y})$} \end{picture} \end{center} It follows that if $g$ is a symmetry of the vector field $f$ and $\psi$ is $g$-covariant, then $\psi$ preserves the symmetry $g$. It seems that this concept of covariance for integration methods was first introduced in \cite{mclachlan95cps} and \cite{mclachlan98nit}. It is not hard to check that all B-series methods are covariant with respect to the group of affine transformations. A natural question to ask then, was ``are B-series methods the only numerical integration methods that preserve the affine group?". This question was open for many years, until it was answered in the negative by \cite{munthekaas15abs}, who introduced a more general class of integration methods dubbed ``aromatic Butcher series", and showed that (under mild assumptions) this is the most general class of methods preserving affine covariance. Expansions of methods in this new class contain both rooted trees (as in B-series), as well as products of rooted trees and so-called $k$-loops \cite{iserles07bmc}. Whereas it may be said that to date the importance of aromatic B-series has been at the formal rather than at the constructive level, these methods may hold the promise of the construction of affine-covariant volume-preserving integrators. \item {\em Word expansions.\/} Classical B-series can be significantly generalised by expanding in {\em word series\/} \cite{murua15wsd}. This introduced an overarching framework for Taylor expansions, Fourier expansions, modulated Fourier expansions and splitting methods. We consider an ODE of the form \begin{equation} \label{word_series} \dot{\MM{x}}=\sum_{a\in\mathcal{A}} \lambda_a(t) \MM{f}_a(\MM{x}),\qquad \MM{x}(0)=\MM{x}_0, \end{equation} where $\mathcal{A}$ is a given {\em alphabet.\/} The solution of \R{word_series} can be formally expanded in the form \begin{displaymath} \MM{x}(t)=\sum_{n=0}^\infty \sum_{\Mm{w}\in\mathcal{W}_n} \alpha_{\Mm{w}}(t) f_{\Mm{w}}(\MM{x}_0), \end{displaymath} where $\mathcal{W}_n$ is the set of all words with $n$ letters from $\mathcal{A}$. The coefficients $\alpha_{\Mm{w}}$ and functions $\MM{f}_{\Mm{w}}$ can be obtained recursively from the $\lambda_a$s and $\MM{f}_a$s in a manner similar to B-series. Needless to say, exactly like with B-series, word series can be interpreted using an algebra over rooted trees. The concept of word series is fairly new in numerical mathematics but it exhibits an early promise to provide a powerful algebraic tool for the analysis of dynamical systems and their discretisation. \item {\em Extension of Magnus expansions.\/} Let $\mathcal{W}$ be a {\em Rota--Baxter algebra,\/} a commutative unital algebra equipped with a linear map $R$ such that \begin{displaymath} R(x)R(y)=R(R(x)y+xR(y)+\theta xy),\qquad x,y\in\mathcal{W}, \end{displaymath} where $\theta$ is a parameter. 
The inverse $\partial$ of $R$ obeys
\begin{displaymath} \partial(xy)=\partial(x)y+x\partial(y)+\theta\partial(x)\partial(y) \end{displaymath}
and is hence a generalisation of a derivation operator: a neat example, with clear numerical implications, is the backward difference $\partial(x)=[x(t)-x(t-\theta)]/\theta$. \citeasnoun{ebrahimifard09amf} generalised Magnus expansions to this and similar settings, e.g.\ dendriform algebras. Their work uses the approach in \cite{iserles99sld}, representing individual `Magnus terms' as rooted trees, but generalises it a great deal.
\item {\em The algebra of the Zassenhaus splitting.\/} The success of the Zassenhaus splitting \R{Zassenhaus} rests upon two features: firstly, the replacement of commutators by simpler, more tractable expressions and, secondly, the height reduction of derivatives under commutation. \citeasnoun{singh15ath} has derived an algebraic structure $\GG{J}$ which, encoding these two features, allows for a far-reaching generalisation of the Zassenhaus framework. The elements of $\GG{J}$ are operators of the form $\langle f\rangle_k =f\circ\,\partial_x^k+\partial_x^k\circ f$, where $k\in\BB{Z}_+$ and $f$ resides in a suitable function space. $\GG{J}$ can be endowed with a Lie-algebraic structure and, while bearing similarities with the Weyl algebra and the Heisenberg group, is a new and intriguing algebraic concept.
\end{itemize}
\subsection{Highly oscillatory quadrature}
Magnus expansions \R{Magnus} are particularly effective when the matrix $a(t)$ oscillates rapidly. This might seem paradoxical -- we are all conditioned to expect high oscillation to be `difficult' -- but actually makes a great deal of sense. Standard numerical methods are based on Taylor expansions, hence on {\em differentiation,\/} and their error typically scales as a high derivative of the solution. Once a function oscillates rapidly, differentiation roughly corresponds to multiplying the amplitude by the frequency; high derivatives become large and so does the error. However, the Magnus expansion does not differentiate, it {\em integrates!\/} This has the opposite effect: the more we integrate, the smaller the amplitude, and the series \R{Magnus} converges more rapidly. Indeed, often it pays to render a linear system highly oscillatory by a change of variables, in a manner described in \cite{iserles02ged}, and then solve it considerably faster and more cheaply. Yet, once we contemplate the discretisation of \R{Magnus} for a highly oscillatory matrix function $a(t)$, we soon come up against another problem, usually considered difficult, if not insurmountable: the computation of multivariate integrals of highly oscillatory functions. In a long list of methods for highly oscillatory quadrature (HOQ) {\em circa\/} 2002, ranging from the useless to the dubious, one method stood out: \citeasnoun{levin82pco} proposed to calculate univariate integrals by converting the problem to an ODE and using collocation. This was the only effective method around, yet incompletely understood. The demands of GNI gave the initial spur to the emergence, in the last ten years, of a broad swath of new methods for HOQ: Filon-type methods, which replace the {\em non-oscillatory\/} portion of the integrand by an interpolating polynomial \cite{iserles05eqh}, improved Levin-type methods \cite{olver06qmh} and the method of numerical stationary phase of \citeasnoun{huybrechs06eho}. The common characteristic of all these methods is that they are based on asymptotic expansions.
This means that high oscillation is no longer the enemy -- indeed, the faster the oscillation, the smaller the error! Highly oscillatory integrals occur in numerous applications, from electromagnetic and acoustic scattering to fluid dynamics, quantum mechanics and beyond. Their role in GNI is minor. However, their modern numerical theory was originally motivated by a problem in GNI. This is typical of how scholarship progresses, and it is only natural that HOQ has severed its GNI moorings and become an independent area in its own right.
\subsection{Structured linear algebra}
GNI computations often lead to specialised problems in numerical linear algebra. However, structure preservation has a wider impact in linear algebraic computations. Often a matrix in an algebraic problem belongs to an algebraic structure, e.g.\ a specific Lie algebra or a symmetric space, and it is important to retain this in computation -- the sobriquet ``Geometric Numerical Algebra'' might be appropriate! Moreover, as in GNI so in GNA, respecting structure often leads to better, more accurate and cheaper numerical methods. Finally, structured algebraic computation is often critical to GNI computations.
\begin{itemize} \item Matrix factorization is the lifeblood of numerical algebra, the basis of the most effective algorithms for the solution of linear systems, computation of eigenvalues and solution of least-squares problems. A major question in GNA is ``Suppose that $A\in\mathcal{A}$, where $\mathcal{A}$ is a set of matrices of given structure. Given a factorization $A=BC$ according to some set of rules, what can we say about the structure of $B$ or $C$?''. \citeasnoun{mackey05sfs} addressed three such `factorization rules': the {\em matrix square root,\/} $B=C$, the {\em matrix sign,\/} where the eigenvalues of $B$ are $\pm1$, and the {\em polar decomposition,\/} with unitary $B$ and positive semidefinite $C$. They focussed on sets $\mathcal{A}$ generated by a sesquilinear form $\langle\,\cdot\,,\,\cdot\,\rangle$. Such sets conveniently fit into three classes:
\begin{enumerate} \item[(a)] Automorphisms $G$, such that $\langle G\MM{x},G\MM{y}\rangle=\langle\MM{x},\MM{y}\rangle$, generate a {\em Lie group;\/} \item[(b)] Self-adjoint matrices $S$, such that $\langle S\MM{x},\MM{y}\rangle=\langle \MM{x},S\MM{y}\rangle$, generate a {\em Jordan algebra;\/} and \item[(c)] Skew-adjoint matrices $H$, such that $\langle H\MM{x},\MM{y}\rangle=-\langle\MM{x},H\MM{y}\rangle$, generate a {\em Lie algebra.\/} \end{enumerate}
It is natural to expect that conservation of structure under factorization would depend on the nature of the underlying sesquilinear form. The surprising outcome of \cite{mackey05sfs} is that, for current purposes, it is sufficient to split sesquilinear forms into just two classes, unitary and orthosymmetric, each exhibiting similar behaviour.
\item Many algebraic eigenvalue problems are structured, the simplest example being that the eigenvalues of a symmetric matrix are real and those of a skew-symmetric matrix are purely imaginary: all standard methods for the computation of eigenvalues respect this. However, many other problems might have more elaborate structure, and this is the case in particular for nonlinear eigenvalue problems. An important example, with significant applications in mechanics, is
\begin{equation} \label{QuadEig} (\lambda^2 M+\lambda G+K)\MM{x}=\MM{0}, \end{equation}
where both $M$ and $K$ are symmetric, while $G$ is skew-symmetric.
The eigenvalues $\lambda$ of \R{QuadEig} exhibit a {\em Hamiltonian\/} pattern: if $\lambda$ is in the spectrum then so are $-\lambda$, $\bar{\lambda}$ and $-\bar{\lambda}$.\footnote{To connect this to the GNI narrative, such a pattern is displayed by matrices in the {\em symplectic Lie algebra\/} $\Gg{sp}(2n)$.} As often in numerical algebra, \R{QuadEig} is particularly relevant when the underlying matrices are large and sparse. Numerical experiments demonstrate that standard methods for the computation of quadratic eigenvalue problems may fail to retain the Hamiltonian structure of the spectrum, but this structure can be preserved by bespoke algorithms, using a symplectic version of the familiar Lanczos algorithm, cf.\ \cite{benner07slc}. This is just one example of the growing field of structured eigenvalue and inverse eigenvalue problems.
\item The exponential from an algebra to a group: recall from the Lie-group methods of Section~2.2 that a critical step, e.g.\ in the RKMK methods, is the exponential map from a Lie algebra to a Lie group. Numerical analysis knows numerous effective ways to approximate the matrix exponential \cite{moler03ndw}, yet most of them fail to map a matrix from a Lie algebra to a Lie group! There is little point in expending intellectual and computational effort to preserve structure, only to abandon the latter in the ultimate step, and this explains the interest in computations of the matrix exponential that are guaranteed to map $A$ in a Lie algebra to an element of the corresponding Lie group. While early methods used structure constants and, for maximal sparsity, Lie-algebraic bases given by root-space decomposition \cite{celledoni01mam}, the latest generation of algorithms is based upon the {\em generalised polar decomposition} \cite{munthekaas01gpd}.
\end{itemize}
\section*{Acknowledgments}
This work has been supported by the Australian Research Council. The authors are grateful to David McLaren for assistance during the preparation of this paper, as well as to Philipp Bader, Robert McLachlan and Marcus Webb, whose comments helped to improve this paper.
\end{document}
\begin{document}
\title{Orthogonal to principal ideles}
\begin{abstract} We describe the orthogonal to the group of principal ideles with respect to the global tame symbol pairing on the group of ideles of a smooth projective algebraic curve over a field. \end{abstract}
\section{Introduction}
Harmonic analysis on the group of adeles $\Ab_X$ of a smooth projective algebraic curve $X$ over a finite field plays a fundamental role in the Tate--Iwasawa method in the study of the zeta-function of $X$. A crucial fact here is that the field of rational functions $K$ on $X$, being a subgroup of $\Ab_X$, coincides with its own orthogonal $K^{\bot}$ with respect to a natural pairing on $\Ab_X$. Parshin~\cite{Par1},~\cite{Par2} found a version of the Tate--Iwasawa method based on harmonic analysis on the group of ideles~$\Ab_X^*$. Thus a natural problem is to investigate a multiplicative analogue of the self-orthogonality of $K$, that is, to describe the orthogonal~$K^{*\,\bot}$ to the subgroup $K^*\subset \Ab^*_X$ with respect to the global tame symbol pairing
$$ (\cdot,\cdot)_X\;:\;\Ab_X^*\times \Ab_X^*\longrightarrow k^*\,,\qquad (f,g)_X=\mbox{$\prod\limits_{x\in X}{\rm Nm}_{k(x)/k}(f_x,g_x)_x$}\,, $$
where $(\cdot,\cdot)_x$ are the local tame symbol pairings (or Hilbert symbols). This question was treated recently by Mu\~{n}oz Porras, Navas Vicente, Pablos Romo, Plaza Mart\'in~\cite[Theor.\,5.5]{MP}. They considered the cases when $X$ is defined either over a finite field or over the field of complex numbers. In the latter case, they used analytic considerations with $\sigma$-functions and prime forms on Riemann surfaces. In this note, using algebraic methods, we describe the orthogonal $K^{*\,\bot}$ when $X$ is defined over an arbitrary ground field $k$. The kernel $U$ of the global tame symbol pairing has an explicit description, see Lemma~\ref{lem:Uinfinite} and Lemma~\ref{lem:Ufinite}, and is clearly contained in $K^{*\,\bot}$. Thus, in order to describe $K^{*\,\bot}$, it is enough to describe the quotient $K^{*\,\bot}/(K^*\cdot U)$, and this is what we do, see~Theorem~\ref{theo:infinite}, Remark~\ref{rmk:maingener}, and Theorem~\ref{theo:finite}. In particular, when $k$ is algebraically closed, or, more generally, when the group $k^*$ is divisible, we show that there is an exact sequence
$$ 0\longrightarrow {\rm Hom}\big({\rm Pic}^0(X),k^*\big)\longrightarrow K^{*\,\bot}/(K^*\cdot U)\longrightarrow{\rm Pic}^0(X)\longrightarrow 0\,. $$
When $k$ is finite, we prove the equality ${K^{*\,\bot}=K^*\cdot U}$ (note that this differs from the description of $K^{*\,\bot}$ given in~\cite{MP}). Of course, one can show this fact using class field theory, but we proceed in the opposite direction. Namely, we prove this fact using the non-degeneracy of the Weil pairing and a relation between the Weil pairing and the tame symbol, see Proposition~\ref{prop:Weiltame}. Then we deduce that there is a natural isomorphism between $C_X/(q-1)$, where $C_X=\Ab^*_X/K^*$, and the universal abelian $(q-1)$-torsion quotient of the Galois group of~$K$, see~Corollary~\ref{cor:CFT}. Note that this statement implies the second fundamental inequality for Kummer extensions in the function field case. We believe that this provides a clearer proof of this important step in the construction of class field theory than the previously known sequence of tricks, see, e.g., the book of Artin and Tate~\cite[\S\,VI.2]{AT}. The author is very grateful to D.\,V.\,Osipov and A.\,N.\,Parshin for many useful suggestions.
The author is partially supported by Laboratory of Mirror Symmetry NRU HSE, RF Government grant, ag. no. 14.641.31.0001 \section{Orthogonal to an isotropic subgroup}\label{sec:abstr} Let~$A$ and~$N$ be abelian groups and let $$ (\cdot,\cdot)\::\;A\times A\longrightarrow N $$ be a bilinear pairing. For simplicity, we assume that the pairing is either symmetric or antisymmetric to avoid the difference between left and right orthogonals. However, Proposition~\ref{prop:key} below is valid for an arbitrary pairing as well. For a subgroup $E\subset A$, denote by $E^{\bot}\subset A$ the orthogonal to $E$ in $A$ with respect to the pairing $(\cdot,\cdot)$. Let $B,C\subset A$ be subgroups which are isotropic with respect to the pairing~$(\cdot,\cdot)$, that is, ${B\subset B^{\bot}}$ and ${C\subset C^{\bot}}$. Put $$ A'=(B\cap C)^{\bot}\subset A\,. $$ Clearly, we have $B,C,B^{\bot},C^{\bot}\subset A'$. Our aim is to describe the quotient $B^{\bot}/B$ in terms of~$A'/(B+C)$. Given abelian groups $H\subset G$ and an element $g\in G$, we usually denote by $[g]$ the class of $g$ in the quotient $G/H$ when it is clear from the context which subgroup $H$ in $G$ is considered. The pairing $(\cdot,\cdot)$ defines naturally the maps $$ \alpha\;:\;C\longrightarrow {\rm Hom}(A/C,N)\,,\qquad \beta\;:\;B\cap C\longrightarrow {\rm Hom}(A/A',N)\,, $$ both given by the formula $c\longmapsto \big([a] \mapsto (a,c)\big)$, where $a\in A$, $c\in C$. \begin{prop}\label{prop:key} Suppose that the following conditions are satisfied: \begin{itemize} \item[(i)] the map $\alpha$ is an isomorphism; \item[(ii)] the natural map ${\rm Hom}\big(A/(B+C),N\big)\to {\rm Hom}\big(A'/(B+C),N\big)$ is surjective. \end{itemize} Then there is a decreasing filtration $B^{\bot}/B=F^0\supset F^1\supset F^2\supset F^3=0$ with the following adjoint quotients: $$ F^0/F^1\simeq {\rm Im}\big(B^{\bot}\to A'/(B+C)\big)\,,\qquad F^1/F^2\simeq {\rm Hom}\big(A'/(B+C),N\big)\,,\qquad F^2\simeq {\rm Coker}(\beta)\,. $$ \end{prop} \begin{proof} Since $B\subset A'$, there are embeddings $$ B^{\bot}\supset A'^{\,\bot}\,,\qquad B^{\bot}\cap C\supset A'^{\,\bot}\cap C\supset B\cap C\,. $$ Define the filtration as follows: $$ F^1={\rm Im}(B^{\bot}\cap C\to B^{\bot}/B)\simeq (B^{\bot}\cap C)/(B\cap C)\,, $$ $$ F^2={\rm Im}(A'^{\,\bot}\cap C\to B^{\bot}/B)\simeq (A'^{\,\bot}\cap C)/(B\cap C)\,. $$ Let us describe the adjoint quotients. It follows from condition~(i) that there are isomorphisms \begin{equation}\label{eq:auxilisom} B^{\bot}\cap C\simeq {\rm Hom}\big(A/(B+C),N\big)\,,\qquad A'^{\,\bot}\cap C\simeq {\rm Hom}(A/A',N)\,. \end{equation} The second isomorphism in~\eqref{eq:auxilisom} implies that ${F^2\simeq {\rm Coker}(\beta)}$. Both isomorphisms in~\eqref{eq:auxilisom} together with condition~(ii) imply that the quotient ${F^1/F^2\simeq (B^{\bot}\cap C)/(A'^{\,\bot}\cap C)}$ is isomorphic to ${{\rm Hom}\big(A'/(B+C),N\big)}$. It follows from the embedding $B\subset B^{\bot}$ that there is an equality $$ B+(B^{\bot}\cap C)=B^{\bot}\cap (B+C)\,. $$ Hence the quotient~$F^0/F^1$ is isomorphic to $B^{\bot}/\big(B^{\bot}\cap (B+C)\big)$, which is also isomorphic to image of the natural map $B^{\bot}\to A'/(B+C)$. \end{proof} Actually, condition~(i) of Proposition~\ref{prop:key} implies that the map $\beta$ is injective. Now we give two corollaries of Proposition~\ref{prop:key}, which will be useful for the applications to the tame symbol pairing. 
Let us say that a bilinear pairing between abelian groups ${G\times H\to N}$ is unimodular if it induces isomorphisms ${G\simeq {\rm Hom}(H,N)}$ and ${H\simeq {\rm Hom}(G,N)}$.
\begin{cor}\label{cor:key} Suppose that the following conditions are satisfied: \begin{itemize} \item[(i)] the map $\alpha$ is an isomorphism; \item[(ii)] the natural pairing $$ (\cdot,\cdot)\;:\;(B\cap C)\times A/(B+C)\longrightarrow N $$ is unimodular. \end{itemize} Then there is an equality $B=B^{\bot}$. \end{cor}
\begin{proof} The isomorphism ${A/(B+C)\simeq {\rm Hom}(B\cap C,N)}$ implies that $A'=B+C$, that is, $A'/(B+C)=0$. Together with the isomorphism ${B\cap C\simeq {\rm Hom}\big(A/(B+C),N\big)}$, this implies that $\beta$ is an isomorphism. Thus, by Proposition~\ref{prop:key}, we have $F^0/F^1=F^1/F^2=F^2=0$, whence $B=B^{\bot}$. \end{proof}
Let us introduce more notation. We have an exact sequence
\begin{equation}\label{eq:secondexabs} 0\longrightarrow B/(B\cap C)\longrightarrow A'/C\longrightarrow A'/(B+C)\longrightarrow 0\,. \end{equation}
Define a map $$ \gamma\;:\; A'/(B+C)\longrightarrow{\rm Ext}^1\big(A'/(B+C),N\big) $$ as follows. The class $[a]\in A'/(B+C)$ of an element $a\in A'$ is sent by $\gamma$ to the class of the extension of $A'/(B+C)$ by $N$ obtained as the push-out of extension~\eqref{eq:secondexabs} along the homomorphism $$ \lambda_a\;:\; B/(B\cap C)\longrightarrow N\,,\qquad [b]\longmapsto (a,b)\,, $$ where $b\in B$. The homomorphism $\lambda_a$ is well-defined, because $a\in A'$, so that $(a,B\cap C)=0$. The class of the obtained extension does not depend on the choice of $a\in A'$ with fixed $[a]\in A'/(B+C)$, because for any $b\in B$ the homomorphism $\lambda_b$ is trivial, while for any $c\in C$ the homomorphism $\lambda_c$ extends to a well-defined homomorphism ${(c,\cdot)\colon A'/C\to N}$.
\begin{cor}\label{cor:split} Suppose that the following conditions are satisfied: \begin{itemize} \item[(i)] the map $\alpha$ is an isomorphism; \item[(ii)] the quotient $A/A'$ splits out of $A$. \end{itemize} Then there is a decreasing filtration $B^{\bot}/B=F^0\supset F^1\supset F^2\supset F^3=0$ with the following adjoint quotients: $$ F^0/F^1\simeq {\rm Ker}(\gamma)\,,\qquad F^1/F^2\simeq {\rm Hom}\big(A'/(B+C),N\big)\,,\qquad F^2\simeq {\rm Coker}(\beta)\,. $$ \end{cor}
\begin{proof} Let us show that the image of the natural map ${\zeta\colon B^{\bot}\to A'/(B+C)}$ coincides with the kernel of $\gamma$. It follows from the definition of $\gamma$ that ${\rm Im}(\zeta)\subset{\rm Ker}(\gamma)$. Conversely, let $a\in A'$ be such that $\gamma[a]=0$. Then the push-out of exact sequence~\eqref{eq:secondexabs} along~$\lambda_a$ admits a splitting, or, equivalently, the map $\lambda_a\colon B/(B\cap C)\to N$ extends to a map ${\tilde{\lambda}\colon A'/C\to N}$. Condition~(ii) implies that $A/A'$ splits out of $A/C$ as well. Together with condition~(i), this implies that the natural map $C\to{\rm Hom}(A'/C,N)$ is surjective. Let $c\in C$ be sent to $\tilde{\lambda}$ under this map. Then $\lambda_{a-c}=0$, that is, $a-c\in B^{\bot}$. Since $[a]=[a-c]$ in $A'/(B+C)$, we see that $[a]$ is in the image of $\zeta$. This proves that ${\rm Im}(\zeta)={\rm Ker}(\gamma)$. Now we conclude the proof by applying Proposition~\ref{prop:key}. \end{proof}
\section{Tame symbol pairing}
Let $X$ be a smooth projective curve over a field $k$ and let $K=k(X)$ be the field of rational functions on $X$.
We suppose that $X$ is geometrically irreducible over $k$, that is, $k$ is algebraically closed in $K$, or, equivalently, $H^0(X,{\mathcal O}_X)=k$. Given a closed point $x\in X$, denote by $\widehat{{\mathcal O}}_{X,x}$ the completion of the local ring ${\mathcal O}_{X,x}$, by $\widehat{{\mathfrak m}}_x\subset \widehat{{\mathcal O}}_{X,x}$ the maximal ideal, by $k(x)$ the residue field at the point~$x$, which is also the residue field of the local ring $\widehat{{\mathcal O}}_{X,x}$, and denote by~$K_x$ the fraction field of $\widehat{{\mathcal O}}_{X,x}$. Equivalently,~$K_x$ is the completion of the field $K$ with respect to the discrete valuation $\nu_x:K^*\to{\mathbb Z}$ defined by~$x$. Note that $k(x)$ is canonically a finite extension of~$k$. We have a local tame symbol pairing $$ (\cdot,\cdot)_x\;:\;K_x^*\times K_x^*\longrightarrow k(x)^*\,,\qquad (f_x,g_x)_x= \big((-1)^{\nu_x(f_x)\nu_x(g_x)}f_x^{-\nu_x(g_x)}g_x^{\nu_x(f_x)}\big)(x)\,, $$ where $f_x,g_x\in K_x^*$. The pairing $(\cdot,\cdot)_x$ is antisymmetric. For each closed point $x\in X$, put $d_x=[k(x):k]$ and let $d$ be the greatest common divisor of the numbers $d_x$ over all~${x\in X}$: \begin{equation}\label{eq:d} d={\rm GCD}\,(\,d_x\mid x\in X)\,. \end{equation} Denote by $\Ab_X$ the ring of adeles of $X$ and by $\Ab^*_X$ the group of ideles of $X$, that is, the group of invertible elements in $\Ab_X$. We have a global tame symbol pairing $$ (\cdot,\cdot)_X\;:\;\Ab_X^*\times \Ab_X^*\longrightarrow k^*\,,\qquad (f,g)_X=\mbox{$\prod\limits_{x\in X}{\rm Nm}_{k(x)/k}(f_x,g_x)_x$}\,, $$ where $f=(f_x)_{x\in X},\,g=(g_x)_{x\in X}\in\Ab_X^*$. The pairing $(\cdot,\cdot)_X$ is antisymmetric. By the explicit formula for the local tame symbol pairing, the subgroup ${\mbox{$\prod\limits_{x\in X}\widehat{\mathcal O}^*_{X,x}$}\subset \Ab^*_X}$ is isotropic. By Weil reciprocity law, the subgroup $K^*\subset \Ab^*_X$ is isotropic as well. Let $U\subset \Ab^*_X$ denote the kernel of the global tame symbol pairing. Our aim is to describe the quotient $K^{*\,\bot}/(K^*\cdot U)$. For this, we will apply results from Section~\ref{sec:abstr} to $$ A=\Ab^*_X/U\,,\qquad B=K^*/(K^*\cap U)\,,\qquad C={\mbox{$\prod\limits_{x\in X}\widehat{\mathcal O}^*_{X,x}$}}\,/\Big(\,{\mbox{$\prod\limits_{x\in X}\widehat{\mathcal O}^*_{X,x}$}}\cap U\Big)\,,\qquad N=k^*\,, $$ and to the pairing on $A$ induced by the global tame symbol pairing $(\cdot,\cdot)_X$. \begin{remark}\label{rmk:kerneltame} Clearly, an idele $f=(f_x)_{x\in X}\in\Ab^*_X$ belongs to $U$ if and only if for any $x\in X$, we have $f_x\in U_x$, where $U_x\subset K^*_x$ denotes the kernel of the pairing $$ K_x^*\times K_x^*\longrightarrow k^*\,,\qquad (f_x,g_x)\longmapsto {\rm Nm}_{k(x)/k}(f_x,g_x)_x\,. $$ An explicit description of $U_x$ and $U$ depends on whether the field~$k$ is infinite or finite. \end{remark} \section{The case of an infinite ground field} Assume that the field $k$ is infinite. \begin{lemma}\label{lem:Uinfinite} There are equalities $$ U_x={\rm Ker}\big(\widehat{{\mathcal O}}^*_{X,x}\to k(x)^*\to k^*\big)\,,\qquad U=\mbox{$\prod\limits_{x\in X}U_x$}\,, $$ where $x\in X$ is any closed point, the map $\widehat{{\mathcal O}}^*_{X,x}\to k(x)^*$ is the natural surjective homomorphism, and the map $k(x)^*\to k^*$ is the norm map ${\rm Nm}_{k(x)/k}$. \end{lemma} \begin{proof} The formula for $U_x$ follows from the explicit description of the local tame symbol pairing (here we use that $k$ is infinite). The formula for $U$ follows from Remark~\ref{rmk:kerneltame}. 
\end{proof} Denote by ${\rm Div}(X)$ the group of divisors on $X$, by ${\rm Div}^0(X)\subset {\rm Div}(X)$ the subgroup of degree zero divisors, by ${\rm Pic}(X)$ the Picard group of $X$, and by ${\rm Pic}^0(X)\subset{\rm Pic}(X)$ the subgroup of classes of degree zero divisors. Define a map $$ \theta\;:\;{\rm Pic}^0(X)\longrightarrow {\rm Ext}^1\big({\rm Pic}^0(X),k^*\big) $$ as follows. An element $\ell\in {\rm Pic}^0(X)$ is sent by $\theta$ to the class of the extension of ${\rm Pic}^0(X)$ by $k^*$ obtained as the restriction of the Poincar\'e biextension over ${\rm Pic}^0(X)\times{\rm Pic}^0(X)$ to~${\{\ell\}\times{\rm Pic}^0(X)}$. \begin{theo}\label{theo:infinite} Assume that the field $k$ is infinite. \begin{itemize} \item[(i)] Suppose that for any finite extension of fields $k\subset l$, the norm map ${\rm Nm}_{l/k}\colon l^*\to k^*$ is surjective. Then there is a decreasing filtration $$ K^{*\,\bot}/(K^*\cdot U)=F^0\supset F^1\supset F^2\supset F^3=0 $$ with the following adjoint quotients (see~\eqref{eq:d} for the definition of $d$): $$ F^0/F^1\simeq {\rm Ker}(\theta)\subset {\rm Pic}^0(X)\,,\qquad F^1/F^2\simeq {\rm Hom}\big({\rm Pic}^0(X),k^*\big)\,,\qquad F^2\simeq k^*/(k^*)^d\,. $$ \item[(ii)] Suppose that the group $k^*$ is divisible. Then there is an exact sequence $$ 1\longrightarrow {\rm Hom}\big({\rm Pic}^0(X),k^*\big)\longrightarrow K^{*\,\bot}/(K^*\cdot U)\longrightarrow {\rm Pic}^0(X)\longrightarrow 0\,. $$ \end{itemize} \end{theo} The condition in Theorem~\ref{theo:infinite}(i) holds, in particular, if $k$ is quasi-algebraically closed and of zero characteristic, see, e.g., Serre's book~\cite[Prop.\,X.10, Prop.\,X.11]{Ser}. The condition in Theorem~\ref{theo:infinite}(ii) holds, in particular, if $k$ is algebraically closed. \begin{proof}[Proof of Theorem~\ref{theo:infinite}] (i) Let us describe in our case the groups $A$, $B$, $C$, $B\cap C$, $A'$ and the maps~$\alpha$,~$\beta$,~$\gamma$ from Section~\ref{sec:abstr}. Lemma~\ref{lem:Uinfinite} together with the surjectivity property of the norm maps imply the equalities $$ \mbox{$C=\prod\limits_{x\in X}\widehat{{\mathcal O}}^*_{X,x}/U_x=\prod\limits_{x\in X}k^*$}\,,\qquad A/C={\rm Div}(X)\,. $$ It follows that $B\cap C$ is the image of the map $$ k^*\longrightarrow\mbox{$\prod\limits_{x\in X}k^*$}\,,\qquad c\longmapsto (c^{d_x})_{x\in X}\,. $$ Note that for all $f\in \Ab^*_X$ and $c\in k^*$, we have $(f,c)_X=c^{\deg(f)}$, where $$ \deg\;:\;\Ab_X^*\longrightarrow {\mathbb Z}\,,\qquad (f_x)_{x\in X}\longmapsto \mbox{$\sum\limits_{x\in X}d_x\,\nu_x(f_x)$}\,, $$ is the degree homomorphism. Therefore, $A'=(\Ab^*_X)^0/U$, where $(\Ab^*_X)^0$ is the kernel of the degree homomorphism. We see that $A'/(B+C)={\rm Pic}^0(X)$. It is easy to see that the map $\alpha$ coincides with the natural isomorphism $$ \mbox{$\prod\limits_{x\in X}k^*$}\stackrel{\sim}\longrightarrow{\rm Hom}\big({\rm Div}(X),k^*\big)\,. $$ So, condition~(i) of Corollary~\ref{cor:split} is satisfied. Further, the isomorphism $\deg\colon A/A'\stackrel{\sim}\longrightarrow d\,{\mathbb Z}$ implies that $A/A'$ splits out of $A$, whence condition~(ii) of Corollary~\ref{cor:split} is satisfied as well. Also, we obtain that ${\rm Coker}(\beta)$ is the cokernel of the map $$ k^*\longrightarrow B\cap C\longrightarrow{\rm Hom}(A/A',N)\simeq k^*\,, \qquad c\longmapsto c^d\,. $$ Therefore, ${{\rm Coker}(\beta)=k^*/(k^*)^d}$. Finally, we show that the maps $\gamma$ and $\theta$ coincide up to sign. 
It is proved in~\cite[Theor.\,3.1]{Gor} that the Poincar\'e biextension over ${\rm Pic}^0(X)\times{\rm Pic}^0(X)$ is isomorphic to the quotient of the trivial biextension $k^*\times(\Ab^*_X)^0\times(\Ab^*_X)^0$ by the following action of the group $\big({K^*\times \prod\limits_{x\in X}\widehat{{\mathcal O}}_{X,x}^*}\big)^{\times 2}$: $$ \big((\varphi,u),(\psi,v)\big)\;:\;(c,f,g)\longmapsto \big(c(f,\psi)_X(u,\psi)_X(u,g)_X,f\varphi u,g\psi v\big)\,. $$ The proof is based on the fact that the global tame symbol pairing coincides with the commutator pairing for the central extension of $\Ab^*_X$ by $k^*$ constructed by Arbarello, Kac, de Concini~\cite{AKC}. Define a map \begin{equation}\label{eq:divadeles} {\rm div}\;:\;\Ab^*_X\longrightarrow{\rm Div}(X)\,,\qquad (f_x)_{x\in X}\longmapsto \mbox{$\sum\limits_{x\in X}\nu_x(f_x)\cdot x$}\,. \end{equation} We see that for any idele $f\in(\Ab^*_X)^0$, the class $\theta[{\rm div}(f)]$ is equal to the class of the extension of ${\rm Pic}^0(X)$ by $k^*$ given by the cokernel of the homomorphism $$ K^*\longrightarrow k^*\times {\rm Div}^0(X)\,,\qquad \psi\longmapsto \big((f,\psi)_X,{\rm div}(\psi)\big)\,. $$ The latter extension coincides up to sign with the extension obtained as the push-out along the map $\lambda_f$ of extension~\eqref{eq:secondexabs} in our case. This proves that the maps~$\gamma$ and~$\theta$ are equal up to sign. Now we conclude the proof applying Corollary~\ref{cor:split}. (ii) By the assumption, the group $k^*/(k^*)^d$ is trivial. Moreover, the group~$k^*$ is injective as a ${\mathbb Z}$-module, whence ${{\rm Ext}^1\big({\rm Pic}^0(X),k^*\big)=0}$ and ${{\rm Ker}(\theta)={\rm Pic}^0(X)}$. Also, clearly, the assumption in~(i) is satisfied. Hence we conclude the proof applying~(i). \end{proof} \begin{remark}\label{rmk:maingener} One can prove a generalization of Proposition~\ref{prop:key} for arbitrary isotropic subgroups ${B,C\subset A}$ without assuming conditions (i) and (ii) therein. This implies the following generalization of Theorem~\ref{theo:infinite} for an arbitrary infinite ground field $k$. For each closed point $x\in X$, let $\Gamma_x\subset k^*$ denote the image of the norm map $k(x)^*\to k^*$. Define an injective map~$\iota$ by the formula $$ \iota\;:\;k^*\longrightarrow \mbox{$\prod\limits_{x\in X}k^*$}\,,\qquad c\longmapsto(c^{d_x/d})\,. $$ Then there is a filtration $$ K^{*\,\bot}/(K^*\cdot U)=F^0\supset F^1\supset F^2\supset F^3=0 $$ with the following adjoint quotients: $$ F^0/F^1\simeq{\rm Ker}\left[{\rm Ker(\theta)}\longrightarrow {\rm Coker}\Big( {\rm Hom}\big({\rm Pic}^0(X),k^*\big)\to \big(\mbox{$\prod\limits_{x\in X}k^*\big)/\big(\iota(k^*)\cdot\prod\limits_{x\in X}\Gamma_x\big)$} \Big)\right]\,, $$ $$ F^1/F^2\simeq \mbox{$\prod\limits_{x\in X}\Gamma_x$}/\big(\iota(k^*)\cap \mbox{$\prod\limits_{x\in X}\Gamma_x$}\big)\cap {\rm Hom}\big({\rm Pic}^0(X),k^*\big)\subset {\big(\mbox{$\prod\limits_{x\in X}k^*$}\big)/\iota(k^*)}\,, $$ $$ F^2\simeq\big(\iota(k^*)\cap\mbox{$\prod\limits_{x\in X}\Gamma_x$}\big)/\iota(k^*)^d\,, $$ If $\Gamma_x=k^*$ for all $x\in X$, this specializes to Theorem~\ref{theo:infinite}(i). \end{remark} \section{The case of a finite ground field} Assume that $k=\FF_q$ is a finite field. We will use the following results. \begin{prop}\label{prop:AT} For any finite Galois extension of fields $K\subset L$, almost all valuations of $K$ split completely in $L$ if and only if $K=L$. \end{prop} This holds more generally when $K$ is an arbitrary global field, see, e.g.,~\cite[Theor.\,V.2]{AT}. 
As is noticed in op.cit., this is a consequence of the first fundamental inequality in class field theory, which is essentially reduced to the Riemann--Roch theorem in the function field case. \begin{cor}\label{cor:AT} \hspace{0cm} \begin{itemize} \item[(i)] The natural map $K^*/(K^*)^{q-1}\to \Ab_X^*/(\Ab_X^*)^{q-1}$ is injective. \item[(ii)] There is an equality $d=1$ (see~\eqref{eq:d} for the definition of $d$). \end{itemize} \end{cor} \begin{proof} (i) Given an element $\varphi\in K^*$, we have $\varphi\in(\Ab^*_X)^{q-1}$ if and only if any valuation of $K$ splits completely in $K(\varphi^{\frac{1}{q-1}})$. By Proposition~\ref{prop:AT}, the latter is equivalent to $\varphi\in (K^*)^{q-1}$. (ii) This is proved, e.g., in~\cite[Theor.\,V.5]{AT}. Indeed, apply Proposition~\ref{prop:AT} to the Galois extension of fields $K\subset K\otimes_{\FF_q}\FF_{q^d}$. \end{proof} Denote by $\overline X$ the curve $X\times_k\bar{k}$ over $\bar k$ and denote by ${\rm Fr}\colon \overline X\to \overline X$ the $\bar k$-linear $q$-th Frobenius morphism. This is a purely inseparable finite morphism of degree $q$. We have a group homomorphism ${\rm Fr}_*\colon{\rm Pic}(\overline X)\to{\rm Pic}(\overline X)$. Note that the subgroup $k^*\subset \bar k^*$ coincides with the group $\mu_{q-1}$ of $(q-1)$-th roots of unity in~$\bar k^*$. Denote by $$ (\cdot,\cdot)_{q-1}\;:\;{\rm Pic}^0(\overline X)_{q-1}\times {\rm Pic}^0(\overline X)_{q-1}\longrightarrow \mu_{q-1}=k^* $$ the corresponding Weil pairing. \begin{lemma}\label{lemma:nondegen} There is a well-defined unimodular pairing $$ \kappa\;:\;{\rm Pic}^0(X)_{q-1}\times {\rm Pic}^0(X)/(q-1)\longrightarrow k^*\,,\qquad \kappa(\ell,[m])=(\ell,{\rm Fr}_*(\widetilde m)-\widetilde m)_{q-1}\,, $$ where, for any $m\in {\rm Pic}^0(X)$, $\widetilde m\in {\rm Pic}^0(\overline X)$ denotes any element such that $(q-1)\widetilde m=m$. \end{lemma} \begin{proof} This is a rather standard fact. Namely, the Weil pairing $(\cdot,\cdot)_{q-1}$ is unimodular and the map ${\rm Fr}_*$ from ${\rm Pic}^0(\overline{X})_{q-1}$ to itself is an isometry with respect to the Weil pairing. Consider the map ${\rm Fr}_*-1$ from ${{\rm Pic}^0(\overline{X})_{q-1}}$ to itself. The Weil pairing induces a pairing $$ {\rm Ker}({\rm Fr}_*-1)\times {\rm Coker}({\rm Fr}_*-1)\longrightarrow k^* $$ such that the natural map ${{\rm Ker}({\rm Fr}_*-1)\to{{\rm Hom}\big({\rm Coker}({\rm Fr}_*-1),k^*\big)}}$ is injective. Since the finite groups ${\rm Ker}({\rm Fr}_*-1)$ and ${\rm Coker}({\rm Fr}_*-1)$ have the same order, the above map is in fact an isomorphism. Since both groups are $(q-1)$-torsion and $k^*$ is a cyclic group of order~$q-1$, we obtain that the groups ${\rm Ker}({\rm Fr}_*-1)$ and ${\rm Coker}({\rm Fr}_*-1)$ are Pontryagin dual to each other. Therefore the natural map ${{\rm Coker}({\rm Fr}_*-1)\to{{\rm Hom}\big({\rm Ker}({\rm Fr}_*-1),k^*\big)}}$ is also an isomorphism. Now observe that ${\rm Ker}({\rm Fr}_*-1)={\rm Pic}^0(X)_{q-1}$. Further, using the well-known surjectivity of the map ${\rm Fr}_*-1$ on ${\rm Pic}^0(\overline{X})$, one shows that there is an isomorphism $$ {\rm Pic}^0(X)/(q-1)\stackrel{\sim}\longrightarrow {\rm Coker}({\rm Fr}_*-1)\,,\qquad [m]\longmapsto {\rm Fr}_*(\widetilde m)-\widetilde m\,. $$ Altogether this proves the lemma.
\end{proof} The following relation between the Weil pairing and the tame symbol was first proved by Howe~\cite{How}, then by Mazo~\cite{Maz} in a more elementary way, and then in~\cite[Cor.\,4.1]{Gor} by a different method based on the relation between the Poincar\'e biextension and the tame symbol mentioned in the proof of Theorem~\ref{theo:infinite}(i). \begin{prop}\label{prop:Weiltame} Let $\varphi\in K^*$ be such that ${\rm div}(\varphi)\in (q-1){\rm Div}(X)$ and let $h\in \Ab^*_X$ be such that $h^{q-1}\in K^*$. Then there is an equality for the Weil pairing (see~\eqref{eq:divadeles} for the definition of~${\rm div}$) $$ \big([{\rm div}(\varphi)/(q-1)],[{\rm div}(h)]\big)_{q-1}=(\varphi,h)_X\,. $$ \end{prop} Actually, Proposition~\ref{prop:Weiltame} holds for a smooth projective curve over an arbitrary field and for $n$-torsion in the Picard group, where $n$ is prime to the characteristic of the ground field. \begin{cor}\label{cor:Weil} For any $\varphi\in K^*$ such that ${\rm div}(\varphi)\in (q-1){\rm Div}(X)$ and any $g\in (\Ab^*_X)^0$, there is an equality $$ \kappa\big([{\rm div}(\varphi)/(q-1)],[{\rm div}(g)]\big)=(\varphi,g)_X\,. $$ \end{cor} \begin{proof} We will use the adele ring $\Ab_{\bar X}$ of the curve $\overline{X}$ over the algebraic closure $\bar k$ of~$k$ and its group of ideles $\Ab^*_{\bar X}$. Note that there is a natural embedding of rings $\Ab_X\subset \Ab_{\bar X}$. It follows from the $(q-1)$-divisibility of the groups ${\rm Pic}^0(\overline{X})$ and $\bar k[[t]]^*$ that there are ${\widetilde{g}\in \Ab^*_{\bar X}}$ and ${\psi\in\bar k(\overline{X})^*}$ such that ${\widetilde{g}^{\,q-1}=g\psi}$. In particular, there is an equality \begin{equation}\label{eq:1metro} (q-1)[{\rm div}(\widetilde{g})]=[{\rm div}(g)] \end{equation} in ${\rm Pic}^0(\overline{X})$. The finite morphism ${\rm Fr}\colon \overline X\to \overline X$ defines the embedding $\Ab^*_{\bar X}\hookrightarrow \Ab^*_{\bar X}$ and the norm map ${\rm Nm}_{\,{\rm Fr}}\colon \Ab^*_{\bar X}\to \Ab^*_{\bar X}$. Put $$ h={\rm Nm}_{\,{\rm Fr}}(\widetilde g)\cdot\widetilde g^{\,-1}\in\Ab^*_{\bar X}\,. $$ Then we have \begin{equation}\label{eq:2metro} h^{q-1}={\rm Nm}_{\,{\rm Fr}}(\widetilde g^{\,q-1})\cdot \widetilde g^{\,-(q-1)}={\rm Nm}_{\,{\rm Fr}}(g\psi)\cdot (g\psi)^{-1}={\rm Nm}_{\,{\rm Fr}}(\psi)\cdot\psi^{-1}\in \bar k(\overline{X})^*\,, \end{equation} where the third equality follows from the fact that the restriction of ${\rm Nm}_{\,{\rm Fr}}$ to the subgroup $\Ab^*_X\subset \Ab^*_{\bar X}$ is the identity. There is a commutative diagram $$ \begin{CD} \Ab^*_{\bar X} @>{\rm [div(-)]}>> {\rm Pic}(\overline{X}) \\ @V_{{\rm Nm}_{\,{\rm Fr}}}VV @V_{{\rm Fr}_*}VV \\ \Ab^*_{\bar X} @>{\rm [div(-)]}>> {\rm Pic}(\overline{X})\,. \end{CD} $$ Hence, we have \begin{equation}\label{eq:1.5metro} [{\rm div}(h)]={\rm Fr}_*[{\rm div}(\widetilde{g})]-[{\rm div}(\widetilde{g})]\,. \end{equation} Combining formulas~\eqref{eq:1metro} and~\eqref{eq:1.5metro}, we obtain the equality $$ \kappa\big([{\rm div}(\varphi)/(q-1)],[{\rm div}(g)]\big)=\big([{\rm div}(\varphi)/(q-1)],[{\rm div}(h)]\big)_{q-1}\,. $$ By Proposition~\ref{prop:Weiltame} and formula~\eqref{eq:2metro}, we get $$ \big([{\rm div}(\varphi)/(q-1)],[{\rm div}(h)]\big)_{q-1}=(\varphi,h)_{\bar X}\,.
$$ Finally, there are equalities $$ (\varphi,h)_{\bar X}=\big(\varphi,{\rm Nm}_{\,{\rm Fr}}(\widetilde g)\big)_{\bar X}\cdot(\varphi,\widetilde g^{\,-1})_{\bar X}={\rm Nm}_{\,{\rm Fr}}(\varphi,\widetilde{g})_{\bar X}\cdot (\varphi,\widetilde{g})_{\bar X}^{-1}= $$ $$ =(\varphi,\widetilde{g})^q_{\bar X}\cdot (\varphi,\widetilde{g})^{-1}_{\bar X}=(\varphi,\widetilde{g}^{\,q-1})_{\bar X}=(\varphi,g\psi)_{\bar X}=(\varphi,g)_{\bar X}=(\varphi,g)_X\,, $$ where the second equality follows from the projection formula for the tame symbol pairing and the third equality follows from the fact that ${\rm Nm}_{\,{\rm Fr}}$ sends an element $c\in \bar k^*$ to $c^q$. \end{proof} \begin{lemma}\label{lem:Ufinite} There are equalities $$ U_x=(K_x^*)^{q-1}\,,\qquad U=(\Ab^*_X)^{q-1}\,, $$ where $x\in X$ is any closed point, and the global tame symbol pairing induces an isomorphism $\Ab_X^*/U\simeq {\rm Hom}_c\big(\Ab_X^*/U,k^*\big)$, where ${\rm Hom}_c$ denotes the group of continuous homomorphisms. \end{lemma} \begin{proof} The formula for $U_x$ follows from the explicit description of the local tame symbol pairing and from the facts that, for any finite extension of fields $k\subset l$, the multiplicative group $1+tl[[t]]$ is $(q-1)$-divisible and the norm map ${\rm Nm}_{l/k}$ gives an isomorphism ${l^*/(l^*)^{q-1}\simeq k^*}$. The formula for $U$ follows from Remark~\ref{rmk:kerneltame}. We see that there is an exact sequence $$ 1\longrightarrow \mbox{$\prod\limits_{x\in X}k^*$}\longrightarrow \Ab^*_X/(\Ab^*_X)^{q-1}\longrightarrow {\rm Div}(X)/(q-1)\longrightarrow 0 $$ and the tame symbol pairing induces isomorphisms $$ \mbox{$\prod\limits_{x\in X}k^*$}\stackrel{\sim}\longrightarrow {\rm Hom}\big({\rm Div}(X)/(q-1),k^*\big)\,,\qquad {\rm Div}(X)/(q-1)\stackrel{\sim}\longrightarrow {\rm Hom}_c\big(\mbox{$\prod\limits_{x\in X}k^*$},k^*\big)\,. $$ This proves the lemma. \end{proof} \begin{theo}\label{theo:finite} Assume that $k=\FF_q$ is a finite field. Then there is an equality $K^{*\,\bot}=K^*\cdot U$. \end{theo} \begin{proof} It follows from Lemma~\ref{lem:Ufinite} and its proof that there are equalities $$ C=\mbox{$\prod\limits_{x\in X}k^*$}\,,\qquad A/C={\rm Div}(X)/(q-1) $$ and the map $\alpha$ coincides with the natural isomorphism $$ \mbox{$\prod\limits_{x\in X}k^*$}\stackrel{\sim}\longrightarrow {\rm Hom}\big({\rm Div}(X)/(q-1),k^*\big)\,. $$ Let $F\subset K^*$ be the subgroup that consists of all $\varphi\in K^*$ such that ${{\rm div}(\varphi)\in(q-1){\rm Div}(X)}$. Clearly, there are embeddings $k^*,(K^*)^{q-1}\subset F$. It follows from Corollary~\ref{cor:AT}(i) and Lemma~\ref{lem:Ufinite} that there is an equality ${B\cap C=F/(K^*)^{q-1}}$. Also, there is an exact sequence \begin{equation}\label{eq:F} 1\longrightarrow k^*\longrightarrow F/(K^*)^{q-1}\longrightarrow {\rm Pic}^0(X)_{q-1}\longrightarrow 0\,, \end{equation} where the second map sends the class $[\varphi]$ of $\varphi\in F$ to $[{\rm div}(\varphi)/(q-1)]$. On the other hand, we have $A/(B+C)={\rm Pic}(X)/(q-1)$ and by Corollary~\ref{cor:AT}(ii), there is an exact sequence \begin{equation}\label{eq:Pic} 0\longrightarrow{\rm Pic}^0(X)/(q-1)\longrightarrow{\rm Pic}(X)/(q-1)\stackrel{\deg}\longrightarrow {\mathbb Z}/(q-1)\longrightarrow 0\,. \end{equation} The global tame symbol pairing naturally induces a pairing between the groups ${B\cap C=F/(K^*)^{q-1}}$ and $A/(B+C)={\rm Pic}(X)/(q-1)$.
Using exact sequences~\eqref{eq:F},~\eqref{eq:Pic} and Corollary~\ref{cor:Weil}, we see that this induces pairings $$ k^*\times {\mathbb Z}/(q-1)\longrightarrow k^*\,,\qquad (c,[n])\longmapsto c^n\,, $$ $$ \kappa\;:\;{\rm Pic}^0(X)_{q-1}\times {\rm Pic}(X)/(q-1)\longrightarrow k^*\,. $$ The first pairing is obviously unimodular, while the second one is unimodular by Lemma~\ref{lemma:nondegen}. It follows that condition~(ii) of Corollary~\ref{cor:key} is satisfied, which finishes the proof. \end{proof} Let ${C_X=\Ab_X^*/K^*}$ be the idele class group of $X$ and let $G_K^{\rm ab}$ be the universal abelian quotient of the Galois group $G_K$ of the field~$K$. \begin{cor}\label{cor:CFT} The global tame symbol pairing $$ (\cdot,\cdot)_X\;:\;K^*\times C_X\longrightarrow k^* $$ induces an isomorphism of topological groups $$ C_X/(q-1)\stackrel{\sim}\longrightarrow {\rm Hom}\big(K^*/(K^*)^{q-1},k^*\big)\,, $$ which, followed by the isomorphism from the Kummer theory, provides an isomorphism of topological groups $$ C_X/(q-1)\stackrel{\sim}\longrightarrow G_K^{\rm ab}/(q-1)\,. $$ \end{cor} \begin{proof} By Lemma~\ref{lem:Ufinite}, the global tame symbol pairing induces an isomorphism ${K^{*\,\bot}/(K^{*\,\bot}\cap U)\simeq {\rm Hom}_c\big(C_X/(q-1),k^*\big)}$. By Theorem~\ref{theo:finite} and Corollary~\ref{cor:AT}(i), we have ${K^{*\,\bot}/(K^{*\,\bot}\cap U)=K^*/(K^*)^{q-1}}$. Since $k^*$ is a cyclic group of order $q-1$, we conclude that the $(q-1)$-torsion groups $K^*/(K^*)^{q-1}$ and $C_X/(q-1)$ are Pontryagin dual with respect to the global tame symbol pairing. \end{proof} \end{document}
\begin{document} \title[Smallest posets with given cyclic automorphism group]{Smallest posets with given cyclic automorphism group} \author[J.A. Barmak]{Jonathan Ariel Barmak} \author[A.N. Barreto]{Agust\'in Nicol\'as Barreto} \thanks{Both authors were supported by CONICET and partially supported by grant UBACyT 20020190100099BA. The first named author was also partially supported by grants CONICET PIP 11220170100357CO, ANPCyT PICT-2017-2806 and ANPCyT PICT-2019-02338.} \address{Universidad de Buenos Aires. Facultad de Ciencias Exactas y Naturales. Departamento de Matem\'atica. Buenos Aires, Argentina.} \address{CONICET-Universidad de Buenos Aires. Instituto de Investigaciones Matem\'aticas Luis A. Santal\'o (IMAS). Buenos Aires, Argentina. } \email{[email protected]} \email{[email protected]} \begin{abstract} For each $n\ge 1$ we determine the minimum number of points in a poset with cyclic automorphism group of order $n$. \end{abstract} \makeatletter \@namedef{subjclassname@2020}{ \textup{2020} Mathematics Subject Classification} \makeatother \subjclass[2020]{06A11, 20B25, 06A07, 05E18} \keywords{Posets, Automorphism group.} \maketitle \section{Introduction} In 1938 R. Frucht \cite{Fru} proved that any finite group can be realized as the automorphism group of a graph. Moreover, the graph can be taken to have $3d|G|$ vertices, where $d$ is the cardinality of any generating set of $G$ (\cite[Theorems 3.2, 4.2]{Fru49}). In 1959 G. Sabidussi \cite{Sab} showed that in fact $O(|G|\log(d))$ vertices suffice. In 1974 L. Babai proved that the number of generators is not relevant, and with the exception of the cyclic groups $\mathbb{Z}_3, \mathbb{Z}_4$ and $\mathbb{Z}_5$, the graph can be taken to have just $2|G|$ vertices. Sabidussi claims in \cite{Sab} that he was able to compute the smallest number of vertices $\alpha (G)$ in a graph with automorphism group $G$ in the case that $G$ is cyclic of prime power order. Also, he asserts that for $n=p_1^{r_1}p_2^{r_2}\ldots p_k^{r_k}$, $\alpha (\mathbb{Z}_n)=\sum\limits_{i=1}^k \alpha(\mathbb{Z}_{p_i^{r_i}})$. Unfortunately both his computations for $\mathbb{Z}_{p^r}$ and the assertion are wrong. In \cite{Mer} R.L. Meriwether rectifies these errors and correctly determines $\alpha(\mathbb{Z}_n)$ for any $n\ge 1$. However, he commits similar mistakes when trying to extend this computation to arbitrary finite abelian groups. In \cite{Arl1, Arl2} W. Arlinghaus provides a complete calculation of $\alpha (G)$ for $G$ finite abelian. The proof follows these steps: first compute $\alpha (G)$ for $G$ cyclic of prime power order, then for arbitrary finite cyclic groups, then for abelian $p$-groups, and finally the general case. In parallel, the analogous problem was studied for partially ordered sets. In 1946 G. Birkhoff \cite{Bir} proved that for any finite group $G$ there is a poset with $|G|(|G|+1)$ points and automorphism group isomorphic to $G$. Then Frucht \cite{Fru50} improved this to $(d+2)|G|$ points. In 1980 Babai \cite{Bab2} proved that $3|G|$ points are enough. However, the smallest number $\beta (G)$ of points of a poset whose automorphism group is a given finite abelian group $G$ has not yet been determined. In this paper we compute $\beta (G)$ for every finite cyclic group $G$. This result was first announced in \cite{Barr}.
In \cite{Barr} we first computed $\beta (G)$ for $G$ cyclic of prime power order, then for arbitrary finite cyclic groups and for finite abelian $p$-groups with $p\ge 11$, following the steps of the proof of the graph case presented by Arlinghaus. The calculation of $\beta(\mathbb{Z}_n)$ in this paper is more direct than the original one we gave in \cite{Barr}. The case of $p$-groups will not be addressed in this article. Just as for graphs, the bound $\beta (\mathbb{Z}_n)\le \sum\limits_{i=1}^k \beta(\mathbb{Z}_{p_i^{r_i}})$ holds for $n=p_1^{r_1}p_2^{r_2}\ldots p_k^{r_k}$, but the equality does not hold in general. For instance, $\beta(\mathbb{Z}_{12})=\beta(\mathbb{Z}_3)+\beta(\mathbb{Z}_4)-1$. In Section \ref{sectionexamples} we construct explicit examples which provide an upper bound for $\beta(\mathbb{Z}_n)$. In Section \ref{sectionlemmas} we prove some lemmas concerning the cycle structure of a generator of $\textrm{Aut} (P)$. In the last section we introduce the notion of weight of a prime power in a cycle, which we use in the proof of the lower bound. \section{Construction of the examples} \label{sectionexamples} A poset is a set with a partial order $\le$. The elements of the underlying set of a poset are called points. All posets are assumed to be finite, that is, their underlying set is finite. If $P$ is a poset and $x,y\in P$, we write $x<y$ if $x\le y$ and $x\neq y$. We say that $y$ covers $x$ if $x<y$ and there is no $x<z<y$. The edges of $P$ are the pairs $(x,y)$ such that $y$ covers $x$. The Hasse diagram of $P$ is the digraph whose vertices are the points of $P$ and the edges are the edges of $P$. If the orientation of an arrow is not indicated in the graphical representation, we assume it points upwards. A morphism $P\to Q$ of posets is an order-preserving map, i.e. a function $f$ between the underlying sets such that for every pair $x,y\in P$ with $x\le y$ we have $f(x)\le f(y)$. If $P$ is a poset, since it is finite, an automorphism of $P$ is just a permutation of the underlying set which is a morphism. A subposet of a poset $P$ is a subset of the underlying set with the inherited order. Given an automorphism $g$ of a poset $P$, we say that a subset $A$ of the underlying set of $P$ is invariant or $g$-invariant if $g(A)=A$. In this case, $g$ induces an automorphism on the subposet with underlying set $A$. \begin{defi} Define $b(1)=0$, $b(2)=1$, $b(3)=b(4)=b(5)=b(7)=3$. For any other prime power $p^r$, define $b(p^r)=2$. \end{defi} \begin{prop} \label{ejemplos} Let $n=p^r$, where $p\ge 2$ is a prime and $r\ge 0$. Then there exists a poset $P$ with $b(n)n$ points and automorphism group $\textrm{Aut}(P)$ isomorphic to $\mathbb{Z}_{n}$. \end{prop} \begin{proof} For $n=1$ we take the empty poset and for $n=2$ we take the discrete poset on $2$ points. By discrete we mean an antichain, i.e. a poset of pairwise incomparable elements. If $n=3,4,5,7$ we use the well-known general construction \cite{Fru50}: $P=\mathbb{Z}_{n}\times \{0,1,2\}$ with the order $(i,2)>(i,1)>(i,0)<(i+1,2)$ for every $i\in \mathbb{Z}_{n}$. It is easy to see that such a poset satisfies $\textrm{Aut}(P) \simeq \mathbb{Z}_{n}$. Suppose then that $n\ge 8$. We take two copies of $\mathbb{Z}_{n}$: $A=\mathbb{Z}_{n}=\{0,1,\ldots, n-1\}$ and $A'=\{0',1',\ldots, (n-1)'\}$. Let $S=\{0,1,2,4\}\subseteq \mathbb{Z}_{n}$. For $i\in A$ and $j'\in A'$ we set $i<j'$ if $j-i\in S$. No two elements in the same copy of $\mathbb{Z}_n$ are comparable (see Figure \ref{fig0124}). We will prove that the automorphism group of this poset $P$ is $\mathbb{Z}_{n}$.
It is clear that $G=\mathbb{Z}_n$ acts regularly on each copy of $\mathbb{Z}_n$ by multiplication (addition), and this gives a faithful action $G\to \textrm{Aut}(P)$ on $P$. So $G$ can be seen as a subgroup of $\textrm{Aut}(P)$. Since each automorphism of $P$ maps $0\in A$ to a minimal element of $P$, and the minimal elements of $P$ are exactly the points of $A$, the $\textrm{Aut}(P)$-orbit of $0\in P$ has order $n$. If we prove that the $\textrm{Aut}(P)$-stabilizer of $0\in P$ is trivial, then $|\textrm{Aut}(P)|=n$, so $\textrm{Aut}(P)$ is isomorphic to $G$. Let $h \in \textrm{Aut}(P)$ be such that $h (0)=0$. \begin{figure} \caption{The Hasse diagram of $P$ for $n=8$.} \label{fig0124} \end{figure} We define the \textit{double neighborhood} $B(i)$ of $i\in A$ as the set of those $j\in A$ such that $\# (P_{> i}\cap P_{> j}) \ge 2$, that is, there are at least two points in $A'$ greater than both $i$ and $j$. The \textit{reduced double neighborhood} of $i\in A$ is $\hat{B}(i)=B(i)\smallsetminus \{i\}$. Since $h$ is an automorphism, $B(h(i))=h (B(i))$ and $\hat{B}(h(i))=h (\hat{B}(i))$. Given $k\ge 1$, we say that two points $i,j\in A$ are \textit{$k$-adjacent} if $\#(B(i)\cap B(j))=k$, and they are \textit{reduced $k$-adjacent} if $\#(\hat{B}(i)\cap \hat{B}(j))=k$. Clearly, $h$ preserves $k$-adjacency and reduced $k$-adjacency. Suppose first that $n\ge 9$. Then for each $i\in A$, $B(i)=\{i-2,i-1,i,i+1,i+2\}$. It is easy to see that $i,j$ are $4$-adjacent if and only if $i-j=\pm 1$. Thus, $h$ induces an automorphism of the cyclic graph on $A$ with edges given by $4$-adjacency. Since $h(0)=0$, $h$ is either the identity $1_{\mathbb{Z}_n}$ or $-1_{\mathbb{Z}_n}$. The second case cannot occur as $\{0,2,3,4\}$ has an upper bound while $\{0,-2,-3,-4\}$ does not. Thus every point of $A$ is fixed by $h$. If $j'\in A'$, then $j'$ is the unique upper bound of $\{j,j-1,j-2,j-4\}$. Thus $h(j')=j'$. This proves that $h=1_P$. Finally, suppose $n=8$. Given $i\in A$, we have now $\hat{B}(i)=\{i-2,i-1,i+1,i+2, i+4\}$ and $i,j\in A$ are reduced $4$-adjacent if and only if $i-j=\pm 3$. Thus, $h$ induces an automorphism of the cyclic graph on $A$ with edges given by reduced $4$-adjacency. Then $h=1_{\mathbb{Z}_n}$ or $-1_{\mathbb{Z}_n}$. The second case cannot occur for the same reason as before. Since each point in $A'$ is determined by the set of smaller points, $h=1_P$. \end{proof} \begin{ej} \label{ejemplo12} There exists a poset $P$ with $20$ points and automorphism group isomorphic to $\mathbb{Z}_{12}$. Take two copies $A=\{0,1,2,3,4,5\}$, $A'=\{0',1',2',3',4',5'\}$ of $\mathbb{Z}_6$ and two copies $B=\{0'',1'',2'',3''\}$, $B'=\{0''',1''',2''',3'''\}$ of $\mathbb{Z}_4$. The underlying set of $P$ is the union of these four sets. Let $S=\{0,1,3\}\subseteq \mathbb{Z}_6$, $T=\{0,1\}\subseteq \mathbb{Z}_4$. Define the following order on $P$: $i<j'$ if $j-i\in S$, $i''<j'''$ if $j-i\in T$, $i'''<j'$ if $j-i$ is even, $i''<j$ if $j-i$ is even, $i''<j'$ for every $i,j$ (see Figure \ref{figveinte}). \begin{figure} \caption{A poset $P$ of $20$ points and $\textrm{Aut}(P)\simeq \mathbb{Z}_{12}$.} \label{figveinte} \end{figure} It is clear that $G=\mathbb{Z}_{12}$ acts on each copy of $\mathbb{Z}_6$ and of $\mathbb{Z}_4$ by multiplication (addition). This induces a faithful action of $G$ on $P$. If $h\in \textrm{Aut}(P)$, $h(0'')$ must be a minimal point $i''$ and $h(0')$ must be a maximal point $j'$. However, $i$ and $j$ cannot have different parity. Indeed, among the points $0,2,4,0''',1'''$ which cover $0''$, there are just two, $0$ and $0'''$, smaller than $0'$.
However, if $i\in \mathbb{Z}_4$ and $j\in \mathbb{Z}_6$ have different parity, among the points covering $i''$ ($k\in A$ with $k\equiv i (2)$ and $i''', (i+1)'''$) there are three smaller than $j'$: both $j-1, j-3$, and one of $i''', (i+1)'''$. Thus $i\equiv j(2)$, which implies that the $\textrm{Aut}(P)$-orbit of the set $\{0',0''\}$ has at most $12$ elements. If we prove that the $\textrm{Aut}(P)$-stabilizer of $\{0',0''\}$ is trivial, then $|\textrm{Aut}(P)|\le 12=|G|$, so $\textrm{Aut}(P)$ is isomorphic to $G$. Let $h$ be an automorphism of $P$ which fixes $0'$ and $0''$. Note that $2''$ is the unique minimal point different from $0''$ which is covered by three points that cover $0''$. Thus $h(2'')=2''$. Now, the points of $B'$ are the only points of $P$ which cover exactly one of $0'',2''$. Thus $B'$ is invariant. This implies that $h$ restricts to an automorphism of the subposet $R$ with underlying set $B\cup B'$ and of the subposet $Q$ with set $A\cup A'$. Since $R$ is a cycle, there are only two automorphisms of $R$ fixing $0''$. One is the identity and the other maps $0'''$ to $1'''$. However, $0'''<0'$ while $1'''\nless 0'$. Thus $0'''$ is fixed by $h$ and then $h$ is the identity of $R$. Suppose that $i'\in A'$ is a fixed point. Among the points $i,i-1,i-3$ in $A$ covered by $i'$, only $i-1$ and $i-3$ share a lower bound. Thus $h(i)=i$. Similarly, among the points $(i-4)',(i-2)',(i-1)'$ of $A'$ not covering $i$, only $(i-4)'$ and $(i-2)'$ share a lower bound in $B'$. Thus $(i-1)'$ is fixed. In conclusion, we have shown that if $i'$ is fixed then both $i$ and $(i-1)'$ are fixed. Since $0'$ is fixed, this implies that every point of $A$ and of $A'$ is fixed. Thus $h=1_P$. \end{ej} We say that a prime power $p^r$ ($r\ge 1$) exactly divides an integer $n$, and write $p^r\parallel n$, if $p^r| n$ and $p^{r+1}\nmid n$. \begin{teo} \label{teoejemplos} Let $n=p_1^{r_1}p_2^{r_2}\ldots p_k^{r_k}$ where the $p_i$ are different primes and $r_i\ge 1$ for every $i$. Then there exists a poset with automorphism group isomorphic to $\mathbb{Z}_n$ and $\sum\limits_{i=1}^k b(p_i^{r_i})p_i^{r_i}-1$ points if $3\parallel n$ and $4\parallel n$, and with $\sum\limits_{i=1}^k b(p_i^{r_i})p_i^{r_i}$ points otherwise. \end{teo} \begin{proof} By Proposition \ref{ejemplos}, for each $1\le i\le k$ there exists a poset $P_i$ with $b(p_i^{r_i})p_i^{r_i}$ points and $\textrm{Aut}(P_i)\simeq \mathbb{Z}_{p_i^{r_i}}$. The non-Hausdorff join or ordinal sum $P=P_1\oplus P_2\oplus \ldots \oplus P_k$ is constructed by taking a copy of each poset and keeping the given ordering in each copy, while setting $x<y$ for each $x\in P_i$ and $y\in P_j$ if $i<j$. Since each automorphism of $P$ preserves heights (the maximum length of a chain with a given maximum element), it restricts to automorphisms of each $P_i$. Thus $\textrm{Aut}(P)=\textrm{Aut}(P_1)\oplus \textrm{Aut}(P_2) \oplus \ldots \oplus \textrm{Aut}(P_k)=\mathbb{Z}_n$. If $p_i^{r_i}=3$ and $p_j^{r_j}=4$, instead of $P_i$ and $P_j$ we take the poset in Example \ref{ejemplo12} of $20=b(3)3+b(4)4-1$ points and automorphism group $\mathbb{Z}_{12}$. \end{proof} \section{Lemmas} \label{sectionlemmas} Let $X$ be a finite set, $n\ge 1$ and $x_0,x_1,\ldots, x_{n-1}$ pairwise different elements of $X$. The cycle $\alpha =(x_0,x_1,\ldots , x_{n-1})$ is the permutation which maps $x_i$ to $x_{i+1}$ (indices considered modulo $n$) and fixes every other point of $X$. The number $n$ is the order or length of the cycle, which we denote by $|\alpha|$.
A cycle of order $n$ is also called an $n$-cycle. A cycle $\alpha$ is non-trivial if $|\alpha|\ge 2$. The representation $(x_0,x_1,\ldots, x_{n-1})$ of a non-trivial $n$-cycle is unique up to cyclic permutation of the $n$-tuple $x_0,x_1,\ldots,x_{n-1}$. The underlying set of a non-trivial cycle $(x_0,x_1,\ldots , x_{n-1})$ is $\{x_0,x_1,\ldots , x_{n-1}\}$. Many times we will identify a non-trivial cycle with its underlying set. Two non-trivial cycles are disjoint if their underlying sets are. Any permutation $g$ of $X$ can be written as a composition $\alpha_1 \alpha_2 \ldots \alpha_k$ of disjoint non-trivial cycles. This representation is unique up to reordering of the cycles. If a cycle $\alpha$ appears in the factorization of $g$, we say that $\alpha$ is contained in $g$ and write $\alpha \in g$. The orbits of $g$, or of the action of the cyclic group $\langle g \rangle$ on $X$, are the underlying sets of the cycles in $g$ and the singletons consisting of fixed points. Disjoint non-trivial cycles commute. Thus, if $g$ is a composition $\alpha_1 \alpha_2 \ldots \alpha_k$ of disjoint non-trivial cycles and $m\in \mathbb{Z}$, then $g^m=\alpha_1^m \alpha_2^m \ldots \alpha_k^m$. If $\alpha$ is a cycle of length $n$ and $m\in \mathbb{Z}$, the permutation $\alpha^m$ is a composition of $(n,m)=$gcd$\{n,m\}$ cycles of length $\frac{n}{(n,m)}$. In particular, $\alpha^m$ is a cycle with the same underlying set as $\alpha$ if $n$ and $m$ are coprime. Moreover, the order of $g$ is the least common multiple of the lengths of its cycles and if a cycle of $g$ has order $n$, and $m\in \mathbb{Z}$, then $g^m$ fixes every point of the cycle if $n|m$, and fixes no point of the cycle otherwise. If $g$ is an automorphism of a poset $P$, then each orbit of $g$ is discrete, as $a<b$ would imply that $a<g^k(a)$ for some $k\in \mathbb{Z}$ and then $\{g^{nk}(a)\}_{n\ge 0}$ would be an infinite chain. If $A$ and $B$ are two different orbits of $g$ we cannot have an element $a\in A$ smaller than another $b\in B$ and at the same time an element $b'\in B$ smaller than another $a'\in A$, as this would imply that $a<b=g^k(b')<g^k(a')$ for some $k\in \mathbb{Z}$, contradicting the fact that $A$ is discrete, or the antisymmetry of the order. \begin{obs} \label{extension} Let $P$ be a poset and let $g$ be an automorphism of $P$. Let $Q$ be the subposet of points which are not fixed by $g$. Let $A_0,A_1,\ldots, A_k$ be the orbits of the automorphism induced by $g$ on $Q$. If $h$ is an automorphism of $Q$ such that $h(A_i)=A_i$ for every $i$, then it extends to an automorphism of $P$ which fixes every element not in $Q$. Indeed, if $x\in P\smallsetminus Q$, $y\in A_i$ and $x<y$, then $h(y)\in A_i$, so there exists $r\ge 0$ such that $g^r(y)=h(y)$. Then $x=g^r(x)<g^r(y)=h(y)$. Similarly, if $x>y$, then $x>h(y)$. \end{obs} \begin{lema} \label{dos} Let $n\ge 1$ and let $p^r\neq 2$ be a prime power which exactly divides $n$. Let $P$ be a poset with $\textrm{Aut}(P)$ cyclic of order $n$, and let $g$ be a generator of $\textrm{Aut} (P)$. Then $g$ contains at least two cycles of length divisible by $p^r$. \end{lema} \begin{proof} Since $g$ has order $n$, it contains at least one cycle $\alpha$ of length divisible by $p^r$. Assume there is no other cycle of length divisible by $p^r$. The automorphism $g^{\frac{n}{p}}$ fixes then every point not in $\alpha$. Let $x$ be an element of $\alpha$ and let $\tau$ be the transposition of the underlying set of $\alpha$ which permutes $x$ and $g^{\frac{n}{p}}(x)\neq x$. 
By Remark \ref{extension}, $\tau$ extends to an automorphism $h$ of $P$ which is a transposition. But any power of $g$ either fixes each point in $\alpha$ or fixes no point of $\alpha$. Since the order of $\alpha$ is at least $p^r>2$, $h \notin \langle g\rangle=\textrm{Aut}(P)$, a contradiction. \end{proof} If a group $G$ acts on a poset $P$, an automorphism of $P$ is said to be induced by the action if it is in the image of the homomorphism $G\to \textrm{Aut}(P)$. \begin{lema} \label{nuevofacil} Let $p=3,5$ or $7$. Let $P$ be a poset on which $\mathbb{Z}_p$ acts with exactly two orbits, both of order $p$. Then there exists an automorphism of $P$ not induced by the action for which each orbit of the action is invariant. \end{lema} \begin{proof} Let $g=\alpha \beta \in \textrm{Aut}(P)$ be the automorphism induced by a generator of $\mathbb{Z}_p$, where $\alpha=(0,1,\ldots, p-1)$ and $\beta=(0',1',\ldots, (p-1)')$. If no element of $\alpha$ is comparable with an element of $\beta$, then the transposition $(0,1)$ is an automorphism which is different to $g^k$ for any $k\in \mathbb{Z}$, that is, not induced by the action. Without loss of generality we can assume then that $0$ and $0'$ are comparable, and moreover, that $0<0'$. Then no element in $\beta$ can be smaller than another in $\alpha$. Since $g$ is an automorphism, $i<i'$ for every $0\le i\le p-1$. If no other pair of elements are comparable, then $(0,1)(0',1')$ is an automorphism not induced by the action (it has order $2$, for example). If $i<j'$ for every $0\le i,j\le p-1$, then $(0,1)$ satisfies the desired property. This completes the proof of the case $p=3$ by the following argument. The case we did not analyze is when $P$ has exactly $6$ edges. In that case, let $P^c$ be the \textit{complement} of $P$, defined as the poset $P^c$ with the same underlying set and setting $i<j'$ if and only if $i\nless j'$ in $P$, while $i,j$ are not comparable and $i',j'$ are not comparable for every $i\neq j$. Since $P$ and $P^c$ are non-discrete, they have the same automorphisms. As $P^c$ has only $3$ edges, there is an automorphism of $P^c$ not induced by the action, so this is the required automorphism of $P$. For $p=5$ we need to consider the case that $P$ has $10$ edges. By the complement argument, this will complete the $p=5$ case. So, suppose $0<k'$ for some $1\le k\le 4$ (and then $i<(i+k)'$ for every $i$, where $i+k$ is considered modulo $5$). Note that $g^k$ is induced by another generator of $\mathbb{Z}_p$ and it maps $i'$ to $(i+k)'$. Thus, for each $0\le i\le 4$, $i<i'$ and $i<g^k(i')$. Therefore we can assume that $k=1$. We have then the ``symmetry about the axis $03'$'', which maps $i$ to $-i$ and $j'$ to $(1-j)'$ (see Figure \ref{figcinco}). This is an automorphism of $P$ which is different to any power of $g$ (it has order $2$). \begin{figure} \caption{The underlying undirected graph of a poset with $10$ points and edges $i'>i<(i+1)'$, and the axis $03'$.} \label{figcinco} \end{figure} For $p=7$, if $P$ has $14$ edges, then by the argument above we can assume $i'>i<(i+1)'$ for every $0\le i\le 6$ and there is then a symmetry about $04'$. By the complement argument it only remains to analyze the case that $P$ has exactly $21$ edges. Here $i<i',(i+k)', (i+l)'$ for certain $1\le k\neq l\le 6$ and again we can assume $k=1$ by replacing $g$ by $g^k$. Finally, by replacing $g$ by $g^{-1}$, it suffices to consider the cases $l=2,3$ and $4$ (Figure \ref{figsietetres}). 
\begin{figure} \caption{Posets with two $\mathbb{Z}_7$-orbits, for $l=2,3$ and $4$.} \label{figsietetres} \end{figure} For $l=2$ we have the involution that maps $i$ to $-i$ and $j'$ to $(2-j)'$. For $l=3$ we have the following automorphism of order $3$: $(142)(356)(0'3'1')(2'4'5')$ (see Figure \ref{triangulo}). For $l=4$, there is again the symmetry about $04'$. \begin{figure} \caption{The underlying graph of the poset $P$ of $14$ points and edges $i<i',(i+1)',(i+3)'$. An automorphism of order $3$ is given by a rotation of angle $\frac{2\pi}{3}$.} \label{triangulo} \end{figure} \end{proof} \begin{lema} \label{nuevodificil} Let $P$ be a poset on which $\mathbb{Z}_4$ acts with exactly two orbits of order $4$ or exactly three orbits: two of order $4$ and one of order $2$. Then there exists an automorphism of $P$ not induced by the action for which each orbit of the action is invariant. \end{lema} \begin{proof} Let $g$ be an automorphism induced by a generator of the action and suppose first that $g=(0,1,2,3)(0',1',2',3')$. If $P$ is discrete, $(0,1)$ satisfies the required conditions. If $P$ has exactly $4$ edges, then as in the proof of Lemma \ref{nuevofacil} we can assume $i<i'$ for every $0\le i\le 3$, and $(0,1)(0',1')$ works. By the complement argument we can assume $P$ has exactly $8$ edges and that it is determined by the relations $i'>i<(i+k)'$ for some $1\le k\le 3$. The case $k=3$ reduces to the case $k=1$ by replacing $g$ by $g^3$. If $k=1$, the symmetry $(1,3)(0',1')(2',3')$ about $02$ satisfies the required conditions. If $k=2$, then $(0,2)$ works. Suppose then that $g=\alpha \beta \gamma$ with $\alpha=(0,1,2,3)$, $\beta=(0',1',2',3')$, $\gamma=(0'',1'')$. Let $Q$ be the subposet of points in $\alpha$ and $\beta$. Since $g^2=(0,2)(1,3)(0',2')(1',3')$, every automorphism of the poset $Q$ which has $\{0,2\},\{1,3\}, \{0',2'\}, \{1',3'\}$ as invariant sets extends to $P$ by Remark \ref{extension}. If $Q$ is discrete or if $Q$ has $16$ edges, then $(0,2)$ is an automorphism of $Q$ which extends to $P$ and this extension is not induced by the action. If $Q$ has exactly $4$ edges, we may assume $i<i'$ for every $i$ and then $(0,2)(0',2')$ extends to an automorphism of $P$ different to any power of $g$. If $Q$ has exactly $12$ edges, the complement argument can be used. Suppose then $Q$ has exactly $8$ edges. By relabelling we can assume the relations are (a) $i<j'$ for $i\equiv j (2)$ or (b) $i'>i<(i+1)'$ for every $i$. In case (a), $(0,2)$ is again an automorphism which has every nontrivial orbit of $g^2$ as an invariant set. In the rest of the proof we assume we are in case (b). If the points of $\gamma$ are not comparable with any point of $Q$, then the symmetry about $02$, which maps $i$ to $-i$ and $j'$ to $(1-j)'$, is an automorphism of $Q$ which extends to $P$, and this extension satisfies the required conditions. By considering the opposite order, we can assume a point of $\gamma$ is comparable with a point of $\alpha$. Moreover, by relabelling if needed we can assume $0''$ is comparable with $0$. Suppose first that $0''<0$. Since $g$ is an automorphism, $0''<2$ and $1''<1,3$. If $0''\nless 1$, then $0''\nless 3$ and $1''\nless 0,2$. If $0''<1$, then $0''<3$ and $1''<0,2$. In either case, the symmetry of $Q$ about $02$ extends by the identity to an automorphism of $P$ which is not induced by the action, even though this automorphism of $Q$ does not have the orbits of $g^2$ as invariant sets. Finally suppose $0''>0$. Then $0''>2$ and $1''>1,3$.
We can assume no element in $\beta$ is smaller than an element in $\gamma$, by the previous case and the duality argument. Also, we cannot have an element of $\gamma$ being smaller than another $j'$ of $\beta$, since this would imply that $i<j'>i+2$, modulo 4, for certain $0\le i\le 3$, which is absurd. In any case, if $0''\ngtr 1$ or if $0''>1$, we have that the symmetry of $Q$ about $02$ extends to an automorphism of $P$. \end{proof} \begin{lema} \label{lema357} Let $p=3,5$ or $7$. Let $P$ be a poset with cyclic automorphism group of order $n\ge 1$, and let $g\in \textrm{Aut}(P)$ be a generator. Suppose $g$ contains a $p$-cycle $\alpha$ and a $pk$-cycle $\beta\neq \alpha$ for some $p\nmid k\ge 1$. Then it contains a third cycle whose length is divisible by $p$. \end{lema} \begin{proof} Suppose $\beta=(0,1,\ldots, pk-1)$. Let $Q$ be the subposet of $P$ whose points are those of $\alpha$ and $\beta$. Assume that there is no other cycle in $g$ whose length is divisible by $p$. In particular $p\parallel n$. Since the order of any cycle of $g$ different from $\alpha$ and $\beta$ divides $\frac{n}{p}$, the automorphism $g^{\frac{n}{p}}$ fixes every point not in $Q$. Moreover $g^{\frac{n}{p}}$ has $k+1$ orbits of order $p$, which are the underlying set of $\alpha$ and $A_i=\{0\le j\le pk-1 | \ j\equiv i (k)\}$ for $0\le i\le k-1$. In particular, by Remark \ref{extension} every automorphism of $Q$ for which these sets are invariant extends to an automorphism of $P$. Let $Q'$ be the subposet of $Q$ whose points are those of $\alpha$ and $A_0$. Since $g^{k}$ induces an automorphism of $Q'$ with two orbits of order $p$, by Lemma \ref{nuevofacil} there is an automorphism $h$ of $Q'$ not induced by a power of $g^{k}$ for which the underlying set of $\alpha$ and $A_0$ are invariant. We extend $h$ to an automorphism $\overline{h}$ of $Q$ as follows. Let $j$ be a point of $\beta$, $0\le j\le kp-1$. Let $0\le i\le k-1$ be such that $j\in A_i$. Since $p\nmid k$, there exists a unique $0\le t\le k-1$ such that $k|j+tp$, in other words $j+tp$, considered modulo $kp$, lies in $A_0$. Then $h(j+tp)\in A_0$. Define $\overline{h}(j)=h(j+tp)-tp\in A_i$. We claim that $\overline{h}$ is an automorphism of $Q$. It is clearly bijective. Two different points of $\beta$ cannot be comparable as they are in the same orbit. Suppose $j$ in $\beta$ and $a$ in $\alpha$ are comparable, say $a<j$. Let $0\le t\le k-1$ be such that $k|j+tp$. Then $a=g^{tp}(a)<g^{tp}(j)=j+tp$. Since $h$ is a morphism, $h(a)<h(j+tp)$. Thus $\overline{h}(a)=h (a)=g^{-tp}(h(a))<g^{-tp} (h(j+tp))=h(j+tp)-tp=\overline{h}(j)$. Since the underlying set of $\alpha$ and each $A_i$ are $\overline{h}$-invariant, $\overline{h}$ extends to an automorphism of $P$, which must be a power $g^r$ of $g$. Since $g^r$ leaves $A_0$ invariant, in particular $r=g^r(0)\in A_0$, so $k|r$ and $h$ is then induced by a power of $g^k$, a contradiction. \end{proof} \begin{lema} \label{lema4} Let $P$ be a poset with cyclic automorphism group of order $n\ge 1$, and let $g\in \textrm{Aut} (P)$ be a generator. Suppose that $g$ contains two $4$-cycles $\alpha, \beta$. Then it contains a third cycle of length divisible by $4$ or two more cycles of even length. \end{lema} \begin{proof} The proof is very similar to that of Lemma \ref{lema357}, so we omit details. 
If $\alpha$ and $\beta$ are the unique two cycles of even length in $g$, then by Lemma \ref{nuevodificil} there is an automorphism $h$ of the poset of points of these two cycles which is not induced by a power of $g$, and moreover has the underlying sets of $\alpha$ and $\beta$ as invariant sets. Since the non-trivial orbits of $g^{\frac{n}{4}}\in \textrm{Aut}(P)$ are the underlying sets of $\alpha$ and $\beta$, $h$ extends to an automorphism of $P$, a contradiction. Suppose then there exists a third cycle $\gamma=(1,2,\ldots, 2k)$ in $g$ with $k$ odd, and that there is no other cycle of even length. We define $Q$ to be the subposet whose points are those of $\alpha, \beta$ and $\gamma$. Then $g^{\frac{n}{4}}$ fixes every point not in $Q$. The other orbits of $g^{\frac{n}{4}}$ are the underlying sets of $\alpha$ and $\beta$, and $A_i=\{i,k+i\}$ for $0\le i\le k-1$. Let $Q'$ be the subposet whose points are those of $\alpha, \beta$ and $A_0$. Then $g^k$ induces an automorphism of $Q'$ and by Lemma \ref{nuevodificil} there is an automorphism $h$ of $Q'$ which is not induced by a power of $g^k$, and for which the underlying sets of $\alpha, \beta$ and $A_0$ are invariant. We extend it to an automorphism $\overline{h}$ of $Q$ by defining $\overline{h}(j)=h(j+4t)-4t$, where $t$ is such that $k|j+4t$. Then $\overline{h}$ is bijective, it is a morphism and leaves each $A_i$ invariant. It extends to an automorphism of $P$, say $g^r$. Since $g^r$ leaves $A_0$ invariant, then $k|r$, which implies that $h$ is induced by a power of $g^k$, a contradiction. \end{proof} \section{Weights and the lower bound} \label{sectionweights} Let $g$ be a permutation of order $n$ of a finite set $X$. Let $\alpha$ be a cycle in $g$ of length $l=p_1^{r_1}p_2^{r_2}\ldots p_k^{r_k}$, where the $p_i$ are distinct prime integers, $r_i\ge 1$ for every $i$. For each prime power $p^r$ we will define a weight $w_{p^r}(\alpha)\in \mathbb{R}_{\ge 0}$ which depends on $p^r,l$ and $n$, in such a way that $\sum\limits_{p^r} w_{p^r}(\alpha)p^r= l$, where the sum is taken over all prime powers dividing $n$. In particular $\# X\ge \sum\limits_{p^r\parallel n} (\sum\limits_{\alpha \in g} w_{p^r}(\alpha))p^r$. For each $l\ge 2$ we will assign the weight of every prime power $p^r$ in a cycle $\alpha$ of length $|\alpha|=l$ according to a series of rules. In every case, if the weight $w_{p^r}(\alpha)$ is not explicitly defined for some prime power, we assume it is $0$. \noindent \textbf{Exception 6}. Suppose $l=6$. If $3\parallel n$ then $w_3(\alpha)=2$. If $3\nparallel n$ and $2\parallel n$, then $w_{2}(\alpha)=3$. If $3\nparallel n$ and $2\nparallel n$, then $w_4(\alpha)=\frac{3}{2}$. \noindent \textbf{Exception 12}. Suppose $l=12$. If $3\parallel n$ then $w_3(\alpha)=4$. If $3\nparallel n$, then $w_{4}(\alpha)=3$. \noindent \textbf{Exception 10-14}. Suppose $l=2p$ for $p=5$ or $7$. If $2\parallel n$, $w_2(\alpha)=1$. Otherwise $w_4(\alpha)=\frac{1}{2}$. In any case $w_p(\alpha)=\frac{2(p-1)}{p}$. \noindent \textbf{General case}. Suppose $l=p_1^{r_1}p_2^{r_2}\ldots p_k^{r_k}\neq 6,12,10,14$, where the $p_i$ are different primes and each $r_i\ge 1$. For each $1\le i\le k$, we define $w_{p_i^{r_i}}(\alpha)=\frac{\prod\limits_{j\neq i} p_j^{r_j}}{k}$, unless $p_i^{r_i}=2$ and $2\nparallel n$. In that case, $w_2(\alpha)=0$, while $w_4(\alpha)=\frac{\prod\limits_{j\neq i} p_j^{r_j}}{2k}$. In particular, if $l=p^r\ge 3$ is a prime power, $w_{p^r}(\alpha)=1$. 
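The weight assignment above is elementary to evaluate by hand, but a quick machine check may reassure the reader. The following short Python sketch (an illustration only; it is not used anywhere in the proofs, and all function names are ad hoc) implements the rules above and verifies, for all divisors $l\ge 2$ of a few sample orders $n$, that $\sum_{p^r} w_{p^r}(\alpha)p^r=l$, as required.
\begin{verbatim}
from fractions import Fraction

def factor(m):
    # prime factorization of m as a dict {p: r}
    f, p = {}, 2
    while p * p <= m:
        while m % p == 0:
            f[p] = f.get(p, 0) + 1
            m //= p
        p += 1
    if m > 1:
        f[m] = f.get(m, 0) + 1
    return f

def exactly_divides(q, n):
    # q || n for a prime power q = p^r: q divides n but p^(r+1) does not
    p = min(factor(q))
    return n % q == 0 and n % (q * p) != 0

def weights(l, n):
    # weights w_{p^r}(alpha) of a cycle of length l in a permutation of
    # order n, following Exceptions 6, 12, 10-14 and the General case;
    # returned as a dict {prime power: weight}
    w = {}
    if l == 6:
        if exactly_divides(3, n):
            w[3] = Fraction(2)
        elif exactly_divides(2, n):
            w[2] = Fraction(3)
        else:
            w[4] = Fraction(3, 2)
    elif l == 12:
        if exactly_divides(3, n):
            w[3] = Fraction(4)
        else:
            w[4] = Fraction(3)
    elif l in (10, 14):
        p = l // 2
        if exactly_divides(2, n):
            w[2] = Fraction(1)
        else:
            w[4] = Fraction(1, 2)
        w[p] = Fraction(2 * (p - 1), p)
    else:
        f = factor(l)
        k = len(f)
        for p, r in f.items():
            q = p ** r
            if q == 2 and not exactly_divides(2, n):
                w[4] = Fraction(l // 2, 2 * k)   # weight moved from 2 to 4
            else:
                w[q] = Fraction(l // q, k)
    return w

# sanity check: sum over prime powers of w_{p^r}(alpha) * p^r equals l
for n in (12, 28, 60):
    for l in range(2, n + 1):
        if n % l == 0:
            assert sum(q * wq for q, wq in weights(l, n).items()) == l
\end{verbatim}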
Note that, as we required, the sum $\sum\limits_{p^r|n} w_{p^r}(\alpha)p^r$ over all the prime powers dividing $n$ is the length $l$ of $\alpha$. Note also that if $l=p_1^{r_1}p_2^{r_2}\ldots p_k^{r_k}$, then $w_{p^r}(\alpha)\neq 0$ only if $p^r=p_i^{r_i}$ for some $1\le i\le k$ or $p^r=4$. \begin{teo} \label{main} Let $n\ge 1$. Let $P$ be a poset with $\textrm{Aut} (P)$ cyclic of order $n$ generated by $g$. Let $p^r$ be a prime power which exactly divides $n$. If $p^r\neq 2,4$ then $\sum\limits_{\alpha \in g} w_{p^r}(\alpha)\ge b(p^r)$. If $3\nparallel n$ and $p^r=2$ or $p^r=4$, $\sum\limits_{\alpha \in g} w_{p^r}(\alpha)\ge b(p^r)$ as well. If $3\parallel n$ and $2\parallel n$, $\sum\limits_{\alpha \in g} (2w_{2}(\alpha)+3w_{3}(\alpha))\ge 2b(2)+3b(3)=11$. Finally, if $3\parallel n$ and $4\parallel n$, $\sum\limits_{\alpha \in g} (4w_{4}(\alpha)+3w_{3}(\alpha))\ge 4b(4)+3b(3)-1=20$. \end{teo} \begin{proof} If $p^r\neq 2,3,4,5,7$, by Lemma \ref{dos}, there are at least two cycles of length divisible by $p^r$. By hypothesis their lengths are not multiples of $p^{r+1}$. But if $\alpha$ is a cycle of $g$ whose length is a multiple of $p^r$, then $w_{p^r}(\alpha)\ge 1$. Indeed, the weights in $\alpha$ are assigned according to the General case. If the length of $\alpha$ is $l=p_1^{r_1}p_2^{r_2}\ldots p_k^{r_k}$, we can assume $p^r=p_1^{r_1}$ and then $w_{p^r}(\alpha)=\frac{\prod\limits_{j=2}^k p_j^{r_j}}{k}\ge \frac{2^{k-1}}{k}\ge 1$. Thus, $\sum\limits_{\alpha \in g} w_{p^r}(\alpha)\ge 2= b(p^r)$. Suppose now $p^r=5$. If $\alpha$ is a cycle of $g$ of length $l=5$, then $w_5(\alpha)=1$. If $l=10$, then $w_5(\alpha)=\frac{8}{5}\ge \frac{3}{2}$ (Exception 10-14). If $l=5s$ with $s=p_2^{r_2}p_3^{r_3}\ldots p_k^{r_k}\ge 3$ not divisible by $5$, then either $k=2$, or $k\ge 3$. In the first case $w_5(\alpha)=\frac{s}{2}\ge \frac{3}{2}$, and in the second case $w_5(\alpha)=\frac{\prod\limits_{j=2}^k p_j^{r_j}}{k}\ge \frac{2^{k-2}.3}{k}\ge 2\ge \frac{3}{2}$. By Lemma \ref{dos}, there are at least two cycles of length divisible by $5$ (and not by $5^2$). Suppose first that there are exactly two such cycles, $\alpha$ and $\alpha'$. None of them can be of length $5$ by Lemma \ref{lema357}. Thus $w_5(\alpha)+w_5(\alpha')\ge 2. \frac{3}{2}= 3=b(5)$. Finally, if there are at least three cycles in $g$ of length divisible by $5$, then $\sum\limits_{\alpha \in g} w_{5}(\alpha)\ge 3= b(5)$. The case $p^r=7$ is similar to the previous one, with the observation that for length $l=14$, $w_7(\alpha)=\frac{12}{7}\ge \frac{3}{2}$ (Exception 10-14). So, also in this case $\sum\limits_{\alpha \in g} w_{7}(\alpha)\ge 3= b(7)$. Let $p^r=3$. If the length of a cycle $\alpha$ in $g$ is $l=3$, $w_3(\alpha)=1$. If $l=6$, $w_3(\alpha)=2$ (Exception 6). If $l=12$, $w_3(\alpha)=4$ (Exception 12). If $l=3s$ with $s=p_2^{r_2}p_3^{r_3}\ldots p_k^{r_k}\ge 5$, then either $k=2$, or $k\ge 3$. In the first case $w_3(\alpha)=\frac{s}{2}\ge \frac{5}{2}$, and in the second case $w_3(\alpha)=\frac{\prod\limits_{j=2}^k p_j^{r_j}}{k}\ge \frac{2^{k-2}.3}{k}\ge 2$. By Lemma \ref{dos} there are at least two cycles in $g$ of length divisible by $3$ (and not by $3^2$). Suppose first there are exactly two such cycles, $\alpha$ and $\alpha'$. None of them can have length $3$ by Lemma \ref{lema357}. Then $w_3(\alpha)+w_3(\alpha')\ge 2.2=4\ge 3=b(3)$. Finally, if there are at least three cycles in $g$ of length divisible by $3$, then $\sum\limits_{\alpha \in g} w_{3}(\alpha)\ge 3= b(3)$.
Note that $\sum\limits_{\alpha \in g} w_{3}(\alpha)\ge 4$ unless there are exactly three cycles of length $3$ and no other cycle of length divisible by $3$. We analyze now the case that $3\nparallel n$ and $p^r=2$ or $4$. In the first situation, there is at least one cycle $\alpha$ of even length $l$ (not divisible by $4$). If $l=2$, $w_2(\alpha)=1$ (General case). If $l=6$, $w_2(\alpha)=3$ (Exception 6). If $l=10$ or $l=14$, then $w_2(\alpha)=1$ (Exception 10-14). If $l=2s$ with $s=p_2^{r_2}p_3^{r_3}\ldots p_k^{r_k}\neq 1,3,5,7$ (odd), then $w_2(\alpha)=\frac{\prod\limits_{j=2}^k p_j^{r_j}}{k}\ge \frac{3^{k-1}}{k}\ge \frac{3}{2}$. Thus $\sum\limits_{\alpha \in g} w_{2}(\alpha)\ge 1= b(2)$. We consider the second situation, $p^r=4$. If $\alpha$ has length $l=4$, then $w_4(\alpha)=1$. If $l=12$, $w_4(\alpha)=3$ (Exception 12). If $l=4s$ with $s=p_2^{r_2}p_3^{r_3}\ldots p_k^{r_k}\ge 5$ (odd), then $k=2$ or $k\ge 3$. For $k=2$ we have $w_4(\alpha)=\frac{s}{2}\ge \frac{5}{2}$. For $k\ge 3$, $w_4(\alpha)\ge \frac{3^{k-1}}{k}\ge 3$. By Lemma \ref{dos}, $g$ contains at least two cycles of lengths divisible by $4$ (and not by $8$). Suppose first there are exactly two such cycles, $\alpha$ and $\alpha'$, of lengths $l,l'$. If $l=l'=4$, then by Lemma \ref{lema4}, there exists a third and a fourth cycle $\beta, \beta'$ of lengths $2m$ and $2m'$ for some odd $m,m'$. The weights $w_4(\beta)$ that we obtain for each $m$ are the halves of the weights that we obtained for $2$ in cycles of the same length when $2\parallel n$. Namely, if $m=1$, $w_4(\beta)=\frac{1}{2}$ (General case); if $m=3$, $w_4(\beta)=\frac{3}{2}$ (Exception 6); if $m=5,7$, $w_4(\beta)=\frac{1}{2}$ (Exception 10-14); if $m=p_2^{r_2}p_3^{r_3}\ldots p_k^{r_k}\neq 1,3,5,7$ then $w_4(\beta)\ge \frac{3^{k-1}}{2k}\ge \frac{3}{4}$ (General case). The same happens with $\beta'$. Thus $w_4(\alpha)+w_4(\alpha')+w_4(\beta)+w_4(\beta')\ge 1+1+\frac{1}{2}+\frac{1}{2}=3=b(4)$. If instead $l=4$ and $l'=12$, then $w_4(\alpha)+w_4(\alpha')=1+3=4> 3$. If $l=4$ and $l'=4s$ for some odd $s\ge 5$, then $w_4(\alpha)+w_4(\alpha')\ge 1+\frac{5}{2}>3$. If both $l$ and $l'$ are greater than $4$, then $w_4(\alpha)+w_4(\alpha')\ge \frac{5}{2}+\frac{5}{2}>3$. Finally, if there are at least three cycles of length divisible by $4$, then $\sum\limits_{\alpha \in g} w_4(\alpha)\ge 3$. Thus, in any case $\sum\limits_{\alpha \in g} w_4(\alpha)\ge 3=b(4)$. It only remains to analyze the case $3\parallel n$ and $2\parallel n$ and the case $3\parallel n$ and $4\parallel n$. If $3\parallel n$ and $2\parallel n$, recall that we have already proved that $\sum\limits_{\alpha \in g} w_{3}(\alpha)\ge 4$ or there are exactly three cycles of length $3$ and no other cycle of length divisible by $3$. In the first case $\sum\limits_{\alpha \in g} (2w_{2}(\alpha)+3w_{3}(\alpha))\ge \sum\limits_{\alpha \in g} 3w_{3}(\alpha)\ge 12$. In the second case, there exists a cycle $\beta$ in $g$ of even length $m\neq 6$, so $w_2(\beta)\ge 1$. Thus $\sum\limits_{\alpha \in g} (2w_{2}(\alpha)+3w_{3}(\alpha))\ge 2.1+3.3= 11$. The last case is $3\parallel n$ and $4\parallel n$. Note that if there are no cycles of length $6$ nor $12$ in $g$, then the computation $\sum\limits_{\alpha \in g} w_4(\alpha)\ge 3$ remains valid as Exceptions 6 and 12 do not occur. Thus $\sum\limits_{\alpha \in g} (3w_{3}(\alpha)+4w_{4}(\alpha))\ge 3.3+4.3=21>20$. If there are at least two $12$-cycles, then $\sum\limits_{\alpha \in g} (3w_{3}(\alpha)+4w_{4}(\alpha))\ge 2.3.4=24>20$. 
If there is no $12$-cycle in $g$ and $\sum\limits_{\alpha \in g} w_4(\alpha)< 3$, then we must be in the case that there is a $6$-cycle. This already implies $\sum\limits_{\alpha \in g} w_3(\alpha)\ge 4$, while the existence of two cycles of length divisible by $4$ implies $\sum\limits_{\alpha \in g} w_4(\alpha)\ge 2$. Thus $\sum\limits_{\alpha \in g} (3w_{3}(\alpha)+4w_{4}(\alpha))\ge 3.4+4.2=20$. Thus we may assume $g$ has a unique $12$-cycle. By Lemma \ref{dos} there is another cycle of length divisible by $4$, so $\sum\limits_{\alpha \in g} w_4(\alpha)\ge 1$. On the other hand, $\sum\limits_{\alpha \in g} w_{3}(\alpha)\ge 4+2=6$, as the weight of $3$ in a $12$-cycle is $4$ and by Lemmas \ref{dos} and \ref{lema357} there are either two more cycles of lengths divisible by $3$ or just one, but of length not $3$. Thus $\sum\limits_{\alpha \in g} (3w_{3}(\alpha)+4w_{4}(\alpha))\ge 3.6+4.1=22>20$. \end{proof} \begin{coro} Let $n=p_1^{r_1}p_2^{r_2}\ldots p_k^{r_k}$, where the $p_i$ are different primes and $r_i\ge 1$ for every $i$. Then the minimum number $\beta(\mathbb{Z}_n)$ of points in a poset with cyclic automorphism group of order $n$ is $\sum\limits_{i=1}^k b(p_i^{r_i})p_i^{r_i}-1$ if $3\parallel n$ and $4\parallel n$, and $\sum\limits_{i=1}^k b(p_i^{r_i})p_i^{r_i}$ otherwise. \end{coro} \begin{proof} If $P$ is a poset with $\textrm{Aut} (P)\simeq \mathbb{Z}_n$ generated by $g$, then the number of points in $P$ is at least $\sum\limits_{\alpha \in g} |\alpha|=$ $\sum\limits_{\alpha \in g} \sum\limits_{p^r|n}w_{p^r}(\alpha)p^r\ge \sum\limits_{i=1}^k (\sum\limits_{\alpha \in g} w_{p_i^{r_i}}(\alpha))p_i^{r_i}$. If both $3$ and $4$ exactly divide $n$, by Theorem \ref{main} this is $\sum\limits_{p_i^{r_i}\neq 3,4} (\sum\limits_{\alpha \in g} w_{p_i^{r_i}}(\alpha))p_i^{r_i}+\sum\limits_{\alpha \in g} (3w_{3}(\alpha)+4w_4(\alpha))\ge \sum\limits_{p_i^{r_i}\neq 3,4} b(p_i^{r_i})p_i^{r_i}+3b(3)+4b(4)-1=\sum\limits_{i=1}^k b(p_i^{r_i})p_i^{r_i}-1$. Otherwise, the bound is one more than this number. The bound is attained by Theorem \ref{teoejemplos}. \end{proof} \end{document}
\begin{document} \sloppy \maketitle {\bf Abstract.} We suggest a way to quantize, using Berezin-Toeplitz quantization, a compact hyperk\"ahler manifold (equipped with a natural $3$-plectic form), or a compact integral K\"ahler manifold of complex dimension $n$ regarded as a $(2n-1)$-plectic manifold. We show that quantization has reasonable semiclassical properties. \section{Introduction} (Berezin-)Toeplitz quantization, while interesting to study by itself, also has turned out to be a useful tool in several areas of mathematics. Over the years it was found to have applications to deformation quantization (see e.g. \cite {schlich:00}, \cite{karabegov:01}), to study of the Hitchin connection and TQFT (work of J. Andersen, see in particular \cite{andersen:06}, \cite{andersen:10}), L. Polterovich's work on rigidity of Poisson brackets \cite{polterovich:12}, and work of Y. Rubinstein and S. Zelditch \cite{rubins:12} on homogeneous complex Monge-Amp\`ere equation, in connection to geodesics on the space of K\"ahler metrics. T. Foth (T. Barron) and A. Uribe applied Berezin-Toeplitz quantization to give another proof of Donaldson's "scalar curvature is a moment map" statement \cite{foth:07}. In this paper we discuss how to use Berezin-Toeplitz quantization to quantize hyperk\"ahler manifolds or two types of multisymplectic manifolds. Geometric quantization and K\"ahler/Berezin-Toeplitz quantization associate a Hilbert space (say, $\mathcal{H}$) and operators on it to a symplectic manifold $(M,\omega)$. In physics' terminology this is a way to pass from classical Hamiltonian mechanics to a quantum system. Let $C^{\infty}(M)$ denote the space of complex-valued smooth functions on $M$. Quantization is a linear map $C^{\infty}(M)\to \{ \text{operators on} \ {\mathcal{H}}\}$, $f\mapsto \hat{f}$, satisfying a version of Dirac's quantization conditions: $1\mapsto const(\hbar)I$, $\{ f,g\} \mapsto const(\hbar)[\hat{f},\hat{g}]$. It is probably fair to say that geometric quantization was developed and mainstreamed in the 1950s-1960s, by representation theorists, including Kostant, Kirillov and others, whose primary agenda was to look for representations of infinite-dimensional Lie algebras with certain properties, and who found this language to be quite convenient. Berezin-Toeplitz quantization can be regarded as a version of geometric quantization. In the case when the symplectic manifold is, moreover, K\"ahler, it is also referred to as K\"ahler quantization. The groundwork for Berezin-Toeplitz quantization was laid in \cite{berezin:74}, \cite{boutet:81}. Well-known Theorem \ref{bmstheorem}(i) below shows that in the framework of Berezin-Toeplitz quantization the $\{ .,.\} \leadsto [.,.]$ quantization condition is satisfied in the semiclassical limit $\hbar=\frac{1}{k}\to 0$, which is essentially the best one can get, due to various no-go theorems. There are physical systems whose behaviour is encoded by an $m$-{\it plectic} form on $M$ (i.e. a closed non-degenerate $m+1$-form), $\Omega$, for $m\ge 1$. The case $m=1$ is when $\Omega$ is symplectic. Specific examples from physics, with $m\ge 2$, are discussed in \cite{nambu:73}, \cite{chat:96}, \cite{baez:10}. See also discussion and references in \cite{cantr:99}. Multisymplectic geometry has been thoroughly studied by mathematicians. See, in particular, \cite{martin:88}, \cite{cantr:99}, \cite{madsen:12}, \cite{bur:13}, \cite{takhtajan:94}, \cite{baez:10}, \cite{baezrog:10}, \cite{rogers:12}. 
There has been extensive discussion of {\it quantization} of $n$-plectic manifolds in physics literature, and substantial amount of work has been done by mathematicians too. See, for example, \cite{nambu:73}, \cite{takhtajan:94}, \cite{chat:96}, \cite{dito:97}, \cite{curtr:03}, \cite{curtr:04}, \cite{debellis:10}, \cite{samann:13}, \cite{rogers:13}, \cite{vaisman:99}. Work of C. Rogers \cite{rogers:13} addresses quantization of $2$-plectic manifolds. It seems that the appropriate quantum-mechanical setting there involves a category, instead of a vector space, and intuitively this makes sense because an (integral) $2$-plectic form corresponds to a gerbe and sections of a gerbe form a category, not a vector space. There have been attempts, informally speaking, "to embed a multisymplectic physical system into Hamiltonian system" \cite{bayen:75}, \cite{mukunda:76}, \cite{debellis:10}. As far as we know, there is no known canonical way of doing this. DeBellis, S\"amann and Szabo \cite{debellis:10} used Berezin-Toeplitz quantization for multisymplectic spheres via embedding them in a certain explicit way into complex projective spaces ${\mathbb{CP}}^{q}$ and using Berezin-Toeplitz quantization on ${\mathbb{CP}}^{q}$. This is somewhat related to our results in Section \ref{volumeform}, only for $M=S^2$ (because among spheres only $S^2$ admits a K\"ahler form). Let $(M,\omega)$ be a compact connected integral K\"ahler manifold of complex dimension $n$. In this paper we are looking into two situations when the $m$-plectic form $\Omega$ on $(M,\omega)$ is constructed from the K\"ahler form (or forms): (I) $m=2n-1$, $\Omega = \frac{\omega^n}{n!}$ (II) $M$ is, moreover, hyperk\"ahler, $m=3$, $$ \Omega = \omega_1\wedge \omega_1 + \omega_2\wedge \omega_2 + \omega_3\wedge \omega_3 $$ where $\omega_1=\omega, \omega_2, \omega_3$ are the three K\"ahler forms on $M$ given by the hyperk\"ahler structure. It is well-known (and easy to prove) that a volume form on an oriented $N$-dimensional manifold is an $(N-1)$-plectic form, and that the $4$-form above is a $3$-plectic form on a hyperk\"ahler manifold. See, for example, \cite{cantr:99}, \cite{rogers:12}. It is intuitively clear that in these two cases the classical multisymplectic system is essentially built from Hamiltonian system(s) and it should be possible to quantize $(M,\Omega)$ using the (Berezin-Toeplitz) quantization of $(M,\omega)$. We discuss case (I) in section \ref{volumeform}, case (II) in section \ref{hyperkahler}. Semiclassical asymptotics are the content of Theorems \ref{thvolform}, \ref{thhyperk}, \ref{thdim4}, \ref{tensorth}, Propositions \ref{commvolform}, \ref{commdim4}, \ref{tensorprop}, \ref{tensorprop4}, Corollary \ref{tensorcorcomm2}. In both cases there are natural multisymplectic analogues of the Poisson bracket and the commutator: an almost Poisson bracket $\{ .,...,.\}$ and the generalized commutator $[.,...,.]$. Our discussion mainly revolves around the $\{ .,...,.\} \leadsto [.,...,.]$ quantization condition. The main result of section \ref{volumeform} is Theorem \ref{thvolform}. It is an analogue, for brackets of order $2n$, of well-known Theorem \ref{bmstheorem}(i) (and of its $C^l$ analogue ($l\in \ensuremath{{\mathbb N}})$ from \cite{barron:14}). In section \ref{hyperkahler} we work on a hyperk\"ahler manifold $M$. 
For a smooth function $f$ on $M$ we have three Berezin-Toeplitz operators $T_{f;1}^{(k)}$, $T_{f;2}^{(k)}$, $T_{f;3}^{(k)}$, and to four smooth functions $f,g,h,t$ on $M$ we associate three brackets of order $4$: $\{ f,g,h,t\} _r$, $r=1,2,3$. In subsection \ref{directsumgen} we show that the direct sum of generalized commutators is asymptotic to $$ T_{\{ f,g,h,t\} _1;1 }^{(k)} \oplus T_{\{ f,g,h,t\} _2;2 }^{(k)} \oplus T_{\{ f,g,h,t\} _3;3 }^{(k)} $$ (Theorem \ref{thhyperk}). In subsection \ref{directsum4} we show that the attempt to formulate everything on {\it one} vector space (not three), by taking direct sums, goes through all the way in the case when $M$ is the $4$-torus with three linear complex structures, where we get a straightforward analogue of Theorem \ref{bmstheorem}(i); see Example \ref{R4}, (\ref{asymptorus}). In subsection \ref{tensorproduct} we take the tensor product of the three operators instead. The tensor product of generalized commutators is asymptotic to $$ T_{\{ f,g,h,t\} _1;1 }^{(k)} \otimes T_{\{ f,g,h,t\} _2;2 }^{(k)} \otimes T_{\{ f,g,h,t\} _3;3 }^{(k)} $$ (Proposition \ref{tensorprop4}). Asymptotic properties of commutators and generalized commutators of the operators $\ensuremath{{\mathbb{T}}} ^{(k)}_f= T_{f;1}^{(k)}\otimes T_{f;2}^{(k)}\otimes T_{f;3}^{(k)}$ are captured in Prop. \ref{tensorprop} and Theorem \ref{tensorth}. We note that while, for simplicity, the exposition throughout the paper is for $C^{\infty}$ symbols, all our results hold, in fact, for $C^4$ symbols. To modify the proofs in order to get the same statements for $C^4$ symbols, the estimates from \cite{bordemann:94} should be replaced by estimates from \cite{barron:14}; see subsection \ref{nonsmoothsymb}. Results from \cite{barron:14} allow us to tackle the case of $C^2$ and $C^3$ symbols as well, but we do not include the corresponding version of our results (the asymptotics will differ from the $C^{\infty}$ case). This paper is part of the Ph.D. thesis of the second author, who is co-supervised by the first author and M. Pinsonnault. {\bf Acknowledgements.} We are thankful to G. Denham, M. Gualtieri, B. Hall, N. Lemire, A. Uribe, and K. Yoshikawa for brief related discussions, and to X. Ma and G. Marinescu for comments. We are grateful to M. Pinsonnault for many questions and comments. We appreciate the referee's suggestions, which helped improve the exposition in the paper.
\section{Preliminaries} \subsection{Some notations and definitions} Throughout the paper we shall use the following notations:
\begin{itemize}
\item $S_n$, for a positive integer $n$, will denote the symmetric group (i.e. the group of permutations of $1,...,n$);
\item for a finite-dimensional complex vector space $V$ and $A,B\in \End(V)$, $[A,B]=AB-BA$;
\item $I$ will denote the identity operator on $V$;
\item if $V$ is equipped with a norm, then $||A||$ will denote the operator norm of $A$;
\item $C^{\infty}(M)$ will denote the algebra of smooth complex-valued functions on a smooth manifold $M$;
\item for $f\in C^\infty (M)$ we write $|f|_{\infty}=\sup _{x\in M}|f(x)|$.
\end{itemize}
\begin{defn} An $(m+1)$-form $\Omega$ on a smooth manifold $M$ is called an {\bf $m$-plectic form} if it is closed (i.e. $d\Omega =0$) and non-degenerate (i.e. if $v\in T_xM$ and $v\lrcorner \Omega _x=0$, then $v=0$). If $\Omega$ is an $m$-plectic form on $M$, then $(M,\Omega )$ is called a {\bf multisymplectic}, or {\bf $m$-plectic}, manifold. \end{defn} \begin{defn}(\cite{takhtajan:94}, \cite{gautheron:96}) Let $M$ be a smooth manifold.
A multilinear map $$ \{ .,...,.\} :(C^{\infty}(M))^{\otimes j}\to C^{\infty}(M) $$ is called a {\bf Nambu-Poisson bracket} or {\bf (generalized) Nambu bracket of order $j$} if it satisfies the following properties: \begin{itemize} \item (skew-symmetry) $\{ f_1,...,f_j\} = \sign (\sigma )\{ f_{\sigma (1)},...,f_{\sigma (j)}\}$ for all $f_1,...,f_j\in C^{\infty}(M)$ and for all $\sigma\in S_j$, \item (Leibniz rule) $\{ f_1,...,f_{j-1}, g_1g_2\} = \{ f_1,...,f_{j-1}, g_1\} g_2+g_1 \{ f_1,...,f_{j-1},g_2\}$ for all $f_1,...,f_{j-1}, g_1,g_2\in C^{\infty}(M)$, \item (Fundamental Identity) $$ \{ f_1,...,f_{j-1},\{ g_1,..., g_j\} \} = \sum _{i=1}^j\{ g_1,...,\{ f_1,...,f_{j-1}, g_i\} ,...,g_j\} , $$ for all $f_1,...,f_{j-1}, g_1,...,g_j \in C^{\infty}(M)$. \end{itemize} \end{defn} It is natural to ask how to generalize the Hamiltonian formalism of symplectic geometry to the multisymplectic setting. We do not need the full multisymplectic formalism for the purposes of this paper, and we refer the reader to \cite{takhtajan:94}, \cite{helein:04}, \cite{rogers:12}. \begin{defn}\label{defgpb} (\cite{azcar:96}, \cite{azcar:10}) Let $M$ be a smooth manifold and suppose $j$ is an even positive integer. A multilinear map $$ \{ .,...,.\} :(C^{\infty}(M))^{\otimes j}\to C^{\infty}(M) $$ is called a {\bf generalized Poisson bracket} if it satisfies the following properties: \begin{itemize} \item (skew-symmetry) $\{ f_1,...,f_j\} = \sign (\sigma )\{ f_{\sigma (1)},...,f_{\sigma (j)}\}$ for all $f_1,...,f_j\in C^{\infty}(M)$ and for all $\sigma\in S_j$, \item (Leibniz rule) $\{ f_1,...,f_{j-1}, g_1g_2\} = \{ f_1,...,f_{j-1}, g_1\} g_2+g_1 \{ f_1,...,f_{j-1},g_2\}$ for all $f_1,...,f_{j-1}, g_1,g_2\in C^{\infty}(M)$, \item (Generalized Jacobi Identity) $$ \mbox{Alt}\{ f_1,...,f_{j-1},\{ f_j,..., f_{2j-1}\} \} = $$ $$ \sum _{\sigma\in S_{2j-1}} \sign(\sigma) \{ f_{\sigma(1)},...,f_{\sigma(j-1)},\{ f_{\sigma(j)},..., f_{\sigma(2j-1)}\} \} =0 $$ for all $f_1,...,f_{2j-1} \in C^{\infty}(M)$. \end{itemize} \end{defn} \begin{defn} (\cite{ibanez:97}) A bracket as in Definition \ref{defgpb} satisfying only the first two conditions (skew-symmetry and Leibniz rule) is called an {\bf almost Poisson bracket of order $j$}. \end{defn} \begin{remark} A Nambu-Poisson bracket of even order is a generalized Poisson bracket \cite{ibanez:97}. \end{remark} \subsection{Generalized commutator} Let $[.,...,.]$ denote the Nambu generalized commutator (\cite{nambu:73}, \cite{takhtajan:94}, \cite{chat:96}): for a finite-dimensional complex vector space $V$ and $A_1,...,A_{2n}\in \End(V)$ $$ [A_1,...,A_{2n}]=\sum_{\sigma\in S_{2n}} \sign(\sigma )A_{\sigma(1)}...A_{\sigma(2n)} . $$ For example, for $n=2$ $$ [A_1,A_2,A_3,A_4]=\sum_{\sigma\in S_4} \sign(\sigma)A_{\sigma(1)}A_{\sigma(2)}A_{\sigma(3)} A_{\sigma(4)} = $$ \begin{equation} \label{comm4} \begin{split} [A_1,A_2][A_3,A_4] - [A_1,A_3][A_2,A_4] + [A_1,A_4][A_2,A_3]+ \\ [A_3,A_4][A_1,A_2] - [A_2,A_4][A_1,A_3] + [A_2,A_3][A_1,A_4] . \end{split} \end{equation} The bracket $[.,.,.,.]$ defines a map $\bigwedge^4 \End(V)\to \End(V)$ which satisfies neither the Leibniz rule nor the Fundamental Identity. There has been some discussion of this in the physics literature (e.g. \cite{curtr:03}), where the prevailing view seems to be that requiring these two conditions is not necessary. There has been investigation into the algebraic properties of this bracket; see e.g.
\cite{curtr:09} and \cite{azcar:10}, where some ideas go back to \cite{bremner:98}, \cite{filippov:85}, and earlier work by Kurosh and his school. Let us denote, for convenience, $$ \sideset{}{'} \sum _{\sigma\in S_{2n}} =\sum_{\substack{{\sigma\in S_{2n}}\\ { \sigma (1)<\sigma(2),...,} \\ {\sigma(2n-1)<\sigma(2n)}} } . $$ \begin{lemma} \label{lemcomm1} $$ [A_1,...,A_{2n}]=\sideset{}{'} \sum _{\sigma\in S_{2n}} \sign (\sigma ) [A_{\sigma(1)},A_{\sigma(2)}][A_{\sigma(3)},A_{\sigma(4)}]... [A_{\sigma(2n-1)},A_{\sigma(2n)}] . $$ \end{lemma} \noindent {\bf Proof.} Each monomial from the left-hand side appears on the right-hand side exactly once, with the same sign, and every monomial produced by the right-hand side appears on the left-hand side. Therefore the two expressions are identical. $\Box$ \begin{lemma} \label{lemcomm2} $$ [A_1,...,A_{2n}]=\frac{1}{2^n} \sum _{\sigma\in S_{2n}} \sign (\sigma ) [A_{\sigma(1)},A_{\sigma(2)}][A_{\sigma(3)},A_{\sigma(4)}]... [A_{\sigma(2n-1)},A_{\sigma(2n)}] . $$ \end{lemma} \noindent {\bf Proof.} By straightforward comparison of the polynomials. Observe that each monomial from the left-hand side appears in the sum on the right-hand side exactly $2^n$ times, with the appropriate sign, and this accounts for all the terms on the right-hand side. $\Box$ \begin{remark} Equality (\ref{comm4}) is (93) in \cite{curtr:03}. It is not hard to see that Lemma \ref{lemcomm2} is equivalent to (94) in \cite{curtr:03}. \end{remark} \subsection{Berezin-Toeplitz operators} Suppose $(M,\omega )$ is a compact connected K\"ahler manifold and that the form $\frac{\omega}{2\pi}$ is integral. Let $L$ be a holomorphic hermitian line bundle such that the curvature of the Chern connection is $-i\omega$. Let $k$ be a positive integer. The space $H^0(M,L^{\otimes k})$ of holomorphic sections of $L^{\otimes k}$ is a finite-dimensional complex vector space. Let $\Pi_k$ denote the orthogonal projection from $L^2(M,L^{\otimes k})$ onto $H^0(M,L^{\otimes k})$ (the Hermitian inner product is obtained from the hermitian metric on $L$). \subsubsection{Smooth symbol} \label{smoothsymb} The reference used throughout this subsection is \cite{bordemann:94}, where the method is based on the analysis of Toeplitz structures from \cite{boutet:81}. The results mentioned here, and a more extensive discussion, can be found in surveys on Berezin-Toeplitz quantization, for example in \cite{schlich:10}. For $f\in C^{\infty}(M)$ the operator $$ T_f^{(k)}=\Pi_k\circ (\text{multiplication by } f)\in \End ( H^0(M,L^{\otimes k}) ) , $$ or the operator $\oplus_k T_f^{(k)}$, is called the {\it Berezin-Toeplitz operator for $f$}. Here are some properties of these operators that will be most frequently used in this paper. For $\alpha,\beta\in \ensuremath{\mathbb C}$ and $f,g\in C^{\infty}(M)$ $$ T_{\alpha f+\beta g}^{(k)}=\alpha T_f^{(k)}+\beta T_g^{(k)} . $$ \begin{theorem}[\cite{bordemann:94} Th. 4.1, 4.2; \cite{ma:07}, \cite{ma:08}] \label{bmstheorem} For $f,g\in C^{\infty}(M)$, as $k\to\infty$, \begin{itemize} \item[(i)] $$ ||ik[T_f^{(k)},T_g^{(k)}]-T_{\{ f,g\} }^{(k)}||=O(\frac{1}{k}), $$ \item[(ii)] there is a constant $C=C(f)>0$ such that $$ |f|_{\infty}-\frac{C}{k}\le ||T_f^{(k)}||\le |f|_{\infty} . $$ \end{itemize} \end{theorem} \begin{proposition}[\cite{bordemann:94} p. 291] \label{bmsprop} For $f_1,...,f_p\in C^{\infty} (M)$ $$ ||T_{f_1}^{(k)}...T_{f_p}^{(k)}-T_{f_1...f_p}^{(k)}||=O(\frac{1}{k}) $$ as $k\to\infty$. \end{proposition} \begin{proposition}[\cite{bordemann:94} p.
289] \label{propcomm} For $f,g\in C^{\infty}(M)$ $$ \lim_{k\to \infty} ||[T_f^{(k)},T_g^{(k)}]||=0. $$ \end{proposition} \begin{remark} \label{remarkcomm} The proof of this proposition (it is one line: use Theorem \ref{bmstheorem} and the triangle inequality) actually shows that $$ ||[T_f^{(k)},T_g^{(k)}]||=O(\frac{1}{k}) $$ as $k\to\infty$. \end{remark} \subsubsection{$C^l$ symbol} \label{nonsmoothsymb} The reference for theorems analogous to those above in subsection \ref{smoothsymb}, with $f\in C^l(M)$, is \cite{barron:14}. In \cite{barron:14} the method is different from \cite{bordemann:94}. It relies on techniques developed in \cite{ma:07}, \cite{ma:08}, see also \cite{ma:11}. For $l=4$ statements similar to Theorem \ref{bmstheorem}, Prop. \ref{bmsprop} follow from Cor. 4.5, Remark 5.7(b), Cor. 4.4 of \cite{barron:14}. The fact that for $f,g\in C^4(M)$ $||[T_f^{(k)},T_g^{(k)}]||=O(\frac{1}{k})$ as $k\to\infty$ easily follows as well, from Cor. 4.5 and Remark 5.7(b) of \cite{barron:14}. \section{Quantization of the $(2n-1)$-plectic structure on an $n$-dimensional K\"ahler manifold} \label{volumeform} Let $(M,\omega)$ be a compact connected $n$-dimensional K\"ahler manifold ($n\ge 1$). We shall denote by $\{ .,.\}$ the Poisson bracket for $\omega$. Assume that the K\"ahler form $\frac{\omega}{2\pi}$ is integral. Let $L$ be a hermitian holomorphic line bundle on $M$ such that the curvature of the Chern connection is equal to $-i \omega$. It is clear that the volume form $\Omega = \frac{\omega^n}{n!}$ is a $(2n-1)$-plectic form. The bracket $\{ .,...,.\} :\bigwedge ^{2n}C^{\infty}(M)\to C^{\infty}(M)$ defined by $$ df_1\wedge ...\wedge df_{2n}=\{ f_1,...,f_{2n}\} \Omega $$ is a Nambu-Poisson bracket \cite[Cor. 1 p. 106]{gautheron:96}. \begin{lemma} \label{lembrackets} For $f_1,...,f_{2n}\in C^{\infty}(M)$ \begin{equation} \label{eqfunbrackets} \{ f_1,...,f_{2n}\} = \frac{1}{2^n n!}\sum_{\sigma\in S_{2n}}\sign (\sigma) \prod_{j=1}^n \{ f_{\sigma(2j-1)}, f_{\sigma(2j)}\} \end{equation} \end{lemma} \begin{remark} In particular, for $n=2$ $$ \{f_1,f_2,f_3,f_4\} =\{ f_1,f_2\} \{ f_3,f_4\} - \{ f_1,f_3\} \{ f_2,f_4\} + \{ f_1,f_4\} \{ f_2,f_3\} . $$ \end{remark} \begin{remark} For $M=\ensuremath{\mathbb R} ^{2n}$ with the standard symplectic form, equality (\ref{eqfunbrackets}) is (7) in \cite{curtr:03}. \end{remark} \noindent {\bf Proof of Lemma \ref{lembrackets}.} Let us use the Darboux theorem and compare the left-hand side and the right-hand side of (\ref{eqfunbrackets}) in a local chart with coordinates $x_1$,...,$x_{2n}$ such that in this chart $\omega = \sum_{j=1}^n dx_{2j-1}\wedge dx_{2j}$. Locally, in this chart, the Poisson bracket of $f_i$, $f_l$, for $i,l\in \{ 1,...,2n\}$, is $$ \{ f_i,f_l\} = \sum _{j=1}^n(\frac{\partial f_i}{\partial x_{2j-1}} \frac{\partial f_l}{\partial x_{2j}}- \frac{\partial f_i}{\partial x_{2j}} \frac{\partial f_l}{\partial x_{2j-1}} ) $$ and $\{ f_1,...,f_{2n}\} =\det J$, where $J=(\frac{\partial f_i}{\partial x_l})$. $\det$ is the only function on $(2n)\times (2n)$ complex matrices which takes the value $1$ on the identity matrix, is linear in the rows, and takes the value zero on any matrix whose two adjacent rows are equal (axiomatic characterization of the determinant, see e.g. Theorem 1.3.(3.14) in \cite{artin:91}). The right-hand side of (\ref{eqfunbrackets}) is a polynomial in the entries of $J$ that satisfies these three conditions, and therefore it must be equal to $\det J$. $\Box$ The following theorem shows that, informally speaking, $\{ .,...,.\} \leadsto [.,...,.]$ as $k\to\infty$.
\begin{theorem} \label{thvolform} For $f_1,...,f_{2n}\in C^{\infty} (M)$ $$ ||\frac{(ik)^n}{n!}[T_{f_1}^{(k)},...,T_{f_{2n}}^{(k)}]-T_{\{ f_1,...,f_{2n}\} }^{(k)}|| = O(\frac{1}{k}) $$ as $k\to\infty$. \end{theorem} \noindent {\bf Proof.} By Theorem \ref{bmstheorem} (i) \begin{equation} \label{asympcomm} ||ik[T_{f_{2j-1}}^{(k)},T_{f_{2j}}^{(k)}]-T_{\{ f_{2j-1},f_{2j}\} }^{(k)}||=O(\frac{1}{k}) \end{equation} for $j=1,...,n$. Using Prop. \ref{bmsprop} and the triangle inequality, we get: $$ ||(ik)^n[T_{f_1}^{(k)},T_{f_2}^{(k)}]...[T_{f_{2n-1}}^{(k)},T_{f_{2n}}^{(k)}] -T_{\{ f_1,f_2\} ...\{ f_{2n-1},f_{2n}\} }^{(k)}|| \le $$ $$ ||(ik)^n[T_{f_1}^{(k)},T_{f_2}^{(k)}]...[T_{f_{2n-1}}^{(k)},T_{f_{2n}}^{(k)}] -T_{\{ f_1,f_2\} } ^{(k)}...T_ {\{ f_{2n-1},f_{2n}\} }^{(k)}||+ $$ $$ || T_{\{ f_1,f_2\} ...\{ f_{2n-1},f_{2n}\} }^{(k)}- T_{\{ f_1,f_2\} } ^{(k)}...T_ {\{ f_{2n-1},f_{2n}\} }^{(k)}|| = $$ $$ ||\bigl ( (ik[T_{f_1}^{(k)},T_{f_2}^{(k)}]-T_{\{ f_1,f_2\} } ^{(k)})+T_{\{ f_1,f_2\} } ^{(k)}\bigr ) ... \bigl ( (ik[T_{f_{2n-1}}^{(k)},T_{f_{2n}}^{(k)}]-T_ {\{ f_{2n-1},f_{2n}\} }^{(k)} $$ $$ +T_ {\{ f_{2n-1},f_{2n}\} }^{(k)} \bigr ) - T_{\{ f_1,f_2\} } ^{(k)}...T_ {\{ f_{2n-1},f_{2n}\} }^{(k)}||+O(\frac{1}{k}) . $$ This is $O(\frac{1}{k})$. Indeed, within $||.||$ the term $T_{\{ f_1,f_2\} } ^{(k)}...T_ {\{ f_{2n-1},f_{2n}\} }^{(k)}$ cancels, and all the other terms are products of factors of the form $(ik[T_{f_{2j-1}}^{(k)},T_{f_{2j}}^{(k)}]-T_ {\{ f_{2j-1},f_{2j}\} }^{(k)})$ (at least one of these appears) and of the form $T_ {\{ f_{2j-1},f_{2j}\} }^{(k)}$. Using the triangle inequality, (\ref{asympcomm}) and Theorem \ref{bmstheorem} (ii), we get $O(\frac{1}{k})$. Thus, as $k\to\infty$, $$ ||(ik)^n[T_{f_1}^{(k)},T_{f_2}^{(k)}]...[T_{f_{2n-1}}^{(k)},T_{f_{2n}}^{(k)}] -T_{\{ f_1,f_2\} ...\{ f_{2n-1},f_{2n}\} }^{(k)}||=O(\frac{1}{k}). $$ The exact same proof shows that $$ ||(ik)^n[T_{f_{\sigma (1)}}^{(k)},T_{f_{\sigma(2) }}^{(k)}]... [T_{f_{\sigma(2n-1)}}^{(k)},T_{f_{\sigma(2n)}}^{(k)}] -T_{\{ f_{\sigma(1)},f_{\sigma(2)}\} ...\{ f_{\sigma(2n-1)},f_{\sigma(2n) }\} }^{(k)}||=O(\frac{1}{k}). $$ We note that $$ T_{\{ f_1,...,f_{2n}\}}^{(k)}=\frac{1}{2^n n!}\sum_{\sigma\in S_{2n}}\sign(\sigma) T_{\prod_{j=1}^n \{ f_{\sigma (2j-1)},f_{\sigma (2j)}\}} ^{(k)} $$ (by Lemma \ref{lembrackets}). The desired statement now follows from Lemma \ref{lemcomm2} and the triangle inequality. $\Box$ The following proposition is similar to Prop. \ref{propcomm}. It implies that $\lim_{k\to\infty }||[T_{f_1}^{(k)}, ..., T_{f_{2n}}^{(k)}]||=0$ (i.e. $T_{f_1}^{(k)}$, ..., $T_{f_{2n}}^{(k)}$ "Nambu-commute as $k\to\infty$"). \begin{proposition} \label{commvolform} For $f_1,...,f_{2n}\in C^{\infty} (M)$ $$ ||[T_{f_1}^{(k)}, ..., T_{f_{2n}}^{(k)}]||=O(\frac{1}{k^n}) $$ as $k\to\infty$. \end{proposition} \noindent {\bf Proof.} $$ ||[T_{f_1}^{(k)}, ..., T_{f_{2n}}^{(k)}]||= $$ $$ || \sideset{}{'} \sum _{\sigma\in S_{2n}}\sign(\sigma ) [T_{f_{\sigma(1)}}^{(k)}, T_{f_{\sigma(2)}}^{(k)}] [T_{f_{\sigma(3)}}^{(k)}, T_{f_{\sigma(4)}}^{(k)}]... [T_{f_{\sigma(2n-1)}}^{(k)}, T_{f_{\sigma(2n)}}^{(k)}]||\le $$ $$ \sideset{}{'} \sum _{\sigma\in S_{2n}} ||[T_{f_{\sigma(1)}}^{(k)}, T_{f_{\sigma(2)}}^{(k)}]||... ||[T_{f_{\sigma(2n-1)}}^{(k)}, T_{f_{\sigma(2n)}}^{(k)}]|| $$ which is $O(\frac{1}{k^n})$ by Remark \ref{remarkcomm}. $\Box$ \section{Quantization on a hyperk\"ahler manifold} \label{hyperkahler} Let $(M,g,J_1,J_2,J_3)$ be a compact connected hyperk\"ahler manifold. Let $4q$ denote the real dimension of $M$.
Denote $\omega_r=g(.,J_r.)$ for $r=1,2,3$. The $4$-form $$ \Omega = \omega_1 \wedge \omega_1 +\omega_2 \wedge \omega_2 + \omega_3 \wedge \omega_3 $$ is $3$-plectic \cite{cantr:99}. Define the brackets $\{.,.,.,.\} _r$, $\{.,.,.,.\} _{hyp}$ (multilinear maps $\bigwedge^4 C^{\infty}(M)\to C^{\infty}(M)$) as follows: $$ \{f_1,f_2,f_3,f_4\} _r=\{ f_1,f_2\} _r\{ f_3,f_4\} _r- \{ f_1,f_3\} _r\{ f_2,f_4\} _r+ \{ f_1,f_4\} _r\{ f_2,f_3\} _r , $$ where $\{ .,.\} _r$ is the Poisson bracket on $(M,\omega_r )$, $r=1,2,3$, and $$ \{f_1,f_2,f_3,f_4\} _{hyp} =\sum_{r=1}^{3}\{f_1,f_2,f_3,f_4\} _r . $$ From the properties of the Poisson bracket it immediately follows that the Leibniz rule is satisfied: $$ \{f_1,f_2,f_3,f_4f_5\} _{r}= f_4 \{f_1,f_2,f_3,f_5\} _{r}+ \{f_1,f_2,f_3,f_4\} _{r} f_5 , $$ $$ \{f_1,f_2,f_3,f_4f_5\} _{hyp}= f_4 \{f_1,f_2,f_3,f_5\} _{hyp}+ \{f_1,f_2,f_3,f_4\} _{hyp} f_5 . $$ Therefore $\{ .,.,.,.\} _{r}$, $\{ .,.,.,.\} _{hyp}$ are almost Poisson brackets of order $4$. For $q=1$, $\omega_r\wedge \omega_r$ ($r=1,2,3$) and $\Omega$ are volume forms. The standard bracket $\{.,.,.,.\} ^{(r)}$ is defined by $$ df_1\wedge df_2\wedge df_3\wedge df_4=\{f_1,f_2,f_3,f_4\}^{(r)}\frac{1}{2} \omega_r \wedge \omega_r . $$ From Lemma \ref{lembrackets}, or by a direct calculation (using the Darboux theorem, in local coordinates), we get: \begin{lemma} For $q=1$, $\{.,.,.,.\} _r$ coincides with $\{.,.,.,.\} ^{(r)}$. \end{lemma} From \cite[Cor. 1 p.106]{gautheron:96} it immediately follows that for $q=1$ ($M$ is $4$-dimensional) the Fundamental Identity $$ \{f_1,f_2,f_3,\{g_1,g_2,g_3,g_4\}_{hyp}\} _{hyp} = \{ \{f_1,f_2,f_3,g_1\}_{hyp} ,g_2,g_3,g_4\}_{hyp} + $$ $$ \{ g_1,\{f_1,f_2,f_3,g_2\}_{hyp}, g_3,g_4\}_{hyp} + \{ g_1,g_2,\{f_1,f_2,f_3,g_3\}_{hyp}, g_4\}_{hyp} + $$ $$ \{ g_1,g_2,g_3,\{f_1,f_2,f_3,g_4\}_{hyp} \}_{hyp} $$ is satisfied (similarly for $\{.,.,.,.\} _r$). For $q>1$, $\{.,.,.,.\} _r$ and $\{.,.,.,.\} _{hyp}$ are not necessarily Nambu-Poisson brackets (the Fundamental Identity may not be satisfied if $q>1$). Assume that the K\"ahler forms $\frac{\omega_1}{2\pi}$, $\frac{\omega_2}{2\pi}$, $\frac{\omega_3}{2\pi}$ are integral. Let $L_r$ be a holomorphic Hermitian line bundle with curvature of the Chern connection equal to $-i\omega_r$, for $r=1,2,3$. For a positive integer $k$ and $f\in C^\infty (M)$ denote by $T_{f;r}^{(k)}\in \End(H^0(M,L_r^{\otimes k}))$ the Berezin-Toeplitz operator for $f$. There are two obvious ways to form a Hilbert space out of the three Hilbert spaces $H^0(M,L_r^{\otimes k})$ ($r=1,2,3$): by taking the direct sum or the tensor product. Another way to approach this is to say that the vector space of quantization is $H^0(M,(L_1\otimes L_2\otimes L_3)^{\otimes k})$; this would be just the usual Berezin-Toeplitz quantization, with the line bundle $L_1\otimes L_2\otimes L_3$. Note: in general $H^0(M,(L_1\otimes L_2\otimes L_3)^{\otimes k})$ is not isomorphic to $H^0(M,L_1^{\otimes k})\otimes H^0(M,L_2^{\otimes k})\otimes H^0(M,L_3^{\otimes k})$. Of course, the hyperk\"ahler structure defines a whole $S^2$ of complex structures (and of K\"ahler forms) on $M$, not just three. A. Uribe pointed out to us that maybe an appropriate notion of quantization on a hyperk\"ahler manifold should take into account all $J\in S^2$, and should involve an appropriate vector bundle over the twistor space, with fibers $H^0(M,L_J^{\otimes k})$. We look forward to seeing his work on this.
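For the reader's convenience we recall briefly why one gets a whole sphere of K\"ahler structures (this is a standard fact of hyperk\"ahler geometry, stated here only as background and not used in the sequel; the notation $J_a$, $\omega_a$ appears only in this paragraph). For $a=(a_1,a_2,a_3)\in S^2$ set $J_a=a_1J_1+a_2J_2+a_3J_3$. The quaternionic relations $J_r^2=-\mathrm{id}$ and $J_rJ_s=-J_sJ_r$ ($r\neq s$) give $$ J_a^2=\sum_{r=1}^3 a_r^2 J_r^2+\sum_{r<s}a_ra_s(J_rJ_s+J_sJ_r)=-(a_1^2+a_2^2+a_3^2)\,\mathrm{id}=-\mathrm{id} , $$ and $J_a$ is parallel (hence integrable) and compatible with $g$, with K\"ahler form $\omega_a=a_1\omega_1+a_2\omega_2+a_3\omega_3$.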
Note that the twistor space of a hyperk\"ahler manifold is not K\"ahler (this is well known; see, for example, \cite{kaledin:99}, p. 37, or \cite{huybr:10}), so it is not possible to construct a Berezin-Toeplitz quantization on the twistor space. \begin{remark} Denote by $\pi_r : M\times M\times M\to M$ the projection to the $r$-th factor ($r=1,2,3$). For sufficiently large $k$ $$ H^0(M\times M\times M, (\pi_1^*L_1\otimes \pi_2^*L_2\otimes \pi_3^*L_3)^{\otimes k})\cong \ensuremath{{\mathcal {H}}_k} , $$ where $\ensuremath{{\mathcal {H}}_k}$ denotes the tensor product $H^0(M,L_1^{\otimes k})\otimes H^0(M,L_2^{\otimes k})\otimes H^0(M,L_3^{\otimes k})$ considered in subsection \ref{tensorproduct}. The proof was explained to us by K. Yoshikawa and it goes as follows: $$ \dim H^0(M\times M\times M, (\pi_1^*L_1\otimes \pi_2^*L_2\otimes \pi_3^*L_3)^{\otimes k})= $$ $$ \int_{M\times M\times M} \td (M\times M\times M) \ch ((\pi_1^*L_1\otimes \pi_2^*L_2\otimes \pi_3^*L_3)^{\otimes k})= $$ $$ \int_{M\times M\times M} \pi_1^*\td (M)\pi_2^*\td (M)\pi_3^*\td (M) \pi_1^*\ch(L_1^{\otimes k}) \pi_2^*\ch (L_2^{\otimes k}) \pi_3^*\ch(L_3^{\otimes k})= $$ $$ \int_M \td (M)\ch(L_1^{\otimes k}) \ \int_M \td (M)\ch(L_2^{\otimes k}) \ \int_M \td (M)\ch(L_3^{\otimes k})= $$ $$ \dim H^0(M,L_1^{\otimes k})\dim H^0(M,L_2^{\otimes k})\dim H^0(M,L_3^{\otimes k}) . $$ $\Box$ \end{remark} In this paper we shall work with functions and structures on $M$, rather than on $M\times M\times M$. We shall find the following statement useful. \begin{proposition} \label{4functions} For $f,g,h,t\in C^{\infty} (M)$, $r=1,2,3$, $$ ||-\frac{k^2}{2}[T_{f;r}^{(k)},T_{g;r}^{(k)},T_{h;r}^{(k)},T_{t;r}^{(k)}]-T_{\{ f,g,h,t\} _r;r}^{(k)}|| = O(\frac{1}{k}) $$ as $k\to\infty$. \end{proposition} \noindent {\bf Proof.} As $k\to\infty$, for $r=1,2,3$, by Theorem \ref{bmstheorem} (i) for $f,g\in C^{\infty}(M)$ \begin{equation} \label{asympfg} ||ik[T_{f;r}^{(k)},T_{g;r}^{(k)}]-T_{\{ f,g\} _r ;r}^{(k)}||=O(\frac{1}{k}), \end{equation} \begin{equation} \label{asympht} ||ik[T_{h;r}^{(k)},T_{t;r}^{(k)}]-T_{\{ h,t\} _r ;r }^{(k)}||=O(\frac{1}{k}). \end{equation} Using Prop. \ref{bmsprop}, we get: $$ ||(ik)^2[T_{f;r}^{(k)},T_{g;r}^{(k)}][T_{h;r}^{(k)},T_{t;r}^{(k)}]- T_{\{ f,g\} _r \{ h,t\} _r ;r }^{(k)}|| \le $$ $$ ||(ik)^2[T_{f;r}^{(k)},T_{g;r}^{(k)}][T_{h;r}^{(k)},T_{t;r}^{(k)}]-T_{\{ f,g\} _r ;r }^{(k)} T_{\{ h,t\} _r ;r }^{(k)}||+ ||T_{\{ f,g\} _r ;r }^{(k)}T_{\{ h,t\} _r ;r }^{(k)} - T_{\{ f,g\} _r \{ h,t\} _r ;r }^{(k)}||= $$ $$ ||(ik[T_{f;r}^{(k)},T_{g;r}^{(k)}] - T_{\{ f,g\} _r;r }^{(k)} + T_{\{ f,g\} _r;r }^{(k)}) (ik[T_{h;r}^{(k)},T_{t;r}^{(k)}] - T_{\{ h,t\} _r;r }^{(k)} + T_{\{ h,t\} _r;r }^{(k)}) $$ $$ -T_{\{ f,g\} _r;r }^{(k)}T_{\{ h,t\} _r;r }^{(k)}||+O(\frac{1}{k})= $$ $$ || (ik[T_{f;r}^{(k)},T_{g;r}^{(k)}] - T_{\{ f,g\} _r;r }^{(k)})(ik[T_{h;r}^{(k)},T_{t;r}^{(k)}] - T_{\{ h,t\} _r;r }^{(k)})+ $$ $$ (ik[T_{f;r}^{(k)},T_{g;r}^{(k)}] - T_{\{ f,g\} _r;r }^{(k)})T_{\{ h,t\} _r;r }^{(k)}+ T_{\{ f,g\} _r;r }^{(k)}(ik[T_{h;r}^{(k)},T_{t;r}^{(k)}] - T_{\{ h,t\} _r;r }^{(k)})||+O(\frac{1}{k}) $$ $$ \le ||ik[T_{f;r}^{(k)},T_{g;r}^{(k)}] - T_{\{ f,g\} _r;r }^{(k)}|| \ ||ik[T_{h;r}^{(k)},T_{t;r}^{(k)}] - T_{\{ h,t\} _r;r }^{(k)}||+ $$ $$ ||ik[T_{f;r}^{(k)},T_{g;r}^{(k)}] - T_{\{ f,g\} _r;r }^{(k)}|| \ ||T_{\{ h,t\} _r;r }^{(k)}||+ ||T_{\{ f,g\} _r;r }^{(k)}|| \ ||ik[T_{h;r}^{(k)},T_{t;r}^{(k)}] - T_{\{ h,t\} _r;r }^{(k)}||+O(\frac{1}{k})= $$ $$ O(\frac{1}{k})O(\frac{1}{k})+|\{ h,t\} _r|_{\infty}O(\frac{1}{k})+|\{ f,g\} _r|_{\infty}O(\frac{1}{k}) +O(\frac{1}{k})=O(\frac{1}{k}). $$ In the last line we used (\ref{asympfg}), (\ref{asympht}), and applied Theorem \ref{bmstheorem} (ii) twice.
Similarly we conclude, for $f,h$ and $g,t$: $$ ||(ik)^2[T_{f;r}^{(k)},T_{h;r}^{(k)}][T_{g;r}^{(k)},T_{t;r}^{(k)}]- T_{\{ f,h\} _r \{ g,t\} _r ;r }^{(k)}||=O(\frac{1}{k}), $$ etc. (i.e. we get similar asymptotics for $f,t$ and $g,h$, for $h,t$ and $f,g$, for $g,t$ and $f,h$, for $g,h$ and $f,t$). Note: $$ T_{\{ f,g,h,t\} _r;r }^{(k)}= T_{\{ f,g\} _r \{ h,t\} _r ;r }^{(k)}- T_{\{ f,h\} _r \{ g,t\} _r ;r }^{(k)}+ T_{\{ f,t\} _r \{ g,h\} _r ;r }^{(k)}. $$ Therefore, by (\ref{comm4}) and the triangle inequality, $$ ||-\frac{k^2}{2}[T_{f;r}^{(k)}, T_{g;r}^{(k)},T_{h;r}^{(k)}, T_{t;r}^{(k)}]-T_{\{ f,g,h,t\} _r;r }^{(k)}||= O(\frac{1}{k}). $$ $\Box$ \subsection{Direct sum} \label{directsumgen} Denote $$ \ensuremath{{\mathcal {H}}_k} = H^0(M,L_1^{\otimes k})\oplus H^0(M,L_2^{\otimes k})\oplus H^0(M,L_3^{\otimes k}) $$ (direct sum of Hilbert spaces) and $$ {\ensuremath{{\mathbf{T}}}}_{f}^{(k)}=T_{f;1}^{(k)}\oplus T_{f;2}^{(k)}\oplus T_{f;3}^{(k)}, $$ (${\ensuremath{{\mathbf{T}}}}_{f}^{(k)}$ acts on $\ensuremath{{\mathcal {H}}_k}$ by ${\ensuremath{{\mathbf{T}}}}_{f}^{(k)}(s_1,s_2,s_3) =(T_{f;1}^{(k)}s_1,T_{f;2}^{(k)}s_2, T_{f;3}^{(k)}s_3)$). \begin{remark} Since $||{\ensuremath{{\mathbf{T}}}}_{f}^{(k)}||=\max \{ ||T_{f;1}^{(k)}||,||T_{f;2}^{(k)}||,||T_{f;3}^{(k)}||\}$, we immediately have: \begin{itemize} \item For $f,g\in C^{\infty}(M)$, as $k\to\infty$, $$ ||ik[\ensuremath{{\mathbf{T}}}_f^{(k)},\ensuremath{{\mathbf{T}}}_g^{(k)}]-\ensuremath{{\mathbf{T}}}_{\{ f,g\} }^{(k)}||=O(\frac{1}{k}), \ ||[\ensuremath{{\mathbf{T}}}_f^{(k)},\ensuremath{{\mathbf{T}}}_g^{(k)}]||=O(\frac{1}{k}) $$ \item For $f\in C^{\infty}(M)$, there is a constant $C=C(f)>0$ such that, as $k\to\infty$, $$ |f|_{\infty}-\frac{C}{k}\le ||\ensuremath{{\mathbf{T}}}_f^{(k)}||\le |f|_{\infty} . $$ \item For $f_1,...,f_p\in C^{\infty} (M)$ $$ ||\ensuremath{{\mathbf{T}}}_{f_1}^{(k)}...\ensuremath{{\mathbf{T}}}_{f_p}^{(k)}-\ensuremath{{\mathbf{T}}}_{f_1...f_p}^{(k)}||=O(\frac{1}{k}) $$ as $k\to\infty$. \end{itemize} \end{remark} For $f,g,h,t\in C^{\infty}(M)$ we have: $$ [\ensuremath{{\mathbf{T}}}_{f}^{(k)},\ensuremath{{\mathbf{T}}}_{g}^{(k)},\ensuremath{{\mathbf{T}}}_{h}^{(k)},\ensuremath{{\mathbf{T}}}_{t}^{(k)}]=\oplus_{r=1}^3 [T_{f;r}^{(k)},T_{g;r}^{(k)},T_{h;r}^{(k)},T_{t;r}^{(k)}] . $$ \begin{theorem} \label{thhyperk} For $f,g,h,t\in C^{\infty} (M)$ $$ ||-\frac{k^2}{2}[\ensuremath{{\mathbf{T}}}_{f}^{(k)},\ensuremath{{\mathbf{T}}}_{g}^{(k)},\ensuremath{{\mathbf{T}}}_{h}^{(k)},\ensuremath{{\mathbf{T}}}_{t}^{(k)}]-\oplus_{r=1}^3T_{\{ f,g,h,t\} _r;r}^{(k)}|| = O(\frac{1}{k}) $$ as $k\to\infty$. \end{theorem} \noindent {\bf Proof.} Using Proposition \ref{4functions}, we get: $$ ||-\frac{k^2}{2}[\ensuremath{{\mathbf{T}}}_f^{(k)}, \ensuremath{{\mathbf{T}}}_g^{(k)},\ensuremath{{\mathbf{T}}}_h^{(k)}, \ensuremath{{\mathbf{T}}}_t^{(k)}]-\oplus_{r=1}^3 T_{\{ f,g,h,t\} _r;r }^{(k)}||= $$ $$ \max _{1\le r\le 3} ||-\frac{k^2}{2}[T_{f;r}^{(k)}, T_{g;r}^{(k)},T_{h;r}^{(k)}, T_{t;r}^{(k)}]-T_{\{ f,g,h,t\} _r;r }^{(k)}||= O(\frac{1}{k}). $$ $\Box$ The following proposition is similar to Prop. \ref{propcomm}. It implies that $\ensuremath{{\mathbf{T}}}_f^{(k)}$, $\ensuremath{{\mathbf{T}}}_g^{(k)}$, $\ensuremath{{\mathbf{T}}}_h^{(k)}$, $\ensuremath{{\mathbf{T}}}_t^{(k)}$ "Nambu-commute as $k\to\infty$". 
\begin{proposition} \label{commdim4} For $f_1,f_2,f_3,f_4\in C^{\infty} (M)$ $$ ||[\ensuremath{{\mathbf{T}}} _{f_1}^{(k)}, \ensuremath{{\mathbf{T}}}_{f_2}^{(k)}, \ensuremath{{\mathbf{T}}}_{f_3}^{(k)}, \ensuremath{{\mathbf{T}}}_{f_4}^{(k)}]||=O(\frac{1}{k^2}) $$ as $k\to\infty$. \end{proposition} \noindent {\bf Proof.} $$ ||[\ensuremath{{\mathbf{T}}} _{f_1}^{(k)}, \ensuremath{{\mathbf{T}}} _{f_2}^{(k)}, \ensuremath{{\mathbf{T}}} _{f_3}^{(k)}, \ensuremath{{\mathbf{T}}} _{f_4}^{(k)}]||= \max _{1\le r\le 3}||[T_{f_1;r}^{(k)}, T_{f_2;r}^{(k)}, T_{f_3;r}^{(k)}, T_{f_4;r}^{(k)}]||= $$ $$ \max _{1\le r\le 3}||\sideset{}{'} \sum _{\sigma\in S_4}\sign(\sigma) [T_{f_{\sigma(1)};r}^{(k)}, T_{f_{\sigma(2)};r}^{(k)}] [T_{f_{\sigma(3)};r}^{(k)}, T_{f_{\sigma(4)};r}^{(k)}]||\le $$ $$ \max _{1\le r\le 3}\sideset{}{'} \sum_{\sigma\in S_4} ||[T_{f_{\sigma(1)};r}^{(k)}, T_{f_{\sigma(2)};r}^{(k)}]|| \ ||[T_{f_{\sigma(3)};r}^{(k)}, T_{f_{\sigma(4)};r}^{(k)}]||. $$ By Remark \ref{remarkcomm} it is $O(\frac{1}{k^2})$. $\Box$ \subsection{Direct sum: dimension 4} \label{directsum4} Let us discuss the correspondence between the bracket on functions and the generalized commutator (as $k\to\infty$) in the hyperk\"ahler case. We showed (Theorem \ref{thhyperk}) that for a hyperk\"ahler manifold $M$ of arbitrary dimension and smooth functions $f,g,h,t$ on $M$ the generalized commutator $[\ensuremath{{\mathbf{T}}} _{f}^{(k)},\ensuremath{{\mathbf{T}}} _{g}^{(k)},\ensuremath{{\mathbf{T}}} _{h}^{(k)},\ensuremath{{\mathbf{T}}} _{t}^{(k)}]$ is asymptotic to $$ \begin{pmatrix} T_{\{ f,g,h,t\} _1;1}^{(k)} & & \\ & T_{\{ f,g,h,t\} _2;2}^{(k)} & \\ & & T_{\{ f,g,h,t\} _3;3}^{(k)} \end{pmatrix} , $$ {\bf not} to $$ \ensuremath{{\mathbf{T}}} _{\{ f,g,h,t\}_{hyp}}^{(k)}= \begin{pmatrix} T_{\{ f,g,h,t\}_{hyp} ;1}^{(k)} & & \\ & T_{\{ f,g,h,t\}_{hyp} ;2}^{(k)} & \\ & & T_{\{ f,g,h,t\}_{hyp} ;3}^{(k)} \end{pmatrix} . $$ To clarify, we have obtained an asymptotic relation between the map $$ \sideset{}{^4} \bigwedge C^{\infty}(M) \to C^{\infty}(M)\times C^{\infty}(M) \times C^{\infty}(M) $$ $$ f,g,h,t \mapsto (\{ f,g,h,t\} _1, \{ f,g,h,t\} _2, \{ f,g,h,t\} _3) $$ and the Nambu generalized commutator $[.,.,.,.]$. It is not the same as a correspondence between $\{ .,.,.,.\}_{hyp} : \bigwedge ^4 C^{\infty}(M) \to C^{\infty}(M)$ and $[.,.,.,.]$. From now on $M$ will be of real dimension $4$ (hence $M$ is isomorphic to a K3 surface or a torus; see \cite{besse:87}, 14.22). In this case we get Theorem \ref{thdim4} below, and in the case when $M$ is a $4$-torus with three standard linear complex structures (Example \ref{R4} below), we get that $[\ensuremath{{\mathbf{T}}} _{f}^{(k)},\ensuremath{{\mathbf{T}}} _{g}^{(k)},\ensuremath{{\mathbf{T}}} _{h}^{(k)},\ensuremath{{\mathbf{T}}} _{t}^{(k)}]$ is asymptotic to $\ensuremath{{\mathbf{T}}} _{\{ f,g,h,t\}_{hyp}}^{(k)}$. We have: for $r=1,2,3$ $$ \Omega = \frac{\mu _r}{2}\omega_r\wedge\omega_r, $$ where $\mu_r$ is a smooth non-vanishing function on $M$. Denote by $\{ .,.,.,.\}$ the Nambu-Poisson bracket defined by $$ df_1\wedge df_2\wedge df_3\wedge df_4=\{ f_1,f_2,f_3,f_4\} \Omega . $$ Therefore $$ \{ f_1,f_2,f_3,f_4\} _r=\{ f_1,f_2,f_3,f_4\} ^{(r)}=\mu_r \{ f_1,f_2,f_3,f_4\} . $$ Denote $$ \ensuremath{{\mathbf{T}}} _{\mu}^{(k)}=\begin{pmatrix} T_{\mu_1;1}^{(k)} & & \\ & T_{\mu_2;2}^{(k)} & \\ & & T_{\mu_3;3}^{(k)} \end{pmatrix} .
$$ The following theorem shows that $[\ensuremath{{\mathbf{T}}} _{f_1}^{(k)},\ensuremath{{\mathbf{T}}} _{f_2}^{(k)},\ensuremath{{\mathbf{T}}} _{f_3}^{(k)},\ensuremath{{\mathbf{T}}} _{f_4}^{(k)}]$ is asymptotic to $\ensuremath{{\mathbf{T}}} _{\{ f_1,f_2,f_3,f_4\} }^{(k)}\ensuremath{{\mathbf{T}}} _{\mu}^{(k)}$. \begin{theorem} \label{thdim4} For $f,g,h,t\in C^{\infty}(M)$ $$ ||-\frac{k^2}{2} [ \ensuremath{{\mathbf{T}}} _f^{(k)},\ensuremath{{\mathbf{T}}} _g^{(k)},\ensuremath{{\mathbf{T}}} _h^{(k)},\ensuremath{{\mathbf{T}}} _t^{(k)}] -\ensuremath{{\mathbf{T}}} _{\{ f,g,h,t\} }^{(k)}\ensuremath{{\mathbf{T}}} _{\mu}^{(k)}|| =O(\frac{1}{k}) $$ as $k\to\infty$. \end{theorem} \noindent {\bf Proof.} For $r=1,2,3$ the same argument as in the proof of Proposition \ref{4functions} gives: \begin{equation} \label{asympt} ||-\frac{k^2}{2} [ T_{f;r}^{(k)},T_{g;r}^{(k)},T_{h;r}^{(k)},T_{t;r}^{(k)}]- T_{\{ f,g,h,t\} _r ;r }^{(k)}|| =O(\frac{1}{k}) \end{equation} We have: $$ ||-\frac{k^2}{2} [ T_{f;r}^{(k)},T_{g;r}^{(k)},T_{h;r}^{(k)},T_{t;r}^{(k)}]- T_{\{ f,g,h,t\} ;r }^{(k)}T_{\mu _r;r}^{(k)}|| \le $$ $$ ||-\frac{k^2}{2} [ T_{f;r}^{(k)},T_{g;r}^{(k)},T_{h;r}^{(k)},T_{t;r}^{(k)}]- T_{\{ f,g,h,t\} \mu _r;r}^{(k)}|| + ||T_{\{ f,g,h,t\} \mu _r;r}^{(k)} - T_{\{ f,g,h,t\} ;r }^{(k)}T_{\mu _r;r}^{(k)}||. $$ This is $O(\frac{1}{k})$ by (\ref{asympt}) and Prop. \ref{bmsprop}. Hence $$ ||-\frac{k^2}{2} [ \ensuremath{{\mathbf{T}}} _f^{(k)},\ensuremath{{\mathbf{T}}} _g^{(k)},\ensuremath{{\mathbf{T}}} _h^{(k)},\ensuremath{{\mathbf{T}}} _t^{(k)}] -\ensuremath{{\mathbf{T}}} _{\{ f,g,h,t\} }^{(k)}\ensuremath{{\mathbf{T}}} _{\mu}^{(k)}|| $$ $$ =\max_{1\le r\le 3} ||-\frac{k^2}{2} [ T_{f;r}^{(k)},T_{g;r}^{(k)},T_{h;r}^{(k)},T_{t;r}^{(k)}]- T_{\{ f,g,h,t\} ;r }^{(k)}T_{\mu _r ;r}^{(k)}||=O(\frac{1}{k}). $$ $\Box$ \begin{example} \label{R4} Denote $\tilde{M}=\ensuremath{\mathbb R}^4$, with coordinates $x_1$, $x_2$, $x_3$, $x_4$, and equipped with three (linear) complex structures $$ J_1=\begin{pmatrix} 0 & -1 & & \\ 1 & 0 & & \\ & & 0 & -1 \\ & & 1 & 0 \end{pmatrix}, \ J_2=\begin{pmatrix} & & -1 & 0 \\ & & 0 & 1 \\ 1 & 0 & & \\ 0 & -1 & & \end{pmatrix}, \ J_3=\begin{pmatrix} & & & -1 \\ & & -1 & \\ & 1 & & \\ 1 & & & \end{pmatrix} . $$ We have: $J_1J_2=J_3$ and, of course, $J_1^2=J_2^2=J_3^2=-I$. Note: if we regard $\tilde{M}$ as the one-dimensional quaternionic vector space, with basis 1, {\bf{i, j, k}} ({\bf i}$^2$={\bf j}$^2$={\bf k}$^2$=$-1$, {\bf ij=k}), then $J_1$, $J_2$, $J_3$ correspond to left multiplication by {\bf{i, j, k}} respectively. For the standard Riemannian metric on $\tilde{M}$, with the metric tensor $g=I$, the symplectic forms are as follows: $$ \omega_1 = dx_1\wedge dx_2 +dx_3\wedge dx_4, $$ $$ \omega_2 = dx_1\wedge dx_3 -dx_2\wedge dx_4, $$ $$ \omega_3 = dx_1\wedge dx_4 +dx_2\wedge dx_3. $$ For $r=1,2,3$ $$ \frac{1}{2}\omega_r\wedge \omega_r=dx_1\wedge dx_2\wedge dx_3\wedge dx_4, $$ $$ \Omega =\sum_{r=1}^3 \omega_r\wedge \omega_r = 6 dx_1\wedge dx_2\wedge dx_3\wedge dx_4. $$ Everything is $\ensuremath{{\mathbb Z}}^4$-invariant and $g$, $J_1$, $J_2$, $J_3$, $\omega_1$, $\omega_2$, $\omega_3$, $\Omega$ descend to $M=\tilde{M}/\ensuremath{{\mathbb Z}}^4$. We get: $\mu_1=\mu_2=\mu_3=6$ and $$ 6\{ .,.,.,.\} = \{ .,.,.,.\} _r=\{ .,.,.,.\} ^{(r)}=\frac{1}{3}\{ .,.,.,.\} _{hyp}. 
$$ Theorem \ref{thdim4} gives: for $f,g,h,t\in C^{\infty}(M)$ \begin{equation} \label{asymptorus} ||-c k^2 [ \ensuremath{{\mathbf{T}}} _f^{(k)},\ensuremath{{\mathbf{T}}} _g^{(k)},\ensuremath{{\mathbf{T}}} _h^{(k)},\ensuremath{{\mathbf{T}}} _t^{(k)}] -\ensuremath{{\mathbf{T}}} _{\{ f,g,h,t\} _{hyp}}^{(k)}|| =O(\frac{1}{k}) \end{equation} as $k\to\infty$, where $c$ is a positive constant. \end{example} \subsection{Tensor product} \label{tensorproduct} Denote $$ \ensuremath{{\mathcal {H}}_k} = H^0(M,L_1^{\otimes k})\otimes H^0(M,L_2^{\otimes k})\otimes H^0(M,L_3^{\otimes k}) $$ (tensor product of Hilbert spaces) and $$ \ensuremath{{\mathbb{T}}} ^{(k)}_f=T_{f;1}^{(k)}\otimes T_{f;2}^{(k)}\otimes T_{f;3}^{(k)}, $$ ($\ensuremath{{\mathbb{T}}} _{f}^{(k)}(s_1\otimes s_2\otimes s_3) =T_{f;1}^{(k)}s_1\otimes T_{f;2}^{(k)}s_2\otimes T_{f;3}^{(k)}s_3$ and the action extends to $\ensuremath{{\mathcal {H}}_k}$ by linearity, also note: $||\ensuremath{{\mathbb{T}}}_{f}^{(k)}||=||T_{f;1}^{(k)}|| \ ||T_{f;2}^{(k)}|| \ ||T_{f;3}^{(k)}||$). In the proofs below we shall need the following elementary statement. \begin{lemma} \label{tensorlem} If $M_j$, $N_j$ are linear operators on a finite dimensional Hilbert space $V_j$ ($j=1,2,3$), then $$ ||M_1\otimes M_2\otimes M_3-N_1\otimes N_2\otimes N_3||\le ||M_1-N_1|| \ ||M_2-N_2|| \ ||M_3-N_3||+ $$ $$ ||M_1-N_1|| \ || M_2|| \ ||N_3||+ ||M_1|| \ ||N_2|| \ ||M_3-N_3||+||N_1|| \ ||M_2-N_2|| \ ||M_3|| $$ \end{lemma} \noindent {\bf Proof.} This immediately follows from the equality $$ (M_1-N_1)\otimes (M_2-N_2)\otimes (M_3-N_3)=M_1\otimes M_2\otimes M_3-N_1\otimes N_2\otimes N_3- $$ $$ (M_1-N_1)\otimes M_2\otimes N_3-M_1\otimes N_2\otimes (M_3-N_3)-N_1\otimes (M_2-N_2)\otimes M_3 $$ $\Box$ We also note the following identity for tensor products of operators: \begin{equation} \label{tensorid3} [A_1\otimes A_2 \otimes A_3, B_1\otimes B_2 \otimes B_3]= [A_1,B_1]\otimes [A_2,B_2] \otimes [A_3,B_3] + \end{equation} $$ [A_1,B_1]\otimes B_2A_2\otimes A_3B_3+ A_1B_1 \otimes [A_2,B_2]\otimes B_3A_3 + B_1A_1 \otimes A_2B_2 \otimes [A_3,B_3]. $$ \begin{remark} \ \begin{itemize} \item For $f\in C^{\infty}(M)$, there is a constant $C=C(f)>0$ such that, as $k\to\infty$, $$ (|f|_{\infty}-\frac{C}{k})^3\le ||\ensuremath{{\mathbb{T}}}_f^{(k)}||\le (|f|_{\infty})^3 . $$ \item For $f_1,...,f_p\in C^{\infty} (M)$ $$ ||\ensuremath{{\mathbb{T}}}_{f_1}^{(k)}...\ensuremath{{\mathbb{T}}}_{f_p}^{(k)}-\ensuremath{{\mathbb{T}}}_{f_1...f_p}^{(k)}||=O(\frac{1}{k}) $$ as $k\to\infty$. \end{itemize} The last statement holds for $p=2$ by Lemma \ref{tensorlem}, Theorem \ref{bmstheorem} and Prop. \ref{bmsprop}. It follows for arbitrary $p$ by induction. \end{remark} \begin{proposition} \label{3comms} For $f,g\in C^{\infty} (M)$ $$ ||(ik)^3[T_{f;1}^{(k)},T_{g;1}^{(k)}]\otimes [T_{f;2}^{(k)},T_{g;2}^{(k)}] \otimes [T_{f;3}^{(k)},T_{g;3}^{(k)}]- T_{\{ f,g\} _1;1}^{(k)}\otimes T_{\{ f,g\} _2;2}^{(k)}\otimes T_{\{ f,g\} _3;3}^{(k)}||=O(\frac{1}{k}) $$ as $k\to\infty$. \end{proposition} \noindent {\bf Proof.} This follows from Lemma \ref{tensorlem}, Theorem \ref{bmstheorem} and Remark \ref{remarkcomm}. $\Box$ \begin{proposition} \label{tensorprop} For $f,g\in C^{\infty} (M)$ $$ ||ik[\ensuremath{{\mathbb{T}}}_f^{(k)},\ensuremath{{\mathbb{T}}}_g^{(k)}]- (T_{\{ f,g\} _1;1}^{(k)}\otimes T_{fg;2}^{(k)}\otimes T_{fg;3}^{(k)}+ T_{fg;1}^{(k)}\otimes T_{\{ f,g\} _2;2}^{(k)}\otimes T_{fg;3}^{(k)}+ $$ $$ T_{fg;1}^{(k)}\otimes T_{fg;2}^{(k)}\otimes T_{\{ f,g\} _3;3}^{(k)}) ||=O(\frac{1}{k}) $$ as $k\to\infty$. 
\end{proposition} \noindent {\bf Proof.} Using (\ref{tensorid3}), we get: $$ ||ik[\ensuremath{{\mathbb{T}}}_f^{(k)},\ensuremath{{\mathbb{T}}}_g^{(k)}]- (T_{\{ f,g\} _1;1}^{(k)}\otimes T_{fg;2}^{(k)}\otimes T_{fg;3}^{(k)}+ T_{fg;1}^{(k)}\otimes T_{\{ f,g\} _2;2}^{(k)}\otimes T_{fg;3}^{(k)}+ $$ $$ T_{fg;1}^{(k)}\otimes T_{fg;2}^{(k)}\otimes T_{\{ f,g\} _3;3}^{(k)})||\le $$ $$ ||ik[T_{f;1}^{(k)},T_{g;1}^{(k)}]\otimes T_{g;2}^{(k)}T_{f;2}^{(k)}\otimes T_{f;3}^{(k)}T_{g;3}^{(k)}- T_{\{ f,g\} _1;1}^{(k)}\otimes T_{fg;2}^{(k)}\otimes T_{fg;3}^{(k)}||+ $$ $$ ||T_{f;1}^{(k)}T_{g;1}^{(k)}\otimes ik[T_{f;2}^{(k)},T_{g;2}^{(k)}]\otimes T_{g;3}^{(k)}T_{f;3}^{(k)}- T_{fg;1}^{(k)}\otimes T_{\{ f,g\} _2;2}^{(k)}\otimes T_{fg;3}^{(k)}||+ $$ $$ ||T_{g;1}^{(k)}T_{f;1}^{(k)}\otimes T_{f;2}^{(k)}T_{g;2}^{(k)}\otimes ik[T_{f;3}^{(k)},T_{g;3}^{(k)}]- T_{fg;1}^{(k)}\otimes T_{fg;2}^{(k)}\otimes T_{\{ f,g\} _3;3}^{(k)}||+ $$ $$ k||[T_{f;1}^{(k)},T_{g;1}^{(k)}]\otimes [T_{f;2}^{(k)},T_{g;2}^{(k)}]\otimes [T_{f;3}^{(k)},T_{g;3}^{(k)}]||. $$ Each of the first three terms is $O(\frac{1}{k})$ by Lemma \ref{tensorlem}, Theorem \ref{bmstheorem}, Prop. \ref{bmsprop} and Remark \ref{remarkcomm}. The last term is $O(\frac{1}{k^2})$ by Remark \ref{remarkcomm}. $\Box$ \begin{corollary} \label{tensorcorcomm} For $f,g\in C^{\infty}(M)$ $$ ||[\ensuremath{{\mathbb{T}}} _f^{(k)},\ensuremath{{\mathbb{T}}} _g^{(k)}]||=O(\frac{1}{k}) $$ as $k\to\infty$. \end{corollary} \noindent {\bf Proof.} Follows from Proposition \ref{tensorprop} and Theorem \ref{bmstheorem}(ii) by triangle inequality. $\Box$ \begin{corollary} \label{tensorcorcomm2} For $f,g,h,t\in C^{\infty}(M)$ $$ ||[\ensuremath{{\mathbb{T}}} _f^{(k)},\ensuremath{{\mathbb{T}}} _g^{(k)},\ensuremath{{\mathbb{T}}} _h^{(k)},\ensuremath{{\mathbb{T}}} _t^{(k)}]||=O(\frac{1}{k^2}) $$ as $k\to\infty$. \end{corollary} \noindent {\bf Proof.} Follows from equality (\ref{comm4}) and Corollary \ref{tensorcorcomm} by triangle inequality. $\Box$ \begin{proposition} \label{tensorprop4} For $f,g,h,t\in C^{\infty}(M)$ $$ ||-\frac{k^6}{8}[T _{f;1}^{(k)},T_{g;1}^{(k)},T_{h;1}^{(k)},T_{t;1}^{(k)}]\otimes [T _{f;2}^{(k)},T_{g;2}^{(k)},T_{h;2}^{(k)},T_{t;2}^{(k)}]\otimes [T _{f;3}^{(k)},T_{g;3}^{(k)},T_{h;3}^{(k)},T_{t;3}^{(k)}]- $$ $$ T_{\{ f,g,h,t\} _1 ;1}^{(k)}\otimes T_{\{ f,g,h,t\} _2 ;2}^{(k)}\otimes T_{\{ f,g,h,t\} _3 ;3}^{(k)} ||=O(\frac{1}{k}) $$ as $k\to\infty$. \end{proposition} \noindent {\bf Proof.} For $r=1,2,3$ $$ ||[T _{f;r}^{(k)},T_{g;r}^{(k)},T_{h;r}^{(k)},T_{t;r}^{(k)}]||=O(\frac{1}{k^2}) $$ as $k\to\infty$ (this follows by triangle inequality from (\ref{comm4}) and Remark \ref{remarkcomm}). The statement now follows from Lemma \ref{tensorlem}, Proposition \ref{4functions} and Theorem \ref{bmstheorem} (ii). $\Box$ It is natural to ask about asymptotics of $[\ensuremath{{\mathbb{T}}} _f^{(k)},\ensuremath{{\mathbb{T}}} _g^{(k)},\ensuremath{{\mathbb{T}}} _h^{(k)},\ensuremath{{\mathbb{T}}} _t^{(k)}]$ for given $f,g,h,t \in C^{\infty} (M)$. Proposition \ref{tensorprop} dictates the following very technical statement. 
\begin{theorem} \label{tensorth} For $f_1,f_2,f_3,f_4\in C^{\infty}(M)$ $$ ||-\frac{k^2}{2}[\ensuremath{{\mathbb{T}}} _{f_1}^{(k)},\ensuremath{{\mathbb{T}}} _{f_2}^{(k)},\ensuremath{{\mathbb{T}}} _{f_3}^{(k)},\ensuremath{{\mathbb{T}}} _{f_4}^{(k)}]- {\mathbb{W}}_{f_1,f_2,f_3,f_4 }^{(k)}||=O(\frac{1}{k}) $$ as $k\to\infty$, where $$ {\mathbb{W}}_{f_1,f_2,f_3,f_4 }^{(k)}= T_{ \{ f_1,f_2,f_3,f_4\} _1 ;1}^{(k)}\otimes T_{ f_1f_2f_3f_4;2}^{(k)} \otimes T_{ f_1f_2f_3f_4 ;3}^{(k)}+ $$ $$ T_{ f_1f_2f_3f_4 ;1}^{(k)}\otimes T_{ \{ f_1,f_2,f_3,f_4\} _2 ;2}^{(k)} \otimes T_{ f_1f_2f_3f_4 ;3}^{(k)}+ T_{f_1f_2f_3f_4 ;1}^{(k)}\otimes T_{f_1f_2f_3f_4 ;2}^{(k)} \otimes T_{ \{ f_1,f_2,f_3,f_4\} _3 ;3}^{(k)}+ $$ $$ \sum_{\substack{{(i,j,m,l)=(1,2,3,4),}\\ {(1,3,2,4),(1,4,2,3)}} } \sign (i,j,m,l) \Bigl [ T_{f_if_j\{ f_m,f_l\} _1;1}^{(k)} \otimes ( T_{f_mf_l\{ f_i,f_j\} _2;2}^{(k)} \otimes T_{f_if_jf_mf_l;3}^{(k)} + $$ $$ T_{f_if_jf_mf_l;2}^{(k)} \otimes T_{f_mf_l\{ f_i,f_j\} _3;3}^{(k)} )+ T_{f_mf_l\{ f_i,f_j\} _1;1}^{(k)}\otimes $$ $$ (T_{f_if_j\{ f_m,f_l\} _2;2}^{(k)} \otimes T_{f_if_jf_mf_l;3}^{(k)}+ T_{f_if_jf_mf_l;2}^{(k)} \otimes T_{f_if_j\{ f_m,f_l\} _3;3}^{(k)})+ $$ $$ T_{f_if_jf_mf_l;1}^{(k)} \otimes ( T_{f_if_j\{ f_m,f_l\} _2;2}^{(k)} \otimes T_{f_mf_l\{ f_i,f_j\} _3;3}^{(k)}+ T_{f_mf_l\{ f_i,f_j\} _2;2}^{(k)}\otimes T_{f_if_j\{ f_m,f_l\} _3;3}^{(k)}) \Bigr ] . $$ \end{theorem} \noindent {\bf Proof.} First, we observe: as $k\to\infty$ $$ ||(ik)^2[\ensuremath{{\mathbb{T}}} _{f_i}^{(k)},\ensuremath{{\mathbb{T}}} _{f_j}^{(k)}][\ensuremath{{\mathbb{T}}} _{f_m}^{(k)},\ensuremath{{\mathbb{T}}} _{f_l}^{(k)}]- (T_{\{ f_i,f_j\} _1;1}^{(k)}\otimes T_{f_i f_j;2}^{(k)}\otimes T_{f_i f_j;3}^{(k)}+ $$ $$ T_{f_i f_j;1}^{(k)}\otimes T_{\{ f_i,f_j\} _2;2}^{(k)}\otimes T_{f_i f_j;3}^{(k)}+ T_{f_i f_j;1}^{(k)}\otimes T_{f_i f_j;2}^{(k)}\otimes T_{\{ f_i,f_j\} _3;3}^{(k)}) $$ $$ (T_{\{ f_m,f_l\} _1;1}^{(k)}\otimes T_{f_m f_l;2}^{(k)}\otimes T_{f_m f_l;3}^{(k)}+ T_{f_m f_l;1}^{(k)}\otimes T_{\{ f_m,f_l\} _2;2}^{(k)}\otimes T_{f_m f_l;3}^{(k)}+ $$ $$ T_{f_m f_l;1}^{(k)}\otimes T_{f_m f_l;2}^{(k)}\otimes T_{\{ f_m,f_l\} _3;3}^{(k)})||=O(\frac{1}{k}). $$ This follows from the elementary inequality $$ ||M_1M_2-N_1N_2||=||M_1M_2-N_1M_2+N_1M_2-N_1N_2||\le $$ $$ ||M_1-N_1|| \ ||M_2||+||N_1|| \ ||M_2-N_2|| $$ by setting $$ M_1=ik[\ensuremath{{\mathbb{T}}} _{f_i}^{(k)},\ensuremath{{\mathbb{T}}} _{f_j}^{(k)}], \ M_2=ik[\ensuremath{{\mathbb{T}}} _{f_m}^{(k)},\ensuremath{{\mathbb{T}}} _{f_l}^{(k)}] $$ $$ N_1=T_{\{ f_i,f_j\} _1;1}^{(k)}\otimes T_{f_i f_j;2}^{(k)}\otimes T_{f_i f_j;3}^{(k)}+ T_{f_i f_j;1}^{(k)}\otimes T_{\{ f_i,f_j\} _2;2}^{(k)}\otimes T_{f_i f_j;3}^{(k)}+ $$ $$ T_{f_i f_j;1}^{(k)}\otimes T_{f_i f_j;2}^{(k)}\otimes T_{\{ f_i,f_j\} _3;3}^{(k)}, $$ $$ N_2=T_{\{ f_m,f_l\} _1;1}^{(k)}\otimes T_{f_m f_l;2}^{(k)}\otimes T_{f_m f_l;3}^{(k)}+ T_{f_m f_l;1}^{(k)}\otimes T_{\{ f_m,f_l\} _2;2}^{(k)}\otimes T_{f_m f_l;3}^{(k)}+ $$ $$ T_{f_m f_l;1}^{(k)}\otimes T_{f_m f_l;2}^{(k)}\otimes T_{\{ f_m,f_l\} _3;3}^{(k)}, $$ with the use of Theorem \ref{bmstheorem}(ii), Prop. \ref{tensorprop} and Cor. \ref{tensorcorcomm}. Next, using Lemma \ref{tensorlem}, Theorem \ref{bmstheorem}(ii) and Prop.
\ref{bmsprop}, we get: $$ ||-k^2[\ensuremath{{\mathbb{T}}} _{f_i}^{(k)},\ensuremath{{\mathbb{T}}} _{f_j}^{(k)}][\ensuremath{{\mathbb{T}}} _{f_m}^{(k)},\ensuremath{{\mathbb{T}}} _{f_l}^{(k)}]-\Bigl [ T_{ \{ f_i,f_j\}_1 \{ f_m,f_l\} _1 ;1}^{(k)}\otimes T_{ f_if_jf_mf_l;2}^{(k)} \otimes T_{ f_if_jf_mf_l ;3}^{(k)}+ $$ $$ T_{ f_if_jf_mf_l ;1}^{(k)}\otimes T_{ \{ f_i,f_j\} _2 \{ f_m,f_l\} _2 ;2}^{(k)} \otimes T_{ f_if_jf_mf_l ;3}^{(k)}+ T_{f_if_jf_mf_l ;1}^{(k)}\otimes T_{f_if_jf_mf_l ;2}^{(k)} \otimes T_{ \{ f_i,f_j\} _3 \{ f_m,f_l\} _3 ;3}^{(k)}+ $$ $$ T_{f_if_j\{ f_m,f_l\} _1;1}^{(k)} \otimes ( T_{f_mf_l\{ f_i,f_j\} _2;2}^{(k)} \otimes T_{f_if_jf_mf_l;3}^{(k)} + T_{f_if_jf_mf_l;2}^{(k)} \otimes T_{f_mf_l\{ f_i,f_j\} _3;3}^{(k)} )+ $$ $$ T_{f_mf_l\{ f_i,f_j\} _1;1}^{(k)}\otimes (T_{f_if_j\{ f_m,f_l\} _2;2}^{(k)} \otimes T_{f_if_jf_mf_l;3}^{(k)}+ T_{f_if_jf_mf_l;2}^{(k)} \otimes T_{f_if_j\{ f_m,f_l\} _3;3}^{(k)})+ $$ $$ T_{f_if_jf_mf_l;1}^{(k)} \otimes ( T_{f_if_j\{ f_m,f_l\} _2;2}^{(k)} \otimes T_{f_mf_l\{ f_i,f_j\} _3;3}^{(k)}+ T_{f_mf_l\{ f_i,f_j\} _2;2}^{(k)}\otimes T_{f_if_j\{ f_m,f_l\} _3;3}^{(k)}) \Bigr ] ||=O(\frac{1}{k}). $$ After that we note: $$ [\ensuremath{{\mathbb{T}}} _{f_1}^{(k)},\ensuremath{{\mathbb{T}}} _{f_2}^{(k)},\ensuremath{{\mathbb{T}}} _{f_3}^{(k)},\ensuremath{{\mathbb{T}}} _{f_4}^{(k)}]= \sum_{\substack{{(i,j,m,l)=}\\ {(1,2,3,4),(1,3,2,4),(1,4,2,3)}\\ {(3,4,1,2),(2,4,1,3),(2,3,1,4)} } } \sign (i,j,m,l)[\ensuremath{{\mathbb{T}}} _{f_i}^{(k)},\ensuremath{{\mathbb{T}}} _{f_j}^{(k)}][\ensuremath{{\mathbb{T}}} _{f_m}^{(k)},\ensuremath{{\mathbb{T}}} _{f_l}^{(k)}] $$ (see (\ref{comm4})). Taking the sum, we get: $$ ||-k^2[\ensuremath{{\mathbb{T}}} _{f_1}^{(k)},\ensuremath{{\mathbb{T}}} _{f_2}^{(k)},\ensuremath{{\mathbb{T}}} _{f_3}^{(k)},\ensuremath{{\mathbb{T}}} _{f_4}^{(k)}]- 2{\mathbb{W}}_{f_1,f_2,f_3,f_4 }^{(k)}||=O(\frac{1}{k}). $$ $\Box$ \end{document}
\begin{document} \title{Symmetry exploitation for Online Machine Covering with Bounded Migration\footnote{This work was partially supported by FONDECYT project 11140579, FONDECYT project 11130266, and by Nucleo Milenio Informaci\'on y Coordinaci\'on en Redes ICM/FIC RC130003.}} \begin{abstract} Online models that allow recourse are highly effective in situations where classical models are too pessimistic. One such setting is the online machine covering problem on identical machines. Here, jobs arrive one by one and must be assigned to machines with the objective of maximizing the minimum machine load. When a job arrives, we are allowed to reassign some jobs as long as their total size is (at most) proportional to the processing time of the arriving job. The proportionality constant is called the \emph{migration factor} of the algorithm. Using a rounding procedure with useful structural properties for online packing and covering problems, we first design a simple $(1.7 + \varepsilon)$-competitive algorithm using a migration factor of $O(1/\varepsilon)$, which maintains at every arrival a locally optimal solution with respect to the Jump neighborhood. After that, we present as our main contribution a more involved $(4/3+\varepsilon)$-competitive algorithm using a migration factor of $\tilde{O}(1/\varepsilon^3)$. At every arrival, we run an adaptation of the \emph{Largest Processing Time first} ($\textnormal{LPT}$) algorithm. Since in both cases the new job can cause a complete change of the assignment of smaller jobs, a low migration factor is achieved by carefully exploiting the highly symmetric structure obtained by the rounding procedure. \end{abstract} \section{Introduction} We consider a fundamental load balancing problem where $n$ jobs need to be assigned to $m$ identical parallel machines. Each job $j$ is fully characterized by a non-negative processing time $p_j$. Given an assignment of jobs, the load of a machine is the sum of the processing times of the jobs assigned to it. The \emph{machine covering problem} asks for an assignment of jobs to machines maximizing the load of the least loaded machine. This problem is well known to be strongly $\text{NP}$-hard and allows for a polynomial-time approximation scheme (PTAS)~\cite{W97}. A well studied algorithm for this problem is the \emph{Largest Processing Time First} rule ($\textnormal{LPT}$), which sorts the jobs non-increasingly by processing time and assigns them iteratively to the least loaded machine. Deuermeyer et al. \cite{DFL82} show that $\textnormal{LPT}$ is a $\frac{4}{3}$-approximation and that this factor is asymptotically tight; later, Csirik et al. \cite{CKW92} refine the analysis, giving a tight bound for each~$m$. In the online setting, jobs arrive one after another, and at the moment of an arrival we must decide on a machine to which the arriving job is assigned. This natural problem does not admit a constant competitive ratio. Deterministically, the best possible competitive ratio is~$m$~\cite{W97}, while randomization allows for an $\tilde{O}(\sqrt{m})$-competitive algorithm, which is the best possible up to logarithmic factors~\cite{AE98}. \paragraph*{\textbf{Dynamic model}.} The previous negative facts motivate the study of a relaxed online scenario with \emph{bounded migration}. Unlike in the classic online model, when a new job $j$ arrives we are allowed to reassign other jobs. More precisely, given a constant $\beta>0$, we can migrate jobs whose total size is upper bounded by $\beta p_j$.
The value $\beta$ is called the \emph{migration factor} and it accounts for the robustness of the computed solutions. In one extreme, we can model the usual online framework by setting $\beta=0$. In the other extreme, setting $\beta=\infty$ allows us to compute an optimal offline solution in each iteration. Our main interest is to understand the exact trade-off between the migration factor $\beta$ and the competitiveness of our algorithms. Besides being a natural problem with an interesting theoretical motivation, the model was originally introduced in order to obtain good algorithms for a problem arising in the context of Storage Area Networks~(SAN)~\cite{SSS09}. \paragraph*{\textbf{Local search and migration.}} The local search method has been extensively used to tackle different hard combinatorial problems, and it is closely related to online algorithms where recourse is allowed. This comes from the fact that simple local search neighborhoods allow us to obtain considerably improved solutions while keeping accurate control over the recourse actions needed, and in some cases even a bounded number of local moves leads to substantially improved solutions (see \cite{MSVW16,GGK16,Lacki2015} for examples in network design problems). \paragraph*{\textbf{Related Work.}} Sanders et al.~\cite{SSS09} develop online algorithms for load balancing problems in the migration framework. For the makespan minimization objective, where the aim is to minimize the maximum load, they give a $(1+\varepsilon)$-competitive algorithm with migration factor $2^{\tilde{O}(1/\varepsilon)}$. A major open problem in this area is to determine whether a migration factor of $\text{poly}(1/\varepsilon)$ is achievable. The landscape for the machine covering problem is somewhat different. Sanders et al.~\cite{SSS09} give a $2$-competitive algorithm with migration factor $1$, and this is, until now, the best known competitive ratio for any algorithm with a constant migration factor. On the negative side, Skutella and Verschae \cite{SV10} show that it is not possible to maintain arbitrarily near-optimal solutions using a constant migration factor, giving a lower bound of $20/19$ for the best competitive ratio achievable in that case. The lower bound is based on an instance where the arriving jobs are very small, so that they do not allow any other job to be migrated. This motivated the study of an amortized version, called the \emph{reassignment cost model}, where they develop a $(1+\varepsilon)$-competitive algorithm using a constant reassignment factor. They also show that if all arriving jobs are larger than $\varepsilon\cdot\textnormal{OPT}$, then there is a $(1+\varepsilon)$-competitive algorithm with constant migration factor. Similar migration models have been studied for other packing and covering problems. For example, Epstein \& Levin \cite{EL09} design a $(1+\varepsilon)$-competitive algorithm for the online bin packing problem using a migration factor of $2^{\tilde{O}(1/\varepsilon^2)}$, which was later improved by Jansen \& Klein~\cite{JK13} to a $\text{poly}(1/\varepsilon)$ migration factor, and then further refined by Berndt et al.~\cite{BJK15}. Also, for makespan minimization with preemption and other objectives, Epstein \& Levin \cite{EL14} design a best-possible online algorithm using a migration factor of~$\left(1-\frac{1}{m}\right)$.
Regarding local search applied to load balancing problems, many neighborhoods have been studied, such as \emph{Jump}, \emph{Swap}, \emph{Push} and \emph{Lexicographical Jump}, in the context of makespan minimization on related machines \cite{SV07}, makespan minimization on restricted parallel machines \cite{RRSV10}, and also multi-exchange neighborhoods for makespan minimization on identical parallel machines \cite{FNS04}. For the case of machine covering, Chen et al.~\cite{CEKvS13} study the Jump neighborhood in a game-theoretical context, proving that every locally optimal solution is $1.7$-approximate and that this factor is tight. \paragraph*{\textbf{Our Contribution.}} Our main result is a $(4/3+\varepsilon)$-competitive algorithm using a $\text{poly}(1/\varepsilon)$ migration factor. This is achieved by running a carefully crafted version of $\textnormal{LPT}$ at the arrival of each new job. We would like to stress that, even though $\textnormal{LPT}$ is a simple and very well studied algorithm in the offline context, directly running this algorithm in each time step in the online context yields an unbounded migration factor; see Figure~\ref{fig:LPT_no_red} for an illustrative example and Lemma~\ref{lm:nonConstantMigration} in Appendix~\ref{app:nonConstant} for a proof.
\begin{figure} \centering \captionsetup[subfigure]{justification=centering} \begin{subfigure}[b]{.5\textwidth} \centering \begin{tikzpicture}[xscale=0.5,yscale=0.4]
\draw (7.5,-0.5) rectangle (12.5,4.5); \draw (8,2) node {$1$}; \draw (9,2) node {$2$}; \draw (10,2) node {$3$}; \draw (11,2) node {$4$}; \draw (12,2) node {$5$};
\draw (12.5,-0.5) rectangle (13.5,2.75); \draw (13,1.125) node {$6$}; \draw (13.5,-0.5) rectangle (14.5,2.5); \draw (14,1) node {$7$}; \draw (14.5,-0.5) rectangle (15.5,2.25); \draw (15,0.875) node {$8$}; \draw (15.5,-0.5) rectangle (16.5,2); \draw (16,0.75) node {$9$};
\draw (12.5,2.75) rectangle (13.5,4.25); \draw (13,3.5) node {${13}$}; \draw (13.5,2.5) rectangle (14.5,4.25); \draw (14,3.375) node {${12}$}; \draw (14.5,2.25) rectangle (15.5,4.25); \draw (15,3.25) node {${11}$}; \draw (15.5,2) rectangle (16.5,4.25); \draw (16,3.125) node {${10}$};
\draw (12.5,4.25) rectangle (13.5,5.75); \draw (13,5) node {${14}$}; \draw (13.5,4.25) rectangle (14.5,5.75); \draw (14,5) node {${15}$}; \draw (14.5,4.25) rectangle (15.5,5.75); \draw (15,5) node {${16}$}; \draw (15.5,4.25) rectangle (16.5,5.75); \draw (16,5) node {${17}$};
\draw (17,-0.5) rectangle (18,3); \draw (17.5,1.25) node {${j^*}$};
\draw (7.5,7) -- (7.5,-0.5) -- (16.5,-0.5) -- (16.5,7) ; \draw (8.5,7) -- (8.5,-0.5); \draw (9.5,7) -- (9.5,-0.5); \draw (10.5,7) -- (10.5,-0.5); \draw (11.5,7) -- (11.5,-0.5); \draw (12.5,7) -- (12.5,-0.5); \draw (13.5,7) -- (13.5,-0.5); \draw (14.5,7) -- (14.5,-0.5); \draw (15.5,7) -- (15.5,-0.5);
\end{tikzpicture} \caption{$\textnormal{LPT}$ for the original instance \\and arriving job $j^*$.} \label{fig:LPT_no_red1} \end{subfigure} \begin{subfigure}[b]{.5\textwidth} \centering \begin{tikzpicture}[xscale=0.5,yscale=0.4]
\draw (7.5,-0.5) rectangle (12.5,4.5); \draw (8,2) node {$1$}; \draw (9,2) node {$2$}; \draw (10,2) node {$3$}; \draw (11,2) node {$4$}; \draw (12,2) node {$5$};
\draw (12.5,-0.5) rectangle (13.5,3); \draw (13,1.25) node {${j^*}$}; \draw (13.5,-0.5) rectangle (14.5,2.75); \draw (14,1.125) node {$6$}; \draw (14.5,-0.5) rectangle (15.5,2.5); \draw (15,1) node {$7$}; \draw (15.5,-0.5) rectangle (16.5,2.25); \draw (16,0.875) node {$8$};
\draw[draw=black,ultra thick] (12.5,3) rectangle (13.5,4.75); \draw (13,3.875) node {${12}$}; \draw[draw=black,ultra thick] (13.5,2.75) rectangle (14.5,4.75); \draw (14,3.75) node {${11}$}; \draw[draw=black,ultra thick] (14.5,2.5) rectangle (15.5,4.75); \draw (15,3.625) node {${10}$}; \draw[draw=black,ultra thick] (15.5,2.25) rectangle (16.5,4.75); \draw (16,3.5) node {${9}$};
\draw[draw=black,ultra thick] (7.5,4.5) rectangle (12.5,6); \draw (8,5.25) node {${17}$}; \draw (9,5.25) node {${16}$}; \draw (10,5.25) node {${15}$}; \draw (11,5.25) node {${14}$}; \draw (12,5.25) node {${13}$};
\draw[draw=black,ultra thick] (8.5,4.5) -- (8.5,6); \draw[draw=black,ultra thick] (9.5,4.5) -- (9.5,6); \draw[draw=black,ultra thick] (10.5,4.5) -- (10.5,6); \draw[draw=black,ultra thick] (11.5,4.5) -- (11.5,6);
\draw (7.5,7) -- (7.5,-0.5) -- (16.5,-0.5) -- (16.5,7) ; \draw (8.5,7) -- (8.5,-0.5); \draw (9.5,7) -- (9.5,-0.5); \draw (10.5,7) -- (10.5,-0.5); \draw (11.5,7) -- (11.5,-0.5); \draw (12.5,7) -- (12.5,-0.5); \draw (13.5,7) -- (13.5,-0.5); \draw (14.5,7) -- (14.5,-0.5); \draw (15.5,7) -- (15.5,-0.5);
\end{tikzpicture} \caption{$\textnormal{LPT}$ for the new instance. Thick items correspond to migrated jobs.} \label{fig:LPT_no_red2} \end{subfigure} \caption{$\Omega(m)$ migration factor needed to maintain $\textnormal{LPT}$ at the arrival of $j^*$.} \label{fig:LPT_no_red} \end{figure}
To overcome this barrier, we first adapt a less standard rounding procedure to the online framework. Roughly speaking, the rounding reduces the number of possible sizes of jobs larger than $\Omega(\varepsilon\textnormal{OPT})$ (where $\textnormal{OPT}$ is the offline optimum value) to $\tilde{O}(1/\varepsilon)$ many values, and furthermore these values are multiples of a common number $g\in \Theta(\varepsilon^2\textnormal{OPT})$. This implies that the number of possible loads of machines having only big jobs is constant, since these loads are multiples of $g$ as well. Unlike known techniques used in previous work that yield similar results (see e.g.~\cite{JKV16}), our rounding is well suited for online algorithms and helps simplify the analysis, as it does not depend on $\textnormal{OPT}$ (which varies through the iterations). In order to show the usefulness of the rounding procedure, we first present a simple $(1.7+\varepsilon)$-competitive algorithm using a migration factor of $O(1/\varepsilon)$. This algorithm maintains, throughout the arrival of new jobs, a locally optimal solution with respect to the Jump neighborhood for large jobs, together with a greedy assignment of small jobs on top of that. Although for general instances this can induce a very large migration factor, as discussed before, for rounded instances we have very accurate control over the jumps needed to reach a locally optimal solution, exploiting the fact that there is only a constant number of possible processing times for large jobs. In the second part of the paper we proceed with the analysis of our $(4/3+\varepsilon)$-competitive algorithm. Here we crucially make use of the properties obtained by the rounding procedure to create symmetries. After a new job arrives, we re-run the LPT algorithm on the new instance. When assigning a job to a currently least loaded machine, since there is a constant number of possible machine loads, there will usually be several least loaded machines to which the job can be assigned. All of these options lead to different (but symmetric) solutions in terms of job assignments, all having the same load vector and thus the same objective value.
Broadly speaking, the algorithm will construct one of these symmetric schedules, trying to maintain as many machines with the same assignments as in the previous time step. The analysis of the algorithm relies on monotonicity properties of $\textnormal{LPT}$ which, coupled with the rounding, imply that for every job size the increase in the number of machines with different assignments (w.r.t.\ the solution of the previous time step) is constant. This finally yields a migration factor that only grows polynomially in $1/\varepsilon$. Finally, we give a lower bound of $17/16$ for the best competitive ratio achievable by an algorithm with constant migration, improving the previous bound from Skutella \& Verschae~\cite{SV10}.
\section{Preliminaries}\label{LPTsec}
Consider a set of $n$ jobs $\mathcal{J}$ and a set of $m$ machines $\mathcal{M}$. In our problem, a solution or schedule $\mathcal{S}:\mathcal{J}\rightarrow \mathcal{M}$ corresponds to an assignment of jobs to machines. The set of jobs assigned to a machine $i$ is then $\mathcal{S}^{-1}(i)\subseteq \mathcal{J}$. The load of machine $i$ in $\mathcal{S}$ is $\ell_i(\mathcal{S}) = \sum_{j\in \mathcal{S}^{-1}(i)} p_j$. The minimum load is denoted by $\ell_{\min}(\mathcal{S})=\min_{i\in \mathcal{M}} \ell_i(\mathcal{S})$, and a machine $i$ is said to be \emph{least loaded} in $\mathcal{S}$ if $\ell_i(\mathcal{S}) = \ell_{\min}(\mathcal{S})$. For an algorithm $\mathcal{A}$ and an instance $(\mathcal{J},\mathcal{M})$, we denote by $\mathcal{S}_{\mathcal{A}}(\mathcal{J},\mathcal{M})$ the schedule returned by $\mathcal{A}$ when run on $(\mathcal{J},\mathcal{M})$. Similarly, $\mathcal{S}_{\textnormal{OPT}}(\mathcal{J},\mathcal{M})$ denotes an optimal schedule and $\textnormal{OPT}(\mathcal{J},\mathcal{M})$ its minimum load. When it is clear from the context, we will drop the dependency on $\mathcal{J}$ or $\mathcal{M}$.
\subsection{Algorithms with robust structure}
An important fact used in the design of the robust PTAS for makespan minimization by Sanders et al.~\cite{SSS09} is that small jobs can be assigned greedily almost without affecting the approximation guarantee. This is, however, not the case for machine covering; see, e.g.,~\cite{SV10} or Section~\ref{sec:lowerbound}. One way to avoid this inconvenience is to develop algorithms that are oblivious to the arrival of small jobs, that is, algorithms where the assignment of big jobs is not affected when a new small job arrives.
\begin{definition}\label{EstRob} Let $h\in\mathbb{R}_+$. An algorithm $\mathcal{A}$ has \textbf{robust structure at level $h$} if, for any instance $(\mathcal{J},\mathcal{M})$ and $j^*\notin \mathcal{J}$ such that $p_{j^*}< h$, $\mathcal{S}_{\mathcal{A}}(\mathcal{J},\mathcal{M})$ and $\mathcal{S}_{\mathcal{A}}(\mathcal{J}\cup\{j^*\},\mathcal{M})$ assign to the same machines all the jobs in $\mathcal{J}$ with processing time at least $h$. \end{definition}
This definition also highlights the usefulness of working with the $\textnormal{LPT}$ rule, since the addition of a new small job to the instance does not affect the assignment of larger jobs. Indeed, it is easy to see the following.
\begin{remark} For any $h\in \mathbb{R}_+$, $\textnormal{LPT}$ has robust structure at level $h$. \end{remark}
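To fix ideas, the following minimal sketch (Python, with illustrative names only; it is not part of the formal development) implements $\textnormal{LPT}$ list scheduling together with the load quantities used throughout. Since jobs are placed in non-increasing order of size, a small job arriving last never changes the machines of the larger jobs, which is the robust structure property stated in the remark above.
\begin{verbatim}
def lpt(p, m):
    """LPT list scheduling: place jobs in non-increasing order of size,
    always on a currently least loaded machine."""
    loads = [0.0] * m              # load of each machine
    assignment = {}                # job index -> machine index
    for j in sorted(range(len(p)), key=lambda j: -p[j]):
        i = min(range(m), key=lambda i: loads[i])   # a least loaded machine
        assignment[j] = i
        loads[i] += p[j]
    return assignment, loads

# The machine covering objective of the resulting schedule is min(loads).
\end{verbatim}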
We proceed now to define \emph{relaxed} solutions where, roughly speaking, small jobs are added greedily on top of the assignment of big jobs.
\begin{definition}\label{k-rel} Let $\mathcal{A}$ be an $\alpha$-approximation algorithm for the machine covering problem, with $\alpha$ constant, $k_1, k_2\in \mathbb{R}_+$ constants, $1\le k_1\le k_2$ and $\varepsilon>0$. Given a machine covering instance $(\mathcal{J},\mathcal{M})$, a schedule $\mathcal{S}$ is a \textbf{$(k_1,k_2)$-relaxed version of $\mathcal{S}_{\mathcal{A}}$} if: \begin{enumerate} \item jobs with processing time at least $k_1\varepsilon\textnormal{OPT}$ are assigned exactly as in $\mathcal{S}_\mathcal{A}$, and \item for every machine $i\in \mathcal{M}$, if $\mathcal{S}$ assigns at least one job of size less than $k_1\varepsilon\textnormal{OPT}$ to $i$, then $\ell_i(\mathcal{S}) \le \ell_{\min}(\mathcal{S}) + k_2\varepsilon\textnormal{OPT}$. \end{enumerate} \end{definition}
The following lemma shows that we can consider relaxed versions of known algorithms or solutions while almost not affecting the approximation factor. This will be helpful to control the migration of small jobs.
\begin{lemma}\label{RobGree} Let $\mathcal{A}$ be an $\alpha$-approximation algorithm, $\alpha\ge1$ constant, $k_1, k_2\in\mathbb{R}_+$ constants, $1\le k_1\le k_2$, $0<\varepsilon<\frac{1}{2k_2\alpha}$ and $(\mathcal{J},\mathcal{M})$ a machine covering instance. Every $(k_1,k_2)$-relaxed version of $\mathcal{S}_{\mathcal{A}}$ is an $(\alpha + O(\varepsilon))$-approximate solution. \end{lemma}
\begin{proof} Suppose by contradiction that there exists a $(k_1,k_2)$-relaxed version of $\mathcal{S}_{\mathcal{A}}$, say $\mathcal{S}$, which is not $(\alpha + 2k_2\alpha^2\varepsilon)$-approximate. This implies that $\ell_{\min}(\mathcal{S}) < \frac{1}{\alpha+2k_2\alpha^2\varepsilon}\textnormal{OPT} \le \left( \frac{1}{\alpha} - k_2\varepsilon\right) \textnormal{OPT}$, where the second inequality uses that $\varepsilon<\frac{1}{2k_2\alpha}$. Let $\mathcal{M}_s$ be the set of machines where $\mathcal{S}$ assigns at least one job of size less than $k_1\varepsilon\textnormal{OPT}$. Notice that $\mathcal{M}_s \neq \emptyset$ and, in fact, the least loaded machine in $\mathcal{S}$ belongs to $\mathcal{M}_s$, because otherwise $\ell_{\min}(\mathcal{S}_{\mathcal{A}}) = \ell_{\min}(\mathcal{S}) < \left( \frac{1}{\alpha} - k_2\varepsilon\right) \textnormal{OPT}$, which contradicts that $\mathcal{S}_\mathcal{A}$ is $\alpha$-approximate. Since $\mathcal{S}$ and $\mathcal{S}_{\mathcal{A}}$ assign jobs of size at least $k_1\varepsilon\textnormal{OPT}$ to the same machines, and every job of size less than $k_1\varepsilon\textnormal{OPT}$ is assigned by $\mathcal{S}$ to a machine of $\mathcal{M}_s$, the jobs assigned by $\mathcal{S}_{\mathcal{A}}$ to $\mathcal{M}_s$ form a subset of the jobs assigned by $\mathcal{S}$ to $\mathcal{M}_s$, whose total processing time is at most $\lvert \mathcal{M}_s \rvert(\ell_{\min}(\mathcal{S}) + k_2\varepsilon\textnormal{OPT})$. Thus, \begin{equation*} \ell_{\min}(\mathcal{S}_{\mathcal{A}}) \le \displaystyle\min_{i\in\mathcal{M}_{s}}{\ell_i(\mathcal{S}_{\mathcal{A}})} \le \ell_{\min}(\mathcal{S}) + k_2\varepsilon\textnormal{OPT} < \frac{1}{\alpha}\textnormal{OPT}, \end{equation*} which contradicts that $\mathcal{S}_\mathcal{A}$ is $\alpha$-approximate. \end{proof}
The described results allow us to significantly simplify the analysis of the designed algorithms. For example, consider $\textnormal{LPT}$ and suppose that at the arrival of jobs with processing time at least some specific value $h=\Theta(\varepsilon \textnormal{OPT})$ we can construct relaxed versions of solutions constructed by $\textnormal{LPT}$.
Dealing with an arriving job of size smaller than $h$ becomes a simple task, since assigning it to a current least loaded machine does not affect the assignment of big jobs, and we can prove that, for suitable constants $k_1, k_2$, a $(k_1,k_2)$-relaxed version of a solution constructed by $\textnormal{LPT}$ is maintained this way, thus almost preserving its approximation ratio. It is important to remark that this approach is useful only if the algorithm has robust structure since, in general, the arrival of a small job does not allow the migration of big jobs, whose assignment may nevertheless need to change at such arrivals in order to maintain the approximation factor (see for example Section~\ref{sec:lowerbound}).
\subsection{Rounding procedure}\label{sec:Rounding}
Another useful tool is rounding the processing times to simplify the instance and create symmetries while affecting the approximation factor only by a negligible amount. Let us consider $0<\varepsilon<1$ such that $1/\varepsilon \in \mathbb{Z}$. We use the following rounding technique, which is a slight modification of the one presented by Hochbaum and Shmoys in the context of makespan minimization on related machines~\cite{HS88}. For any job $j$, let $e_j\in \mathbb{Z}$ be such that $2^{e_j}\le p_j< 2^{e_j+1}$. We then round down $p_j$ to the largest number of the form $2^{e_j} + k\varepsilon 2^{e_j}$ with $k\in \mathbb{N}_0$ that does not exceed $p_j$, that is, we define $\tilde{p}_j := 2^{e_j} + \left\lfloor \frac{p_j-2^{e_j}}{\varepsilon2^{e_j}} \right\rfloor \varepsilon 2^{e_j}.$ Observe that $p_j\ge \tilde{p}_j \ge p_j - \varepsilon2^{e_j} \ge (1-\varepsilon)p_j$. Hence, an $\alpha$-approximation algorithm for a rounded instance has an approximation ratio of $\alpha/(1-\varepsilon)=\alpha+O(\varepsilon)$ for the original instance. From now on we work exclusively with the rounded processing times. Consider an upper bound $\textnormal{\footnotesize{UB}}$ on $\textnormal{OPT}$ such that $\textnormal{OPT}\le \textnormal{\footnotesize{UB}} \le 2\textnormal{OPT}$. This can be computed using any $2$-approximation for the problem, in particular $\textnormal{LPT}$. Consider the index set \begin{equation} \label{eq:deftP1} \tilde{I}(\textnormal{\footnotesize{UB}}) := \left\lbrace i\in\mathbb{Z}:\varepsilon\textnormal{\footnotesize{UB}} \le 2^i < \textnormal{\footnotesize{UB}}\right \rbrace= \{\ell,\dots,u\}. \end{equation} We classify jobs as \emph{small} if $\tilde{p}_j < 2^{\ell}$, \emph{big} if $\tilde{p}_j\in [2^{\ell},2^{u+1})$, and \emph{huge} otherwise. Notice that small jobs have size at most $2\varepsilon\textnormal{\footnotesize{UB}}$ and huge jobs have size at least $\textnormal{\footnotesize{UB}}$. As we will see, our main difficulty will come from big jobs; small and huge jobs are easy to handle. Notice that in every solution $\mathcal{S}$ constructed using $\textnormal{LPT}$, if we ignore small jobs, huge jobs are assigned to a machine on their own and every machine $i\in \mathcal{M}$ without huge jobs has load at most $2\textnormal{\footnotesize{UB}}$. This is because $i$ either has a big job alone, which has size at most $2\textnormal{\footnotesize{UB}}$, or it has load at most $\ell_{\min}(\mathcal{S}) + \tilde{p}_j \le 2\ell_{\min}(\mathcal{S}) \le 2 \textnormal{\footnotesize{UB}}$, where $j$ is the smallest job assigned to $i$ (note that $\tilde{p}_j\le\ell_{\min}(\mathcal{S})$: when $j$ was assigned, machine $i$ was least loaded and already contained a job of size at least $\tilde{p}_j$).
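As an illustration, the following minimal sketch (Python, with hypothetical helper names; floating-point issues are ignored) computes the rounded size $\tilde{p}_j$ and the small/big/huge classification induced by a given upper bound $\textnormal{\footnotesize{UB}}$; it is only meant to make the definitions above concrete.
\begin{verbatim}
import math

def round_size(p, eps):
    # choose e with 2^e <= p < 2^(e+1)
    e = math.floor(math.log2(p))
    # largest k such that 2^e + k*eps*2^e <= p
    k = math.floor((p - 2 ** e) / (eps * 2 ** e))
    return 2 ** e + k * eps * 2 ** e

def classify(p_rounded, ub, eps):
    # index set I(UB) = {i : eps*UB <= 2^i < UB} = {ell, ..., u}
    ell = math.ceil(math.log2(eps * ub))
    u = math.ceil(math.log2(ub)) - 1
    if p_rounded < 2 ** ell:
        return "small"
    if p_rounded < 2 ** (u + 1):
        return "big"
    return "huge"
\end{verbatim}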
Let \begin{equation} \label{eq:deftP2} \tilde{P}=\left\lbrace2^{i}+k\varepsilon2^{i}: i\in\{\ell,\ldots,u\}, k\in\{0,1,\ldots,(1/\varepsilon)-1\}\right\rbrace \end{equation} be the set of all (rounded) processing times that a big job may take. The next lemma highlights the main properties of our rounding procedure.
\begin{lemma} \label{lm:rounding} Consider the rounded job sizes $\tilde{p}_j$ for all $j$. Then the following holds: \begin{enumerate} \item $|\tilde{P}|\in O((1/\varepsilon)\log (1/\varepsilon))$, and \item for each big or huge job $j$ it holds that $\tilde{p}_j=h\cdot \varepsilon 2^{\ell}$ for some $h\in \mathbb{N}_0$. \end{enumerate} \end{lemma}
\begin{proof} From the definition of $\tilde{P}$, we have that $|\tilde{P}| = \frac{1}{\varepsilon}(u-\ell+1)$. Since $\ell \ge \log\left(\varepsilon\textnormal{\footnotesize{UB}}\right)$ and $u\le \log(\textnormal{\footnotesize{UB}})$, we get $u-\ell \le \log \left( \frac{1}{\varepsilon} \right)$. Altogether, $|\tilde{P}|\in O((1/\varepsilon)\log (1/\varepsilon))$. Also, if $j$ is a big or huge job, then $\tilde{p}_j = 2^{i}+k\varepsilon2^{i}$ for some $i\ge \ell$ and $k\in \{0,\dots, \frac{1}{\varepsilon}-1\}$. We conclude by noticing that $\tilde{p}_j = \left( \frac{1}{\varepsilon} + k \right)2^{i-\ell} \cdot \varepsilon2^{\ell} = h \cdot \varepsilon 2^{\ell}$ for $h = \left( \frac{1}{\varepsilon} + k \right)2^{i-\ell} \in \mathbb{N}_0$. \end{proof}
Unlike other standard rounding techniques (e.g.,~\cite{SV10, JKV16}), the rounded sizes do not depend on $\textnormal{OPT}$ (or $\textnormal{\footnotesize{UB}}$). This avoids migrations that would otherwise be triggered when rounded values change, and it greatly simplifies our arguments.
\section{A simple $(1.7+\varepsilon)$-competitive algorithm with $O(1/\varepsilon)$ migration.}\label{sec:JOpt}
In this section we adapt a local search algorithm for Machine Covering to the online context with migration, using the properties of instances rounded as described in Section~\ref{sec:Rounding} to bound the migration factor. In the context of online load balancing with migration, it is a good strategy to look for local search algorithms with good approximation guarantees and efficient running times. The main reason is that the migrated load corresponds to the total size of the jobs moved in the local search steps, and for simplified instances (rounded, for example) the number of local moves until a locally optimal solution is found is usually constant. That is the case for two natural neighborhoods used in local search algorithms for load balancing problems: \emph{Jump} and \emph{Swap}. Two solutions $\mathcal{S}, \mathcal{S}'$ are \emph{jump-neighbors} if they assign the jobs to the same machines (up to relabeling of machines or jobs of equal size) except for at most one job, and \emph{swap-neighbors} if they assign the jobs to the same machines (up to relabeling of machines or jobs of equal size) except for at most two jobs and, if they differ in exactly two jobs $j_1,j_2$, then these jobs are in swapped machines, i.e., $\mathcal{S}(j_1)=\mathcal{S}'(j_2)$ and $\mathcal{S}(j_2)=\mathcal{S}'(j_1)$. The \emph{weight} of a solution is defined through a two-dimensional vector having the minimum load of the schedule as first coordinate and the number of non-least loaded machines as second one.
We compare the weight of two solutions lexicographically\footnote{Just using the minimum load does not lead to good approximation ratios: think for example of $m>2$ machines and $m$ jobs of size $1$; it is swap-optimal to assign all of them to the same machine.}. In other words, a solution is jump-optimal (respectively swap-optimal) if the migration of a single job (resp.~the migration of a job or the swapping of two jobs) does not increase the minimum load and, if it maintains the minimum load, then it does not reduce the number of least loaded machines. The following lemma characterizes jump-optimal solutions for machine covering.
\begin{lemma}\label{CarOL} Given a machine covering instance $(\mathcal{J},\mathcal{M})$, a schedule $\mathcal{S}$ is jump-optimal if and only if for any machine $i\in\mathcal{M}$ and any job $j\in\mathcal{S}^{-1}(i)$, we have that $\ell_i(\mathcal{S})-p_j \le \ell_{\min}(\mathcal{S})$. \end{lemma}
\begin{proof} If $\mathcal{S}$ is jump-optimal and there is a job not satisfying the inequality, then moving it to a least loaded machine either increases the minimum load of the schedule or maintains it while reducing the number of least loaded machines, which is a contradiction. On the other hand, if $\mathcal{S}$ is not jump-optimal, then there is a job $j$ whose migration either increases the minimum load or maintains it while decreasing the number of least loaded machines. Consider first the case in which moving $j$ from a machine $i$ increases the minimum load. This means that the new load of machine $i$, $\ell_i(\mathcal{S})-p_j$, is at least the new minimum load, which is strictly larger than $\ell_{\min}(\mathcal{S})$, proving the needed inequality. Consider now the case in which moving $j$ from a machine $i$ to a machine $i'$ maintains the minimum load while reducing the number of least loaded machines. Then $i'$ must have been a non-unique least loaded machine in $\mathcal{S}$. Furthermore, machine $i$ cannot become a minimum loaded machine in the new schedule (otherwise the number of least loaded machines would not change). This means that $\ell_i(\mathcal{S})-p_j$ is strictly larger than $\ell_{\min}(\mathcal{S})$. \end{proof}
Chen et al.~\cite{CEKvS13} proved tight bounds for the approximability of jump-optimal solutions. Their result is stated in a game-theoretical framework, where jump-optimal solutions are equivalent to pure Nash equilibria of the Machine Covering game (see for example~\cite{V07}). In this game, each job is a selfish agent trying to minimize the load of its own machine, and the minimum load is the welfare function to be maximized. Through a small modification these bounds can be generalized to swap-optimal solutions as well (notice that a swap-optimal solution is jump-optimal by definition). We summarize the result in the following theorem, which will be useful for our purposes (refer to Appendix~\ref{sec:chenetal} for details).
\begin{theorem}[from~\cite{CEKvS13}]\label{thm:1.7apx} Any locally optimal solution with respect to Jump (resp.\ Swap) for Machine Covering is $1.7$-approximate. Moreover, there are instances showing that the approximation ratio of jump- (resp.\ swap-)optimality is at least $1.7$.
\varepsilonnd{theorem} \subsection{Online jump-optimality.} Using the rounding procedure developed in Section \ref{sec:Rounding}, jump-optimality can be adapted to the online context using migration factor $O\left(\frac{1}{\varepsilon}\right)$. In order to do this, we need an auxiliary algorithm called \varepsilonmph{Push} (Algorithm~\ref{push}) to assign a job $j$ to a given machine. This procedure inserts a given job to a given machine, and then iteratively removes the jobs that break jump-optimality according to Lemma~\ref{CarOL}, storing them in a special set $Q$ which is part of its output. This algorithm is the base of the Push neighborhood analyzed by Schuurman and Vredeveld \cite{SV07}. Our algorithm, described in detail in Algorithm~\ref{OLSonline}, is called every time a new job $j^*$ arrives to the system, and receives as input the current solution $\mathcal{S}$ for $(\mathcal{J},\mathcal{M})$, initialized as empty if $\mathcal{J}=\varepsilonmptyset$. It will output a $(k,2k)$-relaxed \jvcom{Que significa $4$-relaxed? La versión actual necesita dos constantes en la definión...}\wgcom{Corregido} version of a jump-optimal solution for some $k\le 4$. We use the concept of a \varepsilonmph{list-scheduling} algorithm, that refers to assigning jobs iteratively (in any order) to some machine of minimum load. Given a schedule $\mathcal{S}$, $\mathcal{S}_B$ denotes the restriction of schedule $\mathcal{S}$ to big jobs. \begin{algorithm}[h!t] \caption{Push} \label{push} \begin{algorithmic}[1] \Require \text{ Schedule $\mathcal{S}$ for $(\mathcal{J},\mathcal{M})$, $i\in \mathcal{M}$, $j\notin \mathcal{J}$} \Ensure \text{ $Q\subseteq \mathcal{J}$, schedule $\mathcal{S}'$ for $((\mathcal{J}\cup\{j\})\setminus Q,\mathcal{M})$} \State $Q \gets \varepsilonmptyset$. \State $\mathcal{S}' \gets \mathcal{S}$. \State assign $j$ to machine $i$ in $\mathcal{S}'$. \For{$k \in \mathcal{S}^{-1}(i)$} \If{$\varepsilonll_i(\mathcal{S}')-\tilde{p}_k>\ell_{\min}(\mathcal{S}')$} \State take out $k$ from $i$ in $\mathcal{S}'$. \State $Q \gets Q \cup \{k\}$. \EndIf \EndFor \State \Return $Q$, $\mathcal{S}'$. \varepsilonnd{algorithmic} \varepsilonnd{algorithm} The general idea of Algorithm~\ref{OLSonline} is to first round the instance, and assign the incoming job to a least loaded machine using Algorithm~\ref{push}. Jobs removed by Algorithm~\ref{push} need to be reassigned, which we do by iteratively applying Algorithm~\ref{push} on each one of them which is big until only small jobs are left to be assigned. At each iteration jump-optimality is preserved in a relaxed way, and as a last step all remaining small jobs are reassigned using list-scheduling. Notice that, since Algorithm~\ref{push} only removes jobs of size strictly smaller than the inserted job, each job is migrated at most once. \begin{algorithm}[h!] \caption{Online jump-optimality} \label{OLSonline} \textbf{Input:}{ \parbox[t]{12.75cm}{ Instances $(\mathcal{J},\mathcal{M})$ and $(\mathcal{J}',\mathcal{M})$ such that $\mathcal{J}'=\mathcal{J}\cup\{j^*\}$; a schedule $\mathcal{S}(\mathcal{J},\mathcal{M})$.}} \begin{algorithmic}[1] \State{run LPT on input $\mathcal{J}'$ and let $\tau$ be the minimum load. Set $\textnormal{\footnotesize{UB}}\gets 2\tau$. Define $\tilde{P}, \varepsilonll$, and $u$ based on this upper bound $\textnormal{\footnotesize{UB}}$ using \varepsilonqref{eq:deftP1} and \varepsilonqref{eq:deftP2}.} \State{set $\mathcal{S}' \gets \mathcal{S}$} \If{$\tilde{p}_{j^*} < 2^\varepsilonll$}. 
\State{assign $j^*$ to a least loaded machine in $\mathcal{S}'$.} \Else \State{set $Q_B \gets \{j^*\}$.}\Comment{Set with unassigned big jobs.} \State{set $Q_s \gets \emptyset$.} \Comment{Set with unassigned small jobs.} \While{$Q_B \neq \emptyset$} \State{let $j$ be the largest job in $Q_B$. Set $Q_B \gets Q_B \setminus\{j\}$.} \State{in $\mathcal{S}_B'$, use Push (Algorithm \ref{push}) to assign $j$ to a least loaded machine $m^*$, obtaining its output set $Q$. Update $\mathcal{S}_B'$ to be the output solution of this procedure.} \State reassign jobs in $\mathcal{S}'$ such that the assignment of (big) jobs in $\mathcal{S}'$ and $\mathcal{S}_B'$ coincides. \While{$m^*$ contains a small job w.r.t.\ $\textnormal{\footnotesize{UB}}$ and $\ell_{m^*}(\mathcal{S}')>\ell_{\min}(\mathcal{S}')+2^\ell$} \State remove the smallest job in $\mathcal{S}'^{-1}(m^*)$ and add it to $Q_s$. \EndWhile \State $Q_B \gets Q_B \cup Q$. \EndWhile \State assign the jobs in $Q_s$ to $\mathcal{S}'$ using list-scheduling. \EndIf \State \Return $\mathcal{S}'$. \end{algorithmic} \end{algorithm}
\begin{lemma}\label{lm:1.7-competitive} For any $h\in \mathbb{R}^+$, Algorithm~\ref{OLSonline} has robust structure at level $h$. Furthermore, Algorithm~\ref{OLSonline} is $(1.7+O(\varepsilon))$-competitive and has polynomial running time. \end{lemma}
\begin{proof} First of all, Algorithm~\ref{OLSonline} has robust structure at level $h$ for any $h\in\mathbb{R}^+$, because each time Push is called it removes jobs of total load smaller than the processing time of the inserted job (otherwise, the machine would not have been a least loaded machine), and if the arriving job is small, then nothing is migrated. This also directly implies that the running time of the algorithm is polynomial, because every job is migrated at most once, so the while loop is executed only a polynomial number of times. In order to show that the competitive ratio is $(1.7+O(\varepsilon))$, we just need to show that the schedule constructed by Algorithm~\ref{OLSonline} is a $(k_1,k_2)$-relaxed version of a jump-optimal solution for some constants $k_1, k_2$. Having that, the result follows from Theorem~\ref{thm:1.7apx} and Lemma~\ref{RobGree}. Let $k=\frac{2^\ell}{\varepsilon\textnormal{OPT}'}$, where $\textnormal{OPT}'$ denotes the optimal value of the new instance $(\mathcal{J}\cup\{j^*\},\mathcal{M})$. We will prove that the constructed schedule is a $(k,2k)$-relaxed version of a jump-optimal schedule by induction on $|\mathcal{J}|$ (notice that $k$ depends on $\textnormal{OPT}'$ and $\ell$, hence on the instance $(\mathcal{J},\mathcal{M})$). The base case when $\mathcal{J}=\emptyset$ is trivial. Let $\ell^{(1)}$ be the value of $\ell$ computed at the previous arrival, i.e., for the instance $(\mathcal{J},\mathcal{M})$, and let $k^{(1)}=\frac{2^{\ell^{(1)}}}{\varepsilon\textnormal{OPT}}$. Let us assume that $\mathcal{S}$ is a $(k^{(1)},2k^{(1)})$-relaxed version of some jump-optimal solution $\mathcal{S}^*$ for $(\mathcal{J},\mathcal{M})$ (recall that $\textnormal{OPT}\le \textnormal{OPT}'$ and $\ell^{(1)}\le \ell$). This means that $\mathcal{S}$ and $\mathcal{S}^*$ assign to the same machines jobs of size at least $2^{\ell^{(1)}}$, and machines in $\mathcal{S}$ containing at least one job of size smaller than $2^{\ell^{(1)}}$ have load at most $\ell_{\min}(\mathcal{S})+2\cdot2^{\ell^{(1)}}$.
Our goal is to prove that the output $\mathcal{S}'$ of Algorithm~\ref{OLSonline} when run on $\mathcal{S}(\mathcal{J},\mathcal{M})$ plus an arriving job $j^*$ is a $(k,2k)$-relaxed version of some jump-optimal solution $\mathcal{S}^{**}$ for $(\mathcal{J}\cup\{j^*\},\mathcal{M})$. Notice first that for this $k$ we have that big jobs have processing time at least $k\varepsilon\textnormal{OPT}'$. If $\tilde{p}_{j^*}<2^\varepsilonll$, it is easy to see that the conditions are fulfilled since it is assigned to the least loaded machine. Assume from now on that $j^*$ is big. Suppose that we run Algorithm~\ref{OLSonline} on $\mathcal{S}^*(\mathcal{J},\mathcal{M})$ and arriving job $j^*$, getting a solution $\mathcal{S}^*_{aux}$. First of all it is not difficult to see that the minimum load does not decrease when applying Algorithm~\ref{OLSonline}. Thanks to the jump-optimality of $\mathcal{S}^*$ we have that, for every machine $i$ where no job was assigned using Push and any job $j$ assigned to $i$, $\varepsilonll_i(\mathcal{S}^*_{aux})-p_j<\ell_{\min}(\mathcal{S}^*_{aux})$, and hence the jobs breaking jump-optimality in $\mathcal{S}^*_{aux}$ can only belong to the remaining machines. In these machines we either have only big jobs or they have load at most $\ell_{\min}(\mathcal{S}^*_{aux})+2^\varepsilonll$, implying that the jobs breaking jump-optimality are small thanks to Lemma~\ref{CarOL}. If we take out from the solution such jobs and reassign them using Push until no job is left to be assigned (i.e. reassigning also the jobs which are pushed out) we get a jump-optimal solution $\mathcal{S}^{**}$. Since this procedure moves only small jobs (as pushed jobs are always smaller than the assigned job), the assignment of big jobs in $\mathcal{S}'$ and $\mathcal{S}^{**}$ is the same, proving the first part of being a $(k,2k)$-relaxed version of some jump-optimal solution. We will now prove that if a machine has at least one job of size at most $2^\varepsilonll$ then its load is at most $\ell_{\min}(\mathcal{S}')+2\cdot 2^\varepsilonll$. To this end we will consider three cases: \begin{itemize} \item If $i$ is a machine where no job was assigned using Push and it has a job of size smaller than $2^{\varepsilonll^{(1)}}$, since $\mathcal{S}$ is a $(k^{(1)},2k^{(1)})$-relaxed version of some jump-optimal solution, the load of $i$ is at most $\ell_{\min}(\mathcal{S})+2\cdot 2^{\varepsilonll^{(1)}} \le \ell_{\min}(\mathcal{S}')+2\cdot 2^\varepsilonll$. \item If $i$ is a machine where no job was assigned using Push, it has only jobs of size at least $2^{\varepsilonll^{(1)}}$ and has at least one job of size smaller than $2^\varepsilonll$ (implying that $\varepsilonll^{(1)}<\varepsilonll$), since $\mathcal{S}$ is a $(k^{(1)},2k^{(1)})$-relaxed version of some jump-optimal solution $\mathcal{S}^*$, the load of $i$ is at most $\ell_{\min}(\mathcal{S})+2\cdot2^{\varepsilonll^{(1)}} \le \ell_{\min}(\mathcal{S}^*)+2^{\varepsilonll}$. From the proof of Lemma~\ref{RobGree}, we have that $\ell_{\min}(\mathcal{S}^*)\le \ell_{\min}(\mathcal{S}')+2\cdot 2^{\varepsilonll^{(1)}}$. Putting everything together, we have that \begin{eqnarray*} \varepsilonll_i(\mathcal{S}') & \le & \ell_{\min}(\mathcal{S}^*)+2^\varepsilonll \\ & \le & \ell_{\min}(\mathcal{S}')+2\cdot 2^{\varepsilonll^{(1)}} + 2^\varepsilonll \\ & \le & \ell_{\min}(\mathcal{S}')+2\cdot 2^\varepsilonll, \varepsilonnd{eqnarray*} where the last inequality comes from the fact that $\varepsilonll^{(1)}<\varepsilonll$. 
\item If $i$ is a machine where some job was assigned using Push and it has at least one job of size smaller than $2^\varepsilonll$, the algorithm enforces its load to be at most $\ell_{\min}(\mathcal{S}')+2^\varepsilonll$. \varepsilonnd{itemize} This proves that $\mathcal{S}'$ is a $(k,2k)$-relaxed version of some jump-optimal solution, and we conclude the proof by noticing that $1\le k = 2^{\varepsilonll}/(\varepsilon\textnormal{OPT}')\le 2\varepsilon\textnormal{\footnotesize{UB}}/(\varepsilon \textnormal{OPT}') \le 4$. \varepsilonnd{proof} Now we will bound the migration factor, and also construct an instance showing that the analysis of the migration factor is essentially tight. \begin{lemma}\label{OLS_migration} Algorithm~\ref{OLSonline} has migration factor $O\left(1/\varepsilon\right)$.\varepsilonnd{lemma} \begin{proof} To analyze the migration factor, we define the \varepsilonmph{migration tree} of the algorithm as a node-weighted tree $G=(V,E)$, where $V$ is the set of migrated jobs together with the incoming job $j^*\notin\mathcal{J}$, and the weight of each $v\in V$ is the processing time of the corresponding job $\tilde{p}_v$. The tree is constructed by first adding $j^*$ as root. For each node (job) $v$ in the tree, its children are defined as all the jobs migrated at the insertion of $v$. It is easy to see that this process does not create any loops as each job is migrated at most once. By definition, the leaves of the tree are the jobs not inducing migration, and thus any small job in the tree is a leaf. In the context of local search, the number of nodes in the tree corresponds to the number of iterations of the specific local search procedure. Let $w_i$ be the total processing time of nodes corresponding to big jobs in level $i$ of the migration tree. Assume that $\tilde{p}_{j^*} = q_{\kappa} =2^g + h\varepsilon2^g$ for some $\kappa \in \{1,\dots,|\tilde{P}|\}$, $g\in\{\varepsilonll,\dots,u\}$ and $h\in\{0,\dots,\frac{1}{\varepsilon}-1\}$. Every time a job $j$ is inserted using Push, the total load of jobs in the output $Q$ of the algorithm is strictly less than $\tilde{p}_{j}$, which means that $w_i$ is strictly decreasing, and also that at each level $i$ of the tree there are at most $\frac{w_i}{2^\varepsilonll}$ nodes corresponding to big jobs. Since the second condition of being a $(k_1,k_2)$-relaxed version (Definition~\ref{k-rel}) of a jump-optimal solution is maintained through the iterations, the small jobs that need to be migrated because of insertion of a big job $j$ have total load at most $\tilde{p}_{j}+2^\varepsilonll$. This implies that the total load of small jobs at each level $i\ge 1$ of the tree is at most $w_{i-1} + \frac{w_{i-1}}{2^\varepsilonll}\cdot 2^\varepsilonll = 2w_{i-1}$, and hence the total processing time of nodes corresponding to small jobs is at most twice the total processing time of nodes corresponding to big jobs. Because of that, from now on we will assume that the migration tree contains only nodes corresponding to big jobs. We categorize each level $i\ge 1$ of the migration tree according to the following two cases: if there is a node in level $i-1$ having at least two children, we say that level $i$ falls in case $1$, and it falls in case $2$ otherwise. We first show that there are at most $\frac{\tilde{p}_{j^*}}{2^\varepsilonll}\le1/\varepsilon$ levels of the tree falling in case $1$. 
Because of the way the migration tree is constructed, it is not difficult to see that the total weight of the leaves in the tree is at most $\tilde{p}_{j^*}$ (this property is maintained inductively through the executions of Algorithm Push). Because of this, since each big job has processing time at least $2^\varepsilonll$, every migration tree has at most $\tilde{p}_{j^*}/2^\varepsilonll$ leaves, which is also an upper bound for the number of nodes that have more than one children in the tree (each one of them induces at least one extra leaf), and hence for the number of levels falling in case $1$. There can be more than $1/\varepsilon$ levels falling in case $2$ along the tree, but we will show that in that case $w_{i}$ quickly decreases based on the following claim. \noindent\textbf{Claim:} Let $q_{i_1}, \dots, q_{i_k} \in \tilde{P}$ such that $\displaystyle\sum_{j=1}^k{q_{i_j}} \in (q_{s+1}, q_s]$ for some $s\in \{1,\dots,|\tilde{P}|\}$. Then $\displaystyle\sum_{j=1}^k{q_{i_j+1}} \le \displaystyle\sum_{j=1}^k{q_{i_j}} - \frac{\varepsilon}{4}q_{s+1}$, where we assume that $q_{|\tilde{P}|+1} = 0$. Notice that the claim implies that for a level $i$ falling in case $2$, if $w_{i-1} \in (q_{s+1}, q_s]$ for some $s\in\{1,\dots,|\tilde{P}|\}$, then $w_i \le w_{i-1} - \frac{\varepsilon}{4}q_{s+1}$. To compute the total processing time of the nodes in the migration tree, we will bound the total weight of the levels corresponding to each case separately. Since there are at most $1/\varepsilon$ levels falling in case~$1$, each one of them having total weight at most $\tilde{p}_{j^*}$, we can bound the total weight of those levels by $\frac{1}{\varepsilon}\tilde{p}_{j^*}$. Let us now relabel the levels of the tree where the second case occurs by just $\{1,2,\dots,L_2\}$ (i.e. we ignore the levels falling in case $1$). Thanks to the claim, for every $i\in \{1,2,\dots,L_2\}$, if $w_{i-1} \in (q_{s+1},q_s]$ for some $s \in \{1,\dots,|\tilde{P}|\}$, then $\displaystyle\sum_{j=i-1}^{i+2}{w_j}\le 4q_s$ and $w_{i+3}\le q_{s+1}$ (because $q_s - \varepsilon q_{s+1} \le q_{s+1}$), and we can restart the process for $i+3$ with the correct value $q_{s'}\le q_{s+1}$. If we use this argument starting with $w_0 \in (q_{\kappa+1},q_{\kappa}]$, we can conclude that $\displaystyle\sum_{i=0}^{L_2}{w_i} \le 4\displaystyle\sum_{i=\kappa}^{|\tilde{P}|}{q_i}$, which, recalling that $\tilde{p}_{j^*} = 2^g + h\varepsilon2^g$, is at most \begin{align*} & 4\sum_{i=\varepsilonll}^{g}{ \sum_{b=0}^{\frac{1}{\varepsilon}-1}{(2^i + b\varepsilon2^i)}} = 4\sum_{i=\varepsilonll}^{g}{2^{i-1}\left(\dfrac{3}{\varepsilon}-1\right) } \le 4\tilde{p}_{j^*}\left(\dfrac{3}{\varepsilon}-1\right). \varepsilonnd{align*} These two bounds, together with the fact that the total load of small migrated jobs is at most twice this value, implies that the migration factor is at most $O\left(\frac{1}{\varepsilon}\right)$. To prove the claim, notice that $\displaystyle\sum_{j=1}^k{q_{i_j+1}} \le \displaystyle\sum_{j=1}^k{q_{i_j}} - \displaystyle\sum_{j=1}^k{\varepsilon 2^{\lfloor\log(q_{i_j})\rfloor-1}} \le \displaystyle\sum_{j=1}^k{q_{i_j}} - \frac{\varepsilon}{2}\displaystyle\sum_{j=1}^k{2^{\lfloor\log(q_{i_j})\rfloor}}$. Also, since $\displaystyle\sum_{j=1}^k{2^{\lceil\log(q_{i_j})\rceil}}\ge \displaystyle\sum_{j=1}^k{q_{i_j}}>q_{s+1}$, we have that $\displaystyle\sum_{j=1}^k{2^{\lfloor\log(q_{i_j})\rfloor}}>\frac{q_{s+1}}{2}$. This concludes the proof of the claim. 
\end{proof}
\begin{lemma}\label{lm:lbOLS_migration}There are instances for which Algorithm~\ref{OLSonline} uses a migration factor of at least $\Omega\left( \frac{1}{\varepsilon}\right)$. \end{lemma}
\begin{proof} Consider an instance with $\textnormal{OPT}= 2^{u+1}$ and $\varepsilon\textnormal{OPT}= 2^{\ell}$ for some integers $\ell, u$, and assume for simplicity that $\textnormal{\footnotesize{UB}}= \textnormal{OPT}$. This way, $\tilde{I}(\textnormal{\footnotesize{UB}}) = \{\ell,\dots,u\}$. The instance, consisting of $m\in O\left( \frac{1}{\varepsilon}\log\frac{1}{\varepsilon}\right)$ machines, is constructed in the following way. Consider the possible processing times sorted non-increasingly, $t_1,\dots,t_h$. For each $i$ such that $t_i < 2^u$, the schedule has a machine with a job of size $t_i$ assigned, and it is completed with jobs until it has load $2^{u+1}$: if $t_i = 2^k + j\varepsilon2^k$, this can be done by adding a job of size $2^k + \left(\frac{1}{\varepsilon}-j\right)\varepsilon2^k$, a job of size $2^k$ and, for each $k' = k+2,\dots,u$, a job of size $2^{k'}$ (if $k=u-1$, the machine will not have any of these last jobs). By doing so, the load of the machine is \begin{equation*} 2^k + j\varepsilon2^k + 2^k + \left(\frac{1}{\varepsilon} - j\right)\varepsilon 2^k + 2^{k} + \displaystyle\sum_{k'=k+2}^{u}{2^{k'}} = 2^{k+2} + 2^{u+1}-2^{k+2} = 2^{u+1}. \end{equation*} Now, if a job of size $2^u$ arrives, it can be inserted using Push into the machine containing the largest job of size less than $2^u$ (i.e., of processing time $2^{u-1} + \left(\frac{1}{\varepsilon}-1\right)\varepsilon2^{u-1}$), removing that job because it breaks jump-optimality. If Algorithm~\ref{OLSonline} iteratively makes its choices in this way, then at least one job of each possible size $t_i<2^u$ is migrated, so the total migrated load is at least \begin{equation*} \displaystyle\sum_{i=\ell}^{u-1}{\displaystyle\sum_{j=1}^{\frac{1}{\varepsilon}-1}{(2^i + j\varepsilon2^i)}} = \dfrac{3}{2}\left(\dfrac{1}{\varepsilon}-1\right)\left(2^u-2^{\ell}\right) \in \Omega\left(\dfrac{1}{\varepsilon}2^u\right), \end{equation*} and hence the migration factor needed for this instance is $\Omega\left( \frac{1}{\varepsilon} \right)$. \end{proof}
By putting together Lemmas~\ref{lm:1.7-competitive}, \ref{OLS_migration} and \ref{lm:lbOLS_migration} we can conclude the following result.
\begin{theorem}\label{hinfOnl} Given $\varepsilon>0$, Algorithm~\ref{OLSonline} is a polynomial-time $(1.7+\varepsilon)$-competitive algorithm and uses migration factor $O\left(1/\varepsilon\right)$. Moreover, there are instances for which this factor is $\Omega\left(1/\varepsilon\right)$. \end{theorem}
\section{LPT online with migration $\tilde{O}(1/\varepsilon^3)$.}\label{sec:4/3online}
In this section we present our main contribution, which is an approximate online adaptation of $\textnormal{LPT}$ using a $\text{poly}(1/\varepsilon)$ migration factor. In order to analyze it, we will first show some structural properties of the solutions constructed by $\textnormal{LPT}$ and how they behave when the instance is perturbed by a new job. Algorithm~\ref{OLSonline} presented in Section~\ref{sec:JOpt} already gives some of the features and properties that our online version of $\textnormal{LPT}$ fulfills.
However, in the analysis we will now crucially exploit the symmetry of instances rounded according to the procedure described in Section~\ref{sec:Rounding}, in particular the fact that the load of each machine is a multiple of some fixed value. Since $\textnormal{LPT}$ takes decisions based solely on the machine loads, having a bounded number of possible values for them allows us to accurately control the set of machines where the assignment of big jobs can be kept unchanged after the arrival of a big job while maintaining the structure of the solution. Unless stated otherwise, for the rest of this section machine loads are considered with respect to the rounded processing times $\tilde{p}_j$.
\paragraph*{\textbf{Load Monotonicity.}} Here we describe in more detail useful structural properties of solutions constructed using $\textnormal{LPT}$.
\begin{definition}\label{LoaPro} Given a schedule $\mathcal{S}$, its \textbf{load profile}, denoted by $\textnormal{load}(\mathcal{S})$, is an $\mathbb{R}_{\ge0}^m$-vector $(t_1,\ldots,t_m)$ containing the load of each machine, sorted so that $t_1\le t_2 \le \ldots\le t_m$.\end{definition}
The following lemma shows that after the arrival of a job, the load profile of solutions constructed using $\textnormal{LPT}$ can only increase. This property only holds if the vector of loads is sorted, as can be seen in Figure~\ref{fig:LPT_no_red}. This monotonicity property is essential for our analysis. The following rather technical lemma will help us to show it.
\begin{lemma}\label{loadprof_generalized} Let $x,y\in \mathbb{R}_+^n$, $x=(x_1,x_2,\dots,x_n)$, $y=(y_1,y_2,\dots,y_n)$ such that $x_1\le x_2 \le \dots\le x_n$, $y_1\le y_2\le \dots\le y_n$ and $x\le y$ coordinate-wise, and let $\alpha, \beta\in \mathbb{R}$ be such that $\alpha\le \beta$. If we consider the new vectors defined by replacing $x_i$ by $x_i + \alpha$ in $x$ and $y_i$ by $y_i + \beta$ in $y$ for some $i\in\{1,2,\dots,n\}$, and then sort the coordinates of the new vectors non-decreasingly, obtaining $x'$ and $y'$, then $x' \le y'$ coordinate-wise. \end{lemma}
\begin{proof} Let $\bar{i}$ be the coordinate such that $x'_{\bar{i}}=x_i+\alpha$ and $\bar{j}$ the coordinate such that $y'_{\bar{j}}=y_i+\beta$. For each coordinate $k<\min\{\bar{i},\bar{j}\}$ or $k>\max\{\bar{i},\bar{j}\}$ we have that $x'_k=x_k$ and $y'_k=y_k$, thus satisfying the desired inequality by hypothesis. For the remaining coordinates we distinguish two cases: \begin{itemize} \item Assume that $\bar{i}<\bar{j}$, and let $k\in\{\bar{i},\bar{i}+1,\ldots,\bar{j}-1\}$. Then we have that \[ x'_k \le x'_{k+1} = x_{k+1} \le y_{k+1} = y'_k, \] where the first inequality holds due to the monotonicity of the vectors and the second one because of the hypothesis. Similarly, for $k=\bar{j}$ we have that \[ x'_k = x_k \le y_k = y'_{k-1} \le y'_k, \] where the first inequality follows from the hypothesis. \item Assume that $\bar{i}\ge \bar{j}$ and let $k\in\{\bar{j},\bar{j}+1,\ldots,\bar{i}\}$. Then, \begin{equation*} x'_k\le x'_{\bar{i}} \le y'_{\bar{j}} \le y'_k, \end{equation*} where the first and third inequalities follow from the monotonicity of the vectors, and the second one from the fact that $x_i+\alpha \le y_i + \beta$. \qedhere \end{itemize} \end{proof}
\begin{lemma}\label{load} Let $(\mathcal{J},\mathcal{M})$ be a machine covering instance and $j^*\notin\mathcal{J}$ a job.
Then, it holds that \( \textnormal{load}(\mathcal{S}_{\textnormal{LPT}}(\mathcal{J},\mathcal{M})) \le \textnormal{load}(\mathcal{S}_{\textnormal{LPT}}(\mathcal{J}',\mathcal{M})),\) where the inequality is considered coordinate-wise and $\mathcal{J}' = \mathcal{J}\cup\{j^*\}$. \end{lemma}
\begin{proof} Let us first relabel the jobs in $\mathcal{J}$ so that $\tilde{p}_1\ge \tilde{p}_2\ge \ldots \ge \tilde{p}_n$. To simplify the argument we assume that both runs of $\textnormal{LPT}$ assign jobs in the order $1,2,\ldots,n$ given by the labeling above, where in the run for $\mathcal{J}'$ the new job $j^*$ is inserted into the list in any position consistent with $\textnormal{LPT}$. This is without loss of generality since different tie-breaking rules do not affect the load profiles of the solutions. Consider the set of instances $(\mathcal{J}|_k,\mathcal{M})$ for $k=r,\dots,n$, where $\mathcal{J}|_k \subseteq \mathcal{J}$ is the set of the $k$ largest jobs in $\mathcal{J}$, and $r$ is the maximal index such that $\tilde{p}_{r}\ge \tilde{p}_{j^*}$. Similarly, let $\mathcal{J}'|_k = \mathcal{J}|_k\cup \{j^*\}$ for any $k\in \{r,\ldots,n\}$. We will show by induction that the lemma is true for each pair $(\mathcal{J}|_k,\mathcal{M})$ and $(\mathcal{J}'|_k,\mathcal{M})$. The base case $k=r$ follows easily from Lemma~\ref{loadprof_generalized}, since $\mathcal{S}_{\textnormal{LPT}}(\mathcal{J}|_k,\mathcal{M})$ and $\mathcal{S}_{\textnormal{LPT}}(\mathcal{J}'|_k\setminus\{j^*\},\mathcal{M})$ assign all jobs $\{1,\ldots,r\}$ to the same machines, and adding $j^*$ to a least loaded machine in $\mathcal{S}_{\textnormal{LPT}}(\mathcal{J}'|_k\setminus\{j^*\},\mathcal{M})$ (and a job of size $0$ to a least loaded machine in $\mathcal{S}_{\textnormal{LPT}}(\mathcal{J}|_k,\mathcal{M})$) is the same as adding $\tilde{p}_{j^*}$ to the first coordinate of $\textnormal{load}(\mathcal{S}_{\textnormal{LPT}}(\mathcal{J}'|_k\setminus\{j^*\},\mathcal{M}))$, and then the inequality holds. Suppose now that $\textnormal{load}(\mathcal{S}_{\textnormal{LPT}}(\mathcal{J}|_k,\mathcal{M})) \le \textnormal{load}(\mathcal{S}_{\textnormal{LPT}}(\mathcal{J}'|_k,\mathcal{M}))$. Showing that the inequality is true for $k+1$ is equivalent to showing that when assigning job $k+1$ to a least loaded machine in $\mathcal{S}_{\textnormal{LPT}}(\mathcal{J}|_k,\mathcal{M})$ and in $\mathcal{S}_{\textnormal{LPT}}(\mathcal{J}'|_k,\mathcal{M})$, the resulting load profiles satisfy the inequality, which is precisely the statement of Lemma~\ref{loadprof_generalized} when adding $\tilde{p}_{k+1}$ to the first coordinate of $\textnormal{load}(\mathcal{S}_{\textnormal{LPT}}(\mathcal{J}|_k,\mathcal{M}))$ and also to the first coordinate of $\textnormal{load}(\mathcal{S}_{\textnormal{LPT}}(\mathcal{J}'|_k,\mathcal{M}))$. \end{proof}
This lemma, together with our rounding procedure, allows us to show that the difference (in terms of the Hamming distance) of the load profiles of two consecutive solutions consisting purely of big jobs is bounded by a constant depending only on $1/\varepsilon$. This property will be important to obtain a $\text{poly}(1/\varepsilon)$ migration factor, and here we crucially exploit the fact that the load of the machines is always a multiple of a fixed value.
\begin{lemma}\label{PocaSub} Consider two instances $(\mathcal{J},\mathcal{M})$ and $(\mathcal{J}',\mathcal{M})$ with $\mathcal{J}'=\mathcal{J}\cup\{j^*\}$, where $\mathcal{J}'$ contains only big or huge jobs w.r.t.\ $\textnormal{\footnotesize{UB}}$.
Then the vectors $\textnormal{load}(\mathcal{S}_{\textnormal{LPT}}(\mathcal{J},\mathcal{M}))$ and $\textnormal{load}(\mathcal{S}_{\textnormal{LPT}}(\mathcal{J}',\mathcal{M}))$ differ in at most $\frac{\tilde{p}_{j^*}}{\varepsilon 2^{\ell}} \in O(1/\varepsilon^2)$ many coordinates.\end{lemma}
\begin{proof} We have that $\textnormal{load}(\mathcal{S}_{\textnormal{LPT}}(\mathcal{J},\mathcal{M}))=(t_1,\ldots,t_m)\le (t_1',\ldots,t'_m)=\textnormal{load}(\mathcal{S}_{\textnormal{LPT}}(\mathcal{J}',\mathcal{M}))$ thanks to Lemma~\ref{load}. Also, if $t_i< t_i'$ for some $i$, then $t_i'\ge t_i + \varepsilon 2^{\ell}$, since all values $t_{j}$ and $t_{j}'$ are integer multiples of $\varepsilon 2^{\ell}$ by Lemma~\ref{lm:rounding}. Since $||\textnormal{load}(\mathcal{S}_{\textnormal{LPT}}(\mathcal{J}',\mathcal{M}))-\textnormal{load}(\mathcal{S}_{\textnormal{LPT}}(\mathcal{J},\mathcal{M}))||_1= \tilde{p}_{j^*}$, we obtain that the number of coordinates in which the load profiles differ is at most $\frac{\tilde{p}_{j^*}}{\varepsilon 2^{\ell}}$. Finally, recalling that $j^*$ is big, we have $\tilde{p}_{j^*}< 2^{u+1} \le 2\textnormal{\footnotesize{UB}} \le 2^{\ell+1}/\varepsilon$, and we can bound the number of differing coordinates by $\frac{\tilde{p}_{j^*}}{\varepsilon 2^{\ell}} \le 2/\varepsilon^2$. \end{proof}
\paragraph*{\textbf{Description of Online LPT.}} Consider two instances $(\mathcal{J},\mathcal{M})$ and $(\mathcal{J}',\mathcal{M})$ such that $\mathcal{J}'=\mathcal{J}\cup\{j^*\}$, and let $\textnormal{OPT}$ and $\textnormal{OPT}'$ be their respective optimal values. In what follows, for a given list-scheduling algorithm, a tie-breaking rule is a rule that decides to which particular machine a job is assigned when faced with multiple least loaded machines. We say that an assignment is an LPT-solution if there is some tie-breaking rule such that LPT yields that assignment. We will compute an upper bound $\textnormal{\footnotesize{UB}}$ on $\textnormal{OPT}'$ by computing an LPT-solution and doubling the value of its minimum load. For this upper bound, we compute its respective set $\tilde{P}$ with \eqref{eq:deftP1} and \eqref{eq:deftP2}. In the algorithm, we will label the elements of $\tilde{P}=\{q_1,\ldots,q_{|\tilde{P}|}\}$ such that $q_1 > q_2 > \cdots> q_{|\tilde{P}|}$. Let $\mathcal{J}_h\subseteq \mathcal{J}$ (respectively $\mathcal{J}_h'\subseteq \mathcal{J}'$) be the set of jobs of size $q_h$ in $\mathcal{J}$ (respectively $\mathcal{J}'$), for $q_h\in \tilde{P}$. Similarly, we define $\mathcal{J}_0$ (resp.\ $\mathcal{J}'_0$) to be the set of jobs in $\mathcal{J}$ (resp.\ $\mathcal{J}'$) of size larger than $q_1$, that is, all huge jobs in $\mathcal{J}$ (resp.\ $\mathcal{J}'$). Also, let $\mathcal{S}_{h}$ (resp.\ $\mathcal{S}_h'$) be the solution $\mathcal{S}$ (resp.\ $\mathcal{S}'$) restricted to jobs of size $q_h$ or larger. Finally, $\mathcal{S}_0$ and $\mathcal{S}_0'$ are the respective solutions restricted to jobs in $\mathcal{J}_0$. In what follows, $x_+$ denotes the positive part of $x\in\mathbb{R}$, i.e., $x_+=\max\{x,0\}$. To understand the algorithm, it is useful to have the following observation in mind.
\begin{observation}\label{obs:LScharac} Consider a solution $\mathcal{S}$ for the jobs in $\mathcal{J}$ and let $\mathcal{K}$ be a set of jobs such that $\mathcal{J}\cap \mathcal{K}=\emptyset$ and all jobs in $\mathcal{K}$ have the same size $p$.
Consider a solution $\mathcal{S}_{LS}$ constructed by adding the jobs from $\mathcal{K}$ to $\mathcal{S}$ using list-scheduling, and let $\lambda=\ell_{\min}(\mathcal{S}_{LS})$. Notice that $\lambda$ is independent of the tie-breaking rule used in list-scheduling. Consider any solution $\mathcal{S}'$ that is constructed starting from $\mathcal{S}$ and adding the jobs in $\mathcal{K}$ in some arbitrary way. Then, $\mathcal{S}'$ corresponds to a solution obtained by adding the jobs from $\mathcal{K}$ with a list-scheduling procedure (for some tie-breaking rule) if and only if the number of jobs in $\mathcal{K}$ added to each machine $i$ is: (i) $\left\lceil \tfrac{(\lambda-\ell_i(\mathcal{S}))_+}{p}\right\rceil$ if $\tfrac{(\lambda-\ell_i(\mathcal{S}))_+}{p}$ is not an integer, and (ii) either $\tfrac{(\lambda-\ell_i(\mathcal{S}))_+}{p}$ or $\tfrac{(\lambda-\ell_i(\mathcal{S}))_+}{p}+1$ if $\tfrac{(\lambda-\ell_i(\mathcal{S}))_+}{p}$ is an integer. \end{observation}
Our main procedure is called every time that we get a new job $j^*$ (where $\mathcal{J}' = \mathcal{J} \cup\{j^*\}$) and receives as input the current solution $\mathcal{S}$ for $(\mathcal{J},\mathcal{M})$. If $\mathcal{J}=\emptyset$, then $\mathcal{S}$ is trivially initialized as empty. The exact description is given in Algorithm~\ref{LPTonline}. Broadly speaking, the algorithm works in phases $h\in\{0,\ldots,|\tilde{P}|\}$, where for each $h$ it assigns the jobs in $\mathcal{J}'_{h}$. First, we assign jobs exactly as in $\mathcal{S}_{h}$ on the machines in which the assignments of $\mathcal{S}_{h-1}$ and $\mathcal{S}'_{h-1}$ coincide. The set of such machines is denoted by $\mathcal{M}e_{h-1}$ and the set of remaining machines is denoted by $\mathcal{M}ne_{h-1}$. As we will see, this is consistent with LPT by the previous observation and Lemma~\ref{load}. The remaining jobs in $\mathcal{J}'_h$ are assigned using list-scheduling. Crucially, we will break ties in favor of machines where the assignments of $\mathcal{S}_{h-1}$ and $\mathcal{S}'_{h-1}$ differ. This is necessary to avoid creating new machines with different assignments. After assigning huge and big jobs, small jobs are added exactly as in $\mathcal{S}$ on the machines where the assignment of big jobs in $\mathcal{S}$ and $\mathcal{S}'$ coincides. The remaining small jobs are added greedily. In the last part, the algorithm rebalances small jobs by moving them from machines of load higher than $\ell_{\min}(\mathcal{S}')+2^{\ell}$ to least loaded machines.
\begin{algorithm} \caption{Online LPT} \label{LPTonline} \textbf{Input:}{ \parbox[t]{12.75cm}{ Instances $(\mathcal{J},\mathcal{M})$ and $(\mathcal{J}',\mathcal{M})$ such that $\mathcal{J}'=\mathcal{J}\cup\{j^*\}$; a schedule $\mathcal{S}(\mathcal{J},\mathcal{M})$.}} \begin{algorithmic}[1] \State{run LPT on input $\mathcal{J}'$ and let $\tau$ be the minimum load of the constructed solution. Set $\textnormal{\footnotesize{UB}}\gets 2\tau$.
Define $\tilde{P}, \varepsilonll$, and $u$ based on this upper bound $\textnormal{\footnotesize{UB}}$ using \varepsilonqref{eq:deftP1} and \varepsilonqref{eq:deftP2}.} \State{set $\mathcal{M}e_{-1}\gets\mathcal{M}$ and $\mathcal{M}ne_{-1}\gets\varepsilonmptyset$.} \For{$h=0,1,\ldots,|\tilde{P}|$} \Comment{Assignment of big and huge jobs} \State{for each machine $i\in \mathcal{M}e_{h-1}$, assign all jobs in $\mathcal{J}_h\cap \mathcal{S}^{-1}(i)$ to $i$ in $\mathcal{S}'$.}\label{st:bigMe} \State{for jobs in $\mathcal{J}'_h$ still not assigned in $\mathcal{S}'$, apply list-scheduling (with an arbitrary order of jobs). If there is more than one least loaded machine break ties in favor of machines in $\mathcal{M}ne_{h-1}$.} \label{st:bigMne} \State{define $\mathcal{M}e_h$ as the set of machines $i$ such that $\mathcal{S}_h^{-1}(i)=\mathcal{S}_h'^{-1}(i)$ and $\mathcal{M}ne_h\gets\mathcal{M} \setminus \mathcal{M}e_h$.} \EndFor \For{machines $i\in \mathcal{M}e_{|\tilde{P}|}$}\Comment{Assignment of small jobs} \State{assign all small jobs w.r.t to $\textnormal{\footnotesize{UB}}$ in $\mathcal{J}\cap \mathcal{S}^{-1}(i)$ to $i$ in $\mathcal{S}'$.} \EndFor \State{\label{st:listSmall}assign the remaining jobs using list-scheduling}. \State{set $\overline{\mathcal{M}}$ to be the set of machines containing a small job w.r.t $\textnormal{\footnotesize{UB}}$.} \While{there exists $i\in \overline{\mathcal{M}}$ s.t. $\varepsilonll_i(\mathcal{S}')> \varepsilonll_{\min}(\mathcal{S}')+2^{\varepsilonll}$}\label{st:reassignSmall} \State{consider a machine $i\in \overline{\mathcal{M}}$ of maximum load. Reassign the smallest job in $\mathcal{S}'^{-1}(i)$ to any least loaded machine.} \State{update $\overline{\mathcal{M}}$ to be the set of machines containing a small job w.r.t $\textnormal{\footnotesize{UB}}$.} \EndWhile \State \Return $\mathcal{S}'$. \varepsilonnd{algorithmic} \varepsilonnd{algorithm} We can bound the competitive ratio of the algorithm in a very similar way to Lemma~\ref{lm:1.7-competitive}. First we prove the following auxiliary lemma. \begin{lemma}\label{lm:LPT-solution} If $\mathcal{S}'$ is the output of the algorithm then $\mathcal{S}'_{|\tilde{P}|}$ is an LPT-solution. \varepsilonnd{lemma} \begin{proof} We show the proof inductively. Consider a run of the algorithm with input assignment~$\mathcal{S}$. If $\mathcal{S}$ is empty then it is clearly an LPT-solution. Otherwise, $\mathcal{S}$ is the output of a run of the algorithm. We can assume inductively that $\mathcal{S}_{|\tilde{P}_0|}$ is an LPT-solution (and thus also any restriction of $\mathcal{S}_{|\tilde{P}_0|}$ to jobs of sizes at least $p$, for any $p\ge0$). Notice that $\textnormal{\footnotesize{UB}}_0\le \textnormal{\footnotesize{UB}}$, by Lemma~\ref{load}, and thus $\min \tilde{P}_0\le \min \tilde{P}$ and $\max \tilde{P}_0\le \max \tilde{P}$. We use a second induction to show that, for every $h\in\{0,\ldots,|\tilde{P}|\}$, $\mathcal{S}_h'$ is an LPT-solution. To show the base case ($h=0$), consider jobs in $J_0'$, which are all larger than $\textnormal{\footnotesize{UB}}\ge \textnormal{OPT}'$. Hence there are at most $m$ of them, and the algorithm assigns them each to a different machine (this, again, follows inductively). Thus, the base case holds. Consider $h\ge 1$ and let us assume that $\mathcal{S}_{h-1}'$ is an LPT-solution. 
Let $\mathcal{S}_{\text{LPT},h}$ be an LPT-solution for the jobs in $\mathcal{J}_0\cup \ldots \cup \mathcal{J}_{h}$, and similarly $\mathcal{S}'_{\text{LPT},h}$ for the jobs in $\mathcal{J}'_0\cup \ldots \cup \mathcal{J}'_{h}$. First observe that the load profile vector $\textnormal{load}(\mathcal{S}'_{\text{LPT},h})$ is independent of the tie-breaking rule. Consider the target values $\lambda = \ell_{\min}(\mathcal{S}_{\text{LPT},h})$ and $\lambda'=\ell_{\min}(\mathcal{S}'_{\text{LPT},h})$. Notice that, by Lemma~\ref{load}, $\lambda\le \lambda'$. Since $\mathcal{S}'_{h-1}$ is an LPT-solution, $\mathcal{S}'_h$ is an LPT-solution if the jobs in $\mathcal{J}'_h$ are added using list-scheduling. By Observation~\ref{obs:LScharac} this is characterized as follows: for all machines $i\in \mathcal{M}$, the number of jobs of $\mathcal{J}'_h$ assigned in $\mathcal{S}'_h$ to $i$ is (i) $\lceil (\lambda'-\ell_i(\mathcal{S}'_{h-1}))_+/q_h\rceil$ if $(\lambda'-\ell_i(\mathcal{S}'_{h-1}))_+/q_h$ is not an integer, and (ii) either $(\lambda'-\ell_i(\mathcal{S}'_{h-1}))_+/q_h$ or $(\lambda'-\ell_i(\mathcal{S}'_{h-1}))_+/q_h+1$ if $(\lambda'-\ell_i(\mathcal{S}'_{h-1}))_+/q_h$ is an integer. Since $\lambda \le \lambda'$ and $\mathcal{S}_h$ is an LPT-solution, the number of jobs assigned in Step~\ref{st:bigMe} is never more than $\lceil (\lambda'-\ell_i(\mathcal{S}'_{h-1}))_+/q_h\rceil$ if $(\lambda'-\ell_i(\mathcal{S}'_{h-1}))_+/q_h$ is not an integer, and never more than $(\lambda'-\ell_i(\mathcal{S}'_{h-1}))_+/q_h+1$ if $(\lambda'-\ell_i(\mathcal{S}'_{h-1}))_+/q_h$ is an integer. This implies that after adding jobs in Step~\ref{st:bigMne} we obtain an LPT-solution. \end{proof}
Now we can argue about the approximation guarantee of the obtained solution.
\begin{lemma}\label{lm:4/3-competitive} When considering instances of Machine Covering such that $|\mathcal{M}|=m$, Algorithm~\ref{LPTonline} is $(\frac{4m-2}{3m-1}+O(\varepsilon))$-competitive. \end{lemma}
\begin{proof} We will use the previous lemma to show that $\mathcal{S}'$ is a $(k,k)$-relaxed version of $\mathcal{S}_{\textnormal{LPT}}(\mathcal{J}',\mathcal{M})$ for some $k\le 4$, which is enough to conclude the claim due to Lemma~\ref{RobGree} and the result of Csirik et al.~\cite[Theorem~$3.5$]{CKW92}. Indeed, let $k = 2^{\ell}/(\varepsilon\textnormal{OPT}')$. Then, by the previous lemma all jobs of size at least $k\varepsilon\textnormal{OPT}'=2^{\ell}$ are assigned according to LPT. Also, the while loop at Step~\ref{st:reassignSmall} ensures that the output of the algorithm is a $(k,k)$-relaxed version of $\mathcal{S}_{\textnormal{LPT}}(\mathcal{J}',\mathcal{M})$. The lemma follows since $k\le 4$, as shown in Lemma~\ref{lm:1.7-competitive}. \end{proof}
\paragraph*{\textbf{Bounding the migration factor.}} To analyze the migration factor of the algorithm, we will show that $|\mathcal{M}ne_{|\tilde{P}|}|$ is upper bounded by a constant. This will be done inductively, by first bounding $|\mathcal{M}ne_{h}\setminus \mathcal{M}ne_{h-1}|$ for each $h$ and then using the fact that $|\tilde{P}|\in O((1/\varepsilon)\log(1/\varepsilon))$. A description of the overall idea can be found in Figure~\ref{fig:migration}.
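Before turning to the formal argument, the following minimal sketch (Python, with illustrative names; it is not part of the proof) recomputes LPT load profiles before and after a job arrival and counts the coordinates in which they differ, mirroring Lemma~\ref{load} (the profile only increases) and Lemma~\ref{PocaSub} (for rounded big jobs, only few coordinates change).
\begin{verbatim}
def lpt_profile(sizes, m):
    # Load profile of an LPT schedule: sorted vector of machine loads.
    loads = [0.0] * m
    for s in sorted(sizes, reverse=True):           # LPT order
        i = min(range(m), key=lambda i: loads[i])   # a least loaded machine
        loads[i] += s
    return sorted(loads)                            # independent of tie-breaking

def profile_change(sizes, new_size, m):
    before = lpt_profile(sizes, m)
    after = lpt_profile(sizes + [new_size], m)
    # Monotonicity: the load profile can only increase coordinate-wise.
    assert all(b <= a for b, a in zip(before, after))
    # Number of coordinates that actually change.
    return sum(1 for b, a in zip(before, after) if a > b)
\end{verbatim}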
\begin{figure} \centering \begin{tikzpicture}[yscale=0.3, xscale=0.7] \draw[fill=gray, color=gray, pattern=north east lines, pattern color=gray] (0,2.5) rectangle (0.6,3); \draw[fill=gray, color=gray, pattern=north east lines, pattern color=gray] (1.2,3) rectangle (1.8,4); \draw[fill=gray, color=gray, pattern=north east lines, pattern color=gray] (2.4,4.5) rectangle (3,5); \draw[thick] (0,0) rectangle (0.6,3); \draw[thick] (0.6,0) rectangle (1.2,3); \draw[thick] (1.2,0) rectangle (1.8,4); \draw[thick] (1.8,0) rectangle (2.4,4.5); \draw[thick] (2.4,0) rectangle (3,5); \draw[thick] (3,0) rectangle (3.6,5); \draw[thick] (3.6,0) rectangle (4.2,2.5); \draw[thick] (4.2,0) rectangle (4.8,3); \draw[thick] (4.8,0) rectangle (5.4,3); \draw[thick] (5.4,0) rectangle (6,4); \draw[thick] (8.4,0) rectangle (9,6); \draw (0,6.5) -- (0,0) -- (9,0) -- (9,6.5); \draw (0.6,0) -- (0.6,6.5); \draw (1.2,0) -- (1.2,6.5); \draw (1.8,0) -- (1.8,6.5); \draw (2.4,0) -- (2.4,6.5); \draw (3,0) -- (3,6.5); \draw (3.6,0) -- (3.6,6.5); \draw (4.2,0) -- (4.2,6.5); \draw (4.8,0) -- (4.8,6.5); \draw (5.4,0) -- (5.4,6.5); \draw (6,0) -- (6,6.5); \draw (8.4,0) -- (8.4,6.5); \draw[ultra thick] (3.6,-0.5) -- (3.6,7); \draw [decorate,decoration={brace,amplitude=4pt,raise=0.5pt}] (3.6,0) -- (0,0); \draw (1.8,-0.2) node[anchor=north] {\small $\mathcal{M}ne_{h-1}$}; \draw [decorate,decoration={brace,amplitude=4pt,raise=0.5pt}] (9,0) -- (3.6,0); \draw (6.3,-0.3) node[anchor=north] {\small $\mathcal{M}e_{h-1}$}; \draw (7.2,3) node {\small $\dots$}; \varepsilonnd{tikzpicture} \caption{\footnotesize{Depiction of a possible situation at the end of iteration $h-1$. The machines on the right side correspond to machines in $\mathcal{M}e_{h-1}$ and therefore process the same jobs in $\mathcal{S}_{h-1}$ and $\mathcal{S}'_{h-1}$. Assume, possibly erroneously and just as a thought experiment, that the machines in $\mathcal{M}ne_{h-1}$ can be sorted non-decreasingly by load for $\mathcal{S}_{h-1}$ and $\mathcal{S}'_{h-1}$ simultaneously. The two solutions are depicted simultaneously in the picture, where the difference of loads on machines in $\mathcal{M}ne_{h-1}$ corresponds to the dashed area. The total dashed load equals to $\tilde{p}_{j^*}$, which is spread in only constantly many machines by Lemma~\ref{PocaSub}. When assigning jobs in $\mathcal{J}_h$, the algorithm first assigns a number of jobs to each machine in $\mathcal{M}e_{h-1}$ (Step~\ref{st:bigMe}), and then fills machines in $\mathcal{M}ne_{h-1}$. Notice that while the algorithm does not assign another job to a machine in $\mathcal{M}e_{h-1}$, no new machine will enter $\mathcal{M}ne_{h}\setminus \mathcal{M}ne_{h-1}$. On the other hand, the number of such jobs can be bounded by a number proportional to $\tilde{p}_{j^*}$ (and $1/\varepsilon$), which then also bounds the number of machines in $\mathcal{M}ne_{h}\setminus \mathcal{M}ne_{h-1}$. In reality, however, it is not true that the machines in $\mathcal{M}ne_{h-1}$ can be sorted non-decreasingly on the loads for $\mathcal{S}_{h-1}$ and $\mathcal{S}'_{h-1}$ simultaneously. This provokes a number of technical difficulties that we avoid by using a different permutation of machines for each solution and invoking Lemma~\ref{load}.}}\label{fig:migration} \varepsilonnd{figure} Let us consider huge jobs w.r.t $\textnormal{\footnotesize{UB}}$ (i.e. jobs in $\mathcal{J}'_0$). Notice that all these jobs are larger than $\textnormal{OPT}'\ge \textnormal{OPT}$, and hence in $\mathcal{S}'_{0}$ each one is assigned alone to one machine. 
The same situation occurs in solution $\mathcal{S}$ restricted to the jobs in $\mathcal{J}_0$. Thus, none of these jobs is migrated. Hence, for the sake of the migration analysis, we can assume w.l.o.g.\ that all jobs are big or small w.r.t.\ $\textnormal{\footnotesize{UB}}$ (including $j^*$). Additionally, we can assume that $j^*$ is not small, since otherwise there is no migration. Let $\mathcal{J}e_h$ be the set of jobs assigned by Step~\ref{st:bigMne} to machines in $\mathcal{M}e_{h-1}$. Notice that the jobs in $\mathcal{J}e_h$ are exactly the jobs in $\mathcal{J}'_{h}$ that $\mathcal{S}'$ assigns to a machine in $\mathcal{M}e_{h-1}$ but $\mathcal{S}$ processes in $\mathcal{M}ne_{h-1}$. Our strategy is to bound the cardinality of $\mathcal{J}e_h$ and then use this to upper bound $|\mathcal{M}ne_h \setminus \mathcal{M}ne_{h-1}|$. First we prove two auxiliary lemmas that help to upper bound $|\mathcal{J}e_h|$.
\begin{lemma} \label{lm:LowerBoundAssignmentSingleMachine} Assume that $\mathcal{J}e_h\neq \emptyset$. For each machine $i\in \mathcal{M}ne_{h-1}$, if $\lambda-\ell_i(\mathcal{S}_{h-1}')\ge 0$, then solution $\mathcal{S}'_{h}$ assigns to $i$ at least $\left\lfloor\tfrac{(\lambda-\ell_i(\mathcal{S}_{h-1}'))_+}{q_h}\right\rfloor +1 $ jobs from $\mathcal{J}_h$. \end{lemma} \begin{proof} We consider two cases. If $\lambda'=\lambda$, then the number of jobs in $\mathcal{J}_h$ assigned to machine $i$ is at least $\left\lfloor\tfrac{(\lambda'-\ell_i(\mathcal{S}_{h-1}'))_+}{q_h}\right\rfloor +1$. Indeed, if $\tfrac{(\lambda'-\ell_i(\mathcal{S}_{h-1}'))_+}{q_h}$ is fractional, then the number of jobs must be $\left\lceil\tfrac{(\lambda'-\ell_i(\mathcal{S}_{h-1}'))_+}{q_h}\right\rceil = \left\lfloor\tfrac{(\lambda'-\ell_i(\mathcal{S}_{h-1}'))_+}{q_h}\right\rfloor +1$. If, on the other hand, $\tfrac{(\lambda'-\ell_i(\mathcal{S}_{h-1}'))_+}{q_h}$ is integral, then the algorithm might assign to $i\in \mathcal{M}ne_{h-1}$ only $\tfrac{(\lambda'-\ell_i(\mathcal{S}_{h-1}'))_+}{q_h}=\left\lfloor\tfrac{(\lambda-\ell_i(\mathcal{S}_{h-1}'))_+}{q_h}\right\rfloor$ jobs. However, in that case the tie-breaking rule in Step~\ref{st:bigMne} implies that $\mathcal{J}e_h= \emptyset$, which contradicts our hypothesis. Hence the number of assigned jobs is exactly $\left\lfloor\tfrac{(\lambda'-\ell_i(\mathcal{S}_{h-1}'))_+}{q_h}\right\rfloor +1$, and thus the claim holds. If $\lambda'>\lambda$, then $\lambda'>\ell_i(\mathcal{S}_{h-1}') $ and the number of jobs in $\mathcal{J}_h$ assigned to machine $i$ is at least $\left\lceil\tfrac{(\lambda'-\ell_i(\mathcal{S}_{h-1}'))_+}{q_h}\right\rceil = \left\lceil\tfrac{\lambda'-\ell_i(\mathcal{S}_{h-1}')}{q_h}\right\rceil\ge \left\lfloor\tfrac{(\lambda-\ell_i(\mathcal{S}_{h-1}'))}{q_h}\right\rfloor +1=\left\lfloor\tfrac{(\lambda-\ell_i(\mathcal{S}_{h-1}'))_+}{q_h}\right\rfloor +1$, and hence the claim holds. \end{proof} \begin{lemma}\label{lm:LoadRemoveEntry} Let $x,y\in \mathbb{R}_+^n$, $x=(x_1,x_2,\dots,x_n)$, $y=(y_1,y_2,\dots,y_n)$ be such that $x_1\le x_2 \le \dots\le x_n$, $y_1\le y_2\le \dots\le y_n$ and $x\le y$ coordinate-wise. Assume that $x_i=y_j$ for some indices $i,j$.
If $x_{-i}$ denotes the $(n-1)$-dimensional vector obtained by removing the $i$-th entry of $x$, and $y_{-j}$ is the vector obtained by removing the $j$-th entry of $y$, then $x_{-i}\le y_{-j}$.\end{lemma} \begin{proof} Notice first that if $i=j$ then the result is a direct consequence of Lemma~\ref{loadprof_generalized}: by taking $\alpha=\beta=-x_i$ and coordinate $i$, we get new vectors $\tilde{x}$ and $\tilde{y}$ satisfying $\tilde{x} \le \tilde{y}$ and $\tilde{x}_1 = \tilde{y}_1 = 0$, and hence we can conclude that $x_{-i} \le y_{-j}$ because $x_{-i}$ (resp.\ $y_{-j}$) corresponds to the last $n-1$ coordinates of $\tilde{x}$ (resp.\ $\tilde{y}$). We now distinguish two cases: if $i<j$, we have that $y_j = x_i \le x_j \le y_j$, hence $x_k = y_j$ for every $k=i,i+1,\dots,j$. This implies that $x_{-i} = x_{-(i+1)} = \dots = x_{-j}$, and then we can conclude that $x_{-i} \le y_{-j}$ by applying the previous observation to $x$ and $y$ at coordinate $j$, which yields $x_{-j}\le y_{-j}$. On the other hand, if $j<i$, we define the vector $z$ equal to $y$ but replacing coordinates $j, j+1, \dots, i$ by $y_j$. It is not difficult to see that $x \le z \le y$ coordinate-wise, and also $z_{-j} = z_{-(j+1)} = \dots = z_{-i}$. If we apply the first observation to $x$ and $z$ using coordinate $i$, we have that $x_{-i}\le z_{-i}$, and applying it to $z$ and $y$ using coordinate $j$ we get that $z_{-j} \le y_{-j}$. Combining both inequalities and using the fact that $z_{-i} = z_{-j}$, we conclude that $x_{-i} \le y_{-j}$. \end{proof} \begin{lemma}\label{lm:Jeq} It holds that $|\mathcal{J}e_h|\in O( \frac{\tilde{p}_{j^*}}{\varepsilon 2^{\ell}})$. \end{lemma} \begin{proof} Assume, w.l.o.g., that $\mathcal{M}ne_{h-1}=\{1,\ldots,m'\}$ and that $\ell_1(\mathcal{S}'_{h-1})\le \ell_2(\mathcal{S}'_{h-1}) \le \ldots \le \ell_{m'}(\mathcal{S}'_{h-1})$. Consider also a permutation $\sigma:\mathcal{M}ne_{h-1}\rightarrow \mathcal{M}ne_{h-1}$ such that $\ell_{\sigma(1)}(\mathcal{S}_{h-1})\le \ell_{\sigma(2)}(\mathcal{S}_{h-1}) \le \ldots \le \ell_{\sigma(m')}(\mathcal{S}_{h-1})$. By Lemma~\ref{load} the sorted vector of loads (over all machines) of solution $\mathcal{S}_{h-1}$ is upper bounded by the sorted vector of loads of $\mathcal{S}'_{h-1}$. Applying Lemma~\ref{lm:LoadRemoveEntry} iteratively to remove the machines in $\mathcal{M}e_{h-1}$ one by one (which have the same assignment in both solutions), it holds that $\ell_{\sigma(i)}(\mathcal{S}_{h-1}) \le \ell_{i}(\mathcal{S}'_{h-1})$ for all $i\in \mathcal{M}ne_{h-1}$. Let us consider the sets \begin{eqnarray*} T_{-}&=&\{i\in \mathcal{M}ne_{h-1}\,:\, \ell_i(\mathcal{S}'_{h-1})\le\lambda\}, \text{ and }\\ T_{+}&=&\{i\in \mathcal{M}ne_{h-1}\,:\, \ell_{\sigma(i)}(\mathcal{S}_{h-1})\le \lambda \text{ and } \ell_i(\mathcal{S}'_{h-1})>\lambda\}.
\end{eqnarray*} Lemma~\ref{lm:LowerBoundAssignmentSingleMachine} implies that the total number of jobs from $\mathcal{J}_h'$ assigned by $\mathcal{S}'_{h}$ to machines in $\mathcal{M}ne_{h-1}$ is at least \begin{eqnarray*} & & \sum_{i\in T_-} \left(\left\lfloor \tfrac{(\lambda - \ell_i(\mathcal{S}'_{h-1}))_+}{q_h} \right\rfloor+1\right) \\ & = &\sum_{i\in T_-} \left(\left\lfloor \tfrac{(\lambda - \ell_{\sigma(i) }(\mathcal{S}_{h-1}))_+}{q_h} \right\rfloor+1\right) +\sum_{i\in T_-} \left(\left\lfloor \tfrac{(\lambda - \ell_i(\mathcal{S}'_{h-1}))_+}{q_h} \right\rfloor -\left\lfloor \tfrac{(\lambda - \ell_{\sigma(i)}(\mathcal{S}_{h-1}))_+}{q_h} \right\rfloor\right)\\ & = &\sum_{i\in T_-\cup T_+} \left(\left\lfloor \tfrac{(\lambda - \ell_{\sigma(i) }(\mathcal{S}_{h-1}))_+}{q_h} \right\rfloor+1\right) - \sum_{i\in T_+} \left(\left\lfloor \tfrac{(\lambda - \ell_{\sigma(i) }(\mathcal{S}_{h-1}))_+}{q_h} \right\rfloor+1\right)\\ & &+ \sum_{i\in T_-} \left(\left\lfloor \tfrac{(\lambda - \ell_i(\mathcal{S}'_{h-1}))_+}{q_h} \right\rfloor -\left\lfloor \tfrac{(\lambda - \ell_{\sigma(i)}(\mathcal{S}_{h-1}))_+}{q_h} \right\rfloor\right). \end{eqnarray*} Notice that the set $T_-\cup T_+$ contains all indices $i\in\mathcal{M}ne_{h-1}$ such that $\ell_{\sigma(i)}(\mathcal{S}_{h-1})\le \lambda$. Hence, the first sum in the last expression upper bounds the number of jobs in $\mathcal{J}_{h}$ that solution $\mathcal{S}$ assigns to machines in $\mathcal{M}ne_{h-1}$. Therefore, since $|\mathcal{J}'_h\setminus \mathcal{J}_h | \le 1$, it holds that \begin{eqnarray*} |\mathcal{J}e_h| &\le& 1 + \sum_{i\in T_+} \left(\left\lfloor \tfrac{(\lambda - \ell_{\sigma(i) }(\mathcal{S}_{h-1}))_+}{q_h} \right\rfloor+1\right) + \sum_{i\in T_-} \left(\left\lfloor \tfrac{(\lambda - \ell_{\sigma(i)}(\mathcal{S}_{h-1}))_+}{q_h} \right\rfloor-\left\lfloor \tfrac{(\lambda - \ell_i(\mathcal{S}'_{h-1}))_+}{q_h} \right\rfloor\right) \\ &\le& 1 + |T_+|+\sum_{i\in T_+} \left\lfloor \tfrac{(\lambda - \ell_{\sigma(i) }(\mathcal{S}_{h-1}))_+}{q_h} \right\rfloor + \sum_{i\in T_-} \left(\left\lfloor \tfrac{(\lambda - \ell_{\sigma(i)}(\mathcal{S}_{h-1}))_+}{q_h} \right\rfloor-\left\lfloor \tfrac{(\lambda - \ell_i(\mathcal{S}'_{h-1}))_+}{q_h} \right\rfloor\right) \\ &\le& 1 + |T_+|+\underbrace{\sum_{i\in T_+} \left\lfloor \tfrac{(\lambda - \ell_{i }(\mathcal{S}'_{h-1}))_+}{q_h} \right\rfloor}_{=0} + \sum_{i\in T_-\cup T_+} \left(\left\lfloor \tfrac{(\lambda - \ell_{\sigma(i)}(\mathcal{S}_{h-1}))_+}{q_h} \right\rfloor-\left\lfloor \tfrac{(\lambda - \ell_i(\mathcal{S}'_{h-1}))_+}{q_h} \right\rfloor\right). \end{eqnarray*} Let us now consider $T_{\neq}=\{i\in \mathcal{M}ne_{h-1}: \ell_{\sigma(i)}(\mathcal{S}_{h-1})\neq \ell_{i}(\mathcal{S}'_{h-1})\}$.
Thus, the last expression is at most \begin{eqnarray*} |\mathcal{J}e_h| & \le& 1 + |T_+| + \sum_{i\in (T_-\cup T_+)\cap T_{\neq}} \left(\left\lfloor \tfrac{(\lambda - \ell_{\sigma(i)}(\mathcal{S}_{h-1}))_+}{q_h} \right\rfloor-\left\lfloor \tfrac{(\lambda - \ell_i(\mathcal{S}'_{h-1}))_+}{q_h} \right\rfloor\right) \\ &\le& 1 + |T_+| + \sum_{i\in T_{\neq}} \left( \tfrac{(\lambda - \ell_{\sigma(i)}(\mathcal{S}_{h-1}))_+}{q_h} - \tfrac{(\lambda - \ell_i(\mathcal{S}'_{h-1}))_+}{q_h} +1 \right)\\ &\le& 1 + |T_+| + |T_{\neq}| + \sum_{i\in T_{\neq}} \tfrac{\ell_i(\mathcal{S}'_{h-1})- \ell_{\sigma(i)}(\mathcal{S}_{h-1})}{q_h}\\ &\le& 1 + 2|T_{\neq}| + \tfrac{\tilde{p}_{j^*}}{q_h}. \end{eqnarray*} Also, Lemma~\ref{PocaSub} applies, and thus $|T_{\neq}|\le \frac{\tilde{p}_{j^*}}{\varepsilon 2^{\ell}}$. The lemma then follows since $q_h\ge 2^{\ell}$ by definition. \end{proof} Notice that the jobs in $\mathcal{J}e_h$ are the only jobs assigned in a given iteration $h$ that can cause a new machine to have different assignments in $\mathcal{S}_{h}$ and $\mathcal{S}'_h$. Thus, $|\mathcal{M}ne_{h}\setminus \mathcal{M}ne_{h-1}|\le |\mathcal{J}e_h|$ and the following lemma holds. \begin{lemma}\label{lm:Mne_variation} For all $h\in\{1,\ldots,|\tilde{P}|\}$ it holds that $|\mathcal{M}ne_{h}\setminus \mathcal{M}ne_{h-1}|\in O( \frac{\tilde{p}_{j^*}}{\varepsilon 2^{\ell}})$. \end{lemma} Putting all the discussed ideas together, we prove the following result. \begin{theorem}\label{thm:4/3+mig} When considering instances of Machine Covering such that $|\mathcal{M}|=m$, Algorithm Online LPT is a polynomial-time $(\frac{4m-2}{3m-1}+O(\varepsilon))$-competitive algorithm with $O((1/\varepsilon^3)\log(1/\varepsilon))$ migration factor. \end{theorem} \begin{proof} We first argue that the algorithm runs in polynomial time. Indeed, it suffices to show that the algorithm enters the while loop in Step~\ref{st:reassignSmall} a polynomial number of times. This follows easily since the quantity $\ell_{\min}(\mathcal{S}')$ is non-decreasing, and hence a job can be reassigned to a least loaded machine at most once. The competitive ratio of the algorithm follows from Lemma~\ref{lm:4/3-competitive}. Let us now bound the migration factor. We do this in two steps: we first bound the volume of jobs migrated between $\mathcal{S}$ and the solution $\mathcal{S}'$ constructed before entering Step~\ref{st:reassignSmall}, and then we bound the total volume of jobs reassigned in the while loop of Step~\ref{st:reassignSmall}. For the first bound, by the previous lemma and since $\mathcal{M}ne_{-1}=\emptyset$, it holds that $|\mathcal{M}ne_{|\tilde{P}|}|\le |\tilde{P}|\cdot O( \frac{\tilde{p}_{j^*}}{\varepsilon 2^{\ell}})\le O((1/\varepsilon)\log(1/\varepsilon) \frac{\tilde{p}_{j^*}}{\varepsilon 2^{\ell}})$. The load of the jobs in $\mathcal{S}'_{|\tilde{P}|}$ that are migrated is upper bounded by $\sum_{i\in\mathcal{M}ne_{|\tilde{P}|}} \ell_i(\mathcal{S}_{|\tilde{P}|}')\le|\mathcal{M}ne_{|\tilde{P}|}|\max_{i\in \mathcal{M}ne_{|\tilde{P}|}}\ell_i(\mathcal{S}_{|\tilde{P}|}')$. On the other hand, since we are assuming (w.l.o.g.) that there is no huge job, the total load of each machine is at most $2\textnormal{\footnotesize{UB}}$, as argued in Section~\ref{sec:Rounding}.
We conclude that the big jobs migrated have a total load of at most $2\textnormal{\footnotesize{UB}}\cdot|\mathcal{M}ne_{|\tilde{P}|}| =\textnormal{\footnotesize{UB}}\cdot O((1/\varepsilon)\log(1/\varepsilon) \frac{\tilde{p}_{j^*}}{\varepsilon 2^{\ell}}).$ Finally, notice that the small jobs migrated (before entering Step~\ref{st:reassignSmall}) are the ones assigned to machines in $\mathcal{M}ne_{|\tilde{P}|}$ by $\mathcal{S}$. Since $\mathcal{S}$ is the output of Online LPT, the total load of these jobs is at most $(\ell_{\min}(\mathcal{S}')+2^{\ell})\cdot |\mathcal{M}ne_{|\tilde{P}|}|\le 2\textnormal{\footnotesize{UB}}\cdot |\mathcal{M}ne_{|\tilde{P}|}|\le\textnormal{\footnotesize{UB}}\cdot O((1/\varepsilon)\log(1/\varepsilon) \frac{\tilde{p}_{j^*}}{\varepsilon 2^{\ell}})$. We conclude that the total load migrated is at most $\textnormal{\footnotesize{UB}}\cdot O((1/\varepsilon)\log(1/\varepsilon) \frac{\tilde{p}_{j^*}}{\varepsilon 2^{\ell}})$. It remains to bound the volume migrated in the while loop of Step~\ref{st:reassignSmall}. For this we show the following claim. \noindent\textbf{Claim:} Let $\mathcal{S}'$ be the solution constructed before entering Step~\ref{st:reassignSmall}. Then all jobs reassigned in the while loop, except possibly the one reassigned last, are assigned to a machine in $\mathcal{M}ne_{|\tilde{P}|}$ by $\mathcal{S}'$. Assume the claim holds and consider the solution $\mathcal{S}'$ as output by the algorithm. Then the total volume of reassigned jobs is bounded by $|\mathcal{M}ne_{|\tilde{P}|}|\max_{i\in \mathcal{M}ne_{|\tilde{P}|}}\ell_i(\mathcal{S}')$. Since by construction the load of a machine that processes a job smaller than $2^{\ell}$ is at most $\ell_{\min}(\mathcal{S}')+2^{\ell}\le 2\textnormal{\footnotesize{UB}}$, the total volume migrated is at most $\textnormal{\footnotesize{UB}}\cdot O((1/\varepsilon)\log(1/\varepsilon) \frac{\tilde{p}_{j^*}}{\varepsilon 2^{\ell}})$ as before. Hence, the migration factor is upper bounded by \[ O\left(\frac{\textnormal{\footnotesize{UB}}}{\tilde{p}_{j^*}}\cdot(1/\varepsilon)\log(1/\varepsilon) \frac{\tilde{p}_{j^*}}{\varepsilon 2^{\ell}}\right) = O((1/\varepsilon^3)\log(1/\varepsilon)). \] To show the claim, consider $\mathcal{S}'$ before entering Step~\ref{st:listSmall}, together with the corresponding set $\overline{\mathcal{M}}$ of machines that process some small job. Since $\mathcal{S}$ is the output of Online LPT, the difference between the maximum and minimum loads of machines in $\overline{\mathcal{M}}\cap\mathcal{M}e_{|\tilde{P}|}$ for solution $\mathcal{S}'$ is at most $2^{\ell}$. We call this property (P1). Also, notice that $\overline{\mathcal{M}}\cap \mathcal{M}ne_{|\tilde{P}|} =\emptyset$, and hence the maximum load difference of two machines in this set is at most $2^{\ell}$, vacuously. We refer to this property as (P2). Properties (P1) and (P2) remain valid throughout the later steps of the algorithm. Additionally, if some job is assigned to a machine in $\mathcal{M}e_{|\tilde{P}|}$ in Step~\ref{st:listSmall}, the algorithm does not enter the while loop and we are done. Otherwise, the minimum load is achieved at a machine in $\mathcal{M}ne_{|\tilde{P}|}$. Hence, if a job is migrated from a machine in $\mathcal{M}e_{|\tilde{P}|}$ to a machine in $\mathcal{M}ne_{|\tilde{P}|}$, then the algorithm finishes. The claim follows.
\end{proof} \subsection{A note on geometric vs.\ arithmetic rounding}\label{sec:geom_round} One of the main reasons for using our rounding procedure to multiples of $\varepsilon 2^\ell$ instead of geometric rounding (i.e., rounding down to the nearest power of $(1+\varepsilon)$) is that the arguments used in this work cannot be applied to geometrically rounded instances. It is crucial in the analysis that the number of possible loads is $\text{poly}\left(1/\varepsilon\right)$, while for geometrically rounded instances this is not the case, as the following lemmas show. \begin{lemma}\label{DifLoa} Let $\varepsilon \in \mathbb{Q}_+$, $\varepsilon < 1$. Given a machine covering instance $(\mathcal{J},\mathcal{M})$, let $\tilde{\mathcal{J}}$ be the set of jobs obtained by rounding geometrically the jobs with processing time $p_j \in [\varepsilon\textnormal{OPT},\textnormal{OPT}]$. If $C_1, C_2 \subseteq \tilde{\mathcal{J}}$ are two different multi-sets of jobs with processing times at least $\varepsilon \textnormal{OPT}$ such that $\sum_{j\in C_i}{p_j} \in [\varepsilon\textnormal{OPT},\textnormal{OPT}]$, $i=1,2$, then $\sum_{j\in C_1}{p_j} \neq \sum_{j\in C_2}{p_j}$. \end{lemma} \begin{proof} Assume w.l.o.g.\ that $\textnormal{OPT}=1$. Hence the possible processing times are $(1+\varepsilon)^i$, with $i$ such that $\varepsilon \le (1+\varepsilon)^i \le 1$ (a finite family of possible values). Suppose by contradiction that there are two different non-empty multi-sets $C_1, C_2$ with the same total load, and assume the pair is minimal, i.e., there is no other such pair with smaller total load. For $k=1,2$, let $C_k(j)$ be the number of jobs with processing time $(1+\varepsilon)^j$ in $C_k$. Since the pair $C_1$, $C_2$ is minimal, we have that $C_1(j) = 0$ or $C_2(j) = 0$ for every $j$. $C_1$ and $C_2$ having the same total load means that \begin{equation*} \displaystyle\sum_{j=-k}^{0}{C_1(j) (1+\varepsilon)^j} = \displaystyle\sum_{j=-k}^{0}{C_2(j) (1+\varepsilon)^j}, \end{equation*} where $k = -\lfloor \log_{1+\varepsilon}(\varepsilon)\rfloor$. This equality can be rephrased as the existence of a non-zero polynomial $p(x) = b_0 + b_1 x + \dots + b_k x^k$, with $\lvert b_j \rvert \in \{C_1(j),C_2(j)\}$ (in particular, with integer coefficients), that has $(1+\varepsilon)$ as one of its roots. Since $\varepsilon= \frac{c}{d} > 0$ for some coprime positive integers $c$ and $d$, we have $1+\varepsilon= \frac{c+d}{d}$. Dividing $p(x)$ by $(dx-(c+d))$ leads to a polynomial $q(x)=a_0 + a_1 x + \dots + a_{k-1}x^{k-1}$ which, thanks to Gauss's lemma, has integer coefficients as well. Let $b_i$ be the first non-zero coefficient of $p$. Since $b_j = d a_{j-1}-(c+d)a_j = 0$ for all $j<i$, an easy induction shows that $a_0=\dots=a_{i-1}=0$, and hence \begin{equation*} \lvert b_i \rvert = (c+d)\,\lvert a_i \rvert \ge c+d = c\left(1 + \frac{1}{\varepsilon}\right) > \frac{1}{\varepsilon}, \end{equation*} implying, since the size of each job is at least $\varepsilon$, that the total load of one of the multi-sets is at least $\lvert b_i \rvert \varepsilon>1$, which is a contradiction. \end{proof} \begin{lemma}\label{ExpConf} Given $0<\varepsilon<1$, the number of different multi-sets of jobs with processing times at least $\varepsilon\textnormal{OPT}$ and total load at most $\textnormal{OPT}$ for a geometrically rounded instance is $2^{\Omega\left(\frac{1}{\varepsilon}\right)}$. \end{lemma} \begin{proof} Let $u=\lfloor \log_2 \textnormal{OPT} \rfloor$ and $\ell = \lceil \log_2 (\varepsilon\textnormal{OPT})\rceil$.
We will give a lower bound on the number of different sets with total load $2^u$ when the jobs are rounded to powers of $2$, which implies that for $0<\varepsilon<1$ the same bound holds for processing times rounded to powers of $(1+\varepsilon)$. Let $\mathcal{C}_i$ be the number of different multi-sets with total load $2^{\varepsilonll + i}$. This number is characterized by the recurrence \begin{align*} C_0 & = 1 \\ C_{i+1} & = 1 + \frac{C_i(C_i+1)}{2}. \varepsilonnd{align*} This last term comes from the fact that a multi-set with total load $2^{\varepsilonll+i+1}$ can be constructed using only one job of size $2^{\varepsilonll+i+1}$, or merging two multi-sets of size $2^{\varepsilonll+i}$ (there are $\binom{C_i}{2} + C_i = \frac{C_i(C_i+1)}{2}$ such pairs). Since recurrence $a_0=1$, $a_i = \frac{a_i^2}{2}$ satisfies $a_i \ge 2^{2^{i}}$, we conclude that $C_{u-\varepsilonll} \ge 2^{2^{\log\frac{1}{\varepsilon}}} \ge 2^{\Omega\left(\frac{1}{\varepsilon}\right)}$. \varepsilonnd{proof} Because of these two lemmas, if we use geometrically rounded instances we cannot make sure that, when a new jobs arrives to the system, the load profile changes only by $\text{poly}\left(1/\varepsilon\right)$ coordinates since there are $2^{\Omega\left(1/\varepsilon\right)}$ number of possible different loads. \subsection{An improved lower bound for the competitive ratio with constant migration factor}\label{sec:lowerbound} In opposition to online makespan minimization with migration, where competitive ratio arbitrarily close to one can be achieved using a constant migration factor \cite{SSS09}, the online machine covering problem does not allow it. Until now, the best lower bound known for this ratio is $\frac{20}{19}$ \cite{SV10}, which we now improve to $\frac{17}{16}$ using similar ideas. \begin{figure} \centering \resizebox{!}{95pt}{ \begin{tikzpicture}[scale=0.6] \draw (2.5,7) -- (2.5,0) -- (-2,0) -- (-2,7); \draw (1,7) -- (1,0); \draw (-0.5,7) -- (-0.5,0); \draw (-0.5,0) rectangle (1,3); \draw (0.25,1.5) node {$3$}; \draw (1,0) rectangle (2.5,3); \draw (1.75,1.5) node {$3$}; \draw (-0.5,3) rectangle (1,5); \draw (0.25,4) node {$2$}; \draw (1,3) rectangle (2.5,5); \draw (1.75,4) node {$2$}; \draw (-2,4.7) rectangle (-0.5,6.7); \draw (-1.25,5.7) node {$2$}; \draw (-2,0) rectangle (-0.5,4.7); \draw (-1.25,2.35) node {$\dfrac{80}{17}$}; \draw (11.5,7) -- (11.5,0) -- (7,0) -- (7,7); \draw (10,7) -- (10,0); \draw (8.5,7) -- (8.5,0); \draw (10,0) rectangle (11.5,3); \draw (10.75,1.5) node {$3$}; \draw (10,3) rectangle (11.5,6); \draw (10.75,4.5) node {$3$}; \draw (8.5,0) rectangle (10,2); \draw (9.25,1) node {$2$}; \draw (8.5,2) rectangle (10,4); \draw (9.25,3) node {$2$}; \draw (8.5,4) rectangle (10,6); \draw (9.25,5) node {$2$}; \draw (7,0) rectangle (8.5,4.7); \draw (7.75,2.35) node {$\dfrac{80}{17}$}; \draw (7,4.7) rectangle (8.5,4.83); \draw (7,4.83) rectangle (8.5,4.96); \draw (7,4.96) rectangle (8.5,5.09); \draw (7,5.09) rectangle (8.5,5.22); \draw (7,5.22) rectangle (8.5,5.35); \draw (7,5.35) rectangle (8.5,5.48); \draw (7,5.48) rectangle (8.5,5.61); \draw (7,5.61) rectangle (8.5,5.74); \draw (7,5.74) rectangle (8.5,5.87); \draw (7,5.87) rectangle (8.5,6); \varepsilonnd{tikzpicture}} \caption{Left: Unique $(17/16)$-approximate solution before the arrival of small jobs. 
Right: Unique $(17/16)$-approximate solution after the arrival of small jobs.} \label{cotainf} \end{figure} \begin{lemma}\label{17/16} For any $\varepsilon>0$, there is no $\left(\frac{17}{16}-\varepsilon\right)$-competitive algorithm using constant migration factor for the online machine covering problem with migration. \end{lemma} \begin{proof} Consider an instance consisting of $3$ machines and $6$ jobs of sizes $p_1=p_2=p_3=2$, $p_4=p_5=3$ and $p_6=\frac{80}{17}$. It is easy to see that the optimal solution is the one depicted in Figure \ref{cotainf} (left). Moreover, there is no other $\left( \frac{17}{16}-\varepsilon \right)$-approximate solution (up to symmetry). Suppose by contradiction that there exists a $\left( \frac{17}{16}-\varepsilon\right)$-competitive algorithm with constant migration factor $C$. While processing the above instance, the algorithm must construct the optimal solution depicted in Figure \ref{cotainf} (left). Suppose now that jobs with processing time smaller than $1/C$ arrive, with total processing time $\frac{22}{17}$. Since the migration factor is $C$, none of the six previous jobs can be migrated; thus the best minimum load we can obtain is $\frac{96}{17}$, while the optimal minimum load is $6$ (see Figure \ref{cotainf} (right)). We conclude by noting that $\frac{6}{96/17}=\frac{17}{16}$. \end{proof} Notice that the instance reaching the lower bound crucially depends on the arrival of jobs with arbitrarily small processing times. Such jobs are in fact the problematic ones: under the assumption that at each iteration the incoming job is big enough (i.e., has processing time at least $\varepsilon\textnormal{OPT}$), there is a robust PTAS with constant migration factor~\cite{SV10}. \appendix \section{Non-constant Migration for classic LPT} \label{app:nonConstant} \begin{lemma}\label{lm:nonConstantMigration} For any $k\ge 2$ there exist a set $\mathcal{J}$ of $4k+1$ jobs and an extra job $j^*\notin \mathcal{J}$ such that, for every schedule $\mathcal{S}$ constructed using $\textnormal{LPT}$ on $m=2k+1$ machines, it is not possible to construct a schedule $\mathcal{S}'$ using $\textnormal{LPT}$ for $\mathcal{J} \cup \{j^*\}$ with migration factor less than $m/2$. \end{lemma} \begin{proof} Fix a constant $0<\varepsilon\le \frac{1}{6k}$. Consider a set $\mathcal{J}$ consisting of the following $4k+1$ jobs: $k+1$ jobs of size $1$; for each $i\in \{0,\dots,k-1\}$, a job of size $\frac{1}{2}+i\varepsilon$ and a job of size $\frac{1}{2}-(i+1)\varepsilon$; and finally $k$ jobs of size $\frac{1}{2}-k\varepsilon\ge\frac{1}{3}$. Assume the jobs in $\mathcal{J}$ are sorted non-increasingly by size. There is a unique schedule constructed using $\textnormal{LPT}$ for this instance (up to symmetry), which assigns the jobs in the following way (see Figure \ref{fig:LPT_no_red1}): the $k+1$ jobs of size $1$ each go to a machine on their own, and for each $i=1,\dots,k$, machine $k+i$ receives a job of size $\frac{1}{2} + (k-i)\varepsilon$, a job of size $\frac{1}{2}-(k-i+1)\varepsilon$ and a job of size $\frac{1}{2}-k\varepsilon$ (since the total load of the first two jobs is $1-\varepsilon$, the last $k$ jobs must be assigned to these $k$ machines). Now consider an arriving job $j^*$ of size $\frac{1}{2} + k\varepsilon\le\frac{2}{3}$.
There is a unique schedule constructed using $\textnormal{LPT}$ for the new instance (up to symmetry), which assigns the jobs in the following way (see Figure \ref{fig:LPT_no_red2}): it assigns to each of the first $k+1$ machines a job of size $1$ and a job of size $\frac{1}{2}-k\varepsilon$, to machine $k+2$ the job $j^*$ and a job of size $\frac{1}{2}-(k-1)\varepsilon$, for each $i=2,\dots,k-1$ it assigns to machine $k+i+1$ a job of size $\frac{1}{2} + (k+1-i)\varepsilon$ and a job of size $\frac{1}{2} - (k-i)\varepsilon$, and finally to machine $2k+1$ a job of size $\frac{1}{2}+\varepsilon$ and a job of size $\frac{1}{2}$ (since the total load of machines $k+2,\dots,2k+1$ is $1+\varepsilon$, the last $k+1$ jobs must be assigned to the first $k+1$ machines). It is not difficult to see that, in the new schedule, every machine has a different subset of jobs assigned to it compared with the original schedule, so at least one job must have been migrated per machine. Thus, the total migrated load is at least the total size of the $2k+1$ smallest jobs, which implies that the needed migration factor is at least \begin{eqnarray*} \frac{\displaystyle\sum_{i=0}^{2k}{p_{(4k+1)-i}}}{p_{j^*}} \ge\frac{(2k+1)\left(\frac{1}{2}-k\varepsilon\right)}{\frac{1}{2}+k\varepsilon} \ge m\frac{1/3}{2/3} = \frac{m}{2}. \qedhere \end{eqnarray*} \end{proof} \section{Proof of Theorem~\ref{thm:1.7apx}}\label{sec:chenetal} We will use and generalize a result of Chen et al.~\cite{CEKvS13} which bounds the price of anarchy of a related game. We say that a schedule is \emph{lex-jump-optimal} if it is locally optimal with respect to Jump when the whole vector of loads (sorted non-decreasingly) is taken as the objective and compared lexicographically. In this context, lex-jump-optimal solutions are equivalent to pure Nash equilibria of the Machine Covering game, obtained when jobs are selfish agents trying to minimize the load of the machine to which they are assigned and the minimum load is taken as the welfare function. \begin{theorem}[Chen et al.~\cite{CEKvS13}]\label{thm:chenetal} The Price of Anarchy of the Machine Covering Game is $1.7$. \end{theorem} Theorem~\ref{thm:chenetal} gives a tight bound on the approximation ratio of lex-jump-optimality. In order to prove Theorem~\ref{thm:1.7apx}, we need to show that in the case of Machine Covering a jump-optimal solution is also lex-jump-optimal, and to construct instances establishing the lower bound for swap-optimality. \begin{lemma}\label{lem:lextojump} Let $\mathcal{S}$ be a jump-optimal solution for Machine Covering. Then $\mathcal{S}$ is lex-jump-optimal as well. \end{lemma} \begin{proof} Suppose $\mathcal{S}$ is not lex-jump-optimal. This implies that there exist a job $j$ and two machines $i,i'$, where $j$ is assigned to $i$, such that $\ell_i(\mathcal{S})-p_j>\ell_{i'}(\mathcal{S})$. Since $\ell_{i'}(\mathcal{S})\ge \ell_{\min}(\mathcal{S})$, we get that $\ell_i(\mathcal{S})-p_j>\ell_{\min}(\mathcal{S})$, which implies that $\mathcal{S}$ is not jump-optimal by Lemma~\ref{CarOL}. \end{proof} \begin{lemma}\label{lem:swapLB} The approximation ratio of swap-optimality is at least $1.7$.
\end{lemma} \begin{proof} The family of instances leading to the desired lower bound is a slight modification of the one presented by Chen et al.~\cite{CEKvS13} in the proof of Theorem~\ref{thm:chenetal}, since the original family of instances is jump-optimal but not swap-optimal. Let $n_0=0$ and, for each $i\ge 1$, $n_i=4n_{i-1}+2$. For each $k\ge 2$ we define an instance of Machine Covering $(\mathcal{J}_k,\mathcal{M}_k)$. Let $\delta=\frac{1}{30n_k}$ and $|\mathcal{M}_k|=2(10^k-1)$. There are five types of jobs: \begin{itemize} \item $|\mathcal{M}_k|$ jobs of size $1$; \item for each $i=1,\dots,k$, $6\cdot 10^{k-i}$ jobs of size $a_i=\frac{1}{2}+(n_i-1)\delta$; \item for each $i=1,\dots,k$, $12\cdot 10^{k-i}$ jobs of size $b_i=\frac{1}{2}-(n_i-1)\delta$; \item for each $i=1,\dots,k$, $12\cdot 10^{k-i}$ jobs of size $c_i=\frac{1}{5}+4n_{i-1}\delta$; \item for each $i=1,\dots,k-1$, $6\cdot 10^{k-i}$ jobs of size $d_i=\frac{1}{5}-n_i\delta$, and finally $6|\mathcal{M}_k|$ jobs of size $d_k=\frac{1}{6|\mathcal{M}_k|-1}$. \end{itemize} Notice first that the optimal solution for the instance $(\mathcal{J}_k,\mathcal{M}_k)$ achieves a minimum load of at least $1.7-\delta$: we can assign a job of size $1$ to each machine, and on top of that either a job of size $a_i$ plus a job of size $d_i$ (for some $1\le i \le k-1$), or a job of size $b_i$ plus a job of size $c_i$ (for some $1\le i \le k$), or a job of size $a_k$ plus $|\mathcal{M}_k|$ jobs of size $d_k$. Since $1+a_i+d_i=1.7-\delta$ for $1\le i\le k-1$, $1+b_i+c_i=1.7+(4n_{i-1}-n_i+1)\delta = 1.7 - \delta$, and $1+a_k+\frac{|\mathcal{M}_k|}{6|\mathcal{M}_k|-1}=1.7-\delta+\frac{1}{6(6|\mathcal{M}_k|-1)} \ge 1.7-\delta$, we get the desired bound. The number of machines used is $18\displaystyle\sum_{i=1}^{k}{10^{k-i}}=2(10^k-1)$, as required. Consider now the solution $\mathcal{S}_{swap}$, which assigns the jobs in the following way: \begin{itemize} \item jobs of size $1$ are assigned in pairs to $\frac{|\mathcal{M}_k|}{2}$ machines, so the load of these machines is $2$; \item for each $i=1,\dots,k$, one job of size $a_i$ and two jobs of size $b_i$ are assigned to each of $6\cdot 10^{k-i}$ machines, so the load of these machines is $\frac{3}{2}-(n_i-1)\delta$; \item six jobs of size $c_1=\frac{1}{5}$ are assigned to each of $2\cdot 10^{k-1}$ machines, so the load of these machines is $\frac{6}{5}$; \item for each $i=1,\dots,k-1$, one job of size $c_{i+1}$ and five jobs of size $d_i$ are assigned to each of $12\cdot 10^{k-i-1}$ machines, so the load of these machines is $\frac{6}{5}-n_i\delta$; \item all the jobs of size $d_k$ go to a final machine, which has load $1+\frac{1}{6|\mathcal{M}_k|-1}$. \end{itemize} The number of machines used is $$\frac{|\mathcal{M}_k|}{2}+6\displaystyle\sum_{i=1}^{k}{10^{k-i}}+2\cdot 10^{k-1} + 12\displaystyle\sum_{i=1}^{k-1}{10^{k-i-1}}+1=\frac{|\mathcal{M}_k|}{2}+10^k-1=|\mathcal{M}_k|,$$ so the solution is feasible. Furthermore, the last machine is the only least loaded machine, as every other machine has load at least $\frac{6}{5}-n_k\delta=\frac{7}{6}$. We now prove that $\mathcal{S}_{swap}$ is swap-optimal. Since for each job $j$ we have that $\ell_{\mathcal{S}_{swap}(j)}(\mathcal{S}_{swap})-p_j\le 1$ and the minimum load is at least $1$, $\mathcal{S}_{swap}$ is jump-optimal. Consider now any job $j$ assigned to a machine $i$ which is not the least loaded one, and a job $j'$ of size $d_k$ assigned to the least loaded machine $i'$.
If job $j$ is the smallest job assigned to $i$, then swapping it with $j'$ does not improve the solution because $\ell_{i}(\mathcal{S}_{swap})-p_j=\ell_{i'}(\mathcal{S}_{swap})-p_{j'}=1$. On the other hand, if $p_j$ is strictly larger than the processing times of the remaining jobs on $i$ (i.e., $p_j=c_q$ or $p_j=a_q$ for some $q=1,\dots,k$), then $\ell_{i}(\mathcal{S}_{swap})-p_j<1$, implying that $\ell_{i}(\mathcal{S}_{swap})-p_j+p_{j'}<\ell_{i'}(\mathcal{S}_{swap})$, and hence the swap does not improve the solution either. This proves that $\mathcal{S}_{swap}$ is swap-optimal. As $k$ increases, $\delta$ approaches zero while the minimum load of $\mathcal{S}_{swap}$ approaches $1$, implying that the approximation ratio of swap-optimality is at least $1.7$. \end{proof} Combining Theorem~\ref{thm:chenetal} with Lemmas~\ref{lem:lextojump} and~\ref{lem:swapLB} concludes the proof of Theorem~\ref{thm:1.7apx}. \qed \end{document}
\begin{document} \newcommand{\end{equation}}{\end{equation}} \newcommand{\end{eqnarray}}{\end{eqnarray}} \newcommand{\andy}[1]{ } \def\widetilde{\widetilde} \def\mbox{\boldmath $n$}} \def\bmA{\mbox{\boldmath $A$}{\mbox{\boldmath $n$}} \def\bmA{\mbox{\boldmath $A$}} \def\mbox{\boldmath $B$}} \def\bmb{\mbox{\boldmath $b$}{\mbox{\boldmath $B$}} \def\bmb{\mbox{\boldmath $b$}} \def\mbox{\boldmath $\sigma$}{\mbox{\boldmath $\sigma$}} \def\mbox{\boldmath $\sigma$}n{\mbox{\boldmath $\sigma$}\cdot\mbox{\boldmath $n$}} \def\mbox{\boldmath $\sigma$}b{\mbox{\boldmath $\sigma$}\cdot\mbox{\boldmath $b$}} \def\mbox{\boldmath $\sigma$}A{\mbox{\boldmath $\sigma$}\cdot\mbox{\boldmath $A$}} \def\mbox{\boldmath $\sigma$}B{\mbox{\boldmath $\sigma$}\cdot\mbox{\boldmath $B$}} \def\mbox{ch}{\mbox{ch}} \def\mbox{sh}{\mbox{sh}} \newcommand{\ket}[1]{| #1 \rangle} \newcommand{\bra}[1]{\langle #1 |} \def\noindent} \def\ellip{$\ldots${\noindent} \def\ellip{$\ldots$} \def\undertext#1{$\underline{\hbox{#1}}$} \def\lsnote#1{\def\dash{\hbox{\rm---}}{\bf~[[}~{\tt #1~$\ldots\,$LS}{\bf]]~}} \def\asteriskbreak{\vbox{ \hrule\vskip -11pt $$************************************************$$\vskip-3pt \hrule }} \def\beginbignote{\begingroup\baselineskip 13pt\tt\def\dash{\hbox{\rm---}} \hrule } \def \hrule \endgroup{ \hrule \endgroup} \def\hfil\break} \def\eqn#1{Eq.\ (\ref{eq:#1}){\hfil\break} \def\eqn#1{Eq.\ (\ref{eq:#1})} \def{\vec p}} \def\AA{{\vec A}{{\vec p}} \def\AA{{\vec A}} \font\romsix=cmr6 scaled\magstep0 \def\coltwovector#1#2{\left({#1\atop#2}\right)} \def\coltwovector10} \def\down{\coltwovector01{\coltwovector10} \def\down{\coltwovector01} \def\header#1{{ \removelastskip\vskip 20pt plus 40pt \penalty-200 \vskip 0pt plus -32pt \noindent} \def\ellip{$\ldots$\bf #1}\nobreak \nobreak} \font\romeight=cmr8 scaled\magstep0 \font\boldeight=cmbx8 scaled\magstep0 \font\italeight=cmti8 scaled\magstep0 \def\marginpar{?? ask: }{\marginpar{?? ask: }} \def\marginpar{fill in ... }{\marginpar{fill in ... }} \def\lyrics#1{{\bf~[[LYRICS:}~{\bf #1}~{\bf]]}} \def\marginpar{$\bigg|$ note }{\marginpar{$\bigg|$ note }} \def\mbox{ch}eck{\marginpar{check }} \def\marginpar{discuss }{\marginpar{discuss }} \begin{titlepage} \begin{flushright} \today \\ BA-TH/99-334\\ \end{flushright} \begin{center} {\LARGE Berry phase from a quantum Zeno effect } \\ \quad {\large P. FACCHI,$^{(1)}$ A.G. KLEIN,$^{(2)}$ \\ S. PASCAZIO$^{(1)}$ and L. S. SCHULMAN$^{(3)}$\\ \quad \\ $^{(1)}$Dipartimento di Fisica, Universit\`a di Bari \\ and Istituto Nazionale di Fisica Nucleare, Sezione di Bari \\ I-70126 Bari, Italy $^{(2)}$School of Physics, The University of Melbourne \\ Parkville, Victoria, Australia 3052 $^{(3)}$Physics Department, Clarkson University \\ Potsdam, NY 13699-5820, USA \\and\\Physics Department, Technion, Haifa, Israel } \vspace*{.5cm} \end{center} \noindent} \def\ellip{$\ldots$ PACS: 03.65.Bz; 03.75.Be; 03.75.Dg \vspace*{.5cm} \begin{center}{\small\bf Abstract}\\ \end{center} {\small We exhibit a specific implementation of the creation of geometrical phase through the state-space evolution generated by the dynamic quantum Zeno effect. That is, a system is guided through a closed loop in Hilbert space by means a sequence of closely spaced projections leading to a phase difference with respect to the original state. Our goal is the proposal of a specific experimental setup in which this phase could be created and observed. 
To this end we study the case of neutron spin, examine the practical aspects of realizing the ``projections," and estimate the difference between the idealized projections and the experimental implementation.} \end{titlepage} \setcounter{equation}{0} \section{Introduction } \label{sec-introd} \andy{intro} The effect of the observer in quantum mechanics is perhaps nowhere more dramatic than in the collection of phenomena loosely (and casually) known as the ``quantum Zeno effect." This was first formulated by von Neumann \cite{von,Beskow}, and is deeply rooted in fundamental features of the temporal behavior of quantum systems \cite{strev}. During the last decade there has been much interest in this issue, mainly because of an idea due to Cook \cite{Cook}, who proposed using two-level systems to check this effect, and the subsequent experiment performed by Itano {\em et al.} \cite{Itano1}. New experiments were proposed, based on the physics of the simplest of two-level systems: Neutron spin and photon polarization \cite{qze1,inn}. Most of the referenced papers deal with what might be called the ``static" version of the quantum Zeno effect. However, the most striking action of the observer is not only to stop time evolution (e.g., by repeatedly checking if a system has decayed), but to {\it guide} it. In this article we will be concerned with a ``dynamical" version of the phenomenon: we will show how guiding a system through a closed loop in its state space (projective Hilbert space) leads to a geometrical phase \cite{Panchar,BerryQuantal,BerryClassical,Shapere,Wagh}. This was predicted on general grounds \cite{AA87}, but here we use a specific implementation on a spin system \cite{continuous} and propose a particular experimental context in which to see this effect. It is remarkable that the Berry phase that is discussed below is due to measurements only: no Hamiltonian is needed. \setcounter{equation}{0} \section{Forcing the pot to boil } \label{sec-potboil} \andy{potboil} We summarize the main features of the quantum Zeno effect (QZE). Prepare a quantum system in some initial state $\psi(0)$. In time $dt$, by the Schr\"odinger equation, its phase changes by $\hbox{O}(dt)$ while the absolute value of its scalar product with the initial state changes by $\hbox{O}(dt^2)$. The {\it dynamical\/} quantum Zeno effect exploits the above features and forces the evolution in an arbitrary direction by a series of repeated measurements: Let $\psi$ evolve with the Hamiltonian $H$, so that in the absence of observations its evolution would be $\psi(T)=\exp(-iHT) \psi(0)$ (we take $\hbar=1$ throughout). Let there be a family of states $\phi_k$, $k=0,1,\ldots, N$, such that $\phi_0=\psi(0)$, and such that successive states differ little from one another (i.e., $|\langle\phi_{k+1} | \phi_k \rangle|$ is nearly 1). Now let $\delta T = T/N$ and at $T_k=k\delta T$ project the evolving wave function on $\phi_k$. Then for sufficiently large $N$, $\psi(T) \approx \phi_{_N}$. [The usual QZE is the special case $\phi_k=\phi_0 (=\psi(0)) \ \forall \ k$.] In the following we consider an experiment involving a neutron spin. It should be clear, however, that our proposal is valid for any system with the same two-level structure. \subsection{Evolution with no Hamiltonian} \label{sec-noH} \andy{noH} Assume first that there is {\em no} Hamiltonian acting on the system: one can think, for instance, of a neutron crossing a region where no magnetic field is present. The time-evolution is due to measurement only. 
The system starts with spin up along the $z$-axis and is projected on the family of states \andy{projfamily} \beq \phi_k \equiv \exp(-i\theta_k\mbox{\boldmath $\sigma$}n)\coltwovector10} \def\down{\coltwovector01 \qquad \hbox{with~} \theta_k \equiv \frac{ak}N \;, \qquad k=0,\ldots,N \ , \label{eq:projfamily} \eeq where $\mbox{\boldmath $\sigma$}$ is the vector of the Pauli matrices and $\mbox{\boldmath $n$}} \def\bmA{\mbox{\boldmath $A$} = (n_x,n_y,n_z)$ a unit vector (independent of $k$). We assume that the system evolves for a time $T$ with projections at times $T_k = k\delta T$ ($k=1,\dots,N$ and $\delta T=T/N$). The final state is $\left[\phi_0 = \coltwovector10} \def\down{\coltwovector01\right]$ \andy{finstate} \barr \ket{\psi(T)} &=& |\phi_N\rangle \langle \phi_N| \phi_{N-1}\rangle \cdots \langle \phi_2| \phi_1\rangle \langle \phi_1| \phi_0\rangle \nonumber \\ &=& |\phi_N\rangle \left(\cos \frac{a}{N} + i n_z \sin \frac {a}{N} \right)^N \nonumber \\ &=& \cos^N \left(\frac{a}{N} \right) \left(1 + i n_z \tan \frac {a}{N} \right)^N |\phi_N\rangle \nonumber \\ &\stackrel{N\rightarrow \infty}{\longrightarrow} & \exp (ia n_z) |\phi_N\rangle \nonumber \\ & = & \exp (ia n_z) \exp (-ia\mbox{\boldmath $\sigma$}n) | \phi_0\rangle . \label{eq:finstate} \earr Therefore, as $N\to\infty$, $\psi(T)$ is an eigenfunction of the final projection operator $P_N$, with unit norm. If $\cos\Theta \equiv n_z$ and $a=\pi$, \andy{finstatepi} \beq \psi(T) = \exp (i \pi \cos \Theta) (-1) \phi_0 = \exp [-i \pi (1-\cos \Theta)] \phi_0 = \exp (-i \Omega/2 ) \phi_0 , \label{eq:finstatepi} \eeq where $\Omega$ is the solid angle subtended by the curve traced by the spin during its evolution. The factor $ \exp (-i\Omega/2)$ is a Berry phase and it is due only to measurements (the Hamiltonian is zero). Notice that no Berry phase appears in the usual quantum Zeno context, namely when $\phi_k \propto \phi_0 \ \forall \ k$, because in that case $a=0$ in (\ref{eq:finstate}). To provide experimental implementation of the mathematical process just described, one could (in principle) let a neutron spin evolve in a field-free region of space. With no further tinkering, the spin state would not change. However, suppose we place spin filters sequentially projecting the neutron spin onto the states of \eqn{projfamily}, for $k=0,\ldots,N$. Thus the neutron spin is forced to follow another trajectory in spin space. The essence of the mathematical demonstration just provided is that while $N$ measurements are performed, the norm of wave function that is absorbed by the filters is $N\cdot$O$(1/N^2)=$O$(1/N)$. For $N\to\infty$, this loss is negligible. Meanwhile, as a result of these projections, the trajectory of the spin (in its space) is a cone whose symmetry axis is $\mbox{\boldmath $n$}} \def\bmA{\mbox{\boldmath $A$}$. By suitably matching the parameters, the spin state can be forced back to its initial state after time $T$ \cite{continuous}. It is interesting to look at the process (\ref{eq:finstate}) for $N$ finite. The spin goes back to its initial state after describing a regular polygon on the Poincar\'e sphere, as in Figure 1a. \begin{figure} \caption{a) Spin evolution due to $N=5$ measurements. b) Solid angles.} \end{figure} After $N (<\infty)$ projections the final state is \beq \ket{\psi(T)}=\left(\cos\frac{a}{N}+in_z\sin\frac{a}{N}\right)^N \exp(-ia\mbox{\boldmath $\sigma$}n)\ket{\phi_0}. 
\eeq For $a=\pi$ the spin describes a closed path and \andy{clpath} \barr \ket{\psi(T)}&=&\left(\cos\frac{\pi}{N}+in_z\sin\frac{\pi}{N}\right)^N \exp(-i\pi)\ket{\phi_0}\nonumber\\ &=&\left(\cos^2\frac{\pi}{N}+n^2_z\sin^2\frac{\pi}{N}\right)^{\frac{N}{2}} \exp\left(iN\arctan\left(n_z\tan\frac{\pi}{N}\right)\right)\exp(-i\pi) \ket{\phi_0}.\nonumber\\ \label{eq:clpath} \earr The first factor in the far r.h.s.\ accounts for the probability loss ($N$ is finite and there is no QZE). We can rewrite (\ref{eq:clpath}) in the following form \beq \ket{\psi(T)}=\rho_N \exp(-i\beta_N)\ket{\phi_0}, \eeq where \andy{rhoN, betaN} \barr \rho_N &=& \left(\cos^2\frac{\pi}{N}+n^2_z \sin^2\frac{\pi}{N}\right)^{\frac{N}{2}}, \label{eq:rhoN}\\ \beta_N &=& \pi-N\arctan\left(\cos\Theta\tan\frac{\pi}{N}\right). \label{eq:betaN} \earr In the ``continuous measurement" limit (QZE), we have \barr \rho &=& \lim_{N\to\infty}\rho_N=1,\nonumber\\ \beta &=& \lim_{N\to\infty}\beta_N=\pi(1-\cos\Theta)=\frac{\Omega}{2}, \earr where $\Omega$ is the solid angle subtended by the circular path, viewed at an angle $\Theta$ (see Figure 1a). We recover therefore the result (\ref{eq:finstatepi}). The relation between the solid angle and the geometrical phase is valid also with a finite number of polarizers $N$. Indeed, it is straightforward to show that the solid angle subtended by an isosceles triangle with vertex angle equal to $2\alpha$ (Figure 1b) has the value \beq \Omega_{2\alpha}=2\alpha-2\arctan(\cos\Theta \tan\alpha). \eeq Hence if the polarizers are equally rotated of an angle $2\pi/N$, the spin describes a regular $N$-sided polygon, whose solid angle is \beq \Omega_{(N)}=N\Omega_{2\pi/N}=2\pi- 2N\arctan\left(\cos\Theta\tan\frac{\pi}{N}\right)=2\beta_N, \eeq where we used the definition (\ref{eq:betaN}). This result is of course in agreement with other analyses \cite{SM} based on the Pancharatnam connection \cite{Panchar}. The above conclusion can be further generalized to the general case of an arbitrary (not necessarily regular) polygon. Indeed, if the polarizers are rotated at (relative) angles $\alpha_n$ with $n=0,\dots,N$, so that \beq \sum_{n=1}^N 2\alpha_n=2\pi, \eeq the solid angle is \beq \Omega'_{(N)}=\sum_{n=1}^N \Omega_{2\alpha_n}= 2\pi-2\sum_{n=1}^N\arctan(\cos\Theta \tan\alpha_n). \eeq This is also twice the Berry phase. Notice that if all $\alpha_n\to0$ as $N\to\infty$ one again obtains the limit (\ref{eq:finstatepi}): \beq \Omega'=\lim_{N\to\infty}\Omega'_N =2\pi-2\lim_{N\to\infty}\sum_{n=1}^N \alpha_n \cos\Theta =\Omega. \eeq We emphasize that these predictions for the $N<\infty$ case are not trivial from the physical point of view. The above phases are computed by assuming that, during a ``projection" {\em \`a la} von Neumann, the spin follows a geodesics on the Poincar\'e sphere. The mathematics of the projection has no such assumptions. The ``postulate's" only job is to relate all this projection formalism to measurements. \subsection{Evolution with a non-zero Hamiltonian} \label{sec-yesH} \andy{yesH} Let us now consider the effect of a non-zero Hamiltonian \andy{Hamadd} \beq H=\mu \mbox{\boldmath $\sigma$}b , \label{eq:Hamadd} \eeq where $\bmb = (b_x,b_y,b_z)$ is a unit vector, in general different from $\mbox{\boldmath $n$}} \def\bmA{\mbox{\boldmath $A$}$. One can think of a neutron spin in a magnetic field. See Figure 2. 
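Before analyzing this case, we note that the finite-$N$ expressions (\ref{eq:rhoN})--(\ref{eq:betaN}) of the preceding subsection are easily checked numerically. The following minimal Python sketch (an illustration only, not part of the proposed experiment; the function names and parameter values are our own choice) multiplies the $N$ projections explicitly and compares the resulting survival amplitude and phase with $\rho_N$, $\beta_N$ and with the limit $\Omega/2=\pi(1-\cos\Theta)$.
\begin{verbatim}
import numpy as np

# Pauli matrices and the spin-up reference state
sx = np.array([[0, 1], [1, 0]], dtype=complex)
sy = np.array([[0, -1j], [1j, 0]], dtype=complex)
sz = np.array([[1, 0], [0, -1]], dtype=complex)
up = np.array([1.0, 0.0], dtype=complex)

def rot(theta, n):
    """exp(-i theta sigma.n) for a unit vector n, using (sigma.n)^2 = 1."""
    sn = n[0] * sx + n[1] * sy + n[2] * sz
    return np.cos(theta) * np.eye(2) - 1j * np.sin(theta) * sn

def zeno_state(N, Theta, a=np.pi):
    """Unnormalized state after N equally spaced projections, no Hamiltonian."""
    n = np.array([np.sin(Theta), 0.0, np.cos(Theta)])
    psi = up.copy()
    for k in range(1, N + 1):
        phi_k = rot(a * k / N, n) @ up
        psi = phi_k * np.vdot(phi_k, psi)      # project onto |phi_k>
    return psi

Theta, N = 0.7, 400
coeff = np.vdot(up, zeno_state(N, Theta))      # equals rho_N * exp(-i beta_N)
rho_N = (np.cos(np.pi/N)**2 + np.cos(Theta)**2 * np.sin(np.pi/N)**2) ** (N / 2)
beta_N = np.pi - N * np.arctan(np.cos(Theta) * np.tan(np.pi / N))
print(abs(coeff) - rho_N)                      # ~ 0
print(-np.angle(coeff) - beta_N)               # ~ 0
print(beta_N - np.pi * (1 - np.cos(Theta)))    # -> 0 as N grows
\end{verbatim}
With $\Theta=0.7$ and $N=400$ the first two differences are at machine precision, while the last one is of order $10^{-5}$, consistent with the $N\to\infty$ limit (\ref{eq:finstatepi}).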
\begin{figure} \caption{Spin evolution with measurements and non-zero Hamiltonian.} \end{figure} If the system starts with spin up it would have the following---undisturbed---evolution: \andy{undisturb} \beq \psi(t) = \exp(-i\mu t\mbox{\boldmath $\sigma$}b)\coltwovector10} \def\down{\coltwovector01 . \label{eq:undisturb} \eeq Now let the system evolve for a time $T$ with projections at times $T_k=k\delta T$ ($k=1,\dots,N$ and $\delta T=T/N$) and Hamiltonian evolution in between. Defining $P_0 \equiv |\phi_0\rangle \langle \phi_0| = \pmatrix{1&0\cr0&0\cr}$, the $2\times2$ projection operator at stage-$k$ is \andy{projk} \beq P_k =|\phi_k\rangle \langle \phi_k| =\exp(-i\theta_k\mbox{\boldmath $\sigma$}n) P_0 \exp(i\theta_k\mbox{\boldmath $\sigma$}n) \label{eq:projk} \eeq and the state evolves to \andy{projev} \beq \psi(T)= \left[ \prod_{k=1}^N \left[P_k \exp(-i\mu \delta T \mbox{\boldmath $\sigma$}b)\right]\right]\coltwovector10} \def\down{\coltwovector01, \label{eq:projev} \eeq where here and in subsequent expressions a time-ordered product is understood [with earlier times (lower $k$) to the right]. Using $P_0^2=P_0$, Eq.\ (\ref{eq:projev}) can be rewritten \andy{projev2} \beq \psi(T)= \exp(-ia\mbox{\boldmath $\sigma$}n) \left[ \prod_{k=1}^N B_k \right] \coltwovector10} \def\down{\coltwovector01 , \label{eq:projev2} \eeq with \andy{bk} \beq B_k \equiv P_0\exp(i\theta_k\mbox{\boldmath $\sigma$}n) \exp(-i\mu \delta T \mbox{\boldmath $\sigma$}b) \exp(-i\theta_{k-1}\mbox{\boldmath $\sigma$}n) P_0 \label{eq:bk} \eeq ($\theta_0\equiv 0$). The computation of $B_k$ requires a bit of SU(2) manipulation. By using \andy{su21,2} \barr \left[ \mbox{\boldmath $\sigma$}A,\mbox{\boldmath $\sigma$}B \right] &=& 2i \mbox{\boldmath $\sigma$} \cdot \bmA \times \mbox{\boldmath $B$}} \def\bmb{\mbox{\boldmath $b$} \label{eq:su21} \\ (\mbox{\boldmath $\sigma$}A) (\mbox{\boldmath $\sigma$}B) (\mbox{\boldmath $\sigma$}A) &=& 2 (\bmA \cdot \mbox{\boldmath $B$}} \def\bmb{\mbox{\boldmath $b$})\mbox{\boldmath $\sigma$}A - (\bmA\cdot\bmA)\mbox{\boldmath $\sigma$}B , \label{eq:su22} \earr valid for $c$-number $\bmA$ and $\mbox{\boldmath $B$}} \def\bmb{\mbox{\boldmath $b$}$, one gets \andy{sbA} \barr \exp(i\theta\mbox{\boldmath $\sigma$}n) \mbox{\boldmath $\sigma$}b \exp(-i\theta\mbox{\boldmath $\sigma$}n) =\mbox{\boldmath $\sigma$}\cdot \widetilde{\bmb}, \label{eq:sbA} \earr with \andy{sbB} \beq \widetilde{\bmb}(\theta) \equiv \bmb \cos 2\theta + \mbox{\boldmath $n$}} \def\bmA{\mbox{\boldmath $A$} (\bmb \cdot \mbox{\boldmath $n$}} \def\bmA{\mbox{\boldmath $A$})(1-\cos2\theta) + \bmb \times \mbox{\boldmath $n$}} \def\bmA{\mbox{\boldmath $A$} \sin 2\theta , \label{eq:sbB} \eeq which is the vector $\bmb$ rotated by $2\theta$ about the $\mbox{\boldmath $n$}} \def\bmA{\mbox{\boldmath $A$}$-axis. The calculation of $B_k$ is now straightforward: \andy{sbk} \barr B_k & = & P_0\exp(i\delta \theta \mbox{\boldmath $\sigma$}n) \exp(-i\mu \delta T \mbox{\boldmath $\sigma$}\cdot \widetilde{\bmb}(\theta_{k-1})) P_0 \nonumber \\ & = & P_0 \left(1+i\delta \theta \mbox{\boldmath $\sigma$}n -i\mu \delta T \mbox{\boldmath $\sigma$}\cdot \widetilde{\bmb}(\theta_k) \right) P_0 + O(1/N^2), \label{eq:sbk} \earr where $\delta \theta = \theta_{k+1} - \theta_k$ is $k$-independent. 
Second order terms in $1/N$ drop out when the product (\ref{eq:projev2}) is computed for $N\to\infty$, so that \andy{prodB} \barr \prod_{k=1}^N B_k &=& \prod_{k=1}^N P_0 (1+i\delta \theta \mbox{\boldmath $\sigma$}n -i \mu \delta T \mbox{\boldmath $\sigma$}\cdot \widetilde{\bmb}(\theta_k)) P_0 \nonumber \\ &=& \prod_{k=1}^N \left\{ P_0+i P_0 (\delta \theta \mbox{\boldmath $\sigma$}n - \mu \delta T \mbox{\boldmath $\sigma$}\cdot \widetilde{\bmb}(\theta_k)) P_0 \right\} \nonumber\\ &=& \prod_{k=1}^N P_0\left\{1+i [\delta \theta n_z - \mu \delta T \widetilde{b}_z(\theta_k)] \right\} \nonumber\\ & = & P_0\exp\left\{ i \sum_{k=1}^N \left( \delta \theta n_z - \mu \delta T \widetilde{b}_z(\theta_k) \right)\right\} \label{eq:prodB} \earr where we have used $P_0 \sigma_x P_0= P_0 \sigma_y P_0=0$ and $P_0 \sigma_z P_0=P_0$. The continuum limit can be computed by letting the summations in (\ref{eq:prodB}) become integrals in $dT$ and $d\theta$. Moreover, $\frac{dT}{d\theta}=\frac{T}{a}$, which enables one to change integration variable and get for the ``(1,1)" component of $\prod_{k=1}^N B_k$ (all other components being zero) \andy{prodB11} \barr \exp \left( i n_z \int_0^a d \theta -i \mu \frac Ta \int_0^a \left[b_z \cos 2\theta + (\bmb \cdot \mbox{\boldmath $n$}} \def\bmA{\mbox{\boldmath $A$}) n_z (1- \cos 2 \theta) + (\bmb \times \mbox{\boldmath $n$}} \def\bmA{\mbox{\boldmath $A$})_z \sin 2\theta \right] d\theta \right) \nonumber \\ = \exp \left(i n_z a -i \mu \frac{T}{a} \left[ b_z \frac{\sin 2a}{2} + (\bmb \cdot \mbox{\boldmath $n$}} \def\bmA{\mbox{\boldmath $A$}) n_z \left(a- \frac{\sin 2a}{2} \right) + (\bmb \times \mbox{\boldmath $n$}} \def\bmA{\mbox{\boldmath $A$})_z \frac{1-\cos 2a}{2} \right]\right), \nonumber \\ \label{eq:prodB11} \earr The final state is an eigenstate of $P_N$ with unit norm, {\em independent\/} of the Hamiltonian $H$: \andy{finpsi} \barr \psi(T) &=& \exp \left( - i\mu \frac{T}{a} \left[ b_z \frac{\sin 2a}{2} + (\bmb \cdot \mbox{\boldmath $n$}} \def\bmA{\mbox{\boldmath $A$}) n_z \left(a-\frac{\sin 2a}{2} \right) + (\bmb \times \mbox{\boldmath $n$}} \def\bmA{\mbox{\boldmath $A$})_z \frac{1-\cos 2a}{2} \right]\right) \nonumber \\ & & \times \exp\left(i a n_z - ia\mbox{\boldmath $\sigma$}n \right) \coltwovector10} \def\down{\coltwovector01. \label{eq:finpsi} \earr The first factor in (\ref{eq:finpsi}) is obviously the ``dynamical phase." Note that up to a phase, $\psi(t)$ is just $\phi_k$, with $k=tN/T$. Therefore \andy{dynam} \barr \int_0^T \langle \psi(t) | H | \psi(t) \rangle dt &=& \frac{T}{a} \int_0^a \langle \phi_0| \exp(i\theta\mbox{\boldmath $\sigma$}n) \mu \mbox{\boldmath $\sigma$}b \exp(-i\theta\mbox{\boldmath $\sigma$}n) |\phi_0\rangle d\theta \nonumber \\ &=& \mu T \left[ b_z \frac{\sin 2a}{2a} + (\bmb \cdot \mbox{\boldmath $n$}} \def\bmA{\mbox{\boldmath $A$}) n_z \left(1 - \frac{\sin 2a}{2a} \right) + (\bmb \times \mbox{\boldmath $n$}} \def\bmA{\mbox{\boldmath $A$})_z \frac{1-\cos2a}{2a} \right] , \nonumber\\ \label{eq:dynam} \earr because the phases drop out in the above sandwich. It follows that the remaining phase in (\ref{eq:finpsi}), when the spin goes back to its initial state, is the geometrical phase. 
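This decomposition can also be verified numerically. The following minimal Python sketch (again purely illustrative, with our own choice of parameters and function names) interleaves a large but finite number of projections with the free evolution generated by (\ref{eq:Hamadd}) and compares the resulting state with the prediction (\ref{eq:finpsi}).
\begin{verbatim}
import numpy as np

sx = np.array([[0, 1], [1, 0]], dtype=complex)
sy = np.array([[0, -1j], [1j, 0]], dtype=complex)
sz = np.array([[1, 0], [0, -1]], dtype=complex)
up = np.array([1.0, 0.0], dtype=complex)

def expi(theta, v):
    """exp(-i theta sigma.v) for a real unit 3-vector v."""
    sv = v[0] * sx + v[1] * sy + v[2] * sz
    return np.cos(theta) * np.eye(2) - 1j * np.sin(theta) * sv

def zeno_with_field(N, a, mu, T, n, b):
    """Hamiltonian steps exp(-i mu dT sigma.b) interleaved with projections on phi_k."""
    psi, dT = up.copy(), T / N
    for k in range(1, N + 1):
        psi = expi(mu * dT, b) @ psi             # free evolution over one step
        phi_k = expi(a * k / N, n) @ up
        psi = phi_k * np.vdot(phi_k, psi)        # projection onto |phi_k>
    return psi

a, mu, T, N = 2.0, 0.8, 1.0, 20000
n = np.array([np.sin(0.7), 0.0, np.cos(0.7)])
b = np.array([0.36, 0.48, 0.8])                  # field direction, |b| = 1

dyn = mu * T / a * (b[2] * np.sin(2*a) / 2
                    + np.dot(b, n) * n[2] * (a - np.sin(2*a) / 2)
                    + np.cross(b, n)[2] * (1 - np.cos(2*a)) / 2)
predicted = np.exp(-1j * dyn) * np.exp(1j * a * n[2]) * (expi(a, n) @ up)
print(np.linalg.norm(zeno_with_field(N, a, mu, T, n, b) - predicted))  # -> 0 as N grows
\end{verbatim}
For the parameters above the norm of the difference is of order $10^{-4}$ and decreases roughly like $1/N$, as expected from the second-order terms dropped in (\ref{eq:sbk}).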
When $a=\pi$ \andy{fun} \beq \psi(T) = \exp \left( - i\Omega/2 \right) \exp\left(- i\mu T (\bmb \cdot \mbox{\boldmath $n$}} \def\bmA{\mbox{\boldmath $A$}) n_z \right) \coltwovector10} \def\down{\coltwovector01 , \label{eq:fun} \eeq where $\Omega$ is the solid angle subtended by the curve traced out by the spin, as in (\ref{eq:finstatepi}), and $\mu T (\bmb \cdot \mbox{\boldmath $n$}} \def\bmA{\mbox{\boldmath $A$}) n_z$ yields the dynamical phase, as can also be seen by direct computation of (\ref{eq:dynam}). We remark that if time ordered products are looked upon as path integrals \cite{PInt}, then our above demonstration is effectively a path integral derivation of the geometrical phase. A practical implementation of the process just described would involve an experimental setup similar to the one described after \eqn{finstatepi}, but with a magnetic field whose action on the spin is described by the Hamiltonian (\ref{eq:Hamadd}). If the neutron were to evolve {\em only} under the action of the Hamiltonian, its spin would precess around the magnetic field. However, the sequence of spin filters, which project the neutron spin onto the states (\ref{eq:projfamily}), compel the spin to follow the same trajectory as in the previous case [Eq.\ (\ref{eq:finstate})], i.e.\ a cone whose symmetry axis is $\mbox{\boldmath $n$}} \def\bmA{\mbox{\boldmath $A$}$. As above, the spin acquires a geometrical phase, but now there is a dynamical phase as well. \subsection{A particular case} \label{sec-partc} \andy{partc} It is instructive to look at a particular case of (\ref{eq:finpsi})-(\ref{eq:fun}). We first note that if $\mu=0$ in (\ref{eq:finpsi}) we recover (\ref{eq:finstate}). Now let $\bmb = \mbox{\boldmath $n$}} \def\bmA{\mbox{\boldmath $A$}$. In this situation the projectors and the Hamiltonian yield the same trajectory in spin space (although, as will be seen, at different rates). If $\mu=0$ (so that $H=0$), the spin evolution is only due to the projectors and the final result was computed in (\ref{eq:finstatepi}) \andy{finbis} \beq \psi(T) = \exp (-i \Omega/2 ) \phi_0 . \label{eq:finbis} \eeq If, on the other hand, there is a nonvanishing Hamiltonian (\ref{eq:Hamadd}), but {\em no} projectors are present, a cyclic evolution of the spin is obtained for $\mu T=\pi$. The calculation is elementary and yields \andy{fintris} \beq \psi(T) = \exp (-i \pi ) \phi_0 . \label{eq:fintris} \eeq Observe that the dynamical phase in this case is [$\mu T=\pi, \bmb = \mbox{\boldmath $n$}} \def\bmA{\mbox{\boldmath $A$}$ and $a=\pi$ in Eq.\ (\ref{eq:dynam})] \andy{dynamss} \beq \int_0^T \langle \psi(t) | H | \psi(t) \rangle dt = \pi n_z = \pi[1- (1-n_z)] = \pi -\Omega/2 . \label{eq:dynamss} \eeq Therefore, the ``$\pi$" phase in (\ref{eq:fintris}) can be viewed, {\em \`a la} Aharonov and Anandan \cite{AA87}, as the sum of a geometrical ($\Omega/2$) and a dynamical ($\pi - \Omega/2$) contribution. Now let both the Hamiltonian and the projectors be present. From Eq.\ (\ref{eq:fun}), one gets \andy{funny} \beq \psi(T) = \exp \left( - i\Omega/2 \right) \exp\left(- i\mu T n_z \right) \coltwovector10} \def\down{\coltwovector01 , \label{eq:funny} \eeq Notice that the value of $\mu$ is now arbitrary, so that $\mu T$ is not necessarily equal to $\pi$ (the cyclic evolution of the spin is due to the projectors, not to the Hamiltonian). When $\mu T < \pi$, the projections are too ``fast" and do not yield (\ref{eq:fintris}). 
On the other hand, when $\mu T > \pi$, the projections are too slow and supply less phase, in comparison with Eq.\ (\ref{eq:fintris}). Only in the case $\mu T = \pi$ do the projections yield the right phase in (\ref{eq:fintris}). Their presence is superfluous in this case: one would obtain exactly the same vector and the same phase without them. Our conclusions are summarized in Table~1. In some sense, one may say that the Hamiltonian dynamics provides a ``natural clock" for the phase of the wave function. \begin{center} {\small {\bf Table 1}: Phases for cyclic spin evolutions} \\ \quad \\ \begin{tabular}{|c|c|c|c|} \hline\hline & $H=0$ & $H=\mu \mbox{\boldmath $\sigma$}b$ & $H=\mu \mbox{\boldmath $\sigma$}b$ \\ & and projections & no projections & and projections \\ \hline $\phi_{\rm geom}$ & $\Omega/2$ & $\Omega/2$ & $\Omega/2$ \\ \hline $\phi_{\rm dyn}$ & 0 & $\pi - \Omega/2$ & $\mu T n_z$ \\ \hline $\phi_{\rm tot}=\phi_{\rm geom}+\phi_{\rm dyn}$ & $\Omega/2$ & $\pi (=\mu T)$ & $\Omega/2 + \mu T n_z$ \\ \hline & cyclic evolution & cyclic evolution & cyclic evolution \\ & due to projections & due to field & due to projections \\ \hline\hline \end{tabular} \end{center} \setcounter{equation}{0} \section{A Gedanken Experiment} \label{sec-expimpl} \andy{expimpl} An experimental implementation with neutrons would be difficult because it would involve putting a QZE set-up inside an interferometer in order to measure phase. We therefore restrict ourselves to a gedanken experiment based on the use of $^3$He as a neutron polarization filter \cite{Heil}. It is well known \cite{Passel} that Helium 3 is ``black" to neutrons but {\em polarized\/} $^3$He only absorbs one spin state of a neutron beam---hence acts as a 50$\%$ absorber of a beam; the rest of it emerges fully polarized. In practice an external magnetic field is used to maintain the polarization axis of the $^3$He. If this external bias field were to be given a slow twist along a longitudinal axis, the state of polarization of the $^3$He should follow the direction of the twist. A neutron beam propagating through a cell of high-pressure polarized $^3$He along an axis aligned with the direction of twist will become fully polarized and should develop a Berry phase according to the argument of the previous section. From an experimental perspective a significant problem is that we so far lack a notion of slowness (as when we speak of ``slow twist" of the $B$ field). In the previous calculation, it is implicitly assumed that $\theta$ changes more slowly than $t$ (time): in other words, the relaxation processes in the $^3$He are given enough time (are fast enough) to function as a polarizer. A full treatment of this problem should therefore describe the physics of the projection process. We now tackle this issue and see that the notion of slowness can be given quantitative meaning in terms of a condition for adiabaticity. In practice, the absorption of the non-selected spin state occurs over a finite distance, of the order of one or two centimeters. This situation can be modeled via the following family of effective (nonhermitian) Hamiltonians: \andy{effham} \beq H_k = -i V |\phi_k^\perp \rangle \langle \phi_k^\perp |, \label{eq:effham} \eeq where $V$ is a real constant and \andy{projfamily2} \beq \phi_k^\perp \equiv \exp(-i\theta_k\mbox{\boldmath $\sigma$}n)\down \qquad \hbox{with~~} \theta_k \equiv \frac{ak}N \;, \qquad k=0,\ldots,N \ . 
\label{eq:projfamily2} \eeq Note that $\langle \phi_k | \phi^\perp_k \rangle = 0$ [see Eq.\ (\ref{eq:projfamily})]. We first assume, for simplicity, that no external ($^3$He aligning) magnetic field is present. We define \andy{projk2} \beq P_k^\perp \equiv |\phi_k^\perp \rangle \langle \phi_k^\perp| =\exp(-i\theta_k\mbox{\boldmath $\sigma$}n) P_0^\perp \exp(i\theta_k\mbox{\boldmath $\sigma$}n) \qquad (P_0^\perp = |\phi_0^\perp \rangle \langle \phi_0^\perp|) \ . \label{eq:projk2} \eeq Obviously $P_k^\perp = 1-P_k$, where $P_k$ was defined in (\ref{eq:projk}). The evolution engendered by the above Hamiltonian reads \andy{effhamev} \beq e^{-iH_k \tau} = P_k + \epsilon P^\perp_k = \exp(-i\theta_k\mbox{\boldmath $\sigma$}n) \pmatrix{1& 0\cr 0& \epsilon \cr} \exp(i\theta_k\mbox{\boldmath $\sigma$}n) \equiv P'_k , \label{eq:effhamev} \eeq where (inserting $\hbar$) \andy{epsest} \beq \epsilon \equiv e^{-V \tau/\hbar} \label{eq:epsest} \eeq is a parameter yielding an estimate of the efficiency of the polarizer. One can estimate a minimal value for $V$: for a thermal neutron (speed $v \simeq 2000$m/s) and an absorption length $\ell$ on the order of 1$\;$cm for the wrong-spin component, one gets $\tau = \ell/v \simeq 5 \mu$s and one obtains a good polarizer for $V > \hbar / \tau \simeq 10^{-29}\;$J $\simeq 10^{-7}\;$meV. The evolution can be computed by using the technique of Section~\ref{sec-potboil} ($\sqrt{P'_0}=P_0+\epsilon^{1/2}P^\perp_0$): \andy{projevpri} \beq \psi'(T)= \exp(-ia\mbox{\boldmath $\sigma$}n) \sqrt{P'_0} \left[ \prod_{k=1}^N B'_k \right] \coltwovector10} \def\down{\coltwovector01 , \label{eq:projevpri} \eeq with $T=N\tau$ and \andy{prodBpri} \barr \prod_{k=1}^N B'_k &=& \prod_{k=1}^N \sqrt{P'_0} (1+i\delta \theta \mbox{\boldmath $\sigma$}n ) \sqrt{P'_0} = \prod_{k=1}^N P'_0+i \sqrt{P'_0} (\delta \theta \mbox{\boldmath $\sigma$}n ) \sqrt{P'_0} \nonumber\\ & = & \pmatrix{1+i \delta \theta n_z& i \delta \theta \epsilon^{1/2} n_-\cr i \delta \theta \epsilon^{1/2} n_+& \epsilon(1-i \delta \theta n_z) \cr}^N , \label{eq:prodBpri} \earr where $n_\pm \equiv n_x \pm i n_y$. The evaluation of the above matrix product when $N \to \infty$ is lengthy but straightforward. One gets \andy{finpri} \beq \psi'(T)=\exp(-ia\mbox{\boldmath $\sigma$}n){\cal M}\phi_0, \label{eq:finpri} \eeq where \andy{fin1} \beq {\cal M} =\frac{e^{-ab}}{\Delta} \pmatrix{\Delta\;\mbox{ch}(a\Delta)+(b+in_z)\;\mbox{sh}(a\Delta)& in_{-}\;\mbox{sh}(a\Delta)\cr in_{+}\;\mbox{sh}(a\Delta)& \Delta\;\mbox{ch}(a\Delta)-(b+in_z)\;\mbox{sh}(a\Delta)\cr}, \label{eq:fin1} \eeq with \andy{Deltadef} \beq b = \frac{VT}{2a\hbar}, \qquad \Delta=\sqrt{b^2+2ibn_z-1}. \label{eq:Deltadef} \eeq We are interested in the limit of large $b=VT/2a\hbar$. Indeed, larger values of $b$ correspond to more ideal polarizers. In fact $\gamma=V/\hbar$ represents the absorption rate of the wrong component of the spin, while $\omega=2a/T$ is the angular velocity of precession (the spin describes an angle of $2a$ in time $T$). The parameter $b=\gamma/\omega$ is the ratio of these two quantities. Large values of $b$ imply \andy{gammaomega} \beq \gamma\gg \omega, \label{eq:gammaomega} \eeq i.e., an absorption rate much larger than the velocity of precession. In other words, the spin rotation must be sufficiently slow to allow the absorption of the wrong component of the spin. By introducing the neutron speed $v$, one can define the absorption length $\ell=v/\gamma=v\hbar/V$ and the length covered by the neutron while rotating for $1$ rad, $L=v/\omega=vT/2a$. 
Hence (\ref{eq:gammaomega}) reads \beq L\gg\ell. \eeq These are all conditions of adiabaticity. In the large $b$ limit, using the definition (\ref{eq:Deltadef}), (\ref{eq:fin1}) becomes \barr {\cal M} &=&\frac{e^{a(\Delta-b)}}{2\Delta} \pmatrix{\Delta+b+in_z& in_{-}\cr in_{+}& \Delta-b-in_z\cr}+{\rm O}(e^{-2ab})\nonumber\\ &=&\exp(ian_z) \pmatrix{1-a\frac{1-n_z^2}{2b}& i\frac{n_{-}}{2b}\cr i\frac{n_{+}}{2b}& 0\cr} +{\rm O}\left(\frac{1}{b^2}\right). \earr Remembering the definition of $b$ in (\ref{eq:Deltadef}), one gets \barr {\cal M}&=&\exp(ian_z) \pmatrix{1+\frac{\hbar a^2(n_z^2-1)}{VT}& i\frac{\hbar an_{-}}{VT}\cr i\frac{\hbar an_{+}}{VT}& 0\cr} +{\rm O}\left(\left(\frac{2a\hbar}{VT}\right)^2\right) \nonumber\\ & &\longrightarrow \exp(ian_z) P_0, \quad\mbox{when}\quad \frac{VT}{2a\hbar}\to\infty. \earr The above formula yields the first corrections to an ideal, purely adiabatic evolution. Basically, the system is projected on slightly different directions, thereby rotating in spin space. But if the system ``on its own" (i.e., through its dynamics) manages to rotate significantly between projections, then more will be absorbed on the next projection and it will not follow the rotating field, at least not without loss of probability (or intensity). It is interesting to note that the same result can be obtained by considering a continuous version of the effective Hamiltonian (\ref{eq:effham}) \andy{effhamcont} \beq H(t)=-iV P^\perp (t)=-iV U^\dagger(t) P^\perp_0 U(t), \label{effhamcont} \eeq where \andy{unrot} \beq U(t)=\exp\left(i\frac{a}{T}t\;\mbox{\boldmath $\sigma$}n\right) \label{eq:unrot} \eeq is a unitary operator (rotation). The state vector $\psi(t)$ satisfies the Schr\"odinger equation \beq i\partial_t \psi(t)=H(t) \psi(t). \eeq Consider now the following rotated vector \beq \widetilde \psi(t)=U(t) \psi(t). \eeq It is easy to prove that it satisfies the equation \beq i\partial_t \widetilde\psi(t)=\widetilde H \widetilde\psi(t), \eeq where \beq \widetilde H=i \dot U(t) U^\dagger(t) + U(t) H(t) U^\dagger(t) = -\frac{a}{T}\mbox{\boldmath $\sigma$}n-i V P^\perp_0 \eeq is independent of $t$. One then gets \andy{evol} \beq \psi(t)=U^\dagger(t)\widetilde\psi(t) =\exp\left(-i\frac{a}{T}t\;\mbox{\boldmath $\sigma$}n\right)\exp(-i\widetilde H t) \psi(0), \label{eq:evol} \eeq where \beq \widetilde H T=-a\mbox{\boldmath $\sigma$}n - i V T P^\perp_0=-a M, \quad M=\pmatrix{n_z& n_{-}\cr n_{+}& -n_z+i2b\cr}, \eeq $b$ being defined in (\ref{eq:Deltadef}). Hence one obtains \beq \exp(-i\widetilde H T)=\exp(ia M)={\cal M} \eeq and (\ref{eq:evol}) yields (\ref{eq:finpri}). Observe that \beq \widetilde H=-\omega\frac{\mbox{\boldmath $\sigma$}n}{2}-i\gamma P^\perp_0, \eeq from which it is apparent the previous interpretation of the coefficients $\omega$ and $\gamma$. The above calculation was performed by assuming that no external field is present. However, we do need an external $B$ field, in order to align $^3$He. Its effect can be readily taken into account by noticing that, when the neutron crosses the region containing polarized $^3$He, if the conditions for adiabaticity are satisfied, the neutron spin will always be (almost) parallel to the direction of $^3$He and therefore to the direction of the magnetic field. The resulting dynamical phase is therefore trivial to compute and reads $\phi_{\rm dyn} \simeq \mu B T/\hbar$. 
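As a numerical cross-check (not part of the original text), both the $N$-fold matrix product of (\ref{eq:prodBpri}) and the rotated-frame expression $\exp(iaM)$ can be compared against the closed form (\ref{eq:fin1}). A minimal Python/NumPy-SciPy sketch, with hypothetical values of $a$, $b$ and of the unit vector $n$:
\begin{verbatim}
import numpy as np
from scipy.linalg import expm

a, bpar = 2.0, 3.0                            # bpar plays the role of b = VT/(2a hbar)
n = np.array([0.3, 0.4, np.sqrt(0.75)])       # unit vector n
nz, nm, npl = n[2], n[0] - 1j*n[1], n[0] + 1j*n[1]

# closed form (fin1) with Delta as in (Deltadef)
Delta = np.sqrt(bpar**2 + 2j*bpar*nz - 1 + 0j)
ch, sh = np.cosh(a*Delta), np.sinh(a*Delta)
calM = np.exp(-a*bpar)/Delta * np.array(
    [[Delta*ch + (bpar + 1j*nz)*sh, 1j*nm*sh],
     [1j*npl*sh,                    Delta*ch - (bpar + 1j*nz)*sh]])

# rotated-frame route: exp(i a M)
M = np.array([[nz, nm], [npl, -nz + 2j*bpar]])
print(np.allclose(expm(1j*a*M), calM))        # True

# discrete route: N-fold product of (prodBpri), with eps = exp(-2 a bpar / N)
def product(N):
    dth, eps = a/N, np.exp(-2*a*bpar/N)
    A = np.array([[1 + 1j*dth*nz,            1j*dth*np.sqrt(eps)*nm],
                  [1j*dth*np.sqrt(eps)*npl,  eps*(1 - 1j*dth*nz)]])
    return np.linalg.matrix_power(A, N)

print(np.max(np.abs(product(20000) - calM)))  # tends to 0 as N grows
\end{verbatim}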
In order to obtain the geometric phase in a realistic experiment, this dynamical phase $\phi_{\rm dyn}$ should be subtracted from the total phase acquired by the neutron during its interaction with $^3$He. Incidentally, notice that this is experimentally feasible: one can take into account the contribution of a large dynamical phase due to the magnetic field and neatly extract a small Berry phase \cite{Ioffe}. The novelty of our proposal consists in the introduction of polarizing $^3$He to force the neutron spin to follow a given trajectory in spin space. An alternative realization relies on a set of discrete $^3$He polarization filters with progressively tilted polarization axes, as a finite-difference approximation to the system discussed above. Such a system would be a neutron analog of a set of polaroid filters with progressively tilted axes, through which a photon beam propagates with little or no loss (in the limit of small angles), as proposed by Peres \cite{Beskow}. However, in the case discussed in this Letter, the axes of the neutron polarizers need not belong to a single plane, so that the neutron can acquire a Berry phase as well as a change in polarization direction. \vspace*{1cm} {\bf Acknowledgments:} This work was supported in part by the United States National Science Foundation grant PHY 97 21459. \end{document}
\begin{document} \begin{center} {\Large Dynamics of multi-modes maximum entangled coherent state over amplitude damping channel} A. El Allati $^{a,b}$, Y. Hassouni $^{a}$ and N. Metwally $^{c}$\\ $^{a}$ Facult\'e des Sciences, Laboratoire de Physique Th\'{e}orique URAC 13, Universit\'e Mohammed V - Agdal. Av. Ibn Battouta, B.P. 1014, Rabat, Morocco\\[0pt] $^{b}$ The Abdus Salam International Centre for Theoretical Physics, Trieste, Italy\\ $^{c}$Mathematics Department, College of Science, University of Bahrain, P.O. Box, 32038 Bahrain. \\ $^{c}$Mathematics Department, Faculty of Science, South Valley University, Aswan, Egypt. \end{center} \begin{abstract} The dynamics of a maximum entangled coherent state travelling through an amplitude damping channel is investigated. For small values of the transmissivity rate the travelling state is very fragile to this noise channel, where it suffers from a phase flip error with high probability. The entanglement decays smoothly for larger values of the transmissivity rate and rapidly for smaller values of this rate. As the number of modes increases, the travelling state loses its entanglement more quickly over this noise channel. The entanglement of the odd and even states vanishes at the same value of the field intensity. \textit{Keywords}: Entanglement; Quantum communication; Decoherence; Coherent states. \end{abstract} \section{Introduction} Entanglement is one of the fundamental properties of quantum information theory, where it is considered a nonclassical resource for many applications such as quantum teleportation \cite{Ben} and superdense coding \cite{Ben1}. To achieve these tasks with high efficiency one needs maximum entangled states and perfect local operations, which are very difficult to establish in the real world. Therefore, investigating the dynamics of entanglement in the presence of imperfect circumstances is very important in the context of quantum information processing. For example, the dynamics of multipartite entanglement under the influence of decoherence is investigated in \cite{Carvahlo,Konrad}. The dynamics of entangled atoms interacting with a deformed cavity mode is investigated by Metwally \cite{Metwally1}. Coherent states play important roles in many fields of physics, especially in quantum technologies and quantum optics \cite{Deu}. For example, two entangled coherent states are used to realize an effective quantum computation \cite{Jen} and quantum teleportation \cite{Enk}. Allati et al.\ \cite{allati1} have suggested a system of three entangled coherent modes and used it to perform quantum teleportation. Communication via an entangled coherent quantum network is investigated in \cite{allati2}, where it is shown that the probability of performing successful teleportation through this network depends on its size. The entanglement properties of an optical coherent entangled state consisting of two entangled modes under an amplitude damping channel are discussed by Wickrt \cite{Ricardo}. The dynamics of the GHZ state through the amplitude damping channel is investigated by Konrad et al.\ \cite{Konrad}. This motivates us to investigate the entanglement properties of a class of maximum entangled coherent states consisting of three modes passing through a damping channel. We also study the dynamics of a multi-mode entangled coherent state passing through this noise channel. The effect of this channel is equivalent to photon absorption followed by a phase flip operation.
The suppressing of the travelling state over this channel is discussed, where we quantified the bound entanglement of the output state as well as the survival amount of entanglement. The paper is organized as follows: In Sec.2, we review the suggested entangled muti-modes coherent state, MMCS the amount of entanglement over a perfect environment is quantified \cite{allati1}. The entanglement of the MMCS over an amplitude damping channel is investigated in Sec.3, where we quantify the bound of entanglement for a maximum entangled state consists of three modes. The dynamics of multi-modes entangled coherent state passes through the damping channel is discussed. Finally, we summarize our results in Sec.4. \section{Perfect environment} Entangled coherent states have been proposed as an important resource in quantum information processing, ensuring or teleporting an unknown quantum states. These sates can be written as function of the Fock state \cite{Gil} as, \begin{equation} \bigl| \pm\alpha \bigr\rangle=\exp(-2|\alpha|^2)\sum_{n=0}^{\infty}{\frac{ (\pm\alpha)^n}{\sqrt{n!}}\bigl| n \bigr\rangle}. \end{equation} The coherent state can be generated from the vacuum state $|0\rangle$, by the displacement operator $D(\alpha)=exp(\alpha \hat{a}^{\dag}-\alpha^{*}\hat{a})$, where $\hat{a}^{\dag}$ and $\hat{a}$ are bosons creation and annihilation operators respectively. Among of the properties of these states is the non-orthogonality, and the overlap of two coherent states $|\pm\alpha\rangle$ is $\langle\alpha|-\alpha\rangle = exp(-2|\alpha|^{2})$ which becomes orthogonal by increasing the amplitude $|\alpha|$. Two coherent states can be used as basis states of a logical qubit where $|0\rangle_{L}=|\alpha\rangle$ and $|1\rangle_{L}=|-\alpha\rangle$. One form of the entangled coherent states between three modes can be written as, \begin{figure} \caption{Concurrence for the coherent states for a range of $\theta$ and $p$, with $p=\langle\alpha|-\alpha\rangle^{2} \label{conc1} \end{figure} \begin{eqnarray} \label{channel} \rho_{\alpha} &=&\frac{1}{N_{\theta }^{2}}\Bigl\{\bigl|\sqrt{2}\alpha ,\alpha ,\alpha \bigr\rangle_{345}\bigl\langle\sqrt{2}\alpha ,\alpha ,\alpha \bigr|+e^{-i\theta} \bigl|\sqrt{2}\alpha ,\alpha ,\alpha \bigr\rangle_{345}\bigl\langle-\sqrt{2}\alpha ,-\alpha ,-\alpha \bigr| \nonumber \\ &+&e^{i\theta}\bigl|-\sqrt{2}\alpha ,-\alpha ,-\alpha \bigr\rangle_{345} \bigl\langle\sqrt{2}\alpha ,\alpha ,\alpha \bigr| \nonumber \\ &+&\bigl|-\sqrt{2}\alpha ,-\alpha ,-\alpha \bigr\rangle_{345} \bigl\langle-\sqrt{2}\alpha ,-\alpha ,-\alpha \bigr|\Bigr\}, \end{eqnarray} where $N_{\theta }=\sqrt{2(1+ e^{-16|\alpha |^{2}}cos(\theta))}$ is the normalization factor. If we set $\theta=\pi$ in (\ref{channel}), one obtains a maximum entangled state defined by, \begin{equation}\label{max} \rho^{-}_{\alpha}=\ket{\psi^{-}_{\alpha}}\bra{\psi^{-}_{\alpha}},\quad \ket{\psi^{-}_{\alpha}}= \frac{1}{\sqrt{N_{\alpha}}}(\ket{\sqrt{2}\alpha,\alpha,\alpha}-\ket{-\sqrt{2}\alpha,-\alpha,-\alpha}) \end{equation} where $N_{\alpha}=2(1-e^{-8|\alpha|^{2}})$ is the normalization factor. We use the concurrence to quantify entanglement between two qubits, which is denoted by $\mathcal{C(\ket{\psi_{\alpha}}})$ as \cite{Dur}, \begin{equation} \mathcal{C}^{1/23}(\ket{\psi_{\alpha}})=\frac{1-exp(-8|\alpha|^{2})}{1+exp(-8|\alpha|^{2})cos(\theta)}. \end{equation} Fig.(\ref{conc1}), describes the dynamics of entanglement contained in the state $\ket{\psi_{\alpha}}$ as function of $\theta$ and $|\alpha|$. 
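A minimal numerical sketch (not from the original paper, assuming Python with NumPy) of the concurrence expression above, which makes the behaviour discussed next explicit:
\begin{verbatim}
import numpy as np

def concurrence(alpha, theta):
    # C^{1/23} = (1 - e^{-8|alpha|^2}) / (1 + e^{-8|alpha|^2} cos(theta))
    x = np.exp(-8*abs(alpha)**2)
    return (1 - x) / (1 + x*np.cos(theta))

for alpha in (0.1, 0.5, 1.0, 2.0):
    print(alpha, concurrence(alpha, np.pi), concurrence(alpha, 0.0))
# theta = pi gives C = 1 for every alpha > 0, while theta = 0 gives C < 1,
# approaching 1 only as |alpha| grows.
\end{verbatim}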
It is clear that, at $\theta=\pi$ the concurrence $\mathcal{C}=1$ namely the entanglement is maximum and is independent of $|\alpha|$. However the concurrence is less than 1 ebit for the small amplitude, but it increases to one ebit for larger amplitudes. Therefore, this state represents two classes of entangled coherent states: the first is partial entangled states and the second is maximum entangled one (see \cite{allati1} for more details). \section{Entanglement through noise environment} \subsection{Amplitude damping:Description} \begin{figure} \caption{Phase flip probability $p_{f} \label{phaseflip1} \end{figure} In this section we investigate the dynamics of the maximum entangled state (\ref{max}), when it passes through an amplitude damping channel, which is defined by a photon loss and phase flip with probability $p_{f}$. The photon loss due to the interaction of travelling state (\ref{max}) with an optical fiber prepared in a vacuum state. This interaction transfer the state $\ket{\pm\alpha}\ket{0}_E$ to $\ket{\pm\sqrt{\eta}\alpha}\ket{\pm\sqrt{1-\eta}\ket{\alpha}}_{E}$, where $\eta$ is called the the transmissivity rate\cite{Ricardo}. Tracing out the environment mode, one obtains a new state where the amplitude is reduced from $\alpha$ to $\alpha\sqrt{\eta}$ \cite{Ricardo}. Therefore, the state vector $\ket{\psi^{-}_{\alpha}}$ changes to $\ket{\psi^{-}_{\eta}}$ where, \begin{equation} \ket{\psi^{-}_{\eta}}= \frac{1}{\sqrt{N_{\eta}}}(\ket{\sqrt{2}\alpha\sqrt{\eta},\alpha \sqrt{\eta},\alpha \sqrt{\eta}}-\ket{-\sqrt{2}\alpha \sqrt{\eta},-\alpha \sqrt{\eta},-\alpha \sqrt{\eta}}). \end{equation} On the other hand, if we assume that this travelling state is subject to a phase noise with probability $p_f$, then the final resulting effect is equivalent to the effect of the amplitude damping channel. So, the final output state $\rho^{-}_{adc}$ which is obtained from the travelling state (\ref{max}) through amplitude damping channel is given by, \begin{equation} \rho^{-}_{adc}=(1-P_{f})\rho^{-}_{\eta}+P_{f}Z\rho^{-}_{\eta}Z, \end{equation} where, $\rho^{-}_\eta=\ket{\psi^{-}_\eta}\bra{\psi^{-}_\eta}$ and $Z$ is the phase flip error, which is defined as $Z(\lambda_{1}|0 \rangle_{L}+\lambda_{2}|1\rangle_{L})=\lambda_{1}|0\rangle_{L}-\lambda_{2}|1\rangle_{L}$. This operator effects on the travelling state with probability $p_f$ and with $(1-p_f)$ the state passes safely. In terms of $\alpha$ and $\eta$, the probability $p_f$ is given by, \begin{equation} p_{f}=\frac{1-e^{-8|\alpha|^{2}}-e^{-4(1-\eta)|\alpha|^{2}}+e^{-4(1+\eta)|\alpha|^{2}}}{2(1-e^{-8|\alpha|^{2}})}. \end{equation} The behavior of the probability is shown in Fig.\ref{phaseflip1} for different values of the transmissivity rate, $\eta$. It displays that for small values of the field's intensity $(\alpha\simeq 0)$, the minimum values of $p_f$ increases as the noise strength $\eta$ decreases. In a small range of field intensity $\alpha\in[0,4]$, $p_f$ increases faster and reaches its maximum value ($\frac{1}{2})$ as the noise strength increases. However, for larger values of the field intensity the dynamics of $p_f$ is independent of the noise strength, where $p_f=\frac{1}{2}$. This means that for larger values of the transmissivity rate $\eta\simeq 1$, the travelling state is almost maximum and its resistance to phase flip error is stronger. 
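The expression for $p_f$ is easy to tabulate. A minimal sketch (not from the original paper, assuming Python with NumPy), with a few illustrative values of $\eta$ and $|\alpha|$:
\begin{verbatim}
import numpy as np

def p_flip(alpha, eta):
    """Phase-flip probability p_f(alpha, eta) from the expression above."""
    t = abs(alpha)**2
    num = 1 - np.exp(-8*t) - np.exp(-4*(1-eta)*t) + np.exp(-4*(1+eta)*t)
    return num / (2*(1 - np.exp(-8*t)))

for eta in (0.1, 0.5, 0.9):
    print(eta, [round(p_flip(a, eta), 4) for a in (0.1, 0.5, 1.0, 3.0)])
# p_f starts near (1 - eta)/2 for small alpha and saturates at 1/2
# for large alpha, for every value of eta.
\end{verbatim}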
\subsection{Dynamics of entanglement:three qubit} To investigate the entanglement of a maximum entangled tripartite state ( which is defined by (\ref{max})), passes through amplitude damping channel, we consider the following situation: Let us assume that we have a source supplies a three users, Alice, Bob and Charlie with a maximum entangled state of type (\ref{max}). For simplicity, it is assumed that during the transition from the source to the users, Bob and Charlie's qubit are forced to pass through amplitude damping channel. According to this suggested scenario, the dynamics of the travelling state is given by, \begin{equation} \rho_{adc}=(1\otimes\mathcal{S}_1\otimes\mathcal{S}_2)\rho^{-}_{\alpha}, \end{equation} where $\mathcal{S}_1$ and $\mathcal{S}_2$ represent the damping channels which effect on Bob and Charlie's qubits respectively. For simplicity we set $\mathcal{S}_1=\mathcal{S}_2=\mathcal{S}$ and rewrite the state vector $\ket{\psi^{-}_{\alpha}}$ by using the orthogonal basis $u$ and $v$ defined as, \begin{equation}\label{bas} \ket{\alpha}=\lambda_{\alpha}\ket{u}+\mu_{\alpha}\ket{v},\quad \ket{-\alpha}=\lambda_{\alpha}\ket{u}-\mu_{\alpha}\ket{v}, \end{equation} and $\lambda_{\alpha}=(\frac{1+e^{-2|\alpha|^{2}}}{2})^{\frac{1}{2}}$ and $\mu_{\alpha}=(\frac{1-e^{-2|\alpha|^{2}}}{2})^{\frac{1}{2}}$. Then the output state vector can be written as, \begin{eqnarray}\label{output} |\psi_{out}\rangle&=& (1+e^{i\theta})\Bigl\{\lambda_{\sqrt{2}\alpha}\lambda_{\alpha}^{2}\ket{uuu}+ \lambda_{\sqrt{2}\alpha}\lambda_{\alpha}\mu_{\alpha}\bigl(\ket{uuv}+\ket{uvu}\bigr) +\lambda_{\sqrt{2}\alpha}\mu^2_{\alpha}\ket{uvv} \nonumber\\ &+& \mu_{\sqrt{2}\alpha}\lambda^{2}_{\alpha}\ket{vuu}+\mu_{\sqrt{2}\alpha}\lambda_{\alpha}\mu_{\alpha}\bigl(\ket{vuv}+\ket{vuu}\bigr)+\mu_{\sqrt{2}\alpha}\mu^{2}_{\alpha}\ket{vvv}\Bigr\} \nonumber\\ &+&(1-e^{i\theta}\Bigl\{\lambda_{\sqrt{2}\alpha}\lambda_{\alpha}^{2}\ket{uuu} -\lambda_{\sqrt{2}\alpha}\lambda_{\alpha}\mu_{\alpha}\bigl(\ket{uuv}+\ket{uvu}\bigr) +\lambda_{\sqrt{2}\alpha}\mu^2_{\alpha}\ket{uvv} \nonumber\\ &-& \mu_{\sqrt{2}\alpha}\lambda^{2}_{\alpha}\ket{vuu}+\mu_{\sqrt{2}\alpha}\lambda_{\alpha}\mu_{\alpha}\bigl(\ket{vuv}+\ket{vuu}\bigr) -\mu_{\sqrt{2}\alpha}\mu^{2}_{\alpha}\ket{vvv}\Bigr\}. \end{eqnarray} The lower bound of entanglement of state $\rho_{out}$ can be quantified by using a procedure described in \cite{Konrad}. This procedure state that the concurrence for any two qubits state $\ket{\zeta}\bra{\zeta}$ passes either in one or two sides of channels $\mathcal{S}_1$ and $\mathcal{S}_2$ is bounded from above in terms of the evolution of the concurrence of the maximally entangled state under either one of the one-sided channels as: \begin{equation} \mathcal{C}\bigl[(\mathcal{S}_1\otimes\mathcal{S}_2)\ket{\zeta}\bra{\zeta}\bigr]= \mathcal{C}\bigl[(\mathcal{S}_1\otimes\mathcal{S}_2)\ket{\phi}\bra{\phi}\bigr]\mathcal{C}\bigr[\ket{\zeta}\bra{\zeta}\bigl], \end{equation} where $\ket{\phi}\bra{\phi}$ is a maximum entangled two qubits state. For a three qubits state we use the same procedure, where we consider GHZ state represent the maximum entangled state. Therefore the concurrence of the maximum entangled state (\ref{max}) is bounded from the above as \cite{Simon}, \begin{equation}\label{K} \mathcal{C}^{23/1}[(1\otimes S\otimes S)\rho^{-}_{\alpha}]\leq \mathcal{C}^{23/1}[(1\otimes S\otimes S)|GHZ\rangle\langle GHZ|]\mathcal{C}^{23/1}[\rho^{-}_{\alpha}]. 
\end{equation} To quantify the degree of entanglement of the output state $\rho_{out}=\ket{\psi_{out}}\bra{\psi_{out}}$, we have to reexpress the GHZ in the new basis $u$ and $v$ as, \begin{equation} \ket{GHZ}=\frac{1}{\sqrt{2}}(\ket{uuu}+\bra{vvv}). \end{equation} As a first step, we consider one side effect of the amplitude damping channel on the GHZ state. This evolution is defined as, \begin{equation} (1\otimes 1\otimes S)\ket{GHZ_{uv}}\bra{GHZ_{uv}}=\left( \begin{array}{cccccccc} a & 0 & 0 & 0 & 0 & 0 & 0 & f \\ 0 & b & 0 & 0 & 0 & 0 & e & 0 \\ 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\ 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\ 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\ 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\ 0 & e^{*}& 0 & 0 & 0 & 0 & c & 0 \\ f^{*}& 0 & 0 & 0 & 0 & 0 & 0 & d ,\\ \end{array} \right), \end{equation} where, \begin{eqnarray} a&=&P_{s}\frac{\lambda^{2}_{\sqrt{\eta}u}}{4\lambda^{2}_u}, \quad b=(1-P_{s})\frac{\mu^{2}_{\sqrt{\eta}u}}{4\mu^{2}_u}, \quad f=P_{s}\frac{\mu_{\sqrt{\eta}u}\nu_{\sqrt{\eta}u}}{4\mu_{u}\nu_{u}}, \nonumber\\ e&=&-(1-P_{s})\frac{\lambda_{\sqrt{\eta}u}\mu_{\sqrt{\eta}u}}{4\lambda_{u}\mu_\alpha},\quad c=(1-P_{s})\frac{\lambda^{2}_{\sqrt{\eta}\alpha}}{4\lambda^{2}_{u}}, \quad d=P_{s}\frac{\mu^{2}_{\sqrt{\eta}u}}{4\lambda^{2}_u}. \nonumber\\ P_{s}&=&\frac{1}{2}+\frac{e^{-4(1-\eta)|u|^{2}}-e^{-4(1+\eta)|u|^{2}}}{2(1-e^{-8|u|^{2}})}. \end{eqnarray} It is clear that, the outer and the inner elements of the state represent the "unflipped" and "flipped" GHZ states of reduced, $\sqrt{\eta}u$ amplitude respectively. Then the dynamics of GHZ state through two-sides amplitude damping channel is given by, \begin{eqnarray} (1\otimes S\otimes S)\ket{GHZ_{u,u,u}}\bra{GHZ_{u,u,u}}&=&P_{s}\ket{GHZ_{u,\sqrt{\eta}u,\sqrt{\eta}u}}\bra{ GHZ_{u,\sqrt{\eta}u,\sqrt{\eta}u}} \nonumber\\ &+&(1-P_{s})Z\ket{GHZ_{u,\sqrt{\eta}u,\sqrt{\eta}u}}\bra{GHZ_{u,\sqrt{\eta}u,\sqrt{\eta}u}}Z. \end{eqnarray} \begin{figure} \caption{The concurrence $\mathcal{C} \label{Con} \end{figure} The concurrence of the travelling state (\ref{output}) through the amplitude damping channel is given by, \begin{equation}\label{conc} \mathcal{C}(\rho)=2max[0,|e|-\sqrt{ad},|f|-\sqrt{bc}]. \end{equation} Fig.\ref{Con} shows the dynamics of the concurrence $\mathcal{C}(\rho)$ for different values of the the transmissivity rate $\eta$. If the travelling state through the amplitude damping channel is partially entangled state i.e. $\eta$ is small, the entanglement, which is represented by the concurrence, is very small and vanishes for small values of the field intensity. However, for larger values of $\eta$, the initial entanglement is large and decreases smoothly as the field's intensity increases. So, to keep the entanglement of the MMECS over the amplitude damping channel survival for a long time, one has to decrease the field's intensity. It is clear that, for larger values of the transmissivity rate $\eta$ the travelling state is more robust. \subsection{Dynamics of entanglement:"$m$ modes} In this section, we assume that the users share a coherent state of $m$ modes given by, \begin{eqnarray} \label{gen} \ket{\Psi^{\pm}_{0...m}} &=& A^{\pm}_{m+1}\Bigl(|2^{\frac{m-1}{2} }\alpha\rangle_{0}...|2^{\frac{1}{2}}\alpha\rangle_{m-2}|\alpha \rangle_{m-1}|\alpha\rangle_{m} \nonumber \\ &&\pm|-2^{\frac{m-1}{2}}\alpha\rangle_{0}...|-2^{\frac{1}{2} }\alpha\rangle_{m-2}|-\alpha\rangle_{m-1}|-\alpha\rangle_{m}|\Bigr), \end{eqnarray} where $A^{\pm}_{m+1}=[2(1\pm e^{-2^{m+1}|\alpha|^{2}})]^{-\frac{1}{2}}$, is the normalized factor. 
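As a quick consistency check (not part of the original text), the overlap between the two branches of (\ref{gen}) is $e^{-2^{m+1}|\alpha|^{2}}$, which is exactly what the normalization factor $A^{\pm}_{m+1}$ presupposes. A minimal Python/NumPy sketch, with an arbitrary choice of $m$ and $\alpha$:
\begin{verbatim}
import numpy as np

def amplitude_norms(m, alpha):
    """|amplitude|^2 of each of the m+1 modes in Eq. (gen)."""
    return [2**(m-1-j) * abs(alpha)**2 for j in range(m-1)] + [abs(alpha)**2]*2

m, alpha = 4, 0.7                              # hypothetical values
S = sum(amplitude_norms(m, alpha))
print(np.isclose(S, 2**m * abs(alpha)**2))     # True: total is 2^m |alpha|^2
# hence <branch | other branch> = exp(-2 * 2^m |alpha|^2) = exp(-2^{m+1}|alpha|^2),
# reproducing A^{pm}_{m+1} = [2(1 +/- exp(-2^{m+1}|alpha|^2))]^{-1/2}.
\end{verbatim}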
This state can be generated from Schr\"{o}dinger state and optics devices. In \cite{allati1}, we have shown that this state represents a quantum network, shared between multiusers where one user called emitter posses the mode $0$ and the other users share the remaining $m$ modes. Moreover we have employed this state to teleport a multipartite states of $m$ modes. The degree of entanglement of the network which is defined by the state $\rho_{gen}=\ket{ \psi^{\pm}_{0,...m}} \bra{\psi^{\pm}_{0,...,m}}$ is given by \cite{allati2}, \begin{equation} C^{0/1,2,...,m}=1. \end{equation} for $\theta=\pi$ or for $\theta=0$. The main aim of this section is investigating the entangled and separable properties of this multipartite state. Let us assume that there are $m$ modes of the state (\ref{gen}) passes through an amplitude damping channel. In this case the output state can be written as, \begin{equation} \rho^{\pm}_{out}=(1-p_{f,m})\rho^{\pm}_{\eta,0...m}+p_{f,m}Z\rho^{\pm}_{\eta,0,...,m}Z, \end{equation} where, $\rho^{\pm}_{out}=\ket{\Psi^{\pm}_{0,...,m}}\bra{\Psi^{\pm}_{0,...,m}}$ and $p_{f,m}$ is the probability that the phase flip affects the travelling state through the amplitude damping channel. This probability is given by, \begin{equation}\label{prob} p_{f,m}=\frac{1-e^{-2^{m}|\alpha|^{2}}-e^{-2^{m-1}(1-\eta)|\alpha|^{2}} +e^{-2^{m-1}(1+\eta)|\alpha|^{2}}}{2(1-e^{-2^{m}|\alpha|^{2}})}, \quad m\geq1 \end{equation} \begin{figure} \caption{The probability of the phase flip error $p_{f,m} \label{PFN} \end{figure} Fig.(\ref{PFN}a), displays the dynamics of the probability ${p_f}$ of the phase bit flip error which effects on the travelling state through the amplitude damping channel for different values of the modes while transmissivity rate is large $(\eta=0.99)$, i.e. the travelling state is almost maximum. It is clear that, for small vales of modes, the probability $p_{f,m}$ increases gradually to reach its maximum value $(=0.5)$ for lager values of the field intensity $|\alpha|$. However for larger values of $m$, $p_{f,m}$ increases abruptly and reaches the maximum bound for small values of the field intensity. In Fig.(\ref{PFN}b), we assume that the travelling state (\ref{gen}) through the amplitude damping channel is partially entangled state, where we set the transmissivity rate $\eta=0.1$. In this case, the resistance of the input state (\ref{gen}) for the phase bit flip error is very fragile, where $p_{f,n}$ reaches its maximum values for smaller values of the field intensity. To quantify the degree of entanglement contained in the travelling state (\ref{gen}) through the amplitude damping channel, we rewrite the lower bound of entanglement to include $m$ modes. Therefore Eq.(\ref{K}) can be generalized as, \begin{equation}\label{c-gen} C[(1\otimes ...\otimes S\otimes S)\rho^{\pm}_{gen}]\leq C[(1\otimes...\otimes S\otimes S)|GHZ_{\alpha,...,\alpha}\rangle\langle GHZ_{\alpha,...,\alpha}|]C^{1/2...m}[\rho^{\pm}_{gen}]. \end{equation} To evaluate this bound of entanglement, one has to investigate the effect of the amplitude noise channel on the $m+1$ modes of GHZ state which in the orthogonal basis takes the form, \begin{equation} \ket{GHZ}=\frac{1}{\sqrt{2}}(\ket{u...u}+\ket{v...v}). 
\end{equation} The dynamics of $\ket{GHZ}$ state through the amplitude damping channel is given by, \begin{eqnarray} (1\otimes...\otimes S\otimes S)\ket{GHZ_{\alpha,\alpha,...,\alpha}}\bra{ GHZ_{\alpha,\alpha,...,\alpha}}&=&(1-P_{f,m})\ket{GHZ_{\alpha,\sqrt{\eta}\alpha,...,\sqrt{\eta}\alpha}}\bra{ GHZ_{\alpha,\sqrt{\eta}\alpha,...,\sqrt{\eta}\alpha}}\nonumber\\ &&+P_{f,m}Z\ket{GHZ_{\alpha,\sqrt{\eta}\alpha,...,\sqrt{\eta}\alpha}}\bra{ GHZ_{\alpha,\sqrt{\eta}\alpha,....,\sqrt{\eta}\alpha}}Z. \nonumber\\ \end{eqnarray} The amount of entanglement is quantified by means of the concurrence as, \begin{equation} \mathcal{C}_{\pm}=\frac{1-2\mathcal{P}_{f,m}}{1\pm exp\Bigl\{-2^{m-1}(1+\eta)|\alpha|^2\Bigr\}}\sqrt{1-exp(-2^m |\alpha|^2)}\sqrt{1-exp(-2^m\eta|\alpha|^2)}, \end{equation} where $\mathcal{C}_{+}$ and $\mathcal{C}_{-}$ for $\theta=0,\pi$ respectively and the concurrence $\mathcal{C}^{1/2,...m}[\rho^{\pm}_{gen}]=1$(see Eq.(19)). \begin{figure} \caption{Dynamics of the concurrence for different values of modes. The dot, dash-dot and solid curves for $m=2,5$, $8$ respectively and transmissivity rate $\eta=0.9$ (a) For $\theta=\pi$(b) For $\theta=0$.} \label{ConcG99} \end{figure} The dynamics of entanglement which is represented by concurrence for different values of the phase $\theta$ is described in Fig.\ref{ConcG99}, where the transmissivity rate $\eta$ is assumed to be fixed. It is clear that, for odd state i.e. $\theta=\pi$, the concurrence decreases as the field intensity increases as shown in Fig.(\ref{ConcG99}a). The decay of entanglement depends on the number of modes of the travelling state. For small values of modes, the entanglement decays smoothly and gradually to vanishes completely at larger values of the field intensity. However as the number of modes increases the entanglement decays fast and abruptly vanishes at small values of the field intensity. The dynamics of the concurrence for an even class of MMECS, is shown in Fig.(\ref{ConcG99}b), where $\theta=0$. It is clear that for $|\alpha|=0$, the travelling state is almost separable. However as soon as $|\alpha|$ increases, the entanglement increases sharply to reach its maximum value in a very small range of the $|\alpha|$ depending on the number of travelling modes. However, as $|\alpha|$ increases more, the entanglement decays gradually for small values of $m$ and hastily for larger values of $m$. From Figs.(\ref{ConcG99}$a \&\ref{ConcG99}b$),the entanglement vanishes for the same value of $|\alpha|$. Therefore, the amount of entanglement contained in the odd and even travelling states over the amplitude damping channel vanishes for the same value of the field intensity. Fig.(\ref{ConcG1}) shows the dynamics of entanglement for small value of $\eta(=0.1)$, i.e the travelling state over the amplitude damping channel has an initial small value of entanglement. The general behavior is the same as that depicted in Fig.(\ref{ConcG99}). However, the initial amount of entanglement is very small and vanishes very fast at small values of the field intensity. \begin{figure} \caption{The same as Fig.\ref{ConcG99} \label{ConcG1} \end{figure} \section{Conclusion} The dynamics of a maximum entangled state passes through an amplitude damping channel is discussed. We showed that,the entanglement decays gradually for larger values of the field intensity and small values of the transmissivity rate. However For small values of the transmissivity rate, the entanglement vanishes at small values of the field intensity. 
Therefore, to increase the resistance of the MMECS to entanglement degradation one has to increase the field's intensity when the transmissivity rate is large. The dynamics of a multi-mode entangled state passing through an amplitude damping channel is also investigated. This type of study displays the effect of the noise strength, the phase flip operator and the field intensity. We show that the travelling state suffers from the phase flip effect with high probability for small values of the noise-strength (absorption) parameter. However, the robustness of this multi-mode entangled state against the phase flip operator decreases as the photon absorption decreases, in which case the travelling state is a partially entangled state. Moreover, this resistance decreases as the field's intensity increases. On the other hand, the probability of the phase error depends on the number of photons in each mode, and it is maximized as the number of photons increases. The entanglement of the MMECS for different numbers of modes is investigated, where the entanglement decreases gradually for small numbers of modes. However, as the number of modes increases, the entanglement decays very fast for small values of the field's intensity. It is shown that the entanglement of both the odd and even MMECS states vanishes completely at the same values of the field intensity. The decay rate of the travelling entanglement depends on the field's intensity and the transmissivity rate. \textbf{Acknowledgement}: We are grateful for the helpful comments given by the referees, which improved our results. \end{document}
\begin{document} \title{On the Asymptotic Formula of $L'(1,\chi)$} \begin{abstract} Let $\chi$ be a quadratic Dirichlet character. In some literatures, various asymptotic formulae of $L'(1,\chi)$, under the assumption that $L(1,\chi)$ takes a small value, were derived. In this paper, we will give a new treatment unified for the odd and even cases, not depending on Kronecker limit formula. For imaginary quadratic fields, our result coincides with Proposition 22.10 in \cite{IK}. \end{abstract} \section{Introduction} Siegel zeros (if possible) cause many strange phenomena. For a good survey, we refer to \cite{I}. In \cite{G},\cite{GS} and Chapter 22 of \cite{IK}, some asymptotic formulae were derived. And for imaginary quadratic fields, our following main result is the same as Proposition 22.10 in \cite{IK} essentially. \begin{thm}\label{main4} Let $\chi$ be a primitive quadratic Dirichlet character modulo $q$. If $L(1,\chi)\ll (\log q)^{-26}$, then we have $$L'(1,\chi)=\frac{\pi^2}{6}\prod_{p|q}\left(1+\frac{1}{p}\right)\prod_{p\leqslant q\atop \chi(p)=1}\left(1+\frac{1}{p}\right)\left(1-\frac{1}{p}\right)^{-1}\cdot\Big(1+\mathcal{O}\left((\log q)^{-1/10}\right)\Big).$$ \end{thm} \section{The Proof of the Main Theorem} We need some preliminary results before proving it. \begin{lem}\label{lem:ide} Suppose $f(m,n)$ is an arithmetic function in two variables, and $x>u\geqslant 1$. Then \begin{eqnarray*} \sum_{k<\sqrt{\frac{x}{u}}}\sum_{u<n\leqslant \frac{x}{k^2}}f(k^2,n) &=& \sum_{d\leqslant u}\sum_{u<n\leqslant \frac{x}{d}}\sum_{r\leqslant \frac{x}{dn}}\lambda(d)f(dr,n) \\ & & {}+\sum_{u<n\leqslant x}\sum_{u<m\leqslant \frac{x}{n}}\sum_{d\mid m\atop d>u}\lambda(d)f(m,n), \end{eqnarray*} where $\lambda(n)=(-1)^{\Omega(n)}$~. \end{lem} Proof: Using the following property of the function $\lambda(n)$: \begin{equation*} \sum_{d\mid m}\lambda(d)= \begin{cases} 1, & \text{$m$ is a square,}\\ 0, & \text{otherwise.} \end{cases} \end{equation*} we get that \begin{eqnarray*} \sum_{k<\sqrt{\frac{x}{u}}}\sum_{u<n\leqslant \frac{x}{k^2}}f(k^2,n) &=& \sum_{u<mn\leqslant x\atop u<n}f(m,n)\sum_{d\mid m}\lambda(d) \\ &=& \sum_{u<mn\leqslant x\atop u<n}f(m,n)\sum_{d\mid m\atop d\leqslant u}\lambda(d)+\sum_{u<mn\leqslant x\atop u<n}f(m,n)\sum_{d\mid m\atop d>u}\lambda(d) \\ &=& \sum_{d\leqslant u}\sum_{u<n\leqslant \frac{x}{d}}\sum_{r\leqslant \frac{x}{dn}}\lambda(d)f(dr,n)\\ & &{}+\sum_{u<n\leqslant x}\sum_{u<m\leqslant \frac{x}{n}}\sum_{d\mid m\atop d>u}\lambda(d)f(m,n). \end{eqnarray*} \begin{lem}\label{lem:psi} Suppose $\chi$ is a primitive real Dirichlet character modulo $q$ and $q<x$. Let $\psi_u(z,\chi)=\sum\limits_{u<n\leqslant z}\Lambda(n)\chi(n)$, for $z>u$. 
we have \begin{eqnarray*} \sum_{k<\sqrt{\frac{x}{u}}\atop (k,q)=1}\psi_u\left(\frac{x}{k^2},\chi\right) &=& \sum_{u<m\leqslant x/u}\rho_u(m)\chi(m)\psi_u\left(\frac{x}{m},\chi\right) \\ & & {}+\mathcal{O}\left(\left(q\sqrt q+\frac{x}{\sqrt q}+u^2\sqrt q\right)\log^2 x\right), \end{eqnarray*} where $$\rho_u(m)=\sum_{d\mid m\atop d>u}\lambda(d).$$ \end{lem} Proof: Let $\alpha=a/q$ with $(a,q)=1$, and $e(t)=\exp(2\pi it)$.\\ Set $f(m,n)=\Lambda(n)e(\alpha mn).$ By Lemma \ref{lem:ide}, we have \begin{eqnarray}\label{rep} \sum_{k<\sqrt{\frac{x}{u}}}\sum_{u<n\leqslant \frac{x}{k^2}}\Lambda(n)e(\alpha k^2 n) &=& \sum_{d\leqslant u}\sum_{u<n\leqslant \frac{x}{d}}\sum_{r\leqslant \frac{x}{dn}}\lambda(d)\Lambda(n)e(\alpha drn)\\ & & {}+\sum_{u<n\leqslant x}\sum_{u<m\leqslant \frac{x}{n}}\rho_u(m)\Lambda(n)e(\alpha mn)\nonumber\\ &:=& T^\sharp(\alpha)+T^\flat(\alpha).\nonumber \end{eqnarray} Let $rn=l$, \begin{eqnarray*} T^\sharp(\alpha)&=& \sum_{d\leqslant u}\lambda(d)\sum_{l\leqslant x/d}e(\alpha dl)\sum_{n>u,\;n|l}\Lambda(n)\\ &=& \sum_{d\leqslant u}\lambda(d)\sum_{l\leqslant x/d}e(\alpha dl)\log l-\sum_{d\leqslant u}\lambda(d)\sum_{l\leqslant x/d}e(\alpha dl)\sum_{n\leqslant u,\;n|l}\Lambda(n)\\ &:=& T^\sharp_1(\alpha)-T^\sharp_2(\alpha). \end{eqnarray*} Applying the basic estimations $$\left|\sum\limits_{1\leq n\leq N}e(\alpha n)\right|\leqslant \min \left(N,\frac {1}{2\|\alpha\|}\right)$$ and $$\sum\limits_{1\leq n\leq N}\min \left(\frac xn,\frac {1}{2\|\alpha n\|}\right)\ll N\log q+\frac xq\log N+q\log q,$$ we derive by partial summation that \begin{eqnarray*} T^\sharp_1(\alpha)&\ll&\sum_{d\leqslant u}\left|\sum_{l\leqslant x/d}e(\alpha dl)\log l\right|\\ &\ll&\log x\sum_{d\leqslant u}\min \left(\frac xd,\;\frac {1}{2\|\alpha d\|}\right)\\ &\ll&(xq^{-1}+u+q)\log^2 x. \end{eqnarray*} For $T^\sharp_2(\alpha)$, let $l=rn$, we get \begin{eqnarray*} T^\sharp_2(\alpha)&\ll&\sum_{d\leqslant u}\sum_{n\leqslant u}\Lambda(n) \left|\sum_{r\leqslant \frac{x}{dn}}e(\alpha drn)\right|\\ &\ll&\sum_{d\leqslant u}\sum_{n\leqslant u}\Lambda(n)\min \left(\frac {x}{dn},\;\frac {1}{2\|\alpha dn\|}\right)\\ &\ll&\sum_{h\leqslant u^2}\min \left(\frac {x}{h},\frac {1}{2\|\alpha h\|}\right)\sum_{n|h}\Lambda(n)\\ &\ll&(xq^{-1}+u^2+q)\log^2 x. \end{eqnarray*} For a real primitive character $\chi$ modulo $q$, \begin{eqnarray*} \sum_{k<\sqrt{\frac{x}{u}}\atop (k,q)=1}\psi_u\left(\frac{x}{k^2},\chi\right)&=& \sum_{k<\sqrt{\frac{x}{u}}}\sum_{u<n\leqslant \frac{x}{k^2}} \Lambda(n)\chi(k^2n)\\ &=& \frac {1}{\tau(\chi)}\sum_{a=1}^{q}\chi(a)\sum_{k<\sqrt{\frac{x}{u}}}\sum_{u<n\leqslant \frac{x}{k^2}}\Lambda(n) e\left(\frac {ak^2n}{q}\right) \end{eqnarray*} Using (\ref{rep}), we get \begin{eqnarray*} \sum_{k<\sqrt{\frac{x}{u}}\atop (k,q)=1}\psi_u\left(\frac{x}{k^2},\chi\right) &=& \frac {1}{\tau(\chi)}\sum_{a=1}^{q}\chi(a)T^\sharp\left(\frac{a}{q}\right)+ \frac {1}{\tau(\chi)}\sum_{a=1}^{q}\chi(a)T^\flat\left(\frac{a}{q}\right)\\ &=& \mathcal{O}\left(\left(q\sqrt q+\frac{x}{\sqrt q}+u^2\sqrt q\right)\log^2 x\right)\\ & &{}+\sum_{u<n\leqslant x}\sum_{u<m\leqslant x/n}\rho_u(m)\Lambda(n)\chi(mn)\\ &=& \mathcal{O}\left(\left(q\sqrt q+\frac{x}{\sqrt q}+u^2\sqrt q\right)\log^2 x\right)\\ & &{}+\sum_{u<m\leqslant x/u}\rho_u(m)\chi(m)\psi_u\left(\frac{x}{m},\chi\right). \end{eqnarray*} $\square$\\ The following lemma shows that the absolute value of the means of some suitable multiplicative functions does vary slowly. 
\begin{lem}[\cite{GS1} corollary 3]\label{lem:absmean} Let $f$ be a complex-valued multiplicative function with $|f(n)|\leqslant 1$. Then for $1\leqslant \omega\leqslant x/10$, we have $$\frac{1}{x}\left|\sum_{n\leqslant x}f(n)\right|-\frac{\omega}{x}\left|\sum_{n\leqslant x/\omega}f(n)\right| \ll\left(\frac{\log(2\omega)}{\log x}\right)^{1-\frac{2}{\pi}}\log \left(\frac{\log x}{\log (2\omega)}\right)+\frac{\log\log x}{(\log x)^{2-\sqrt 3}},$$ The implied constant is absolute and computable. \end{lem} From the above lemma, we can deduce the following corollary. \begin{cor}\label{cor:real} Suppose $f$ is a real-valued multiplicative function with $|f(n)|\leqslant 1$ for all $n$. Then for $1\leqslant \omega\leqslant \sqrt x/2$, we have $$\frac{1}{x}\sum_{n\leqslant x}f(n)-\frac{\omega}{x}\sum_{n\leqslant x/\omega}f(n) \ll\left(\frac{\log(2\omega)}{\log x}\right)^{1-\frac{2}{\pi}}\log \left(\frac{\log x}{\log (2\omega)}\right)+\frac{\log\log x}{(\log x)^{2-\sqrt 3}}.$$ \end{cor} Proof: By Lemma~\ref{lem:absmean}, there exists an absolute constant $C_0>1$ such that $$\left|\,\frac{1}{x}\Bigg|\sum_{n\leqslant x}f(n)\Bigg|-\frac{\omega}{x}\Bigg|\sum_{n\leqslant x/\omega}f(n)\Bigg|\,\right| <C_0M(x,\omega),$$ where $$M(x,\omega)=\left(\frac{\log(2\omega)}{\log x}\right)^{1-\frac{2}{\pi}}\log \left(\frac{\log x}{\log (2\omega)}\right)+\frac{\log\log x}{(\log x)^{2-\sqrt 3}}.$$ If $\frac{1}{x}\left|\sum\limits_{n\leqslant x}f(n)\right|<2C_0M(x,\omega),$ then $\frac{\omega}{x}\left|\sum\limits_{n\leqslant x/\omega}f(n)\right|<3C_0M(x,\omega).$ Therefore $$\left|\,\frac{1}{x}\sum_{n\leqslant x}f(n)-\frac{\omega}{x}\sum_{n\leqslant x/\omega}f(n)\,\right| <5C_0M(x,\omega).$$ Otherwise, $$\frac{1}{x}\Bigg|\sum_{n\leqslant x}f(n)\Bigg|\geqslant 2C_0M(x,\omega).$$ Without loss of generality, we may assume $$\frac{1}{x}\sum_{n\leqslant x}f(n)\geqslant 2C_0M(x,\omega).$$ Then $$\frac{\omega}{x}\sum_{n\leqslant x/\omega}f(n)>C_0M(x,\omega)\quad\text{or}\quad<-C_0M(x,\omega).$$ If $\frac{\omega}{x}\sum\limits_{n\leqslant x/\omega}f(n)>C_0M(x,\omega)$, we have $$\left|\,\frac{1}{x}\sum_{n\leqslant x}f(n)-\frac{\omega}{x}\sum_{n\leqslant x/\omega}f(n)\,\right|= \left|\,\frac{1}{x}\Bigg|\sum_{n\leqslant x}f(n)\Bigg|-\frac{\omega}{x}\Bigg|\sum_{n\leqslant x/\omega}f(n)\Bigg|\,\right| <C_0M(x,\omega).$$ For the case $\frac{\omega}{x}\sum\limits_{n\leqslant x/\omega}f(n)<-C_0M(x,\omega)$, we can show that there exists a $x_0\in [\frac{x}{\omega},x]$ such that $\left|\sum\limits_{n\leqslant x_0}f(n)\right|\leqslant \frac{1}{2}$, since $\left(\sum\limits_{n\leqslant x}f(n)\right)\cdot \left(\sum\limits_{n\leqslant x/\omega}f(n)\right)<0$ and the real-valued function $f$ satisfies $|f(n)|\leqslant 1$ for all $n$. Hence we get that \begin{eqnarray}\label{ast} \left|\,\frac{1}{x}\bigg|\sum_{n\leqslant x}f(n)\bigg|-\frac{1}{x_0}\bigg|\sum_{n\leqslant x_0}f(n)\bigg|\,\right| &>& 2C_0M(x,\omega)-\frac{1}{2x_0}\\ &>& 2C_0M(x,\omega)-\frac{\omega}{2x}.\nonumber \end{eqnarray} On the other hand, from Theorem~\ref{lem:absmean}, we deduce that $$\left|\,\frac{1}{x}\bigg|\sum_{n\leqslant x}f(n)\bigg|-\frac{1}{x_0}\bigg|\sum_{n\leqslant x_0}f(n)\bigg|\,\right| < C_0M(x,x/x_0).$$ Since $x/x_0\leqslant \omega$, we have \begin{equation}\label{aast} \frac{\log x}{\log(2(x/x_0))}\geqslant\frac{\log x}{\log(2\omega)}. \end{equation} A basic observation is that the function $$\left(\frac{1}{t}\right)^{1-\frac{2}{\pi}}\log t$$ increases in $[1,\exp(\frac{1}{1-2/\pi})]$ and decreases in $[\exp(\frac{1}{1-2/\pi}),+\infty)$. 
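As a side check (not part of the original proof), the location of the maximum of this function can be confirmed numerically; in particular the turning point $\exp\left(\frac{1}{1-2/\pi}\right)\approx 15.7$ is the quantity used in the case distinction below. A minimal Python/NumPy sketch:
\begin{verbatim}
import numpy as np

c = 1 - 2/np.pi                   # exponent in the observation above
t_star = np.exp(1/c)              # stationary point of t^{-c} log t
print(t_star)                     # about 15.68, so indeed > 15

t = np.linspace(1.01, 200, 100000)
g = t**(-c) * np.log(t)
print(t[np.argmax(g)])            # numerical maximiser, close to t_star
\end{verbatim}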
\begin{itemize} \item For $\frac{\log x}{\log(2\omega)}\geqslant\exp(\frac{1}{1-2/\pi})$, combining \ref{aast}, we have $M(x,x/x_0)\leqslant M(x,\omega)$. Hence $$\left|\,\frac{1}{x}\bigg|\sum_{n\leqslant x}f(n)\bigg|-\frac{1}{x_0}\bigg|\sum_{n\leqslant x_0}f(n)\bigg|\,\right| < C_0M(x,\omega).$$ Furthermore, from $\frac{\log x}{\log(2\omega)}>\exp(\frac{1}{1-2/\pi})>15$, we have $2\omega<x^{1/15}$. It follows that for $x\geqslant 4$, we have $$\frac{\omega}{2x}<\frac{1}{4x^{14/15}}<\frac{\log\log x}{(\log x)^{2-\sqrt 3}}<C_0M(x,\omega),\qquad(\text{since}~C_0>1).$$ Taking the inequality \ref{ast} into consideration, we get $$ \left|\,\frac{1}{x}\bigg|\sum_{n\leqslant x}f(n)\bigg|-\frac{1}{x_0}\bigg|\sum_{n\leqslant x_0}f(n)\bigg|\,\right| > C_0M(x,\omega),$$ This leads to a contradiction! \item If $\frac{\log x}{\log (2\omega)}<\exp(\frac{1}{1-2/\pi})$, we get $2\leqslant \frac{\log x}{\log (2\omega)}<\exp(\frac{1}{1-2/\pi}),$ since $\omega\leqslant \sqrt x/2$.\\ Hence $\left(\frac{\log (2\omega)}{\log x}\right)^{1-\frac{2}{\pi}}\log\left(\frac{\log x}{\log(2\omega)}\right)\geqslant(\frac 12)^{1-\frac{2}{\pi}}\log 2$~.\\ In such a case, the result is implied by the trivial estimate: \begin{equation*} \frac{1}{x}\sum_{n\leqslant x}f(n)-\frac{\omega}{x}\sum_{n\leqslant x/\omega}f(n) \ll\; 1\;\ll \;\left(\frac{\log (2\omega)}{\log x}\right)^{1-\frac{2}{\pi}}\log\left(\frac{\log x} {\log(2\omega)}\right). \end{equation*} \end{itemize} In conclusion, we always have $$\frac{1}{x}\sum_{n\leqslant x}f(n)-\frac{\omega}{x}\sum_{n\leqslant x/\omega}f(n) \ll M(x,\omega).\qquad \square$$ \begin{lem}\label{LL} Suppose $2\sqrt x<u^2<x$ and $\chi$ is a non-principle Dirichlet character modulo $q$, we have \begin{eqnarray*} \sum_{u<m\leqslant x/u}\rho_u(m)\chi(m)\left(\frac{x}{m}-u\right) &=& \left(u\sum_{n\leqslant \frac{x}{u}}\lambda(n)\chi(n)\right) \left(L(1,\chi)\log {\frac{x}{eu^2}}+L'(1,\chi)\right) \\ & & {}+\mathcal{O}\left(\left(u^2\log{\frac{x}{u^2}}\right)\sqrt q\log q+\frac{x}{u}\right)\\ & & {}+\mathcal{O}\left(\epsilon(x,u)\cdot x\left(\log q+\left(\log {\frac{x}{u^2}}\right)^2\right)\right), \end{eqnarray*} where $$\epsilon(x,u)=\left(\left(\log{\frac{x}{u}}\right)^{\sqrt 3-2}+\left(\frac{\log {\frac{2x}{u^2}}}{\log{\frac{x}{u}}}\right)^{1-\frac{2}{\pi}}\right)\cdot \log\log x.$$ \end{lem} Proof: By partial summation, \begin{eqnarray*} \sum_{u<m\leqslant x/u}\rho_u(m)\chi(m)\left(\frac{x}{m}-u\right) &=& \sum_{u<t\leqslant \frac{x}{u}-1}\frac{x}{t(t+1)} \sum_{u<m\leqslant t}\rho_u(m)\chi(m) \\ & & {}+\left(\frac{x}{[x/u]}-u\right)\sum_{u<m\leqslant \frac{x}{u}}\rho_u(m)\chi(m). \end{eqnarray*} From the property of $\lambda(n)$, we have \begin{eqnarray*} \sum_{u<m\leqslant x/u}\rho_u(m)\chi(m) &=& \sum_{u<t^2\leqslant \frac{x}{u}\atop t>0}\chi(t^2) -\sum_{u<m\leqslant x/u}\left(\sum_{d\mid m\atop d\leqslant u}\lambda(d)\right)\chi(m) \\ &=&\mathcal{O}\left(\sqrt{\frac{x}{u}}\right)-\sum_{d\leqslant u}\lambda(d)\chi(d)\sum_{\frac{u}{d}<n\leqslant \frac{x}{u}}\chi(n)\\ &=&\mathcal{O}\left(\sqrt{\frac{x}{u}}+u\sqrt q\log q\right), \end{eqnarray*} In the last step, we used the P\"{o}lya-Vinogradov estimate.\\ On the other hand, we have \begin{eqnarray*} \sum_{u<m\leqslant t}\rho_u(m)\chi(m) &=& \sum_{u<m\leqslant t}\chi(m)\lambda(m)\sum_{d|m\atop d\leqslant \frac{m}{u}}\lambda(d)\\ &=& \sum_{u<m\leqslant t}\rho_u(m)\chi(m)=\sum_{d\leqslant \frac{t}{u}}\chi(d)\sum_{u\leqslant n\leqslant \frac{t}{d}}\lambda(n)\chi(n). 
\end{eqnarray*} Combining the above identities, we have \begin{eqnarray}\label{eq:sigma} {}\qquad\sum_{u<m\leqslant x/u}\rho_u(m)\chi(m)\left(\frac{x}{m}-u\right) &=& \sum_{u<t\leqslant \frac{x}{u}-1}\frac{x}{t(t+1)} \left(\sum_{d\leqslant \frac{t}{u}}\chi(d)\sum_{u\leqslant n\leqslant \frac{t}{d}}\lambda(n)\chi(n)\right)\\ & &{}+\mathcal{O}\left(\frac{u^2}{x}\left(\sqrt{\frac{x}{u}}+u\sqrt q\log q\right)\right)\nonumber\\ &:=& I_1-I_2+\mathcal{O}\left(\sqrt{\frac{x}{u}}+u\sqrt q\log q\right),\nonumber \end{eqnarray} where $$I_1=\sum_{u<t\leqslant \frac{x}{u}-1}\frac{x}{t(t+1)} \left(\sum_{d\leqslant \frac{t}{u}}\chi(d)\sum_{n\leqslant \frac{t}{d}}\lambda(n)\chi(n)\right),$$ $$I_2=\sum_{u<t\leqslant \frac{x}{u}-1}\frac{x}{t(t+1)} \left(\sum_{d\leqslant \frac{t}{u}}\chi(d)\sum_{n<u}\lambda(n)\chi(n)\right).$$ \begin{eqnarray*} I_2 &=& x\sum_{n<u}\lambda(n)\chi(n)\left(\sum_{d\leqslant \frac{x/u-1}{u}}\chi(d)\sum_{du\leqslant t \leqslant \frac{x}{u}-1\atop u<t}\frac{1}{t(t+1)}\right) \\ &=& x\sum_{n<u}\lambda(n)\chi(n)\left(\sum_{d\leqslant \frac{x/u-1}{u}}\chi(d)\left(\frac{1}{du}-\frac{1}{x/u}\right)+\mathcal{O}\left(\sum_{d\leqslant \frac{x/u-1}{u}}\frac{1}{(du)^2}\right)\right) \\ &=& x\sum_{n<u}\lambda(n)\chi(n)\left(\frac{1}{u}\sum_{d\leqslant \frac{x/u-1}{u}}\frac{\chi(d)}{d} +\mathcal{O}\left(\frac{u\sqrt q\log q}{x}\right)+\mathcal{O}\left(\frac{1}{u^2}\right)\right) \end{eqnarray*} Noticing that \begin{equation}\label{eq:appr} \sum_{d\leqslant \frac{x/u-1}{u}}\frac{\chi(d)}{d}=L(1,\chi)+\mathcal{O}\left(\frac{u^2}{x}\sqrt q\log q\right), \end{equation} we have \begin{equation}\label{eq:I2} I_2=\frac{x}{u}L(1,\chi)\sum_{n<u}\lambda(n)\chi(n)+\mathcal{O}\left(u^2\sqrt q\log q+\frac{x}{u}\right). \end{equation} Applying Corollary~\ref{cor:real}, we have \begin{equation}\label{eq:eqI2} I_2=\left(u\sum_{n\leqslant \frac{x}{u}}\lambda(n)\chi(n)\right)\cdot L(1,\chi)+\mathcal{O}\left(u^2\sqrt q\log q+\frac{x}{u}+ \epsilon(x,u)x\log q\right). \end{equation} By Corollary ~\ref{cor:real}, for $du\leqslant t\leqslant \frac{x}{u}-1$, we have \begin{equation}\label{eq:ide} \frac{1}{t/d}\sum_{n\leqslant \frac{t}{d}}\lambda(n)\chi(n)=\frac{1}{x/u}\sum_{n\leqslant \frac{x}{u}}\lambda(n)\chi(n) +\mathcal{O}\left(\epsilon(x,u)\right), \end{equation} where $$\epsilon(x,u)=\left(\left(\log{\frac{x}{u}}\right)^{\sqrt 3-2}+\left(\frac{\log {\frac{2x}{u^2}}}{\log{\frac{x}{u}}}\right)^{1-\frac{2}{\pi}}\right)\cdot \log\log x.$$ Due to the identity (\ref{eq:ide}), now we can handle the sum $I_1$. 
\begin{eqnarray*} I_1 &=& \left(\frac{u}{x}\sum_{n\leqslant \frac{x}{u}}\lambda(n)\chi(n)\right) \left(\sum_{u<t\leqslant \frac{x}{u}-1}\frac{x}{t+1}\sum_{d\leqslant \frac{t}{u}}\frac{\chi(d)}{d}\right) \\ & & {}+ \mathcal{O}\left(\epsilon(x,u)\left(\sum_{u<t\leqslant \frac{x}{u}-1}\frac{x}{t(t+1)}\sum_{d\leqslant \frac{t}{u}}\frac{t}{d}\right)\right)\\ &=& \left(\frac{u}{x}\sum_{n\leqslant \frac{x}{u}}\lambda(n)\chi(n)\right) \left(\sum_{d\leqslant \frac{x/u-1}{u}}\frac{\chi(d)}{d}\sum_{du\leqslant t\leqslant \frac{x}{u}-1\atop u<t}\frac{x}{t+1}\right) \\ & & {}+ \mathcal{O}\left(\epsilon(x,u)x\log^2\left(\frac{x}{u^2}\right)\right)\\ &=& \left(\frac{u}{x}\sum_{n\leqslant \frac{x}{u}}\lambda(n)\chi(n)\right) \left(x\sum_{d\leqslant \frac{x/u-1}{u}}\frac{\chi(d)}{d}\left(\log\frac{x}{u^2}-\log d\right)+ \mathcal{O}\left(\sum_{d\leqslant \frac{x/u-1}{u}}\frac{x}{d^2u}\right)\right) \\ & & {}+ \mathcal{O}\left(\epsilon(x,u)x\log^2\left(\frac{x}{u^2}\right)\right)\\ &=& \left(u\sum_{n\leqslant \frac{x}{u}}\lambda(n)\chi(n)\right) \left(\log\frac{x}{u^2}\sum_{d\leqslant \frac{x/u-1}{u}}\frac{\chi(d)}{d} -\sum_{d\leqslant \frac{x/u-1}{u}}\frac{\chi(d)}{d}\log d\right) \\ & & {}+\mathcal{O}\left(\frac{x}{u}+\epsilon(x,u)x\log^2\left(\frac{x}{u^2}\right)\right)\\ \end{eqnarray*} Since $$-\sum_{d\leqslant \frac{x/u-1}{u}}\frac{\chi(d)}{d}\log d =L'(1,\chi)+\mathcal{O}\left(\frac{u^2\log\frac{x}{u^2}}{x}\sqrt q\log q\right),$$ combining (\ref{eq:appr}), we get \begin{eqnarray}\label{eq:I1} I_1 &=& \left(u\sum_{n\leqslant \frac{x}{u}}\lambda(n)\chi(n)\right) \left(\log\frac{x}{u^2}\cdot L(1,\chi)+L'(1,\chi)\right)\\ & & {}+\mathcal{O}\left(\frac{x}{u}+\left(u^2\log\frac{x}{u^2}\right) \sqrt q\log q+\epsilon(x,u)x\log^2\left(\frac{x}{u^2}\right)\right).\nonumber \end{eqnarray} Finally, combing equations (\ref{eq:sigma}), (\ref{eq:I1}) and (\ref{eq:eqI2}), we get \begin{eqnarray*} \lefteqn{\sum_{u<m\leqslant x/u}\rho_u(m)\chi(m)\left(\frac{x}{m}-u\right)}\\ &=& \left(u\sum_{n\leqslant \frac{x}{u}}\lambda(n)\chi(n)\right) \left(\log\frac{x}{eu^2}\cdot L(1,\chi)+L'(1,\chi)\right) \\ & & {}+\mathcal{O}\left(\frac{x}{u}+\left(u^2\log\frac{x}{u^2}\right) \sqrt q\log q+\epsilon(x,u)x\left(\log q+\log^2\left(\frac{x}{u^2}\right)\right)\right).\quad\square \end{eqnarray*} \begin{lem}[see (22.109) in Chapter 22 of ~\cite{IK}]\label{lem:bound} Suppose $\chi$ is a non-principle Dirichlet character modulo $q$ and $x\geqslant q$, we have $$\sum_{n\leqslant x}\frac{\tau(n,\chi)}{n}=L(1,\chi)(\log x+\gamma)+L'(1,\chi)+\mathcal{O}\left(q^{1/4}x^{-1/2}\log x\right),$$ where $\tau(n,\chi)=\sum\limits_{d|n}\chi(d).$ \end{lem} The following corollary shows that if $L(1,\chi)$ takes small value, then $\chi(p)$ takes negative value for most of small prime $p$. \begin{cor}\label{cor:fapp} Suppose $\chi$ is a quadratic Dirichlet character modulo $q$ and $x\geqslant q$. Then $$\sum_{n\leqslant x}\Lambda(n)\chi(n)=-x+\mathcal{O}\left(\left(L(1,\chi)+q^{-1/4}\right)x\log^2 x+xe^{-c\sqrt{\log x}}+q\right),$$ where $c$ is some positive constant. \end{cor} Proof: By Lemma~\ref{lem:bound}, we have \begin{equation}\label{eq:base} \sum_{q<n\leqslant y}\frac{\tau(n,\chi)}{n}=L(1,\chi)\log {\frac{y}{q}}+\mathcal{O}\left(q^{-1/4}\log q\right). \end{equation} For a real character $\chi$, $\tau(n,\chi)\geqslant 0$. Then for $y>q$, we have \begin{equation}\label{eq:bound} \sum_{q<p\leqslant y}\frac{1+\chi(p)}{p}\log p\leqslant \log y\sum_{q<n\leqslant y}\frac{\tau(n,\chi)}{n}\ll \left(L(1,\chi)+q^{-1/4}\right)\log^2 y. 
\end{equation} Applying partial summation, we have \begin{eqnarray}\label{eq:fapp} \sum_{q<p\leqslant x}(1+\chi(p))\log p &=& [x]\cdot\sum_{q<p\leqslant x}\frac{(1+\chi(p))\log p}{p}\\ & & {}-\sum_{q<m\leqslant x-1}\sum_{q<p\leqslant m}\frac{(1+\chi(p))\log p}{p}\nonumber\\ &\ll&\left(L(1,\chi)+q^{-1/4}\right)x\log^2 x.\nonumber \end{eqnarray} On the other hand, \begin{eqnarray*} \sum_{n\leqslant x}\Lambda(n)\chi(n)&=& \sum_{n\leqslant x}\Lambda(n)(1+\chi(n))-\sum_{n\leqslant x}\Lambda(n)\\ &=& \sum_{p\leqslant x}(1+\chi(p))\log p-x+\mathcal{O}\left(xe^{-c\sqrt{\log x}}\right)\\ &=& -x+\sum_{q<p\leqslant x}(1+\chi(p))\log p+\mathcal{O}\left(q+xe^{-c\sqrt{\log x}}\right). \end{eqnarray*} Combining the estimate (\ref{eq:fapp}), we get the conclusion.$\qquad\square$ \begin{lem}[Proposition 4.5 in~\cite{GS2} or Proposition 3 in~\cite{GS1}]\label{lem:multiplicative} For any multiplicative function $f$ with $|f(p^k)|\leqslant 1$ for every prime power $p^k$, let $$\Theta(f,x):=\prod_{p\leqslant x}\left(1+\frac{f(p)}{p}+\frac{f(p^2)}{p^2}+\cdots\right)\left(1-\frac{1}{p}\right),$$ and $$s(f,x):=\sum_{p\leqslant x}\frac{|1-f(p)|}{p}.$$ For any $\varepsilon$ satisfying $1>\varepsilon\geqslant \frac{\log 2}{\log x}$, let $g$ be a completely multiplicative function satisfying the following condition: \begin{equation*} g(p)= \begin{cases} 1, & p\leqslant x^{\varepsilon}; \\ f(p), & p> x^{\varepsilon}. \end{cases} \end{equation*} Then we have $$\frac{1}{x}\sum_{n\leqslant x}f(n)=\Theta(f,x^\varepsilon)\frac{1}{x}\sum_{m\leqslant x}g(m)+\mathcal{O}(\varepsilon\exp(s(f,x))),$$ where the implied constant is absolute. \end{lem} Similarly to Corollary~\ref{cor:fapp}, we get the following result. \begin{cor}\label{cor:fap} Suppose $\chi$ is a quadratic Dirichlet character modulo $q$ and $x\geqslant q$. Then $$\sum_{n\leqslant x}\chi(n)\lambda(n)=P(q)x+\mathcal{O}\left(\left(L(1,\chi)+q^{-1/4}\right) x\log x+x\frac{\log^3 q}{\log x}\right),$$ where $$P(q)=\prod_{p\leqslant q}\left(1-\frac{1}{p}\right)\left(1+\frac{\chi(p)}{p}\right)^{-1}.$$ \end{cor} Proof: Let $f$ be the completely multiplicative function uniquely determined by the following condition: \begin{equation*} f(p)= \begin{cases} \chi(p)\lambda(p)=-\chi(p), & p\leqslant q; \\ 1, & p> q. \end{cases} \end{equation*} Denote $$E(x)=\{n\leqslant x\,|\,\text{ $n$ has at least one prime factor $p$ such that $p>q$ and $\lambda(p)\chi(p)\neq 1$~}\}.$$ Obviously, we have \begin{eqnarray*} \left|\sum_{n\leqslant x}\chi(n)\lambda(n)-\sum_{n\leqslant x}f(n)\right| &\leqslant& 2|E(x)| \\ &\ll& \sum_{q<p\leqslant x\atop \chi(p)\neq -1}\sum_{n\leqslant x\atop p\mid n}1 \\ &\ll& x\sum_{q<p\leqslant x\atop \chi(p)\neq -1}\frac{1}{p}\\ &\ll& x\sum_{q<p\leqslant x}\frac{1+\chi(p)}{p}. \end{eqnarray*} Similarly to (\ref{eq:bound}), from (\ref{eq:base}) we get that $$\sum_{q<p\leqslant x}\frac{1+\chi(p)}{p}\leqslant \sum_{q<n\leqslant x}\frac{\tau(n,\chi)}{n}\ll \left(L(1,\chi)+q^{-1/4}\right) \log x.$$ Hence \begin{equation}\label{eq:two} \sum_{n\leqslant x}\chi(n)\lambda(n)=\sum_{n\leqslant x}f(n)+\mathcal{O}\left(\left(L(1,\chi)+q^{-1/4}\right) x\log x\right).
\end{equation} Now let $g$ be the completely multiplicative function with $g(n)\equiv 1$.\\ Note that $g(p)=f(p)$ for $p>q$; by Lemma~\ref{lem:multiplicative} we have $$\frac{1}{x}\sum_{n\leqslant x}f(n)=\Theta\left(f,q\right)\frac{1}{x}\sum_{m\leqslant x}g(m)+\mathcal{O}\left(\frac{\log q}{\log x}\exp(s(f,x))\right),$$ where $$\Theta\left(f,q\right)=\prod_{p\leqslant q}\left(1-\frac{1}{p}\right)\left(1+\frac{\chi(p)}{p}\right)^{-1},$$ $$s(f,x)=\sum_{p\leqslant x}\frac{1-f(p)}{p}=\sum_{p\leqslant q}\frac{1+\chi(p)}{p}.$$ Therefore, $$\sum_{n\leqslant x}f(n)=\Theta\left(f,q\right)x+\mathcal{O}\left(x\frac{\log^3 q}{\log x}\right).$$ Combining (\ref{eq:two}), we have $$\sum_{n\leqslant x}\chi(n)\lambda(n)=\Theta\left(f,q\right)x+\mathcal{O}\left(\left(L(1,\chi)+q^{-1/4}\right) x\log x+x\frac{\log^3 q}{\log x}\right).\qquad \square$$ \textbf{The proof of Theorem~\ref{main4}}: Now we compute the sum $$\sum_{u<m\leqslant x/u}\rho_u(m)\chi(m)\psi_u\left(\frac{x}{m},\chi\right)$$ in two different ways under our assumption, which leads to the main result.\\ Let $T=\exp\left((\log q)^{8}\right)$. We choose $x=T$ and $u=\sqrt{\frac{T}{q}}$. For sufficiently large $q$, we have $u\geqslant q$ and $u^2>2\sqrt x$. From Lemma~\ref{lem:psi} and Corollary~\ref{cor:fapp}, it follows that \begin{eqnarray*} \lefteqn{\sum_{k<(qT)^{1/4}\atop (k,q)=1}\left(-\frac{T}{k^2}+\mathcal{O}\left(\sqrt{T/q}\right)\right)}\\ & & {}+\mathcal{O}\left(\sum_{k<(qT)^{1/4}}\left(\left(L(1,\chi)+q^{-1/4}\right)\frac{T}{k^2}\log^2 T+\frac{T}{k^2}e^{-c\sqrt{\log (T/k^2)}}+q\right)\right)\\ &=&-\sum_{\sqrt{\frac{T}{q}}<m\leqslant (qT)^{1/2}}\rho_u(m)\chi(m)\left(\frac{T}{m}-\sqrt{T/q}\right)+\mathcal{O}\left(\frac{T}{\sqrt q}\log^2 T\right) \\ & & {}+\mathcal{O}\left(\sum_{\sqrt{\frac{T}{q}}<m\leqslant (qT)^{1/2}}d(m)\left(\left(L(1,\chi)+q^{-1/4}\right)\frac{T}{m}\log^2 T+\frac{T}{m}e^{-c\sqrt{\log (T/m)}}+q\right)\right). \end{eqnarray*} Since $\sum\limits_{m\leqslant y}d(m)\ll y\log y$, by partial summation we have $$\sum_{\sqrt{\frac{T}{q}}<m\leqslant (qT)^{1/2}}\frac{d(m)}{m}\ll \log T\log q,$$ and $$\sum_{\sqrt{\frac{T}{q}}<m\leqslant (qT)^{1/2}}\frac{d(m)}{m}e^{-c\sqrt{\log (T/m)}}\ll(\log T)^2e^{-c\sqrt{\frac{1}{2}\log (T/q)}}.$$ Hence \begin{eqnarray}\label{identity} T\sum_{k<(qT)^{1/4}\atop (k,q)=1}\frac{1}{k^2} &=& \sum_{\sqrt{\frac{T}{q}}<m\leqslant (qT)^{1/2}}\rho_u(m)\chi(m)\left(\frac{T}{m}-\sqrt{T/q}\right)\\ & &{}+\mathcal{O}\left(\left(L(1,\chi)+q^{-1/4}\right)T(\log T)^3\log q\right).\nonumber \end{eqnarray} On the other hand, it follows from Lemma~\ref{LL} and Corollary~\ref{cor:fap} that \begin{eqnarray*} \lefteqn{\sum_{\sqrt{\frac{T}{q}}<m\leqslant (qT)^{1/2}}\rho_u(m)\chi(m)\left(\frac{T}{m}-\sqrt{T/q}\right)}\\ &=& \left(P(q)T+\mathcal{O}\left(\left(L(1,\chi)+q^{-1/4}\right)T\log T+T\frac{\log^3 q}{\log T}\right)\right)\\ & &{}\times\left(L'(1,\chi)+\mathcal{O}(L(1,\chi)\log q)\right) \\ & &{}+\mathcal{O}\left(\frac{T}{\sqrt q}\log^2 q+T\log^2 q\left((\log T)^{\sqrt 3-2}+\left(\frac{\log q} {\log T}\right)^{1-\frac{2}{\pi}}\right)\cdot \log\log T\right). \end{eqnarray*} Noting that $\log T=(\log q)^8$ and $P(q)\gg (\log q)^{-2}$, under the assumption that $L(1,\chi)\ll (\log q)^{-26}$ we have \begin{equation}\label{indrect} \sum_{\sqrt{\frac{T}{q}}<m\leqslant (qT)^{1/2}}\rho_u(m)\chi(m)\left(\frac{T}{m}-\sqrt{T/q}\right)= P(q)L'(1,\chi)T+\mathcal{O}\left(T(\log q)^{-1/10}\right).
\end{equation} Besides, \begin{equation}\label{coefficient} \sum_{k<(qT)^{1/4}\atop (k,q)=1}\frac{1}{k^2}=\frac{\pi^2}{6}\prod_{p|q}\left(1-\frac{1}{p^2}\right)+\mathcal{O}\left((qT)^{-1/4}\right). \end{equation} Inserting (\ref{indrect}) and (\ref{coefficient}) into (\ref{identity}) gives $$P(q)L'(1,\chi)=\frac{\pi^2}{6}\prod_{p|q}\left(1-\frac{1}{p^2}\right)+\mathcal{O}\left((\log q)^{-1/10}\right).$$ It follows that $$L'(1,\chi)=\frac{\pi^2}{6}\prod_{p|q}\left(1+\frac{1}{p}\right)\prod_{p\leqslant q\atop \chi(p)=1}\left(1+\frac{1}{p}\right)\left(1-\frac{1}{p}\right)^{-1}\Big(1+\mathcal{O}\left((\log q)^{-1/10}\right)\Big).$$ $\square$ E-mail address: [email protected]\par Academy of Mathematics and System Science, Chinese Academy of Sciences, Beijing,\par 100190, P.R.China \end{document}
\begin{document} \title{Cylindrical Wiener processes} \begin{abstract} In this work cylindrical Wiener processes on Banach spaces are defined by means of cylindrical stochastic processes, which are a well-considered mathematical object. This approach allows a definition which is a simple, straightforward extension of the real-valued situation. We apply this definition to introduce a stochastic integral with respect to cylindrical Wiener processes. Again, this definition is a straightforward extension of the real-valued situation which results now in simple conditions on the integrand. In particular, we do not have to put any geometric constraints on the Banach space under consideration. Finally, we relate this integral to well-known stochastic integrals in the literature. \end{abstract} \tableofcontents \section{Introduction} Cylindrical Wiener processes appear in a huge variety of models in infinite dimensional spaces as a source of random noise or random perturbation. In almost the same number as models with cylindrical Wiener processes one can find different definitions of cylindrical Wiener processes in the literature. Most of these definitions suffer from the fact that they do not comprehensibly generalise the real-valued definition to the infinite dimensional situation. In this note cylindrical Wiener processes on a Banach space are introduced by virtue of the core mathematical object which underlies all these definitions but which is most often not mentioned: a {\em cylindrical stochastic process}. A cylindrical stochastic process is a generalised stochastic process whose distribution at a fixed time defines only a finitely additive set function on the Banach space. These finitely additive set functions are called {\em cylindrical measures}. We give a very transparent definition of a weakly cylindrical Wiener process as a {\em cylindrical stochastic process which is Wiener}. Our approach has the side-effect that the appearance of the word {\em cylindrical} is given a reason. This definition of a weakly cylindrical Wiener process is a straightforward extension of the real-valued situation but it is immediately seen to be too general in order to be analytically tractable. An obvious request is that the covariance operator of the associated Gaussian cylindrical measures exists and has properties analogous to those in the case of ordinary Gaussian measures on infinite-dimensional spaces. This leads to a second definition of a strongly cylindrical Wiener process. For strongly cylindrical Wiener processes we derive a representation by a series with independent real-valued Wiener processes. On the other hand, we see that by such a series a strongly cylindrical Wiener process can be constructed. The obvious question of when a cylindrical Wiener process is actually a Wiener process in the ordinary sense can be answered easily thanks to our approach, with the self-suggesting answer: if and only if the underlying cylindrical measure extends to a countably additive set function, i.e. a measure. Utilising the approach by cylindrical measures further, we define a stochastic integral with respect to cylindrical Wiener processes. Again, this definition is a straightforward extension of the real-valued situation which results now in simple conditions on the integrand. In particular, we do not have to put any geometric constraints on the Banach space under consideration. The cylindrical approach yields that the distribution of the integral is a cylindrical measure.
We finish with two corollaries giving conditions such that the cylindrical distribution of the stochastic integral extends to a probability measure. These results relate our integral to other well-known integrals in the literature. To summarise, this article introduces two major ideas: \begin{itemize} \item A cylindrical Wiener process is defined by a straightforward extension of the real-valued situation and the requirement of having a nice covariance operator. It can be seen that most of the existing definitions in the literature have the same purpose of guaranteeing the existence of an analytically tractable covariance operator. Thus, our definition unifies the existing definitions and respects the core mathematical object underlying the idea of a cylindrical Wiener process. \item Describing a random dynamic in an infinite dimensional space by an ordinary stochastic process requires the knowledge that it is a {\em real} infinite dimensional phenomenon, i.e. that there exists a probability measure on the state space. When describing the dynamic by a cylindrical stochastic process, on the other hand, it is sufficient to know only the finite dimensional dynamic under the application of all linear bounded functionals. The stochastic integral introduced here allows the development of such a theory of cylindrical stochastic dynamical systems and has the advantage that no constraints are put on the underlying space. \end{itemize} We do not claim to accomplish very new mathematics in this work, but the innovation might be seen in relating several mathematical objects, which results in a straightforward definition of a cylindrical Wiener process and its integral. Even though these relations might be well known to some mathematicians, they do not seem to be accessible in written form. Our work relies on several ingredients from the theory of cylindrical and ordinary measures on infinite dimensional spaces. Based on the monographs by Bogachev \cite{Bogachev98} and Vakhaniya et al \cite{Vaketal} we give an introduction to this subject. The section on $\gamma$-radonifying operators is based on the notes by Jan van Neerven \cite{JanSeminar}. Cylindrical Wiener processes in Banach or Hilbert spaces and their integral are treated for example in the monographs Da Prato and Zabczyk \cite{DaPrato92}, Kallianpur \cite{Kallianpur} and Metivier and Pellaumail \cite{Metivier80}. In van Gaans \cite{vanGaans} the series representation of the cylindrical Wiener process is used to define a stochastic integral in Hilbert spaces and in Berman and Root \cite{BerRo83} an approach similar to ours is introduced. The fundamental observation in this work that not every Gaussian cylindrical measure has a nice covariance operator was first pointed out to me by Dave Applebaum. \section{Preliminaries} Throughout these notes let $U$ be a separable Banach space with dual $U^\ast$. The dual pairing is denoted by $\scapro{u}{u^\ast}$ for $u\in U$ and $u^\ast\in U^\ast$. If $V$ is another Banach space then $L(U,V)$ is the space of all linear, bounded operators from $U$ to $V$ equipped with the operator norm $\norm{\cdot}_{U\to V}$. The Borel $\sigma$-algebra is denoted by $\Borel(U)$. Let $\Gamma$ be a subset of $U^\ast$. Sets of the form \begin{align*} \Z(u_1^\ast,\dots ,u_n^\ast,B):= \{u\in U:\, (\scapro{u}{u_1^\ast},\cdots, \scapro{u}{u_n^\ast})\in B\}, \end{align*} where $u_1^\ast,\dots, u_n^\ast\in \Gamma$ and $B\in \Borel(\R^n)$ are called {\em cylindrical sets} or {\em cylinders with respect to $(U,\Gamma)$}.
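The following small sketch is ours and not part of the original exposition; it only illustrates the finite dimensional character of cylindrical sets. We replace $U$ by $\R^d$ and the functionals $u_1^\ast,\dots,u_n^\ast$ by coordinate projections, choices that are purely hypothetical, and check membership in a cylindrical set $\Z(u_1^\ast,\dots,u_n^\ast,B)$ for a box $B$ using only the $n$ functional values.
\begin{verbatim}
# Illustrative sketch (U replaced by R^d, functionals by coordinate
# projections): membership in Z(u_1*,...,u_n*, B) depends on finitely
# many functional values only.
import numpy as np

def in_cylinder(u, functionals, lower, upper):
    values = np.array([f(u) for f in functionals])  # (<u,u_1*>,...,<u,u_n*>)
    return bool(np.all(values >= lower) and np.all(values <= upper))

d = 1000
u = np.random.default_rng(0).standard_normal(d)           # a point of U
functionals = [lambda x, i=i: x[i] for i in (0, 17, 256)]  # n = 3
print(in_cylinder(u, functionals,
                  lower=np.array([-2.0, -2.0, -2.0]),
                  upper=np.array([2.0, 2.0, 2.0])))
\end{verbatim}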
The set of all cylindrical sets is denoted by $Z(U,\Gamma)$, which turns out to be an algebra. The generated $\sigma$-algebra is denoted by $\Cc(U,\Gamma)$ and it is called {\em cylindrical $\sigma$-algebra with respect to $(U,\Gamma)$}. If $\Gamma=U^\ast$ we write $\Cc(U):=\Cc(U,\Gamma)$. If $U$ is separable then the Borel $\sigma$-algebra $\Borel(U)$ and the cylindrical $\sigma$-algebra $\Cc(U)$ coincide. A function $\mu:\Cc(U)\to [0,\infty]$ is called {\em cylindrical measure on $\Cc(U)$}, if for each finite subset $\Gamma\subseteq U^\ast$ the restriction of $\mu$ to the $\sigma$-algebra $\Cc(U,\Gamma)$ is a measure. A cylindrical measure is called finite if $\mu(U)<\infty$. For every function $f:U\to\R$ which is measurable with respect to $\Cc(U,\Gamma)$ for a finite subset $\Gamma\subseteq U^\ast$ the integral $\int f(u)\,\mu(du)$ is well defined as a real-valued Lebesgue integral if it exists. In particular, the characteristic function $\varphi_\mu:U^\ast\to\C$ of a finite cylindrical measure $\mu$ is defined by \begin{align*} \varphi_{\mu}(u^\ast):=\int e^{i\scapro{u}{u^\ast}}\,\mu(du)\qquad\text{for all }u^\ast\in U^\ast. \end{align*} In contrast to measures on infinite dimensional spaces there is an analogue of Bochner's Theorem for cylindrical measures: \begin{theorem}\label{th.bochner} A function $\varphi:U^\ast\to\C$ is a characteristic function of a cylindrical measure on $U$ if and only if \begin{enumerate} \item[{\rm (a)}] $\varphi(0)=1$; \item[{\rm (b)}] $\varphi$ is positive definite; \item[{\rm (c)}] the restriction of $\varphi$ to every finite dimensional subspace $\Gamma\subseteq U^\ast$ is continuous with respect to the norm topology. \end{enumerate} \end{theorem} For a finite set $\{u_1^\ast,\dots, u_n^\ast\}\subseteq U^\ast$ a cylindrical measure $\mu$ defines by \begin{align*} \mu_{u_1^\ast,\dots, u_n^\ast}:\Borel(\R^n)\to [0,\infty],\qquad \mu_{u_1^\ast,\dots, u_n^\ast}(B):=\mu\big(\left\{u\in U:\, (\scapro{u}{u_1^\ast},\dots, \scapro{u}{u_n^\ast})\in B\right\}\big) \end{align*} a measure on $\Borel(\R^n)$. We call $\mu_{u_1^\ast,\dots, u_n^\ast}$ {\em the image of the measure $\mu$ under the mapping $u\mapsto (\scapro{u}{u_1^\ast},\dots, \scapro{u}{u_n^\ast})$}. Consequently, we have for the characteristic function $\varphi_{\mu_{u_1^\ast,\dots, u_n^\ast}}$ of $\mu_{u_1^\ast,\dots,u_n^\ast}$ that \begin{align*} \varphi_{\mu_{u_1^\ast,\dots, u_n^\ast}}(\beta_1,\dots, \beta_n) =\varphi_\mu(\beta_1u_1^\ast+\dots + \beta_nu_n^\ast) \end{align*} for all $\beta_1,\dots , \beta_n\in\R$. Cylindrical measures are uniquely determined by their characteristic functions and therefore by their one-dimensional distributions $\mu_{u^\ast}$ for $u^\ast\in U^\ast$. \section{Gaussian cylindrical measures} A measure $\mu$ on $\Borel(\R)$ is called Gaussian with mean $m\in\R$ and variance $\sigma^2\geqslant 0$ if either $\mu=\delta_m$ and $\sigma^2=0$ or it has the density \begin{align*} f:\R\to\Rp,\qquad f(s)=\tfrac{1}{\sqrt{2\pi\sigma^2}} \exp\left(-\tfrac{1}{2\sigma^2}(s-m)^2\right). \end{align*} In case of a multidimensional or an infinite dimensional space $U$ a measure $\mu$ on $\Borel(U)$ is called Gaussian if the image measures $\mu_{u^\ast}$ are Gaussian for all $u^\ast\in U^\ast$. Gaussian cylindrical measures are defined analogously but for reasons explained below we have to distinguish between two cases: weakly and strongly Gaussian.
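Before making this distinction precise we add a small numerical sketch of our own, which is not part of the original text: for a Gaussian measure on $\R^d$, serving as a stand-in for $U$ with covariance matrix $Q$, it checks by Monte Carlo that the image measure under a functional $u^\ast$ is Gaussian with variance $u^{\ast\top}Qu^\ast$ and that its characteristic function is obtained by evaluating $\varphi_\mu$ along $\beta u^\ast$; all parameters are hypothetical choices.
\begin{verbatim}
# Illustrative sketch (U replaced by R^d, Q a covariance matrix): the
# image measure mu_{u*} is Gaussian with variance u*^T Q u*, and
# phi_{mu_{u*}}(beta) = phi_mu(beta u*).
import numpy as np

rng = np.random.default_rng(1)
d = 5
A = rng.standard_normal((d, d))
Q = A @ A.T
samples = rng.multivariate_normal(np.zeros(d), Q, size=200_000)

u_star = rng.standard_normal(d)
proj = samples @ u_star                          # samples of <u, u*>
beta = 0.7
empirical = np.mean(np.exp(1j * beta * proj))    # char. fct. of mu_{u*}
theoretical = np.exp(-0.5 * beta**2 * u_star @ Q @ u_star)
print(np.var(proj), u_star @ Q @ u_star)         # both approximately equal
print(abs(empirical - theoretical))              # small Monte Carlo error
\end{verbatim}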
\begin{definition} A cylindrical measure $\mu$ on $\Cc(U)$ is called {\em weakly Gaussian} if $\mu_{u^\ast}$ is Gaussian on $\Borel(\R)$ for every $u^\ast\in U^\ast$. \end{definition} Because of well-known properties of Gaussian measures in finite dimensional Euclidean spaces a cylindrical measure $\mu$ is weakly Gaussian if and only if $\mu_{u_1^\ast, \dots, u_n^\ast}$ is a Gaussian measure on $\Borel(\R^n)$ for all $u_1^\ast, \dots, u^\ast_n\in U^\ast$ and all $n\in\N$. \begin{theorem}\label{th.Gausschar} Let $\mu$ be a weakly Gaussian cylindrical measure on $\Cc(U)$. Then its characteristic function $\varphi_{\mu}$ is of the form \begin{align}\label{eq.charGauss} \varphi_{\mu}:U^\ast \to\C,\qquad \varphi_{\mu}(u^\ast) =\exp\left( i m(u^\ast) -\tfrac{1}{2} s(u^\ast)\right), \end{align} where the functions $m:U^\ast \to \R$ and $ s:U^\ast \to \Rp$ are given by \begin{align*} m(u^\ast)=\int_{U} \scapro{u}{u^\ast}\,\mu(du), \qquad s(u^\ast)=\int_{U} \scapro{u}{u^\ast}^2 \mu(du) - (m(u^\ast))^2. \end{align*} Conversely, if $\mu$ is a cylindrical measure with characteristic function of the form \begin{align*} \varphi_{\mu}:U^\ast \to\C,\qquad \varphi_{\mu}(u^\ast) =\exp\left( i m(u^\ast) -\tfrac{1}{2} s(u^\ast)\right), \end{align*} for a linear functional $m:U^\ast \to \R$ and a quadratic form $ s:U^\ast \to \Rp$, then $\mu$ is a weakly Gaussian cylindrical measure. \end{theorem} \begin{proof} Follows from \cite[Prop.IV.2.7]{Vaketal}, see also \cite[p.393]{Vaketal}. \end{proof} \begin{example} Let $H$ be a separable Hilbert space. Then the function \begin{align*} \varphi:H\to\C,\qquad \varphi(u)=\exp(-\tfrac{1}{2}\norm{u}^2_H) \end{align*} satisfies the condition of Theorem \ref{th.Gausschar} and therefore there exists a weakly Gaussian cylindrical measure $\gamma$ with characteristic function $\varphi$. We call this cylindrical measure {\em standard Gaussian cylindrical measure on $H$}. If $H$ is infinite dimensional the cylindrical measure $\gamma$ is not a measure, see \cite[Cor.2.3.2]{Bogachev98}. Note that this example might not be applicable for a Banach space $U$ because then $x\mapsto \norm{x}^2_U$ need not be a quadratic form. \end{example} For a weakly Gaussian cylindrical measure $\mu$ one defines for $u^\ast,v^\ast\in U^\ast$: \begin{align*} r(u^\ast,v^\ast):=\int_U \scapro{u}{u^\ast}\scapro{u}{v^\ast}\,\mu(du) - \int_U \scapro{u}{u^\ast}\,\mu(du) \int_U \scapro{u}{v^\ast}\,\mu(du). \end{align*} These integrals exist as $\mu$ is a Gaussian measure on the cylindrical $\sigma$-algebra generated by $u^\ast$ and $v^\ast$. One defines the {\em covariance operator $Q$ of $\mu$} by \begin{align*} Q:U^\ast\to (U^\ast)^\prime,\qquad (Qu^\ast)v^\ast:=r(u^\ast,v^\ast) \qquad\text{for all }v^\ast\in U^\ast, \end{align*} where $(U^\ast)^\prime$ denotes the algebraic dual of $U^\ast$, i.e. all linear but not necessarily continuous functionals on $U^\ast$. Hence, the characteristic function $\varphi_\mu$ of $\mu$ can be written as \begin{align*} \varphi_{\mu}:U^\ast \to\C,\qquad \varphi_{\mu}(u^\ast) =\exp\left( i m(u^\ast) -\tfrac{1}{2} (Qu^\ast)u^\ast\right). \end{align*} The cylindrical measure $\mu$ is called {\em centred} if $m(u^\ast)=0$ for all $u^\ast\in U^\ast$. If $\mu$ is a Gaussian measure or, more generally, a measure of weak order $2$, i.e. \begin{align*} \int_{U}\abs{\scapro{u}{u^\ast}}^2\, \mu(du)<\infty\qquad\text{for all }u^\ast\in U^\ast, \end{align*} then the covariance operator $Q$ is defined in the same way as above.
However, in this case it turns out that $Qu^\ast$ is not only continuous and thus in $U^{\ast\ast}$ but even in $U$ considered as a subspace of $U^{\ast\ast}$, see \cite[Thm.III.2.1]{Vaketal}. This is basically due to properties of the Pettis integral in Banach spaces. For cylindrical measures we have to distinguish this property and define: \begin{definition} A centred weakly Gaussian cylindrical measure $\mu$ on $\Cc(U)$ is called {\em strongly Gaussian} if the covariance operator $Q:U^\ast\to (U^\ast)^\prime$ is $U$-valued. \end{definition} Below Example \ref{ex.weaknotstrong} gives an example of a weakly Gaussian cylindrical measure which is not strongly Gaussian. This example can be constructed in every infinite dimensional space, in particular in every Hilbert space. Strongly Gaussian cylindrical measures exhibit another very important property: \begin{theorem}\label{th.strongGauss} For a cylindrical measure $\mu$ on $\Cc(U)$ the following are equivalent: \begin{enumerate} \item[{\rm (a)}] $\mu$ is a continuous linear image of the standard Gaussian cylindrical measure on a Hilbert space; \item[{\rm (b)}] there exists a symmetric positive operator $Q:U^\ast\to U$ such that \begin{align*} \varphi_{\mu}(u^\ast)= \exp\left(-\tfrac{1}{2} \scapro{Qu^\ast}{u^\ast}\right) \qquad\text{for all }u^\ast\in U^\ast. \end{align*} \end{enumerate} \end{theorem} \begin{proof} See \cite[Prop.VI.3.3]{Vaketal}. \end{proof} Theorem \ref{th.strongGauss} provides an example of a weakly Gaussian cylindrical measure which is not strongly Gaussian: \begin{example}\label{ex.weaknotstrong} For a discontinuous linear functional $f:U^\ast\to\R$ define \begin{align*} \varphi:U^\ast \to\C,\qquad \varphi(u^\ast) =\exp\left( - \frac{1}{2}(f(u^\ast))^2\right). \end{align*} Then $\varphi$ is the characteristic function of a weakly Gaussian cylindrical measure due to Theorem \ref{th.Gausschar} but this measure cannot be strongly Gaussian by Theorem \ref{th.strongGauss} because every symmetric positive operator $Q:U^\ast\to U$ is continuous. \end{example} \section{Reproducing kernel Hilbert space} According to Theorem \ref{th.strongGauss} a centred strongly Gaussian cylindrical measure is the image of the standard Gaussian cylindrical measure on a Hilbert space $H$ under an operator $F\in L(H,U)$. In this section we introduce a possible construction of this Hilbert space $H$ and the operator $F$. For this purpose we start with a bounded linear operator $Q:U^\ast\to U$, which is positive, \begin{align*} \scapro{Qu^\ast}{u^\ast}\geqslant 0\qquad\text{for all }u^\ast\in U^\ast, \end{align*} and symmetric, \begin{align*} \scapro{Qu^\ast}{v^\ast}=\scapro{Qv^\ast}{u^\ast}\qquad\text{for all }u^\ast,v^\ast\in U^\ast. \end{align*} On the range of $Q$ we define a bilinear form by \begin{align*} [Qu^\ast,Qv^\ast]_{H_Q}:=\scapro{Qu^\ast}{v^\ast}. \end{align*} It can be seen easily that this defines an inner product $[\cdot,\cdot]_{H_Q}$. Thus, the range of $Q$ is a pre-Hilbert space and we denote by $H_Q$ the real Hilbert space obtained by its completion with respect to $[ \cdot , \cdot ]_{H_Q}$. This space will be called the {\em reproducing kernel Hilbert space associated with $Q$}. In the following we collect some properties of the reproducing kernel Hilbert space and its embedding: \begin{enumerate} \item[(a)] The inclusion mapping from the range of $Q$ into $U$ is continuous with respect to the inner product $[ \cdot, \cdot ]_{H_Q}$.
For, we have \begin{align*} \norm{Qu^\ast}_{H_Q}^2 =\abs{\scapro{Qu^\ast}{u^\ast}}\leqslant \norm{Q}_{U^\ast\to U}\norm{u^\ast}^2, \end{align*} which allows us to conclude \begin{align*} \abs{\scapro{Qu^\ast}{v^\ast}} &= \abs{[Qu^\ast,Qv^\ast]_{H_Q}} \leqslant \norm{Qu^\ast}_{H_Q}\norm{Qv^\ast}_{H_Q} \leqslant \norm{Qu^\ast}_{H_Q}\norm{Q}_{U^\ast\to H_Q}\norm{v^\ast}. \intertext{Therefore, we end up with} \norm{Qu^\ast}&=\sup_{\norm{v^\ast}\leqslant 1}\abs{\scapro{Qu^\ast}{v^\ast}}\leqslant \norm{Q}_{U^\ast\to H_Q}\norm{Qu^\ast}_{H_Q}. \end{align*} Thus the inclusion mapping is continuous on the range of $Q$ and it extends to a bounded linear operator $i_Q$ from $H_Q$ into $U$. \item[(b)] The operator $Q$ enjoys the decomposition \begin{align*} Q=i_Q i_Q^\ast. \end{align*} For the proof we define $h_{u^\ast}:=Qu^\ast$ for all $u^\ast\in U^\ast$. Then we have $i_Q(h_{u^\ast})=Qu^\ast$ and \begin{align*} [h_{u^\ast},h_{v^\ast}]_{H_Q}=\scapro{Qu^\ast}{v^\ast} =\scapro{i_Q(h_{u^\ast})}{v^\ast} =[h_{u^\ast},i_Q^\ast v^\ast]_{H_Q}. \end{align*} Because the range of $Q$ is dense in $H_Q$ we arrive at \begin{align}\label{eq.hQ} h_{v^\ast}&=i_Q^\ast v^\ast \qquad\text{for all }v^\ast\in U^\ast \intertext{which finally leads to} Qv^\ast&=i_Q(h_{v^\ast})=i_Q(i_Q^\ast v^\ast)\qquad\text{for all }v^\ast\in U^\ast.\notag \end{align} \item[(c)] By \eqref{eq.hQ} it follows immediately that the range of $i_Q^\ast$ is dense in $H_Q$. \item[(d)] The inclusion mapping $i_Q$ is injective. For, if $i_Qh=0$ for some $h\in H_Q$ it follows that \begin{align*} [h,i_Q^\ast u^\ast]_{H_Q} =\scapro{i_Q h}{u^\ast}=0\qquad\text{for all }u^\ast\in U^\ast, \end{align*} which results in $h=0$ because of (c). \item[(e)] If $U$ is separable then $H_Q$ is also separable. \end{enumerate} \begin{remark}\label{re.repandimage} Let $\mu$ be a centred strongly Gaussian cylindrical measure with covariance operator $Q:U^\ast\to U$. Because $Q$ is positive and symmetric we can associate with $Q$ the reproducing kernel Hilbert space $H_Q$ with the inclusion mapping $i_Q$ as constructed above. For the image $\gamma\circ i_Q^{-1}$ of the standard Gaussian cylindrical measure $\gamma$ on $H_Q$ we calculate \begin{align*} \varphi_{\gamma\circ i_Q^{-1}}(u^\ast)&=\int_U e^{i\scapro{u}{u^\ast}}\,(\gamma\circ i_Q^{-1})(du)\\ &= \int_{H_Q} e^{i\scapro{h}{i_Q^\ast u^\ast}}\,\gamma(dh)\\ &= \exp\left(-\tfrac{1}{2}\norm{i_Q^\ast u^\ast}_{H_Q}^2\right)\\ &= \exp\left(-\tfrac{1}{2}\scapro{Qu^\ast}{u^\ast}\right). \end{align*} Thus, $\mu=\gamma\circ i_{Q}^{-1}$ and we have found one possible Hilbert space and operator satisfying the condition in Theorem \ref{th.strongGauss}. But note that there might exist other Hilbert spaces exhibiting this feature; the reproducing kernel Hilbert space is characterised among them by a certain ``minimal property'', see \cite{Bogachev98}. \end{remark} \section{$\gamma$-radonifying operators} This section follows the notes \cite{JanSeminar}. Let $Q:U^\ast\to U$ be a positive symmetric operator and $H$ the reproducing kernel Hilbert space with the inclusion mapping $i_Q:H\to U$. If $U$ is a Hilbert space then it is a well known result by Mourier (\cite[Thm. IV.2.4]{Vaketal}) that $Q$ is the covariance operator of a Gaussian measure on $U$ if and only if $Q$ is nuclear or equivalently if $i_Q$ is Hilbert-Schmidt. In this case it follows from Remark \ref{re.repandimage} that the cylindrical measure $\gamma\circ i_Q^{-1}$ extends to a Gaussian measure on $\Borel(U)$ and that $Q$ is the covariance operator of this Gaussian measure.
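The following finite dimensional sketch is ours and purely illustrative: with $U=\R^d$ and $Q$ a positive semi-definite matrix, the construction above reduces to a factorisation $Q=FF^{\top}$, where the matrix $F$ plays the role of $i_Q$ and $F^{\top}$ the role of $i_Q^\ast$; the concrete factorisation via an eigendecomposition is a hypothetical choice.
\begin{verbatim}
# Illustrative sketch (U = R^d, Q positive semi-definite): factorise
# Q = F F^T with F in the role of i_Q, and check the identity
# [Qu*, Qv*]_{H_Q} = <Qu*, v*> using h_{u*} = i_Q^* u* = F^T u*.
import numpy as np

rng = np.random.default_rng(2)
d = 4
A = rng.standard_normal((d, d))
Q = A @ A.T

w, V = np.linalg.eigh(Q)
F = V @ np.diag(np.sqrt(np.clip(w, 0.0, None)))  # H_Q identified with R^d
print(np.allclose(F @ F.T, Q))                   # Q = i_Q i_Q^*

u, v = rng.standard_normal(d), rng.standard_normal(d)
h_u, h_v = F.T @ u, F.T @ v
print(np.isclose(h_u @ h_v, (Q @ u) @ v))        # reproducing property
\end{verbatim}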
The following definition generalises this property of $i_Q:H\to U$, namely that $Q:=i_Q i_Q^\ast$ defines a covariance operator, to the case when $U$ is a Banach space: \begin{definition} Let $\gamma$ be the standard Gaussian cylindrical measure on a separable Hilbert space $H$. A linear bounded operator $F:H\to U$ is called {\em $\gamma$-radonifying} if the cylindrical measure $\gamma\circ F^{-1}$ extends to a Gaussian measure on $\Borel(U)$. \end{definition} \begin{theorem}\label{th.eqradonifying} Let $\gamma$ be the standard Gaussian cylindrical measure on a separable Hilbert space $H$ with orthonormal basis $(e_n)_{n\in\N}$ and let $(G_n)_{n\in\N}$ be a sequence of independent standard real normal random variables. For $F\in L(H,U)$ the following are equivalent: \begin{enumerate} \item[{\rm (a)}] $F$ is $\gamma$-radonifying; \item[{\rm (b)}] the operator $FF^\ast:U^\ast\to U$ is the covariance operator of a Gaussian measure $\mu$ on $\Borel(U)$; \item[{\rm (c)}] the series $\displaystyle \sum_{k=1}^\infty G_k Fe_k$ converges a.s. in $U$; \item[{\rm (d)}] the series $\displaystyle \sum_{k=1}^\infty G_k Fe_k$ converges in $L^p(\Omega; U)$ for some $p\in [1,\infty)$; \item[{\rm (e)}] the series $\displaystyle \sum_{k=1}^\infty G_k Fe_k$ converges in $L^p(\Omega; U)$ for all $p\in [1,\infty)$. \end{enumerate} In this situation we have for every $p\in [1,\infty)$: \begin{align*} \int_U \norm{u}^p\,\mu(du)=E\norm{\sum_{k=1}^\infty G_k Fe_k}^p. \end{align*} \end{theorem} \begin{proof} As in Remark \ref{re.repandimage} we obtain for the characteristic function of $\nu:=\gamma\circ F^{-1}$: \begin{align*} \varphi_{\nu}(u^\ast) = \exp\left(-\tfrac{1}{2} \scapro{FF^\ast u^\ast}{u^\ast}\right)\qquad \text{for all }u^\ast\in U^\ast. \end{align*} This establishes the equivalence of (a) and (b). The proofs of the remaining equivalences can be found in \cite[Prop.4.2]{JanSeminar}. \end{proof} To show that $\gamma$-radonifying operators generalise Hilbert-Schmidt operators to Banach spaces we prove the result by Mourier already mentioned above. Other proofs relying only on Hilbert space theory can be found in the literature. \begin{corollary}\label{th.gammahilbert} If $H$ and $U$ are separable Hilbert spaces then the following are equivalent for $F\in L(H,U)$: \begin{enumerate} \item[{\rm (a)}] $F$ is $\gamma$-radonifying; \item[{\rm (b)}] $F$ is Hilbert-Schmidt. \end{enumerate} \end{corollary} \begin{proof} Let $(e_k)_{k\in\N}$ be an orthonormal basis of $H$. The equivalence follows immediately from \begin{align*} E\norm{ \sum_{k=m}^n G_k Fe_k}^2=\sum_{k=m}^n \norm{Fe_k}^2 \end{align*} for every family $(G_k)_{k\in\N}$ of independent standard normal random variables. \end{proof} In general, the property of being $\gamma$-radonifying is not as easily verified as the Hilbert-Schmidt property in the case of Hilbert spaces. However, for some specific Banach spaces, such as $L^p$ or $l^p$ spaces, the set of all covariance operators of Gaussian measures can also be described more precisely, see \cite[Thm.V.5.5 and Thm.V.5.6]{Vaketal}. It turns out that the set of all $\gamma$-radonifying operators can be equipped with a norm such that it is a Banach space, see \cite[Thm. 4.14]{JanSeminar}. \section{Cylindrical stochastic processes} Let $(\Omega, {\mathcal A},P)$ be a probability space with a filtration $\{\F_t\}_{t\geqslant 0}$.
Similarly to the correspondence between measures and random variables there is an analogous random object associated to cylindrical measures: \begin{definition}\label{de.cylrv} A {\em cylindrical random variable $X$ in $U$} is a linear map \begin{align*} X:U^\ast \to L^0(\Omega). \end{align*} A cylindrical process $X$ in $U$ is a family $(X(t):\,t\geqslant 0)$ of cylindrical random variables in $U$. \end{definition} The characteristic function of a cylindrical random variable $X$ is defined by \begin{align*} \varphi_X:U^\ast\to\C, \qquad \varphi_X(u^\ast)=E[\exp(i Xu^\ast)]. \end{align*} The concepts of cylindrical measures and cylindrical random variables match perfectly. Because the characteristic function of a cylindrical random variable is positive-definite and continuous on finite dimensional subspaces there exists a cylindrical measure $\mu$ with the same characteristic function. We call $\mu$ the {\em cylindrical distribution of $X$}. Vice versa, for every cylindrical measure $\mu$ on $\Cc(U)$ there exists a probability space $(\Omega,{\mathcal A},P)$ and a cylindrical random variable $X:U^\ast\to L^0(\Omega)$ such that $\mu$ is the cylindrical distribution of $X$, see \cite[VI.3.2]{Vaketal}. \begin{example} A cylindrical random variable $X:U^\ast \to L^0(\Omega)$ is called weakly Gaussian if $Xu^\ast$ is Gaussian for all $u^\ast\in U^\ast$. Thus, $X$ defines a weakly Gaussian cylindrical measure $\mu$ on $\Cc(U)$. The characteristic function of $X$ coincides with that of $\mu$ and is of the form \begin{align*} \varphi_X(u^\ast)=\exp(im(u^\ast)-\tfrac{1}{2}s(u^\ast)) \end{align*} with $m:U^\ast\to \R$ linear and $s:U^\ast\to \Rp$ a quadratic form. If $X$ is strongly Gaussian, i.e. the cylindrical measure $\mu$ is strongly Gaussian, there exists a covariance operator $Q:U^\ast\to U$ such that \begin{align*} \varphi_X(u^\ast)=\exp(im(u^\ast)-\tfrac{1}{2}\scapro{Qu^\ast}{u^\ast}). \end{align*} Because $\varphi_X(u^\ast)=\varphi_{Xu^\ast}(1)$ it follows that \begin{align*} E[Xu^\ast]=m(u^\ast)\qquad\text{and}\qquad \Var[Xu^\ast]=\scapro{Qu^\ast}{u^\ast}. \end{align*} In the same way by comparing the characteristic function \begin{align*} \varphi_{Xu^\ast,Xv^\ast}(\beta_1,\beta_2) =E\left[\exp\left(i(\beta_1Xu^\ast +\beta_2Xv^\ast)\right)\right] =E\left[\exp\left(i(X(\beta_1u^\ast +\beta_2v^\ast))\right)\right] \end{align*} for $\beta_1,\beta_2\in\R$ with the characteristic function of the two-dimensional Gaussian vector $(Xu^\ast,Xv^\ast)$ we may conclude \begin{align*} \text{Cov}[Xu^\ast, Xv^\ast]=\scapro{Qu^\ast}{v^\ast}. \end{align*} Let $H_Q$ denote the reproducing kernel Hilbert space of the covariance operator $Q$. Then we obtain \begin{align*} E\abs{Xu^\ast-m(u^\ast)}^2=\Var[Xu^\ast]=\scapro{Qu^\ast}{u^\ast}= \norm{i_Q^\ast u^\ast}_{H_Q}^2. \end{align*} \end{example} The cylindrical process $X=(X(t):\,t\geqslant 0)$ is called {\em adapted to a given filtration $\{\F_t\}_{t\geqslant 0}$}, if $X(t)u^\ast$ is $\F_t$-measurable for all $t\geqslant 0$ and all $u^\ast\in U^\ast$. The cylindrical process $X$ has weakly independent increments if for all $0\leqslant t_0<t_1<\dots <t_n$ and all $u^\ast_1,\dots, u^\ast_n\in U^\ast$ the random variables \begin{align*} (X(t_1)-X(t_0))u_1^\ast,\dots , (X(t_n)-X(t_{n-1}))u_n^\ast \end{align*} are independent. \begin{remark} Our definition of cylindrical processes is based on the definitions in \cite{BerRo83} and \cite{Vaketal}.
In \cite{Metivier80} and \cite{Schwartz96} cylindrical random variables are considered which have values in $L^p(\Omega)$ for $p>0$. They assume in addition that a cylindrical random variable is continuous. The continuity of a cylindrical random variable is reflected by continuity properties of its characteristic function, see \cite[Prop.IV.3.4]{Vaketal}. The notion of weakly independent increments originates from \cite{BerRo83}. \end{remark} \begin{example}\label{ex.hat} Let $Y=(Y(t):\,t\geqslant 0)$ be a stochastic process with values in a separable Banach space $U$. Then $\hat{Y}(t)u^\ast:=\scapro{Y(t)}{u^\ast}$ for $u^\ast\in U^\ast$ defines a cylindrical process $\hat{Y}=(\hat{Y}(t):\,t\geqslant 0)$. The cylindrical process $\hat{Y}$ is adapted if and only if $Y$ is adapted, and $\hat{Y}$ has weakly independent increments if and only if $Y$ has independent increments. Both statements follow from the fact that the Borel and the cylindrical $\sigma$-algebras coincide for separable Banach spaces by Pettis' measurability theorem. \end{example} An {\em $\R^n$-valued Wiener process} $B=(B(t): \,t\geqslant 0)$ is an adapted stochastic process with independent, stationary increments $B(t)-B(s)$ which are normally distributed with expectation $E[B(t)-B(s)]=0$ and covariance Cov$[B(t)-B(s),B(t)-B(s)]=\abs{t-s}C$ for a non-negative definite symmetric matrix $C$. If $C=\Id$ we call $B$ a {\em standard} Wiener process. \begin{definition}\label{de.weakcyl} An adapted cylindrical process $W=(W(t):\,t\geqslant 0)$ in $U$ is a {\em weakly cylindrical Wiener process}, if \begin{enumerate} \item[{\rm (a)}] for all $u^\ast_1,\dots, u^\ast_n\in U^\ast$ and $n\in \N$ the $\R^n$-valued stochastic process \begin{align*} \big((W(t)u^\ast_1,\dots, W(t)u^\ast_{n}):\,t\geqslant 0\big) \end{align*} is a Wiener process. \end{enumerate} \end{definition} Our definition of a weakly cylindrical Wiener process is an obvious extension of the definition of a finite-dimensional Wiener process and is exactly in the spirit of cylindrical processes. The multidimensional formulation in Definition \ref{de.weakcyl} would already be necessary to define a finite-dimensional Wiener process by this approach and it allows us to conclude that a weakly cylindrical Wiener process has weakly independent increments. The latter property is exactly what is needed in addition to a one-dimensional formulation: \begin{lemma}\label{th.weaklyind} For an adapted cylindrical process $W=(W(t):\,t\geqslant 0)$ the following are equivalent: \begin{enumerate} \item[{\rm (a)}] $W$ is a weakly cylindrical Wiener process; \item[{\rm (b)}] $W$ satisfies \begin{enumerate} \item[{\rm (i)}] $W$ has weakly independent increments; \item[{\rm (ii)}] $(W(t)u^\ast:\,t\geqslant 0)$ is a Wiener process for all $u^\ast\in U^\ast$. \end{enumerate} \end{enumerate} \end{lemma} \begin{proof} We have only to show that (b) implies (a). By linearity we have \begin{align*} \beta_1(W(t)-W(s))u_1^\ast+\dots +\beta_n(W(t)-W(s))u_n^\ast = (W(t)-W(s))\left(\sum_{i=1}^n \beta_iu^\ast_i\right), \end{align*} for all $\beta_i\in\R$ and $u_i^\ast \in U^\ast$, which shows that the increments of $((W(t)u^\ast_1,\dots, W(t)u^\ast_n):\, t\geqslant 0)$ are normally distributed and stationary. The independence of the increments follows by (i).
\end{proof} Because $W(1)$ is a centred weakly Gaussian cylindrical random variable there exists a weakly Gaussian cylindrical measure $\mu$ such that \begin{align*} \varphi_{W(1)}(u^\ast)= E[\exp(i W(1)u^\ast)] = \varphi_{\mu}(u^\ast)=\exp\left(-\tfrac{1}{2} s(u^\ast)\right) \end{align*} for a quadratic form $s:U^\ast\to \Rp$. Therefore, since $W(t)u^\ast$ is centred Gaussian with variance $t\,s(u^\ast)$, one obtains \begin{align*} \varphi_{W(t)}(u^\ast)= E[\exp(iW(t)u^\ast)] =\exp\left(-\tfrac{1}{2}\,t\, s(u^\ast)\right) \end{align*} for all $t\geqslant 0$. Thus, the cylindrical distributions of $W(t)$ for all $t\geqslant 0$ are determined by the cylindrical distribution of $W(1)$ alone. \begin{definition}\label{de.strongW} A weakly cylindrical Wiener process $(W(t):\, t\geqslant 0)$ is called a {\em strongly cylindrical Wiener process}, if \begin{enumerate} \item[{\rm (b)}] the cylindrical distribution $\mu$ of $W(1)$ is strongly Gaussian. \end{enumerate} \end{definition} The additional condition for a weakly cylindrical Wiener process to be strongly cylindrical requires the existence of an $U$-valued covariance operator for the Gaussian cylindrical measure. To our knowledge weakly cylindrical Wiener processes are not defined in the literature and (strongly) cylindrical Wiener processes are defined by means of other conditions. Often, these definitions are formulated by assuming the existence of the reproducing kernel Hilbert space. But this implies the existence of the covariance operator. Another popular way of defining cylindrical Wiener processes is by means of a series. We will see in the next section that this is also equivalent to our definition. Later, we will compare a strongly cylindrical Wiener process with an $U$-valued Wiener process. The latter is also defined as a direct generalisation of a real-valued Wiener process: \begin{definition} An adapted $U$-valued stochastic process $(W(t):\,t\geqslant 0)$ is called a {\em Wiener process} if \begin{enumerate} \item[{\em (a)}] $W(0)=0$ $P$-a.s.; \item[{\em (b)}] $W$ has independent, stationary increments; \item[{\em (c)}] there exists a Gaussian covariance operator $Q:U^\ast\to U$ such that \begin{align*} W(t)-W(s)\stackrel{d}{=} N(0,(t-s)Q)\qquad\text{for all }0\leqslant s\leqslant t. \end{align*} \end{enumerate} \end{definition} If $U$ is finite dimensional then $Q$ can be any symmetric, positive semi-definite matrix. In case that $U$ is a Hilbert space we know already that $Q$ has to be nuclear. For the general case of a Banach space $U$ we can describe the possible Gaussian covariance operator by Theorem \ref{th.eqradonifying}. It is obvious that every $U$-valued Wiener process $W$ defines a strongly cylindrical Wiener process $(\hat{W}(t):\,t\geqslant 0)$ in $U$ by $\hat{W}(t)u^\ast:=\scapro{W(t)}{u^\ast}$. For the converse question, whether a cylindrical Wiener process can be represented in such a way by an $U$-valued Wiener process, we will derive necessary and sufficient conditions later. \section{Representations of cylindrical Wiener processes} In this section we derive representations of cylindrical Wiener processes and $U$-valued Wiener processes in terms of series. In addition, these representations can also serve as a construction of these processes, see Remark \ref{re.construction}.
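As a purely illustrative aside, which is ours and not part of the original text, the following finite dimensional sketch simulates a truncated version of the series representation established in Theorem~\ref{th.cylsum} below, with $U=\R^d$, $H=\R^n$ and a matrix $F$, all hypothetical choices; it checks that $W(T)u^\ast$ has variance $T\scapro{FF^\ast u^\ast}{u^\ast}$.
\begin{verbatim}
# Illustrative sketch: truncated series W(T)u* = sum_k <F e_k, u*> B_k(T)
# with U = R^d, H = R^n, F a d x n matrix and B_k independent real-valued
# standard Wiener processes.
import numpy as np

rng = np.random.default_rng(3)
d, n, n_paths, T = 3, 50, 50_000, 1.0
F = rng.standard_normal((d, n)) / np.sqrt(n)
u_star = rng.standard_normal(d)

B_T = rng.standard_normal((n_paths, n)) * np.sqrt(T)   # B_k(T) per path
W_T_u = B_T @ (F.T @ u_star)                           # sum_k <F e_k,u*> B_k(T)

print(np.var(W_T_u))                     # approximately equal to ...
print(T * u_star @ (F @ F.T) @ u_star)   # ... T <F F^T u*, u*>
\end{verbatim}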
\begin{theorem}\label{th.cylsum} For an adapted cylindrical process $W:=(W(t):\,t\geqslant 0)$ the following are equivalent: \begin{enumerate} \item[{\rm (a)}] $W$ is a strongly cylindrical Wiener process; \item[{\rm (b)}] there exist a Hilbert space $H$ with an orthonormal basis $(e_n)_{n\in\N}$, $F\in L(H,U)$ and independent real-valued standard Wiener processes $(B_n)_{n\in\N}$ such that \begin{align*} W(t)u^\ast=\sum_{k=1}^\infty \scapro{Fe_k}{u^\ast} B_k(t) \qquad \text{in }L^2(\Omega)\text{ for all $u^\ast\in U^\ast$}. \end{align*} \end{enumerate} \end{theorem} \begin{proof} (b) $\;\Rightarrow\;$ (a) By Doob's inequality we obtain for any $m,n\in\N$ \begin{align*} E\left[ \sup_{t\in [0,T]}\abs{\sum_{k=n}^{n+m} \scapro{Fe_k}{u^\ast} B_k(t)}^2\right] &\leqslant 4 E \abs{\sum_{k=n}^{n+m} \scapro{Fe_k}{u^\ast} B_k(T)}^2\\ &= 4 T\sum_{k=n}^{n+m} \scapro{e_k}{F^\ast u^\ast}^2\\ &\to 0 \qquad\text{for }m,n\to\infty. \end{align*} Thus, for every $u^\ast\in U^\ast$ the random variables $W(t)u^\ast$ are well defined and form a cylindrical process $(W(t):\,t\geqslant 0)$. For any $0=t_0<t_1<\dots <t_m$ and $\beta_k\in\R$ we calculate \begin{align*} & E\left[\exp\left(i\sum_{k=0}^{m-1} \beta_k (W(t_{k+1})u^\ast - W(t_k)u^\ast)\right)\right]\\ &\qquad=\lim_{n\to\infty} E\left[\exp\left(i\sum_{k=0}^{m-1} \beta_k \sum_{l=1}^n \scapro{Fe_l}{u^\ast}(B_l(t_{k+1})-B_l(t_k)) \right)\right]\\ &\qquad =\lim_{n\to\infty} \prod_{k=0}^{m-1}\prod_{l=1}^n E\Big[ \exp \left( i\beta_k \scapro{Fe_l}{u^\ast}(B_l(t_{k+1})-B_l(t_k))\right)\Big]\\ &\qquad =\lim_{n\to\infty} \prod_{k=0}^{m-1}\prod_{l=1}^n \exp \Big( -\tfrac{1}{2}\beta_k^2 \scapro{Fe_l}{u^\ast}^2(t_{k+1}-t_k)\Big)\\ &\qquad = \prod_{k=0}^{m-1} \exp \left( -\tfrac{1}{2}\beta_k^2 \norm{F^\ast u^\ast}^2_H(t_{k+1}-t_k)\right), \end{align*} which shows that $(W(t)u^\ast:\,t\geqslant 0)$ has independent, stationary Gaussian increments. Because the partial sums converge uniformly on every finite interval the process $(W(t)u^\ast:\,t\geqslant 0)$ has a.s. continuous paths and is therefore established as a real-valued Wiener process. The above calculation of the characteristic function yields \begin{align*} E\left[\exp(i W(1)u^\ast)\right]=\exp\left(-\tfrac{1}{2} \norm{F^\ast u^\ast}_H^2\right) =\exp\left(-\tfrac{1}{2}\scapro{FF^\ast u^\ast}{u^\ast}\right). \end{align*} Hence, the process $W$ is a strongly cylindrical Wiener process with covariance operator $Q:=FF^\ast$. (a) $\Rightarrow$ (b): Let $Q:U^\ast\to U$ be the covariance operator of $W(1)$ and $H$ its reproducing kernel Hilbert space with the inclusion mapping $i_Q:H\to U$. Because the range of $i_Q^\ast$ is dense in $H$ and $H$ is separable there exists an orthonormal basis $(e_n)_{n\in\N}\subseteq$range$(i_Q^\ast)$ of $H$. We choose $u_n^\ast\in U^\ast$ such that $i_Q^\ast u_n^\ast=e_n$ for all $n\in\N$ and define $B_n(t):=W(t)u_n^\ast$. Then we obtain \begin{align*} E\abs{ \sum_{k=1}^n \scapro{i_Qe_k}{u^\ast}B_k(t) - W(t)u^\ast }^2 &=E\left[W(t)\left(\sum_{k=1}^n \scapro{i_Qe_k}{u^\ast} u_k^\ast -u^\ast\right)\right]^2\\ &=t\norm{i_Q^\ast\left(\sum_{k=1}^n \scapro{i_Qe_k}{u^\ast}u_k^\ast -u^\ast\right)}^2_H\\ &=t\norm{\sum_{k=1}^n [e_k,i_Q^\ast u^\ast]_{H_Q}e_k -i_Q^\ast u^\ast }^2_H\\ &\to 0 \qquad\text{for }n\to\infty. \end{align*} Thus, $W$ has the required representation and it remains to establish that the Wiener processes $B_n:=(B_n(t):\,t\geqslant 0)$ are independent.
Because of the Gaussian distribution it is sufficient to establish that $B_n(s)$ and $B_m(t)$ for any $s\leqslant t$ and $m,n\in\N$ are independent: \begin{align*} E[B_n(s)B_m(t)]&= E[W(s)u_n^\ast W(t)u_m^\ast]\\ & = E[W(s)u^\ast_n (W(t)u_m^\ast- W(s)u_m^\ast)] + E[W(s)u_n^\ast W(s)u_m^\ast]. \intertext{The first term is zero by Lemma \ref{th.weaklyind} and for the second term we obtain} E[W(s)u_n^\ast W(s)u_m^\ast] &=s\scapro{Qu_n^\ast}{u_m^\ast} = s [i_Q^\ast u_n^\ast,i_Q^\ast u_m^\ast]_{H_Q} =s [e_n,e_m]_{H_Q} =s \delta_{n,m}. \end{align*} Hence, $B_n(s)$ and $B_m(t)$ are uncorrelated and therefore independent. \end{proof} \begin{remark} The proof has shown that the Hilbert space $H$ in part (b) can be chosen as the reproducing kernel Hilbert space associated with the Gaussian cylindrical distribution of $W(1)$. In this case the function $F:H \to U$ is the inclusion mapping $i_Q$. \end{remark} \begin{remark} Let $H$ be a separable Hilbert space with orthonormal basis $(e_k)_{k\in\N}$ and let $(B_k(t):\,t\geqslant 0)$ be independent real-valued Wiener processes. By setting $U=H$ and $F=\Id$ Theorem \ref{th.cylsum} yields that a strongly cylindrical Wiener process $(W_H(t):\, t\geqslant 0)$ is defined by \begin{align*} W_H(t)h= \sum_{k=1}^\infty \scapro{e_k}{h} B_k(t) \qquad\text{for all }h\in H. \end{align*} The covariance operator of $W_H$ is $\Id:H\to H$. This is the way a cylindrical Wiener process is defined, for example, in \cite{Bogachev98} and \cite{NeeWeis}. If in addition $U$ is a separable Banach space and $F\in L(H,U)$ we obtain by defining \begin{align*} W(t)u^\ast := W_H(t)(F^\ast u^\ast) \qquad\text{for all }u^\ast\in U^\ast, \end{align*} a strongly cylindrical Wiener process $(W(t):\, t\geqslant 0)$ with covariance operator $Q:=FF^\ast$ according to our Definition \ref{de.strongW}. \end{remark} \begin{theorem}\label{th.Usum} For an adapted $U$-valued process $W:=(W(t):\,t\geqslant 0)$ the following are equivalent: \begin{enumerate} \item[{\rm (a)}] $W$ is an $U$-valued Wiener process; \item[{\rm (b)}] there exist a Hilbert space $H$ with an orthonormal basis $(e_n)_{n\in\N}$, a $\gamma$-radonifying operator $F\in L(H,U)$ and independent real-valued standard Wiener processes $(B_n)_{n\in\N}$ such that \begin{align*} W(t)=\sum_{k=1}^\infty Fe_k B_k(t) \qquad \text{in }L^2(\Omega;U). \end{align*} \end{enumerate} \end{theorem} \begin{proof} (b) $\Rightarrow$ (a): As in the proof of Theorem \ref{th.cylsum} we obtain by Doob's inequality (but here for infinite-dimensional spaces) for any $m,n\in \N$ \begin{align*} E\left[ \sup_{t\in [0,T]}\norm{\sum_{k=n}^{n+m} Fe_k B_k(t)}^2\right] &\leqslant 4 E \norm{\sum_{k=n}^{n+m} Fe_k B_k(T)}^2\\ &\to 0 \qquad\text{for }m,n\to\infty, \end{align*} where the convergence follows by Theorem \ref{th.eqradonifying} because $F$ is $\gamma$-radonifying. Thus, the random variables $W(t)$ are well defined and form an $U$-valued stochastic process $W:=(W(t):\,t\geqslant 0)$. As in the proof of Theorem \ref{th.cylsum} we can proceed to establish that $W$ is an $U$-valued Wiener process. (a) $\Rightarrow$ (b): By Theorem \ref{th.cylsum} there exist a Hilbert space $H$ with an orthonormal basis $(e_n)_{n\in\N}$, $F\in L(H,U)$ and independent real-valued standard Wiener processes $(B_n)_{n\in\N}$ such that \begin{align*} \scapro{W(t)}{u^\ast}=\sum_{k=1}^\infty \scapro{Fe_k}{u^\ast} B_k(t) \qquad \text{in }L^2(\Omega)\text{ for all $u^\ast\in U^\ast$}.
\end{align*} The It{\^o}-Nisio Theorem \cite[Thm.V.2.4]{Vaketal} implies \begin{align*} W(t)=\sum_{k=1}^\infty Fe_k B_k(t) \qquad \text{$P$-a.s.} \end{align*} and a result by Hoffmann-Jorgensen \cite[Cor.2 in V.3.3]{Vaketal} yields the convergence in $L^2(\Omega;U)$. Theorem \ref{th.eqradonifying} then verifies that $F$ is $\gamma$-radonifying. \end{proof} \begin{remark}\label{re.construction} In the proofs of the implication from (b) to (a) we established in both Theorems \ref{th.cylsum} and \ref{th.Usum} even more than required: in each case we established the convergence of the series in the specified sense without assuming the existence of the limit process. This means that we can also read these results as a construction principle of cylindrical or $U$-valued Wiener processes without assuming the existence of the considered process a priori. The construction of these random objects differs significantly in the required conditions on the involved operator $F$. For a cylindrical Wiener process no conditions are required; for an $U$-valued Wiener process, however, we have to guarantee that $Q=FF^\ast$ is the covariance operator of a Gaussian measure by assuming $F$ to be $\gamma$-radonifying. \end{remark} \section{When is a cylindrical Wiener process $U$-valued?} In this section we give equivalent conditions for a strongly cylindrical Wiener process to be an $U$-valued Wiener process. To be more precise a cylindrical random variable $X:U^\ast \to L^0(\Omega)$ is called {\em induced by a random variable $Z:\Omega\to U$}, if $P$-a.s. \begin{align*} Xu^\ast= \scapro{Z}{u^\ast}\qquad\text{for all }u^\ast\in U^\ast. \end{align*} This definition generalises in an obvious way to cylindrical processes. Because of the correspondence to cylindrical measures the question whether a cylindrical random variable is induced by an $U$-valued random variable is reduced to the question whether the cylindrical measure extends to a Radon measure (\cite[Thm. VI.3.1]{Vaketal}). There is a classical answer by Prokhorov (\cite[Thm. VI.3.2]{Vaketal}) to this question in terms of tightness. A cylindrical measure $\mu$ on $\Cc(U)$ is called {\em tight} if for each $\varepsilon>0$ there exists a compact subset $K\subseteq U$ such that \begin{align*} \mu(K)\geqslant 1-\varepsilon. \end{align*} In case of non-separable Banach spaces $U$ one has to be more careful because then compact sets are not necessarily admissible arguments of a cylindrical measure. \begin{theorem}\label{th.cyl=radon} For a strongly cylindrical Wiener process $W:=(W(t):\,t\geqslant 0)$ with covariance operator $Q=i_Qi_Q^\ast$ the following are equivalent: \begin{enumerate} \item[{\rm (a)}] $W$ is induced by an $U$-valued Wiener process; \item[{\rm (b)}] $i_Q$ is $\gamma$-radonifying; \item[{\rm (c)}] the cylindrical distribution of $W(1)$ is tight; \item[{\rm (d)}] the cylindrical distribution of $W(1)$ extends to a measure. \end{enumerate} \end{theorem} \begin{proof} (a) $\Rightarrow$ (b) If there exists an $U$-valued Wiener process $(\tilde{W}(t):\,t\geqslant 0)$ with $W(t)u^\ast=\scapro{\tilde{W}(t)}{u^\ast}$ for all $u^\ast\in U^\ast$, then $\tilde{W}(1)$ has a Gaussian distribution with covariance operator $Q$. Thus, $i_Q$ is $\gamma$-radonifying by Theorem \ref{th.eqradonifying}. (b)$\Leftrightarrow$ (c) $\Leftrightarrow$ (d) This is Prokhorov's Theorem on cylindrical measures.
(b)$\Rightarrow $(a) Due to Theorem \ref{th.cylsum} there exist an orthonormal basis $(e_n)_{n\in\N}$ of the reproducing kernel Hilbert space of $Q$ and independent standard real-valued Wiener processes $(B_k(t):\,t\geqslant 0)$ such that \begin{align*} W(t)u^\ast=\sum_{k=1}^\infty \scapro{i_Q e_k}{u^\ast} B_k(t)\qquad\text{for all } u^\ast\in U^\ast. \end{align*} On the other hand, because $i_Q$ is $\gamma$-radonifying Theorem \ref{th.Usum} yields that \begin{align*} \tilde{W}(t)=\sum_{k=1}^\infty i_Q e_k B_k(t) \end{align*} defines an $U$-valued Wiener process $(\tilde{W}(t):\,t\geqslant 0)$. Obviously, we have $W(t)u^\ast=\scapro{\tilde{W}(t)}{u^\ast}$ for all $u^\ast$. \end{proof} If $U$ is a separable Hilbert space we can replace the condition (b) by \begin{enumerate} \item[(b$^\prime$)] $i_Q$ is Hilbert-Schmidt \end{enumerate} because of Theorem \ref{th.gammahilbert}. \section{Integration} In this section we introduce an integral with respect to a strongly cylindrical Wiener process $W=(W(t):\,t\geqslant 0)$ in $U$. The integrand is a stochastic process with values in $L(U,V)$, the set of bounded linear operators from $U$ to $V$, where $V$ denotes a separable Banach space. For that purpose we assume for $W$ the representation according to Theorem \ref{th.cylsum}: \begin{align*} W(t)u^\ast=\sum_{k=1}^\infty \scapro{i_Qe_k}{u^\ast} B_k(t) \qquad \text{in }L^2(\Omega)\text{ for all $u^\ast\in U^\ast$}, \end{align*} where $H$ is the reproducing kernel Hilbert space of the covariance operator $Q$ with the inclusion mapping $i_Q:H\to U$ and an orthonormal basis $(e_n)_{n\in\N}$ of $H$. The real-valued standard Wiener processes $(B_k(t):\,t\geqslant 0)$ are defined by $B_k(t)=W(t)u_k^\ast$ for some $u_k^\ast\in U^\ast$ with $i_Q^\ast u_k^\ast=e_k$. \begin{definition} The set $M_T(U,V)$ contains all random variables $\Phi:[0,T]\times \Omega\to L(U,V)$ such that: \begin{enumerate} \item[{\rm (a)}] $(t,\omega)\mapsto \Phi^\ast(t,\omega)v^\ast$ is $\Borel[0,T]\otimes {\mathcal A}$-measurable for all $v^\ast\in V^\ast$; \item[{\rm (b)}] $\omega\mapsto \Phi^\ast(t,\omega)v^\ast$ is $\F_t$-measurable for all $v^\ast\in V^\ast$ and $t\in [0,T]$; \item[{\rm (c)}] $\displaystyle \int_0^T E\norm{\Phi^\ast(s,\cdot)v^\ast}_{U^\ast}^2\,ds<\infty \;$ for all $v^\ast\in V^\ast$. \end{enumerate} \end{definition} As usual we neglect the dependence of $\Phi\in M_T(U,V)$ on $\omega$ and write $\Phi(s)$ for $\Phi(s,\cdot)$ as well as for the dual operator $\Phi^\ast(s):=\Phi^\ast(s,\cdot)$ where $\Phi^\ast(s,\omega)$ denotes the dual operator of $\Phi(s,\omega)\in L(U,V)$. We define the candidate for a stochastic integral: \begin{definition}\label{de.I_t} For $\Phi\in M_T(U,V)$ we define \begin{align*} I_t(\Phi)v^\ast:= \sum_{k=1}^\infty \int_0^t \scapro{\Phi(s)i_Q e_k}{v^\ast}\, dB_k(s) \qquad \text{in }L^2(\Omega) \end{align*} for all $v^\ast\in V^\ast$ and $t \in [0,T]$. \end{definition} The stochastic integrals appearing in Definition \ref{de.I_t} are the well-known real-valued It{\^o} integrals and they are well defined thanks to our assumption on $\Phi$. In the next lemma we establish that the asserted limit exists: \begin{lemma}\label{le.cylintwell} $I_t(\Phi):V^\ast \to L^2(\Omega)$ is a well-defined cylindrical random variable in $V$ which is independent of the representation of $W$, i.e. of $(e_n)_{n\in\N}$ and $(u_n^\ast)_{n\in\N}$. \end{lemma} \begin{proof} We begin by establishing the convergence in $L^2(\Omega)$.
For that, let $m,n\in \N$ and define for simplicity $h(s):=i_Q^\ast\Phi^\ast(s)v^\ast$. Doob's theorem implies \begin{align*} & E\abs{\sup_{0\leqslant t\leqslant T} \sum_{k=m+1}^{n}\int_0^t \scapro{\Phi(s)i_Q e_k}{v^\ast}\,dB_k(s)}^2\\ &\qquad \leqslant 4 \sum_{k=m+1}^{n}\int_0^T E\scaproh{e_k}{h(s)}^2\,ds\\ &\qquad \leqslant 4 \sum_{k=m+1}^{\infty}\int_0^T E\scaproh{\scaproh{e_k}{h(s)}e_k}{h(s)}\,ds\\ &\qquad = 4 \sum_{k=m+1}^{\infty}\sum_{l=m+1}^\infty\int_0^T E\scaproh{\scaproh{e_k}{h(s)}e_k}{\scaproh{e_l}{h(s)}e_l}\,ds\\ &\qquad =4 \int_0^T E\norm{(\Id -\pi_m)h(s)}_H^2\,ds, \end{align*} where $\pi_m:H\to H$ denotes the projection onto the span of $\{e_1,\dots, e_m\}$. Because $\norm{(\Id -\pi_m)h(s)}_H^2\to 0$ for $m\to \infty$ and \begin{align*} \int_0^T E\norm{(\Id -\pi_m)h(s)}_H^2\,ds \leqslant \norm{i_Q^\ast}^2_{U^\ast\to H} \int_0^T E\norm{\Phi^\ast(s,\cdot)v^\ast}^2_{U^\ast}\,ds<\infty \end{align*} we obtain by Lebesgue's theorem the convergence in $L^2(\Omega)$. To prove the independence of the chosen representation of $W$ let $(f_l)_{l\in\N}$ be another orthonormal basis of $H$ and $w^\ast_l \in U^\ast $ such that $i_Q^\ast w^\ast_l=f_l$ and let $(C_l(t):\,t\geqslant 0)$ be the independent Wiener processes defined by $C_l(t)=W(t)w^\ast_l$. As before we define in $L^2(\Omega)$: \begin{align*} \tilde{I}_t(\Phi)v^\ast:=\sum_{l=1}^\infty \int_0^t \scapro{\Phi(s)i_Q f_l}{v^\ast}\,dC_l(s) \qquad\text{for all }v^\ast\in V^\ast. \end{align*} The relation $\Cov(B_k(t), C_l(t))=t \scaproh{i_Q^\ast u_k^\ast}{i_Q^\ast w_l^\ast}= t \scaproh{e_k}{f_l}$ enables us to calculate \begin{align*} & E\abs{I_t(\Phi)v^\ast-\tilde{I}_t(\Phi)v^\ast}^2\\ &\qquad= E\abs{I_t(\Phi)v^\ast}^2+ E\abs{\tilde{I}_t(\Phi)v^\ast}^2 -2 E\left[ \big(I_t(\Phi)v^\ast\big)\big( \tilde{I}_t(\Phi)v^\ast\big)\right]\\ &\qquad= \sum_{k=1}^\infty \int_0^t E\scapro{\Phi(s)i_Qe_k}{v^\ast}^2\,ds + \sum_{l=1}^\infty \int_0^t E\scapro{\Phi(s)i_Qf_l}{v^\ast}^2\,ds\\ &\qquad\qquad -2 \sum_{k=1}^\infty \sum_{l=1}^\infty \int_0^t E\left[\scapro{\Phi(s)i_Q e_k}{v^\ast} \scapro{\Phi(s)i_Q f_l}{v^\ast} \scaproh{i_Q^\ast u_k^\ast}{i_Q^\ast w_l^\ast}\right]\,ds\\ & \qquad = 2\int_0^t E\norm{i_Q^\ast\Phi^\ast(s) v^\ast}_H^2\,ds -2 \int_0^t E\norm{i_Q^\ast\Phi^\ast (s) v^\ast}_H^2\,ds \\ &\qquad =0, \end{align*} which proves that $I_t(\Phi)$ does not depend on $(e_k)_{k\in\N}$ and $(u_k^\ast)_{k\in\N}$. The linearity of $I_t(\Phi)$ is obvious and hence the proof is complete. \end{proof} Our next definition is not very surprising: \begin{definition} For $\Phi\in M_T(U,V)$ we call the cylindrical random variable \begin{align*} \int_0^t \Phi(s)\, dW(s):=I_t(\Phi) \end{align*} the {\em cylindrical stochastic integral with respect to $W$}. \end{definition} Because the cylindrical stochastic integral is strongly based on the well-known real-valued It{\^o} integral many features can be derived easily. We collect the martingale property and It{\^o}'s isometry in the following theorem. \begin{theorem} Let $\Phi$ be in $M_T(U,V)$. Then we have \begin{enumerate} \item[{\rm (a)}] for every $v^\ast\in V^\ast$ the family \begin{align*} \left(\Big(\int_0^t \Phi(s)\,dW(s)\Big)v^\ast:\,t\in [0,T]\right) \end{align*} forms a continuous square-integrable martingale. \item[{\rm (b)}] It{\^o}'s isometry holds: \begin{align*} E\abs{\Big(\int_0^t \Phi(s)\,dW(s)\Big)v^\ast}^2= \int_0^t E\norm{i_Q^\ast\Phi^\ast(s) v^\ast}_H^2\,ds.
Our next definition is not very surprising:
\begin{definition}
For $\Phi\in M_T(U,V)$ we call the cylindrical random variable
\begin{align*}
\int_0^t \Phi(s)\, dW(s):=I_t(\Phi)
\end{align*}
the {\em cylindrical stochastic integral with respect to $W$}.
\end{definition}
Because the cylindrical stochastic integral is strongly based on the well-known real-valued It{\^o} integral many features can be derived easily. We collect the martingale property and It{\^o}'s isometry in the following theorem.
\begin{theorem}
Let $\Phi$ be in $M_T(U,V)$. Then we have
\begin{enumerate}
\item[{\rm (a)}] for every $v^\ast\in V^\ast$ the family
\begin{align*}
\left(\Big(\int_0^t \Phi(s)\,dW(s)\Big)v^\ast:\,t\in [0,T]\right)
\end{align*}
forms a continuous square-integrable martingale;
\item[{\rm (b)}] It{\^o}'s isometry:
\begin{align*}
E\abs{\Big(\int_0^t \Phi(s)\,dW(s)\Big)v^\ast}^2= \int_0^t E\norm{i_Q^\ast\Phi^\ast(s) v^\ast}_H^2\,ds.
\end{align*}
\end{enumerate}
\end{theorem}
\begin{proof}
(a) In Lemma \ref{le.cylintwell} we have identified $I_t(\Phi)v^\ast$ as the limit of
\begin{align*}
M_n(t):= \sum_{k=1}^{n}\int_0^t \scapro{\Phi(s)i_Q e_k}{v^\ast}\,dB_k(s),
\end{align*}
where the convergence takes place in $L^2(\Omega)$ uniformly on the interval $[0,T]$. As $(M_n(t):\,t\in [0,T])$ are continuous martingales the assertion follows.
(b) Using It{\^o}'s isometry for real-valued stochastic integrals we obtain
\begin{align*}
E\abs{\Big(\int_0^t \Phi(s)\,dW(s)\Big)v^\ast}^2
&= \sum_{k=1}^\infty E\left[\int_0^t \scapro{\Phi(s)i_Q e_k}{v^\ast}\,dB_k(s)\right]^2\\
&=\sum_{k=1}^\infty \int_0^t E\scaproh{e_k}{i_Q^\ast \Phi^\ast (s) v^\ast}^2\,ds\\
&= \int_0^t E\norm{i_Q^\ast\Phi^\ast (s) v^\ast}_H^2\, ds.
\end{align*}
\end{proof}
An obvious question is under which conditions the cylindrical integral is induced by a $V$-valued random variable. The answer to this question will also allow us to relate the cylindrical integral to other known definitions of stochastic integrals in infinite dimensional spaces. From our point of view the following corollary is an obvious consequence. We call a stochastic process $\Phi\in M_T(U,V)$ non-random if it does not depend on $\omega\in\Omega$.
\begin{corollary}\label{co.intind}
For non-random $\Phi\in M_T(U,V)$ the following are equivalent:
\begin{enumerate}
\item[{\rm (a)}] $\displaystyle \int_0^T \Phi(s)\,dW(s)$ is induced by a $V$-valued random variable;
\item[{\rm (b)}] there exists a Gaussian measure $\mu$ on $V$ with covariance operator $R$ such that:
\begin{align*}
\int_0^T \norm{i_Q^\ast \Phi^\ast (s)v^\ast}_H^2\,ds =\scapro{Rv^\ast}{v^\ast} \qquad\text{for all }v^\ast\in V^\ast.
\end{align*}
\end{enumerate}
\end{corollary}
\begin{proof}
(a) $\Rightarrow$ (b): If the integral $I_T(\Phi)$ is induced by a $V$-valued random variable then this random variable is centred Gaussian, say with covariance operator $R$. Then It{\^o}'s isometry yields
\begin{align*}
\scapro{Rv^\ast}{v^\ast} =E\abs{I_T(\Phi)v^\ast}^2 = \int_0^T \norm{i_Q^\ast\Phi^\ast (s) v^\ast}_H^2\, ds.
\end{align*}
(b)$\Rightarrow $(a): Again It{\^o}'s isometry shows that the weakly Gaussian cylindrical distribution of $I_T(\Phi)$ has covariance operator $R$ and thus extends to a Gaussian measure on $V$.
\end{proof}
The condition (b) of Corollary \ref{co.intind} is derived in van Neerven and Weis \cite{NeeWeis} as a sufficient and necessary condition for the existence of the stochastic Pettis integral introduced in that work. Consequently, it is easy to see that under the equivalent conditions (a) or (b) the cylindrical integral coincides with this stochastic Pettis integral. Further relations of condition (b) to $\gamma$-radonifying properties of the integrand $\Phi$ can also be found in \cite{NeeWeis}.
Our next result relates the cylindrical integral to the stochastic integral in Hilbert spaces as introduced in Da Prato and Zabczyk \cite{DaPrato92}. For that purpose, we assume that $U$ and $V$ are separable Hilbert spaces. Let $W$ be a strongly cylindrical Wiener process in $U$ and let the inclusion mapping $i_Q:H_Q\to U$ be Hilbert-Schmidt. Then there exist an orthonormal basis $(f_k)_{k\in \N}$ in $U$ and real numbers $\lambda_k\geqslant 0$ such that $Qf_k=\lambda_kf_k$ for all $k\in \N$. For the following we can assume that $\lambda_k\neq 0$ for all $k\in\N$.
By defining $e_k:=\sqrt{\lambda_k}f_k$ for all $k\in \N$ we obtain an orthonormal basis of $H_Q$, and $W$ can be represented as usual as a sum with respect to this orthonormal basis. Our assumption that $i_Q$ is Hilbert-Schmidt is not a restriction, because in general the integral with respect to a strongly cylindrical Wiener process is defined in \cite{DaPrato92} by extending $U$ such that $i_Q$ becomes Hilbert-Schmidt.
\begin{corollary}
Let $W$ be a strongly cylindrical Wiener process in a separable Hilbert space $U$ with $i_Q:H_Q\to U$ Hilbert-Schmidt. If $V$ is a separable Hilbert space and $\Phi\in M_T(U,V)$ is such that
\begin{align*}
\sum_{k=1}^\infty \lambda_k \int_0^T E\norm{\Phi(s)i_Qe_k}^2_V\,ds <\infty,
\end{align*}
then the cylindrical integral
\begin{align*}
\int_0^T \Phi(s)\,dW(s)
\end{align*}
is induced by a $V$-valued random variable. This random variable is the standard stochastic integral in Hilbert spaces of $\Phi$ with respect to $W$.
\end{corollary}
\begin{proof}
By Theorem \ref{th.cyl=radon} the cylindrical Wiener process $W$ is induced by a $U$-valued Wiener process $Y$. We define $U$-valued Wiener processes $(Y_N(t):\,t\in [0,T])$ by
\begin{align*}
Y_N(t)=\sum_{k=1}^N i_Q e_k B_k(t).
\end{align*}
Theorem \ref{th.Usum} implies that $Y_N(t)$ converges to $Y(t)$ in $L^2(\Omega;U)$. By our assumption on $\Phi$ the stochastic integrals $\Phi\circ Y_N(T)$ in the sense of Da Prato and Zabczyk \cite{DaPrato92} exist and converge to the stochastic integral $\Phi\circ Y(T)$ in $L^2(\Omega;V)$, see \cite[Ch.4.3.2]{DaPrato92}. On the other hand, by first considering simple functions $\Phi$ and then extending to the general case we obtain
\begin{align*}
\scapro{\Phi\circ Y_N(T)}{v^\ast}=\sum_{k=1}^N \int_0^T \scapro{\Phi(s)i_Qe_k}{v^\ast}\, dB_k(s)
\end{align*}
for all $v^\ast\in V^\ast$. By Definition \ref{de.I_t} the right hand side converges in $L^2(\Omega)$ to
\begin{align*}
\left(\int_0^T \Phi(s)\,dW(s)\right)v^\ast,
\end{align*}
whereas at least a subsequence of $(\scapro{\Phi\circ Y_N(T)}{v^\ast})_{N\in\N}$ converges to $\scapro{\Phi\circ Y(T)}{v^\ast}$ $P$-a.s.
\end{proof}
Based on the cylindrical integral one can build up a whole theory of {\em cylindrical stochastic differential equations}. Of course, a solution will in general be a cylindrical process, but there is no need to put geometric constraints on the state space under consideration. If, for some reason, one is interested in classical stochastic processes as solutions, one can tackle this problem as in our last two results by deriving sufficient conditions guaranteeing that the cylindrical solution is induced by a $V$-valued random process.
\end{document}
\begin{document} \title{Every Borel automorphism without finite invariant measures admits a two-set generator} \author{Michael Hochman} \maketitle \xdef\@thefnmark{}\@footnotetext{Partially supported by ISF grant 1409/11 and ERC grant 306494}\xdef\@thefnmark{}\@footnotetext{\emph{2010 Mathematics Subject Classication}. 37B10, 37A35, 37A40, 37A99} \begin{abstract} We show that if an automorphism of a standard Borel space does not admit finite invariant measures, then it has a two-set generator. This implies that if the entropies of invariant probability measures of a Borel system are all less than $\log k$, then the system admits a $k$-set generator, and that a wide class of hyperbolic-like systems are classified completely at the Borel level by entropy and periodic points counts. \end{abstract} \tableofcontents{} \section{Introduction} \subsection{Background and statement of results} Borel dynamics is the study of the action of an automorphism $T$, or a group of automorphisms, on a standard Borel space $(X,\mathcal{B})$. These objects appear throughout \,dynamical systems theory, lying at the intersection of ergodic theory and topological dynamics, in the first of which the system is additionally endowed with an invariant measure, and in the second with topology which makes $T$ continuous. But it is perhaps more accurate to say that Borel dynamics lies somewhere between the two: since measurable maps are far more abundant than continuous ones the category is ``looser'' than the topological one, but, in the absence of a reference measure, maps are defined everywhere, rather than almost-everywhere, and so morphisms preserve substantially more of the structure than in ergodic theory. The systematic development of the theory as a branch of dynamics began with the work of Shelah and Weiss \cite{ShelahWeiss1982}, and over the past few decades a close parallel has been established between Borel dynamics and the ergodic theory of conservative transformations \cite{ShelahWeiss1982,Weiss1984,Weiss1989,Nadkarni1990}. Another notable direction in the theory is the study of the orbit relation of Borel actions, see e.g. \cite{JacksonKechrisLouveau2002} (but this will not concern us here). Recently, and more directly related to our work, Borel dynamics has come up in connection with the classification of hyperbolic-like dynamics following Buzzi's work on entropy conjugacy \cite{Buzzi2005,Hochman2013b}. In this paper we resolve a longstanding problem on the existence and size of generators for Borel automorphisms (i.e. actions of $\mathbb{Z}$). Let us begin by describing the situation in ergodic theory, which is classical and intimately related to entropy theory. Given a probability measure $\mu$ on $(X,\mathcal{B})$, a measurable partition $\alpha=\{A_{i}\}$ is called a \emph{generator} (for $\mu$) if the family of iterates $\{T^{j}\alpha\}_{j\in\mathbb{Z}}$ generates the $\sigma$-algebra $\mathcal{B}$ up to $\mu$-null sets. 
The size of the smallest generator is a reflection of the complexity of the system, and when $\mu$ is $T$-invariant it is a classical theorem of Krieger that a $k$-set generator for $\mu$ exists if (and, essentially, only if) $h_{\mu}(T)<\log k$, where $h_{\mu}(T)$ denotes the Kolmogorov-Sinai entropy.\footnote{In fact, $h_{\mu}(T)$ can be expressed purely in terms of the size of generators: writing $g_{\mu}(T)$ for the cardinality of the smallest $\mu$-generator of $T$, we have
\[
h_{\mu}(T)=\lim_{n\rightarrow\infty}\frac{1}{n}\log g_{\mu}(T^{n})
\]
} When $h_{\mu}(T)=\infty$ no finite generator can exist, but, by a theorem of Rohlin, a countable one does.
When one moves away from invariant probability measures to more general ones the picture changes drastically. A measure $\mu$ is \emph{conservative }for $T$ if $T$ preserves the class of $\mu$-null sets and $\mu(A)=0$ for every wandering set $A$, where $A$ is \emph{wandering} if its iterates $T^{n}A$, $n\in\mathbb{Z}$, are pairwise disjoint. Krengel \cite{Krengel970} showed that \emph{every }ergodic conservative measure $\mu$ that is not equivalent to an invariant probability measure admits a two-set generator. This is another manifestation of the absence of a good entropy theory for conservative transformations.\footnote{Several notions of entropy have been suggested for conservative transformations, e.g. \cite{Krengel1967,Parry1969,JanvresseMeyerovitchRoy2010}, but these generally lack many important properties present in the classical notion.}
We now return to the Borel setting. Here, a partition $\alpha=\{A_{i}\}_{i\in I}$ is called a\emph{ (Borel) generator} for $(X,\mathcal{B},T)$ if the $\sigma$-algebra generated by $\{T^{j}\alpha\}_{j\in\mathbb{Z}}$ is equal to $\mathcal{B}$. Such a partition clearly must be a generator for every conservative measure of $T$, so the presence of invariant probability measures of high entropy poses an obstruction to the existence of finite Borel generators, but the theorems of Rohlin and Krengel make it plausible that countable Borel generators could exist. That they always do exist for free actions\footnote{A free $\mathbb{Z}$-action is one without periodic points. In order for countable generators to exist, some assumption on periodic points is needed, since a countable generator cannot exist if there are more than countably many periodic points. If there are only countably many periodic points, then they pose no obstruction.} was established by Benjamin Weiss \cite{Weiss1989}, who showed that every free Borel system $(X,T)$ admits a countable generator. Weiss proved his theorem modulo the $\sigma$-ideal generated by wandering sets, but this qualification can be removed, see e.g. \cite[Corollary 7.6]{Tserunyan2015}. We shall later use the existence of such a generator.
Weiss's theorem left open the question of finite generators when there is no obstruction from the finite invariant measures. Specifically, Weiss asked in \cite{Weiss1989} whether, in the total absence of invariant probability measures, finite generators must exist (again, he allowed wandering sets to be neglected; the stronger version appears in \cite[Problem 5.7 and 6.6(A)]{JacksonKechrisLouveau2002}). The question was partly answered recently by Tserunyan \cite{Tserunyan2015}, who gave an affirmative answer when $T$ is a continuous map of a locally compact separable metric space.
Our main result of the present paper is an answer to the problem in general: \begin{thm} \label{thm:main}Every Borel system without invariant probability measures\footnote{This assumption automatically ensures that $T$ acts freely, since a finite orbit would carry an invariant probability measure.} admits a two-set generator. \end{thm} More generally, given a non-trivial mixing shift of finite type $Y\subseteq\Sigma^{\mathbb{Z}}$, one can find a generator $\alpha=\{A_{i}\}_{i\in\Sigma}$ such that the itineraries lie in $Y$. In \cite[Theorem 1.5]{Hochman2013b} we showed how to obtain a uniform Krieger generator theorem; more precisely, if $(X,T)$ is a free Borel system with $h_{\mu}(T)<\log k$ for every $T$-invariant probability measure $\mu$, then there is an invariant Borel subset $X_{0}\subseteq X$ supporting all finite $T$-invariant measures, and $(X_{0},T|_{X_{0}})$ admits a $k$-set generator. Combining this with Theorem \ref{thm:main} to find a generator for $X\setminus X_{0}$, and working a little to make the images disjoint, we get the following corollary (see Section \ref{sec:Proofs-of-Corollaries}): \begin{cor} \label{cor:generator-with-entropy}Suppose that $(X,T)$ is a free Borel system with $h_{\mu}(T)<\log k$ for every $T$-invariant probability measure $\mu$ (alternatively, for every such measure $\mu$ with a single exception which is Bernoulli of entropy $\log k$). Then there exists a $k$-set Borel generator for $T$. \end{cor} We also note the following related dichotomy, which was conditionally derived in \cite[Theorem 9.5]{Tserunyan2015} from Theorem \ref{thm:main}: A Borel system admits a finite generator if and only if it admits no invariant probability measure of infinite entropy. \subsection{Application to hyperbolic-like dynamics} It is classical and well known that hyperbolic-like maps are ``essentially'' determined by their periodic points counts and entropy. The ``top'' of the system usually consists of a unique invariant probability measure of maximal entropy, which is ergodically isomorphic to a Bernoulli shift. Thus, by Ornstein theory \cite{Ornstein1970}, when the entropies of two such systems are equal, their entropy-maximizing measures are isomorphic. In many special cases, e.g. for mixing shifts of finite type, this isomorphism can be made continuous on a set of full measure, as in the finitary isomorphism theory of Keane and Smorodinsky \cite{KeanSmorodinsky1979}, and even extended farther ``down'' to some of the ``low-entropy'' part of the phase, as is the almost-conjugacy theorem of Adler and Marcus \cite{AdlerMarcus1979}. More recently Buzzi introduced the notion of entropy conjugacy \cite{Buzzi2005}, whereby in the problem above one replaces continuity by measurability in the hope of extending the isomorphism results to a larger class of systems \cite{BoyleBuzziGomez2006,Buzzi2005}. One also hopes to extend the isomorphisms farther into the low-entropy part of the systems, ideally to all of the ``free part'' of the system, that is, to the complement of the periodic points.\footnote{The periodic points themselves can be dealt with separately to give an isomorphism if their numbers are compatible} This possibility was raised in \cite{Hochman2013b}, where it was partly achieved for a large family of systems on sets supporting all non-atomic invariant probability measures (but not all conservative ones). See also \cite{BoyleBuzzi2014}. 
Isomorphisms between the entire free parts of equal-entropy strongly positively recurrent Markov shifts were constructed recently by Boyle, Buzzi and G\'omez \cite{BoyleBuzziGomez2014}, using the special presentations of such subshifts. Using the arguments from \cite{Hochman2013b} together with Theorem \ref{thm:main} one can give a quite general result in this direction. \begin{cor} \label{cor:universal-Borel-systems}Let $h>0$. Then, up to Borel isomorphism, there is a unique homeomorphism $T$ of a Polish space satisfying the following properties: (a) $T$ acts freely, (b) every $T$-invariant probability measure has entropy $\leq h$, and equality occurs for a unique measure which is Bernoulli, (c) $T$ admits embedded mixing SFTs of topological entropy arbitrarily close to $h$. In particular, if two systems from the classes listed below have the same topological entropy, then they are isomorphic, as Borel systems, on the complements of their periodic points. The classes are: Mixing positively-recurrent countable-state shifts of finite type, mixing sofic shifts, Axiom A diffeomorphisms, intrinsically ergodic mixing shifts of quasi-finite type. \end{cor} It remains an open problem whether, on the complement of the periodic points, the isomorphism can be made continuous in any non-trivial cases, e.g. between equal-entropy mixing shifts of finite type which are not topologically conjugate \cite[Problem 1.9]{Hochman2013b}. \subsection{Remark about the role of wandering sets and conservative measures} Although it has no direct bearing on the proof, we digress to say a few words about the role of conservative measures and wandering sets. In ergodic theory one generally neglects nulsets. In Borel dynamics, the appropriate class of dynamically negligible sets is the family $\mathcal{W}\subseteq\mathcal{B}$ of all countable unions of (measurable) wandering sets. It is easy to check that $\mathcal{W}$ is closed under taking measurable subsets and countable unions, i.e. it is a $\sigma$-ideal. Many results from ergodic theory, including Poincar\'{e} recurrence, Rohlin's tower lemma, and hyperfiniteness of the orbit relation for an automorphism, can be proved in the Borel setting if we work modulo $\mathcal{W}$. The $\sigma$-ideal $\mathcal{W}$ is closely related to conservative measures: by definition, every $A\in\mathcal{W}$ is a nullset for every $T$-conservative measure, and Shelah and Weiss \cite{ShelahWeiss1982,Weiss1984} proved the converse, showing that $\mathcal{W}$ consists of precisely those $A\in\mathcal{B}$ which are nullsets for every $T$-conservative measure. In particular, $T$ is dissipative (i.e. $X\in\mathcal{W}$) if and only if it admits no conservative measures. This implies that results which hold a.e. for conservative measures hold everywhere modulo $\mathcal{W}$, because the set of points where the property fails is null for every conservative measure, and hence the set of these points is in $\mathcal{W}$. Now, Krengel's generator theorem says that in a Borel system without invariant probability measures, we can find a two-set generator for every conservative measure. The discussion above hints that one should be able to find a finite Borel generator, at least modulo $\mathcal{W}$. Unfortunately, it is unclear how to glue these generators together. 
One might hope to partition the space into invariant Borel sets, each of which supports a unique conservative measure; then, at least, the partitions given by Krengel's theorem would be of disjoint sets and we could take their union, leaving only the measurability question. Unfortunately, if the system admits conservative measures at all, then no such partition exists,\footnote{This is in contrast to the theorem of Varadarajan \cite{Varadarajan1963}, which gives a partition into invariant Borel sets each supporting a unique invariant probability measure.} see e.g. Weiss \cite{Weiss1984} (alternatively, this is a consequence of the Glimm-Effros theorem and standard results on topologizing Borel systems). This makes it highly unlikely that this ``divide and conquer'' strategy can work.
\subsection{Structure of the proof of Theorem \ref{thm:main}}
Our proof of Theorem \ref{thm:main} is made up of three separate generator theorems, each of which applies to points exhibiting a different form of ``non-stationary'' statistical behavior. By statistical behavior we mean the asymptotics of the number of visits to a set: For $A\in\mathcal{B}$ and $x\in X$ let
\[
S_{n}(x,A)=\frac{1}{n}\sum_{i=0}^{n-1}1_{A}(T^{i}x)
\]
If the limit as $n\rightarrow\infty$ exists, we denote it by
\[
s(x,A)=\lim_{n\rightarrow\infty}S_{n}(x,A)
\]
We write $\overline{s}(x,A)$ and $\underline{s}(x,A)$ for the upper and lower limits.\footnote{We have chosen to work with forward averages because it has some mild simplifying effects, though also some odd side-effects. In some places we will need to consider two-sided averages as well and it would have been possible to use these exclusively.}
\begin{defn}
\label{def:null-div-def-points}Let $x\in X$ and $A\in\mathcal{B}$, and let $\alpha=\{A_{i}\}_{i=1}^{\infty}\subseteq\mathcal{B}$ be a measurable partition of $X$. We say that
\begin{itemize}
\item $x$ is $A$\emph{-null }if $x\in\bigcup_{n=-\infty}^{\infty}T^{n}A$ and $s(x,A)=0$.
\item $x$ is $A$\emph{-divergent }if $S_{n}(x,A)$ diverges.
\item $x$ is $\alpha$\emph{-deficient }if $s(x,A_{i})$ exists and is positive for all $i$ and $\sum s(x,A_{i})<1$.
\end{itemize}
The sets of points satisfying each of the above conditions are denoted $\nul(A)$, $\divergent(A)$ and $\deficient(\alpha)$, respectively.
\end{defn}
These behaviors are ``non-stationary'' in the following sense. If $\mu$ is an ergodic probability measure for $T$ and $\mu(A)>0$, then by the ergodic theorem the frequencies $s(x,A)$ exist $\mu$-a.s. and are equal to $\mu(A)$, which is positive. Hence $x$ is neither $A$-null nor $A$-divergent. Similarly, for any partition $\alpha=\{A_{i}\}$, for $\mu$-a.e. $x$ we have $\sum s(x,A_{i})=\sum\mu(A_{i})=1$, so $x$ is not $\alpha$-deficient.
The sets $\nul(A),\divergent(A)$ and $\deficient(\alpha)$ are measurable and $T$-invariant, and the core of this paper is devoted to proving that the restriction of $T$ to each of them has a finite generator. These constructions share some common infrastructure (see Section \ref{sec:General-strategy}), but the underlying mechanism in each case is rather different.
The construction for null points is quite simple, and related to the construction of generators for infinite invariant measures. We give the details in Section \ref{sec:A-generator-theorem-for-nul-sequences}.
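As an aside, Definition \ref{def:null-div-def-points} is easy to explore on a finite horizon. The sketch below (in Python) computes the running averages $S_{n}(x,A)$ from the $0$--$1$ itinerary recording the visits of an orbit to $A$; the particular itinerary, made of alternating blocks of $0$s and $1$s of doubling length, is an illustrative assumption chosen so that $S_{n}(x,A)$ keeps oscillating, which is the behavior of an $A$-divergent point.
\begin{verbatim}
import numpy as np

def running_averages(itinerary):
    # S_n(x, A) for n = 1, ..., len(itinerary), from the 0-1 visit sequence
    it = np.asarray(itinerary, dtype=float)
    return np.cumsum(it) / np.arange(1, len(it) + 1)

# alternating blocks 0^1 1^2 0^4 1^8 ... : the visit frequency has no limit
blocks = [np.full(2 ** j, j % 2) for j in range(15)]
S = running_averages(np.concatenate(blocks))

tail = S[len(S) // 2:]
print("S_n still oscillates between %.3f and %.3f" % (tail.min(), tail.max()))
\end{verbatim}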
The construction for divergent points is new, and of independent interest: it gives an effective and optimal (though in no sense efficient) source coding algorithm for sequences that do not have a limiting mean value. This may be seen as another manifestation of the necessity of stationary statistics for the existence of an entropy theory. The details appear in Section \ref{sec:A-generator-theorem-for-div-points}. The deficient case, given in Section \ref{sec:A-generator-theorem}, is the most involved of the three, though also in a sense the most classical. It partly relies on the other two cases, and it is the only one where entropy makes an appearance. In fact a crucial component will be a version of the Krieger generator theorem, given in Section \ref{sec:Krieger}, that uses only empirical statistics to find a finite partition generating the same $\sigma$-algebra as a given countable partition of finite empirical entropy. Let us now explain how all this comes together to give Theorem \ref{thm:main}. The starting point is Nadkarni's beautiful characterization of Borel systems which do not admit finite invariant measures. Recall that a set $D\in\mathcal{B}$ is called a \emph{sweeping out set }if $\bigcup_{i\in\mathbb{Z}}T^{i}D=X$. \begin{thm} [Nadkarni, \cite{Nadkarni1990}] \label{thm:Nadkarni}Let $(X,\mathcal{B},T)$ be a Borel system. Then $T$ does not admit an invariant probability measure if and only if there exists a sweeping out set $D\in\mathcal{B}$, a measurable partition $\{D_{n}\}_{n=1}^{\infty}$ of $X$, and integers $n_{1},n_{2},\ldots$, such that the sets $T^{n_{i}}D_{i}$ are pairwise disjoint, and $T^{n_{i}}D_{i}\subseteq X\setminus D$ for all $i$. \end{thm} Given sets $D,D_{1},D_{2},\ldots\in\mathcal{B}$ and integer $n_{1},n_{2},\ldots\in\mathbb{Z}$ as in the theorem above, let $\mathcal{A}$ denote the (countable) algebra generated by $D,D_{1},D_{2},\ldots$. We claim that every $x\in X$ is either null for some $A\in\mathcal{A}$, divergent for some $A\in\mathcal{A}$, or deficient for the partition $\alpha=\{D_{i}\}_{i=1}^{\infty}$. Indeed suppose $x$ is not null or divergent for any $A\in\mathcal{A}$. By non-divergence, the frequencies $s(x,A)$ exists for all $A\in\mathcal{A}$, so the set-function $\mu_{x}(A)=s(x,A)$ is a well-defined finitely additive measure on $\mathcal{A}$ that is invariant in the sense that $\mu_{x}(TA)=\mu_{x}(A)$. Also, $x$ is not $D$-null, i.e. $\mu_{x}(D)>0$. Therefore the inclusion $\bigcup_{i=1}^{\infty}T^{n_{i}}D_{i}\subseteq X\setminus D$ implies that for every $N$, \[ \sum_{i=1}^{N}\mu_{x}(D_{i})=\mu_{x}(\bigcup_{i=1}^{N}T^{n_{i}}D_{i})\leq1-\mu_{x}(D) \] Hence $\sum_{i=1}^{\infty}\mu_{x}(D_{i})\leq1-\mu_{x}(D)<1$, showing that $x$ is $\alpha$-deficient. All this goes to show that if a Borel system $(X,\mathcal{B},T)$ does not admit invariant probability measures then we can cover the space by a set of the form $\deficient(\alpha)$ together with countably many sets of the form $\divergent(A)$ and $\nul(A)$. By a standard disjointification argument the cover can be turned into a partition by sets of the same form, and we then can merge sets with common forms to obtain a partition $X=\nul(A')\cup\divergent(A'')\cup\deficient(\alpha)$ (See Section \ref{sub:Generators-for-unions-of-nul-and-div-sets}). We will show that the restriction of $T$ to each of these three invariant sets admits a $k$-set generator for some universal constant $k$. Then, by taking the union of these generators, we obtain a $3k$-set generator for $T$. 
The final step of the proof is to reduce the size of the generator from $3k$ to $2$. This uses the observation that if $T$ has no invariant probability measures then neither does the induced map $T_{C}$ on any sweeping-out set $C\in\mathcal{B}$ with bounded return times. Applying the argument above gives a $3k$-set generator for $T_{C}$. Then, by a version of the Abramov entropy formula for induced maps, and assuming (as one may) that the first return time to $C$ takes values that are large enough relative to $k$, one obtains a $2$-set generator for $T$. A similar argument gives a generator whose itineraries lie in a given mixing shift of finite type. The details of this argument are given in Section \ref{sub:From-finite-to-2-set-generators}. \subsection{Further remarks} It would be quite interesting if it were enough to consider the null or divergent cases alone. In other words, does a Borel system without invariant probability measures always admit a set $A$ such that $X=\nul(A)$? Or a set $B$ such that $X=\divergent(B)$? Besides simplifying the proof of Theorem \ref{thm:main} this would give new characterizations of such systems, and the existence of such a set $B$ would also give an elegant converse to the ergodic theorem. We do not know whether such sets exist, but we point out that for every non-singular measure $\mu$ in the system (which, by assumption, is not equivalent to to an invariant probability measure) there is a set $A$ such that $X=\nul(A)$ modulo $\mu$, and a set $B$ such that $X=\divergent(B)$ modulo $\mu$, so by the Shelah-Weiss characterization of $\mathcal{W}$ it is plausible that our question has a positive answer. Finally, it is very natural to ask the question about generators in the context of more general group actions. The work of Tserunyan mentioned earlier \cite{Tserunyan2015} is restricted by topological assumptions, but it has the remarkable feature that it applies to actions of arbitrary countable groups. Our argument relies on statistical properties of orbits and entropy considerations, and we see no reason why in principle it should not extend to countable amenable groups, but anything beyond this will probably require substantial new ideas. We remark that Tserunyan's proof works for actions of general countable groups, and shows that if a finite generator does not exist, then there is a finitely additive, finite invariant measure for the action \cite[Theorem 4.1 and Corollary 4.4]{Tserunyan2015}; the topology is used to extend this to a countably additive measure. If these measures are not $\sigma$-additive, then, in a sense, they are deficient, and perhaps this could be ruled out using some coding procedure similar to ours to show that deficiency implies a finite generator. However, the coding would need to be done without access to the machinery of F\o{}lner sets, empirical frequencies etc., so in fact quite a different methods would be required. \begin{acknowledgement*} I would like to thank A. Kechris and the anonymous referee for pointing out that there is no need to exclude wandering sets in Theorem \ref{thm:main}. I am also grateful to the referee for a very perceptive and careful reading of the paper, which has led to a much improved manuscript. \end{acknowledgement*} \section{\label{sec:Notation-and-conventions}Notation and conventions} A standard Borel space is a measurable space arising from a complete separable metric space and its Borel $\sigma$-algebra. 
An automorphism of a measure space is a measurable injection with measurable inverse (for standard Borel spaces measurability of the inverse is automatic). A Borel system $(X,\mathcal{B},T)$ consists of a standard Borel space $(X,\mathcal{B})$ and a Borel automorphism $T$ of it. Given a family of sets $\alpha\subseteq\mathcal{B}$ we write $\sigma(\alpha)\subseteq\mathcal{B}$ for the $\sigma$-algebra generated by $\alpha$, and $\sigma_{T}(\alpha)=\sigma(\bigcup_{n\in\mathbb{Z}}T^{n}\alpha)$ for the smallest $T$-invariant $\sigma$-algebra containing $\alpha$. Similarly for a measurable map $f$ defined on $X$ we write $\sigma(f)$ for the smallest $\sigma$-algebra with respect to which $f$ is measurable and $\sigma_{T}(f)$ for the smallest such $T$-invariant $\sigma$-algebra. A factor map from a Borel system $(X,\mathcal{B},T)$ to a Borel system $(Y,\mathcal{C},S)$ is a map $\pi:X\rightarrow Y$ such that $\pi$ is equivariant: $S\pi=\pi T$. Note that the map need not be onto, and the image need not be measurable (which is why we emphasize factor maps rather than factors). Such a map gives rise to a $T$-invariant sub-$\sigma$-algebra by pulling back $\mathcal{C}$ through $\pi$. For a finite or countable alphabet $\Sigma$ we write $\Sigma^{n}$ for the set of words of length $n$ over $\Sigma$, i.e. sequences $w=w_{1}\ldots w_{n}$ with symbols from $\Sigma$. We write $\Sigma^{*}=\bigcup_{n=0}^{\infty}\Sigma^{n}$. A word $a=\Sigma^{n}$ appears in $b\in\Sigma^{*}$ if there is an index $i$ such that $b_{i}b_{i+1}\ldots b_{i+n-1}=a$. We then say that $a$ appears in $b$ at $i$ or that there is an occurrence of $a$ in $b$ at $i$. We also say that $a$ is a subword of $b$. By intervals we mean integer intervals, so $[u,v]=\{i\in\mathbb{Z}\,:\:u\leq i\leq v\}$ (and similarly for half-open intervals and intervals that are unbounded on one or two sides). Given $a\in\Sigma^{*}$ and an interval $[u,v]$ such that $a_{i}$ is defined for $i\in[u,v]$, the subword of $a$ on $[u,v]$ is $a|_{[u,v]}=a_{u}a_{u+1}\ldots a_{v}$. We denote concatenation of words $a\in\Sigma^{m}$, $b\in\Sigma^{n}$ by $ab=a_{1}\ldots a_{m}b_{1}\ldots b_{n}$. We write $a^{n}$ for the $n$-fold self concatenation of a symbol or word $a$. For a countable set $\Sigma$ we frequently work in the space $\Sigma^{\mathbb{Z}}$ of bi-infinite sequences over $\Sigma$ and less frequently in $\Sigma^{\mathbb{N}}$, the space of one-sided sequences. The notation and terminology used for finite sequences generalizes to infinite sequences where appropriate. By taking the discrete topology on $\Sigma$ and the product topology on the product spaces we find that $\Sigma^{\mathbb{N}}$ and $\Sigma^{\mathbb{Z}}$ are separable metrizable spaces, and compact when $\Sigma$ is finite. In particular they carry the Borel $\sigma$-algebra and together with it form standard Borel spaces. The shift maps $S:\Sigma^{\mathbb{N}}\rightarrow\Sigma^{\mathbb{N}}$ and $S:\Sigma^{\mathbb{Z}}\rightarrow\Sigma^{\mathbb{Z}}$ is defined by \[ (Sx)_{i}=x_{i+1} \] $S$ is onto and with respect to the product topology it is continuous, and hence measurable. It is a bijection of $\Sigma^{\mathbb{Z}}$. For simplicity we use the same letter $S$ to denote shifts on sequence spaces over different alphabets and different index sets ($\mathbb{N}$ or $\mathbb{Z}$). We have already defined the frequency $s(x,A)$ of visits of the orbit of $x\in X$ to $A\subseteq X$, including upper and lower versions. We introduce similar notation in the symbolic setting and for subsets of $\mathbb{Z}$. 
For $x\in\Sigma^{\mathbb{Z}}$ and $a\in\Sigma^{*}$ let
\[
S_{N}(x,a)=\frac{1}{N}\#\{0\leq i<N\,:\,a\mbox{ appears in }x\mbox{ at }i\}
\]
and define the upper and lower frequencies of $a$ in $x$ by
\begin{eqnarray*}
\overline{s}(x,a) & = & \limsup_{N\rightarrow\infty}S_{N}(x,a)\\
\underline{s}(x,a) & = & \liminf_{N\rightarrow\infty}S_{N}(x,a)
\end{eqnarray*}
If the two agree their common value is denoted $s(x,a)$ and called the frequency of $a$ in $x$. The upper and lower densities of a subset $I\subseteq\mathbb{Z}$ are defined in the same way: take
\[
S_{N}(I)=\frac{1}{N}|I\cap[0,N-1]|
\]
and
\begin{eqnarray*}
\overline{s}(I) & = & \limsup_{N\rightarrow\infty}S_{N}(I)\\
\underline{s}(I) & = & \liminf_{N\rightarrow\infty}S_{N}(I)
\end{eqnarray*}
The common value, if it exists, is denoted $s(I)$ and called the density of $I$. Note that this is the same as the frequency of $1$ in $1_{I}$.
We will also need to use uniform densities. The version we need is the two-sided one. For $I\subseteq\mathbb{Z}$, the upper and lower uniform densities of $I\subseteq\mathbb{Z}$ are
\begin{eqnarray*}
\overline{s}^{*}(I) & = & \limsup_{N\rightarrow\infty}\left(\sup_{n\in\mathbb{Z}}\frac{1}{N}|I\cap[n,n+N-1]|\right)\\
\underline{s}^{*}(I) & = & \liminf_{N\rightarrow\infty}\left(\inf_{n\in\mathbb{Z}}\frac{1}{N}|I\cap[n,n+N-1]|\right)
\end{eqnarray*}
We write $s^{*}(I)$ for the common value if they coincide, and call it the uniform density of $I$. In a Borel system $(X,T)$, for $x\in X$ and $A\subseteq X$ we write $\overline{s}^{*}(x,A)$ for $\overline{s}^{*}(\{i\,:\,T^{i}x\in A\})$, and similarly $\underline{s}^{*}(x,A)$. Finally, we note the obvious fact that
\[
\underline{s}^{*}(I)\leq\underline{s}(I)\leq\overline{s}(I)\leq\overline{s}^{*}(I)
\]
and that the set-functions $\overline{s}$ and $\overline{s}^{*}$ are sub-additive.
\section{\label{sec:Preliminary-constructions}Preliminary constructions}
In this section we establish some basic machinery for manipulating orbits. We first prove some technical results that reformulate our problem in symbolic terms, and establish a marker lemma. We then show how to manipulate subsets of an orbit in a stationary and measurable manner. One result will say that if $A$ is a subset of an orbit with density $\alpha$ and $\beta<\alpha$ then we can select a subset $B$ of $A$ whose density is approximately $\beta$. Another allows us to construct an injection between subsets $C,D$ of an orbit, assuming that the density of $C$ is less than that of $D$. These are rather elementary observations but will play an important role in our coding arguments, since they allow us to ``move data around'' inside an orbit. We also prove some other auxiliary results of a technical nature.
\subsection{\label{sub:Reformulation-in-terms-of-symbolic-factor-maps}Factor maps and generators}
A \emph{factor map} from a Borel system into $\Sigma^{\mathbb{Z}}$ for a finite set $\Sigma$ is called a symbolic factor map. Given a finite or countable partition $\alpha=\{A_{i}\}_{i\in\Sigma}$ of a Borel system $(X,\mathcal{B},T)$, write $\alpha(x)=i$ if $x\in A_{i}$, and define $\alpha_{*}:X\rightarrow\Sigma^{\mathbb{Z}}$ by $\alpha_{*}(x)_{n}=\alpha(T^{n}x)$. This is a measurable equivariant map, and defines a symbolic factor map if $\alpha$ is finite. The problem of finding a finite generator is equivalent to finding an injective symbolic factor map. To see the equivalence, note that if $\alpha=\{A_{1},\ldots,A_{r}\}$ is a finite generator then the itinerary map $\alpha_{*}$ is a symbolic factor map and injective. Conversely, if $\pi:X\rightarrow\Delta^{\mathbb{Z}}$ is an injective symbolic factor map, then the partition $\{[i]\}_{i\in\Delta}$ is a finite generator for $(\Delta^{\mathbb{Z}},S)$, and equivariance of the factor map implies that $\alpha=\{\pi^{-1}[i]\}_{i\in\Delta}$ is a generator for $X$.
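For concreteness, the itinerary map $\alpha_{*}$ is straightforward to compute in examples. The sketch below (in Python) prints a finite stretch of $\alpha_{*}(x)$ for an illustrative choice of system, an irrational rotation of $[0,1)$ with the two-set partition $\{[0,\frac{1}{2}),[\frac{1}{2},1)\}$; only the mechanism $\alpha_{*}(x)_{n}=\alpha(T^{n}x)$ is relevant here, and the concrete system is an assumption made for the example.
\begin{verbatim}
import math

theta = math.sqrt(2) - 1          # an irrational rotation number

def alpha(x):
    # index of the partition element containing x (A_0 = [0,1/2), A_1 = [1/2,1))
    return 0 if (x % 1.0) < 0.5 else 1

def itinerary(x, n_min, n_max):
    # alpha_*(x)_n = alpha(T^n x) for T(x) = x + theta (mod 1)
    return [alpha(x + n * theta) for n in range(n_min, n_max + 1)]

print("".join(str(b) for b in itinerary(0.1, -10, 10)))
\end{verbatim}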
\subsection{\label{sub:The-space-of-subsets-of-Z}The space $2^{\mathbb{Z}}$}
Let $2^{\mathbb{Z}}$ denote the set of all subsets of $\mathbb{Z}$. We identify each $I\subseteq\mathbb{Z}$ with its indicator sequence $1_{I}\in\{0,1\}^{\mathbb{Z}}$, where
\[
1_{I}(n)=\left\{ \begin{array}{cc}
1 & \mbox{if }n\in I\\
0 & \mbox{otherwise}
\end{array}\right.
\]
In this way $2^{\mathbb{Z}}$ inherits both a Borel structure and the shift map. We shall apply the shift directly to subsets of $\mathbb{Z}$ and note that it is given by
\[
SI=I-1=\{i\in\mathbb{Z}\,:\,i+1\in I\}
\]
Also, given a Borel system $(X,\mathcal{B},T)$, we can speak of measurable and equivariant maps $X\rightarrow2^{\mathbb{Z}}$; specifically, $I:X\rightarrow2^{\mathbb{Z}}$ is equivariant if $I(Tx)=SI(x)$.
\subsection{\label{sub:Aperiodic-sequences-and-markers}Aperiodic sequences and a marker lemma}
Let $\Sigma$ be a countable alphabet, and write
\[
\Sigma_{AP}^{\mathbb{Z}}=\{x\in\Sigma^{\mathbb{Z}}\,:\,x\mbox{ is not periodic}\}
\]
This is an invariant Borel set. In this section and those that follow we construct various factor maps whose domain involves $\Sigma_{AP}^{\mathbb{Z}}$. We note that, instead, one could take any aperiodic Borel system $(X,T)$. Indeed, by Weiss's countable generator theorem \cite{Weiss1989} (strengthened so as not to exclude a $\mathcal{W}$-set using \cite[Corollary 7.6]{Tserunyan2015}), one can embed $(X,T)$ in $(\Sigma_{AP}^{\mathbb{Z}},S)$.
\begin{lem}
\label{lem:low-frequency-words-exist}For every $x\in\Sigma_{AP}^{\mathbb{Z}}$ and every $\varepsilon>0$ there is a block $a\in\Sigma^{*}$ that occurs in $x$ and satisfies $\underline{s}(x,a)<\varepsilon$.
\end{lem}
\begin{proof}
For a finite or infinite word $y$ let $L_{n}(y)$ denote the set of words of length $n$ appearing in $y$ and $N_{n}(y)=|L_{n}(y)|$ their number. It is well known that $x$ is periodic if and only if $\sup_{n}N_{n}(x)<\infty$, so by assumption there is an $n$ such that $N_{n}(x)>1/\varepsilon$. If for this $n$ we had $\underline{s}(x,a)\geq\varepsilon$ for all $a\in L_{n}(x)$ then we would arrive at a contradiction, since
\[
1\geq\sum_{a\in L_{n}(x)}\underline{s}(x,a)\geq N_{n}(x)\cdot\varepsilon>1
\]
Hence there is $a\in L_{n}(x)$ such that $\underline{s}(x,a)<\varepsilon$.
\end{proof}
Note that a word $a\in\Sigma^{*}$ as in the lemma can be chosen measurably from $x\in\Sigma_{AP}^{\mathbb{Z}}$ and in a manner that is constant over $S$-orbits, since one can simply choose the lexicographically least word satisfying the conclusion. Also note that the hypothesis of the lemma holds automatically if $x\in\Sigma^{\mathbb{Z}}$ contains infinitely many distinct symbols.
We say that $I\subseteq\mathbb{Z}$ is $N$-separated if $|j-i|\geq N$ for all distinct $i,j\in I$, and that it is $N$-dense if every interval $[i,i+N-1]$ intersects $I$ non-trivially. Equivalently, the gap between consecutive elements is no larger than $N$. We say that $z\in\{0,1\}^{\mathbb{Z}}$ is $N$-separated or $N$-dense if $z=1_{I}$ for an $N$-separated or $N$-dense set $I$, respectively. We say that $z$ is an $N$-marker if it is $N$-separated and $(N+1)$-dense. More concretely, this means that the distance between consecutive $1$s is $N$ or $N+1$.
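On a long finite window, Lemma \ref{lem:low-frequency-words-exist} can be checked directly: once the number of distinct length-$n$ subwords exceeds $1/\varepsilon$, the least frequent of them has empirical frequency below $\varepsilon$. A minimal sketch (in Python), using a window of the Thue--Morse sequence as an illustrative aperiodic input; on a finite window the counts only approximate $\underline{s}(x,a)$.
\begin{verbatim}
from collections import Counter

def rarest_word(window, n):
    # least frequent length-n word in the window (ties: lexicographically least)
    counts = Counter(tuple(window[i:i + n]) for i in range(len(window) - n + 1))
    total = len(window) - n + 1
    word = min(sorted(counts), key=lambda w: counts[w])
    return word, counts[word] / total

# a window of the Thue-Morse sequence, which is aperiodic
tm = [bin(i).count("1") % 2 for i in range(4096)]
print(rarest_word(tm, n=8))
\end{verbatim}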
We require the following version of the Alpern-Rohlin lemma \cite{Alpern1981}, which we state in symbolic terms.
\begin{lem}
For every $N\in\mathbb{N}$ there is a factor map $\Sigma_{AP}^{\mathbb{Z}}\rightarrow\{0,1\}^{\mathbb{Z}}$ whose image is contained in the $N$-markers.\end{lem}
\begin{proof}
Fix $N$ and $x\in\Sigma_{AP}^{\mathbb{Z}}$. Choose $a\in\Sigma^{*}$ which occurs in $x$ but satisfies $\underline{s}(x,a)<1/N^{2}$. Let
\[
I=\{i\in\mathbb{Z}\,:\,a\mbox{ appears in }x\mbox{ at }i\}
\]
Then $I$ is non-empty and $\underline{s}(I)<1/N^{2}$. Therefore the set
\[
I'=\{i\in I\,:\,(i,i+N^{2})\cap I=\emptyset\}
\]
is non-empty and $N^{2}$-separated. If $I'$ has a least element $i_{0}$ add to $I'$ the numbers $i_{0}-kN^{2}$ for $k=1,2,3\ldots$, and if $I'$ has a maximal element $i_{1}$ add to $I'$ the numbers $i_{1}+kN^{2}$, $k=1,2,3,\ldots$. The resulting set $I''$ is now unbounded above and below and still $N^{2}$-separated. Finally, for each consecutive pair $u<v$ in $I''$, let $L=v-u$, so $L\geq N^{2}$. There is a (unique) representation $L=mN+n(N+1)$ with $m,n\in\mathbb{N}$ and $n<N$. Now add to $I''$ the numbers $u+m'N$ for $0<m'\leq m$ and the numbers $u+mN+n'(N+1)$ for $0<n'\leq n$. Doing this for every consecutive pair $u,v\in I''$, we obtain a set $I'''$ which is measurably determined by $x$, is $N$-separated and $(N+1)$-dense. Set $\pi(x)=1_{I'''}$. This is the desired map.
\end{proof}
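The gap-filling step at the end of the proof is easy to make concrete: a gap of length $L\geq N^{2}$ between consecutive elements $u<v$ of $I''$ is subdivided into $m$ steps of length $N$ followed by $n$ steps of length $N+1$. A short sketch (in Python); the endpoints used in the example are illustrative.
\begin{verbatim}
def fill_gap(u, v, N):
    # subdivide the gap [u, v], of length L >= N^2, into steps of length N and N+1
    L = v - u
    assert L >= N * N
    n = L % N                         # number of steps of length N+1
    m = (L - n * (N + 1)) // N        # number of steps of length N
    marks, pos = [u], u
    for step in [N] * m + [N + 1] * n:
        pos += step
        marks.append(pos)
    return marks                      # ends at v; consecutive gaps are N or N+1

print(fill_gap(0, 107, 10))
\end{verbatim}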
The lemma above might produce a periodic factor; the next result ensures that the image is aperiodic, i.e. it takes an aperiodic sequence on a countable alphabet and ``reduces'' the number of symbols to two, preserving aperiodicity. This will be used when we construct symbolic factors to ensure that the factors are themselves aperiodic. The proposition may be viewed as a baby version of the generator theorem: it gives a symbolic factor map which, while not injective, at least preserves the aperiodicity of points in the domain. It appears in a more general setting in \cite[Theorem 8.7]{Tserunyan2015}.
\begin{prop}
\label{prop:two-symbol-AP-factor}For every $N\in\mathbb{N}$ there is a factor map $\pi:\Sigma_{AP}^{\mathbb{Z}}\rightarrow\{0,1\}_{AP}^{\mathbb{Z}}$ whose image is contained in the aperiodic $N$-markers.\end{prop}
\begin{proof}
Fix $x\in\Sigma_{AP}^{\mathbb{Z}}$. We construct inductively a decreasing sequence of sets $I_{n}\subseteq\mathbb{Z}$ with $I_{n}$ periodic of period $p_{n}$, and $p_{n+1}\geq p_{n}!+p_{n}$. Start by applying the previous lemma to $x$ and $N_{1}=N$ to obtain an $N_{1}$-marker and let $I_{1}\subseteq\mathbb{Z}$ denote the set of indices where this marker is $1$. Then $I_{1}$ is $N$-separated. If it is aperiodic set $\pi(x)=1_{I_{1}}$. Otherwise, denote its period by $p_{1}$ and note that $p_{1}\geq N_{1}=N$. Assume that after $n$ steps we have constructed $I_{1}\supseteq I_{2}\supseteq\ldots\supseteq I_{n}$ and that $I_{n}$ is periodic with period $p_{n}$. Apply the previous lemma to $x$ and $N_{n+1}=p_{n}!+p_{n}$, to obtain an $N_{n+1}$-marker, and let $I'_{n+1}$ denote the positions of the $1$s in it, so the gaps in $I'_{n+1}$ are of length at least $N_{n+1}\geq N$. Let $k=\min\{i\in\mathbb{N}\,:\,S^{i}I'_{n+1}\cap I_{n}\neq\emptyset\}$ and set
\[
I_{n+1}=S^{k}I'_{n+1}\cap I_{n}
\]
If $I_{n+1}$ is aperiodic, define $\pi(x)=1_{I_{n+1}}$. Otherwise continue the induction.
Suppose we did not stop at a finite stage of the construction. First, we claim that $p_{n}\rightarrow\infty$. To see this, note that the gaps in $I_{n+1}$ are of size at least $2p_{n}$, so, since it is periodic, its least period $p_{n+1}$ is greater than $p_{n}$. Observe that there is at most one $i\in\mathbb{Z}$ contained in infinitely many (equivalently all) of the $I_{n}$'s, because the gaps in $I_{n}$ tend to infinity. Define $\pi(x)$ by setting $\pi(x)_{i}=1$ if $i$ is in infinitely many $I_{n}$ and for any other $i$ set
\begin{eqnarray*}
\pi(x)_{i} & = & \max\{n\,:\,i\in I_{n}\}\bmod2\\
 & = & \#\{k\,:\,i\in I_{k}\}\bmod2
\end{eqnarray*}
It is clear that $x\mapsto\pi(x)$ is measurable and equivariant. We claim that $\pi(x)$ is aperiodic. Indeed, suppose it was periodic with least period $q$. Choose $n$ such that $p_{n}>q$ and define $y\in\{0,1\}^{\mathbb{Z}}$ by
\[
y_{i}=\max\{m\leq n\,:\,i\in I_{m}\}\bmod2
\]
Clearly $y$ is periodic with period at most $p'=\lcm_{k\leq n}p_{k}\geq p_{n}$. Also, $\pi(x)$ and $y$ agree everywhere except, possibly, on $I_{n+1}$. But the gaps in $I_{n+1}$ are at least $p_{n}+p'$, and in these gaps $\pi(x)$ and $y$ agree, so there is a $j$ such that $\pi(x)$ and $y$ agree on $[j,j+p'+q]$. But then for $i\in[j,j+p'-1]$ we have $y_{i}=y_{i+q}$, and since $y$ is $p'$-periodic this means that $y$ is $q$-periodic, a contradiction.
We note that $I_{n}\setminus\bigcup_{k>n}I_{k}$ is infinite and unbounded above and below for each $n$, from which it follows easily that $\pi(x)$ contains infinitely many $1$s in both directions.
The sequences $\pi(x)$ are $N$-separated, aperiodic and contain infinitely many $1$s in each direction, but the gaps can still be large. To get $N$-markers, begin with $2N^{2}$ instead of $N$. Then replace each block $10^{m}1$ in $\pi(x)$ with a sequence of the form $1(0^{N-1}1)^{k_{1}}(0^{N}1)^{k_{2}}$, where $k_{1},k_{2}\geq1$ and $k_{2}$ is chosen to be minimal. Since $m\geq2N^{2}$ there exists such a choice of $k_{1},k_{2}$. The original location of $1$s is the location of the central $1$s in the sequences $10^{N}10^{N-1}1$, so the new sequence is aperiodic, and is clearly a measurable equivariant function of $x$, as desired.
\end{proof}
\subsection{\label{sub:Stationary-selection}Stationary selection}
In this section we show that one can select a subset of given approximate density from a set of higher density in a shift-invariant manner. Denote
\[
[0,1]_{<}^{2}=\{(t_{1},t_{2})\in[0,1]^{2}\,:\,t_{1}<t_{2}\}
\]
\begin{lem}
\label{lem:selecting-a-subset}There is a measurable map $\Sigma_{AP}^{\mathbb{Z}}\times2^{\mathbb{Z}}\times[0,1]_{<}^{2}\rightarrow2^{\mathbb{Z}}$ that assigns to each $y\in\Sigma_{AP}^{\mathbb{Z}}$, $I\subseteq\mathbb{Z}$ and $t_{1}<t_{2}$ a subset $J\subseteq I$ in a manner that is equivariant in the sense that $(Sy,SI,t_{1},t_{2})\mapsto SJ$, and which satisfies $\underline{s}(J)\geq t_{1}\underline{s}(I)$ and $\underline{s}(I\setminus J)\geq(1-t_{2})\underline{s}(I)$, and similarly for upper densities.\end{lem}
\begin{rem}
The parameter $y$ may seem superfluous, and it would certainly be less cumbersome if we could define the set $J$ using only $I$ and $t_{1}<t_{2}$. But if $I$ is periodic then any set $J$ determined from it equivariantly must have the same period, and so the density of these sets must be a multiple of $1/p$, where $p$ is the period of $I$. The role of the parameter $y$ is precisely to break any such periodicity.
Also, note that $\underline{s}(I\setminus J)\geq(1-t_{2})\underline{s}(I)$ implies $\overline{s}(J)\leq t_{2}\overline{s}(I)$, and similarly $\overline{s}(I\setminus J)\leq(1-t_{1})\overline{s}(I)$, or with upper and lower densities reversed. But we will not need these upper bounds.\end{rem} \begin{proof} We may assume that $\underline{s}(I)>0$, otherwise there is nothing to prove. Choose rational $t_{1}<\beta_{1}<\beta_{2}<t_{2}$ and $N\in\mathbb{N}$ large enough that $\beta_{1}\underline{s}(I)+\frac{1}{N}<\beta_{2}\underline{s}(I)$. For each finite subset $\emptyset\neq U\subseteq\mathbb{Z}$ choose once and for all a subset $\widehat{U}\subseteq U$ such that $|\widehat{U}|=\left\lfloor \beta_{2}|U|\right\rfloor $, so that \[ \beta_{2}\frac{|U|}{N}-\frac{1}{N}\leq\frac{|\widehat{U}|}{N}\leq\beta_{2}\frac{|U|}{N} \] Let $z=z(y)\in\{0,1\}^{\mathbb{Z}}$ be the $N$-marker derived from $y$ as in Lemma \ref{prop:two-symbol-AP-factor}. Let $U=\{\ldots<u_{-1}<u_{0}<u_{1}<\ldots\}$ denote the positions of $1$'s in $z$, so $u_{n+1}-u_{n}\in\{N,N+1\}$, and let $U_{n}=[u_{n},u_{n+1})$. For each $n$ let \begin{eqnarray*} I_{n} & = & I\cap U_{n}\\ J_{n} & = & \widehat{I_{n}} \end{eqnarray*} and set \[ J=\bigcup_{n\in\mathbb{Z}}J_{n} \] Evidently $J\subseteq I$ and the definition is measurable and equivariant in the stated sense. It remains to estimate the density of $J$. Using the fact that the lengths of $U_{n}$ are uniformly bounded we see that the sequences \[ \frac{1}{n}|I\cap[0,n)|\quad\mbox{and}\quad\frac{1}{u_{n}}\sum_{i=1}^{n}|I_{i}| \] have the same $\limsup$ and $\liminf$ as $n\rightarrow\infty$. Therefore \begin{eqnarray*} \underline{s}(J) & = & \liminf_{n\rightarrow\infty}\frac{1}{u_{n}}\sum_{i=1}^{n}|J_{i}|\\ & \geq & \liminf\frac{1}{u_{n}}\sum_{i=1}^{n}(\beta_{2}|I_{i}|-1)\\ & = & \beta_{2}\underline{s}(I)-\limsup_{n\rightarrow\infty}\frac{n}{u_{n}}\\ & \geq & \beta_{2}\underline{s}(I)-\frac{1}{N}\\ & > & \beta_{1}\underline{s}(I) \end{eqnarray*} where we used the fact that $u_{n}\geq nN-O(1)$. The calculation for $I\setminus J$ is similar, using the fact that $(1-\beta_{2})|U|\leq|U\setminus\widehat{U}|\leq(1-\beta_{1})|U|$. \end{proof} We also will need a version for uniform frequencies: \begin{lem} \label{lem:selecting-uniform-subset}There is a measurable map $\Sigma_{AP}^{\mathbb{Z}}\times2^{\mathbb{Z}}\times[0,1]_{<}^{2}\rightarrow2^{\mathbb{Z}}$ that assigns to each $y\in\Sigma_{AP}^{\mathbb{Z}}$, $I\subseteq\mathbb{Z}$ and $t_{1}<t_{2}$ a subset $J\subseteq I$ so that the assignment is is equivariant in the sense that $(Sy,SI,t_{1},t_{2})\mapsto SJ$, and satisfies $\underline{s}^{*}(J)\geq t_{1}\underline{s}^{*}(I)$ and $\underline{s}^{*}(I\setminus J)\geq(1-t_{2})\underline{s}^{*}(I)$, and similarly for upper uniform densities. \end{lem} The proof is almost exactly the previous one, using the fact that for large enough $N$, for all $n$ we have $\overline{s}^{*}(I)-\frac{1}{N}<\frac{1}{N}|I_{n}|<\overline{s}^{*}(I)+\frac{1}{N}$, and similarly for lower uniform densities. We omit the details. Finally, the following lemma encapsulates the recursive application of the lemmas above. We state the uniform case but the non-uniform one is identical with the requisite changes. Let $Q\subseteq[0,1]^{\mathbb{N}}$ denote the set of sequences $(t_{n})_{n=1}^{\infty}$ such that $\sum t_{n}<1$. 
\begin{lem}
\label{lem:selecting-a-sequence-of-subsets}There is a measurable map $\Sigma_{AP}^{\mathbb{Z}}\times2^{\mathbb{Z}}\times Q\rightarrow(2^{\mathbb{Z}})^{\mathbb{N}}$ that assigns to every $y\in\Sigma_{AP}^{\mathbb{Z}}$, $I\subseteq\mathbb{Z}$, and $(t_{n})_{n=1}^{\infty}\in Q$ a sequence of disjoint subsets $J_{1},J_{2},\ldots\subseteq I$ satisfying $\underline{s}^{*}(J_{n})\geq t_{n}\underline{s}^{*}(I)$, and the assignment is equivariant in the sense that $(Sy,SI,t)\mapsto(SJ_{n})_{n\in\mathbb{N}}$.
\end{lem}
\begin{proof}
Fix $y\in\Sigma_{AP}^{\mathbb{Z}}$ and $I\subseteq\mathbb{Z}$. First, suppose we are given a sequence $0<r_{n}^{-}<r_{n}^{+}<1$. Choose sets $J_{n}$ recursively, applying the previous lemma at stage $n$ to $(y,I\setminus\bigcup_{i<n}J_{i},r_{n}^{-},r_{n}^{+})$. Writing $I_{n}=I\setminus\bigcup_{i<n}J_{i}$ for the set from which $J_{n}$ was chosen, we have the relations
\begin{eqnarray*}
\underline{s}^{*}(J_{n}) & \geq & r_{n}^{-}\cdot\underline{s}^{*}(I_{n})\\
\underline{s}^{*}(I_{n}) & \geq & (1-r_{n-1}^{+})\cdot\underline{s}^{*}(I_{n-1})
\end{eqnarray*}
Therefore
\begin{eqnarray*}
\underline{s}^{*}(I_{n}) & \geq & \prod_{i<n}(1-r_{i}^{+})\cdot\underline{s}^{*}(I)\\
\underline{s}^{*}(J_{n}) & \geq & r_{n}^{-}\cdot\prod_{i<n}(1-r_{i}^{+})\underline{s}^{*}(I)
\end{eqnarray*}
Now let $t_{n}>0$ be given satisfying $\sum t_{n}<1$. We claim that we can choose $0<r_{n}^{-}<r_{n}^{+}<1$ to satisfy
\begin{eqnarray*}
r_{n}^{-}\prod_{i<n}(1-r_{i}^{+}) & > & t_{n}\\
\prod_{i\leq n}(1-r_{i}^{+}) & > & \sum_{i>n}t_{i}
\end{eqnarray*}
This is done by induction: For $n=1$, the requirements simplify to $t_{1}<r_{1}^{-}<r_{1}^{+}<1-\sum_{i>1}t_{i}$, and the existence of such $r_{1}^{\pm}$ follows from the inequality $t_{1}<1-\sum_{i>1}t_{i}$, which is our hypothesis. Next, assuming that the inequalities above hold for $n-1$, write $a=\prod_{i<n}(1-r_{i}^{+})$, so by assumption $a>\sum_{i>n-1}t_{i}$. We are looking for $r_{n}^{\pm}$ satisfying $\frac{1}{a}t_{n}<r_{n}^{-}<r_{n}^{+}<1-\frac{1}{a}\sum_{i>n}t_{i}$, and they exist provided that $\frac{1}{a}t_{n}<1-\frac{1}{a}\sum_{i>n}t_{i}$, which, after rearranging, is just the inequality $\sum_{i\geq n}t_{i}<a$, which we know to hold. In conclusion, we have shown how to find $r_{n}^{\pm}$ as above, and by the discussion at the start of the proof we obtain $\underline{s}^{*}(J_{n})>t_{n}\underline{s}^{*}(I)$, as desired.
\end{proof}
\subsection{\label{sub:Equivariant-partial-injections}Equivariant partial injections}
Next, we show how to construct injections between subsets of $\mathbb{Z}$ in a measurable and equivariant manner. The space of all partially defined maps between countable sets $A$ and $B$ can be represented as $(B\cup\{*\})^{A}$, where $*$ is a symbol not already in $B$, and a sequence $(z_{i})$ in this space represents the map $\{i\in A\,:\,z_{i}\neq*\}\rightarrow B$ given by $i\mapsto z_{i}$. We write $\inj_{*}(A,B)$ for the space of all partially defined injections (the $*$ implying that the maps are partially defined), and note that with the structure above $\inj_{*}(A,B)$ is a Borel set. It is useful to extend the ``shift'' action from sets to functions: for $I,J\subseteq\mathbb{Z}$ and $f:I\rightarrow J$ let $Sf:SI\rightarrow SJ$ be given by $Sf(i)=f(i+1)-1$. We say that a map $X\rightarrow\inj_{*}(\mathbb{Z},\mathbb{Z})$, $x\mapsto f_{x}$, is equivariant if $f_{Tx}=Sf_{x}$, which is just another way of saying that $f_{Tx}(i)=f_{x}(i+1)-1$.
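The greedy construction used in the proof of Lemma \ref{lem:equivarian-injections} below can be previewed on a finite window: at stage $k$, every still-unmatched $i\in I$ is sent to $i+k$ provided that target lies in $J$ and is still free. The following sketch (in Python) is only a finite-window approximation of the equivariant, bi-infinite construction, and the sets $I$, $J$ are illustrative assumptions.
\begin{verbatim}
def greedy_injection(I, J, max_shift):
    # stage k: match i to i + k whenever i is unmatched and i + k in J is unused
    I, J = set(I), set(J)
    f, used = {}, set()
    for k in range(max_shift + 1):
        for i in sorted(I):
            if i not in f and i + k in J and i + k not in used:
                f[i] = i + k
                used.add(i + k)
    return f

I = range(0, 100, 5)                  # density 1/5
J = range(0, 100, 3)                  # density 1/3
f = greedy_injection(I, J, max_shift=10)
print(len(f), "of", len(list(I)), "points matched")
\end{verbatim}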
\begin{lem}
\label{lem:equivarian-injections}Let $y\in\Sigma_{AP}^{\mathbb{Z}}$ and let $I,J\subseteq\mathbb{Z}$ be sets such that $\overline{s}(I)<\overline{s}(J)$ (or $\underline{s}(I)<\underline{s}(J)$). Then there exists a measurable map $(y,I,J)\mapsto f_{(y,I,J)}\in\inj(I,J)$ that is equivariant in the sense that $(Sy,SI,SJ)\mapsto Sf_{(y,I,J)}$. Furthermore, for any $\overline{s}(I)<s<\overline{s}(J)$ (respectively $\underline{s}(I)<s<\underline{s}(J)$) we can ensure that $\overline{s}(\im(f_{(y,I,J)}))<s$ (respectively $\underline{s}(\im(f_{(y,I,J)}))<s$).\end{lem}
\begin{proof}
We prove the statement for upper densities, the lower density case being similar. Fixing $y,I,J$ as in the statement, we first show how to construct $f=f_{(y,I,J)}$ without control over the image density. We define $f$ by induction. At the $k$-th stage we say that $i\in I$ and $j\in J$ are free if $f$ is not yet defined on $i$ and $j$ is not yet in the image. Start with $f=\emptyset$. For each $k$, define $f(i)=i+k$ if $i\in I$ and $i+k\in J$ are free, otherwise leave $f$ undefined on $i$. We claim that $f$ is eventually defined on every $i\in I$. To see this note that by the assumption $\overline{s}(J)>\overline{s}(I)$ (or $\underline{s}(J)>\underline{s}(I)$), there exists a $k$ such that $|I\cap[i,i+k]|<|J\cap[i,i+k]|$; it is clear that this $i$ must have been assigned in one of the first $k$ steps. It is clear that $f_{y}:I\rightarrow J$ is injective and that the construction is shift-invariant and measurable, as required.
For the second statement, given $s>\overline{s}(I)$ let $t_{1}=\overline{s}(I)/\overline{s}(J)$ and $t_{2}=s/\overline{s}(J)$, and apply Lemma \ref{lem:selecting-a-subset} to $(y,J,t_{1},t_{2})$. We obtain a subset $J'\subseteq J$ depending measurably and equivariantly on the data and satisfying $\overline{s}(I)=t_{1}\overline{s}(J)<\overline{s}(J')<t_{2}\overline{s}(J)=s$. Now apply the first part of this lemma to $(y,I,J')$ to obtain $f_{y}\in\inj(I,J')\subseteq\inj(I,J)$. Since $\im f_{y}\subseteq J'$ we have $\overline{s}(\im(f_{y}))<s$.
\end{proof}
We require a variant of Lemma \ref{lem:equivarian-injections} that uses uniform densities and produces partial injections $f\in\inj_{*}(\mathbb{Z},\mathbb{Z})$ with bounded displacement. Here $f\in\inj_{*}(\mathbb{Z},\mathbb{Z})$ is said to have bounded displacement if there is a constant $M=M(f)$ (the displacement) such that $|n-f(n)|<M$ for all $n$ in the domain. When $f=f_{z}$ depends on a parameter $z$ the statement that $f_{z}$ has bounded displacement does not mean that the constant $M(f_{z})$ is uniform in $z$.
\begin{lem}
\label{lem:equivariant-injections-uniform-versions}Let $y\in\Sigma_{AP}^{\mathbb{Z}}$ and let $I,J\subseteq\mathbb{Z}$ be sets such that $\overline{s}^{*}(I)<\underline{s}^{*}(J)$. Then there exists a measurable map $(y,I,J)\mapsto f=f_{(y,I,J)}\in\inj(I,J)$ that is equivariant in the sense that $(Sy,SI,SJ)\mapsto Sf_{(y,I,J)}$, and such that $f_{(y,I,J)}$ has bounded displacement and satisfies $\overline{s}^{*}(\im f_{(y,I,J)})=\overline{s}^{*}(I)$.
\end{lem}
The proof is identical to the previous one, noting that, because of uniformity, $k$ can be chosen from a fixed bounded set and hence $f_{(y,I,J)}$ has bounded displacement. Then use the fact that the last conclusion of the lemma is a consequence of the earlier ones, because:
\begin{lem}
\label{lem:bounded-displacement-preserves-uniform-densities}Let $I\subseteq\mathbb{Z}$.
Suppose that $f:I\rightarrow\mathbb{Z}$ is an injection with bounded displacement and let $J=\im(f)$. Then $\underline{s}^{*}(J)=\underline{s}^{*}(I)$ and $\overline{s}^{*}(J)=\overline{s}^{*}(I)$. \end{lem} The proof is immediate and we omit it. \section{\label{sec:General-strategy}General strategy} In this section we set the stage for the proof of the main theorem, proving a variety of technical results. The main one is Proposition \ref{prop:general-strategy}, which gives a sufficient condition for the existence of a finite generator that will underly the generator theorems in later sections. It also gives a new characterization of Borel systems without invariant probability measures (see the discussion after the proof). \subsection{\label{sub:Constructing-generators-from-allocations}Constructing generators using allocations} For the following discussion it is convenient to have a concrete representation of $X$. To this end fix a measurable (but not equivariant!) bijection $\eta:X\rightarrow\{0,1\}^{\mathbb{N}}$, which can be done because all standard Borel spaces are isomorphic. We then have, for each $x\in X$, a sequence $\eta(x)$ of bits identifying it uniquely. We call $\eta(x)$ the static name of $x$ (static because its definition does not depend on $T$ in any way). Now, if one wants to produce an injective symbolic factor map $X\rightarrow\{0,1\}^{\mathbb{Z}}$, then one must somehow encode the binary sequence $\eta(x)$ in $\pi(x)$. Since the map is equivariant, this means that $\eta(T^{n}x)$ is encoded in $\pi(T^{n}x)$, which is just a shift of $\pi(x)$, so in fact $\pi(x)$ must encode all the sequences $\eta(T^{n}x)$. Thus, what we want to do is encode the binary array $\widehat{x}\in\{0,1\}^{\mathbb{Z}\times\mathbb{N}}$ given by $\widehat{x}_{i,j}=\eta(T^{i}x)_{j}$ into a linear binary sequence $\pi(x)\in\Delta^{\mathbb{Z}}$, in a measurable and equivariant manner. The most direct approach, which is the one we shall use, is to construct an injection $F_{x}:\mathbb{Z}\times\mathbb{N}\rightarrow\mathbb{Z}$. Then we can define $\pi:X\rightarrow\{0,1\}^{\mathbb{Z}}$ by \begin{equation} \pi(x)_{F_{x}(i,j)}=\widehat{x}_{i,j}=\eta(T^{i}x)_{j}\label{eq:factor-map-from-allocation} \end{equation} and fill in any unused bits with $0$. Then every bit in $\widehat{x}$ has been written somewhere in $\pi(x)$. In order to make the map $\pi$ above measurable and equivariant, we must require the same from $F_{x}$. Endow the space of functions between countable sets $A,B$ with the product structure on $B^{A}$, which makes it into a standard Borel space. The subset consisting of injective maps $A\rightarrow B$ is measurable, and we denote it $\inj(A,B)$. We say that a map $X\rightarrow\inj(\mathbb{Z}\times\mathbb{N},\mathbb{Z})$, $x\mapsto F_{x}$, is equivariant if \[ F_{Tx}(i,j)=F_{x}(i+1,j)-1 \] Given $x\mapsto F_{x}$, for each $n\in\mathbb{N}$ we can define functions $F_{x,n}:\mathbb{Z}\rightarrow\mathbb{Z}$ by $F_{x,n}(i)=F_{x}(i,n)$, and then equivariance in the sense above is the same as equivariance, in the sense of Section \ref{sub:Equivariant-partial-injections}, of each of the maps $X\mapsto\inj(\mathbb{Z},\mathbb{Z})$, $x\mapsto F_{x,n}$. \begin{defn} \label{def:allocation}A map $X\rightarrow\inj(\mathbb{Z}\times\mathbb{N},\mathbb{Z})$ that is measurable and equivariant is called an allocation. 
\end{defn} If $x\mapsto F_{x}$ is an allocation, then the map $\pi:X\rightarrow\{0,1\}^{\mathbb{Z}}$ given by (\ref{eq:factor-map-from-allocation}) is easily seen to be measurable, and a short calculation shows that it is also equivariant: To see this, let $y=Tx$ and fix $k\in\mathbb{Z}$ and $n\in\mathbb{N}$, let $i=F_{x}(k+1,n)$ and $j=F_{y}(k,n)$, so $\pi(x)_{i}=\eta(T^{k+1}x)_{n}$ and $\pi(y)_{j}=\eta(T^{k}y)_{n}=\eta(T^{k+1}x)_{n}$, and note that \[ j=F_{y}(k,n)=F_{Tx}(k,n)=F_{x}(k+1,n)-1=i-1 \] This means that $\pi(y)_{i-1}=\pi(x)_{i}$ for $i$ in the image of $F_{x}$. Since clearly $\im(F_{y})=\im(F_{x})-1$, we have $\pi(y)_{i-1}=\pi(x)_{i}=0$ for $i\in\mathbb{Z}\setminus\im F_{x}$. Thus we have shown that $\pi(Tx)=\pi(y)=S\pi(x)$. This procedure for encoding $\widehat{x}$ in $\pi(x)$ is not yet reversible, but by (\ref{eq:factor-map-from-allocation}), if we know both $\pi(x)$ and $F_{x}$ then we can recover the sequence $\eta(x)$ (and in fact $\eta(T^{j}x)$ for all $j$), and therefore recover $x$. Thus we have established the following proposition: \begin{prop} \label{prop:allocation-implies-generator-mod-subalgebra}Let $(X,\mathcal{B},T)$ be a Borel system and $F:x\mapsto F_{x}$ an allocation. Then there is a symbolic factor map $\pi:X\rightarrow\{0,1\}^{\mathbb{Z}}$ (equivalently, a two-set partition $\beta$) such that $\sigma(\pi)\lor\sigma(F)=\mathcal{B}$ (respectively $\sigma_{T}(\beta)\lor\sigma(F)=\mathcal{B}$). \end{prop} \subsection{\label{sub:proof-of-general-strategy}Constructing generators from deficient $\omega$-covers } We say that a collection $\alpha=\{A_{i}\}$ of sets is an $\omega$\emph{-cover} of $X$ if every point $x\in X$ belongs to infinitely many of the $A_{i}$. Our main technical tool for constructing generators is the following: \begin{prop} \label{prop:general-strategy}Let $(X,\mathcal{B},T)$ be a Borel system. Let $\alpha=\{A_{i}\}_{i=1}^{\infty}\subseteq\mathcal{B}$ be an $\omega$-cover of $X$ and suppose that either \begin{enumerate} \item [(a)]$\sum_{i=1}^{\infty}\overline{s}(x,A_{i})<1$ for all $x\in X$, or \item [(b)]There is a partition $\mathbb{N}=\bigcup_{u=1}^{\infty}I_{u}$ such that for each $u\in\mathbb{N}$ the collection $\{A_{i}\}_{i\in I_{u}}$ is pairwise disjoint, and for any finite $J\subseteq\mathbb{N}$ and every $x\in X$ we have $\sum_{u}\overline{s}^{*}(x,\bigcup_{i\in I_{u}\cap J}A_{i})<1-\sup_{i\in\mathbb{N}}\overline{s}^{*}(x,A_{i})$. \end{enumerate} Then there exists a two-set partition $\beta$ such that $\sigma_{T}(\beta)\lor\sigma_{T}(\alpha)=\mathcal{B}$. In particular, if there exists a finite partition $\gamma$ such that $\alpha\subseteq\sigma_{T}(\gamma)$, then $\beta\lor\gamma$ is a finite generator. \end{prop} One can prove many variants using other conditions than (a) or (b), but these are the ones we will need. \begin{proof} We shall show how to construct an allocation from an $\omega$-cover $\alpha=\{A_{i}\}$ satisfying one of the hypotheses of the proposition. The proposition then follows from Proposition \ref{prop:allocation-implies-generator-mod-subalgebra}. We begin with case (a). Fix $x\in X$, and suppress it in the notation below except when needed, in which case it is indicated with a superscript. Let \[ J_{i}=J_{i}^{x}=\{n\in\mathbb{Z}\,:\,T^{n}x\in A_{i}\} \] Note that $x\mapsto J_{i}$ is equivariant and that, since $\alpha$ is an $\omega$-cover, every $n\in\mathbb{Z}$ belongs to infinitely many of the $J_{i}$. 
We next want to define injections $f_{i}^{x}:J_{i}\rightarrow\mathbb{Z}\setminus\bigcup_{j<i}\im(f_{j}^{x})$ so that $x\mapsto f_{i}^{x}$ is measurable and equivariant. Assuming we have done this, given $n\in\mathbb{Z}$ and $j\in\mathbb{N}$ let $i(n,j)$ denote the $j$-th index $i$ such that $n\in J_{i}$ (which is well defined since $n$ belongs to infinitely many of the sets $J_{i}$), and define \[ F_{x}(n,j)=f_{i(n,j)}^{x}(n) \] Since the images of the $f_{i}^{x}$'s are disjoint, $F_{x}\in\inj(\mathbb{Z}\times\mathbb{N},\mathbb{Z})$. Clearly $x\mapsto F_{x}$ is measurable. To see that it is equivariant, note that $i^{Tx}(n,j)=i^{x}(n+1,j)$, because $J_{i}^{Tx}=J_{i}^{x}-1$, so using equivariance of $x\mapsto f_{i}^{x}$, \[ F_{Tx}(n,j)=f_{i^{Tx}(n,j)}^{Tx}(n)=f_{i^{x}(n+1,j)}^{Tx}(n)=f_{i^{x}(n+1,j)}^{x}(n+1)-1=F_{x}(n+1,j)-1 \] So $x\mapsto F_{x}$ is an allocation. It remains to construct the $f_{i}^{x}$. By Weiss's countable generator theorem \cite{Weiss1989} (for the version which does not exclude a $\mathcal{W}$-set see \cite[Theorem 5.4]{JacksonKechrisLouveau2002} or \cite[Corollary 7.6]{Tserunyan2015}), we may assume that $X\subseteq\Sigma_{AP}^{\mathbb{Z}}$ for a countable alphabet $\Sigma$, and $T$ is the shift map. Choose $s_{i}=s_{i}(x)\in(0,1)$ measurably satisfying $\overline{s}(J_{i})<s_{i}<1-\sum_{j<i}s_{j}$ and $\sum_{i=1}^{\infty}s_{i}<1$, which can be done because of the hypothesis $\sum_{i=1}^{\infty}\overline{s}(J_{i})<1$. Now for $i=1,2,\ldots$ apply Lemma \ref{lem:equivarian-injections} inductively to $(x,J_{i},\mathbb{Z}\setminus\bigcup_{j<i}\im(f_{j}^{x}))$ and $s_{i}$. We can do this because by induction we have \begin{equation} \overline{s}(J_{i})<1-\sum_{j<i}s_{j}<1-\sum_{j<i}\overline{s}(\im(f_{j}))\leq\underline{s}(\mathbb{Z}\setminus\bigcup_{j<i}\im(f_{j}))\label{eq:remaining-density-estimate} \end{equation} and the construction can be carried through. The construction of $F_{x}$ under assumption (b) follows the same lines with some minor changes. There is no need to introduce the $s_{i}$, but rather proceed directly, using Lemma \ref{lem:equivariant-injections-uniform-versions} to construct the maps, which will have bounded displacement, and Lemma \ref{lem:bounded-displacement-preserves-uniform-densities} to control the density of the images. At stage $i$, note that there is some $N_{i}=N_{i}(x)$ such that \begin{equation} \bigcup_{j<i}\im f_{j}=\bigcup_{u=1}^{N_{i}}\left(\bigcup_{j<i,j\in I_{u}}\im f_{j}\right).\label{eq:100} \end{equation} For each $u$, as $j$ ranges over $j\in I_{u}$, the domains $J_{j}$ of $f_{j}^{x}$ are disjoint, so we can define \[ \widetilde{f}_{i,u}=\widetilde{f}_{i,u}^{x}=\bigcup_{j<i,j\in I_{u}}f_{j}. \] As the union of finitely many maps with bounded displacement, this map has the same property. Thus, noting that \[ \im\widetilde{f}_{i,u}=\bigcup_{j<i,j\in I_{u}}\im f_{j}, \] and using Lemma \ref{lem:bounded-displacement-preserves-uniform-densities}, by (\ref{eq:100}), we have \[ \overline{s}^{*}(\bigcup_{j<i}\im f_{j})\leq\sum_{u=1}^{N_{i}}\overline{s}^{*}(\im\widetilde{f}_{i,u})=\sum_{u=1}^{N_{i}}\overline{s}^{*}(\bigcup_{j<i,j\in I_{u}}\dom f_{j})\leq\sum_{u=1}^{N_{i}}\overline{s}^{*}(\bigcup_{j\in I_{u}}J_{j}) \] (note that the first inequality is valid since the sum is actually over finitely many $u$). 
But recalling $J_{j}=\{n\,:\,T^{n}x\in A_{j}\}$ and the definition of the sets $I_{u}$ in assumption (b) of the proposition, the last sum is less than $1-\overline{s}^{*}(J_{i})$, so we have \[ \overline{s}^{*}(J_{i})<1-\overline{s}^{*}(\bigcup_{j<i}\im(f_{j}^{x}))\leq\underline{s}^{*}(\mathbb{Z}\setminus\bigcup_{j<i}\im(f_{j}^{x})) \] Thus, Lemma \ref{lem:equivariant-injections-uniform-versions} lets the construction proceed, and this finishes the proof. \end{proof} In the following sections we show that, given a Borel system $(X,\mathcal{B},T)$ without invariant probability measures, one can partition $X$ into two measurable invariant sets (modulo $\mathcal{W}$) such that the first admits an $\omega$-cover satisfying condition (a) of the proposition above, and the second admits an $\omega$-cover satisfying condition (b). Clearly if a system admits an invariant probability measure then no such partition can exist. Thus, we have arrived at another characterization of Borel systems without invariant probability measures. It would be nicer to eliminate the need to partition the space: perhaps there is always an $\omega$-cover (modulo $\mathcal{W}$) that satisfies (a) (or that satisfies (b)), but we have not been able to show this. \subsection{\label{sub:Generators-for-unions-of-nul-and-div-sets}Generators for unions of $\deficient(\alpha)$, $\nul(A_{i})$s and $\divergent(A_{i})$s} Our strategy, as explained in the introduction, is to divide $X$ into sets of points that are null or divergent for countably many sets $A_{i}$, or deficient for some partition $\alpha$. We now indicate how to modify these sets so as to obtain a partition of $X$ into finitely many sets of the same forms. The following is elementary: \begin{lem} \label{lem:nul-and-div-minus-invariant-set}Let $A,B\in\mathcal{B}$ and assume that $B$ is $T$-invariant. Then $\nul(A)\setminus B=\nul(A\setminus B)$ and $\divergent(A)\setminus B=\divergent(A\setminus B)$. \end{lem} As an immediate consequence, we have \begin{lem} \label{lem:reducing-unions-of-nul-and-div-sets}Let $A_{1},A_{2},\ldots\in\mathcal{B}$ and set $D_{1}=A_{1}$ and $D_{n}=A_{n}\setminus\bigcup_{i=-\infty}^{\infty}T^{i}D_{n-1}$. Then \[ \bigcup_{n=1}^{\infty}\nul(A_{n})=\nul(\bigcup_{n=1}^{\infty}D_{n}) \] and similarly if we replace $\nul(\cdot)$ by $\divergent(\cdot)$. \end{lem} Thus, noting that $\deficient(\alpha)$ is invariant, we have \begin{lem} \label{lem:disjointifying-nul-and-div-sets}Let $\alpha\subseteq\mathcal{B}$ and $A_{i},B_{i}\in\mathcal{B}$ and suppose that \[ X=\deficient(\alpha)\cup\bigcup_{i=1}^{\infty}\nul(A_{i})\cup\bigcup_{i=1}^{\infty}\divergent(B_{i}) \] Then there are sets $A,B\in\mathcal{B}$ such that $X=\deficient(\alpha)\cup\nul(A)\cup\divergent(B)$ and the union is disjoint.\end{lem} \begin{proof} By Lemma \ref{lem:nul-and-div-minus-invariant-set} we can replace each $A_{i}$ by $A_{i}\setminus\deficient(\alpha)$ and the hypothesis remains valid. Use the previous lemma to find $A\in\mathcal{B}$ such that $\bigcup_{i=1}^{\infty}\nul(A_{i})=\nul(A)$. By the same reasoning we can replace $B_{i}$ with $B_{i}\setminus(\deficient(\alpha)\cup\nul(A))$ without affecting the hypothesis and find $B\in\mathcal{B}$ with $\bigcup_{i=1}^{\infty}\divergent(B_{i})=\divergent(B)$. But note that $\deficient(\alpha),\nul(A)$ and $\divergent(B)$ are pairwise disjoint and their union is $X$, as desired. 
\end{proof} \subsection{\label{sub:From-finite-to-2-set-generators}From finite to two-set generators} As explained in the introduction, most of the work in the proof of Theorem \ref{thm:main} goes towards proving the following theorem: \begin{thm} \label{thm:enough-to-find-K-set-generator}There is a natural number $K$ such that every Borel system without invariant probability measures admits a $K$-set generator. \end{thm} This is good enough to get two-set generators, because \begin{prop} \label{prop:reduction-from-K-set-to-2-set}Theorem \ref{thm:enough-to-find-K-set-generator} implies Theorem \ref{thm:main}. Furthermore the generator may be chosen so that the itineraries lie in a given mixing non-trivial shift of finite type.\end{prop} \begin{proof} We first prove the existence of a two-set generator without requirements on the itineraries. The proof is basically a variant of Abramov's formula for entropy of an induced map. Taking a set $A$ with large but bounded return times, the induced map will not have invariant probability measures (because such a measure would lift to one on $X$), and so has a $K$-set generator, which can be converted to a $2$-set generator of $X$ by coding each symbol in the space between returns to $A$. Here is the detailed proof. Fix $(X,\mathcal{B},T)$ without invariant probability measures. By hypothesis we can assume that $X\subseteq\Sigma_{AP}^{\mathbb{Z}}$ for $\Sigma=\{1,\ldots,K\}$, with $T$ being the shift. Let $N=4+2\left\lceil \log_{2}K\right\rceil $ and let $\pi:X\rightarrow\{0,1\}^{\mathbb{Z}}$ be an equivariant measurable map into $N$-markers, as provided by Proposition \ref{prop:two-symbol-AP-factor}. Let $A=\{x\in X\,:\,\pi(x)_{0}=1\}$ and $r_{A}(x)=\min\{n>0\,:\,T^{n}x\in A\}$ the entrance time map. By the $N$-marker property, $r_{A}(x)\leq N+1$ for every $x\in X$, and in particular every forward orbit meets $A$. Let $T_{A}(x)=T^{r_{A}(x)}x$ denote the induced map on $A$ and consider the induced system $(A,\mathcal{B}|_{A},T_{A})$. Then $(X,\mathcal{B},T)$ is isomorphic to the suspension of $(A,\mathcal{B}|_{A},T_{A})$ with the bounded roof function $r_{A}$. If $(A,\mathcal{B}|_{A},T_{A})$ admitted a finite invariant measure then the measure could be lifted to the suspension, and the result would be a finite measure because the roof function is bounded, giving a finite invariant measure on $(X,\mathcal{B},T)$. This is impossible, so by our hypothesis, $(A,\mathcal{B}|_{A},T_{A})$ admits a $K$-set generator $\alpha$. We next define a measurable equivariant map $\widetilde{\pi}:X\rightarrow\{0,1\}^{\mathbb{Z}}$. Fix $x\in X$ and $i$ with $\pi(x)_{i}=1$. Set $\widetilde{\pi}(x)_{i}=\widetilde{\pi}(x)_{i+1}=\ldots=\widetilde{\pi}(x)_{i+N/2-1}=1$ and $\widetilde{\pi}(x)_{i+N/2}=0$. Then in the next $N/2-1$ symbols of $\widetilde{\pi}(x)$ write a binary string identifying $\alpha(T^{i}x)$, using some fixed coding of the elements of $\alpha$ (we can do this because there are $K$ possible values for $\alpha(T^{i}x)$ and $N/2-1>\log_{2}K$ available symbols). After doing this for every $i$ with $\pi(x)_{i}=1$, set any undefined symbols in $\widetilde{\pi}(x)$ to $0$. By the $N$-marker property the gap between $1$s in $\pi(x)$ is at least $N$, so we have not tried to define any symbol more than once, and $\widetilde{\pi}(x)$ is well defined. Evidently $x\mapsto\widetilde{\pi}(x)$ is measurable and equivariant. 
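For concreteness (this illustration is not needed for the proof): if $K=2$ then $N=4+2\lceil\log_{2}2\rceil=6$, so around each index $i$ with $\pi(x)_{i}=1$ we write the marker word $1110$ in positions $i,\ldots,i+3$, followed by $N/2-1=2$ binary digits in positions $i+4,i+5$ recording which of the two elements of $\alpha$ contains $T^{i}x$; since consecutive markers are at least $N=6$ apart, these six positions do not overlap the block belonging to the next marker. 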
Now, the word $1^{N/2}0$ occurs only at indices $i$ with $\pi(x)_{i}=1$, so $\pi(x)$ can be recovered from $\widetilde{\pi}(x)$, hence given $\widetilde{\pi}(x)$ we can find all the $i$ such that $T^{i}x\in A$. For such an $i$, we recover $\alpha(T^{i}x)$ by reading off the $N/2-1$ binary digits in $\widetilde{\pi}(x)$ starting at $i+N/2+1$. Thus, $\widetilde{\pi}(x)$ determines $\alpha(T^{i}x)$ for all $i$ such that $T^{i}x\in A$, and since $\alpha$ generates for $T_{A}$, this determines $T^{i}x$ for such $i$, and therefore determines $x$. We have shown that $x\mapsto\widetilde{\pi}(x)$ is an injection, completing the proof of Theorem \ref{thm:main}. Now assume that $Y\subseteq\Lambda^{\mathbb{Z}}$ is a non-trivial mixing shift of finite type (SFT). The modification of the previous proof is rather standard; definitions and basic techniques related to SFTs can be found e.g. in \cite{LindMarcus1995}. We modify the construction above as follows. Using the mixing property of $Y$, choose words $a_{0},a'_{0}$ and $a_{1}$ in $Y$ such that any concatenation of the words appears in $Y$, and every infinite concatenation has a unique parsing into these words. Also require that the length of $a'_{0}$ is greater by one than the length of $a_{0}$. Choose $N$ now to be large relative to the lengths of these words as well, and proceed as before, except that when building the image $\widetilde{\pi}(x)$ we write copies of $a_{0},a'_{0}$ instead of $0$ and $a_{1}$ instead of $1$; the choice between $a_{0},a'_{0}$ is made in such a way that the length of the final concatenation is precisely the distance between occurrences of visits to $A$. The remaining details are left to the reader. \end{proof} \section{\label{sec:A-generator-theorem-for-nul-sequences}A generator theorem for null points} Recall that $x\in\nul(A)$ if $s(x,A)=0$ and $x\in\bigcup_{n=-\infty}^{\infty}T^{n}A$. In this section we prove: \begin{thm} \label{thm:generator-for-nul}Let $(X,\mathcal{B},T)$ be a Borel system and $A\in\mathcal{B}$. Then $\nul(A)$ has a 4-set generator. \end{thm} Heuristically, this result is a Borel version of the generator theorem for infinite invariant measures. Indeed if $\mu$ is such a measure and $A$ is a set with $0<\mu(A)<\infty$, then by Hopf's ratio ergodic theorem $x\in\nul(A)$ for $\mu$-a.e. $x$. In fact the theorem above recovers (most aspects of) Krengel's generator theorem for such measures. \begin{proof} For $i,j\in\mathbb{Z}$ define \[ A_{i,j}=T^{-i}A \] (note that this does not actually depend on $j$). Then for each $j$ the union $\bigcup_{i\in\mathbb{Z}}A_{i,j}$ includes all $x\in\nul(A)$ such that $T^{n}x\in A$ for some $n\in\mathbb{Z}$, so $\bigcup_{i\in\mathbb{Z}}A_{i,j}\supseteq\nul(A)$. Clearly $(A_{i,j})_{i,j\in\mathbb{Z}}$ is an $\omega$-cover of $\nul(A)$. But also $A_{i,j}=T^{-i}A$ so $\overline{s}(x,A_{i,j})\leq\overline{s}(x,A)=0$ for every $x\in\nul(A)$, hence \[ \sum_{i,j\in\mathbb{Z}}\overline{s}(x,A_{i,j})=\sum_{i,j\in\mathbb{Z}}0=0<1\qquad\qquad\mbox{for all }x\in\nul(A) \] The hypotheses of Proposition \ref{prop:general-strategy} are satisfied for the system $(\nul(A),\mathcal{B}|_{\nul(A)},T|_{\nul(A)})$, so there is a two-set partition $\beta$ of $\nul(A)$ such that $\sigma_{T}(\beta)\lor\sigma_{T}(\{A_{i,j}\}_{i,j\in\mathbb{Z}})=\mathcal{B}|_{\nul(A)}$. But setting $\gamma=\{A,\nul(A)\setminus A\}$, clearly $A_{i,j}\in\sigma_{T}(\gamma)$, so $\beta\lor\gamma$ is a generating partition with four sets. 
\end{proof} We remark that, up to removing an invariant set from the wandering ideal $\mathcal{W}$, it is possible to define a partition of $\nul(A)$ which, in a sense, is deficient. Specifically, let $\widetilde{A}_{i}\subseteq\nul(A)$ denote the set of points $x$ such that $T^{i}x\in A$ and $T^{j}x\notin A$ for $0\leq j<i$. Then $\nul(A)\setminus\bigcup\widetilde{A}_{i}$ consists of points which do not enter $A$ in the future, but, by definition of $\nul(A)$, enter it in the past, so $\nul(A)\setminus\bigcup\widetilde{A}_{i}\in\mathcal{W}$. One might hope to apply our coding of deficient partitions to $\alpha=\{\widetilde{A}_{i}\}$. Formally this is not possible, since in our definition of deficient partitions we required positive frequencies. With some adjustment this approach could be made to work. But, in any event, the construction for the deficient case is far more complex than the one above, and such a reduction would not be very enlightening. The construction above applies to many examples of Borel systems without invariant measures. A popular construction of such a system, for example, is to begin with the dyadic odometer $G$ and build the suspension $X$ with respect to a function that is continuous except at one point, and has infinite integral with respect to Haar measure on the base. In such constructions we have $X=\nul(G)$, and the short proof above provides a generator. As noted in the introduction, we don't know whether every Borel system without invariant probability measures is of the form $\nul(A)$ for some measurable set $A$. \section{\label{sec:A-generator-theorem-for-div-points}A generator theorem for divergent points} Our purpose in this section is to construct a finite generator for the set $\divergent(A)$ of points which do not have well-defined visit frequencies to $A$. The key to this is Bishop's quantitative result on the decay of the frequency of repeated fluctuations of ergodic averages. \subsection{\label{sub:Bishops-theorem}Bishop's theorem} Birkhoff's ergodic theorem states that, in a probability preserving system, the ergodic averages of an $L^{1}$ function converge a.e. It is well known that this convergence does not admit a universal rate, even if one fixes the system and varies only the function. Nevertheless, there is an effective version of Birkhoff's theorem, due originally to E. Bishop and subsequently extended by various authors, stated in terms of the probability that there occur many fluctuations of the ergodic averages across a given gap. More precisely, for a map $T:X\rightarrow X$ and $f:X\rightarrow\mathbb{R}$, we say that $x\in X$ has $k$ upcrossings of a real interval $(a,b)\subseteq\mathbb{R}$ (w.r.t. $f$) if there is a sequence $0\leq m_{1}<n_{1}<m_{2}<n_{2}<\ldots<m_{k}<n_{k}$ such that \[ S_{m_{i}}(x,f)<a<b<S_{n_{i}}(x,f)\qquad\mbox{ for }i=1,\ldots,k \] where $S_{n}(x,f)=\frac{1}{n}\sum_{i=0}^{n-1}f(T^{i}x)$. If there is an infinite such sequence we say there are infinitely many upcrossings. Clearly when $X$ carries a measurable structure the set of points with $k$ upcrossings is measurable and we can choose $m_{i}(x),n_{i}(x)$ measurably, e.g. taking the lexicographically least sequence. Bishop's theorem reads as follows. \begin{thm} [Bishop \cite{B66}] Let $(X,\mathcal{B},\mu,T)$ be an ergodic probability-preserving system and $f\in L^{1}(\mu)$. Then for every $a<b$ and every $k$, \[ \mu\left(x\in X\,:\,x\mbox{ has }k\mbox{ upcrossings of }(a,b)\mbox{ (w.r.t.
}f\mbox{)}\right)\leq\frac{\left\Vert f\right\Vert _{1}}{k(b-a)} \] \end{thm} The point is that the rate of decay is universal, depending only on the magnitude of the gap and the norm of $f$ (this normalization or one like it is unavoidable in order for the rate to be invariant under scaling of $f$ and $a,b$). What we need is not precisely the last theorem, but a finitistic variant that is used in its proof. We give the statement for indicator functions. Given $T:X\rightarrow X$ and a set $A\subseteq X$, write \[ U_{a,b,k,N}=\{x\in X\,:\,x\mbox{ has }k\mbox{ upcrossings of }(a,b)\mbox{ w.r.t. }1_{A}\mbox{ up to time }N\} \] ($k$ upcrossings up to time $N$ means that we can choose the times $m_{1},n_{1},\ldots,m_{k},n_{k}$ in the definition with $n_{k}\leq N$). \begin{thm} \label{thm:finiary-Bishop}Let $T:X\rightarrow X$ be a map and $A\subseteq X$. For every $k$, every $a<b$, every $N$ and every $x\in X$, \[ \overline{s}^{*}(x,U_{a,b,k,N})<\frac{2}{k(b-a)} \] \end{thm} In fact this holds with an exponential decay rate \cite{I96}. We do not need this stronger result; all we will use is that the rate is universal (i.e., depends only on $a,b$ and $k$), but the proof is easier; we give a sketch below. Fix $y\in X$ and a large $L\gg N$, and consider the set of times $I\subseteq\{1,\ldots,L\}$ such that $T^{i}y\in U_{a,b,k,N}$ for $i\in I$. Consider the function $f:\{1,\ldots,L+N\}\rightarrow\{0,1\}$ given by $f(i)=1_{A}(T^{i}y)$. For each such $i\in I$ there are times $1\leq m_{1}(i)<n_{1}(i)<m_{2}(i)<n_{2}(i)<\ldots<m_{k}(i)<n_{k}(i)\leq N$, witnessing the fact that $T^{i}y$ has $k$ upcrossings up to time $N$. This means that on each of the intervals $A_{i,j}=[i,i+m_{j}(i))$ the average of $f$ is less than $a$, and on each of the intervals $B_{i,j}=[i,i+n_{j}(i))$, the average of $f$ is greater than $b$; and $A_{i,1}\subseteq B_{i,1}\subseteq A_{i,2}\subseteq\ldots\subseteq A_{i,k}\subseteq B_{i,k}$. Given this combinatorial structure, one now shows that one can obtain \emph{disjoint} families of intervals $\mathcal{A}_{1},\mathcal{B}_{1},\ldots,\mathcal{A}_{k},\mathcal{B}_{k}$, with the $\mathcal{A}_{\ell}$ family consisting of intervals of the form $A_{i,j}$ and the $\mathcal{B}_{\ell}$ families consisting of intervals $B_{i,j}$, such that \[ \cup\mathcal{A}_{1}\subseteq\cup\mathcal{B}_{1}\subseteq\cup\mathcal{A}_{2}\subseteq\ldots\subseteq\cup\mathcal{A}_{k}\subseteq\cup\mathcal{B}_{k} \] and such that $\cup\mathcal{A}_{1}$ is of size comparable to $|I|$. This is a variation on the Vitali covering lemma (observe that for each $1\leq j\leq k$, the original intervals $\{A_{i,j}\}_{i\in I}$ may overlap quite a lot). Finally, we observe that the average of $f$ on $\cup\mathcal{A}_{j}$ is less than $a$, while the average over $\cup\mathcal{B}_{j-1}$ is greater than $b$. Since $f$ is bounded between $0$ and $1$, this says that $|\cup\mathcal{A}_{j}|\geq\frac{b}{a}|\cup\mathcal{B}_{j-1}|$. Thus $|\cup\mathcal{A}_{k}|\geq(\frac{b}{a})^{k-1}|\cup\mathcal{A}_{1}|\gtrsim(\frac{b}{a})^{k-1}|I|$. Finally $\cup\mathcal{A}_{k}\subseteq[1,L+N]\subseteq[1,\frac{b}{a}L]$ (since $N\ll L$), and we conclude that $|I|\lesssim(\frac{a}{b})^{k-2}L$, as desired. Other versions can be found in \cite[Section 2]{CE97} and \cite[Section 2]{KW99}, where one can also read off versions of the statement above. \subsection{\label{sub:Construction-of-the-for-div}Construction of the generator} \begin{thm} \label{thm:generator-for-div}Let $(X,\mathcal{B},T)$ be a Borel system and $A\in\mathcal{B}$. 
Then $\divergent(A)$ has a 4-set generator.\end{thm} \begin{proof} We can assume (by restriction if necessary) that $X=\divergent(A)$. For $x\in X$ write $\delta=\delta(x)=\overline{s}(x,A)-\underline{s}(x,A)$ and let $a=a(x)=\underline{s}(x,A)+\delta/3$ and $b=b(x)=\overline{s}(x,A)-\delta/3$, so $a(\cdot),b(\cdot)$ are measurable and shift invariant and $\underline{s}(x,A)<a<b<\overline{s}(x,A)$. Let $k_{p}=\left\lceil 2^{p+2}/(b-a)\right\rceil $. For $x\in X$ let $m_{i}(x),n_{i}(x)$ be the lexicographically least upcrossing sequence of $x$ with respect to $(a,b)$ and $1_{A}$, and let $A_{p,n}$ denote the set of points whose $k_{p}$-th upcrossing occurs at time $n$, i.e. \[ A_{p,n}=\{x\in X\,:\,n_{k_{p}}(x)=n\} \] These sets are measurable, and we claim that $\alpha=\{A_{p,n}\}_{p,n\in\mathbb{N}}$ is an $\omega$-cover of $X$ satisfying the hypothesis of Proposition \ref{prop:general-strategy}. Indeed, for each $p$ and $x\in X$ the $k_{p}$-th upcrossing of $(a(x),b(x))$ occurs at some time or other, i.e. $X=\bigcup_{n=1}^{\infty}A_{p,n}$ for all $p$, which shows that $\alpha$ is an $\omega$-cover of $X$. We now verify the hypothesis of Proposition \ref{prop:general-strategy} (b). For the index set $\mathbb{N}\times\mathbb{N}$ of $\{A_{p,n}\}$ we choose the partition $I_{p}=\{p\}\times\mathbb{N}$, $p\in\mathbb{N}$. We first claim that for every $N$, \begin{equation} \overline{s}^{*}(x,\bigcup_{n=1}^{N}A_{p,n})<\frac{1}{2^{p+1}}\label{eq:oscilation-implies-low-density} \end{equation} This is enough because for any finite $J\subseteq\mathbb{N}\times\mathbb{N}$ there is an $N$ such that $n\leq N$ for every $(p,n)\in J$, and therefore for every $(q,m)\in\mathbb{N}\times\mathbb{N}$, \begin{eqnarray*} \sum_{p=1}^{\infty}\overline{s}^{*}(x,\bigcup_{(p,n)\in J\cap I_{p}}A_{p,n}) & \leq & \sum_{p=1}^{\infty}\overline{s}^{*}(x,\bigcup_{n=1}^{N}A_{p,n})\\ & < & \sum_{p=1}^{\infty}\frac{1}{2^{p+1}}\\ & = & \frac{1}{2}\\ & < & 1-\overline{s}^{*}(x,A_{q,m}) \end{eqnarray*} where the last inequality is because, by (\ref{eq:oscilation-implies-low-density}) again, $\overline{s}^{*}(x,A_{q,m})<1/2$. It remains to prove (\ref{eq:oscilation-implies-low-density}). Fix $\alpha<\beta$ and consider the $T$-invariant set \[ X_{\alpha,\beta}=\{x\in X\,:\,a(x)=\alpha\,,\,b(x)=\beta\} \] Fix $p,N$ and set $k'_{p}=2^{p+2}/(\beta-\alpha)$. Using the notation of Theorem \ref{thm:finiary-Bishop}, we have \begin{equation} X_{\alpha,\beta}\cap\bigcup_{n=1}^{N}A_{p,n}\subseteq U_{\alpha,\beta,k'_{p},N}\label{eq:divergent-proof-union} \end{equation} Therefore for $x\in X_{\alpha,\beta}$, by Theorem \ref{thm:finiary-Bishop} and monotonicity of $\overline{s}^{*}(x,\cdot)$ we have \[ \overline{s}^{*}(x,\bigcup_{n=1}^{N}A_{p,n})\leq\overline{s}^{*}(x,U_{\alpha,\beta,k'_{p},N})<\frac{2}{k'_{p}(\beta-\alpha)}\leq\frac{1}{2^{p+1}} \] This holds for $x\in X_{\alpha,\beta}$, for all $\alpha<\beta$. But every $x\in X$ belongs to some $X_{\alpha,\beta}$ for some $\alpha<\beta$, and the last inequality gives (\ref{eq:oscilation-implies-low-density}). To conclude the proof, apply Proposition \ref{prop:general-strategy}, which gives a two-set partition $\beta$ of $X$ such that $\sigma_{T}(\beta)\lor\sigma_{T}(\alpha)=\mathcal{B}$. Taking $\gamma=\{A,X\setminus A\}$, we note that $\alpha$ is $\sigma_{T}(\gamma)$-measurable, so by the same proposition $\beta\lor\gamma$ is a $4$-set generator for $(X,\mathcal{B},T)$. 
\end{proof} \section{\label{sec:A-generator-theorem}A generator theorem for deficient points, and putting it all together} We say that $x\in\Sigma^{\mathbb{Z}}$ is regular if the frequency $s(x,a)$ exists for every $a\in\Sigma^{*}$, and otherwise call it divergent. Let $\Regular(\Sigma^{\mathbb{Z}})$ and $\Divergent(\Sigma^{\mathbb{Z}})$ denote the sets of regular and divergent points. If $x\in\Sigma^{\mathbb{Z}}$ is such that $s(x,a)$ exists and is positive for all $a\in\Sigma$, write \[ \rho(x)=\sum_{a\in\Sigma}s(x,a) \] and in this case say that $x$ is deficient if $\rho(x)<1$. We then define the defect to be $1-\rho(x)$. Denote the set of deficient points by $\Deficient(\Sigma^{\mathbb{Z}})$. Finally, say that $x\in\Sigma^{\mathbb{Z}}$ is null if $s(x,a)=0$ for some $a\in\Sigma^{*}$ and write $\Nul(\Sigma^{\mathbb{Z}})$ for the set of null points. Given a Borel system $(X,\mathcal{B},T)$ and a partition $\alpha=\{A_{i}\}_{i\in\Sigma}$, associate to every $x\in X$ its $\alpha$-itinerary, $\alpha_{*}(x)=(\alpha(T^{n}x))_{n\in\mathbb{Z}}\in\Sigma^{\mathbb{Z}}$. We say that $x\in X$ is $\alpha$-regular, $\alpha$-divergent, $\alpha$-deficient or $\alpha$-null if $\alpha_{*}(x)$ is regular, divergent, deficient or null, respectively (this characterization of $\alpha$-deficient points is consistent with the one in the introduction). \subsection{\label{sub:Inducing}Increasing the defect} The goal of this section is to show that, given a defective partition (relative to some point), we can measurably produce another partition which, relative to the point, either has defect arbitrarily close to one, or is divergent. We formulate this in symbolic language. \begin{prop} \label{prop:increasing the defect}For every $\delta>0$ there is a factor map \[ \pi_{\delta}:\Deficient(\Sigma^{\mathbb{Z}})\rightarrow\Deficient(\Sigma^{\mathbb{Z}})\cup\Divergent(\Sigma^{\mathbb{Z}}), \] such that if $x\in\Deficient(\Sigma^{\mathbb{Z}})$ and $\pi_{\delta}(x)$ is regular, then $\rho(\pi_{\delta}(x))<\delta$.\end{prop} \begin{proof} The scheme of the proof is as follows. We describe a (measurable) construction which either produces a divergent point $y\in\Sigma^{\mathbb{Z}}$, in which case we can set $\pi_{\delta}(x)=y$, or else produces an integer $p$ and disjoint subsets $J^{(0)},\ldots,J^{(p)}\subseteq\mathbb{Z}$ and partitions $\{J_{j}^{(k)}\}$ of $J^{(k)}$, such that \begin{enumerate} \item [(i)]$s(\mathbb{Z}\setminus\bigcup_{k=0}^{p}J^{(k)})<\delta/2$, \item [(ii)]$\sum s(J_{j}^{(k)})<\frac{\delta}{2}s(J^{(k)})$ for each $k=0,\ldots,p$. \end{enumerate} Then, identifying $\Sigma$ with $\mathbb{N}\times\mathbb{N}$, we can define \[ \pi_{\delta}(x)_{i}=\left\{ \begin{array}{cc} (k,j) & i\in J_{j}^{(k)}\\ (p+1,0) & i\in\mathbb{Z}\setminus\bigcup_{k=0}^{p}J^{(k)} \end{array}\right. \] and we have \begin{eqnarray*} \rho(\pi_{\delta}(x)) & = & \sum_{k=0}^{p}\sum_{j}s(J_{j}^{(k)})+s(\mathbb{Z}\setminus\bigcup_{k=0}^{p}J^{(k)})\\ & < & \sum_{k=0}^{p}\frac{\delta}{2}s(J^{(k)})+\frac{\delta}{2}\\ & < & \delta \end{eqnarray*} so $\pi_{\delta}(x)$ has defect at least $1-\delta$. We turn to the construction. Without loss of generality we assume that $\delta<1/8$ and $\Sigma=\mathbb{N}$. Let $x\in\mathbb{N}^{\mathbb{Z}}$ be regular and deficient. Note that deficiency implies that $x$ is aperiodic (for a periodic point the symbol frequencies sum to $1$). 
\textbf{Constructing $J^{(0)}$ and $\{J_{j}^{(0)}\}$:} Let $n_{0}=n_{0}(x)$ denote the least integer such that \[ \sum_{j>n_{0}}s(x,j)<\delta^{4}(1-\rho(x)) \] (there exists such $n_{0}$ since $\sum s(x,j)<\infty$ and $1-\rho(x)>0$ by assumption), and let \[ J^{(0)}=\{i\in\mathbb{Z}\,:\,x_{i}>n_{0}\} \] and \[ J_{j}^{(0)}=\{i\in\mathbb{Z}\,:\,x_{i}=j\} \] so that $\{J_{j}^{(0)}\}_{j>n_{0}}$ partitions $J^{(0)}$. Note that \begin{eqnarray*} s(J^{(0)}) & = & 1-\sum_{j\leq n_{0}}s(x,j)\\ & \geq & 1-\rho(x)\\ & > & 0 \end{eqnarray*} Thus, by choice of $n_{0}$, \begin{equation} \sum_{j>n_{0}}s(J_{j}^{(0)})<\delta^{4}(1-\rho(x))\leq\delta^{4}s(J^{(0)})\label{eq:c} \end{equation} so (ii) is satisfied. \textbf{Constructing $J^{(k)},\{J_{j}^{(k)}\}$ for $k=1,\ldots,p$: }Our strategy is now to copy a substantial subset of $J^{(0)}$, and the partition induced on it from $\{J_{j}^{(0)}\}$, into the complement of $J^{(0)}$, and repeat this until most of the complement is exhausted. We would like to do this by mapping $J^{(0)}$ to $\mathbb{Z}\setminus J^{(0)}$ using Lemma \ref{lem:equivarian-injections}, but in the process one loses control of the densities of the images of $J_{j}^{(0)}$. But one can control the frequencies if one works with points in $J^{(0)}$ that are moved by at most some large $M$. The details are worked out in the following lemma, which provides the basic step of the strategy: \begin{lem} Let $J\subseteq\mathbb{Z}$ and suppose that $s(J)$ exists and satisfies \[ \frac{1}{2}\delta s(J^{(0)})<s(J)<\delta s(J^{(0)}) \] Then there exists a set $J'\subseteq J$ and a partition $\{J_{j}\}$ of $J'$, all determined measurably by $x$ and $J$, such that one of the following holds: \begin{enumerate} \item [(a)]$s(J')$ does not exist, \item [(b)]$s(J')>\frac{\delta^{2}}{4}s(J^{(0)})$ and $\sum s(x,J_{j})<\delta s(J')$. \end{enumerate} \end{lem} \begin{proof} By assumption $s(J)<\delta s(J^{(0)})<s(J^{(0)})$, so we can apply Lemma \ref{lem:equivarian-injections} to $x,J,J^{(0)}$, and obtain an injection $f:J\rightarrow J^{(0)}$, determined measurably by $x,J^{(0)},J$, and hence by $x,J$ (since $J^{(0)}$ is itself determined measurably by $x$). For $m=0,1,2,\ldots$ set \[ U_{m}=\{n\in J\,:\,|f(n)-n|=m\} \] If one of the densities $s(U_{m})$ doesn't exist we define $J'=U_{m}$ and we are in case (a). Thus assume these densities exist. If $\sum_{m}s(U_{m})<\delta s(J)$, we define $J'=J$ and $J_{j}=U_{j}$, so $\{J_{j}\}$ partitions $J'$, and we are in case (b). Thus, assume that $\sum_{m}s(U_{m})\geq\delta s(J)$. Choose $M\in\mathbb{N}$ such that \begin{equation} \sum_{m=0}^{M}s(U_{m})>\frac{\delta}{2}s(J)\label{eq:a} \end{equation} Set \[ J'=\bigcup_{m\leq M}U_{m} \] Note that by the hypothesis $s(J)\geq\frac{1}{2}\delta s(J^{(0)})$ we have \begin{equation} s(J')=\sum_{m\leq M}s(U_{m})>\frac{\delta}{2}s(J)\geq\frac{\delta^{2}}{4}s(J^{(0)})\label{eq:b} \end{equation} Next, for $j>n_{0}$ define \[ J_{j}=J'\cap(f^{-1}(J_{j}^{(0)})) \] (we leave it undefined for $j\leq n_{0}$). Clearly $\{J_{j}\}$ is a partition of $J'$, and by (\ref{eq:b}) we have the first part of (b). 
Furthermore, the map $f|_{J'}$, and hence also $(f|_{J'})^{-1}=f^{-1}|_{f(J')}$, displaces points by at most $M$, so these maps preserve densities, and we have \[ s(J_{j})=s(J_{j}^{(0)}\cap f(J'))\leq s(J_{j}^{(0)}) \] Therefore, using (\ref{eq:c}) and (\ref{eq:b}) and the standing assumption $\delta<1/8$, \[ \sum_{j>n_{0}}s(J_{j})\leq\sum_{j>n_{0}}s(J_{j}^{(0)})<\delta^{4}s(J^{(0)})\leq4\delta^{2}s(J')<\frac{\delta}{2}s(J') \] which is the second part of (b). \end{proof} Returning to the proof of the proposition, suppose that $s(\mathbb{Z}\setminus J^{(0)})>\frac{\delta}{2}\geq\frac{\delta}{2}s(J^{(0)})$ (as explained earlier, if not, we are done). Apply Lemma \ref{lem:selecting-a-subset} to $x$ and $I=\mathbb{Z}\setminus J^{(0)}$ to obtain a set $J\subseteq I$ with $\frac{1}{2}\delta s(J^{(0)})<s(J)<\delta s(J^{(0)})$. To this we apply the previous lemma, either obtaining from case (a) of the lemma a set $E=J'$ whose density does not exist, in which case we define $\pi_{\delta}(x)=1_{E}\in\Divergent(\mathbb{N}^{\mathbb{Z}})$, or else obtaining $J^{(1)}\subseteq J\subseteq\mathbb{Z}\setminus J^{(0)}$ and $\{J_{j}^{(1)}\}$ satisfying (b) of the lemma, which gives property (ii) above, and furthermore, $s(J^{(1)})>\frac{\delta^{2}}{4}s(J^{(0)})$, which is a definite increment. We can repeat this inductively: assuming that we have defined $J^{(\ell)}$ and $\{J_{j}^{(\ell)}\}$ for $\ell<k$ and $s(\mathbb{Z}\setminus\bigcup_{\ell<k}J^{(\ell)})\geq\frac{\delta}{2}$ we either define $\pi_{\delta}(x)\in\Divergent(\mathbb{N}^{\mathbb{Z}})$ or obtain $J^{(k)}$ and $\{J_{j}^{(k)}\}$ as required by (ii). At each step the total mass of the $J^{(k)}$s increases by at least $\delta^{2}s(J^{(0)})/4$, so unless the process terminates early with $\pi_{\delta}(x)\in\Divergent(\mathbb{N}^{\mathbb{Z}})$, after a finite number $p$ of steps we cover a set of density $1-\delta/2$, and are done. \end{proof} We re-formulate the proposition in the language of partitions. \begin{cor} \label{cor:iterated-reduction-of-defect-DS-version}Let $\alpha$ be a countable partition of a Borel system $(X,\mathcal{B},T)$. Then for every $\delta>0$ there is a partition $\alpha'$ of $X$ such that every $x\in\Deficient(\alpha)$ is either $\alpha'$-divergent or else $\sum_{A\in\alpha'}s(x,A)<\delta$.\end{cor} \begin{proof} Compose the itinerary map $\alpha_{*}$ with the factor map $\pi_{\delta}$ from the previous proposition, and pull back the standard generating partition of $\Sigma^{\mathbb{Z}}$ (consisting of length-$1$ cylinders). This is $\alpha'$. \end{proof} \subsection{\label{sub:Deficient-partitions-of-finite-entropy}Deficient partitions of finite empirical entropy} For $x\in\Regular(\Sigma^{\mathbb{Z}})$ set \[ \widetilde{H}(x)=-(1-\rho(x))\log(1-\rho(x))-\sum_{a\in\Sigma}s(x,a)\log s(x,a) \] with the usual convention that $0\log0=0$ and logarithms are in base $2$. This is just the entropy of the infinite probability vector whose coordinates are $s(x,a)$ and $1-\rho(x)$. This quantity may in general be infinite, but by merging finite sets of atoms into single atoms one can always make it finite, as the following lemma shows. \begin{lem} \label{lem:reducing-entropy}There exists a factor map $\pi:\Deficient(\Sigma^{\mathbb{Z}})\rightarrow\Deficient(\mathbb{N}^{\mathbb{Z}})$ such that $\widetilde{H}(\pi(x))<2$ for every $x\in\Deficient(\Sigma^{\mathbb{Z}})$. Furthermore, $\pi$ maps regular points to regular points. 
\end{lem} One could replace the upper bound $\widetilde{H}(\pi(x))<2$ by $1+\varepsilon$ for any $\varepsilon>0$, but one cannot ask for $\widetilde{H}(\pi(x))\leq1$ because this is impossible in the case that $\rho(x)=1/2$. An alternative approach would be to use Proposition \ref{prop:increasing the defect} to increase the defect, but possibly produce an irregular point. \begin{proof} Fix an ordering of $\Sigma$. Fix $x\in\Deficient(\Sigma^{\mathbb{Z}})$ and partition $\Sigma$ into finite sets $\Sigma_{1},\Sigma_{2},\ldots$ inductively: writing $s(x,\Sigma')=\sum_{a\in\Sigma'}s(x,a)$ for $\Sigma'\subseteq\Sigma$, we choose $\Sigma_{1}$ to be the shortest initial segment such that $s(x,\Sigma_{1})>\frac{9}{10}\sum_{a\in\Sigma}s(x,a)$, and assuming we have chosen $\Sigma_{1},\ldots,\Sigma_{n-1}$ choose $\Sigma_{n}$ to be the shortest initial segment of $\Sigma\setminus\bigcup_{i<n}\Sigma_{i}$ such that $s(x,\Sigma_{n})>\frac{9}{10}\sum_{a\in\Sigma\setminus(\Sigma_{1}\cup\ldots\cup\Sigma_{n-1})}s(x,a)$. Since $\Sigma_{n}$ takes up more than $9/10$ of the mass of $\Sigma\setminus(\Sigma_{1}\cup\ldots\cup\Sigma_{n-1})$, it is clear that \[ s(x,\Sigma\setminus(\Sigma_{1}\cup\ldots\cup\Sigma_{n}))<\frac{1}{10}s(x,\Sigma\setminus(\Sigma_{1}\cup\ldots\cup\Sigma_{n-1})) \] so \[ s(x,\Sigma_{n})\leq s(x,\Sigma\setminus(\Sigma_{1}\cup\ldots\cup\Sigma_{n-1}))<\frac{1}{10^{n-1}}\rho(x)\qquad\mbox{for }n\geq2 \] Evidently the choice of the $\Sigma_{n}$ is measurable. Now define $\pi(x)$ by \[ \pi(x)_{i}=n\qquad\mbox{if }x_{i}\in\Sigma_{n} \] Clearly (using finiteness of $\Sigma_{n}$), \[ s(\pi(x),n)=\sum_{a\in\Sigma_{n}}s(x,a) \] so $\sum_{n\in\mathbb{N}}s(\pi(x),n)=\sum_{a\in\Sigma}s(x,a)=\rho(x)<1$, and $\pi(x)$ is deficient. Also, by the above, $s(\pi(x),n)<\rho(x)/10^{n-1}\leq10^{-(n-1)}$ for $n\geq2$. Using the facts that $-t\log t\leq\log_{2}e/e<0.54$ for $t\in(0,1]$ and that $t\mapsto-t\log t$ is increasing on $(0,1/e]$ (so that for $n\geq2$ the $n$-th term below is at most $-10^{-(n-1)}\log10^{-(n-1)}$), we get \begin{eqnarray*} -\sum_{n\in\mathbb{N}}s(\pi(x),n)\log(s(\pi(x),n)) & < & 0.54+\sum_{n=2}^{\infty}(n-1)\cdot10^{-(n-1)}\log_{2}10\\ & = & 0.54+\frac{10}{81}\cdot\log_{2}10\\ & < & 0.96 \end{eqnarray*} Since also $-(1-\rho(x))\log(1-\rho(x))<0.54$, we obtain $\widetilde{H}(\pi(x))<2$. Finally, if $x$ is regular then so is $\pi(x)$, since each symbol in $\pi(x)$ corresponds to the occurrences of a finite set of symbols in $x$. \end{proof} The reason we are interested in partitions with finite empirical entropy is the following: \begin{thm} \label{thm:Krieger-generator-theorem-baby-case}For every countable alphabet $\Sigma$, the shift-invariant Borel set $\{x\in\Regular(\Sigma^{\mathbb{Z}})\,:\,\widetilde{H}(x)<2\}$ admits a $4$-set generator. \end{thm} This is a consequence of the more general Krieger-type theorem that we state and prove in Section \ref{sec:Krieger}. We summarize the discussion above in the language of partitions. \begin{cor} \label{cor:reducing-entropy-DS-version}Let $\alpha$ be a countable partition of a Borel system $(X,\mathcal{B},T)$. Let $X'$ denote the set of points that are $\alpha$-regular and $\alpha$-deficient. Then there exist partitions $\alpha',\beta\subseteq\sigma_{T}(\alpha)$ of $X'$ such that every $x\in X'$ is $\alpha'$-regular and $\alpha'$-deficient, $\beta$ has only four sets, and $\alpha'\subseteq\sigma_{T}(\beta)$.\end{cor} \begin{proof} Compose the itinerary map $\alpha_{*}$ with the map from Lemma \ref{lem:reducing-entropy}, so that the image of $X'$ is contained in the set $Y\subseteq\mathbb{N}^{\mathbb{Z}}$ of deficient, regular points $y$ satisfying $\widetilde{H}(y)<2$. 
Let $\alpha'$ be the pull-back to $X'$ of the standard generating partition of $\mathbb{N}^{\mathbb{Z}}$. Now apply the last theorem to find a four-set generating partition for $Y$, and let $\beta$ be its pull-back to $X'$. \end{proof} \subsection{\label{sub:Constructing-the-generator-def-case}Constructing the generator} \begin{thm} \label{thm:generator-for-def}Let $(X,\mathcal{B},T)$ be a Borel system and $\alpha=\{A_{i}\}_{i=1}^{\infty}$ a partition. Then $\deficient(\alpha)$ admits a $16$-set generator. \end{thm} \begin{proof} Fix the partition $\alpha$. In the course of the proof we encounter various sets with respect to which the statistical properties of a given point are, a priori, not known. Every time we encounter such a set $A$ we implicitly separate out the points that are null or divergent for it, and continue to work in the complement of $\nul(A)\cup\divergent(A)$. At the end we will be left with an invariant measurable set $Y\subseteq\deficient(\alpha)$ and a sequence of sets $A_{1},A_{2},\ldots\subseteq\deficient(\alpha)\setminus Y$ such that \[ \deficient(\alpha)=Y\cup\bigcup_{i=1}^{\infty}\left(\nul(A_{i})\cup\divergent(A_{i})\right) \] By Lemmas \ref{lem:reducing-unions-of-nul-and-div-sets} and \ref{lem:disjointifying-nul-and-div-sets} we can find disjoint invariant measurable sets $A',A''$ such that \[ \bigcup_{i=1}^{\infty}\left(\nul(A_{i})\cup\divergent(A_{i})\right)=\nul(A')\cup\divergent(A'') \] By Theorems \ref{thm:generator-for-nul} and \ref{thm:generator-for-div} there are $4$-set generators $\gamma'$ and $\gamma''$ of $\nul(A')$ and $\divergent(A'')$, respectively. Below we shall construct an $8$-set generator $\gamma$ for $Y$. It will then follow that $\gamma\cup\gamma'\cup\gamma''$ is a $16$-set generator for $\deficient(\alpha)$, as desired. We turn to the construction. First, separate out the points that are not $\alpha$-regular, i.e. points that are not regular for some set in the countable algebra generated by the $T$-translates of $\alpha$ (note that $x\in\deficient(\alpha)$ only ensures that $x$ is regular for every atom of $\alpha$). Denote the set of remaining points by $X'$. Using Corollary \ref{cor:reducing-entropy-DS-version}, we find a $4$-set partition $\beta$ of $X'$ and a countable partition $\alpha'\subseteq\sigma_{T}(\beta)$ such that every $x\in X'$ is $\alpha'$-deficient. Applying Corollary \ref{cor:iterated-reduction-of-defect-DS-version} to $\alpha'$ with $\delta_{k}=2^{-(k+1)}$, we obtain partitions $\alpha'_{k}\subseteq\sigma_{T}(\alpha')$ of $X'$ such that every $x\in X'$ is $\alpha'_{k}$-divergent or else it is $\alpha'_{k}$-regular and satisfies $\sum_{A\in\alpha'_{k}}s(x,A)<2^{-(k+1)}$. We separate out the points in the divergent case. Let $Y$ denote the $T$-invariant set that is left after doing this for all $k$. Now, $\bigcup_{k=1}^{\infty}\alpha'_{k}$ is an $\omega$-cover of $Y$, satisfies the hypothesis of Proposition \ref{prop:general-strategy}, and is measurable with respect to $\sigma_{T}(\beta)$. Since $\beta$ has four sets, by Proposition \ref{prop:general-strategy} there exists an $8$-set generator $\gamma$ for $T|_{Y}$, as claimed. \end{proof} \section{\label{sec:Krieger}A generator theorem for countable partitions of finite empirical entropy} In this section we present a version of the Krieger generator theorem for sequences over a countable alphabet which, in a certain sense, have finite entropy. The main novelty is that the statement is ``measureless'', and uses empirical frequencies. 
For points that are generic for an ergodic shift-invariant probability measure this is a slight improvement over the usual Krieger generator theorem, since it gives some additional control of the exceptional set. More significantly, it applies also in other cases. One non-trivial case is when the point is generic for a non-ergodic measure of finite entropy, but the entropy of the ergodic components is unbounded. Another interesting case occurs when a point has well-defined frequencies for all words but is not generic for any measure, e.g. the empirical frequencies of symbols do not sum to $1$. It is this last case that is relevant in the proof of Theorem \ref{thm:main}. The theorem below is stronger than necessary for the application to Theorem \ref{thm:main}, since for that purpose it would have been enough to find a finite generator for the set of sequences $x$ satisfying $\widetilde{H}(\alpha_{*}(x))<2$. But we have not found a significantly simpler argument for this case. It is worth noting that recently Seward \cite{Seward2012} proved a theorem of this type for probability-preserving actions (of arbitrary groups) using an elegant argument that bears some similarities to ours in the way data is ``moved around an orbit''. However, he uses $\sigma$-additivity of the measure in an apparently crucial way to bound the probability of those symbols that require more than $n$ bits to encode, and this fails in our setting, where the (implicit) measures are only finitely additive. This appears to prevent his argument from working in the Borel category. \subsection{Coding shift-invariant data} The following will be used to encode information about the orbit of a point $x\in\Sigma^{\mathbb{Z}}$, i.e. information that is shift invariant. For instance, in a measure-preserving system it could be used to encode the ergodic component to which $x$ belongs, or, in our setting, the empirical frequencies of words in $x$. A similar coding result for shift-invariant functions was obtained in a more general setting in \cite[Section 9]{Tserunyan2015}. It is convenient to consider the space of partially defined infinite sequences sequences over a finite alphabet $\Sigma$, that is, elements of $\Sigma^{I}$ for $I\subseteq\mathbb{Z}$. Given $x\in\Sigma^{I}$ we define the shift on it by $Sx\in\Sigma^{SI}$, $Sx(i)=x(i+1)$. The space of partially defined sequences carries the usual measurable structure. \begin{lem} \label{lem:coding-shift-invariant-data}Let $\Sigma$ be a finite alphabet, $f:\Sigma_{AP}^{\mathbb{Z}}\rightarrow\{0,1\}^{\mathbb{N}}$ a shift-invariant function. Then to each $x,y\in\Sigma_{AP}^{\mathbb{Z}}$ and $I\subseteq\mathbb{Z}$ with $\underline{s}(I)>0$ one can associate $z=z(x,y,I)\in\{0,1\}^{I}$ measurably and equivariantly (i.e. $(Sx,Sy,SI)\mapsto Sz$), and such that $(x,z)$ determines $f(y)$.\end{lem} \begin{proof} Fix $y\in\Sigma_{AP}^{\mathbb{Z}}$ and $I\in2^{\mathbb{Z}}$ with $\underline{s}(I)>0$. Let $\varepsilon_{n}=3^{-n}$ so that $\sum_{n=1}^{\infty}\varepsilon_{n}<1$. Apply Lemma \ref{lem:selecting-a-sequence-of-subsets} to $x$,$I$ and $(\varepsilon_{n})_{n=1}^{\infty}$. We obtain disjoint sets $J_{0},J_{1},J_{2},\ldots\subseteq I$ with $\underline{s}(J_{n})\geq\varepsilon_{n}\underline{s}(I)$ and in particular $J_{n}\neq\emptyset$. Let $J=\bigcup_{n=1}^{\infty}J_{n}$ and define $z\in\{0,1\}^{I}$ by $z|_{J_{n}}\equiv f(y)_{n}$ and $z|_{I\setminus J}\equiv0$. 
Since $z$ determines $I$, and $x$ and $I$ determine $J_{1},J_{2},\ldots$, and $z|_{J_{n}}$ determines $f(y)_{n}$ for all $n$, we see that $(x,z)$ determines $f(y)$. Measurability and equivariance are immediate. \end{proof} \subsection{\label{sub:A-finite-coding-lemma}A finite coding lemma} We require some standard facts from the theory of types. Let $\Delta$ be a finite set and for $x\in\Delta^{n}$ let $P_{x}\in\mathcal{P}(\Delta)$ denote the empirical distribution of digits in $x$, i.e. \[ P_{x}(a)=\frac{1}{n}\#\{1\leq i\leq n\,:\,x_{i}=a\} \] This is sometimes called the type of $x$. The type class of $x$ is the set of all sequences with the same empirical distribution: \[ \mathcal{T}_{x}^{n}=\mathcal{T}_{x}^{n}(\Delta)=\{y\in\Delta^{n}\,:\,P_{x}=P_{y}\} \] The set of type classes of sequences of length $n$ is \[ \mathcal{P}_{n}=\mathcal{P}_{n}(\Delta)=\{P_{y}\,:\,y\in\Delta^{n}\} \] The following standard combinatorial facts can be found e.g. in \cite[Theorems 11.1.1 and 11.1.3]{CoverThomas06}: \begin{prop} \label{prop:type-theorem}For every finite set $\Delta$ and $n\in\mathbb{N}$, \[ |\mathcal{P}_{n}|\leq(n+1)^{|\Delta|} \] For every $x\in\Delta^{n}$, \[ \frac{1}{(n+1)^{|\Delta|}}\cdot2^{nH(P_{x})}\leq|\mathcal{T}_{x}^{n}|\leq2^{nH(P_{x})} \] \end{prop} It follows that \begin{cor} \label{cor:generalized-type-theorem}For every finite set $\Delta$, $n\in\mathbb{N}$ and $h>0$, \[ \#\{x\in\Delta^{n}\,:\,H(P_{x})<h\}\leq O(n^{|\Delta|})\cdot2^{nh} \] \end{cor} For $x\in\Delta^{n}$ it is convenient to introduce a $\Delta$-valued random variable $\xi_{x}$ whose distribution is $P_{x}$, i.e. \[ \mathbb{P}(\xi_{x}=a)=P_{x}(a) \] Now suppose that $\Delta=\Delta_{1}\times\Delta_{2}$. Write $\xi^{1},\xi^{2}$ for the coordinate projections. These become random variables once a probability measure is given on $\Delta$. For $x\in\Delta^{n}$ we identify $x$ with the pair of sequences $(x^{1},x^{2})\in\Delta_{1}^{n}\times\Delta_{2}^{n}$ obtained from the first and second coordinates of each symbol, respectively. Then $P_{x}\in\mathcal{P}(\Delta_{1}^{n}\times\Delta_{2}^{n})$ and $\xi_{x}=(\xi_{x^{1}},\xi_{x^{2}})$ is a coupling of $\xi_{x^{1}},\xi_{x^{2}}$, which we denote for ease of reading by $(\xi_{x}^{1},\xi_{x}^{2})$. Given a pair of discrete random variables $X,Y$, we use the slightly non-standard notation \[ H(X|Y=y)=-\sum_{x}\mathbb{P}(X=x|Y=y)\log\mathbb{P}(X=x|Y=y) \] so that $H(X|Y)=\sum_{y}\mathbb{P}(Y=y)H(X|Y=y)$. We also use subscripts to indicate the probability distribution when necessary, as in $H_{P}(\xi^{1}|\xi^{2}=a)$. Finally, we endow $\mathcal{P}(\Delta)$ with the $\ell^{1}$ metric: for $P,Q\in\mathcal{P}(\Delta)$ let \[ \left\Vert P-Q\right\Vert =\sum_{a\in\Delta}|P(a)-Q(a)| \] \begin{prop} \label{prop:relative-type-theorem}Let $\Delta=\Delta_{1}\times\Delta_{2}$. For every $\varepsilon>0$ there exists a $\delta>0$ such that for every $n$ the following holds. Let $P\in\mathcal{P}(\Delta)$ and let $I_{1},\ldots,I_{m}\subseteq[1,n]$ be disjoint intervals such that $J=[1,n]\setminus\bigcup I_{i}$ satisfies $|J|>\varepsilon n$. Let $y\in\Delta_{1}^{n}$ be a fixed sequence, and let $\Lambda=\Lambda(y,I_{1},\ldots,I_{m})\subseteq\Delta^{n}$ denote the set of sequences $x=(y,z)\in\Delta^{n}$ whose first component is the given sequence $y$, and such that $\left\Vert P_{x}-P\right\Vert <\delta$ and $\left\Vert P_{x|_{I_{i}}}-P\right\Vert <\delta$ for every $1\leq i\leq m$. 
Then \[ |\{z|_{J}\,:\,(y,z)\in\Lambda\}|<O(n^{|\Delta_{1}||\Delta_{2}|})2^{|J|\cdot(H_{P}(\xi^{2}|\xi^{1})+\varepsilon)} \] In particular if $n$ is large enough relative to $\varepsilon$, then we can ensure \[ |\{z|_{J}\,:\,(y,z)\in\Lambda\}|<2^{|J|\cdot(H_{P}(\xi^{2}|\xi^{1})+\varepsilon)} \] \end{prop} \begin{proof} Using the continuity of the entropy function and the marginal probability function on the simplex of measures on $\Delta$, we can choose $\delta_{0}>0$ so that if $Q\in\mathcal{P}(\Delta)$ and $\left\Vert Q-P\right\Vert <\delta_{0}$, then $Q(\xi^{1}=a)\neq0$ if and only if $P(\xi^{1}=a)\neq0$, and $|H_{P}(\xi^{2}|\xi^{1}=a)-H_{Q}(\xi^{2}|\xi^{1}=a)|<\varepsilon/2$ for these $a$. We also assume that $\delta_{0}\log|\Delta_{2}|\leq\varepsilon/2$. Set $\delta=\varepsilon\delta_{0}/3$. Fix $y$ and consider $x=(y,z)$ as in the statement. Consider $u=x|_{J}$ and $v=x|_{[1,n]\setminus J}$ as new sequences. Note that $P_{v}=\sum\alpha_{i}\cdot P_{x|_{I_{i}}}$ where $\alpha_{i}=|I_{i}|/\sum|I_{i}|$, so \[ \left\Vert P_{v}-P\right\Vert \leq\sum\alpha_{i}\cdot\left\Vert P_{x|_{I_{i}}}-P\right\Vert <\sum\alpha_{i}\delta=\delta \] Therefore \[ \left\Vert P_{x}-P_{v}\right\Vert \leq\left\Vert P_{x}-P\right\Vert +\left\Vert P-P_{v}\right\Vert <2\delta \] Similarly $P_{x}=\frac{|J|}{n}P_{u}+(1-\frac{|J|}{n})P_{v}$, so \begin{eqnarray*} P_{u} & = & \frac{n}{|J|}(P_{x}-(1-\frac{|J|}{n})P_{v})\\ & = & \frac{n}{|J|}(P_{x}-(1-\frac{|J|}{n})P_{x}+(1-\frac{|J|}{n})(P_{x}-P_{v}))\\ & = & P_{x}+(\frac{n}{|J|}-1)(P_{x}-P_{v})\\ & = & P+(P_{x}-P)+(\frac{n}{|J|}-1)(P_{x}-P_{v}) \end{eqnarray*} Since $|J|>\varepsilon n$, \[ \left\Vert P_{u}-P\right\Vert <\delta+(\frac{1}{\varepsilon}-1)2\delta<\frac{3\delta}{\varepsilon}=\delta_{0} \] Now for $a\in\Delta_{1}$ let $J_{a}=\{j\in J\,:\,y_{j}=a\}$. By choice of $\delta_{0}$ and the fact that $\left\Vert P-P_{u}\right\Vert <\delta_{0}$ we have $J_{a}\neq\emptyset$ if and only if $P(\xi^{1}=a)\neq0$, and for such $a$, \[ |H(\xi_{u}^{2}|\xi_{u}^{1}=a)-H_{P}(\xi^{2}|\xi^{1}=a)|<\frac{\varepsilon}{2} \] Writing $u=(u^{1},u^{2})\in\Delta_{1}^{J}\times\Delta_{2}^{J}$, this means that \[ u^{2}|_{J_{a}}\in\{w\in\Delta_{2}^{J_{a}}\;:\;H(P_{w})<H_{P}(\xi^{2}|\xi^{1}=a)+\frac{\varepsilon}{2}\} \] so by Corollary \ref{cor:generalized-type-theorem} the number of choices for $u^{2}|_{J_{a}}$ is at most $O(|J_{a}|^{|\Delta_{2}|})2^{|J_{a}|\cdot(H_{P}(\xi^{2}|\xi^{1}=a)+\varepsilon/2)}$. Multiplying over all $a$ such that $J_{a}\neq\emptyset$, the number of possible values for $u^{2}$ is at most \begin{eqnarray*} \prod_{a}\#\{u^{2}|_{J_{a}}\,:\,(y,z)\in\Lambda\} & \leq & \prod_{a}O(|J_{a}|^{|\Delta_{2}|})2^{|J_{a}|\cdot(H_{P}(\xi^{2}|\xi^{1}=a)+\varepsilon/2)}\\ & = & O(n^{|\Delta_{1}||\Delta_{2}|})2^{\sum_{a}|J_{a}|\cdot(H_{P}(\xi^{2}|\xi^{1}=a)+\varepsilon/2)}\\ & = & O(n^{|\Delta_{1}||\Delta_{2}|})2^{|J|(\sum_{a}P_{u}(\xi^{1}=a)\cdot H_{P}(\xi^{2}|\xi^{1}=a)+\varepsilon/2)}\\ & \leq & O(n^{|\Delta_{1}||\Delta_{2}|})2^{|J|(\sum_{a}P(\xi^{1}=a)\cdot H_{P}(\xi^{2}|\xi^{1}=a)+\delta_{0}\log|\Delta_{2}|+\varepsilon/2)} \end{eqnarray*} where in the third line we used the identity $P_{u}(\xi^{1}=a)=|J_{a}|/|J|$, and in the last line we used the fact that $\left\Vert P_{u}-P\right\Vert <\delta_{0}$ implies $\sum_{a}|P_{u}(\xi^{1}=a)-P(\xi^{1}=a)|<\delta_{0}$, together with $H_{P}(\xi^{2}|\xi^{1}=a)\leq\log|\Delta_{2}|$ and $\sum_{a}P(\xi^{1}=a)H_{P}(\xi^{2}|\xi^{1}=a)=H_{P}(\xi^{2}|\xi^{1})$. Since we chose $\delta_{0}$ to satisfy $\delta_{0}\log|\Delta_{2}|<\varepsilon/2$ the proof of the first statement is complete. 
The second statement follows, since by the assumption $|J|>\varepsilon n$ we have $O(n^{|\Delta_{1}|\cdot|\Delta_{2}|})=2^{O(\log n)}=2^{o(|J|)}$. \end{proof} We shall require a slightly stronger version of the proposition above that works with the empirical frequencies of $k$-tuples, rather than of individual symbols. For $x=x_{1}\ldots x_{n}$ and $k\leq n$ define the $k$-th higher block code of $x$ to be the sequence $x^{(k)}=x_{1}^{(k)}\ldots x_{n-k}^{(k)}$ where \[ x_{i}^{(k)}=x_{i}x_{i+1}\ldots x_{i+k-1} \] \begin{prop} \label{prop:relative-type-theorem-higher-block-version}Let $\Delta=\Delta_{1}\times\Delta_{2}$. For every $\varepsilon>0$ and $k$ there exists a $\delta>0$ such that for every $n$ the following holds. Let $P\in\mathcal{P}(\Delta^{k})$ and let $I_{1},\ldots,I_{m}\subseteq[1,n-k+1]$ be disjoint intervals of length at least $\ell$ such that $J=[1,n-k+1]\setminus\bigcup I_{i}$ satisfies $|J|>\varepsilon n$. Let $y\in\Delta_{1}^{n}$ be a fixed sequence, and let $\Lambda=\Lambda(y,I_{1},\ldots,I_{m})\subseteq\Delta^{n}$ denote the set of sequences $x=(y,z)\in\Delta^{n}$ whose first component is the given sequence $y$, and such that $\left\Vert P_{x^{(k)}}-P\right\Vert <\delta$ and $\left\Vert P_{(x|_{I_{i}})^{(k)}}-P\right\Vert <\delta$ for every $1\leq i\leq m$. Then \[ |\{z|_{J}\,:\,(y,z)\in\Lambda\}|<O(n^{|\Delta_{1}||\Delta_{2}|})\cdot2^{|J|\cdot(\frac{1}{k}H_{P}(\xi^{2}|\xi^{1})+\varepsilon)} \] In particular if $n$ is large enough relative to $\varepsilon$, then we can ensure \[ |\{z|\,:\,(y,z)\in\Lambda\}|<2^{|J|\cdot(\frac{1}{k}H_{P}(\xi^{2}|\xi^{1})+\varepsilon)} \] \end{prop} \begin{proof} The idea of the proof is very similar to the previous one, we mention only the new ingredients. As before, using uniform continuity of the functions involved on the simplex of measures on $\Delta$, choose $\delta_{0}>0$ so that if $Q\in\mathcal{P}(\Delta^{k})$ and $|Q-P|<\delta_{0}$ then $Q(\xi^{1}=a)\neq0$ if and only if $P(\xi^{1}=a)\neq0$, and $|H_{P}(\xi^{2}|\xi^{1}=a)-H_{Q}(\xi^{2}|\xi^{1}=a)|<\varepsilon/4$ for these $a$. Assume further that $k\delta_{0}\log|\Delta_{2}|\leq\varepsilon/2$. Choose $\delta$ small enough that the hypothesis implies $\left\Vert P_{x^{(k)}|_{J}}-P\right\Vert <\delta_{0}$. This argument is identical to the one in the previous proof and is based on writing $P_{x^{(k)}}$ as a convex combination of $P_{x^{(k)}|_{J}}$ and the $P_{x^{(k)}|_{I_{i}}}$. Split $J$ into congruence classes modulo $k$: For each $0\leq r<k$, let $J_{r}=J\cap(k\mathbb{Z}+r)$. Observe that $x^{(k)}|_{J_{j}}$ determines $x|_{J_{j}+[0,k-1]}$, which does not yet determine $x|_{J}$, but almost: one easily checks that $J\setminus(J_{j}+[0,k-1])$ is contained in $m$ intervals of length $k$ that share an endpoint with one of the intervals $I_{i}$, and these have total length at most $mk$. Therefore the symbols in $x$ that are not determined by $x^{(k)}|_{J_{j}}$ constitute at most a $mk/n$-fraction of the symbols in $[1,n]$. Since the intervals $I_{1},\ldots,I_{m}$ each have length at least $\ell$ and are contained in $[1,n]$, we have $m\leq n/\ell$, so $mk/n$ can be made arbitrarily small by making $\ell$ large. Thus we can assume that for each $j$ the number of possibilities for $x|_{J\bigtriangleup(J_{j}+[0,k-1])}$ is at most $2^{\varepsilon^{2}n/2}$. 
Using the relation $P_{x^{(k)}|_{J}}=\sum_{i=0}^{k-1}\frac{|J_{i}|}{|J|}P_{x^{(k)}|_{J_{i}}}$ and $\left\Vert P_{x^{(k)}|_{J}}-P\right\Vert <\delta_{0}$, it follows that there is some $i$ with $\left\Vert P_{x^{(k)}|_{J_{i}}}-P\right\Vert <\delta_{0}$. Arguing exactly as in the previous proof, it follows that for this $i$, \begin{eqnarray*} \mbox{\# possibilities for }z|_{J_{i}+[0,k-1]} & = & \mbox{\# possibilities for }z^{(k)}|_{J_{i}}\\ & = & O(n^{|\Delta_{1}||\Delta_{2}|})2^{|J_{i}|\cdot(H_{P}(\xi^{2}|\xi^{1})+\varepsilon/4)}\\ & = & O(n^{|\Delta_{1}||\Delta_{2}|})2^{|J_{i}+[0,k-1]|\cdot(\frac{1}{k}H_{P}(\xi^{2}|\xi^{1})+\varepsilon/4)}\\ & = & O(n^{|\Delta_{1}||\Delta_{2}|})2^{|J|\cdot(\frac{1}{k}H_{P}(\xi^{2}|\xi^{1})+\varepsilon/2)} \end{eqnarray*} where in the last line we used the fact that $k|J_{i}|\leq|J|+O(mk/n)$ and, as explained earlier, by making $\ell$ we can ensure $mk/n<\varepsilon/4$ . Putting it all together, recalling that the number of possibilities for $x|_{J\setminus(J_{i}+[0,k-1])}$ is $2^{\varepsilon^{2}n/2}<2^{\varepsilon|J|/2}$, we have the desired bound.y \end{proof} \subsection{A relative generator theorem} In this section we consider regular points $x\in(\Sigma_{1}\times\Sigma_{2})^{\mathbb{Z}}$ with $\Sigma_{1},\Sigma_{2}$ finite, writing them as $x=(y,z)\in\Sigma_{1}^{\mathbb{Z}}\times\Sigma_{2}^{\mathbb{Z}}$. Regularity means in particular that $x$ determines a distribution on $\Sigma_{1}\times\Sigma_{2}$ by $P_{x}(a,b)=s(x,(a,b))$, and also a function $P_{x}^{*}:(\Sigma_{1}\times\Sigma_{2})^{*}\rightarrow[0,1]$ given by $a\mapsto s(x,a)$ which extends to a shift-invariant $\sigma$-finite probability measure $\mu_{x}$ on $(\Sigma_{1}\times\Sigma_{2})^{\mathbb{Z}}$ (this extensibility relies crucially on the fact that the alphabet is finite). As in the last section, we write $\xi_{x}=(\xi_{x}^{1},\xi_{x}^{2})$ for the random variable with distribution $P_{x}$. Regularity of $x=(y,z)$ implies regularity of $y$ and $z$, so we have $\xi_{x}^{1}=\xi_{y}$ and $\xi_{x}^{2}=\xi_{z}$. Write \[ H(x)=H(\xi_{x}) \] Extending the notation of the previous section we denote by $x^{(k)}$ the (infinite) sequence whose $i$-th symbol is $x_{i}^{(k)}=x_{i}x_{i+1}\ldots x_{i+k-1}$. Regularity of $x$ implies that also $x^{(k)}$ is regular for all $k$, hence $H(\xi_{x^{(k)}})$ is defined. Since $\xi_{x^{(k+m)}}$ is a coupling of $\xi_{x^{(k)}}$ and $\xi_{x^{(m)}}$ we have $H(\xi_{x^{(k+m)}})\leq H(\xi_{x^{(k)}})+H(\xi_{x^{(m)}})$ and so the limit \[ h(x)=\lim_{k\rightarrow\infty}\frac{1}{k}H(\xi_{x^{(k)}}) \] exists by sub-additivity. Of course, this is just the Kolmogorov-Sinai entropy of $\mu_{x}$. In the same manner we define $h(z)$, and set \begin{eqnarray*} h(x|z) & = & h(x)-h(z)\\ & = & \lim_{k\rightarrow\infty}\left(\frac{1}{k}H(\xi_{x^{(k)}})-\frac{1}{k}H(\xi_{z^{(k)}})\right)\\ & = & \lim_{k\rightarrow\infty}\frac{1}{k}\left(H(\xi_{x^{(k)}}|\xi_{z^{(k)}})\right) \end{eqnarray*} which is, again, the entropy of $\mu_{x}$ relative to the factor determined by the second coordinate. \begin{thm} \label{thm:relative-Krieger}Let $2\leq Q\in\mathbb{N}$ and $\Sigma_{1},\Sigma_{2}$ finite alphabets. To every $x=(y,z)\in(\Sigma_{1}\times\Sigma_{2})_{AP}^{\mathbb{Z}}$ such that $z$ is aperiodic and $h(x|z)<\log_{2}Q$, and to every $I\subseteq\mathbb{Z}$ such that $\underline{s}^{*}(I)>\frac{1}{\log Q}h(x|z)$, one can associate $w\in\{1,\ldots,Q\}^{I}$ such that the map $(x,I)\mapsto w$ is measurable and equivariant, and $(P_{x}^{*},z,w)$ determines $x$ (equivalently, $y$). 
\end{thm} The statement probably remains true if we replace the uniform density $\underline{s}^{*}(I)$ with $\underline{s}(I)$, but the uniform assumption allows for a simpler proof that is good enough for our application. Theorem \ref{thm:relative-Krieger} falls short of being a true relative generator theorem, since in order to recover $x$ from $z,w$ we must also know $P_{x}^{*}$. In the probability-preserving category, knowing $P_{x}^{*}$ is analogous to knowing the ergodic component of $x$, and the corresponding theorem would be one that gives a partition that generates for every ergodic component of the measure without guaranteeing that different ergodic components have distinct images under the itinerary map. This shortcoming can be overcome by encoding $P_{x}^{*}$ in $w$ (the information carried by $P_{x}^{*}$ is invariant under the shift, so it can be coded efficiently using Lemma \ref{lem:coding-shift-invariant-data}). But this would lengthen an already long proof, and we prefer to postpone this step to the more general theorem for countable partitions. For simplicity we show how to prove the theorem using a larger output alphabet: we introduce two additional symbols, $[$ and $]$, and produce $w\in\{1,\ldots,Q,[,]\}^{I}$ with the desired properties. We comment at the end how to make do without the extra symbols. In the proof we will build up $w$ gradually, starting with all symbols ``blank''. Formally one could introduce a new symbol with this name and set $w_{i}=\mbox{blank}$ for $i\in I$. As the construction progresses we will re-define more and more of the ``blank'' symbols to have values from $\{1,\ldots,Q,[,]\}$. We omit the routine verification that the constructions are equivariant and measurable. \subsubsection*{Choosing parameters $\varepsilon,\delta,k$ } By hypothesis $\underline{s}^{*}(I)>\frac{1}{\log Q}h(x|z)$, so setting \[ \varepsilon=\frac{1}{10\log_{Q}|\Sigma_{1}|}\left(\underline{s}^{*}(I)-\frac{1}{\log Q}h(x|z)\right) \] we have $\varepsilon>0$. Since \[ h(x|z)=\lim_{k\rightarrow\infty}\frac{1}{k}\left(H(\xi_{x^{(k)}}|\xi_{z^{(k)}})\right) \] we can choose $k$ such that \[ \frac{1}{k}H(\xi_{x^{(k)}}|\xi_{z^{(k)}})<h(x|z)+\varepsilon\log Q \] Now choose $\delta$ associated to $\varepsilon$ and $k$ as in Proposition \ref{prop:relative-type-theorem-higher-block-version}. We can assume that $\delta<\varepsilon$. \subsubsection*{Choosing $I',I''$} Relying on the definition of $\varepsilon$ and choosing suitably small $0<\eta_{1}<\eta_{2}<1$, apply Lemma \ref{lem:selecting-uniform-subset} to $z,I,\eta_{1},\eta_{2}$. We obtain disjoint subsets $I',I''\subseteq I$ satisfying \begin{eqnarray} \underline{s}^{*}(I') & > & \frac{1}{\log Q}h(x|z)+7\varepsilon\left\lceil \log_{Q}|\Sigma_{1}|\right\rceil \label{eq:I-prime-density}\\ \underline{s}^{*}(I'') & > & 3\varepsilon\left\lceil \log_{Q}|\Sigma_{1}|\right\rceil \label{eq:I-double-prime-density} \end{eqnarray} We will use each of these sets to encode a different portion of the word $y$. The first, $I'$, will be used to encode ``most'' (a $(1-3\varepsilon)$-fraction) of the symbols of $y$, namely, those that we succeed in covering by intervals with good empirical statistics in a sense to be defined below. The second set, $I''$, will encode the remaining symbols of $y$ (at most a $3\varepsilon$-fraction). \subsubsection*{Intervals with good empirical statistics} Observe that \[ P_{x^{(k)}}=\lim_{\ell\rightarrow\infty}P_{x^{(k)}|_{[1,\ell]}} \] in the pointwise sense (as functions on $\Sigma_{1}^{k}\times\Sigma_{2}^{k}$). 
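For the reader who wishes to experiment with these quantities, the following small Python fragment (purely illustrative and not used in the proofs; a finite window of a toy sequence stands in for the regular point $x$) computes the empirical $k$-block distribution, its $\ell^{1}$ distance to a reference distribution, and the conditional entropy $H_{P}(\xi^{2}|\xi^{1})$ of $k$-blocks which, divided by $k$, controls the exponent in Proposition \ref{prop:relative-type-theorem-higher-block-version}.
\begin{verbatim}
import math
from collections import Counter

def block_dist(seq, k):
    """Empirical distribution of k-blocks of a finite sequence."""
    blocks = [tuple(seq[i:i + k]) for i in range(len(seq) - k + 1)]
    n = len(blocks)
    return {b: c / n for b, c in Counter(blocks).items()}

def l1(P, Q):
    keys = set(P) | set(Q)
    return sum(abs(P.get(b, 0.0) - Q.get(b, 0.0)) for b in keys)

def cond_entropy(P):
    """H_P(xi^2 | xi^1) for P on k-blocks of pairs (first, second) symbols."""
    first = Counter()
    for b, p in P.items():
        first[tuple(s[0] for s in b)] += p
    return -sum(p * math.log2(p / first[tuple(s[0] for s in b)])
                for b, p in P.items() if p > 0)

# toy "x = (y, z)": a pair sequence over ({0,1} x {0,1})
x = [((i // 3) % 2, i % 2) for i in range(2000)]
P = block_dist(x, 2)
print(cond_entropy(P))          # (1/k) of this controls the counting exponent
print([l1(block_dist(x[:m], 2), P) for m in (50, 200, 1000)])  # convergence
\end{verbatim}
The printed $\ell^{1}$ distances illustrate how the empirical statistics of initial windows approach those of the whole sequence, which is exactly the property exploited in the next subsection.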
Since empirical frequencies are invariant under the shift, for every $i\in\mathbb{Z}$ the same limit holds with $S^{i}x$ in place of $x$. It follows that for every $i$ there exists an $\ell_{0}(i)\in\mathbb{N}$ such that \[ \left\Vert P_{x^{(k)}|_{[i,i+\ell]}}-P_{x^{(k)}}\right\Vert <\frac{1}{2}\delta\qquad\mbox{for all }\ell\geq\ell_{0}(i) \] \subsubsection*{The good scales $L_{n}$, intervals $J_{r}(i)$, and sets of candidate points $U_{r}$ } Choose $L_{0}\geq2$ large enough that every interval $J$ of length at least $L_{0}$ satisfies \begin{equation} \frac{1}{|J|}|J\cap I'|>\frac{1}{\log Q}h(x|z)+6\varepsilon\left\lceil \log_{Q}|\Sigma_{1}|\right\rceil \label{eq:L-zero-frequency-condition} \end{equation} as can be done by (\ref{eq:I-prime-density}). Define $L_{1},L_{2},\ldots\in\mathbb{N}$ by the recursion \begin{equation} L_{r+1}=\left\lceil 4L_{r}^{2}/\varepsilon^{4}\right\rceil \label{eq:growth-of-L} \end{equation} These will serve as the lengths of the intervals we deal with from now on. We abbreviate \[ J_{r}(i)=[i,i+L_{r}-1] \] For a given length $L_{r}$ we are only interested in points $i\in\mathbb{Z}$ for which this length is long enough to ensure good empirical statistics: set \[ U_{r}=\{i\in\mathbb{Z}\,:\,L_{r}\geq\ell_{0}(i)\} \] Thus, for $i\in U_{r}$ we have $\left\Vert P_{x^{(k)}|_{[i,i+L_{s}]}}-P_{x^{(k)}}\right\Vert <\delta/2$ for all $s\geq r$. Note that $U_{1}\subseteq U_{2}\subseteq\ldots$ and $\bigcup_{r=1}^{\infty}U_{r}=\mathbb{Z}$. \subsubsection*{Choosing the good intervals: $V_{r}$, $\mathcal{J}_{r}$, $E_{r}$} Below we will define, for every $r=1,2,3,\ldots$, subsets \[ V_{r}\subseteq U_{r} \] of ``good'' points and the associated family of intervals \[ \mathcal{J}_{r}=\{J_{r}(i)\}_{i\in V_{r}} \] whose union we denote \[ E_{r}=\cup\mathcal{J}_{r}=\bigcup_{i\in V_{r}}J_{r}(i) \] Similarly let $\mathcal{J}_{<r}=\{J_{s}(i)\,:\,i\in U_{s}\,,\,s<r\}$ and $E_{<r}=\cup\mathcal{J}_{<r}=\bigcup_{s<r}E_{s}$. The construction will satisfy the following properties (note that in (4) the even and odd stages are treated differently): \begin{enumerate} \item $V_{r}\subseteq U_{r}\setminus E_{<r}$. \item For each $r$, the collection of intervals $\mathcal{J}_{r}=\{J_{i}\}_{i\in V_{r}}$ is pairwise disjoint. \item For each $i\in V_{r}$, \[ \frac{1}{|J_{r}(i)|}\left|J_{r}(i)\setminus E_{<r}\right|>3\varepsilon \] \item For odd $r$, if $i\in U_{r}\setminus(E_{<r}\cup E_{r}\cup E_{r+1})$, then either \[ \frac{1}{J_{r}(i)}\left|J_{r}(i)\setminus E_{<r}\right|\leq3\varepsilon \] or \[ \frac{1}{|J_{r+1}(i)|}\left|J_{r+1}(i)\setminus E_{<r+1}\right|\leq3\varepsilon \] \end{enumerate} For the construction we induct over odd $r=1,3,5,\ldots$ and at step $r$ define $V_{r}$ and $V_{r+1}$. Fix an odd $r$ and assume we have defined $V_{s}$ for $s<r$. Set \begin{eqnarray*} U'_{r} & = & U_{r}\setminus E_{<r} \end{eqnarray*} Recall that $z$ is the second component of $x$ and is assumed to be aperiodic. Apply Lemma \ref{prop:two-symbol-AP-factor} to $z$ to obtain an $(L_{r}+L_{r+1})$-marker and let $W_{r}$ denote the set of $1$s in the marker, so $W_{r}$ is unbounded above and the gap between consecutive elements in it is at least $2L_{r}$. Let $i,i'\in W_{r}$ be consecutive elements of $W_{r}$. We define $V_{r}\cap[i,i'-L_{r})$ inductively: assuming we have defined $i_{p}$ for $1\leq p<q$, define $i_{q}$ to be the least element of $\left(U'_{r}\cap[i,i'-L_{r})\right)\setminus\bigcup_{p<q}J_{r}(i_{p})$ that satisfies $\frac{1}{|J_{r}(i_{q})|}|J_{r}(i_{q})\setminus E_{<r}|>3\varepsilon$. 
Stop when no such element exists. Since we chose $i_{p}$ from the set $U'_{r}$, (1) is immediate. Also, the intervals $J_{r}(i_{p})$ chosen for a given $i,i'\in W_{r}$ are pairwise disjoint by construction, and since we only choose elements of $[i,i'-L_{r})$ we have $J_{r}(i_{p})\subseteq[i,i')$, so the intervals are disjoint from those constructed from other consecutive pairs $j,j'\in W_{r}$. This verifies (2). Property (3) and the first alternative in property (4) are immediate from the construction. Now, to define $V_{r+1}$, for each consecutive $i,i'\in W_{r}$ do exactly the same in the intervals $[i'-L_{r},i')$, using $r+1$ instead of $r$. Since this interval has length $L_{r}<L_{r+1}$ we see that $V_{r+1}$ will contain at most one element in this interval, namely the least element $j\in\left(U'_{r}\cap[i'-L_{r},i')\right)\setminus E_{<r+1}$ such that $\frac{1}{|J_{r+1}(j)|}|J_{r+1}(j)\setminus E_{<r+1}|\geq3\varepsilon$, if such an element exists (there can be no more because the interval $[i'-L_{r},i')$ is shorter than the interval $J_{r+1}(j)$, so after one iteration of the induction there are no candidates left). Again (1) is automatic, (3) is like before, and the second alternative of (4) is clear (using $U_{r}\subseteq U_{r+1}$). As for (2), note that the gaps between consecutive elements of $W_{r}$ are at least $L_{r}+L_{r+1}$, so if $i\in W_{r}$ and $j\in[i-L_{r},i)$, then $J_{r+1}(j)\cap[i'-L_{r},i')=\emptyset$ for every $i'\in W_{r}\setminus\{i\}$. This easily implies (2). \subsubsection*{Decomposing $E_{<r}$ into components} Define a component of $E_{<r}$ to be an interval $J$ that satisfies \[ J=\cup\{J'\in\mathcal{J}_{<r}\,:\,J'\cap J\neq\emptyset\} \] and which is minimal in the sense that no proper subinterval of $J$ satisfies the same condition. Clearly the intersection of components is a component, so by the minimality property any two components are either equal or disjoint. We remark that $J$ is just the union of the intervals in a connected component of the intersection graph of $\mathcal{J}_{<r}$, the graph defined by connecting two intervals in $\mathcal{J}_{<r}$ if they intersect nontrivially. \begin{lem} \label{lem:components}Every component $[a,b]\subseteq E_{<r}$ is of the form $[a,b]=J_{r_{1}}(i_{1})\cup J_{r_{2}}(i_{2})\cup\ldots\cup J_{r_{m}}(i_{m})$, where $r>r_{1}>r_{2}>\ldots>r_{m}$ and $a=i_{1}<i_{2}<\ldots<i_{m}$. In particular $m<r$ and $|[a,b]|\leq\sum_{s\leq r_{1}}L_{s}<(1+\varepsilon/2)L_{r_{1}}$. \end{lem} \begin{proof} If two intervals from $\mathcal{J}_{<r}$ intersect, then by properties (1) and (2) they do not have the same length, and the left endpoint of the shorter one lies inside the longer one but not vice versa. Thus if neither of the intervals is contained in the other, the shorter must protrude beyond the right side of the longer one. Now fix a component $J$ of $E_{<r}$. Let $J_{r_{1}}(i_{1})\in\mathcal{J}_{<r}$ be the interval (necessarily unique by disjointness of the $V_{i}$s) that has the same left endpoint as $J$. It must be contained in $J$ because $J$ is a component. If $J_{r_{1}}(i_{1})=J$ we are done, otherwise let $J_{r_{2}}(i_{2})$ be the longest interval in $\mathcal{J}_{<r}$, other than $J_{r_{1}}(i_{1})$ itself, that intersects $J_{r_{1}}(i_{1})$ non-trivially, and note that by the previous paragraph it must be shorter ($r_{2}<r_{1}$), and contained in $J$ because $J$ is a component. Continuing inductively we exhaust $J$. 
The last two conclusions follow immediately from the first and (\ref{eq:growth-of-L}).\end{proof} \begin{lem} \label{lem:bound-on-number-of-components}Every $J\in\mathcal{J}_{r}$ intersects at most $1+L_{r}/L_{1}$ components of $E_{<r}$.\end{lem} \begin{proof} Let $J=J_{r}(i)\in\mathcal{J}_{r}$ and let $J'_{1},\ldots,J'_{m}$ denote the components of $E_{<r}$ that intersect it non-trivially. They are disjoint, and each has length $\geq L_{1}$ (because it contains some interval from $\mathcal{J}_{<r}$). Also, all except possibly the rightmost component are contained in $J$ (each component intersecting $J$ begins inside $J$, since by property (1) the left endpoint of $J$ does not lie in $E_{<r}$). Therefore $|J'_{j}\cap J|\geq L_{1}$ for at least $m-1$ of the intervals. Taken together, this shows that $L_{r}\geq(m-1)L_{1}$, which is what was claimed.\end{proof} \begin{lem} \label{lem:bound-on-component-distribution}If $[a,b]\subseteq E_{<r}$ is a component, then $\left\Vert P_{x^{(k)}|_{[a,b]}}-P_{x^{(k)}}\right\Vert <\delta$. \end{lem} \begin{proof} Write $[a,b]$ as a union as in Lemma \ref{lem:components}. Let $[a,c]=J_{r_{1}}(i_{1})\subseteq[a,b]$. We know that $\left\Vert P_{x^{(k)}|_{[a,c]}}-P_{x^{(k)}}\right\Vert <\delta/2$, and \[ P_{x^{(k)}|_{[a,b]}}=\frac{c-a}{b-a}P_{x^{(k)}|_{[a,c]}}+\frac{b-c-1}{b-a}P_{x^{(k)}|_{[c+1,b]}} \] By Lemma \ref{lem:components}, $|c-a|>(1-\delta/2)|b-a|$, and the conclusion follows.\end{proof} \begin{lem} \label{lem:density-ofJ-intersect-E-less-r}For every $J=J_{r}(i)\in\mathcal{J}_{r}$, \[ \frac{1}{|J\setminus E_{<r}|}|(J\setminus E_{<r})\cap I'|>\frac{1}{\log Q}h(x|z)+5\varepsilon\left\lceil \log_{Q}|\Sigma_{1}|\right\rceil \] \end{lem} \begin{proof} Let $J'_{1},\ldots,J'_{m}$ be an enumeration of the components of $E_{<r}$ that intersect $J=J_{r}(i)$ non-trivially, and let $J''_{1},\ldots,J''_{m'}$ denote those maximal intervals in $J\setminus\bigcup_{j=1}^{m}J'_{j}$ whose length is $<\frac{1}{2}\sqrt{L_{1}}$. Then $m'\leq m+1$ and by the previous lemma $m<1+L_{r}/L_{1}$, so the total length of the $J''_{j}$s is \[ \sum_{j=1}^{m'}|J''_{j}|\leq\frac{\sqrt{L_{1}}}{2}\cdot m'<\frac{\sqrt{L_{1}}}{2}(\frac{L_{r}}{L_{1}}+2)<\frac{L_{r}}{\sqrt{L_{1}}} \] By property (3), $|J_{r}(i)\setminus E_{<r}|>3\varepsilon L_{r}$, and $L_{r}/\sqrt{L_{1}}<\varepsilon^{2}L_{r}$ by (\ref{eq:growth-of-L}), so \begin{eqnarray*} |J_{r}(i)\setminus(\bigcup_{j=1}^{m}J'_{j}\cup\bigcup_{j=1}^{m'}J''_{j})| & = & |J_{r}(i)\setminus E_{<r}|-|\bigcup_{j=1}^{m'}J''_{j}|\\ & \geq & |J_{r}(i)\setminus E_{<r}|-\frac{L_{r}}{\sqrt{L_{1}}}\\ & > & (1-\varepsilon)|J_{r}(i)\setminus E_{<r}| \end{eqnarray*} Each maximal interval in $J_{r}(i)\setminus(\bigcup_{j=1}^{m}J'_{j}\cup\bigcup_{j=1}^{m'}J''_{j})$ has length at least $\frac{1}{2}\sqrt{L_{1}}>L_{0}$. Thus by (\ref{eq:L-zero-frequency-condition}) and the above, \begin{eqnarray*} |(J_{r}(i)\setminus E_{<r})\cap I'| & > & \left(\frac{1}{\log Q}h(x|z)+6\varepsilon\left\lceil \log_{Q}|\Sigma_{1}|\right\rceil \right)(1-\varepsilon)|J_{r}(i)\setminus E_{<r}|\\ & > & \left(\frac{1}{\log Q}h(x|z)+5\varepsilon\left\lceil \log_{Q}|\Sigma_{1}|\right\rceil \right)\cdot|J_{r}(i)\setminus E_{<r}| \end{eqnarray*} as claimed. \end{proof} \subsubsection*{Encoding $y|_{J}$ for $J\in\mathcal{J}_{r}$} We now define $w|_{I'\cap E_{r}}$ by induction on $r$. Upon entering stage $r$, we will have already defined all symbols in $I'\cap E_{<r}$, and we define all the remaining symbols in $E_{r}\cap I'$. 
Our objective is that the information recorded at step $r$, together with $y|_{E_{<r}}$, $z$ and $P_{x}^{*}$, will suffice to recover $E_{r}$ and $y|_{E_{r}}$, no matter what additional information is written later in $w$. If this is accomplished then $w|_{I'}$, $z$ and $P_{x}^{*}$ uniquely determine $y|_{E_{<\infty}}$. The actual encoding of information is done as follows. For each interval $J\in\mathcal{J}_{r}$, we write symbols in the portion of $I'\setminus E_{<r}$ that falls within $J$. To identify this set we mark its beginning and end with brackets, and denote the remainder by $J'$. We want to define the symbols in $J'$ so as to determine $y|_{J}$. We do this by first enumerating all possibilities for $J$ and $y|_{J}$ that are consistent with the region $J'$, and, if the actual word $y|_{J}$ is $N$-th in this list, we record this index $N$ in $J'$. To be able to carry this out, the number of possible values of $N$ must be less than $Q^{|J'|}$. The estimates below show that this is indeed the case. Let $r$ be given and fix $J\in\mathcal{J}_{r}$. First, let $i_{min}=\min((J\setminus E_{<r})\cap I')$ and $i_{max}=\max((J\setminus E_{<r})\cap I')$ and set $w_{i_{min}}=[$ and $w_{i_{max}}=]$. Note that by Lemma \ref{lem:density-ofJ-intersect-E-less-r} the sets in question contain more than two elements, and the intervals in $\mathcal{J}_{r}$ are pairwise disjoint, so this is well defined. Next, we want to write symbols $1,\ldots,Q$ to all undefined locations in $J'=[i_{min}+1,i_{max}-1]\setminus E_{<r}$, in such a way that $z$, $P_{x}^{*}$ and $w|_{[i_{min},i_{max}]}$ determine $J$ and $x|_{J}$. We estimate the number of choices for $J$. Using (\ref{eq:L-zero-frequency-condition}) and property (3) of $V_{r}$, \[ 3\varepsilon L_{r}=3\varepsilon|J|\leq i_{max}-i_{min}\leq L_{r} \] It follows from (\ref{eq:growth-of-L}) that $i_{min},i_{max}$ determine $r$. In order to determine $J$, it thus suffices to specify its left endpoint, whose distance from $i_{min}$ is at most $L_{r}$. Thus given $i_{min},i_{max}$ there are at most $L_{r}$ possibilities for $J$ (this is a slight over-estimate but we can afford to make it). Next, we estimate the number of choices for $y|_{J\setminus E_{<r}}$. Let $J'_{1},\ldots,J'_{m}$ denote the components of $E_{<r}$ that intersect $J$ non-trivially. Let \[ F=\cup\{J'_{i}\,:\,J'_{i}\subseteq J\} \] The union consists of all but at most one of the intervals $J'_{i}$, the possible exception occurring at the right end of $J$. By Lemma \ref{lem:bound-on-component-distribution}, each $J'_{j}\subseteq J$ satisfies \[ \left\Vert P_{x^{(k)}|_{J'_{j}}}-P_{x^{(k)}}\right\Vert <\delta \] and by definition of $U_{r}$ and $J_{r}(i)$, \[ \left\Vert P_{x^{(k)}|_{J}}-P_{x^{(k)}}\right\Vert <\delta \] Also, by property (3) the complement of $F$ in $J$ is at least an $\varepsilon$-fraction of $J$. 
Thus by Proposition \ref{prop:relative-type-theorem-higher-block-version}, \begin{eqnarray*} \#\mbox{ possibilities for }y|_{J\setminus E_{<r}} & \leq & \#\mbox{ possibilities for }y|_{J\setminus F}\\ & \leq & 2^{|J\setminus F|(h(x|z)+\varepsilon)} \end{eqnarray*} Since $J\setminus E_{<r}$ and $J\setminus F$ differ by at most two intervals from $\mathcal{J}_{<r}$, whose combined length is $\leq2L_{r-1}<\frac{1}{2}\varepsilon^{2}L_{r}$, and since by (3) we have $|J\setminus E_{<r}|>3\varepsilon|J|$, we have \[ |J\setminus F|\leq|J\setminus E_{<r}|+\frac{1}{2}\varepsilon^{2}L_{r}<(1+\varepsilon)|J\setminus E_{<r}| \] so, using the trivial bound $h(x|z)\leq\log|\Sigma_{1}|$, \begin{eqnarray*} \#\mbox{ possibilities for }y|_{J\setminus E_{<r}} & < & 2^{|J\setminus E_{<r}|\cdot\left(h(x|z)+3\varepsilon\log|\Sigma_{1}|\right)} \end{eqnarray*} Combining the estimates above, the number of possibilities for the pair $(J,y|_{J\setminus E_{<r}})$ satisfies \begin{eqnarray*} \#\mbox{possibilities for }(J,y|_{J\setminus E_{<r}}) & < & L_{r}\cdot2^{|J\setminus E_{<r}|\cdot\left(h(x|z)+3\varepsilon\log|\Sigma_{1}|\right)} \end{eqnarray*} The number of symbols we have available to write in is $|(J\setminus E_{<r})\cap I'|-2$ (since the positions $i_{min},i_{max}$ were used for the brackets), and by Lemma \ref{lem:density-ofJ-intersect-E-less-r} we know that \begin{eqnarray*} |(J\setminus E_{<r})\cap I'| & \geq & \left(\frac{1}{\log Q}h(x|z)+5\varepsilon\log_{Q}|\Sigma_{1}|\right)\cdot|J\setminus E_{<r}| \end{eqnarray*} so, since we are using the alphabet $\{1,\ldots,Q\}$, \begin{eqnarray*} \#\mbox{ different sequences we can produce} & \geq & Q^{\left(\frac{1}{\log Q}h(x|z)+5\varepsilon\left\lceil \log_{Q}|\Sigma_{1}|\right\rceil \right)\cdot|J\setminus E_{<r}|-2}\\ & \geq & 2^{|J\setminus E_{<r}|\cdot\left(h(x|z)+5\varepsilon\log Q\cdot\log_{Q}|\Sigma_{1}|\right)-2\log Q} \end{eqnarray*} Comparing these two expressions and noting that $|J\setminus E_{<r}|>3\varepsilon L_{r}$ and $\log L_{r}/(3\varepsilon L_{r})<\varepsilon$, we find that there are enough undefined symbols in $(J\setminus E_{<r})\cap I'$ to uniquely encode $J$ and $y|_{J\setminus E_{<r}}$. \subsubsection*{Decoding $w|_{I'}$} For each $r$ and $J\in\mathcal{J}_{r}$ the symbols $[,]$ were used only to surround an interval $[j,j']\subseteq J$ which was later filled in with symbols from $1,\ldots,Q$. It follows that the pattern of brackets in $w|_{I'}$ forms a legal bracket expression, i.e. each bracket has a unique matching one. Furthermore, as we noted during the construction, $j'-j$ determines the stage $r$ at which they were written. Thus $[j,j']\cap E_{<r}$ can be recognized as the union of interiors of bracketed intervals contained in $[j,j']$, and the data written in $w$ on $([j,j']\setminus E_{<r})\cap I'$ together with $z$ and $P_{x}^{*}$ determines the (unique) interval $J\in\mathcal{J}_{r}$ such that $j=\min(J\setminus E_{<r})\cap I'$ and $j'=\max(J\setminus E_{<r})\cap I'$, and also determines $y|_{J\setminus E_{<r}}$. In this way $w|_{I'}$ determines $E_{<\infty}$ and $y|_{E_{<\infty}}$. \subsubsection*{Encoding $y|_{\mathbb{Z}\setminus E_{<\infty}}$} It remains to encode $y|_{\mathbb{Z}\setminus E_{<\infty}}$ in $w|_{I''}$. If $E_{<\infty}=\mathbb{Z}$ there is nothing to do and we set $w|_{I''}\equiv1$. Otherwise let $i\in\mathbb{Z}\setminus E_{<\infty}$. Then it belongs to $U_{r}$ for all large enough $r$ and hence, for all large enough $r$, we have $i\in U_{r}\setminus E_{<r+2}$. 
By (4) either \[ \frac{1}{|J_{r}(i)|}\left|J_{r}(i)\setminus E_{<r}\right|\leq3\varepsilon\qquad\mbox{or}\qquad\frac{1}{|J_{r+1}(i)|}\left|J_{r+1}(i)\setminus E_{<r+1}\right|\leq3\varepsilon \] It follows that \[ \underline{s}(\mathbb{Z}\setminus E_{<\infty})\leq3\varepsilon \] Since $\underline{s}(I'')>3\varepsilon\left\lceil \log_{Q}|\Sigma_{1}|\right\rceil $ (equation (\ref{eq:I-double-prime-density})), we can apply Lemma \ref{lem:selecting-a-sequence-of-subsets} to $z$ and $I''$ to obtain disjoint $I''_{i}\subseteq I''$, $i=1,\ldots,\left\lceil \log_{Q}|\Sigma_{1}|\right\rceil $, with $\underline{s}(I''_{i})>3\varepsilon$, and apply Lemma \ref{lem:equivarian-injections} to $z$ and each $I''_{i}$ to obtain an injection $f_{i}:\mathbb{Z}\setminus E_{<\infty}\rightarrow I''_{i}$. For each $i\in\mathbb{Z}\setminus E_{<\infty}$ represent $y_{i}\in\Sigma_{1}$ as a string $a_{1}\ldots a_{\left\lceil \log_{Q}|\Sigma_{1}|\right\rceil }$ and set $w_{f_{j}(i)}=a_{j}$. \subsubsection*{Decoding $w|_{I''}$} Since $E_{<\infty}$ can be recovered from $w$, we can recover $\mathbb{Z}\setminus E_{<\infty}$ and hence the sets $I''_{i}$ and the injections $f_{i}$. Then for $i\in\mathbb{Z}\setminus E_{<\infty}$ we recover $y_{i}$ by reading off the sequence $w_{f_{1}(i)},w_{f_{2}(i)},\ldots$. \subsubsection*{Reducing the alphabet from size $Q+2$ to $Q$} To make do with $Q$ symbols of output instead of $Q+2$, choose long enough words $a_{[},a_{]}\in\{1,\ldots,Q\}^{*}$ so that the SFT in $\{1,\ldots,Q\}^{\mathbb{Z}}$ that omits them has entropy greater than $h(x|z)$, and the words cannot overlap themselves or each other. Then we use them in place of the symbols $[,]$ and choose all other sequences in the encoding so that they omit $a_{[},a_{]}$, i.e., so that they are admissible for the SFT defined by omitting these two words. We can arrange this SFT to be mixing, and all encoding can be seen to occur in long blocks, which makes this possible. Enough legal sequences exist for the encoding because, by choosing $a_{[},a_{]}$ long enough, we can ensure that the topological entropy of the SFT omitting them is still larger than the empirical entropy of $y$. We omit the standard details (for the application to the main theorem of this paper, the version with $Q+2$ symbols suffices). \subsection{\label{sub:Empirical-entropy}\label{sub:Constructing-the-generator-Krieger-case}Constructing the generator} Fix a countable alphabet $\Sigma=\{\sigma_{1},\sigma_{2},\ldots\}$. Let $*$ denote a symbol not in $\Sigma$, let $\Sigma_{n}=\{\sigma_{1},\ldots,\sigma_{n},*\}$, let $\pi_{n}:\Sigma\rightarrow\Sigma_{n}$ denote the map that collapses the symbols $\sigma_{n+1},\sigma_{n+2},\ldots$ to $*$, and extend $\pi_{n}$ pointwise to sequences. Thus $\pi_{n}(x)\in\Sigma_{n}^{\mathbb{Z}}$ is the sequence \[ \pi_{n}(x)_{i}=\left\{ \begin{array}{cc} x_{i} & \mbox{if }x_{i}\in\{\sigma_{1},\ldots,\sigma_{n}\}\\ * & \mbox{otherwise} \end{array}\right. 
\] It is clear that if $x\in\Sigma^{\mathbb{Z}}$ is regular then so is $\pi_{n}(x)$ for every $n$, so using the notation of the previous section we can define \[ h_{n}(x)=h(\pi_{n}(x)) \] Since $\pi_{n}(x)$ is obtained from $\pi_{n+1}(x)$ by merging occurrences of $\sigma_{n+1}$ and $*$ into the symbol $*$, it is easy to see that $h(\pi_{n+1}(x))\geq h(\pi_{n}(x))$ (either directly, or using the fact that $\pi_{n}$ is a factor map from $(\Sigma_{n+1}^{\mathbb{Z}},\mu_{\pi_{n+1}(x)},S)$ to $(\Sigma_{n}^{\mathbb{Z}},\mu_{\pi_{n}(x)},S)$, and that Kolmogorov-Sinai entropy is non-increasing under factors). Thus we can set \[ h(x)=\lim_{n}h_{n}(x)=\sup_{n}h_{n}(x) \] For the same reason that $h_{n}(x)$ is non-decreasing, this definition of $h(x)$ is independent of the ordering of $\Sigma$: If we choose a different ordering $\Sigma=\{\sigma'_{1},\sigma'_{2},\ldots\}$ and define corresponding $\Sigma'_{n}$ and $\pi'_{n}$ and $h'_{n}(x)$, then for every $n$ we have $\pi_{n}(x)=\pi_{n}(\pi'_{n'}(x))$ for large enough $n'$, so $h_{n}(x)\leq h'_{n'}(x)$, hence if $h'(x)=\sup h'_{n}(x)$ then $h(x)\leq h'(x)$, and reversing the argument we see the two are the same. Thus $h(x)$ does not depend on the particular ordering we chose for $\Sigma$ (although $h(\pi_{n}(x))$ does). Finally, observe that $h(x)\leq\widetilde{H}(x)$. Indeed, write $P_{x}$ for the probability vector on $\Sigma\cup\{*\}$ that gives mass $s_{a}(x)$ to $a\in\Sigma$ and $1-\sum_{a\in\Sigma}s_{a}(x)$ to $*$. Note that $P_{\pi_{n}(x)}=\pi_{n}P_{x}$, which implies \[ h_{n}(x)=\inf_{k}\frac{1}{k}H(\xi_{\pi_{n}(x)^{(k)}})\leq H(\xi_{\pi_{n}(x)})=H(P_{\pi_{n}(x)})\leq H(P_{x})=\widetilde{H}(x) \] Therefore $h(x)=\lim_{n\rightarrow\infty}h_{n}(x)\leq\widetilde{H}(x)$. \begin{thm} \label{thm:generator-for-low-entropy-sequences}Let $Q\in\mathbb{N}$ and let $Y_{Q}\subseteq\Sigma_{AP}^{\mathbb{Z}}$ denote the set of aperiodic and regular sequences $y$ satisfying $h(y)<\log Q$. Then $Y_{Q}$ has a $Q$-set generator. \end{thm} Our aim is to construct an injective factor map $\tau:Y_{Q}\rightarrow\{1,\ldots,Q\}^{\mathbb{Z}}$. Fix $x\in Y_{Q}$. As in the proof of Theorem \ref{thm:relative-Krieger}, we begin with $w=\tau(x)$ ``blank'', and define it inductively. We begin with a preliminary step, which we call step $0$, in which we choose a rather sparse subset $I_{0}\subseteq\mathbb{Z}$ and record all of the frequencies $(s_{a}(x))_{a\in\Sigma^{*}}$ on $w|_{I_{0}}$ (in particular $w|_{I_{0}}$ determines the entropies $h(\pi_{n}(x))$). We also determine a sequence of disjoint sets $I_{1},I_{2},\ldots\subseteq\mathbb{Z}\setminus I_{0}$ having uniform densities which we will specify later. We do this in such a way that $I_{0},I_{1},\ldots$ can be recovered from $w$ irrespective of what is written later. After the preliminary step is complete, we apply the relative generator theorem (Theorem \ref{thm:relative-Krieger}) inductively to define $w|_{I_{n}}$ in such a way that given $\pi_{n-1}(x)$ and $P_{\pi_{n-1}(x)}$, we can recover $\pi_{n}(x)$ (in fact a minor modification of this strategy is necessary, see below). \subsubsection*{Definition of $\rho_{n}$} For $n=1,2,3,\ldots$ choose numbers $\rho_{n}$ in the range \[ \frac{h_{n}(x)-h_{n-1}(x)}{\log Q}<\rho_{n}<1 \] (here $h_{0}(x)=0$) and another rational number $0<\rho_{0}<1$, in such a way that $\sum_{n=0}^{\infty}\rho_{n}<1$. This is possible since by hypothesis, $\sum_{n=1}^{\infty}(h_{n}(x)-h_{n-1}(x))=h(x)<\log Q$. 
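To make the definitions of $h_{n}(x)$ and of the $\rho_{n}$ concrete, here is a small Python sketch; it is purely illustrative and rests on the obvious simplifications that a long finite sample stands in for $x$ and that the infimum over $k$ is approximated by a minimum over a few block lengths.
\begin{verbatim}
import math, random
from collections import Counter

def pi_n(seq, n):
    # collapse sigma_{n+1}, sigma_{n+2}, ... to the extra symbol '*'
    return ['*' if s > n else s for s in seq]

def block_entropy_rate(seq, k):
    blocks = Counter(tuple(seq[i:i + k]) for i in range(len(seq) - k + 1))
    total = sum(blocks.values())
    return -sum(c / total * math.log2(c / total) for c in blocks.values()) / k

random.seed(0)
x = [random.choice([1, 1, 1, 2, 2, 3]) for _ in range(5000)]  # toy sample of x
Q = 4
logQ = math.log2(Q)

h = [0.0]                                   # h_0(x) = 0
for n in range(1, 9):                       # estimates of h_n(x), non-decreasing in n
    h.append(min(block_entropy_rate(pi_n(x, n), k) for k in (1, 2, 4)))

# one admissible choice of rho_n: slightly above (h_n - h_{n-1}) / log Q,
# with a geometric margin so that the total stays below 1 (rho_0 would use
# part of the remaining margin)
slack = (1 - h[-1] / logQ) / 2
rhos = [(h[n] - h[n - 1]) / logQ + slack * 2.0 ** (-n) for n in range(1, 9)]
print([round(r, 3) for r in rhos], round(sum(rhos), 3))
\end{verbatim}
The printed values illustrate that such a choice of $\rho_{n}$ is always possible once $h(x)<\log Q$, which is all that the construction below requires.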
\subsubsection*{Encoding an aperiodic sequence in $w$} The first step is to encode an aperiodic sequence in $w$. This sequence will be used in both the encoding and decoding of $w$, so we must ensure that it can be recovered no matter what additional data is later written to $w$. Choose $M$ large enough that $1/M<\rho_{0}/4$ and let $w'\in\{0,1\}^{\mathbb{Z}}$ denote the sequence derived from $x$ using Proposition \ref{prop:two-symbol-AP-factor} and parameter $M^{2}$, so $w'$ is aperiodic and the gaps between consecutive $1$s in $w'$ are at least $M^{2}$. For every $i$ such that $w'_{i}=1$, set $w_{i}=w_{i+1}=\ldots=w_{i+M-1}=1$. Next, let $i<j$ be the positions of a pair of consecutive occurrences of $1$ in $w'$, let $k$ be the largest index such that $i+kM<j$, and set $w_{i+M}=w_{i+2M}=\ldots=w_{i+kM}=2$. The point $w$ now has the property that the blank symbols appear in blocks of length at most $M-1$, and each such block is preceded by a $2$ and is terminated by either a $2$ or the word $1^{M}2$. Thus, no matter what symbols are eventually written in the blank sites in $w$, no new occurrences of $1^{M}2$ can be formed, and $w'$ can be recovered: \[ w'_{i}=\left\{ \begin{array}{cc} 1 & \mbox{if }1^{M}2\mbox{ occurs in }w\mbox{ at }i\\ 0 & \mbox{otherwise} \end{array}\right. \] We estimate the density of undefined symbols in $w$: Since the gaps between $1$'s in $w'$ are at least $M^{2}$ we have $\overline{s}^{*}(w',1)\leq1/M^{2}$, so $\overline{s}^{*}(w,1)\leq M\cdot\overline{s}^{*}(w',1)\leq1/M$. Also, since the distance between $2$'s in $w$ is at least $M$, we have $\overline{s}^{*}(w,2)\leq1/M$. Therefore by choice of $M$, \[ \underline{s}^{*}(w,\mbox{blank})\geq1-\overline{s}^{*}(w,1)-\overline{s}^{*}(w,2)\geq1-\frac{2}{M}>1-\frac{1}{2}\rho_{0} \] \subsubsection*{Encoding the empirical distribution} Let \[ U=\{n\in\mathbb{Z}\,:\,w_{n}\mbox{ is blank}\} \] and \[ \rho=1-\underline{s}^{*}(U) \] so $\rho<\frac{1}{2}\rho_{0}$ and, as explained above, $\rho$ can be recovered from $w$. Apply Lemma \ref{lem:selecting-uniform-subset} to $(w',U,\frac{1}{3}\rho,1-\frac{1}{2}\rho)$ to obtain a subset $I_{0}\subseteq U$ with \begin{eqnarray*} \underline{s}^{*}(I_{0}) & > & \frac{1}{3}\rho\\ & > & 0\\ \underline{s}^{*}(U\setminus I_{0}) & > & (1-\frac{1}{2}\rho)\underline{s}^{*}(U)\\ & > & 1-\rho_{0} \end{eqnarray*} The sets $I_{0},U\setminus I_{0}$ are recoverable from $w$. Choose a measurable map $f:Y_{Q}\rightarrow\{0,1\}^{\mathbb{N}}$ such that $f(x)$ encodes the sequence of frequencies $(s(x,a))_{a\in\Sigma^{*}}$. This is a shift-invariant function, so we can apply Lemma \ref{lem:coding-shift-invariant-data} with the function $f$ to $w',x,$ and $I_{0}$ to define $w|_{I_{0}}$ in such a way that $w'$ and $w|_{I_{0}}$ determine $f(x)$, and hence the frequencies $(s(x,a))_{a\in\Sigma^{*}}$. Thus no matter what information is later written in $w$, we can use $w$ to recover $w',I_{0}$, hence $w|_{I_{0}}$, hence $(s(x,a))_{a\in\Sigma^{*}}$. In particular $w$ determines $h_{n}(x)$ and $\rho_{n}$ for all $n\geq1$. 
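The skeleton of $1$s and $2$s constructed above is easy to simulate; the following Python fragment (an illustration on a finite window only, not part of the construction) builds it from the $1$s of $w'$ and verifies that they are recoverable from the word $1^{M}2$.
\begin{verbatim}
def skeleton(markers, length, M):
    """Partially defined w: a run 1^M at each marker of w', a 2 every M steps
    until the next marker, and None ("blank") elsewhere.  Markers are assumed
    to be at least M^2 apart, as provided by the M^2-marker of w'."""
    w = [None] * length
    for i in markers:
        for t in range(min(M, length - i)):
            w[i + t] = 1
    for i, j in zip(markers, list(markers[1:]) + [length]):
        p = i + M
        while p < j:                        # the largest k with i + kM < j
            w[p] = 2
            p += M
    return w

def recover(w, M):
    """w'_i = 1 exactly when the word 1^M 2 occurs in w at position i."""
    return [i for i in range(len(w) - M)
            if all(w[i + t] == 1 for t in range(M)) and w[i + M] == 2]

M, markers = 3, [0, 11, 25, 40]             # gaps are at least M^2 = 9
w = skeleton(markers, 50, M)
print(recover(w, M) == markers)             # True: w' is recoverable
# The None positions are the "blank" sites; whatever is written there later,
# no new occurrence of 1^M 2 can be created, so w' stays recoverable.
\end{verbatim}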
\subsubsection*{Choosing $I_{1},I_{2},\ldots$} Since $\sum\rho_{n}<1-\rho_{0}<\underline{s}^{*}(U\setminus I_{0})$ we can apply Lemma \ref{lem:selecting-a-sequence-of-subsets} to $(w',U\setminus I_{0},(\rho_{n}/(1-\rho_{0}))_{n=1}^{\infty})$ to obtain disjoint subsets $I_{1},I_{2},\ldots\subseteq U\setminus I_{0}$ such that \[ \underline{s}^{*}(I_{n})\geq\frac{\rho_{n}}{1-\rho_{0}}\underline{s}^{*}(U\setminus I_{0})>\rho_{n} \] Since $w',U,I_{0},\rho_{n}$ are all recoverable from $w$ no matter what is written later, the sets $I_{n}$ are recoverable as well. \subsubsection*{Defining $\widetilde{\pi}_{n}$} Let \[ \widetilde{\pi}_{n}(x)=(\pi_{n}(x),w')\in(\Sigma_{n}\times\{0,1\})^{\mathbb{Z}} \] (note that $w'$ depends measurably and equivariantly on $x$). \subsubsection*{Coding $\pi_{n}(x)$} For each $n=1,2,3,\ldots$, observe that \begin{eqnarray*} h(\widetilde{\pi}_{n}(x)|\widetilde{\pi}_{n-1}(x)) & = & h(\pi_{n}(x)|\pi_{n-1}(x),w')\\ & \leq & h(\pi_{n}(x)|\pi_{n-1}(x))\\ & = & h_{n}(x)-h_{n-1}(x)\\ & < & \rho_{n}\log Q \end{eqnarray*} so that $\underline{s}^{*}(I_{n})>\rho_{n}>\frac{1}{\log Q}h(\widetilde{\pi}_{n}(x)|\widetilde{\pi}_{n-1}(x))$. Furthermore, $\widetilde{\pi}_{n-1}(x)$ is aperiodic, since $w'$ is (this is the reason we introduced $w'$: the sequence $\pi_{n-1}(x)$ might be periodic). Apply the relative generator theorem (Theorem \ref{thm:relative-Krieger}) to $(y,z)=(\widetilde{\pi}_{n}(x),\widetilde{\pi}_{n-1}(x))$ and $I_{n}$. We obtain a pattern $w|_{I_{n}}$ from which, together with $\widetilde{\pi}_{n-1}(x)$ and $P_{\widetilde{\pi}_{n-1}(x)}$, we can recover $\widetilde{\pi}_{n}(x)$ and in particular $\pi_{n}(x)$. \subsubsection*{Summary} Let us review what has transpired: We encoded an aperiodic sequence $w'$ in $w$, with the property that it can be recovered later no matter how the rest of $w$ is defined. The density of $w'$ itself indicates the density of symbols needed for the encoding, so constructions based on this number can be reproduced knowing only $w'$. Using this we reserved a low-density set $I_{0}$ of blank sites, and encoded the empirical distribution of $x$ in it. We then reserved subsets $I_{1},I_{2},\ldots$ of the remaining blank symbols of sufficient density that in $I_{n}$ we could record the sequence $\pi_{n}(x)$, the coding being unequivocal given $\pi_{n-1}(x)$ (which is encoded in $I_{n-1}$). \subsubsection*{Decoding} We have already said almost everything about this. From $w$ we can recover $w'$, hence $\rho$, hence $I_{0}$, hence the frequencies $(s_{a}(x))_{a\in\Sigma^{*}}$. These determine $P_{\widetilde{\pi}_{n}(x)}$, and also $\rho_{n}$ and hence $I_{n}$. Now inducting on $n=1,2,\ldots$ we recover $\pi_{n}(x)$ from $\widetilde{\pi}_{n-1}(x)$, $P_{\widetilde{\pi}_{n-1}(x)}$ and $w|_{I_{n}}$. The sequences $\pi_{n}(x)$, $n=1,2,\ldots$, determine $x$. \section{\label{sec:Proofs-of-Corollaries}Proofs of Corollaries \ref{cor:generator-with-entropy} and \ref{cor:universal-Borel-systems}} Here we fill in some details about Corollaries \ref{cor:generator-with-entropy} and \ref{cor:universal-Borel-systems}. We begin with Corollary \ref{cor:generator-with-entropy}. In \cite[Theorem 1.5]{Hochman2013b} it was shown that if $Y$ is a mixing shift of finite type of topological entropy $h$ and $(X,T)$ is a free Borel system whose invariant measures are all of smaller entropy, or if this holds with one exception which is Bernoulli of entropy $h$, then there is a $T$-invariant Borel set $X_{0}\subseteq X$ such that $X\setminus X_{0}$ supports no invariant probability measure, and a Borel embedding $\pi:X_{0}\rightarrow Y$. 
Let $Y_{1},Y_{2},\ldots\subseteq Y$ be a sequence of pairwise disjoint mixing shifts of finite type (constructing such a sequence is elementary and we omit the details). We define a sequence of pairwise disjoint $T$-invariant Borel subsets $X_{1},X_{2},\ldots\subseteq X$ supporting no $T$-invariant probability measures, and Borel embeddings $\pi_{i}:X_{i}\rightarrow Y_{i}$, such that $\pi_{0}|_{X\setminus\bigcup X_{i}}\cup\bigcup\pi_{i}$ is an embedding of $X$ into $Y$. Set $X_{1}=X\setminus X_{0}$ and apply Theorem \ref{thm:main} to obtain an embedding $\pi_{1}:X_{1}\rightarrow Y_{1}$. Let $X_{2}=\pi_{0}^{-1}(\image(\pi_{0})\cap\image(\pi_{1}))$. Note that $X_{2}\subseteq X_{0}$ and hence $X_{2}\cap X_{1}=\emptyset$. Also, $\pi_{1}^{-1}\pi_{0}$ is an isomorphism of $X_{2}$ to a subset of $X_{1}$ and hence $X_{2}$ supports no invariant probability measures. Thus, we can apply Theorem \ref{thm:main} to obtain an embedding $\pi_{2}:X_{2}\rightarrow Y_{2}$. Proceeding inductively, we define $X_{n+1}=\pi_{0}^{-1}(\image(\pi_{0})\cap\image(\pi_{n}))$, noting that it is disjoint from the previous sets because the $Y_{i}$ are disjoint, and that it is isomorphic to a subset of $(X_{n},T|_{X_{n}})$ and hence supports no $T$-invariant probability measures. Thus, by Theorem \ref{thm:main} we can find an embedding $\pi_{n+1}:X_{n+1}\rightarrow Y_{n+1}$. It is now easy to see that $\pi_{0}|_{X\setminus\bigcup X_{i}}\cup\bigcup\pi_{i}$ is a Borel injection and of course $T$-equivariant, as required. Turning now to Corollary \ref{cor:universal-Borel-systems}, we proceed similarly. By \cite[Theorem 1.5]{Hochman2013b} (together with the Ornstein isomorphism theorem to deal with the measure of maximal entropy), if $(X,T),(Y,S)$ are Borel systems as in the statement of Corollary \ref{cor:universal-Borel-systems}, with the same $h$, then there is a $T$-invariant Borel subset $X_{0}\subseteq X$ and a Borel embedding $\pi_{0}:X_{0}\rightarrow Y$. This can be improved to an embedding $X\rightarrow Y$ by the same argument as above, using the fact that there are mixing SFTs embedded in $Y$. By symmetry there are also embeddings $Y\rightarrow X$. One now applies a Cantor-Bernstein argument, as in \cite[Proof of Proposition 1.4]{Hochman2013b}, to obtain an isomorphism. \end{document}
\begin{document} \title{Estimates for the Asymptotic Convergence Factor of Two Intervals\footnote{published in: Journal of Computational and Applied Mathematics {\bf 236} (2011), 26--36.}} \author{Klaus Schiefermayr\footnote{University of Applied Sciences Upper Austria, School of Engineering and Environmental Sciences, Stelzhamerstrasse\,23, 4600 Wels, Austria, \textsc{[email protected]}}} \date{} \maketitle \theoremstyle{plain} \newtheorem{theorem}{Theorem} \newtheorem{corollary}{Corollary} \newtheorem{lemma}{Lemma} \newtheorem{definition}{Definition} \theoremstyle{definition} \newtheorem*{remark}{Remark} \newtheorem*{example}{Example} \begin{abstract} Let $E$ be the union of two real intervals not containing zero. Then $L_n^r(E)$ denotes the supremum norm on $E$ of that polynomial $P_n$ of degree less than or equal to $n$ which has minimal supremum norm subject to the normalization $P_n(0)=1$. It is well known that the limit $\kappa(E):=\lim_{n\to\infty}\sqrt[n]{L_n^r(E)}$ exists, where $\kappa(E)$ is called the asymptotic convergence factor, since it plays a crucial role in certain iterative methods for solving large-scale matrix problems. The factor $\kappa(E)$ can be expressed with the help of Jacobi's elliptic and theta functions, but this representation is very involved. In this paper, we give precise upper and lower bounds for $\kappa(E)$ in terms of elementary functions of the endpoints of $E$. \end{abstract} \noindent\emph{Mathematics Subject Classification (2000):} 41A17, 33E05, 41A29, 65F10 \noindent\emph{Keywords:} Estimated asymptotic convergence factor, Inequality, Jacobian elliptic functions, Jacobian theta functions, Two intervals \section{Introduction} For $n\in{\mathbb N}$, let $\mathbb P_n$ denote the set of all polynomials of degree at most $n$ with real coefficients. Let $E$ be the union of two real intervals, i.e., \begin{equation}\label{E} E:=[a_1,a_2]\cup[a_3,a_4],\qquad~a_1<a_2<a_3<a_4, \end{equation} and let the supremum norm $\|\cdot\|_E$ associated with $E$ be defined by \begin{equation} \|P_n\|_E:=\max_{x\in{E}}|P_n(x)| \end{equation} for any polynomial $P_n\in\mathbb P_n$. Consider the following two classical approximation problems: \begin{equation}\label{Ln} L_n(E):=\|T_n(\cdot,E)\|_E:=\min\bigl\{\|P_n\|_E:P_n\in\mathbb P_n\setminus\mathbb P_{n-1},P_n~\text{monic~polynomial}\bigr\} \end{equation} and, for $0\notin{E}$, \begin{equation}\label{Lnr} L_n^r(E,0):=\|R_n(\cdot,E,0)\|_E:=\min\bigl\{\|P_n\|_E:P_n\in\mathbb P_n,P_n(0)=1\bigr\}. \end{equation} The optimal (monic) polynomial $T_n(x,E)=x^n+\ldots\in\mathbb P_n\setminus\mathbb P_{n-1}$ in \eqref{Ln} is called the Chebyshev polynomial on $E$ and $L_n(E)$ is called the minimum deviation of $T_n(\cdot,E)$ on $E$. It is well known that the limit \begin{equation}\label{cap} \operatorname{cap}(E):=\lim_{n\to\infty}\sqrt[n]{L_n(E)} \end{equation} exists, where $\operatorname{cap}(E)$ is called the Chebyshev constant or the logarithmic capacity of $E$. Concerning the general properties of $\operatorname{cap}(C)$, $C\subset\mathbb C$ compact, we refer to \cite{Kirsch} and \cite[chapter\,5]{Ransford}. The optimal polynomial $R_n(\cdot,E,0)\in\mathbb P_n$ in \eqref{Lnr} is called the \emph{minimal residual polynomial} for the degree $n$ on $E$ and the quantity $L_n^r(E,0)$ is called the minimum deviation of $R_n(\cdot,E,0)$ on $E$. Note that we say \emph{for} the degree $n$ but not \emph{of} degree $n$ since the minimal residual polynomial for the degree $n$ on $E$ is a polynomial of degree $n$ or $n-1$, see \cite{Sch-2010}. 
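Before turning to the asymptotics, we remark that the quantity \eqref{Lnr} is easy to approximate numerically. The following Python sketch is only a rough illustration (it discretizes $E$ and solves the resulting linear program with SciPy; the grid size and the solver are arbitrary choices, not part of any result of this paper): it computes an approximation of $L_n^r(E,0)$ for a two-interval set, and the $n$-th roots of the computed values indicate the asymptotic convergence factor studied below.
\begin{verbatim}
import numpy as np
from scipy.optimize import linprog

def min_residual_norm(intervals, n, pts_per_interval=400):
    """Approximate L_n^r(E,0) = min{ ||P||_E : deg P <= n, P(0) = 1 }
    by discretizing E and solving a linear program in the coefficients."""
    xs = np.concatenate([np.linspace(a, b, pts_per_interval)
                         for (a, b) in intervals])
    V = np.vander(xs, n + 1, increasing=True)     # V[i, j] = xs[i] ** j
    m = len(xs)
    obj = np.zeros(n + 2); obj[-1] = 1.0          # variables: c_0..c_n, t
    A_ub = np.block([[ V, -np.ones((m, 1))],      # +P(x_i) <= t
                     [-V, -np.ones((m, 1))]])     # -P(x_i) <= t
    b_ub = np.zeros(2 * m)
    A_eq = np.zeros((1, n + 2)); A_eq[0, 0] = 1.0 # P(0) = c_0 = 1
    b_eq = np.array([1.0])
    bounds = [(None, None)] * (n + 1) + [(0, None)]
    res = linprog(obj, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq,
                  bounds=bounds)
    return res.fun

E = [(1.0, 2.0), (3.0, 4.0)]                      # two intervals, 0 not in E
for n in (4, 8, 12):
    Ln = min_residual_norm(E, n)
    print(n, Ln, Ln ** (1.0 / n))                 # n-th roots approach kappa(E,0)
\end{verbatim}
Such brute-force computations are useful as a sanity check for the closed-form bounds derived in Section\,3, but they say nothing about the limit itself; this is where the elliptic-function representation enters.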
As above, the limit \begin{equation}\label{kappa} \kappa(E,0):=\lim_{n\to\infty}\sqrt[n]{L_n^r(E,0)} \end{equation} exists, see, e.g.\ \cite{Kuijlaars} or \cite{DTT}, where $\kappa(E,0)$ is usually called the \emph{estimated asymptotic convergence factor}. The approximation problem \eqref{Lnr} and the convergence factor \eqref{kappa} arise for instance in the context of solving large-scale matrix problems by Krylov subspace iterations. There is an enormous literature on this subject, hence we would like to mention only three references, the review of Driscoll, Toh and Trefethen\,\cite{DTT}, the book of Fischer\,\cite{Fischer-Book} and the review of Kuijlaars\,\cite{Kuijlaars}. In the case of two intervals, both quantities, $\kappa(E,0)$ and $\operatorname{cap}(E)$, can be expressed with the help of Jacobi's elliptic and theta functions and this characterization goes back to the work of Achieser\,\cite{Achieser-1932}. Since, in both cases, the representation is very involved, it is desirable to have at least estimates of a simpler form. For $\operatorname{cap}(E)$, such estimates are given in \cite{Solynin}, \cite{Sch-2008-1}, and \cite{DubininKarp}. In this paper, we will give a precise upper and lower bound for $\kappa(E,0)$ in terms of elementary functions of the endpoints $a_1,a_2,a_3,a_4$ of $E$. The paper is organized as follows. In Section\,2, we recall the representations of $\kappa(E,0)$ and $\operatorname{cap}(E)$ with the help of Jacobi's elliptic and theta functions. Using an inequality between a Jacobian theta function and the Jacobian elliptic functions, proved in Section\,6, we obtain an upper and a lower bound for $\kappa(E,0)$ in Section\,3, which is the main result of the paper. In Section\,4, the following extremum problem is solved: Given the length of the two intervals and the length of the gap between the two intervals, for which set of two intervals does the convergence factor $\kappa(E,0)$ become minimal? In Section\,5, as a byproduct, a new and simple lower bound for $\operatorname{cap}(E)$ is derived. Finally, in Section\,6, the notion of Jacobi's elliptic and theta functions is recapitulated and several new inequalities, needed in Section\,3 and 4, are proved. \section{Representation of the Asymptotic Factor and the Logarithmic Capacity in Terms of Jacobi's Elliptic Functions} Let $E$ be given as in \eqref{E} such that $0\notin{E}$. It is convenient to use the linear transformation \begin{equation}\label{ell} \ell(x):=\frac{2x-a_1-a_4}{a_4-a_1}, \end{equation} which maps the set $E$ onto the normalized set \begin{equation}\label{Eh} \hat{E}:=[-1,\alpha]\cup[\beta,1], \end{equation} where $\alpha:=\ell(a_2)$ and $\beta:=\ell(a_3)$. For the corresponding Chebyshev polynomials, we have \begin{equation} T_n(x,E)=\Bigl(\frac{a_4-a_1}{2}\Bigr)^nT_n(\ell(x),\hat{E}), \end{equation} thus \begin{equation} L_n(E)=\Bigl(\frac{a_4-a_1}{2}\Bigr)^nL_n(\hat{E}) \end{equation} and \begin{equation} \operatorname{cap}(E)=\frac{a_4-a_1}{2}\,\operatorname{cap}(\hat{E}). \end{equation} Concerning the minimal residual polynomial, one has \begin{equation} R_n(x,E,0)=R_n(\ell(x),\hat{E},\xi), \end{equation} where $\xi:=\ell(0)$, thus \begin{equation} L_n^r(E,0)=L_n^r(\hat{E},\xi) \end{equation} and \begin{equation} \kappa(E,0)=\kappa(\hat{E},\xi), \end{equation} for details, see \cite[Sec.\,3.2]{Fischer-Book}. Let $\hat{E}$ be given as in \eqref{Eh} with $-1<\alpha<\beta<1$ and let $\xi\in\mathbb R\setminus\hat{E}$. 
Then there exists a (uniquely determined) Green's function for $\hat{E}^{\operatorname{c}}:=\overline{\mathbb C}\setminus\hat{E}$ (where $\overline{\mathbb C}:=\mathbb C\cup\infty$) with pole at infinity, denoted by $g(z;\hat{E}^{\operatorname{c}},\infty)$. The Green's function is defined by the following three properties: \begin{itemize} \item $g(z;\hat{E}^{\operatorname{c}},\infty)$ is harmonic in $\hat{E}^{\operatorname{c}}$. \item $g(z;\hat{E}^{\operatorname{c}},\infty)-\log|z|$ is harmonic in a neighbourhood of infinity. \item $g(z;\hat{E}^{\operatorname{c}},\infty)\to0$ as $z\to\hat{E}$, $z\in\hat{E}^{\operatorname{c}}$. \end{itemize} With the Green's function $g(z;\hat{E}^{\operatorname{c}},\infty)$, the estimated asymptotic convergence factor\\ $\kappa(\hat{E},\xi)$ can be characterized by \begin{equation}\label{kappa-g} \kappa(\hat{E},\xi)=\exp(-g(\xi;\hat{E}^{\operatorname{c}},\infty)). \end{equation} This connection was first observed by Eiermann, Li and Varga\,\cite{ELV} (for more general sets), see also \cite[Sec.\,3.1]{Fischer-Book}, \cite{Kuijlaars} and \cite{DTT}. Let us recall the construction of the Green's function for $\hat{E}^{\operatorname{c}}$, due to Achieser\,\cite{Achieser-1932}, see also \cite{Fischer-1992} and in particular \cite[Chapter\,3]{Fischer-Book}. This characterization is mainly based on a heavy usage of Jacobi's elliptic and theta functions. For the notation and some basic properties of this class of functions, see the beginning of Section\,6. Define the modulus $k$ of Jacobi's elliptic functions $\operatorname{sn}(u)$, $\operatorname{cn}(u)$ and $\operatorname{dn}(u)$ and of Jacobi's theta functions $\Theta(u)$, $H(u)$, $H_1(u)$ and $\Theta_1(u)$ by \begin{equation}\label{k} k=\sqrt{\frac{2(\beta-\alpha)}{(1-\alpha)(1+\beta)}}. \end{equation} Then the complementary modulus $k':=\sqrt{1-k^2}$ is given by \begin{equation}\label{k'} {k'}=\sqrt{\frac{(1+\alpha)(1-\beta)}{(1-\alpha)(1+\beta)}}. \end{equation} Note that $0<k,k'<1$. Let $K\equiv{K}(k)$ be the complete elliptic integral of the first kind and let $K'\equiv{K}'(k):=K(k')$. Let $0<\rho<K$ be uniquely defined by the equation \begin{equation}\label{sn} \operatorname{sn}^2(\rho)=\frac{1-\alpha}{2}. \end{equation} By \eqref{k}, \eqref{sn} and \eqref{sncndn}, \begin{equation}\label{cndn} \operatorname{cn}^2(\rho)=\frac{1+\alpha}{2} \qquad\text{and}\qquad\operatorname{dn}^2(\rho)=\frac{1+\alpha}{1+\beta}. \end{equation} Further, consider the function \begin{equation}\label{phi} \varphi(u):=\frac{\operatorname{sn}^2(u)\operatorname{cn}^2(\rho)+\operatorname{cn}^2(u)\operatorname{sn}^2(\rho)}{\operatorname{sn}^2(u)-\operatorname{sn}^2(\rho)}. \end{equation} Let \[ {\cal{P}}:=\bigl\{u\in\mathbb C:u=\lambda{K}+\operatorname{i}\lambda'K',~0<\lambda<1,~-1<\lambda'\leq1\bigr\} \] then $\varphi:{\cal P}\to\hat{E}^{\operatorname{c}}$ is a bijective mapping and especially the mappings $\varphi:[0,\rho)\to(-\infty,-1]$, $\varphi:[\rho,K]\to[1,\infty)$ and $\varphi:[\operatorname{i}{K}',K+\operatorname{i}{K}']\to[\alpha,\beta]$ are bijective. Then the Green's function for $\hat{E}^{\operatorname{c}}$ is given by \begin{equation} g(z;\hat{E}^{\operatorname{c}},\infty)=\log\Bigl|\frac{H(u+\rho)}{H(u-\rho)}\Bigr|,\quad\text{where}\quad z=\varphi(u). \end{equation} Since $\xi\in\mathbb R\setminus\hat{E}$, $u^*\in(0,K)\cup(\operatorname{i}{K}',K+\operatorname{i}{K}')$ is uniquely determined by the equation $\varphi(u^*)=\xi$. 
Thus, by \eqref{kappa-g}, the convergence factor $\kappa(\hat{E},\xi)$ can be computed by \[ \kappa(\hat{E},\xi)=\Bigl|\frac{H(u^*-\rho)}{H(u^*+\rho)}\Bigr|. \] Let us summarize these results in the following theorem. \begin{theorem}[Fischer\,\cite{Fischer-Book},~Achieser\,\cite{Achieser-1932}]\label{Thm-kappa} Let $\hat{E}:=[-1,\alpha]\cup[\beta,1]$, $-1<\alpha<\beta<1$, let $\xi\in\mathbb R\setminus\hat{E}$, and let $k\in(0,1)$ and $\rho\in(0,K)$ be given by \eqref{k} and \eqref{sn}, respectively. Then, the asymptotic convergence factor $\kappa(\hat{E},\xi)$ is given by \begin{equation} \kappa(\hat{E},\xi)=\Bigl|\frac{H(u^*-\rho)}{H(u^*+\rho)}\Bigr|, \end{equation} where $u^*\in(0,K)\cup(\operatorname{i}{K}',K+\operatorname{i}{K}')$ is uniquely determined by the equation $\varphi(u^*)=\xi$, $\varphi$ defined in \eqref{phi}. \end{theorem} On the other hand, concerning the logarithmic capacity of $\hat{E}$, Achieser\,\cite{Achieser-1930} proved the following, see also \cite[Cor.\,8]{PehSch-2004}. \begin{theorem}[Achieser\,\cite{Achieser-1930}]\label{Thm-AchieserCap} Let $\hat{E}:=[-1,\alpha]\cup[\beta,1]$, $-1<\alpha<\beta<1$, and let $k\in(0,1)$ and $\rho\in(0,K)$ be given by \eqref{k} and \eqref{sn}, respectively. Then, the logarithmic capacity of $\hat{E}$ is given by \begin{equation}\label{cap1} \operatorname{cap}(\hat{E})=\frac{1+\beta}{2(1+\alpha)}\cdot\frac{\Theta^4(0)}{\Theta^4(\rho)}. \end{equation} \end{theorem} \section{Bounds for the Asymptotic Convergence Factor of Two Intervals} \begin{theorem}\label{Thm-ineq-kappa} Let $\hat{E}:=[-1,\alpha]\cup[\beta,1]$, $-1<\alpha<\beta<1$ and let $\xi\in\mathbb R\setminus\hat{E}$. Then, for the convergence factor $\kappa(\hat{E},\xi)$, the inequalities \begin{equation}\label{ineq-kappa} \frac{A_2}{A_1}\cdot{B}\leq\kappa(\hat{E},\xi)\leq\frac{A_1}{A_2}\cdot{B} \end{equation} hold, where \begin{equation}\label{A} \begin{aligned} A_1&:=\sqrt[4]{(1-\alpha)(1+\beta)}+\sqrt[4]{(1+\alpha)(1-\beta)},\\ A_2&:=\sqrt[4]{8}\sqrt[4]{\sqrt{(1-\alpha)(1+\beta)}+\sqrt{(1+\alpha)(1-\beta)}}\sqrt[16]{(1-\alpha^2)(1-\beta^2)}, \end{aligned} \end{equation} and $B$ is given in the following: \begin{enumerate} \item For $\alpha<\xi<\beta$, \begin{equation}\label{B1} B:=\frac{\sqrt[4]{(1-\alpha^2)(1-\beta^2)}+\sqrt{1-\xi^2}-\sqrt{(\xi-\alpha)(\beta-\xi)}} {\sqrt[4]{(1-\alpha^2)(1-\beta^2)}+\sqrt{1-\xi^2}+\sqrt{(\xi-\alpha)(\beta-\xi)}}. \end{equation} \item For $\xi\in\mathbb R\setminus[-1,1]$, \begin{equation}\label{B2} \begin{aligned} B:=&\frac{(2\xi-\xi\alpha+\xi\beta-\alpha-\beta)\sqrt[4]{\frac{(1+\alpha)(1-\beta)}{(1-\alpha)(1+\beta)}} +2\sqrt{(\xi-\alpha)(\xi-\beta)}-(\beta-\alpha)\sqrt{\xi^2-1}} {(2\xi-\xi\alpha+\xi\beta-\alpha-\beta)\sqrt[4]{\frac{(1+\alpha)(1-\beta)}{(1-\alpha)(1+\beta)}} +2\sqrt{(\xi-\alpha)(\xi-\beta)}+(\beta-\alpha)\sqrt{\xi^2-1}}\\ &\times\frac{\bigl|\sqrt{(1+\xi)(\xi-\alpha)}-\sqrt{(\xi-1)(\xi-\beta)}\bigr|} {\sqrt{(1+\xi)(\xi-\alpha)}+\sqrt{(\xi-1)(\xi-\beta)}} \end{aligned} \end{equation} \end{enumerate} \end{theorem} \begin{proof} By \eqref{sn}, \eqref{cndn} and \eqref{sncndn}, the mapping $\varphi(u)$ in \eqref{phi} may be rewritten as \begin{equation}\label{phi-1} \varphi(u)=\alpha+\frac{1-\alpha^2}{2\operatorname{sn}^2(u)+\alpha-1} \end{equation} Let $u^*\in(0,K)\cup(\operatorname{i}{K}',K+\operatorname{i}{K}')$ be uniquely determined by the equation $\varphi(u^*)=\xi$. 
Note that \begin{equation}\label{u*} \begin{aligned} \alpha<\xi<\beta&\iff{u}^*\in(\operatorname{i}{K}',K+\operatorname{i}{K}')\\ \xi\in(-\infty,-1)\cup(1,\infty)&\iff{u}^*\in(0,K) \end{aligned} \end{equation} By \eqref{sn} and \eqref{phi-1}, $\varphi(u^*)=\xi$ is equivalent to \begin{equation}\label{snu*} \operatorname{sn}^2(u^*)=\frac{(1+\xi)(1-\alpha)}{2(\xi-\alpha)}=\frac{1+\xi}{\xi-\alpha}\,\operatorname{sn}^2(\rho). \end{equation} By \eqref{k}, \eqref{cndn}, \eqref{snu*} and \eqref{sncndn}, \begin{equation}\label{cnu*} \operatorname{cn}^2(u^*)=\frac{(\xi-1)(1+\alpha)}{2(\xi-\alpha)}=\frac{\xi-1}{\xi-\alpha}\,\operatorname{cn}^2(\rho) \end{equation} and \begin{equation}\label{dnu*} \operatorname{dn}^2(u^*)=\frac{(\xi-\beta)(1+\alpha)}{(1+\beta)(\xi-\alpha)}=\frac{\xi-\beta}{\xi-\alpha}\,\operatorname{dn}^2(\rho). \end{equation} In order to obtain estimates for $\kappa(\hat{E},\xi)$, we will use the inequality \begin{equation}\label{Theta} \frac{\sqrt[4]{8(1+k')}\sqrt[8]{k'}}{1+\sqrt{k'}} \leq\frac{\Theta(u-\rho)}{\Theta(u+\rho)}\cdot\frac{\sqrt{k'}+\operatorname{dn}(u-\rho)}{\sqrt{k'}+\operatorname{dn}(u+\rho)}\leq \frac{1+\sqrt{k'}}{\sqrt[4]{8(1+k')}\sqrt[8]{k'}} \end{equation} which follows immediately from Lemma\,\ref{Lemma-IneqTheta}. By \eqref{k'}, straightforward computation gives \begin{equation}\label{factor-k'} \frac{1+\sqrt{k'}}{\sqrt[4]{8(1+k')}\sqrt[8]{k'}}=\frac{A_1}{A_2}, \end{equation} where $A_1$ and $A_2$ are defined in \eqref{A}. Further, by \cite[Eq.\,(123.01)]{BF}, \begin{equation}\label{dndn} \begin{aligned} &\frac{\sqrt{k'}+\operatorname{dn}(u+\rho)}{\sqrt{k'}+\operatorname{dn}(u-\rho)}\\ &=\frac{\sqrt{k'}(1-k^2\operatorname{sn}^2(u)\operatorname{sn}^2(\rho))+\operatorname{dn}(u)\operatorname{dn}(\rho)-k^2\operatorname{sn}(u)\operatorname{sn}(\rho)\operatorname{cn}(u)\operatorname{cn}(\rho)} {\sqrt{k'}(1-k^2\operatorname{sn}^2(u)\operatorname{sn}^2(\rho))+\operatorname{dn}(u)\operatorname{dn}(\rho)+k^2\operatorname{sn}(u)\operatorname{sn}(\rho)\operatorname{cn}(u)\operatorname{cn}(\rho)} \end{aligned} \end{equation} We consider the two cases $\alpha<\xi<\beta$ and $\xi\in\mathbb R\setminus[-1,1]$. \begin{enumerate} \item[1.] $\alpha<\xi<\beta$.\\ By \eqref{u*}, $u^*=v^*+\operatorname{i}{K}'$ with $0<v^*<K$. With the formula \cite{AS} \[ H(u+\operatorname{i}{K}')=\operatorname{i}\exp(\tfrac{\pi{K}'}{4K})\exp(-\tfrac{\operatorname{i}\pi{u}}{2K})\,\Theta(u), \] we get \begin{equation}\label{kappa-case1} \begin{aligned} \kappa(\hat{E},\xi)&=\Bigl|\frac{H(v^*-\rho+\operatorname{i}{K}')}{H(v^*+\rho+\operatorname{i}{K}')}\Bigr| =\Bigl|\frac{\operatorname{i}\exp(\tfrac{\pi{K}'}{4K})\exp(-\tfrac{\operatorname{i}\pi(v^*-\rho)}{2K})\,\Theta(v^*-\rho)} {\operatorname{i}\exp(\tfrac{\pi{K}'}{4K})\exp(-\tfrac{\operatorname{i}\pi(v^*+\rho)}{2K})\,\Theta(v^*+\rho)}\Bigr|\\ &=|\exp(\tfrac{\operatorname{i}\pi\rho}{K})|\cdot\Bigl|\frac{\Theta(v^*-\rho)}{\Theta(v^*+\rho)}\Bigr| =\frac{\Theta(v^*-\rho)}{\Theta(v^*+\rho)}. 
\end{aligned} \end{equation} Thus, by \eqref{Theta} and \eqref{factor-k'}, \begin{equation}\label{ineq-case1} \frac{A_2}{A_1}\cdot\frac{\sqrt{k'}+\operatorname{dn}(v^*+\rho)}{\sqrt{k'}+\operatorname{dn}(v^*-\rho)}\leq\kappa(\hat{E},\xi) \leq\frac{A_1}{A_2}\cdot\frac{\sqrt{k'}+\operatorname{dn}(v^*+\rho)}{\sqrt{k'}+\operatorname{dn}(v^*-\rho)} \end{equation} By \cite[Eq.\,(122.07)]{BF} \[ \operatorname{sn}^2(u^*)=\operatorname{sn}^2(v^*+\operatorname{i}{K}')=\frac{1}{k^2\operatorname{sn}^2(v^*)} \] hence, by \eqref{k} and \eqref{snu*}--\eqref{dnu*}, we obtain the formulae \begin{align} \operatorname{sn}^2(v^*)&=\frac{1}{k^2\operatorname{sn}^2(u^*)}=\frac{(\xi-\alpha)(1+\beta)}{(1+\xi)(\beta-\alpha)},\label{snv*}\\ \operatorname{cn}^2(v^*)&=1-\operatorname{sn}^2(v^*)=\frac{(\beta-\xi)(1+\alpha)}{(1+\xi)(\beta-\alpha)},\label{cnv*}\\ \operatorname{dn}^2(v^*)&=1-k^2\operatorname{sn}^2(v^*)=\frac{(1-\xi)(1+\alpha)}{(1+\xi)(1-\alpha)}.\label{dnv*} \end{align} Starting from relation \eqref{dndn} with $u=v^*$ and using \eqref{k}--\eqref{cndn} and \eqref{snv*}--\eqref{dnv*}, we obtain \[ \frac{\sqrt{k'}+\operatorname{dn}(v^*+\rho)}{\sqrt{k'}+\operatorname{dn}(v^*-\rho)}=B, \] where $B$ is defined in \eqref{B1}. Hence, inequality \eqref{ineq-kappa} follows by \eqref{ineq-case1}. \item[2.] $\xi\in\mathbb R\setminus[-1,1]$.\\ By \eqref{u*}, $0<u^*<K$. By Theorem\,\ref{Thm-kappa}, \eqref{H-H1-T1} and Lemma\,\ref{Lemma-Theta}\,(i), \begin{equation}\label{kappa-case2} \kappa(\hat{E},\xi)=\Bigl|\frac{H(u^*-\rho)}{H(u^*+\rho)}\Bigr| =\Bigl|\frac{\operatorname{sn}(u^*-\rho)}{\operatorname{sn}(u^*+\rho)}\Bigr|\cdot\frac{\Theta(u^*-\rho)}{\Theta(u^*+\rho)} \end{equation} Thus, by \eqref{kappa-case2}, \eqref{Theta} and \eqref{factor-k'}, \begin{equation}\label{ineq-case2} \frac{A_2}{A_1}\cdot\frac{\sqrt{k'}+\operatorname{dn}(u^*+\rho)}{\sqrt{k'}+\operatorname{dn}(u^*-\rho)} \cdot\Bigl|\frac{\operatorname{sn}(u^*-\rho)}{\operatorname{sn}(u^*+\rho)}\Bigr|\leq\kappa(\hat{E},\xi) \leq\frac{A_1}{A_2}\cdot\frac{\sqrt{k'}+\operatorname{dn}(u^*+\rho)}{\sqrt{k'}+\operatorname{dn}(u^*-\rho)} \cdot\Bigl|\frac{\operatorname{sn}(u^*-\rho)}{\operatorname{sn}(u^*+\rho)}\Bigr| \end{equation} By the formulae for $\operatorname{sn}(u+v)$ and $\operatorname{sn}(u-v)$, see \cite[Eq.\,(123.01)]{BF}, together with \eqref{snu*}--\eqref{dnu*}, we get \begin{equation}\label{snsn} \begin{aligned} \Bigl|\frac{\operatorname{sn}(u^*-\rho)}{\operatorname{sn}(u^*+\rho)}\Bigr| &=\Bigl|\frac{\operatorname{sn}(u^*)\operatorname{cn}(\rho)\operatorname{dn}(\rho)-\operatorname{sn}(\rho)\operatorname{cn}(u^*)\operatorname{dn}(u^*)} {\operatorname{sn}(u^*)\operatorname{cn}(\rho)\operatorname{dn}(\rho)+\operatorname{sn}(\rho)\operatorname{cn}(u^*)\operatorname{dn}(u^*)}\Bigr|\\ &=\frac{\bigl|\sqrt{(1+\xi)(\xi-\alpha)}-\sqrt{(\xi-1)(\xi-\beta)}\bigr|} {\sqrt{(1+\xi)(\xi-\alpha)}+\sqrt{(\xi-1)(\xi-\beta)}} \end{aligned} \end{equation} Starting from relation \eqref{dndn} with $u=u^*$ and using \eqref{k}--\eqref{cndn}, \eqref{snu*}--\eqref{dnu*} and \eqref{snsn}, we obtain \[ \frac{\sqrt{k'}+\operatorname{dn}(u^*+\rho)}{\sqrt{k'}+\operatorname{dn}(u^*-\rho)} \cdot\Bigl|\frac{\operatorname{sn}(u^*-\rho)}{\operatorname{sn}(u^*+\rho)}\Bigr|=B, \] where $B$ is defined in \eqref{B2}. Hence, inequality \eqref{ineq-kappa} follows by \eqref{ineq-case2}. \end{enumerate} \end{proof} \begin{remark} \begin{enumerate} \item Let $-1<\alpha<\beta<1$. 
If $\{\alpha,\beta\}$ changes to $\{-\beta,-\alpha\}$, then, by \eqref{k}, the modulus $k$ does not change, and, by \eqref{sn}, $\rho$ changes to $K-\rho$. Thus, by \eqref{snu*},
\begin{equation}
\kappa([-1,\alpha]\cup[\beta,1],\xi)=\kappa([-1,-\beta]\cup[-\alpha,1],\tilde{\xi}),
\end{equation}
where $\tilde{\xi}$ satisfies the equation
\begin{equation}
\frac{(1+\xi)(1-\alpha)}{2(\xi-\alpha)}=\frac{(1+\tilde{\xi})(1+\beta)}{2(\tilde{\xi}+\beta)}.
\end{equation}
Hence, for the plots introduced in (ii), it remains to consider the case $\alpha\leq-\beta$ only.
\item In order to illustrate the sharpness of the estimates for $\kappa(\hat{E},\xi)$ given in Theorem\,\ref{Thm-ineq-kappa}, let us present some plots, see Fig.\,\ref{Fig_AsymptoticFactor}. For the six cases $\{\alpha,\beta\}=\{-0.2,0.1\}$, $\{\alpha,\beta\}=\{-0.5,0.0\}$, $\{\alpha,\beta\}=\{-0.5,0.5\}$, $\{\alpha,\beta\}=\{-0.9,-0.3\}$, $\{\alpha,\beta\}=\{-0.9,0.5\}$, $\{\alpha,\beta\}=\{-0.9,0.9\}$, we have plotted the graph of $\kappa(\hat{E},\xi)$ (solid line), the graph of the upper bound in \eqref{ineq-kappa} (dashed line), and the graph of the lower bound in \eqref{ineq-kappa} (dotted line) for $\alpha\leq\xi\leq\beta$. As one can see, the graphs match nearly perfectly; only if the intervals $[-1,\alpha]$ and $[\beta,1]$ are very short is there a visually recognizable difference between the bounds and the exact value $\kappa(\hat{E},\xi)$.
\end{enumerate}
\end{remark}
\begin{figure}
\caption{The asymptotic convergence factor $\kappa(\hat{E},\xi)$ (solid line) together with the upper bound (dashed line) and the lower bound (dotted line) of \eqref{ineq-kappa} for six choices of $\{\alpha,\beta\}$.}
\label{Fig_AsymptoticFactor}
\end{figure}
\section{An Extremum Problem}
In this section, we completely solve the following problem: given the lengths of the two intervals, say $\ell_1$ and $\ell_2$, and given the length of the gap between the two intervals, say $\ell_3$, for which set of two intervals $E=[a_1,a_2]\cup[a_3,a_4]$ with $a_2-a_1=\ell_1$, $a_4-a_3=\ell_2$ and $a_3-a_2=\ell_3$ is the convergence factor $\kappa(E,0)$ minimal? For the linearly transformed problem (see Section\,2), this problem reads as follows. Given $\hat{E}:=[-1,\alpha]\cup[\beta,1]$, $-1<\alpha<\beta<1$, for which $\xi\in(\alpha,\beta)$ is the convergence factor $\kappa(\hat{E},\xi)$ minimal? The answer is given in the following theorem.
\begin{theorem}
Let $\hat{E}:=[-1,\alpha]\cup[\beta,1]$, $-1<\alpha<\beta<1$, and let $k\in(0,1)$ and $\rho\in(0,K)$ be given by \eqref{k} and \eqref{sn}, respectively. Then the convergence factor $\kappa(\hat{E},\xi)$, $\alpha<\xi<\beta$, is minimal for
\begin{equation}
\xi^*=\alpha+\operatorname{zn}(\rho)\sqrt{(1-\alpha)(1+\beta)}.
\end{equation}
\end{theorem}
\begin{proof}
Let $f(u):=\Theta(u-\rho)/\Theta(u+\rho)$. In \cite{Sch-2005}, it is proved that $f''(u)>0$, $0<u<K$, with $f(0)=f(K)=1$. By \eqref{kappa-case1}, $\kappa(\hat{E},\xi)=f(v^*)$, where $v^*$ is uniquely determined by \eqref{snv*}. By Lemma\,\ref{Lemma-dTheta},
\begin{equation}\label{dTheta=0}
f'(v^*)=0\iff\operatorname{zn}(\rho)\bigl[1-k^2\operatorname{sn}^2(v^*)\operatorname{sn}^2(\rho)\bigr]=k^2\operatorname{sn}^2(v^*)\operatorname{sn}(\rho)\operatorname{cn}(\rho)\operatorname{dn}(\rho).
\end{equation}
By \eqref{k}, \eqref{sn}, \eqref{cndn} and \eqref{snv*},
\[
1-k^2\operatorname{sn}^2(v^*)\operatorname{sn}^2(\rho)=\frac{1+\alpha}{1+\xi}
\]
and
\[
k^2\operatorname{sn}^2(v^*)\operatorname{sn}(\rho)\operatorname{cn}(\rho)\operatorname{dn}(\rho)=\frac{(\xi-\alpha)(1+\alpha)}{(1+\xi)\sqrt{(1-\alpha)(1+\beta)}}.
\]
Thus, by \eqref{dTheta=0},
\begin{align*}
f'(v^*)=0&\iff\frac{1+\alpha}{1+\xi}\cdot\operatorname{zn}(\rho)=\frac{(\xi-\alpha)(1+\alpha)}{(1+\xi)\sqrt{(1-\alpha)(1+\beta)}}\\
&\iff\xi=\alpha+\operatorname{zn}(\rho)\sqrt{(1-\alpha)(1+\beta)}.
\end{align*}
\end{proof}
\section{Bounds for the Logarithmic Capacity of Two Intervals}
\begin{theorem}\label{Thm_LB}
Let $\hat{E}:=[-1,\alpha]\cup[\beta,1]$, $-1<\alpha\leq\beta<1$. Then
\begin{equation}\label{LB-cap}
\mathbb CAP\hat{E}\geq\frac{1}{2}\Biggl(\frac{\sqrt[4]{1-\alpha^2}+\sqrt[4]{1-\beta^2}}
{\sqrt[4]{(1-\alpha)(1+\beta)}+\sqrt[4]{(1+\alpha)(1-\beta)}}\Biggr)^4=:C_1,
\end{equation}
where equality is attained if $\alpha=\beta$ or if $\alpha\to-1$ ($\beta$ fixed) or if $\beta\to1$ ($\alpha$ fixed).
\end{theorem}
\begin{proof}
Let $-1<\alpha<\beta<1$ be given, and let $k\in(0,1)$ and $\rho\in(0,K)$ be given by \eqref{k} and \eqref{sn}, respectively. By Theorem\,\ref{Thm-AchieserCap} and Lemma\,\ref{Lemma-IneqTheta},
\[
\mathbb CAP\hat{E}=\frac{1+\beta}{2(1+\alpha)}\cdot\frac{\Theta^4(0)}{\Theta^4(\rho)}
\geq\frac{1+\beta}{2(1+\alpha)}\Bigl(\frac{\sqrt{k'}+\operatorname{dn}(\rho)}{1+\sqrt{k'}}\Bigr)^4.
\]
Using \eqref{k'} and \eqref{cndn}, inequality \eqref{LB-cap} follows. Concerning the cases of equality: If $\alpha=\beta$, then, for $C_1$ in \eqref{LB-cap}, we have $C_1=1/2=\mathbb CAP[-1,1]$. Further, for fixed $\beta$, $\lim_{\alpha\to-1}C_1=(1-\beta)/4=\mathbb CAP[\beta,1]$ and, for fixed $\alpha$, $\lim_{\beta\to1}C_1=(1+\alpha)/4=\mathbb CAP[-1,\alpha]$.
\end{proof}
\begin{remark}
\begin{enumerate}
\item In \cite{Solynin}, A.Yu.\,Solynin gave an excellent lower bound for the logarithmic capacity of the union of several intervals; see also \cite{Sch-2008-1} and \cite{Sch-2008-2} for a discussion of this result. Although we could not match the sharpness of Solynin's bound in the two-interval case, we found it useful to give the very simple lower bound \eqref{LB-cap}.
\item In the recent paper \cite{DubininKarp}, Dubinin and Karp even improved Solynin's lower bound and, in addition, based on a result of Haliste\,\cite{Haliste}, they gave an upper bound for the logarithmic capacity of several intervals. For the two-interval case, the result reads as follows.
\end{enumerate}
\end{remark}
\begin{theorem}[Dubinin\,\&\,Karp\,\cite{DubininKarp}]\label{Thm-UBcap}
Let $\hat{E}:=[-1,\alpha]\cup[\beta,1]$, $-1<\alpha<\beta<1$. Then
\begin{equation}\label{UB-cap}
\mathbb CAP\hat{E}\leq\tfrac{1}{4}\Bigl(\sqrt{(1+\alpha)(1+\beta)}+\sqrt{(1-\alpha)(1-\beta)}\Bigr),
\end{equation}
where equality is attained if $\alpha=\beta$ or if $\alpha=-\beta$.
\end{theorem}
\begin{remark}
\begin{enumerate}
\item Numerical computations show that the upper bound in \eqref{UB-cap} is excellent if the modulus $k$ defined in \eqref{k} is not too large. If the modulus $k$ is close to $1$, i.e., if, for fixed $\alpha$, the endpoint $\beta$ is near $1$, then the upper bound derived in \cite{Sch-2008-1} is better (i.e.\ smaller) than that of \eqref{UB-cap}.
\item In Fig.\,\ref{Fig_LogCapacity}, for $\alpha\in\{-0.8,-0.3,0.3,0.8\}$ and $\alpha\leq\beta\leq1$, we have plotted the graph of $\mathbb CAP\hat{E}$ (solid line), the graph of the lower bound \eqref{LB-cap} (dashed line), and the graph of the upper bound \eqref{UB-cap} (dotted line). As one can see, the upper bound matches nearly perfectly, whereas the lower bound is also quite good.
\item With the help of Lemma\,\ref{Lemma-IneqTheta} and analogously to the proof of Theorem\,\ref{Thm_LB}, it is also possible to obtain an upper bound for $\mathbb CAP\hat{E}$. Since numerical computations indicate that this upper bound is never better than the very simple upper bound \eqref{UB-cap}, we omit it.
\end{enumerate}
\end{remark}
\begin{figure}
\caption{The logarithmic capacity $\mathbb CAP\hat{E}$ (solid line) together with the lower bound \eqref{LB-cap} (dashed line) and the upper bound \eqref{UB-cap} (dotted line) for $\alpha\in\{-0.8,-0.3,0.3,0.8\}$.}
\label{Fig_LogCapacity}
\end{figure}
With the help of Theorem\,\ref{Thm-UBcap}, we get a very accurate inequality for $\Theta(u)/\Theta(0)$.
\begin{corollary}
For $0<k<1$ and $0\leq{u}\leq{K}$,
\begin{equation}\label{IneqTheta-1}
\frac{\Theta^4(u)}{\Theta^4(0)}\geq\frac{1}{\operatorname{dn}(u)(\operatorname{cn}^2(u)+k'\operatorname{sn}^2(u))},
\end{equation}
where equality is attained if $u=0$ or if $u=\frac{1}{2}K$ or if $u=K$ or if $k\to0$.
\end{corollary}
\begin{proof}
Let $-1<\alpha<\beta<1$ be fixed and let $k\in(0,1)$ and $\rho\in(0,K)$ be given by \eqref{k} and \eqref{sn}. By \eqref{k}, \eqref{sn}, and \eqref{cndn},
\[
\frac{1}{4}\Bigl(\sqrt{(1+\alpha)(1+\beta)}+\sqrt{(1-\alpha)(1-\beta)}\Bigr)=\frac{\operatorname{cn}^2(\rho)+k'\operatorname{sn}^2(\rho)}{2\,\operatorname{dn}(\rho)}
\]
which together with \eqref{cndn}, Theorem\,\ref{Thm-AchieserCap} and Theorem\,\ref{Thm-UBcap} gives
\[
\mathbb CAP\hat{E}=\frac{1}{2\operatorname{dn}^2(\rho)}\cdot\frac{\Theta^4(0)}{\Theta^4(\rho)}\leq\frac{\operatorname{cn}^2(\rho)+k'\operatorname{sn}^2(\rho)}{2\,\operatorname{dn}(\rho)}.
\]
The cases of equality follow immediately from \eqref{sncndn-K}, Lemma\,\ref{Lemma-ThetaK2} and Lemma\,\ref{Lemma-Theta}.
\end{proof}
\section{Auxiliary Results for Jacobi's Elliptic and Theta Functions}
Let $k$, $0<k<1$, be the modulus of Jacobi's elliptic functions $\operatorname{sn}(u)\equiv\operatorname{sn}(u,k)$, $\operatorname{cn}(u)\equiv\operatorname{cn}(u,k)$, and $\operatorname{dn}(u)\equiv\operatorname{dn}(u,k)$, of Jacobi's theta functions $\Theta(u)\equiv\Theta(u,k)$, $H(u)\equiv{H}(u,k)$, $H_1(u)\equiv{H}_1(u,k)$, and $\Theta_1(u)\equiv\Theta_1(u,k)$ (Jacobi's old notation), and, finally, of Jacobi's zeta function $\operatorname{zn}(u)\equiv\operatorname{zn}(u,k)$. Here we follow the notation of Carlson and Todd~\cite{CarlsonTodd}; in other references, such as \cite{Lawden}, Jacobi's zeta function is denoted by $Z(u)$. Let $k':=\sqrt{1-k^2}$ be the complementary modulus, let $K\equiv{K}(k)$ be the complete elliptic integral of the first kind and let $K'\equiv{K}'(k):=K(k')$. Note that $K,K'\in\mathbb R^+$. Further let $q\equiv{q}(k):=\exp(-\pi{K'}/K)$ be the nome of Jacobi's theta functions. For the definitions and many important properties of Jacobi's elliptic and theta functions, we refer to \cite{BF}, \cite{Lawden} and \cite{AS}. Let us mention that there is a different notation for the four theta functions (e.g.\ in \cite{BF} and \cite{Lawden}) given by $\Theta(u,k)=\theta_0(v,q)=\theta_4(v,q)$, $H(u,k)=\theta_1(v,q)$, $H_1(u,k)=\theta_2(v,q)$ and $\Theta_1(u,k)=\theta_3(v,q)$, where instead of the parameter $k$ the parameter $q$ is used and $v=u\pi/(2K)$. Sometimes also the parameter $\tau=\operatorname{i}{K}'/K$ is used. The main goal of this section is to derive an upper and a lower bound for the theta function $\Theta(u)$ in terms of Jacobi's elliptic function $\operatorname{dn}(u)$ and the modulus $k$, see Lemma\,\ref{Lemma-IneqTheta}. For this reason, we have to prove a sequence of lemmas. Before doing so, we record a quick numerical sanity check of this two-sided bound, and then collect some useful formulae.
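For any fixed modulus, the two-sided bound of Lemma\,\ref{Lemma-IneqTheta} can be verified with a few lines of code. The following Python sketch is a minimal illustration only and is not used in any proof; it relies on the identification $\Theta(u,k)=\theta_4(v,q)$ with $v=u\pi/(2K)$ and the nome $q$, exactly as recalled above, and samples the quantity $\Theta(u)\bigl(\sqrt{k'}+\operatorname{dn}(u)\bigr)/\Theta(0)$ on $[0,K]$ for the modulus $k=0.99$.
\begin{verbatim}
# Sanity check of the two-sided bound for Theta(u)*(sqrt(k')+dn(u))/Theta(0).
# Convention: Theta(u,k) = theta_4(pi*u/(2K), q) with nome q = exp(-pi*K'/K).
import mpmath as mp

k  = mp.mpf('0.99')                 # any fixed modulus 0 < k < 1
m  = k**2
kp = mp.sqrt(1 - m)                 # complementary modulus k'
K  = mp.ellipk(m)
Kp = mp.ellipk(1 - m)
q  = mp.exp(-mp.pi * Kp / K)

def Theta(u):
    return mp.jtheta(4, mp.pi * u / (2 * K), q)

lower = (8 * (1 + kp)) ** (mp.mpf(1) / 4) * kp ** (mp.mpf(1) / 8)
upper = 1 + mp.sqrt(kp)
for t in range(41):                 # sample u on [0, K]
    u = t * K / 40
    mid = Theta(u) / Theta(0) * (mp.sqrt(kp) + mp.ellipfun('dn', u, m))
    assert lower - mp.mpf('1e-12') <= mid <= upper + mp.mpf('1e-12')
print('bounds verified; equality occurs at u = 0, K/2 and K')
\end{verbatim}
We now turn to the formulae needed in the proofs.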
By \cite[Eq.\,(121.00)]{BF},
\begin{equation}\label{sncndn}
\operatorname{sn}^2(u)+\operatorname{cn}^2(u)=1,\qquad k^2\operatorname{sn}^2(u)+\operatorname{dn}^2(u)=1,
\end{equation}
and, by \cite[Eq.\,(1052.02)]{BF},
\begin{equation}\label{H-H1-T1}
H(u)=\sqrt{k}\,\operatorname{sn}(u)\,\Theta(u),\,H_1(u)=\tfrac{\sqrt{k}}{\sqrt{k'}}\,\operatorname{cn}(u)\,\Theta(u),\,
\Theta_1(u)=\tfrac{1}{\sqrt{k'}}\,\operatorname{dn}(u)\,\Theta(u),
\end{equation}
and, by \cite[Eq.\,(122.10)]{BF} and \cite[Eq.\,(3.6.2)]{Lawden},
\begin{equation}\label{sncndn-K}
\begin{aligned}
&\operatorname{sn}(0)=\operatorname{zn}(0)=0,\quad\operatorname{cn}(0)=1,\quad\operatorname{dn}(0)=1,\\
&\operatorname{sn}(K)=1,\quad\operatorname{cn}(K)=\operatorname{zn}(K)=0,\quad\operatorname{dn}(K)=k',\\
&\operatorname{sn}(\tfrac{1}{2}K)=\tfrac{1}{\sqrt{1+k'}},\,\operatorname{cn}(\tfrac{1}{2}K)=\sqrt{\tfrac{k'}{1+k'}},
\,\operatorname{dn}(\tfrac{1}{2}K)=\sqrt{k'},\,\operatorname{zn}(\tfrac{1}{2}K)=\tfrac{1}{2}(1-k'),
\end{aligned}
\end{equation}
Further, by \cite[Eq.\,(731.01)--(731.03)]{BF} and \cite[Eqs.\,(3.4.25)~and~(3.6.1)]{Lawden},
\begin{equation}\label{d-sncndnzn}
\begin{aligned}
\frac{\partial}{\partial{u}}\{\operatorname{sn}(u)\}=\operatorname{cn}(u)\operatorname{dn}(u),\qquad
\frac{\partial}{\partial{u}}\{\operatorname{cn}(u)\}=-\operatorname{sn}(u)\operatorname{dn}(u),\\
\frac{\partial}{\partial{u}}\{\operatorname{dn}(u)\}=-k^2\operatorname{sn}(u)\operatorname{cn}(u),\qquad
\frac{\partial}{\partial{u}}\{\operatorname{zn}(u)\}=\operatorname{dn}^2(u)-E/K,
\end{aligned}
\end{equation}
and, by \cite[Lem.\,4]{Sch-2005},
\begin{equation}\label{d-Theta}
\begin{aligned}
\frac{\partial}{\partial{u}}\{\Theta(u)\}=\Theta(u)\operatorname{zn}(u),\quad
\frac{\partial}{\partial{u}}\{\Theta_1(u)\}=\tfrac{1}{\sqrt{k'}}\,\Theta(u)\bigl(-k^2\operatorname{sn}(u)\operatorname{cn}(u)+\operatorname{dn}(u)\operatorname{zn}(u)\bigr).
\end{aligned}
\end{equation}
Next, let us collect some basic properties of Jacobi's theta function $\Theta(u)$ in the following lemma.
\begin{lemma}\label{Lemma-Theta}
The function $\Theta(u)$ has the following properties:
\begin{enumerate}
\item $\Theta(u)>0$ for $u\in\mathbb R$ and $\Theta(u+2K)=\Theta(u)$ for $u\in\mathbb C$.
\item $\Theta(u)$ is strictly monotone increasing in $[0,K]$ and strictly monotone decreasing in $[K,2K]$.
\item $\Theta(0)\leq\Theta(u)\leq\Theta(K)$ for $u\in\mathbb R$.
\item $\Theta(0)=\Theta_1(K)=\sqrt{k'}\,\Theta(K)=\sqrt{k'}\,\Theta_1(0)=\sqrt{2k'K/\pi}$.
\item For $k\to0$, $\Theta(u)\to1$ for all $u\in\mathbb C$.
\end{enumerate}
\end{lemma}
For the next lemma, see Lemma\,2 of \cite{Sch-2008-1}. Unfortunately, there is a misprint in the formula for $H(\tfrac{1}{2}K)$, which is here corrected.
\begin{lemma}\label{Lemma-ThetaK2}
Let $0<k<1$. Then
\begin{equation}
\begin{aligned}
\Theta^4(\tfrac{1}{2}K)&=\Theta_1^4(\tfrac{1}{2}K)=\tfrac{2}{\pi^2}(1+k')\sqrt{k'}K^2,\\
H^4(\tfrac{1}{2}K)&=H_1^4(\tfrac{1}{2}K)=\tfrac{2}{\pi^2}(1-k')\sqrt{k'}K^2.
\end{aligned} \end{equation} \end{lemma} \begin{lemma}\label{Lemma-zn} The function \begin{equation} f(u):=\operatorname{zn}(u)-\frac{k^2\operatorname{sn}(u)\operatorname{cn}(u)}{\sqrt{k'}+\operatorname{dn}(u)} \end{equation} has the following properties: \begin{enumerate} \item $f(0)=f(\frac{1}{2}K)=f(K)=0$ \item $f(u)<0$ for $0<u<\frac{1}{2}K$ and $f(u)>0$ for $\frac{1}{2}K<u<K$ \item $f''(0)=f''(\frac{1}{2}K)=f''(K)=0$ \item $f''(u)>0$ for $0<u<\frac{1}{2}K$ and $f''(u)<0$ for $\frac{1}{2}K<u<K$ \end{enumerate} \end{lemma} \begin{proof} (i) follows immediately from \eqref{sncndn-K}. Let us prove (iii) and (iv), from which (ii) follows. Computing and simplifying $f''(u)$ with the help of \eqref{d-sncndnzn} and \eqref{sncndn} leads to \[ f''(u)=\frac{\sqrt{k'}(1+k')(1-k')^3(\operatorname{dn}(u)-\sqrt{k'})\,\operatorname{sn}(u)\,\operatorname{cn}(u)}{(\sqrt{k'}+\operatorname{dn}(u))^3}, \] thus, by \eqref{sncndn-K}, (iii) follows. Since $\operatorname{dn}(\frac{1}{2}K)=\sqrt{k'}$ and $\operatorname{dn}(u)$ is strictly monotone decreasing in $u$, $0\leq{u}\leq{K}$, and since $\operatorname{sn}(u)>0$ and $\operatorname{cn}(u)>0$ for $0<u<K$, assertion\,(iv) follow. \end{proof} \begin{lemma}\label{Lemma-Theta+Theta1} The function \begin{equation} f(u):=\Theta(u)+\Theta_1(u) \end{equation} is strictly monotone decreasing on $[0,\frac{1}{2}K]$ and strictly monotone increasing on $[\frac{1}{2}K,K]$. Moreover, $f(u+K)=f(u)$. \end{lemma} \begin{proof} By \eqref{d-Theta}, \[ f'(u)=\frac{1}{\sqrt{k'}}\,\Theta(u)\bigl(\sqrt{k'}+\operatorname{dn}(u)\bigr)\Bigl(\operatorname{zn}(u)-\frac{k^2\operatorname{sn}(u)\,\operatorname{cn}(u)}{\sqrt{k'}+\operatorname{dn}(u)}\Bigr). \] By Lemma\,\ref{Lemma-Theta}\,(i) and Lemma\,\ref{Lemma-zn}, we get $f'(u)<0$ for $0<u<\frac{1}{2}K$ and $f'(u)>0$ for $\frac{1}{2}K<u<K$. Since $\Theta(u+K)=\Theta_1(u)$ and $\Theta_1(u+K)=\Theta(u)$, the second relation follows. \end{proof} \begin{lemma}\label{Lemma-IneqTheta} For $u\in\mathbb R$, \begin{equation} \sqrt[4]{8(1+k')}\sqrt[8]{k'}\leq\frac{\Theta(u)}{\Theta(0)}\,\bigl(\sqrt{k'}+\operatorname{dn}(u)\bigr)\leq1+\sqrt{k'}, \end{equation} where equality is attained in both inequalities for $k\to0$, in the left inequality for $u=(\nu+\frac{1}{2})K$, $\nu\in{\mathbb{Z}}$, and in the right inequality for $u=\nu{K}$, $\nu\in{\mathbb{Z}}$. \end{lemma} \begin{proof} By Lemma\,\ref{Lemma-Theta+Theta1}, \[ \Theta(\tfrac{1}{2}K)+\Theta_1(\tfrac{1}{2}K)\leq\Theta(u)+\Theta_1(u)\leq\Theta(0)+\Theta_1(0) \] which, by \eqref{H-H1-T1}, Lemma\,\ref{Lemma-ThetaK2} and Lemma\,\ref{Lemma-Theta}, is equivalent to \[ \frac{\sqrt[4]{8(1+k')}\sqrt[8]{k'}}{\sqrt{k'}}\,\Theta(0)\leq\Theta(u)\Bigl(1+\frac{\operatorname{dn}(u)}{\sqrt{k'}}\Bigr) \leq\frac{1+\sqrt{k'}}{\sqrt{k'}}\,\Theta(0). \] The cases of equality follow immediately from \eqref{sncndn-K}, Lemma\,\ref{Lemma-Theta} and Lemma\,\ref{Lemma-ThetaK2}. \end{proof} \begin{lemma}\label{Lemma-dTheta} Let $a\in\mathbb C$ be fixed. Then \begin{equation}\label{dTheta} \frac{\partial}{\partial{u}}\Bigl\{\frac{\Theta(u-a)}{\Theta(u+a)}\Bigr\} =-\frac{\Theta(u-a)}{\Theta(u+a)}\Bigl[2\,\operatorname{zn}(a)-\frac{2k^2\operatorname{sn}^2(u)\operatorname{sn}(a)\operatorname{cn}(a)\operatorname{dn}(a)}{1-k^2\operatorname{sn}^2(u)\operatorname{sn}^2(a)}\Bigr]. 
\end{equation} \end{lemma} \begin{proof} Using \eqref{d-Theta}, we get (where $\Theta'(u):=\frac{\partial}{\partial{u}}\{\Theta(u)\}$) \begin{align*} \frac{\partial}{\partial{u}}\Bigl\{\frac{\Theta(u-a)}{\Theta(u+a)}\Bigr\} &=\frac{\Theta'(u-a)}{\Theta(u+a)}\cdot\frac{\Theta(u-a)}{\Theta(u-a)}-\frac{\Theta(u-a)\,\Theta'(u+a)}{\Theta^2(u+a)}\\ &=\frac{\Theta(u-a)}{\Theta(u+a)}\Bigl[\frac{\Theta'(u-a)}{\Theta(u-a)}-\frac{\Theta'(u+a)}{\Theta(u+a)}\Bigr]\\ &=-\frac{\Theta(u-a)}{\Theta(u+a)}\bigl[\operatorname{zn}(u+a)-\operatorname{zn}(u-a)\bigr], \end{align*} thus Eq.\,\eqref{dTheta} follows immediately by the formulae \cite[Eq.\,(3.6.2)]{Lawden} \[ \operatorname{zn}(u\pm{a})=\operatorname{zn}(u)\pm\operatorname{zn}(a)\mp{k}^2\operatorname{sn}(u)\operatorname{sn}(a)\operatorname{sn}(u\pm{a}) \] and by the formula for $\operatorname{sn}(u\pm{a})$, see \cite[Eq.\,(123.01)]{BF}. \end{proof} \end{document}
\begin{document}
\title[Strongly regular Cayley graphs]{Strongly regular Cayley graphs from partitions of subdifference sets of the Singer difference sets}
\author[Momihara and Xiang]{Koji Momihara$^*$, Qing Xiang$^{\dagger}$}
\address{Koji Momihara, Faculty of Education, Kumamoto University, 2-40-1 Kurokami, Kumamoto 860-8555, Japan}
\email{[email protected]}
\address{Qing Xiang, Department of Mathematical Sciences, University of Delaware, Newark, DE 19716, USA}
\email{[email protected]}
\thanks{$^\ast$Research supported by JSPS under Grant-in-Aid for Young Scientists (B) 17K14236 and Scientific Research (B) 15H03636.}
\thanks{$^{\dagger}$Research partially supported by an NSF grant DMS-1600850}
\keywords{Affine polar graph, $i$-tight set, $m$-ovoid, quadratic form, Singer difference set, strongly regular graph, subdifference set.}
\begin{abstract}
In this paper, we give a new lifting construction of ``hyperbolic'' type of strongly regular Cayley graphs. Also we give new constructions of strongly regular Cayley graphs over the additive groups of finite fields based on partitions of subdifference sets of the Singer difference sets. Our results unify some recent constructions of strongly regular Cayley graphs related to $m$-ovoids and $i$-tight sets in finite geometry. Furthermore, some of the strongly regular Cayley graphs obtained in this paper are new or nonisomorphic to known strongly regular graphs with the same parameters.
\end{abstract}
\maketitle
\section{Introduction}\label{sec:1}
We assume that the reader is familiar with the basic theory of strongly regular graphs and difference sets. For strongly regular graphs (srgs), our main references are \cite{BH} and \cite{cg}. For difference sets, we refer the reader to \cite{Lander} and Chapter 6 of \cite{bjl}. Strongly regular graphs are closely related to many other combinatorial/geometric objects, such as two-weight codes, two-intersection sets, $m$-ovoids, $i$-tight sets, and partial difference sets. For these connections, we refer the reader to \cite[p.~132]{BH}, \cite{CK86, Ma}, and some more recent papers \cite{FMX, DDMR, BLMX} on Cameron-Liebler line classes and hemisystems.
Let $\Gamma$ be a (simple, undirected) graph. The adjacency matrix of $\Gamma$ is the $(0,1)$-matrix $A$ with both rows and columns indexed by the vertex set of $\Gamma$, where $A_{xy} = 1$ when there is an edge between $x$ and $y$ in $\Gamma$ and $A_{xy} = 0$ otherwise. A useful way to check whether a graph is strongly regular is by using the eigenvalues of its adjacency matrix. For convenience we call an eigenvalue {\it restricted} if it has an eigenvector perpendicular to the all-ones vector ${\bf 1}$. (For a $k$-regular connected graph, the restricted eigenvalues are the eigenvalues different from $k$.)
\begin{theorem}\label{char}
For a simple graph $\Gamma$ of order $v$, not complete or edgeless, with adjacency matrix $A$, the following are equivalent:
\begin{enumerate}
\item $\Gamma$ is strongly regular with parameters $(v, k, \lambda, \mu)$ for certain integers $k, \lambda, \mu$,
\item $A^2 =(\lambda-\mu)A+(k-\mu) I+\mu J$ for certain real numbers $k,\lambda, \mu$, where $I, J$ are the identity matrix and the all-ones matrix, respectively,
\item $A$ has precisely two distinct restricted eigenvalues.
\end{enumerate}
\end{theorem}
For a proof of Theorem~\ref{char}, we refer the reader to \cite{BH}. An effective method to construct strongly regular graphs is by using Cayley graphs.
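Before turning to Cayley graphs, let us illustrate Theorem~\ref{char} on a very small example. The following Python sketch is a minimal check only and plays no role in the sequel; the Paley graph ${\rm P}(13)$, a strongly regular graph with parameters $(13,6,2,3)$, is chosen purely as a test case. The code builds the adjacency matrix of ${\rm P}(13)$, verifies the matrix identity in condition (2) of Theorem~\ref{char}, and confirms that $A$ has exactly two distinct restricted eigenvalues, namely $(-1\pm\sqrt{13})/2$.
\begin{verbatim}
# Check the spectral characterisation of strong regularity on the Paley graph P(13).
import numpy as np

q = 13                                           # prime with q = 1 (mod 4)
squares = {(x * x) % q for x in range(1, q)}     # nonzero quadratic residues mod q
A = np.array([[1 if (x - y) % q in squares else 0 for y in range(q)]
              for x in range(q)])

k, lam, mu = 6, 2, 3                             # srg(13, 6, 2, 3) parameters
I, J = np.eye(q, dtype=int), np.ones((q, q), dtype=int)
assert np.array_equal(A @ A, (lam - mu) * A + (k - mu) * I + mu * J)

eigs = np.round(np.linalg.eigvalsh(A), 6)        # k = 6 plus two restricted eigenvalues
print(sorted(set(eigs)))                         # [-2.302776, 1.302776, 6.0]
\end{verbatim}
Of course, for the graphs constructed in this paper one never forms an adjacency matrix explicitly; instead, the eigenvalues are computed as character sums, as explained next.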
Let $G$ be an additively written group of order $v$, and let $D$ be a subset of $G$ such that $0\not\in D$ and $-D=D$, where $-D=\{-d\mid d\in D\}$. The {\it Cayley graph over $G$ with connection set $D$}, denoted ${\rm Cay}(G,D)$, is the graph with the elements of $G$ as vertices; two vertices are adjacent if and only if their difference belongs to $D$. In the case when ${\mathbb C}ay(G,D)$ is a strongly regular graph, the connection set $D$ is called a (regular) {\it partial difference set}. Examples of strongly regular Cayley graphs are the Paley graphs ${\rm P}(q)$, where $q$ is a prime power congruent to 1 modulo 4, the Clebsch graph, and the affine orthogonal graphs (\cite{BH}). For $\Gamma={\rm Cay}(G,D)$ with $G$ abelian, the eigenvalues of $\Gamma$ are exactly $\chi(D):=\sum_{d\in D}\chi(d)$, where $\chi$ runs through the character group of $G$. This fact reduces the problem of computing eigenvalues of abelian Cayley graphs to that of computing some character sums, and is the underlying reason why the Cayley graph construction has been very effective for the purpose of constructing srgs. The survey of Ma~\cite{Ma} contains much of what is known about partial difference sets and about connections with strongly regular graphs.
A $(v,k,\lambda,\mu)$ srg is said to be of {\em{Latin square type}} (respectively, {\it negative Latin square type}) if $(v,k,\lambda,\mu) = (n^2, r(n-\epsilon), \epsilon n+r^2-3 \epsilon r, r^2 - \epsilon r)$ and $\epsilon = 1$ (respectively, $\epsilon=-1$). When $v$ (the number of vertices) is a prime power, many constructions of srgs with Latin square or negative Latin square type parameters are known. For example, the srgs arising from partial spreads of ${\mathrm{PG}}(2m -1,q)$ have Latin square parameters, and the affine orthogonal graphs, ${\rm VO}^{-}(2m,q)$, have negative Latin square type parameters. Still the range of $r$ in the parameters $(n^2, r(n-\epsilon), \epsilon n+r^2-3 \epsilon r, r^2 - \epsilon r)$ of the known srgs of Latin square or negative Latin square type can sometimes be limited; moreover, Latin square and negative Latin square type strongly regular Cayley graphs with certain extra properties\footnote{For example, the elements of the connection set must all lie on a quadratic surface.} have found many connections with finite geometric objects such as $m$-ovoids and $i$-tight sets (cf. \cite{FMX, DDMR, BLMX}). Therefore it is of interest to construct more strongly regular Cayley graphs of Latin square or negative Latin square type.
The purpose of the current paper is twofold. First, we give new constructions of strongly regular Cayley graphs, and obtain some new srgs. Secondly, we unify and give simpler proofs for some recent constructions of strongly regular Cayley graphs. The paper is organized as follows. In Section 2, we review some basic properties of Gauss sums which will be used in later sections. In Section 3, we give two constructions of strongly regular Cayley graphs on the additive group of ${\mathbb F}_{q^2}$ by lifting a cyclotomic strongly regular graph on ${\mathbb F}_q$. The first lifting construction (Proposition~\ref{theorem:main1}) is of ``elliptic'' type, and it was already given in \cite{MX}. The second lifting construction (Proposition~\ref{theorem:main2}) is of ``hyperbolic'' type, and this construction is new. In Section 4, we generalize and unify the constructions of strongly regular Cayley graphs corresponding to $m$-ovoids and $i$-tight sets in \cite{FMX, BLMX}.
We give a general construction of strongly regular Cayley graphs by using a certain partition of a subdifference set (and its complement) of the Singer difference set. When the subdifference sets arise from subfields, we recover the results in \cite{FMX, BLMX}. In Sections 5, 6, and 7, we apply the general construction in Section 4 to the three known cases of subdifference sets of the Singer difference sets, namely, the semiprimitive case, the sporadic case, and the subfield case. We either recover strongly regular Cayley graphs constructed in some of our recent papers \cite{Mo, FMX, BLMX}, or we produce new strongly regular Cayley graphs. In particular, Corollaries 7.7 and 7.9 give strongly regular Cayley graphs with the same parameters as the affine polar graphs. By using a computer, it is shown that the newly constructed graphs in Corollaries 7.7 and 7.9 are not isomorphic to the affine polar graphs when the parameters are small.
\section{Preliminaries}
We will use Gauss sums and Gauss periods to compute character values of certain subsets of ${\mathbb F}_{q}$, the finite field of order $q$. So it is helpful to introduce both the additive and the multiplicative characters of finite fields, and to review basic properties of Gauss sums. Let $p$ be a prime, $f$ a positive integer, and $q=p^f$. The canonical additive character $\psi_{{\mathbb F}_q}$ of ${\mathbb F}_q$ is defined by
$$\psi_{{\mathbb F}_q}\colon{\mathbb F}_q\to {\mathbb C}^{\ast},\qquad\psi_{{\mathbb F}_q}(x)=\zeta_p^{\text{Tr} _{q/p}(x)},$$
where $\zeta_p={\rm exp}(\frac {2\pi i}{p})$ is a complex primitive $p$-th root of unity and $\text{Tr}_{q/p}$ is the trace function from ${\mathbb F}_q$ to ${\mathbb F}_p$ defined by $\text{Tr}_{q/p}(x) = x + x^p + x^{p^2} +\cdots + x^{p^{f-1}}$. All the additive characters of ${\mathbb F}_q$ can be obtained from the canonical one. For $a\in {\mathbb F}_q$, define
\begin{equation}\label{additive}
\psi_a(x)=\psi_{{\mathbb F}_q}(ax), \;\forall x\in {\mathbb F}_q.
\end{equation}
Then $\{\psi_a\mid a\in {\mathbb F}_q\}$ is the group of additive characters of ${\mathbb F}_q$. For a multiplicative character $\chi$ and the canonical additive character $\psi_{{\mathbb F}_q}$ of ${\mathbb F}_q$, define the {\it Gauss sum} by
\[
G_q(\chi)=\sum_{x\in {\mathbb F}_q^\ast}\chi(x)\psi_{{\mathbb F}_q}(x).
\]
Some basic properties of Gauss sums are listed below:
\begin{proposition}{\em (\cite[Theorem 5.2]{LN97})}
Let $\chi$ be a multiplicative character of ${\mathbb F}_{q}$. Then, the following hold:
\begin{enumerate}
\item[(i)] $G_q(\chi)\overline{G_q(\chi)}=q$ if $\chi$ is nontrivial;
\item[(ii)] $G_q(\chi^p)=G_q(\chi)$, where $p$ is the characteristic of ${\mathbb F}_q$;
\item[(iii)] $G_q(\chi^{-1})=\chi(-1)\overline{G_q(\chi)}$;
\item[(iv)] $G_q(\chi)=-1$ if $\chi$ is trivial.
\end{enumerate}
\end{proposition}
Let $\omega$ be a fixed primitive element of ${\mathbb F}_q$ and $N$ a positive integer dividing $q-1$. For $0\le i\le N-1$ we set $C_i^{(N,q)}=\omega^i C_0$, where $C_0$ is the subgroup of index $N$ of ${\mathbb F}_q^\ast$. The {\it Gauss periods} associated with these cosets are defined by $\psi_{{\mathbb F}_q}(C_i^{(N,q)}):=\sum_{x\in C_i^{(N,q)}}\psi_{{\mathbb F}_q}(x)$, $0\le i\le N-1$, where $\psi_{{\mathbb F}_q}$ is the canonical additive character of ${\mathbb F}_q$.
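These definitions are easy to experiment with numerically. The following Python sketch is a minimal illustration only; the choices $p=13$, $N=3$ and the primitive root $2$ are ours and play no role later. It computes the Gauss sums $G_p(\chi^j)$ for a multiplicative character $\chi$ of order $3$ of ${\mathbb F}_{13}$ directly from the definition and checks properties (i), (iii) and (iv) of the proposition above.
\begin{verbatim}
# Gauss sums over F_13 for a multiplicative character chi of order N = 3.
import cmath

p, N, g = 13, 3, 2                      # g = 2 is a primitive root modulo 13
zeta_p = cmath.exp(2j * cmath.pi / p)
zeta_N = cmath.exp(2j * cmath.pi / N)
dlog = {pow(g, a, p): a for a in range(p - 1)}   # discrete logarithm base g

def gauss(j):                           # G_p(chi^j) with chi(g^a) = zeta_N^a
    return sum(zeta_N ** (j * dlog[x]) * zeta_p ** x for x in range(1, p))

print(abs(gauss(1)) ** 2)               # (i):  |G_p(chi)|^2 = p = 13
print(gauss(2) - gauss(1).conjugate())  # (iii): chi^{-1} = chi^2, chi(-1) = 1, so ~ 0
print(gauss(0))                         # (iv): the trivial character gives -1
\end{verbatim}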
By orthogonality of characters, the Gauss periods can be expressed as a linear combination of Gauss sums: \begin{equation} \psi_{{\mathbb F}_q}(C_i^{(N,q)})=\frac{1}{N}\sum_{j=0}^{N-1}G_q(\chi^{j})\chi^{-j}(\omega^i), \; 0\le i\le N-1, \end{equation} where $\chi$ is any fixed multiplicative character of order $N$ of ${\mathbb F}_q$. For example, if $N=2$, we have \begin{equation}\label{eq:Gaussquad} \psi_{{\mathbb F}_q}(C_i^{(2,q)})=\frac{-1+(-1)^iG_q(\eta)}{2},\; 0\le i\le 1, \end{equation} where $\eta$ is the quadratic character of ${\mathbb F}_q$. The quadratic Gauss sum, $G_q(\eta)$, can be evaluated explicitly. \begin{theorem}\cite[Theorem~5.15]{LN97} \label{thm:Gauss} Let $q=p^s$ be a prime power with $p$ a prime and $\eta$ be the quadratic character of ${\mathbb F}_q$. Then, \begin{equation}\label{eq:Gaussquad1} G_q(\eta)=\begin{cases} (-1)^{s-1}q^{1/2}& \text{ if } p\equiv 1\,({\mathrm{mod\,\,}}{4}), \\ (-1)^{s-1}i^s q^{1/2} & \text{ if } p\equiv 3\,({\mathrm{mod\,\,}}{4}). \end{cases} \end{equation} \end{theorem} Also, in the semi-primitive case, the Gauss sum can be computed. \begin{theorem}\label{thm:semiprim}{\em (\cite[Theorem~11.6.3]{BEW97})} Let $p$ be a prime. Suppose that $N>2$ and $p$ is semi-primitive modulo $N$, i.e., there exists a positive integer $j$ such that $p^j\equiv -1\pmod{N}$. Choose $j$ minimal and write $f=2js$ for any positive integer $s$. Let $\chi$ be a multiplicative character of order $N$ of ${\mathbb F}_{p^f}$. Then, \[ p^{-f/2}G_{p^f}(\chi)= \left\{ \begin{array}{ll} (-1)^{s-1},& \mbox{if $p=2$,}\\ (-1)^{s-1+(p^j+1)s/N},& \mbox{if $p>2$. } \end{array} \right. \] \end{theorem} The following theorems are referred to as the {\it Davenport-Hasse lifting formula} and the {\it Davenport-Hasse product formula}, respectively. \begin{theorem}\label{thm:lift}{\em (\cite[Theorem~5.14]{LN97})} Let $m$ be a positive integer. Let $\chi$ be a nontrivial multiplicative character of ${\mathbb F}_{q}$ and $\chi'$ be the lift of $\chi$ to ${\mathbb F}_{q^m}$, i.e., $\chi'(x)=\chi({\mathbb N}orm_{{\mathbb F}_{q^m}/{\mathbb F}_q}(x))$ for $x\in {\mathbb F}_{q^m}$, where ${\mathbb N}orm_{{\mathbb F}_{q^m}/{\mathbb F}_q}$ is the norm from ${\mathbb F}_{q^m}$ to ${\mathbb F}_{q}$. Then, \[ G_{q^m}(\chi')=(-1)^{m-1} G_{q}(\chi)^m. \] \end{theorem} \begin{theorem} \label{thm:Stickel2}{\em (\cite[Theorem~11.3.5]{BEW97})} Let $\eta$ be a multiplicative character of order $\ell>1$ of ${\mathbb F}_{q}$. For every nontrivial multiplicative character $\chi$ of ${\mathbb F}_{q}$, \[ G_{q}(\chi)=\frac{G_{q}(\chi^\ell)}{\chi^\ell(\ell)} \prod_{i=1}^{\ell-1} \frac{G_{q}(\eta^i)}{G_{q}(\chi\eta^i)}. \] \end{theorem} We will use the following formula later. \begin{theorem}\label{prop:charaadd}{\em (\cite[Theorem~5.33]{LN97})} Let $\psi_{{\mathbb F}_q}$ be the canonical additive character of ${\mathbb F}_q$ with $q$ odd, and let $f(x)=a_2x^2+a_1x+a_0\in {\mathbb F}_q[x]$ with $a_2\not=0$. Then \[ \sum_{x\in {\mathbb F}_q}\psi_{{\mathbb F}_q}(f(x))=\psi_{{\mathbb F}_q}(a_0-a_1^2(4a_2)^{-1})\eta(a_2)G_q(\eta), \] where $\eta$ is the quadratic character of ${\mathbb F}_q$. \end{theorem} \section{Basic lifting constructions} \subsection{Subdifference sets of the Singer difference sets}\label{sec:Quo} Let $p$ be a prime, $f\geq 1$, $m\geq 2$ be integers and $q=p^f$. Let $L$ be a complete system of coset representatives of ${\mathbb F}_q^\ast$ in ${\mathbb F}_{q^m}^\ast$. We can, and do, choose $L$ in such a way that ${\mathrm{Tr}}r_{q^m/q}(x)=0$ or $1$ for any $x\in L$. 
Let \[ L_0=\{x\in L\,|\,{\mathrm{Tr}}r_{q^m/q}(x)=0\}\mbox{ and } L_1=\{x\in L\,|\,{\mathrm{Tr}}r_{q^m/q}(x)=1\}. \] Then, \begin {equation*}\label{Singer} H_0=\{\overline{x}\in {\mathbb F}_{q^m}^\ast/{\mathbb F}_q^\ast\,|\,x\in L_0\} \end{equation*} represents a hyperplane of the projective space ${\mathrm{PG}}(m-1,q)$, and it is a so-called {\it Singer difference set} in the cyclic group ${\mathbb F}_{q^m}^\ast/{\mathbb F}_q^\ast$. (Here $\overline{x}=x{\mathbb F}_q^\ast$ represents the projective point corresponding to the one-dimensional subspace over ${\mathbb F}_q$ spanned by $x$.) Any nontrivial multiplicative character $\chi$ of exponent $(q^m-1)/(q-1)$ of ${\mathbb F}_{q^m}^\ast$ induces a character of the quotient group ${\mathbb F}_{q^m}^\ast/{\mathbb F}_q^\ast$, which will also denoted by $\chi$. Moreover, every character of ${\mathbb F}_{q^m}^\ast/{\mathbb F}_q^\ast$ arises in this way. By a result given in \cite{Y}, for any nontrivial multiplicative character $\chi$ of exponent $(q^m-1)/(q-1)$ of ${\mathbb F}_{q^m}^\ast$, we have $$\chi(H_0)=G_{q^m}(\chi)/q.$$ Assume that $N\,|\,(q^m-1)/(q-1)$. Then $$\overline{C_0}:=C_0^{(N,q^m)}/{\mathbb F}_q^\ast\le {\mathbb F}_{q^m}^\ast/{\mathbb F}_q^\ast.$$ Let $S$ be a complete system of coset representatives of $\overline{C_0}$ in ${\mathbb F}_{q^m}^\ast/{\mathbb F}_q^\ast$, and set $G=\{s\overline{C_0}\,|\,s\in S\}\simeq {\mathbb F}_{q^m}^\ast/C_0^{(N,q^m)}$. For convenience, we will use $\tilde{s}$ to denote $s\overline{C_0}$. In the rest of this section, we will assume that ${\mathbb C}ay({\mathbb F}_{q^m},C_0^{(N,q^m)}) $ is strongly regular, where $N\,|\,(q^m-1)/(q-1)$. Such a strongly regular graph is called {\it cyclotomic}. The following three series of cyclotomic strongly regular graphs were known~\cite{SW}: \begin{itemize} \item[(1)] (subfield case) $C_0^{(N,q^m)}={\mathbb F}_{q^d}^\ast$ where $d\,|\,m$, \item[(2)] (semi-primitive case) $-1\in \langle p\rangle\le ({\mathbb Z}/N{\mathbb Z})^\ast$, \item[(3)] (sporadic case) ${\mathbb C}ay({\mathbb F}_{q^m},C_0^{(N,q^m)})$ has one of the eleven sets of parameters given in Table~\ref{Tab1}. \begin{table}[h] \caption{Eleven sporadic examples} \sigmaspace{-0.5cm} \label{Tab1} $$ \begin{array}{|c||c|c|c|c|c|c|c|c|c|c|c|} \hline N& 11&19 &35 &37 &43 &67 &107&133&163&323&499\\ \hline q^m&3^5&5^9&3^{12}&7^9&11^7&17^{33}&3^{53}&5^{18}&41^{81}&3^{144} &5^{249}\\ \hline \end{array} $$ \end{table} \end{itemize} We mention in passing that Schmidt and White~\cite{SW} conjectured that besides the above three cases, there are no more cyclotomic strongly regular graphs. \begin{conj} Let $N\,|\,\frac{q^m-1}{q-1}$ with $N>1$. If ${\mathbb C}ay({\mathbb F}_{q^m},C_0^{(N,q^m)})$ is strongly regular, then one of (1), (2), or (3) above holds. \end{conj} This conjecture remains open. Some partial results were obtained in \cite{SW}. Assume that ${\mathbb C}ay({\mathbb F}_{q^m},C_0^{(N,q^m)}) $ is strongly regular, where $N\,|\,\frac{q^m-1}{q-1}$. Then $|H_0\cap s\overline{C_0}|$, $s\in S$, take exactly two values. It follows that $|H_0\cap s\overline{C_0}|-|H_0\cap \overline{C_0}|=0$ or $\delta$, where $\delta$ is a nonzero integer. 
For any nontrivial multiplicative character $\chi$ of ${\mathbb F}_{q^m}$ of exponent $N$, \begin{align*} \chi(H_0)=\,&\sum_{s\in S}|H_0\cap s\overline{C_0}|\chi(\tilde{s})\\ =\,&\sum_{s\in S}(|H_0\cap s\overline{C_0}|-|H_0\cap \overline{C_0}|)\chi(\tilde{s})\\ =\,&\delta\sum_{s\in S'}\chi(\tilde{s}), \end{align*} where \begin{equation}\label{def:S'} S'=\{s\in S : |H_0\cap s\overline{C_0}|-|H_0\cap \overline{C_0}|=\delta\}. \end{equation} Thus \begin{equation}\label{eq:sum} \sum_{s\in S'}\chi(\tilde{s})=\frac{\chi(H_0)}{\delta}=\frac{G_{q^m}(\chi)}{\delta q}. \end{equation} It follows that $\delta$ is a power of $p$. Furthermore, noting that $G_{q^m}(\chi)\overline{G_{q^m}(\chi)}=q^m$, we see that the set $\{\tilde{s}\mid s\in S'\}\subset G$ forms a difference set, which is called a {\it subdifference set} of $H_0$~\cite{Mc}. Let $\omega$ be a primitive element of ${\mathbb F}_{q^m}$. Then we could choose $S=\{\overline{1}, \overline{\omega}, \ldots , \overline{\omega}^{N-1}\}$, where $\overline{\omega}=\omega{\mathbb F}_q^\ast$. In this way, since $S'$ is a subset of $S$, we define \begin{equation}\label{eq:subdi} I=\{0\le i\le N-1\mid \overline{\omega}^i\in S'\}. \end{equation} In the rest of the paper, we will also call $I$ a subdifferecne set in ${\mathbb Z}_N$ of the Singer difference set. \subsection{Two lifting constructions}\label{sec:two} Let ${\gamma}mma$ be a primitive element of ${\mathbb F}_{q^{2m}}$ and set $\omega={\gamma}mma^{q^m+1}$. Then, $\omega$ is a primitive element in ${\mathbb F}_{q^m}$. Let $C_j^{(N,q^{2m})}={\gamma}mma^j\langle {\gamma}mma^N\rangle$, $0\le j\le N-1$. The following lifting construction was already given in~\cite{MX}. For completeness, we repeat the construction here. \begin{proposition} \label{theorem:main1} Assume that ${\mathbb F}_q^\ast\le C_0^{(N,q^m)}\le {\mathbb F}_{q^m}^\ast$ and ${\mathbb C}ay({\mathbb F}_{q^m},C_0^{(N,q^m)})$ is strongly regular. Let $I$ be the corresponding subdifferecne set defined in \eqref{eq:subdi}. Let \begin{equation}\label{ellip} E=\bigcup_{i\in I}C_i^{(N,q^{2m})}. \end{equation} Then ${\mathbb C}ay({\mathbb F}_{q^{2m}}, E)$ is a strongly regular graph with negative Latin square type parameters $(n^2,r(n+1),-n+r^2+3r,r^2+r)$, where $n=q^m$ and $r=(q^m-1)|I|/N$. \end{proposition} {\sigmaspace{-0.0cm}\bf Proof: \,} Let $\psi_{{\mathbb F}_{q^{2m}}}$ be the canonical additive character of ${\mathbb F}_{q^{2m}}$ and let $\chi_N'$ be a multiplicative character of order $N$ of ${\mathbb F}_{q^{2m}}$. We will show that $\psi_{{\mathbb F}_{q^{2m}}}({\gamma}mma^a E)$, $0\leq a\leq N-1$, take exactly two distinct values. By the orthogonality of characters, we compute \begin{eqnarray*} S_a=N\cdot \psi_{{\mathbb F}_{q^{2m}}}({\gamma}mma^a E)+|I|=\sum_{j=1}^{N-1}G_{q^{2m}}(\chi_N'^{-j})\sum_{i\in I}\chi_N'^{j}({\gamma}mma^{a+i}). \end{eqnarray*} Since $N\,|\,\frac{q^m-1}{q-1}$, there is a multiplicative character $\chi_N$ of ${\mathbb F}_{q^m}$ of order $N$ such that $\chi_N'({\gamma}mma)=\chi_N(\omega)$, i.e., $\chi_N'$ is the lift of $\chi_N$. By the Davenport-Hasse lifting formula, we have \[ S_a =-\sum_{j=1}^{N-1}{\chi_{N}}^{j}(\omega^{a})G_{q^m}(\chi_{N}^{-j})G_{q^m}(\chi_{N}^{-j}) \sum_{i\in I}\chi_{N}^{j}(\omega^{i}). \] On the other hand, from the definition of $I$, we have \begin{equation}\label{eq:defIuse} \sum_{i\in I}\chi_N^j(\omega^i)=\sum_{s\in S'}\chi_N^j(\tilde{s})=\frac{G_{q^m}(\chi_N^j)}{\delta q}. 
\end{equation} Hence, \begin{align*} S_a =\,&-\frac{1}{\delta q} \sum_{j=1}^{N-1}\chi_{N}^{j}(\omega^{a})G_{q^m}(\chi_{N}^{-j})G_{q^m}(\chi_{N}^{-j})G_{q^m}(\chi_{N}^{j})\nonumber\\ =\,&- \frac{q^{m-1}}{\delta} \sum_{j=1}^{N-1}\chi_{N}^{j}(\omega^{a})G_{q^m}(\chi_{N}^{-j})\\ =\,&-q^m\sum_{j=1}^{N-1}\sum_{i \in I}\chi_N^{-j}(\omega^i)\chi_N^{j}(\omega^a) =q^m|I|-\begin{cases} q^m N,& \text{ if } a\in I, \\ 0,& \text{ if } a\not\in I. \end{cases} \end{align*} Thus, $\psi_{{\mathbb F}_{q^{2m}}}({\gamma}mma^a E)=\frac{S_{a}-|I|}{N}$, $0\leq a\leq N-1$, take exactly two distinct values $\frac{(q^m-1)|I|}{N}$ and $\frac{(q^m-1)|I|}{N}-q^m$. Therefore ${\mathbb C}ay({\mathbb F}_{q^{2m}}, E)$ is strongly regular. The parameters of ${\mathbb C}ay({\mathbb F}_{q^{2m}}, E)$ can be computed in a straightforward way. We omit the details. { $\square$} \begin{remark}{\em If ${\mathbb C}ay({\mathbb F}_{q^m}, C_0^{(N,q^m)})$ is a cyclotomic strongly regular graph in the subfield case with $N=\frac{q^m-1}{q-1}$, then $C_0^{(N,q^m)}={\mathbb F}_q^\ast$, $S={\mathbb F}_{q^m}^\ast/{\mathbb F}_q^\ast$, and $S'=H_0$. In this case, we find that \[ E=\{x\in {\mathbb F}_{q^{2m}}^\ast\,|\,{\mathrm{Tr}}r_{q^m/q}(x^{q^m+1})=0\}, \] where ${\mathrm{Tr}}r_{q^m/q}(x^{q^m+1})$ is a nondegenerate ${\mathbb F}_q$-valued elliptic quadratic form on ${\mathbb F}_{q^{2m}}$. Therefore it is appropriate to call the lifting construction given in Proposition~\ref{theorem:main1} an {\it elliptic} type lifting construction.} \end{remark} We give a new lifting construction, which is of ``hyperbolic" type. \begin{proposition} \label{theorem:main2} Let $\omega$ be a primitive element of ${\mathbb F}_{q^m}$. Assume that ${\mathbb F}_q^\ast\le C_0^{(N,q^m)}\le {\mathbb F}_{q^m}^\ast$ and ${\mathbb C}ay({\mathbb F}_{q^m},C_0^{(N,q^m)})$ is strongly regular. Let $I$ be the corresponding subdifferecne set defined in \eqref{eq:subdi}. Let \begin{equation}\label{hyperb} H=\{(y,y^{-1}x\omega^\ell)\,|\,x \in C_0^{(N,q^m)},y \in {\mathbb F}_{q^m}^\ast,\ell \in I\}\subseteq {\mathbb F}_{q^m}\times {\mathbb F}_{q^m}. \end{equation} Then ${\mathbb C}ay({\mathbb F}_{q^m} \times {\mathbb F}_{q^m}, H)$ is a strongly regular graph with Latin square type parameters $(n^2,r(n-1),n+r^2-3r,r^2-r)$, where $n=q^m$ and $r=(q^m-1)|I|/N$. \end{proposition} {\sigmaspace{-0.0cm}\bf Proof: \,} Let $\psi_{{\mathbb F}_{q^m}}$ be the canonical additive character of ${\mathbb F}_{q^{m}}$ and let $\chi_{q^m-1}$ be a multiplicative character of order $q^m-1$ of ${\mathbb F}_{q^{m}}$. Each additive character of ${\mathbb F}_{q^m}\times {\mathbb F}_{q^m}$ has the form \begin{equation}\label{fieldchara22} \psi_{a,b}((x,y))=\psi_{{\mathbb F}_{q^m}}(ax+by),\; \quad (x,y)\in {\mathbb F}_{q^m}\times {\mathbb F}_{q^m}, \end{equation} where $(a,b)\in {\mathbb F}_{q^m}\times {\mathbb F}_{q^m}$. Then, by the definition of $H$, we need to compute the character values: \[ S_{a,b}:=\sum_{y\in {\mathbb F}_{q^m}^\ast}\sum_{x\in C_0^{(N,q^m)}}\sum_{\ell\in I}\psi_{{\mathbb F}_{q^m}}(ay+bxy^{-1}\omega^\ell), \, \, \quad (0,0)\neq (a,b)\in {\mathbb F}_{q^m}\times {\mathbb F}_{q^m}. \] In the case where either one of $a$ or $b$ is zero, it is clear that $S_{a,b}=-(q^m-1)|I|/N$. Now, we assume that $a\not=0$ and $b\not=0$. 
By the orthogonality of characters, we have
\begin{equation}\label{eq:hypfir1}
S_{a,b}=\frac{1}{(q^m-1)^2}\sum_{j,k=0}^{q^m-2}
\sum_{y\in {\mathbb F}_{q^m}^\ast}\sum_{x\in C_0^{(N,q^m)}}\sum_{\ell\in I}G_{q^m}(\chi_{q^m-1}^{-j})G_{q^m}(\chi_{q^m-1}^{-k})\chi_{q^m-1}^{j}(a)
\chi_{q^m-1}^{k}(bx\omega^\ell)\chi_{q^m-1}^{j-k}(y).
\end{equation}
Since $\sum_{y\in {\mathbb F}_{q^m}^\ast}\chi_{q^m-1}^{j-k}(y)=q^m-1$ or $0$ according as $j\equiv k\,({\mathrm{mod\,\,}}{q^m-1})$ or $j\not\equiv k\,({\mathrm{mod\,\,}}{q^m-1})$, continuing from \eqref{eq:hypfir1}, we have
\begin{equation}\label{eq:hypfir2}
S_{a,b}=\frac{1}{q^m-1}\sum_{j=0}^{q^m-2}
\sum_{x\in C_0^{(N,q^m)}}\sum_{\ell\in I}G_{q^m}(\chi_{q^m-1}^{-j})^2\chi_{q^m-1}^{j}(a)
\chi_{q^m-1}^{j}(bx\omega^\ell).
\end{equation}
Let $\chi_{N}:=\chi_{q^m-1}^{\frac{q^m-1}{N}}$. Since $\sum_{x\in C_0^{(N,q^m)}}\chi_{q^m-1}^{j}(x)=(q^m-1)/N$ or $0$ according as $j\equiv 0\,({\mathrm{mod\,\,}}{\frac{q^m-1}{N}})$ or $j\not\equiv 0\,({\mathrm{mod\,\,}}{\frac{q^m-1}{N}})$, continuing from \eqref{eq:hypfir2}, we have
\[
S_{a,b}=\frac{1}{N}\sum_{j=0}^{N-1}
\sum_{\ell\in I}G_{q^m}(\chi_{N}^{-j})^2\chi_{N}^{j}(ab\omega^\ell).
\]
On the other hand, by \eqref{eq:defIuse}, we have $ \sum_{i\in I}\chi_N^j(\omega^i)=\frac{G_{q^m}(\chi_N^j)}{\delta q}$. Hence, we have
\begin{align*}
S_{a,b}-\frac{|I|}{N}
=\,&\frac{1}{\delta qN}
\sum_{j=1}^{N-1}\chi_{N}^{j}(ab)G_{q^m}(\chi_{N}^{-j})G_{q^m}(\chi_{N}^{-j})G_{q^m}(\chi_{N}^{j})\nonumber\\
=\,& \frac{q^{m-1}}{\delta N}
\sum_{j=1}^{N-1}\chi_{N}^{j}(ab)G_{q^m}(\chi_{N}^{-j})\\
=\,&\frac{q^{m}}{N}\sum_{j=1}^{N-1}\sum_{\ell \in I}\chi_N^{-j}(\omega^\ell)\chi_N^{j}(ab)
=-\frac{q^m|I|}{N}+\begin{cases}
q^m,& \text{ if } \log_{\omega}(ab)\in I\, ({\mathrm{mod\,\,}}{N}), \\
0,& \text{ if } \log_{\omega}(ab)\not\in I\, ({\mathrm{mod\,\,}}{N}).
\end{cases}
\end{align*}
Thus $\psi_{a,b}(H)=S_{a,b}$, $(0,0)\neq (a,b)\in {\mathbb F}_{q^m}\times {\mathbb F}_{q^m}$, take exactly two distinct values $-\frac{(q^m-1)|I|}{N}$ and $-\frac{(q^m-1)|I|}{N}+q^m$. Therefore ${\mathbb C}ay({\mathbb F}_{q^m}\times {\mathbb F}_{q^m}, H)$ is strongly regular. The parameters of ${\mathbb C}ay({\mathbb F}_{q^m}\times {\mathbb F}_{q^m}, H)$ can be computed in a straightforward way. We omit the details.
{ $\square$}
\begin{remark}{\em
Under the assumptions of Proposition~\ref{theorem:main2}, set
\[
H':=H\cup \{(0,x)\,|\,x \in {\mathbb F}_{q^m}^\ast\}\cup \{(x,0)\,|\,x \in {\mathbb F}_{q^m}^\ast\}.
\]
Then, ${\mathbb C}ay({\mathbb F}_{q^m}\times {\mathbb F}_{q^m}, H')$ is also strongly regular. Furthermore, if ${\mathbb C}ay({\mathbb F}_{q^m}, C_0^{(N,q^m)})$ is a cyclotomic strongly regular graph in the subfield case with $N=\frac{q^m-1}{q-1}$, we have
\[
H'=\{(0,0)\neq (x,y)\in {\mathbb F}_{q^m}\times {\mathbb F}_{q^m}\,|\,{\mathrm{Tr}}r_{q^m/q}(xy)=0\},
\]
where ${\mathrm{Tr}}r_{q^m/q}(xy)$ is a nondegenerate hyperbolic quadratic form from ${\mathbb F}_{q^m}\times {\mathbb F}_{q^m}$ to ${\mathbb F}_{q}$. Hence, it is appropriate to call the lifting construction in Proposition~\ref{theorem:main2} a {\it hyperbolic} type lifting construction.}
\end{remark}
We now apply Propositions~\ref{theorem:main1} and ~\ref{theorem:main2} to the known cyclotomic strongly regular graphs. We first apply the two propositions to the semi-primitive examples. In this case, we have $|I|=1$.
\begin{corollary}\label{cor:semi}
Let $p$ be a prime, $N\ge 2$, $q^m=p^{2js}$, where $s\ge 2$, $N\,|\,(p^j+1)$, and $j$ is the smallest such positive integer.
For $\epsilon\in \{1,-1\}$, there exists an $(n^2,r(n-\epsilon),\epsilon n+r^2-3\epsilon r,r^2-\epsilon r)$ strongly regular Cayley graph with $n=q^m$ and $r=(q^m-1)/N$. \end{corollary} Next we apply Propositions~\ref{theorem:main1} and \ref{theorem:main2} to the subfield examples. In this case, we have $N=\frac{q^{m}-1}{q^d-1}$ and $|I|=\frac{q^{m-d}-1}{q^d-1}$, where $d\,|\,m$. \begin{corollary}\label{cor:singer} Let $q$ be a prime power and $m\ge 3$ a positive integer. For $\epsilon\in \{1,-1\}$, there exists an $(n^2,r(n-\epsilon),\epsilon n+r^2-3\epsilon r,r^2-\epsilon r)$ strongly regular Cayley graph with $n=q^m$ and $r=q^{m-d}-1$. \end{corollary} When $d=1$, the strongly regular graphs obtained in Corollary~\ref{cor:singer} were already known~\cite{Ma}. Finally, we apply Propositions~\ref{theorem:main1} and \ref{theorem:main2} to the eleven sporadic examples of cyclotomic strongly regular graphs. In this case, the values of $k:=|I|$ are given in \cite[Table II]{SW}. \begin{corollary}\label{cor-spor} For $\epsilon\in \{1,-1\}$, there exists an $(n^2,r(n-\epsilon),\epsilon n+r^2-3\epsilon r,r^2-\epsilon r)$ strongly regular Cayley graph with $n=q^m$ and $r=k(q^m-1)/N$ in each of the following cases: \begin{eqnarray*} (q^m,N,k)&=&(3^5,11,5),(5^9,19,9),(3^{12},35,17),(7^9,37,9),(11^7,43,21),(17^{33},67,33)\\ & &(3^{53},107,53),(5^{18},133,33),(41^{81},163,81),(3^{144},323,161),(5^{249},499,249). \end{eqnarray*} \end{corollary} \section{Halving the connection sets $E$ and $H$ and their complements} In a couple of recent papers \cite{FMX, BLMX}, motivated by existence questions concerning finite geometric objects such as $m$-ovoids and $i$-tight sets, we used a certain partition of the Singer difference set to construct strongly regular Cayley graphs with special properties which give the desired $m$-ovoids and $i$-tight sets. We now realize that the constructions can be done in a more general setting, namely, we can do the construction by partitioning a subdifference set of the Singer difference set in a certain way. In the case where the cyclotomic strongly regular graph comes from a subfield, the subdifference set of the Singer difference set is actually a Singer difference set; so in this case, we recover the previous constructions. We will also use a certain partition of the complement of a subdifference of the Singer difference sets to construct more strongly regular Cayley graphs. Assume that $N\geq 2$ is odd, $N\,|\,\frac{q^m-1}{q-1}$, and ${\mathbb C}ay({\mathbb F}_{q^m},C_0^{(N,q^m)})$ is strongly regular. Let $I$ be the corresponding subdifference set in ${\mathbb Z}_N$ defined in (\ref{eq:subdi}). Let $S_1, S_2$ be a partition of $I$ and let $S_i'\equiv 2^{-1}S_i\,({\mathrm{mod\,\,}}{N})$ and $S_i''\equiv 2^{-1}S_i'\,({\mathrm{mod\,\,}}{N})$ for $i=1,2$. Define \begin{equation}\label{eq:defX1} X:=2S_1'' \cup (2S_2''+N)\,({\mathrm{mod\,\,}}{2N}). \end{equation} Let $J_1:=\{0,3\}$ and $J_2:=\{1,2\}$. Define \begin{equation}\label{eq:defI} Y:=\{Ni+4j \hspace{-0.2cm}\pmod{4N}: (i,j)\in (J_1\times S_1'') \cup (J_2\times S_2'')\}. \end{equation} It is clear that $X\equiv 2^{-1}I\,({\mathrm{mod\,\,}}{N})$ and $Y\equiv I\,({\mathrm{mod\,\,}}{N})$. Similarly, let $T_1, T_2$ be a partition of ${\mathbb Z}_N\setminus I$ and let $T_i'\equiv 2^{-1}T_i\,({\mathrm{mod\,\,}}{N})$ and $T_i''\equiv 2^{-1}T_i'\,({\mathrm{mod\,\,}}{N})$ for $i=1,2$. Define \begin{equation}\label{eq:defX2} \widehat{X}:=2T_1'' \cup (2T_2''+N)\,({\mathrm{mod\,\,}}{2N}). 
\end{equation} Furthermore, define \begin{equation}\label{eq:defI2} \widehat{Y}:=\{Ni+4j \hspace{-0.2cm}\pmod{4N}: (i,j)\in (J_1\times T_1'') \cup (J_2\times T_2'')\}, \end{equation} where $J_1=\{0,3\}$ and $J_2=\{1,2\}$. \subsection{Decompositions of ${\mathbb C}ay({\mathbb F}_{q^{2m}}, E)$ and its complement}\label{sec:minus} In this subsection, we always assume that $q^m\equiv 3\,({\mathrm{mod\,\,}}{4})$. We will consider decompositions of ${\mathbb C}ay({\mathbb F}_{q^{2m}}, E)$ and its complement, where $E$ is defined in (\ref{ellip}). We define \begin{equation}\label{eq:defD-} E_1:=\bigcup_{i \in Y}C_i^{(4N,q^{2m})}, \end{equation} where $C_i^{(4N,q^{2m})}:={\gamma}mma^i \langle {\gamma}mma^{4N}\rangle$, ${\gamma}mma$ is a primitive element of ${\mathbb F}_{q^{2m}}$, and $Y$ is defined in \eqref{eq:defI}. Since $Y\equiv I\pmod N$, we see that $E_1$ is a subset of $E$, and $|E_1|=|E|/2$. The (additive) character values of $E_1$ are given by the following lemma. \begin{lemma}\label{rem:quad-} Let $\psi_{{\mathbb F}_{q^{2m}}}$ and $\psi_{{\mathbb F}_{q^m}}$ be the canonical additive characters of ${\mathbb F}_{q^{2m}}$ and ${\mathbb F}_{q^{m}}$, respectively. For $a\in {\mathbb Z}_{4N}$, define $b\equiv 4^{-1}a\,({\mathrm{mod\,\,}}{N})$ and $c\equiv 2b\,({\mathrm{mod\,\,}}{2N})$. Then, \begin{align} \psi_{{\mathbb F}_{q^{2m}}}({\gamma}mma^a E_1)=&\,\frac{\rho_p \delta_a q^m}{2G_{q^m}(\eta)}\left(2\psi_{{\mathbb F}_{q^m}}(\omega^c\bigcup_{t\in X}C_t^{(2N,q^m)})-\psi_{{\mathbb F}_{q^m}} (\omega^c \bigcup_{t\in 2^{-1}I}C_t^{(N,q^m)})\right)\nonumber\\ &\, \hspace{3.0cm}+\frac{(q^m-1)|I|}{2N}-\left\{ \begin{array}{ll} \frac{q^m}{2}, & \mbox{ if $c \in 2^{-1}I\,({\mathrm{mod\,\,}}{N})$,}\\ 0, & \mbox{ otherwise,} \end{array} \right.\label{eq:DD-} \end{align} where $\delta_a=1$ or $-1$ depending on whether {$a\equiv 0,N\,({\mathrm{mod\,\,}}{4})$ or $a\equiv 2,3N\,({\mathrm{mod\,\,}}{4})$,} and $\rho_p=1$ or $-1$ depending on whether $p\equiv 7\,({\mathrm{mod\,\,}}{8})$ or $p\equiv 3\,({\mathrm{mod\,\,}}{8})$. Furthermore, $\eta$ is the quadratic character of ${\mathbb F}_{q^m}$. \end{lemma} This lemma is a common generalization of the results in \cite{BLMX} and \cite{Mo}. Its proof is the same as those in \cite{BLMX} and \cite{Mo}. We therefore omit the proof. Next, we consider a decomposition of the complement of ${\mathbb C}ay({\mathbb F}_{q^{2m}}, E)$. Let \begin{equation}\label{eq:defD-2} E_2:=\bigcup_{i \in \widehat{Y}}C_i^{(4N,q^{2m})}, \end{equation} where $\widehat{Y}$ is defined in \eqref{eq:defI2}. The (additive) character values of $E_2$ are given by the following lemma. 
\begin{lemma}\label{rem:quad-2} With the same notation as in Lemma~\ref{rem:quad-}, \begin{align} \psi_{{\mathbb F}_{q^{2m}}}({\gamma}mma^a E_2)=&\,\frac{\rho_p \delta_a q^m}{2G_{q^m}(\eta)}\left(2\psi_{{\mathbb F}_{q^m}}(\omega^c\bigcup_{t\in \widehat{X}}C_t^{(2N,q^m)})-\psi_{{\mathbb F}_{q^m}} (\omega^c \bigcup_{t\in {\mathbb Z}_N\setminus 2^{-1}I}C_t^{(N,q^m)})\right)\nonumber\\ &\, \hspace{3.0cm}+\frac{(q^m-1)(N-|I|)}{2}-\left\{ \begin{array}{ll} 0, & \mbox{ if $c \in 2^{-1}I\,({\mathrm{mod\,\,}}{N})$,}\\ \frac{q^m}{2}, & \mbox{ otherwise.} \end{array} \right.\label{eq:DD-2} \end{align} \end{lemma} \begin{remark}\label{rem:chara}{\em \begin{itemize} \item[(i)] If $X$ defined in \eqref{eq:defX1} satisfies that \begin{equation}\label{eq:3-chara} 2\psi_{{\mathbb F}_{q^m}}(\omega^c \bigcup_{t\in X}C_{t}^{(2N,q^m)})- \psi_{{\mathbb F}_{q^m}}(\omega^c \bigcup_{t\in 2^{-1}I}C_{t}^{(N,q^m)})=\left\{ \begin{array}{ll} \pm G_{q^m}(\eta), & \mbox{ if $c\in 2^{-1}I\, ({\mathrm{mod\,\,}}{N})$,}\\ 0, & \mbox{ otherwise, } \end{array} \right. \end{equation} substituting \eqref{eq:3-chara} into \eqref{eq:DD-}, we find that the nontrivial additive character values of $E_1$ take two distinct values $\frac{(q^m-1)|I|}{2N}$ and $\frac{(q^m-1)|I|}{2N}-q^m$, implying that ${\mathbb C}ay({\mathbb F}_{q^{2m}}, E_1)$ is strongly regular. \item[(ii)] If $\widehat{X}$ defined in \eqref{eq:defX2} satisfies that \begin{equation}\label{eq:3-chara2} 2\psi_{{\mathbb F}_{q^m}}(\omega^c \bigcup_{t\in \widehat{X}}C_{t}^{(2N,q^m)})- \psi_{{\mathbb F}_{q^m}}(\omega^c \bigcup_{t\in {\mathbb Z}_N\setminus 2^{-1}I}C_{t}^{(N,q^m)})=\left\{ \begin{array}{ll} 0, & \mbox{ if $c\in 2^{-1}I\, ({\mathrm{mod\,\,}}{N})$,}\\ \pm G_{q^m}(\eta), & \mbox{ otherwise, } \end{array} \right. \end{equation} substituting \eqref{eq:3-chara2} into \eqref{eq:DD-2}, we find that the nontrivial additive character values of $E_2$ take two distinct values $\frac{(q^m-1)(N-|I|)}{2N}$ and $\frac{(q^m-1)(N-|I|)}{2N}-q^m$, implying that ${\mathbb C}ay({\mathbb F}_{q^{2m}}, E_2)$ is strongly regular. \end{itemize}} \end{remark} \subsection{Decompositions of ${\mathbb C}ay({\mathbb F}_{q^{m}}\times {\mathbb F}_{q^{m}}, H)$ and its complement} In this subsection, we assume that $q^m\equiv 1\,({\mathrm{mod\,\,}}{4})$, $N$ is an odd divisor of $\frac{q^m -1}{q-1}$, and $\gcd{(N,\frac{q^m-1}{N})}=1$. Define \begin{equation}\label{def:D+} H_1:=\{(xy,xy^{-1}z\omega^{\ell})\,|\,x\in C_0^{(N,q^m)},y\in C_0^{(\frac{q^m-1}{N},q^m)},z\in C_0^{(4N,q^m)},\ell \in Y\}\subseteq {\mathbb F}_{q^m}\times {\mathbb F}_{q^m}, \end{equation} where $\omega$ is a primitive element of ${\mathbb F}_{q^m}$ defined in Subsection~\ref{sec:two} and $Y$ is defined in \eqref{eq:defI}. In the definition of $H_1$, since $x^2z\omega^\ell \in \bigcup_{\ell \in I}C_\ell^{(N,q^m)}$, we see that $H_1$ is a subset of $H$. Moreover, $|H_1|=|H|/2.$ The (additive) character values of $H_1$ are given in the following lemma. \begin{lemma}\label{mainconstruction2+} Let $\psi_{a,b}$ be an additive character of ${\mathbb F}_{q^m}\times {\mathbb F}_{q^m}$ defined in \eqref{fieldchara22} and $\psi_{{\mathbb F}_{q^m}}$ be the canonical additive character of ${\mathbb F}_{q^m}$. For $(a,b)\in {\mathbb F}_{q^m}\times {\mathbb F}_{q^m}\setminus \{(0,0)\}$ with $ab=0$, it holds that $\psi_{a,b}(H_1) =-(q^m-1)|I|/2N$. 
For $(a,b)\in {\mathbb F}_{q^m}\times {\mathbb F}_{q^m}\setminus \{(0,0)\}$ with $ab\not=0$, it holds that \begin{align} \psi_{a,b}(H_1)=&\,\frac{\eta(2\omega^c)G_{q^m}(\eta)\delta_{a,b} }{2} \left(2\psi_{{\mathbb F}_{q^m}}(\omega^c\bigcup_{t\in X}C_t^{(2N,q^3)})-\psi_{{\mathbb F}_{q^m}} (\omega^c \bigcup_{t\in 2^{-1}I}C_t^{(N,q^m)})\right)\nonumber\\ &\, \hspace{3.0cm}-\frac{(q^m-1)|I|}{2N}+\left\{ \begin{array}{ll} \frac{q^m}{2}, & \mbox{ if $c \in 2^{-1}I\,({\mathrm{mod\,\,}}{N})$,}\\ 0, & \mbox{ otherwise,} \end{array} \right.\label{eq:DD+} \end{align} where $c$ is defined by $\omega^c=(ab)^{\frac{N+1}{2}}$ and $\delta_{a,b}=1$ or $-1$ depending on whether {$\log_{\omega}(a^{-1}b)\equiv 0,N\,({\mathrm{mod\,\,}}{4})$ or $\log_{\omega}(a^{-1}b)\equiv 2,3N\,({\mathrm{mod\,\,}}{4})$. } Furthermore, $\eta$ is the quadratic character of ${\mathbb F}_{q^m}$. \end{lemma} This lemma is a generalization of \cite[Theorem~4.1]{FMX}. Since the proof is similar to that of \cite[Theorem~4.2]{FMX}, we omit the proof. Next, we consider a decomposition of the complement of ${\mathbb C}ay({\mathbb F}_{q^{m}}\times {\mathbb F}_{q^m}, H)$. Define \begin{equation}\label{def:D+} H_2:=\{(xy,xy^{-1}z\omega^{\ell})\,|\,x\in C_0^{(N,q^m)},y\in C_0^{(\frac{q^m-1}{N},q^m)},z\in C_0^{(4N,q^m)},\ell \in \widehat{Y}\}\subseteq {\mathbb F}_{q^m}\times {\mathbb F}_{q^m}, \end{equation} where $\widehat{Y}$ is defined in \eqref{eq:defI2}. The character values of $H_2$ are given in the following lemma. \begin{lemma}\label{mainconstruction3+} For $(a,b)\in {\mathbb F}_{q^m}\times {\mathbb F}_{q^m}\setminus \{(0,0)\}$ with $ab=0$, it holds that $\psi_{a,b}(H_2) =-(q^m-1)(N-|I|)/2N$. For $(a,b)\in {\mathbb F}_{q^m}\times {\mathbb F}_{q^m}\setminus \{(0,0)\}$ with $ab\not=0$, it holds that \begin{align} \psi_{a,b}(H_2)=&\,\frac{\eta(2\omega^c)G_{q^m}(\eta)\delta_{a,b} }{2} \left(2\psi_{{\mathbb F}_{q^m}}(\omega^c\bigcup_{t\in \widehat{X}}C_t^{(2N,q^3)})-\psi_{{\mathbb F}_{q^m}} (\omega^c \bigcup_{t\in {\mathbb Z}_N\setminus 2^{-1}I}C_t^{(N,q^m)})\right)\nonumber\\ &\, \hspace{3.0cm}-\frac{(q^m-1)(N-|I|)}{2N}+\left\{ \begin{array}{ll} 0, & \mbox{ if $c \in 2^{-1}I\,({\mathrm{mod\,\,}}{N})$,}\\ \frac{q^m}{2}, & \mbox{ otherwise, } \end{array} \right.\label{eq:DD+2} \end{align} where $c$ is defined by $\omega^c=(ab)^{\frac{N+1}{2}}$. \end{lemma} \begin{remark}\label{re:cha:hyp}{\em Similarly to Remark~\ref{rem:chara}, if the set $X$ defined in \eqref{eq:defX1} satisfies \eqref{eq:3-chara}, the nontrivial additive character values of $H_1$ take two distinct values $-\frac{(q^m-1)|I|}{2N}$ and $-\frac{(q^m-1)|I|}{2N}+q^m$, implying that ${\mathbb C}ay({\mathbb F}_{q^m}\times {\mathbb F}_{q^m}, H_1)$ is strongly regular. Also, if $\widehat{X}$ defined in \eqref{eq:defX2} satisfies \eqref{eq:3-chara2}, then the nontrivial additive character values of $H_2$ take two distinct values $-\frac{(q^m-1)(N-|I|)}{2N}$ and $-\frac{(q^m-1)(N-|I|)}{2N}+q^m$, implying that ${\mathbb C}ay({\mathbb F}_{q^m}\times {\mathbb F}_{q^m}, H_2)$ is strongly regular.} \end{remark} \section{Partition of subdifference sets and their complements in semi-primitive case} In this section, we consider a partition of the subdifference sets $I$ in semi-primitive case. We will use the same notation as in Section 4. We assume that $N$ is odd and $q^{m}=p^{2js}$, where $p$ is a prime, $s\ge 2$, $N\,|\,(p^j+1)$, and $j$ is the smallest such positive integer. 
In this case, ${\mathbb C}ay({\mathbb F}_{q^{m}},C_0^{(N,q^m)})$ is strongly regular and we have $I=\{0\}$~\cite{SW}. Furthermore, by Theorem~\ref{thm:semiprim}, the Gauss sums with respect to multiplicative characters of exponent $N$ of ${\mathbb F}_{q^m}$ can be explicitly evaluated as \begin{equation}\label{eq:Gsemi} G_{q^m}(\chi_N^i)=(-1)^{s-1}\sqrt{q^m}, \quad 1\le i\le N-1. \end{equation} \begin{theorem}\label{thm:semi-} With the same notation as in Section 4, under the above assumptions, the partition $(S_1,S_2)=(\{0\},\emptyset)$ of $I$ satisfies the condition~\eqref{eq:3-chara} of Remark~\ref{rem:chara} (i). \end{theorem} {\sigmaspace{-0.0cm}\bf Proof: \,} By the definition~\eqref{eq:defX1} of $X$, we have $X=\{0\}$. Write \[ P_c:=2\psi_{{\mathbb F}_{q^m}}(\omega^c \bigcup_{t\in X}C_t^{(2N,q^m)})-\psi_{{\mathbb F}_{q^m}}(\omega^c \bigcup_{t\in 2^{-1}I}C_t^{(N,q^m)}). \] By \eqref{eq:3-chara}, we need to prove that \[ P_c=\left\{ \begin{array}{ll} \pm G_{q^m}(\eta), & \mbox{ if $c\equiv 0\, ({\mathrm{mod\,\,}}{N})$,}\\ 0, & \mbox{ otherwise,} \end{array} \right. \] where $\eta$ is the quadratic character of ${\mathbb F}_{q^m}$. Let $\chi_{N}$ be a multiplicative character of {order $N$} of ${\mathbb F}_{q^m}$. By the orthogonality of characters, we have \begin{align} P_c=&\,\frac{1}{N}\left(\sum_{i=0}^{N-1}\sum_{j=0,1}\sum_{t\in X}G_{q^m}(\chi_{N}^i\eta^j)\chi_{N}^{-i}\eta^{j}(\omega^{c+t})-\sum_{i=0}^{N-1}\sum_{t\in 2^{-1}I}G_{q^m}(\chi_{N}^i)\chi_{N}^{-i}(\omega^{c+t})\right) \nonumber\\ =&\,\frac{1}{N}\sum_{i=0}^{N-1}\sum_{t\in X}G_{q^m}(\chi_{N}^i\eta)\chi_{N}^{-i}\eta(\omega^{c+t}). \label{eq:hiki} \end{align} By the Davenport-Hasse product formula and \eqref{eq:Gsemi}, we have \[ G_{q^m}(\chi_{N}^i\eta)=\frac{G_{q^m}(\chi_N^{2i})G_{q^m}(\eta)}{G_{q^m}(\chi_{N}^i)}=G_{q^m}(\eta). \] On the other hand, by the definition of $X$, we have $\sum_{t\in X}\chi_{N}^{-i}\eta(\omega^{t})=1$. Continuing from \eqref{eq:hiki}, we have \[ P_c=\frac{\eta(\omega^{c})G_{q^m}(\eta)}{N}\sum_{i=0}^{N-1}\chi_{N}^{-i}(\omega^{c}) = \left\{ \begin{array}{ll} \eta(\omega^{c})G_{q^m}(\eta), & \mbox{ if $c \equiv 0\,({\mathrm{mod\,\,}}{N})$,}\\ 0, & \mbox{ otherwise.} \end{array} \right. \nonumber \] This completes the proof of the theorem. { $\square$} \sigmaspace{0.3cm} Similarly to the theorem above, we have the following. \begin{theorem}\label{thm:spoc2} With the notations above, the partition $(T_1, T_2)=({\mathbb Z}_N\setminus \{0\},\emptyset)$ of ${\mathbb Z}_N\setminus I$ satisfies the condition~\eqref{eq:3-chara2} of Remark~\ref{rem:chara} (ii). \end{theorem} Since $q^m=p^{2js}$, we have $q^m\equiv 1\,({\mathrm{mod\,\,}}{4})$. We can only apply the lifting construction of hyperbolic type. By Lemma~\ref{mainconstruction2+}, Remark~\ref{re:cha:hyp} and Theorem~\ref{thm:semi-}, we obtain the following. \begin{corollary}\label{cor-semi-} Let $N$ be odd and $q^{m}=p^{2js}$, where $p$ is a prime, $s\ge 2$, $N\,|\,(p^j+1)$, and $j$ is the smallest such positive integer. Assume that $\gcd{(N,\frac{q^m-1}{N})}=1$. Then, there exists a $(q^{2m},r(q^{m}-1),q^m+r^2-3 r,r^2- r)$ strongly regular Cayley graph, where $r=(q^m-1)/2N$. \end{corollary} \sigmaspace{0.3cm} Similarly to the corollary above, by Lemma~\ref{mainconstruction3+}, Remark~\ref{re:cha:hyp} and Theorem~\ref{thm:spoc2}, we obtain the following corollary. \begin{corollary}\label{cor-semi-2} Let $N$ be odd and $q^{m}=p^{2js}$, where $p$ is a prime, $s\ge 2$, $N\,|\,(p^j+1)$, and $j$ is the smallest such positive integer. 
Assume that $\gcd{(N,\frac{q^m-1}{N})}=1$. Then, there exists a $(q^{2m},r(q^{m}-1),q^m+r^2-3 r,r^2- r)$ strongly regular Cayley graph, where $r=(N-1)(q^m-1)/2N$. \end{corollary} \section{Partition of subdifference sets and their complements in sporadic case} In this section, we consider partitions of the subdifference set $I$ and its complement in the sporadic case. \begin{theorem}\label{thm:spo} Assume that $N\geq 2$ is odd, $N\;|\;\frac{q^m-1}{q-1}$, ${\mathbb C}ay({\mathbb F}_{q^m},C_0^{(N,q^m)})$ is strongly regular and $-2\in \langle p\rangle\,({\mathrm{mod\,\,}}{N})$, where $p$ is the characteristic of ${\mathbb F}_{q^m}$. Let $I$ be the corresponding subdifference set defined in (\ref{eq:subdi}). Then, the partition $(S_1,S_2)=(I,\emptyset)$ of $I$ satisfies the condition~\eqref{eq:3-chara} of Remark~\ref{rem:chara} (i). \end{theorem} {\sigmaspace{-0.0cm}\bf Proof: \,} Let $I'\equiv 4^{-1}I\,({\mathrm{mod\,\,}}{N})$. By the definition~\eqref{eq:defX1} of $X$, we have $X\equiv 2I'\,({\mathrm{mod\,\,}}{2N})$. Write \[ P_c:=2\psi_{{\mathbb F}_{q^m}}(\omega^c \bigcup_{t\in X}C_t^{(2N,q^m)})-\psi_{{\mathbb F}_{q^m}}(\omega^c \bigcup_{t\in 2^{-1}I}C_t^{(N,q^m)}). \] Let $\chi_{N}$ be a multiplicative character of ${\mathbb F}_{q^m}$ of order $N$ and $\eta$ be the quadratic character of ${\mathbb F}_{q^m}$. Similarly to the proof of Theorem~\ref{thm:semi-}, we have \begin{align} P_c=\frac{1}{N}\sum_{i=1}^{N-1}\sum_{t\in X}G_{q^m}(\chi_{N}^i\eta)\chi_{N}^{-i}\eta(\omega^{c+t})+\frac{\eta(\omega^c)G_{q^m}(\eta)|I|}{N}. \label{eq:hiki2} \end{align} By the Davenport-Hasse product formula, we have \[ G_{q^m}(\chi_{N}^i\eta)=\frac{G_{q^m}(\chi_N^{2i})G_{q^m}(\eta)}{G_{q^m}(\chi_{N}^i)}. \] On the other hand, by \eqref{eq:sum}, for $i\not=0$ \[ \sum_{t\in X}\chi_{N}^{-i}\eta(\omega^{t})=\sum_{t\in 2I'}\chi_{N}^{-i}(\omega^{t})=\frac{G_{q^m}(\chi_N^{-2^{-1}i})}{\delta q}. \] Substituting these into \eqref{eq:hiki2}, we have \begin{equation}\label{eq:subdi2} P_c=\frac{\eta(\omega^{c})G_{q^m}(\eta)}{\delta q N}\sum_{i=1}^{N}\frac{G_{q^m}(\chi_N^{2i})G_{q^m}(\chi_N^{-2^{-1}i})}{G_{q^m}(\chi_{N}^i)}\chi_{N}^{-i}(\omega^{c})+\frac{\eta(\omega^c)G_{q^m}(\eta)|I|}{N}. \end{equation} By the assumption that $-2\in \langle p\rangle \,({\mathrm{mod\,\,}}{N})$, we have $G_{q^m}(\chi_N^{-2^{-1}i})=G_{q^m}(\chi_N^i)$. Therefore, continuing from \eqref{eq:subdi2}, we have \begin{align*} P_c=&\,\frac{\eta(\omega^{c})G_{q^m}(\eta)}{\delta q N}\sum_{i=1}^{N}G_{q^m}(\chi_N^{2i})\chi_{N}^{-i}(\omega^{c})+\frac{\eta(\omega^c)G_{q^m}(\eta)|I|}{N}\\ =&\,\frac{\eta(\omega^{c})G_{q^m}(\eta)}{N}\left(\sum_{i=1}^{N}\sum_{t\in I}\chi_N^{2i}(\omega^t)\chi_{N}^{-i}(\omega^{c})+|I|\right)\\ =&\,\frac{\eta(\omega^{c})G_{q^m}(\eta)}{N}\sum_{i=0}^{N}\sum_{t\in I}\chi_N^{2i}(\omega^t)\chi_{N}^{-i}(\omega^{c})= \left\{ \begin{array}{ll} \eta(\omega^{c})G_{q^m}(\eta), & \mbox{ if $c \in 2I\,({\mathrm{mod\,\,}}{N})$,}\\ 0, & \mbox{ otherwise.} \end{array} \right. \end{align*} Since the subdifference set $I$ is invariant under the multiplication by $p$ modulo $N$, by the assumption that $-2^{-1}\in \langle p\rangle \,({\mathrm{mod\,\,}}{N})$, the condition $c\in 2I\,({\mathrm{mod\,\,}}{N})$ is equivalent to that $c\in 2^{-1}I\,({\mathrm{mod\,\,}}{N})$. This completes the proof of the theorem. { $\square$} \sigmaspace{0.3cm} Similarly to the theorem above, we have the following. 
\begin{theorem}\label{thm:spoc} Assume that $N\geq 2$ is odd, $N\;|\;\frac{q^m-1}{q-1}$, ${\mathbb C}ay({\mathbb F}_{q^m},C_0^{(N,q^m)})$ is strongly regular and $-2\in \langle p\rangle\,({\mathrm{mod\,\,}}{N})$. Then the partition $(T_1, T_2)=({\mathbb Z}_N\setminus I,\emptyset)$ of ${\mathbb Z}_N\setminus I$ satisfies the condition~\eqref{eq:3-chara2} of Remark~\ref{rem:chara} (ii). \end{theorem} There are ten sporadic examples of cyclotomic strongly regular graphs satisfying the condition $-2\in \langle p\rangle\,({\mathrm{mod\,\,}}{N})$. In particular, when $q^m\equiv 3\,({\mathrm{mod\,\,}}{4})$, we obtain the following result. \begin{corollary}\label{cor-spor} There exists a $(q^{2m},r(q^{m}+1),-q^m+r^2+3 r,r^2+ r)$ strongly regular Cayley graph with $r=k(q^m-1)/2N$ in each of the following cases: \[ (q^m,N,k)=(3^5,11,5),(11^7,43,21),(3^{53},107,53). \] \end{corollary} {\sigmaspace{-0.0cm}\bf Proof: \,} It is clear that $-2\in \langle p\rangle\,({\mathrm{mod\,\,}}{N})$ in these cases. Then, by applying Lemma~\ref{rem:quad-}, Remark~\ref{rem:chara}~(i) and Theorem~\ref{thm:spo} to these examples, the corollary now follows. { $\square$} \sigmaspace{0.3cm} Similarly to the corollary above, by applying Lemma~\ref{rem:quad-2}, Remark~\ref{rem:chara}~(ii) and Theorem~\ref{thm:spoc} to the three sporadic cyclotomic strongly regular graphs in Corollary~\ref{cor-spor}, we obtain the following. \begin{corollary}\label{cor-spor2} There exists a $(q^{2m},r(q^{m}+1),-q^m+r^2+3 r,r^2+ r)$ strongly regular Cayley graph with $r=(N-k)(q^m-1)/2N$ in each of the following cases: \[ (q^m,N,k)=(3^5,11,5),(11^7,43,21),(3^{53},107,53). \] \end{corollary} \sigmaspace{0.3cm} In the case where $(q^m,N,k)=(7^9,37,9)$, the condition that $-2\in \langle p\rangle\,({\mathrm{mod\,\,}}{N})$ is not satisfied. We checked by computer that there is no partition of the subdifference set $I$ satisfying the condition~\eqref{eq:3-chara}. On the other hand, we checked that there is a partition of ${\mathbb Z}_{37}\setminus I$ satisfying the condition~\eqref{eq:3-chara2}: $T_1= 2I$ and $T_2={\mathbb Z}_{37}\setminus (I\cup 2I)$. Hence, we have the following corollary. \begin{corollary}\label{cor-spor} There exists a $(q^{2m},r(q^m+1),-q^m+r^2+3 r,r^2+ r)$ strongly regular Cayley graph with $r=(N-k)(q^m-1)/2N$ in the case where $(q^m,N,k)=(7^9,37,9)$. \end{corollary} Next, we consider the case where $q^m\equiv 1\,({\mathrm{mod\,\,}}{4})$. \begin{corollary}\label{cor-spor3} There exists a $(q^{2m},r(q^m-1),q^m+r^2-3r,r^2- r)$ strongly regular Cayley graph with $r=k(q^m-1)/2N$ in each of the following cases: \begin{align*} (q^m,N,k)=&\,(3^{12},35,17),(5^9,19,9),(17^{33},67,33), (5^{18},133,33),\\ &\,(41^{81},163,81),(3^{144},323,161),(5^{249},499,249). \end{align*} \end{corollary} {\sigmaspace{-0.0cm}\bf Proof: \,} It is clear that $\gcd{(N,\frac{q^m-1}{N})}=1$ and $-2\in \langle p\rangle\,({\mathrm{mod\,\,}}{N})$ in these cases. Then, by applying Lemma~\ref{mainconstruction2+}, Remark~\ref{re:cha:hyp} and Theorem~\ref{thm:spo} to these examples, the corollary now follows. { $\square$} \sigmaspace{0.3cm} Similarly to the corollary above, by applying Lemma~\ref{mainconstruction3+}, Remark~\ref{re:cha:hyp} and Theorem~\ref{thm:spoc} to these examples, we obtain the following corollary. 
\begin{corollary}\label{cor-spor4} There exists a $(q^{2m},r(q^{m}-1),q^m+r^2-3 r,r^2- r)$ strongly regular Cayley graph with $r=(N-k)(q^m-1)/2N$ in each of the following cases: \begin{align*} (q^m,N,k)=&\,(3^{12},35,17),(5^9,19,9),(17^{33},67,33), (5^{18},133,33),\\ &\,(41^{81},163,81),(3^{144},323,161),(5^{249},499,249). \end{align*} \end{corollary} \section{Partitions of subdifference sets and their complements in the subfield case} In this section, we consider partitions of the subdifference set $I$ and its complement in subfield case. We assume that $m$ is odd and $N=\frac{q^m-1}{q-1}$. In this case, ${\mathbb C}ay({\mathbb F}_{q^{m}},C_0^{(N,q^m)})$ is strongly regular and we have \begin{equation}\label{eq:sub1} I:=\{i\,({\mathrm{mod\,\,}}{N}):{\mathrm{Tr}}r_{q^m/q}(w^i)=0\}. \end{equation} \subsection{A partition of the Singer difference set $I$ defined in (\ref{eq:sub1}) when $m=3$} In the case where $m=3$, a partition of the Singer difference set $I$ satisfying the condition~\eqref{eq:3-chara} of Remark~\ref{rem:chara}~(i) was found in \cite[Theorem~3.7]{FMX}. Regarding ${\mathbb F}_{q^3}$ as a $3$-dimensional vector space over ${\mathbb F}_q$, we use ${\mathbb F}_{q^3}$ as the underlying vector space of ${\mathrm{PG}}(2,q)$. The points of ${\mathrm{PG}}(2,q)$ are $\langle{\omega^i}\rangle$, $0\le i\le N-1$, and the lines of ${\mathrm{PG}}(2,q)$ are \begin{equation}\label{eqn_Lu} L_i:=\{\langle{x}\rangle:\,{\mathrm{Tr}}r_{q^3/q}(\omega^i x)=0\},\, \, \, \, 0\le i\le N-1. \end{equation} The Singer difference set $I$ corresponds to the typical line $L_0$. Consider a nondegenerate quadratic form $f: {\mathbb F}_{q^3}\rightarrow {\mathbb F}_q$ defined by $f(x)= \text{Tr}_{q^3/q}(x^2)$, which defines a conic $\cQ$ in ${\mathrm{PG}}(2,q)$ containing $q+1$ points. Consequently, each line $L$ of ${\mathrm{PG}}(2,q)$ meets $\cQ$ in $0$, $1$ or $2$ points. Consider the following subset of ${\mathbb Z}_N$: \begin{equation}\label{eqn_IQ} I_\cQ:=\{i\, ({\mathrm{mod\,\,}}{N}):f(\omega^{i})=0\}=\{d_0,d_1,\ldots, d_{q}\}, \end{equation} where the elements are numbered in any unspecified order. Thus, $\cQ=\{\langle{\omega^{d_i}}\rangle:\,0\le i\le q\}$. Furthermore, by the definition of $f$ and $I$, $I_\cQ\equiv 2^{-1}I\pmod{N}$. For $d_0\in I_\cQ$, define \[ {\mathcal X}:=\{\omega^{d_i}{\mathrm{Tr}}r_{q^3/q}(\omega^{d_0+d_i}):\,1\le i\le q\}\cup\{2 \omega^{d_0}\} \] and \begin{equation}\label{eqn_defX2} X:=\{\log_{\omega}(x)\, ({\mathrm{mod\,\,}}{2N}):\, x\in {\mathcal X}\}\subset {\mathbb Z}_{2N}. \end{equation} Clearly, $|X|=|I_{\mathcal Q}|$ and $X\equiv I_\cQ\pmod{N}$. If we use any other $d_i$ instead of $d_0$ in the definition of ${\mathcal X}$, then the resulting set $X'$ satisfies that $X'\equiv X\, ({\mathrm{mod\,\,}}{2N})$ or $X'\equiv X+N\, ({\mathrm{mod\,\,}}{2N})$~\cite[Lemma~3.4]{FMX}. The set $X$ can be expressed as \begin{equation}\label{eqn_defX} X=2S_1''\cup (2S_2''+N)\pmod{2N} \end{equation} for some $S_1'',S_2''\subseteq {\mathbb Z}_N$ with $|S_1''|+|S_2''|=q+1$. Define $S_i'\equiv 2S_i''\pmod{N}$ and $S_i\equiv 2S_i'\pmod{N}$ for $i=1,2$. Then, $S_1'\cup S_2'\equiv I_\cQ\pmod{N}$ and $S_1\cup S_2\equiv I\pmod{N}$, i.e., $X$ induces partitions of $I_\cQ$ and $I$, respectively. \begin{theorem}\label{thm:sub6}{\em \cite[Theorem 3.7]{FMX}} The set $X$ defined in \eqref{eqn_defX2} satisfies the condition \eqref{eq:3-chara} of Remark~\ref{rem:chara} (i). \end{theorem} As corollaries, we have the following. 
\begin{corollary}\label{cor:el} For a prime power $q\equiv 3\,({\mathrm{mod\,\,}}{4})$, there exists a $(q^{6},r(q^{3}+1),-q^3+r^2+3 r,r^2+ r)$ strongly regular Cayley graph, where $r=(q^2-1)/2$. \end{corollary} {\sigmaspace{-0.0cm}\bf Proof: \,} By Lemma~\ref{rem:quad-}, Remark~\ref{rem:chara}~(i) and Theorem~\ref{thm:sub6}, the corollary now follows. { $\square$} \sigmaspace{0.3cm} The connection set $E_1\subseteq {\mathbb F}_{q^6}$ of the strongly regular Cayley graph ${\mathbb C}ay({\mathbb F}_{q^6}, E_1)$ obtained in Corollary~\ref{cor:el} corresponds to a $\frac{(q+1)}{2}$-ovoid in an elliptic quadric ${\mathcal Q}^-(5,q)$. See \cite{BLMX}. \begin{corollary}\label{cor:hyp} For a prime power $q\equiv 5,9\,({\mathrm{mod\,\,}}{12})$, there exists a $(q^6,r(q^3-1),q^3+r^2-3r,r^2-r)$ strongly regular Cayley graph, where $r=(q^2-1)/2$. \end{corollary} {\sigmaspace{-0.0cm}\bf Proof: \,} It is clear that $\gcd{(N,\frac{q^3-1}{N})}=1$ if $N=q^2+q+1$ and $q\equiv 5,9\,({\mathrm{mod\,\,}}{12})$. Then, by Lemma~\ref{mainconstruction2+}, Remark~\ref{rem:chara}~(ii) and Theorem~\ref{thm:sub6}, the corollary now follows. { $\square$} \sigmaspace{0.3cm} The connection set {$H_1\subseteq {\mathbb F}_{q^3}\times {\mathbb F}_{q^3}$} of the strongly regular Cayley graph {${\mathbb C}ay({\mathbb F}_{q^3}\times {\mathbb F}_{q^3}, H_1)$} obtained in Corollary~\ref{cor:hyp} corresponds to a $\frac{(q^2-1)}{2}$-tight set in a hyperbolic quadric ${\mathcal Q}^+(5,q)$. See \cite{DDMR,FMX}. It would be interesting to find a desired partition of $I$ when $m$ is odd and $m>3$. We leave this as an open problem. \subsection{A partition of the complement of the Singer difference set with odd $m$} In this section, we consider a partition of the complement of the Singer difference set $I\,({\mathrm{mod\,\,}}{\frac{q^m-1}{q-1}})$, where $m>1$ is an arbitrary odd integer. Note that the set $2^{-1}I\,({\mathrm{mod\,\,}}{\frac{q^m-1}{q-1}})$ corresponds to a nondegenerate parabolic quadric ${\mathcal Q}(m-1,q)$ of ${\mathrm{PG}}(m-1,q)$. Let $N=\frac{q^m-1}{q-1}$ and $\omega$ be a primitive element of ${\mathbb F}_{q^m}$, where $q$ is an odd prime power and $m>1$ is an odd integer. Define \begin{align*} A=&\,\{x\in {\mathbb F}_{q^m}^\ast\,|\,{\mathrm{Tr}}r_{q^m/q}(x^{2})= 0\},\\ A_0=&\,\{x\in {\mathbb F}_{q^m}^\ast\,|\,{\mathrm{Tr}}r_{q^m/q}(x^{2})\in C_0^{(2,q)}\},\\ A_1=&\,\{x\in {\mathbb F}_{q^m}^\ast\,|\,{\mathrm{Tr}}r_{q^m/q}(x^{2})\in C_1^{(2,q)}\}. \end{align*} Let $a_1\in A$, and define $H_1=\{x\in {\mathbb F}_{q^m}^\ast\mid {\mathrm{Tr}}r_{q^m/q}(a_1x)=0\}$. Note that $A$ represents a nondegenerate parabolic quadric of ${\mathrm{PG}}(m-1,q)$ and $H_1$ is a tangent hyperplane\footnote{Strictly speaking, we should say that {$H_1\cup\{0\}$ is a hyperplane.}} to $A$ at point $\langle a_1\rangle$. Thus $A\cap H_1$ is a cone of order one with vertex $\langle a_1\rangle$, and $|A\cap H_1|=q^{m-2}-1$. If $m=3$, we stop this process. Otherwise, we continue by choosing $a_2\in A\cap H_1$ such that $a_1,a_2$ are linearly independent over ${\mathbb F}_q$, and define $H_2=\{x\in {\mathbb F}_{q^m}^\ast\mid {\mathrm{Tr}}r_{q^m/q}(a_2x)=0\}$. Then $H_1\cap H_2$ is a hyperplane of $H_1$. Note that $A\cap H_1$ represents a degenerate quadric (a cone of order one) in $H_1$, and $H_1\cap H_2$ contains the vertex $\langle a_1\rangle$, we see that $A\cap H_1\cap H_2$ is a cone of order two (cf. \cite{games}), and $|A\cap H_1\cap H_2|=q^{m-3}-1$. 
More generally, we define \begin{align*} H_{\ell}=&\,\{x\in {\mathbb F}_{q^m}^\ast\,|\,{\mathrm{Tr}}r_{q^m/q}(xa_\ell)= 0\}, \, \quad a_\ell\in A \cap H_{1}\cap \cdots \cap H_{\ell-1},\\ H_{\ell,0}=&\,\{x\in {\mathbb F}_{q^m}^\ast\,|\,{\mathrm{Tr}}r_{q^m/q}(xa_\ell)\in C_0^{(2,q)}\}, \, \quad a_\ell\in A \cap H_{1}\cap \cdots \cap H_{\ell-1},\\ H_{\ell,1}=&\,\{x\in {\mathbb F}_{q^m}^\ast\,|\,{\mathrm{Tr}}r_{q^m/q}(xa_\ell)\in C_1^{(2,q)}\}, \, \quad a_\ell\in A \cap H_{1}\cap \cdots \cap H_{\ell-1}, \end{align*} where $2\le \ell \le \frac{m-1}{2}$. We can always choose $a_1,\ldots, a_{\frac{m-1}{2}}$ so that they are linearly independent over ${\mathbb F}_q$. The reason is as follows: assume that $a_1,\ldots,a_{\ell-1}$ with $2\le \ell \le \frac{m-1}{2}$ are independent; since $a_1,\ldots,a_{\ell-1} \in A\cap H_{1}\cap \cdots \cap H_{\ell-1}$ and \begin{equation}\label{eq:numnu} |A\cap H_{1}\cap \cdots \cap H_{\ell-1}|=q^{m-\ell}-1, \end{equation} there are at least $m-\ell$ independent elements in $A\cap H_{1}\cap \cdots \cap H_{\ell-1}$ including $a_1,\ldots,a_{\ell-1}$; hence, we can choose an element $a_{\ell} \in A\cap H_{1}\cap \cdots \cap H_{\ell-1}$ so that $a_1,\ldots,a_\ell$ are independent over ${\mathbb F}_q$ whenever $\ell \le \frac{m-1}{2}$. Let $b$ be a fixed element of $(H_1\cap \cdots \cap H_{\frac{m-1}{2}})\setminus A$. Since $H_1\cap \cdots \cap H_{\frac{m-1}{2}}$ and $A\cap H_1\cap \cdots \cap H_{\frac{m-1}{2}}$ correspond to a $\frac{(m-1)}{2}$-flat and a $\frac{(m-3)}{2}$-flat, respectively, in ${\mathrm{PG}}(m-1,q)$, the set $(H_1\cap \cdots \cap H_{\frac{m-1}{2}})\setminus A$ can be represented as \[ (H_1\cap \cdots \cap H_{\frac{m-1}{2}})\setminus A=\{a_1x_1+\cdots+ a_\frac{m-1}{2} x_\frac{m-1}{2}+by\,|\,x_1,\ldots,x_{\frac{m-1}{2}}\in {\mathbb F}_q, y \in {\mathbb F}_{q}^\ast\}. \] Let $T_1=(A_0\cap H_{1,0})\cup (A_1\cap H_{1,1})$ and more generally \[ T_\ell:=(A_0 \cap H_{1} \cap \cdots \cap H_{\ell-1} \cap H_{\ell,0}) \cup (A_1 \cap H_{1} \cap \cdots \cap H_{\ell-1} \cap H_{\ell,1}), \quad \, 2\le \ell \le \frac{m-1}{2}, \] and \[ B:=\{a_1x_1+\cdots+ a_\frac{m-1}{2} x_\frac{m-1}{2}+by\,|\,x_1,\ldots,x_{\frac{m-1}{2}}\in {\mathbb F}_q, y \in C_0^{(2,q)}\}. \] Finally, define \[ D:=\left(\bigcup_{\ell=1}^{(m-1)/2}T_\ell\right)\cup B. \] It is clear that \[ \omega^{N} T_\ell=(A_0 \cap H_{1} \cap \cdots \cap H_{\ell-1} \cap H_{\ell,1}) \cup (A_1 \cap H_{1} \cap \cdots \cap H_{\ell-1} \cap H_{\ell,0}) \] and \[ \omega^NB=\{a_1x_1+\cdots+ a_\frac{m-1}{2} x_\frac{m-1}{2}+by\,|\,x_1,\ldots,x_{\frac{m-1}{2}}\in {\mathbb F}_q, y \in C_1^{(2,q)}\}. \] Hence, $D\cap \omega^{N}D=\emptyset$ and $D\cup \omega^{N}D={\mathbb F}_{q^m}^\ast \setminus A$. Thus, there exists a subset $\widehat{X}\subseteq {\mathbb Z}_{2N}$ such that $D=\bigcup_{t\in \widehat{X}}C_t^{(2N,q^m)}$ and $\widehat{X}\equiv {\mathbb Z}_N\setminus 2^{-1}I \,({\mathrm{mod\,\,}}{N})$. The set $\widehat{X}$ induces a partition of the complement of $2^{-1}I\,({\mathrm{mod\,\,}}{N})$. \begin{theorem}\label{thm:coSing} The set $\widehat{X}$ defined above satisfies the condition~\eqref{eq:3-chara2} of Remark~\ref{rem:chara} (ii). \end{theorem} To prove this theorem, we need the following lemmas. 
\begin{lemma}\label{lem:coSing} It holds that \[ \psi_{{\mathbb F}_{q^m}}(\omega^a T_\ell)= \left\{ \begin{array}{ll} {\frac{(-1)^{i+\epsilon \frac{m-1}{2}}q^{\frac{m-1}{2}}(-1+(-1)^{j+\tau} G_q(\eta'))}{2},} & \mbox{ if $\omega^a \in A_i\cap H_1\cap \cdots \cap H_{\ell-1}\cap H_{\ell,j}$, $i,j=0,1,$ }\\ -\frac{q^{m-\ell-1}(q-1)}{2}, & \mbox{ if $\omega^a \in \langle a_1,\ldots,a_\ell\rangle\setminus \langle a_1,\ldots,a_{\ell-1}\rangle$, }\\ \frac{q^{m-\ell-1}(q-1)^2}{2}, & \mbox{ if $\omega^a \in \langle a_1,\ldots,a_{\ell-1}\rangle$, }\\ 0, & \mbox{ otherwise, } \end{array} \right. \] where $\eta'$ is the quadratic character of ${\mathbb F}_q$ and $\epsilon=0$ or $1$ according as $q\equiv 1\,({\mathrm{mod\,\,}}{4})$ or $q\equiv 3\,({\mathrm{mod\,\,}}{4})$. {Furthermore, $\tau$ is defined by $2\in C_\tau^{(2,q)}$.} \end{lemma} The proof of this lemma is complicated. Therefore, we postpone the proof to the Appendix. \begin{lemma}\label{lem:coSing2} It holds that \begin{align*} \psi_{{\mathbb F}_{q^m}}(\omega^a B)= \left\{ \begin{array}{ll} q^{\frac{m-1}{2}}\frac{(-1+G_q(\eta'))}{2}, & \mbox{ if ${\mathrm{Tr}}r_{q^m/q}(\omega^{a}b)\in C_0^{(2,q)}$, $\omega^a \in (H_1\cap \cdots \cap H_\frac{m-1}{2})\setminus A$, }\\ q^{\frac{m-1}{2}}\frac{(-1-G_q(\eta'))}{2}, & \mbox{ if ${\mathrm{Tr}}r_{q^m/q}(\omega^{a}b)\in C_1^{(2,q)}$, $\omega^a \in (H_1\cap \cdots \cap H_\frac{m-1}{2})\setminus A$, }\\ \frac{q^{\frac{m-1}{2}}(q-1)}{2}, & \mbox{ if $\omega^a \in A \cap H_1\cap \cdots \cap H_\frac{m-1}{2}$,} \\ 0, & \mbox{ otherwise.} \end{array} \right. \end{align*} \end{lemma} {\sigmaspace{-0.0cm}\bf Proof: \,} We compute the character values of $B$: \begin{align*} \psi_{{\mathbb F}_{q^m}}(\omega^a B)=&\,\sum_{x_1,\ldots,x_{\frac{m-1}{2}}\in {\mathbb F}_q} \sum_{y\in C_0^{(2,q)}} \psi_{{\mathbb F}_{q^m}}(\omega^a a_1x_1)\cdots \psi_{{\mathbb F}_{q^m}}(\omega^a a_\frac{m-1}{2}x_\frac{m-1}{2})\psi_{{\mathbb F}_{q^m}}(\omega^a b y)\\ =&\,\left(\prod_{i=1}^{\frac{m-1}{2}}\sum_{x_i\in {\mathbb F}_q} \psi_{{\mathbb F}_{q}}({\mathrm{Tr}}r_{q^m/q}(\omega^a a_i)x_i)\right)\left(\sum_{y\in C_0^{(2,q)}}\psi_{{\mathbb F}_q}({\mathrm{Tr}}r_{q^m/q}(\omega^a b) y)\right). \end{align*} If ${\mathrm{Tr}}r_{q^m/q}(\omega^a a_i)\not=0$ for some $i=1,\ldots,\frac{m-1}{2}$, then it is clear that $\psi_{{\mathbb F}_{q^m}}(\omega^a B)=0$. Otherwise, we have \begin{align*} \psi_{{\mathbb F}_{q^m}}(\omega^a B)=&\, q^{\frac{m-1}{2}}\sum_{y\in C_0^{(2,q)}}\psi_{{\mathbb F}_q}({\mathrm{Tr}}r_{q^m/q}(\omega^a b) y)\\ =&\, \left\{ \begin{array}{ll} \frac{q^{\frac{m-1}{2}}(-1+G_q(\eta'))}{2}, & \mbox{ if ${\mathrm{Tr}}r_{q^m/q}(\omega^{a}b)\in C_0^{(2,q)}$, }\\ \frac{q^{\frac{m-1}{2}}(-1-G_q(\eta'))}{2}, & \mbox{ if ${\mathrm{Tr}}r_{q^m/q}(\omega^{a}b)\in C_1^{(2,q)}$, }\\ \frac{q^{\frac{m-1}{2}}(q-1)}{2}, & \mbox{ if ${\mathrm{Tr}}r_{q^m/q}(\omega^{a}b)=0$.} \end{array} \right. \end{align*} Since ${\mathrm{Tr}}r_{q^m/q}(\omega^a a_1)=\cdots ={\mathrm{Tr}}r_{q^m/q}(\omega^a a_\frac{m-1}{2})={\mathrm{Tr}}r_{q^m/q}(\omega^a b)=0$ if and only if $\omega^a \in A \cap H_1\cap \cdots \cap H_\frac{m-1}{2}$, the assertion of the lemma follows. { $\square$} \sigmaspace{0.3cm} We are now ready to prove Theorem~\ref{thm:coSing}. 
{\bf Proof of Theorem~\ref{thm:coSing}:} \, From Lemmas~\ref{lem:coSing} and \ref{lem:coSing2}, we have \begin{align*} \psi_{{\mathbb F}_{q^m}}(\omega^a\bigcup_{t\in \widehat{X}}C_t^{(2N,q^m)})=&\,\psi_{{\mathbb F}_{q^m}}(\omega^a D)=\sum_{i=1}^{\frac{m-1}{2}}\psi_{{\mathbb F}_{q^m}}(\omega^a T_\ell) +\psi_{{\mathbb F}_{q^m}}(\omega^a B)\\ =&\, \left\{ \begin{array}{ll} {\frac{(-1)^{i+\epsilon \frac{m-1}{2}}q^{\frac{m-1}{2}}(-1+(-1)^{j+\tau} G_q(\eta'))}{2},}, & \mbox{ if $\omega^a \in A_i\cap H_1\cap \cdots \cap H_{\ell-1}\cap H_{\ell,j}$ }\\ & \mbox{ \hspace{1cm}for $\ell=1,\ldots,\frac{m-1}{2}$, $i,j=0,1$, }\\ \frac{q^{\frac{m-1}{2}}(-1+(-1)^i G_q(\eta'))}{2}, & \mbox{ if $\omega^a\in (H_1\cap \cdots \cap H_{\frac{m-1}{2}})\setminus A$ and }\\ & \mbox{ \hspace{1cm}${\mathrm{Tr}}r_{q^m/q}(\omega^a b)\in C_i^{(2,q)}$ for $i=0,1,$}\\ 0, & \mbox{ if $\omega^a \in A$. } \end{array} \right. \end{align*} On the other hand, since $\psi_{{\mathbb F}_{q^m}}(\omega^a \bigcup_{t\in 2^{-1}I}C_t^{(N,q^m)})=\psi_{{\mathbb F}_{q^m}}(\omega^a D)+\psi_{{\mathbb F}_{q^m}}(\omega^{a+N} D)$, we have \[ \psi_{{\mathbb F}_{q^m}}(\omega^a \bigcup_{t\in 2^{-1}I}C_t^{(N,q^m)})\\ = \left\{ \begin{array}{ll} -(-1)^{i+\epsilon \frac{m-1}{2}}q^{\frac{m-1}{2}}, & \mbox{ if $\omega^a \in A_i\setminus (H_1\cap \cdots \cap H_{\frac{m-1}{2}})$, $i=0,1,$}\\ -q^{\frac{m-1}{2}}, & \mbox{ if $\omega^a\in (H_1\cap \cdots \cap H_{\frac{m-1}{2}})\setminus A$.}\\ 0, & \mbox{ if $\omega^a \in A$.} \end{array} \right. \] Hence, we have \begin{equation*} 2\psi_{{\mathbb F}_{q^m}}(\omega^a\bigcup_{t\in \widehat{X}}C_t^{(2N,q^m)})-\psi_{{\mathbb F}_{q^m}}(\omega^a \bigcup_{t\in 2^{-1}I}C_t^{(N,q^m)})= \left\{ \begin{array}{ll} 0, & \mbox{ if $a\in 2^{-1}I\,({\mathrm{mod\,\,}}{N})$,}\\ \pm q^{\frac{m-1}{2}}G_q(\eta'), & \mbox{ otherwise.} \end{array} \right. \end{equation*} Thus, we conclude that $\widehat{X}$ satisfies the condition \eqref{eq:3-chara2} in Remark~\ref{rem:chara} (ii). { $\square$} \sigmaspace{0.3cm} As corollaries, we obtain the following. \begin{corollary}\label{cor:aff1} For a prime power $q\equiv 3\,({\mathrm{mod\,\,}}{4})$ and an odd integer $m>1$, there exists a $(q^{2m},r(q^{m}+1),-q^m+r^2+3 r,r^2+ r)$ strongly regular Cayley graph with $r=q^{m-1}(q-1)/2$. \end{corollary} {\sigmaspace{-0.0cm}\bf Proof: \,} By Lemma~\ref{rem:quad-}, Remark~\ref{rem:chara}~(i) and Theorem~\ref{thm:coSing}, the corollary now follows. { $\square$} \begin{remark} {\em The strongly regular graph obtained in Corollary~\ref{cor:aff1} has the same parameter as the affine polar graph of elliptic type. Let ${\mathcal{G}}amma $ be the strongly regular graph of Corollary~\ref{cor:aff1} with $q=3$ and $m=3$. We checked by using a computer that ${\mathcal{G}}amma$ is {\bf not} isomorphic to the affine polar graph ${\mathrm{AP}}^-$ with the same parameters. In particular, the size of the full automorphism group of ${\mathcal{G}}amma$ (resp. ${\mathrm{AP}}^-$) is $2^2\cdot 3^7 \cdot 7$ (resp. $2^{10}\cdot 3^{12} \cdot 5\cdot 7$). } \end{remark} \begin{corollary}\label{cor:aff2} For a prime power $q\equiv 1\,({\mathrm{mod\,\,}}{4})$ and an odd integer $m>1$ such that $\gcd{(q-1,\frac{q^m-1}{q-1})}=1$, there exists a $(q^{2m},r(q^m-1),q^m+r^2-3r,r^2-r)$ strongly regular Cayley graph, where $r=q^{m-1}(q-1)/2$. \end{corollary} {\sigmaspace{-0.0cm}\bf Proof: \,} By Lemma~\ref{mainconstruction2+}, Remark~\ref{rem:chara}~(ii) and Theorem~\ref{thm:coSing}, the corollary now follows. 
{ $\square$} \begin{remark} {\em The strongly regular graph obtained in Corollary~\ref{cor:aff2} has the same parameters as the affine polar graph of hyperbolic type. Let ${\mathcal{G}}amma$ be the strongly regular graph of Corollary~\ref{cor:aff2} with $q=5$ and $m=3$. We checked by using a computer that ${\mathcal{G}}amma$ is {\bf not} isomorphic to the affine polar graph ${\mathrm{AP}}^+$ with the same parameters. In particular, the size of the full automorphism group of ${\mathcal{G}}amma$ (resp. ${\mathrm{AP}}^+$) is $2^3\cdot 5^6 \cdot 31$ (resp. $2^{11} \cdot 3^2\cdot 5^{12}\cdot 13 \cdot 31$). } \end{remark} \section*{Appendix: Proof of Lemma~\ref{lem:coSing}} In this appendix, we give a proof of Lemma~\ref{lem:coSing}. \sigmaspace{0.1in} \noindent{\bf Proof of Lemma~\ref{lem:coSing}.} \, For $j=0$ or $1$, the characteristic function $g_{A,j}$ of $\{x\in {\mathbb F}_{q^m}\,|\,{\mathrm{Tr}}r_{q^m/q}(x^2)\in C_j^{(2,q)}\}$ is given by \begin{equation}\label{eq:ch1} g_{A,j}(x)=\frac{1}{q}\sum_{d\in {\mathbb F}_q}\sum_{s\in C_j^{(2,q)}}\psi_{{\mathbb F}_{q^m}}(dx^2)\psi_{{\mathbb F}_{q}}(-ds). \end{equation} Similarly, the characteristic functions $g_{a_\ell}$ and $g_{a_\ell,j}$ of $\{x\,|\,{\mathrm{Tr}}r_{q^m/q}(xa_\ell)=0\}$ and $\{x\,|\,{\mathrm{Tr}}r_{q^m/q}(xa_\ell)\in C_j^{(2,q)}\}$ are, respectively, given by \begin{equation}\label{eq:ch2} g_{a_{\ell}}(x)=\frac{1}{q}\sum_{d\in {\mathbb F}_q}\psi_{{\mathbb F}_{q^m}}(dxa_\ell) \end{equation} and \begin{equation}\label{eq:ch3} g_{a_{\ell},j}(x)=\frac{1}{q}\sum_{d\in {\mathbb F}_q}\sum_{s\in C_j^{(2,q)}}\psi_{{\mathbb F}_{q^m}}(dxa_\ell)\psi_{{\mathbb F}_{q}}(-ds), \, \quad j=0,1. \end{equation} We compute the character values $\psi_{{\mathbb F}_{q^m}}(\omega^a T_\ell)$. By the definition of $T_\ell$, we have \begin{equation}\label{eq:cha4} \psi_{{\mathbb F}_{q^m}}(\omega^a T_\ell)=\sum_{j=0,1}\sum_{x\in {\mathbb F}_{q^m}}g_{A,j}(x)g_{a_1}(x)\cdots g_{a_{\ell-1}}(x)g_{a_{\ell},j}(x)\psi_{{\mathbb F}_{q^m}}(\omega^a x) \end{equation} By substituting \eqref{eq:ch1}, \eqref{eq:ch2} and \eqref{eq:ch3} into \eqref{eq:cha4}, we have \begin{equation} \psi_{{\mathbb F}_{q^m}}(\omega^a T_\ell) =\frac{1}{q^{\ell+1}}\sum_{x\in {\mathbb F}_{q^m}}\sum_{j=0,1}\sum_{d_0,d_1,\ldots,d_{\ell}\in {\mathbb F}_q} \psi_{{\mathbb F}_{q^m}}(d_0x^{2}+(\omega^a+\sum_{i=1}^\ell d_ia_i)x) \psi_{{\mathbb F}_q}(d_0 C_j^{(2,q)})\psi_{{\mathbb F}_q}(d_{\ell} C_j^{(2,q)}). \label{eq:compe} \end{equation} We compute the right hand side of \eqref{eq:compe} by dividing into the two partial sums: $\Sigma_1$ and $\Sigma_2$, where $\Sigma_1$ is the contribution of the summands with $d_0=0$ and $\Sigma_{2}$ is the contribution of the summands with $d_0\not=0$. Thus, $\psi_{{\mathbb F}_{q^m}}(\omega^a T_\ell)=\Sigma_1+\Sigma_{2}$. It is clear that \begin{align*} \Sigma_1=&\,\frac{q-1}{2q^{\ell+1}}\sum_{x\in {\mathbb F}_{q^m}}\sum_{j=0,1}\sum_{d_1,\ldots,d_{\ell}\in {\mathbb F}_q} \psi_{{\mathbb F}_{q^m}}((\omega^a+\sum_{i=1}^\ell d_ia_i)x) \psi_{{\mathbb F}_q}(d_{\ell} C_j^{(2,q)})\\ =&\,\left\{ \begin{array}{ll} \frac{-q^{m-\ell -1}(q-1)}{2}, & \mbox{ if $\omega^a\in \langle a_1,\ldots,a_\ell\rangle\setminus \langle a_1,\ldots,a_{\ell-1}\rangle$,}\\ \frac{q^{m-\ell-1}(q-1)^2}{2}, & \mbox{ if $\omega^a\in \langle a_1,\ldots,a_{\ell-1}\rangle$,}\\ 0, & \mbox{ otherwise.} \end{array} \right. \end{align*} We next consider the partial sum $\Sigma_{2}$. 
By Theorem~\ref{prop:charaadd}, \begin{equation}\label{eq:outin} \Sigma_{2}=\frac{G_{q^m}(\eta)}{q^{\ell+1}}\sum_{j=0,1}\sum_{d_0\in {\mathbb F}_q^\ast}\sum_{d_1,\ldots,d_{\ell}\in {\mathbb F}_q} \psi_{{\mathbb F}_{q^m}}(-4^{-1}d_0^{-1}(\omega^a+\sum_{i=1}^\ell d_ia_i)^2)\eta(d_0) \psi_{{\mathbb F}_q}(d_0 C_j^{(2,q)}) \psi_{{\mathbb F}_q}(d_{\ell} C_j^{(2,q)}), \end{equation} where $\eta$ is the quadratic character of ${\mathbb F}_{q^m}$. Since ${\mathrm{Tr}}r_{q^m/q}(a_{i}a_j)=0$ for $i,j\in \{1,\ldots,\ell\}$, we have \[ {\mathrm{Tr}}r_{q^m/q}((\omega^a+\sum_{i=1}^\ell d_ia_i)^2)={\mathrm{Tr}}r_{q^m/q}(\omega^{2a}+ {2}\omega^a\sum_{i=1}^\ell d_ia_i). \] Continuing from \eqref{eq:outin}, we have \[ \Sigma_{2}=\frac{G_{q^m}(\eta)}{q^{\ell+1}}\sum_{j=0,1}\sum_{h=0,1}(-1)^h\psi_{{\mathbb F}_q}(C_{j+h}^{(2,q)})\sum_{d_1,\ldots,d_{\ell}\in {\mathbb F}_q} \psi_{{\mathbb F}_{q}}(-{\mathrm{Tr}}r_{q^m/q}(\omega^{2a}+ {2}\omega^a\sum_{i=1}^\ell d_ia_i)C_h^{(2,q)}) \psi_{{\mathbb F}_q}(d_{\ell} C_j^{(2,q)}). \] If ${\mathrm{Tr}}r_{q^m/q}(\omega^a a_i)\not=0$ for some $i=1,2,\ldots,\ell-1$, it is clear that $\Sigma_{2}=0$. Furthermore, if ${\mathrm{Tr}}r_{q^m/q}(\omega^a a_\ell)=0$, it also holds that $\Sigma_{2}=0$. Thus, we assume that ${\mathrm{Tr}}r_{q^m/q}(\omega^a a_i)=0$ for all $i =1,2,\ldots,\ell-1$ and ${\mathrm{Tr}}r_{q^m/q}(\omega^a a_\ell)\not=0$. In this case, we have \begin{equation}\label{eq:P2} \Sigma_{2}=\frac{G_{q^m}(\eta)}{q^2}\sum_{j=0,1}\sum_{h=0,1}(-1)^h\psi_{{\mathbb F}_q}(C_{j+h}^{(2,q)})\sum_{d_{\ell}\in {\mathbb F}_q} \psi_{{\mathbb F}_{q}}(-{\mathrm{Tr}}r_{q^m/q}(\omega^{2a}+ {2}\omega^ad_\ell a_\ell)C_h^{(2,q)}) \psi_{{\mathbb F}_q}(d_{\ell} C_j^{(2,q)}). \end{equation} We compute the right hand side of \eqref{eq:P2} by dividing into the two partial sums: $\Sigma_{2.0}$ and $\Sigma_{2,1}$, where $\Sigma_{2,0}$ is the contribution of the summands with $d_\ell=0$ and $\Sigma_{2,1}$ is the contribution of the summands with $d_\ell\not=0$. Thus, $\Sigma_2=\Sigma_{2,0}+\Sigma_{2,1}$. By \eqref{eq:Gaussquad}, we have \begin{align*} \Sigma_{2,0}=&\,\frac{(q-1)G_{q^m}(\eta)}{2q^2}\sum_{j=0,1}\sum_{h=0,1}(-1)^h\psi_{{\mathbb F}_q}(C_{j+h}^{(2,q)}) \psi_{{\mathbb F}_{q}}(-{\mathrm{Tr}}r_{q^m/q}(\omega^{2a})C_h^{(2,q)})\\ =&\, -\frac{(q-1)G_{q^m}(\eta)}{2q^2}\left\{ \begin{array}{ll} G_q(\eta'), & \mbox{ if $-{\mathrm{Tr}}r_{q^m/q}(\omega^{2a})\in C_0^{(2,q)}$,}\\ -G_q(\eta'), & \mbox{ if $-{\mathrm{Tr}}r_{q^m/q}(\omega^{2a})\in C_1^{(2,q)}$,}\\ 0, & \mbox{ if ${\mathrm{Tr}}r_{q^m/q}(\omega^{2a})=0$,} \end{array} \right. \end{align*} where $\eta'$ is the quadratic character of ${\mathbb F}_q$. 
On the other hand, by \eqref{eq:Gaussquad}, \begin{align*} \Sigma_{2,1} =&\,\frac{G_{q^m}(\eta)}{q^2}\sum_{j,h,k=0,1}(-1)^h \psi_{{\mathbb F}_q}(C_{j+h}^{(2,q)}) \psi_{{\mathbb F}_{q}}(-{\mathrm{Tr}}r_{q^m/q}(\omega^{2a}) {C_h^{(2,q)}})\psi_{{\mathbb F}_q}(-2{\mathrm{Tr}}r_{q^m/q}(\omega^a a_\ell)C_{h+k}^{(2,q)}) \psi_{{\mathbb F}_q}(C_{j+k}^{(2,q)})\\ =&\,\frac{G_{q^m}(\eta)}{q^2}\left\{ \begin{array}{ll} \frac{G_{q}(\eta')(-1+G_q(\eta')^3)}{2}, & \mbox{ if $-{\mathrm{Tr}}r_{q^m/q}(\omega^{2a})\in C_0^{(2,q)}$, $-2{\mathrm{Tr}}r_{q^m/q}(\omega^aa_\ell)\in C_0^{(2,q)}$,}\\ \frac{-G_{q}(\eta')(1+G_q(\eta')^3)}{2}, & \mbox{ if $-{\mathrm{Tr}}r_{q^m/q}(\omega^{2a})\in C_0^{(2,q)}$, $-2{\mathrm{Tr}}r_{q^m/q}(\omega^a a_\ell)\in C_1^{(2,q)}$,}\\ \frac{-G_{q}(\eta')(-1+G_q(\eta')^3)}{2}, & \mbox{ if $- {\mathrm{Tr}}r_{q^m/q}(\omega^{2a})\in C_1^{(2,q)}$, $-2{\mathrm{Tr}}r_{q^m/q}(\omega^aa_\ell)\in C_0^{(2,q)}$,}\\ \frac{G_{q}(\eta')(1+G_q(\eta')^3)}{2}, & \mbox{ if $-{\mathrm{Tr}}r_{q^m/q}(\omega^{2a})\in C_1^{(2,q)}$, $-2{\mathrm{Tr}}r_{q^m/q}(\omega^a a_\ell)\in C_1^{(2,q)}$,}\\ 0, & \mbox{ if ${\mathrm{Tr}}r_{q^m/q}(\omega^{2a})=0$.} \end{array} \right. \end{align*} Noting that $G_{q^m}(\eta)=G_q(\eta')^m$ and $G_{q}(\eta')^2=(-1)^\epsilon q$, we have \begin{align*} \Sigma_2=&\,\Sigma_{2,0}+\Sigma_{2,1}\\ =&\,(-1)^{\epsilon \frac{m+1}{2}}q^{\frac{m-1}{2}}\left\{ \begin{array}{ll} \frac{-1+(-1)^\epsilon G_q(\eta')}{2}, & \mbox{ if $-{\mathrm{Tr}}r_{q^m/q}(\omega^{2a})\in C_0^{(2,q)}$, $-2{\mathrm{Tr}}r_{q^m/q}(\omega^a a_\ell)\in C_0^{(2,q)}$,}\\ \frac{-1-(-1)^\epsilon G_q(\eta')}{2}, & \mbox{ if $-{\mathrm{Tr}}r_{q^m/q}(\omega^{2a})\in C_0^{(2,q)}$, $-2{\mathrm{Tr}}r_{q^m/q}(\omega^aa_\ell)\in C_1^{(2,q)}$,}\\ \frac{1-(-1)^\epsilon G_q(\eta')}{2}, & \mbox{ if $-{\mathrm{Tr}}r_{q^m/q}(\omega^{2a})\in C_1^{(2,q)}$, $-2{\mathrm{Tr}}r_{q^m/q}(\omega^aa_\ell)\in C_0^{(2,q)}$,}\\ \frac{1+(-1)^\epsilon G_q(\eta')}{2}, & \mbox{ if $-{\mathrm{Tr}}r_{q^m/q}(\omega^{2a})\in C_1^{(2,q)}$, $-2{\mathrm{Tr}}r_{q^m/q}(\omega^{a}a_\ell)\in C_1^{(2,q)}$,}\\ 0, & \mbox{ if ${\mathrm{Tr}}r_{q^m/q}(\omega^{2a})=0$.} \end{array} \right. \end{align*} This completes the proof of the lemma. { $\square$} \end{document}
\begin{document} \title{$SU(1,1)$ covariant $s$-parametrized maps} \author{Andrei~B~Klimov$^{1,*}$, Ulrich~Seyfarth$^{2}$, Hubert~de Guise$^{3}$ and Luis~L~S\'{a}nchez-Soto$^{2,4}$} \begin{abstract} We propose a practical recipe to compute the ${s}$-parametrized maps for systems with $SU(1,1)$ symmetry using a connection between the ${Q}$ and ${P} $ symbols through the action of an operator invariant under the group. The particular case of the self-dual (Wigner) phase-space functions, defined on the upper sheet of the two-sheet hyperboloid (or, equivalently, inside the Poincar\'{e} disc) are analyzed. \end{abstract} \address{$^{1}$ Departamento de F\'{\i}sica, Universidad de Guadalajara, 44420~Guadalajara, Jalisco, Mexico} \address{$^{2}$ Max-Planck-Institut f\"{u}r die Physik des Lichts, Staudtstra\ss e 2, 91058~Erlangen, Germany} \address{$^{3}$ Department of Physics, Lakehead University, Thunder Bay, Ontario P7B 5E1, Canada} \address{$^{4}$ Departamento de \'{O}ptica, Facultad de F\'{\i}sica, Universidad Complutense, 28040~Madrid, Spain} \ead{[email protected]} \begin{indented} \item[\today] \end{indented} \noindent\textit{Keywords}: $SU(1,1)$, Wigner function, Phase-space methods \submitto{\JPA} \eqnobysec \section{Introduction} Phase-space approaches often unveil hidden facets of quantum systems and shed light on their underlying kinematical and dynamical properties~\cite{Hillery:1984aa,Lee:1995aa,Schroek:1996aa,Ozorio:1998aa,Schleich:2001aa,QMPS:2005aa,Polkovnikov:2010aa,Weinbub:2018aa}. This type of analysis is now common in many areas, especially for systems with Heisenberg-Weyl~\cite{Glauber:1963aa,Sudarshan:1963aa,Agarwal:1968aa,Cahill:1969aa,Agarwal:1970aa,Gadella:1995aa} or ${SU}(2)$ symmetries \cite{Agarwal:1981aa,Varilly:1989aa}, and has been extended to other dynamical groups \sugg{such as} ${SU}(N)$~\cite{Klimov:2010aa,Tilma:2016aa} or ${E}(2)$~\cite{Gadella:1991aa,Nieto:1998cr,Plebanski:2000fk,Kastrup:2006cr,Rigas:2011by,Kastrup:2016aa}. Following the pioneering work of Moyal~\cite{Moyal:1949aa}, \sugg{Groenewold~\cite{Groenewold:1946aa}} and Stratonovich~\cite{Stratonovich:1956aa}, the states of a quantum system in the Hilbert space $\mathcal{H}$ that carries an irreducible representation (irrep) $\Lambda $ of a dynamical group $G$ can be mapped into functions of a classical phase-space $\mathcal{M}$, wherein $G$ acts transitively. The structure of the manifold $\mathcal{M}$ is closely related to a set of coherent states $\{|\zeta \rangle \}$ labelled with phase-space coordinates $\zeta \in \mathcal{M}$~\cite{Onofri:1975aa}. When coherent states can be constructed as translates of a fixed cyclic vector \cite{Perelomov:1986ly,Zhang:1990aa,Gazeau:2009aa} two mutually dual maps are naturally defined: they put in correspondence each operator $\hat{A}$ acting in the Hilbert space of the quantum system, with the so-called ${Q}$ and ${P}$ symbols, respectively, defined as~\cite{Husimi:1940aa,Kano:1965aa,Berezin:1975mw} \begin{equation} {Q}_{A}(\zeta )=\langle \zeta |\hat{A}|\zeta \rangle \,,\qquad \qquad \hat{A} =\int \rmd \mu (\zeta )\;P_{A}(\zeta )\;|\zeta \rangle \langle \zeta |\,, \label{QP} \end{equation} where $d\mu (\zeta )$ is the normalized invariant measure on $\mathcal{M}$. These symbols allow the computation of average values as a convolution \begin{equation} \Tr(\hat{A}\hat{\varrho})=\int \rmd\mu (\zeta )\,P_{A}(\zeta )Q_{\varrho }(\zeta )\,, \label{av QP} \end{equation} with $\hat{\varrho}$ the density operator for the system. 
\sugg{In theory, $Q$- and $P$-maps are both exact and contain complete information about the system. In practice, however, they are not always suitable for the analysis of quantum correlations. In particular, the $P$-symbols may become singular, whereas the $Q$-symbols are too smooth and do not exhibit the full quantum interference pattern. Moreover, in the semiclassical limit, the description of the dynamics in terms of the $P$- and $Q$-functions is not always appropriate: the corrections are of first order in the expansion parameter (whose form is dictated by the symmetry of the system), which may lead to a considerable reduction of the timescale over which the semiclassical approximation is valid.} \sugg{The Wigner map, $\hat{A} \leftrightarrow W_{A}(\zeta )$, is free of these difficulties. It satisfies} \begin{equation} \Tr(\hat{A} \hat{\varrho}) = \int \rmd\mu (\zeta )\; W_{A}(\zeta )\,W_{\varrho}(\zeta ) \,. \label{sdW} \end{equation} The Wigner symbol of the density matrix (the so-called Wigner function) is not singular (for physical states), and has been shown to be very useful for analysis of the quantum states both in the deep quantum and semiclassical limits~\cite{Klimov:2017aa,Valtierra:2017aa}. More generally one can introduce a \sugg{parametrized family} of trace-like maps generated by kernels $\hat{w}^{(s)}\left( \zeta \right) $ \begin{equation} {W}_{A}^{(s)}(\zeta )=\Tr [ \hat{A} \, \hat{w}^{(s)}( \zeta ) ]\,, \label{Ws} \end{equation} where the parameter $s$ has an explicit interpretation in terms of ordering for the Heisenberg-Weyl algebra, with $\pm 1$, $0$ associated with ${P}$-, ${Q}$- and Wigner maps respectively~\cite{Cahill:1969aa}. \sugg{The same kind of mapping exists for higher symmetries, albeit the parameter $s$ is basically considered as a \emph{duality} parameter, in the sense that the average values are computed by integrating $s$- and $-s$symbols of the observable and the density matrix; that is,} \begin{equation} \langle \hat{A}\rangle =\int \rmd\mu (\zeta ) \; W_{A}^{(s)}(\zeta )\, W_{\varrho}^{(-s)}(\zeta ) = \int \rmd\mu (\zeta )\; W_{A}^{(-s)}(\zeta )\,W_{\varrho}^{(s)}(\zeta ) \, . \label{sW} \end{equation} \sugg{The Wigner function corresponds to $s=0$, so it is self-dual dual in this context. Unfortunately, the explicit construction of $s$-ordered maps and, especially, of the Wigner map is not as transparent as for the ${Q}$ and ${P}$ maps.} When the group $G$ is compact, its \sugg{unitary representations are finite dimensional} and the kernels $\hat{w}^{(s)}$ can be expanded in a basis of tensor operators $\{\hat{T}_{\nu}^{\lambda}\}$~\cite{Fano:1959ly} \begin{equation} \hat{w}^{(s)}(\zeta ) = \sum_{\lambda ,\nu} w_{\lambda \nu}^{(s)}(\zeta ) \, \hat{T}_{\nu}^{\lambda} \, , \end{equation} where $\lambda $ is a representation label appearing in the decomposition \begin{equation} \Lambda \otimes \Lambda^{\ast} = \oplus n_{\lambda} \lambda \, , \label{eq:LambdaLambdastar} \end{equation} \sugg{where $n_{\lambda}$ is the number of times the irrep $\lambda$ appears in the decomposition and} the expansion coefficients $w_{\lambda \nu}^{(s)}(\zeta )$ can be expressed in terms of harmonic functions and appropriate Clebsch-Gordan coefficients~\cite{Brif:1999kx}. When the Hilbert space of states is infinite-dimensional, delicate questions of convergence must be given careful attention, especially as the maps involve traces over infinitely many basis states of products of operators that can be formally represented by infinite-dimensional matrices. 
In particular, the decomposition of the product on the left hand side of (\mathop{\mathrm{Re}} \nolimitsf{eq:LambdaLambdastar}) is non longer a direct sum but can include a direct integral of representations of the continuous type~\cite{Lindblad:1970aa,Repka:1978aa} making the construction of the irreducible tensor operators significantly more laborious and quite nontrivial~\cite{Holman:1966aa,Wang:1970aa}. In the cases of locally flat classical phase-space corresponding to, e.g., the underlying $H(1)$ and $E(2)$ symmetries, sets of $s$-ordered map can be constructed ``by hand", in order to satisfy the basic requirements of normalization, invertibility and covariance under group action. Except for the previous examples of noncompact symmetries and to the best of our knowledge, no self-dual maps from operators acting irreducibly in an infinite-dimensional Hilbert space into Wigner-like functions satisfying the Moyal-Stratanovich postulates have been discussed in details, even if applications of $SU(1,1)$ $Q$- and $P$- functions were discussed in \cite{Orowski:1990aa,Brif:1997aa,Kastrup:2003aa,Seyfarth:2020aa}. In this paper we remedy this situation: we present practical expressions for the ${s}$-ordered Wigner functions of systems with ${SU}(1,1)$ symmetry using a connection between the ${Q}$ and ${P}$ maps through the action of an operator invariant under the group. Notably, a self-dual mapping kernel is obtained as a ``half-way" operator between $\hat{w}^{(+)}$ and $\hat{w}^{(-)} $~\cite{Figueroa:1990aa}. The phase-space functions are defined on the upper sheet of the two-sheet hyperboloid or equivalently in the interior of the Poincar\'{e} disc. Beyond this solution to the technical problem of constructing ${SU}(1,1)$ Wigner functions, there are several reasons to investigate ${SU}(1,1)$ states in phase-space: ${SU}(1,1)$ plays a pivotal role in connection with what can be called two-photon effects~\cite{Wodkiewicz:1985aa,Gerry:1985aa,Gerry:1991aa,Gerry:1995kq}. The topic is experiencing a revival in popularity due to the recent realization of a nonlinear SU(1,1) interferometer~\cite{Jing:2011aa, Hudelist:2014aa}. According to the proposal of Yurke \textit{et al.}~\cite{Yurke:1986yg}, this device would allow one to improve the phase measurement sensitivity in a remarkable manner~\cite{Chekhova:2016aa,Li:2016aa}. In addition, the dynamics of such states strongly depends on the distinct possible plane sections of the hyperboloid \cite{Banerji:1999aa}. \section{General setup for ${SU}(1,1)$} \subsection{Coherent states and the coset space ${SU}(1,1)/U(1)$} The Lie algebra $\mathfrak{su}(1,1)$ is spanned by the operators $\{\hat{K}_0,\hat{ K}_1,\hat{K}_2\}$ with commutation relations \begin{equation} [\hat{K}_1,\hat{K}_2] = - \rmi \hat{K}_0\, , \qquad [ \hat{K}_2,\hat{K}_0] = + \rmi \hat{K}_1 \, , \qquad [ \hat{K}_0,\hat{K}_1] = + \rmi \hat{K}_2\, . \end{equation} We consider first a Hilbert space $\mathcal{H}$ that carries an irrep labelled by the Bargman index $k=\frac{1}{2},1,\frac{3}{2},2,\ldots $ of the group $G={SU}(1,1)$; the representation $k$ is in the positive discrete series. This explicitly excludes the single-mode even and odd harmonic oscillator states, which belong to the $k=\frac{1}{4}$ and $\frac{3}{4}$ irreps, respectively. 
\sugg{States in the irrep $k$ satisfy} \begin{equation} \hat{K}_{0}|k,k+m\rangle = (k+m)|k,k+m\rangle \,, \qquad \hat{K}_{-}|k,k\rangle = 0 \,, \label{K0action} \end{equation} \sugg{where $m=0,1,\ldots$ and $\hat{K}_{\pm}=\pm \rmi(\hat{K}_{1}\pm \rmi\hat{K}_{2})$.} Let $H \subset G$ be the $U(1)$ subgroup of $G$ that leaves $|k,k\rangle $ invariant, up to a phase; $H$ is generated by exponentiating $\hat{K}_{0}$. The ${SU}(1,1)$ coherent states for the positive discrete series are labelled by points $\zeta $ in the interior of the Poincar\'{e} disc, $|\zeta |<1$, $\{|\zeta \rangle \in \mathcal{H},\zeta \in \mathcal{M=}{SU}(1,1)/U(1)\}$ and constructed as orbits of the cyclic vector $|k,k\rangle $~\cite{Perelomov:1986ly}, \begin{equation} |\zeta \rangle = \hat{D}(\zeta )|k,k\rangle , \qquad \hat{D}(\zeta )= \rme^{\zeta \hat{K}_{+}} \rme^{-\ln (1-|\zeta |^{2})\hat{K}_{0}} \rme^{-\zeta ^{\ast}\hat{K}_{-}}\,. \label{CS} \end{equation} The unit disc can be lifted to the upper sheet of the two-sheeted hyperboloid by inverse stereographic map; this hyperboloid is our classical phase space, where points are parametrized by the hyperbolic Bloch vector \begin{equation} \mathbf{n}= (\cosh \tau ,\sinh \tau \cos \phi ,\sinh \tau \sin \phi )^{\top} \,, \label{BV} \end{equation} and where $\tau $ and $\phi $ are related to the complex number $\zeta $ through $\zeta =\tanh (\tau /2 ) \rme^{-\rmi\phi}$. \sugg{The symplectic 2-form on the hyperboloid~\cite{Perelomov:1986ly}} \begin{equation} \rmd \omega = \sinh \tau \, \rmd\tau \wedge \rmd\phi , \end{equation} \sugg{induces the following Poisson bracket} \begin{equation} \{ f,g \} = \frac{1}{\sinh \tau} \left( \frac{\partial f}{\partial \tau} \frac{\partial g}{\partial \varphi} - \frac{\partial f}{\partial \varphi} \frac{\partial g}{\partial \tau}\right) \, , \label{PB} \end{equation} \sugg{where $f(\tau ,\phi )$ and $g(\tau ,\phi )$ are smooth functions. In particular, the components $\mathbf{n}=(n_{0},n_{1},n_{2})^{\top}$ of the Bloch vector (\mathop{\mathrm{Re}} \nolimitsf{BV}) satisfy the relations} \begin{equation} \{n_{1},n_{2}\} = - n_{0} \, , \qquad \{n_{2},n_{0}\} = n_{1} \, \qquad \{n_{0}, n_{1}\} = n_{2} \, . \end{equation} In the basis $\{|k,k+m\rangle : m=0,1,\ldots\}$ the coherent states can be expanded as \begin{equation} |\zeta \rangle = (1-|\zeta |^{2})^{k}\sum_{m=0}^{\infty} \left [ \frac{\Gamma(m+2k)}{m!\Gamma (2k)}\right ]^{1/2} \zeta^{m}|k,k+m\rangle \, , \label{cs11} \end{equation} and resolve the identity for $k>1/2$ \begin{equation} \hat{\leavevmode\hbox{\small1\normalsize\kern-.33em1}} = \frac{2k-1}{\pi} \int \rmd \mu (\zeta )\, |\zeta \rangle \langle \zeta | \,, \label{Nk} \end{equation} (for $k=1/2$, the limit $k\rightarrow 1/2$ \sugg{must be taken in the final expressions}), where the invariant measure is given by \begin{equation} \rmd \mu (\zeta ) = \frac{\rmd^{2}\zeta}{(1-|\zeta |^{2})^{2}} = \frac{1}{4} \sinh \tau \rmd\tau \rmd \phi , \qquad \rmd^{2}\zeta =\rmd \mathop{\mathrm{Re}} \nolimits \zeta \; \rmd \mathop{\mathrm{Im}} \nolimits \zeta \, . 
\label{eq:invmeas} \end{equation} ${SU}(1,1)$ coherent states are not orthogonal; their overlap in the discrete irrep $k$ is given by \begin{equation} |\langle \zeta |\zeta^{\prime}\rangle |^{2} = \left ( \frac{1+\mathbf{n} \cdot \mathbf{n}^{\prime}}{2}\right)^{-2k} \, , \end{equation} where $\mathbf{n}\cdot \mathbf{n}^{\prime}$ is a pseudo-scalar product on the hyperboloid, \begin{equation} \mathbf{n} \cdot \mathbf{n}^{\prime}=\cosh \tau \cosh \tau^{\prime} -\cos (\phi -\phi^{\prime})\sinh \tau \sinh \tau^{\prime} \equiv \cosh \xi \, . \label{nn'} \end{equation} \subsection{The kernels} The $SU(1,1)$ quantization kernels $\hat{w}^{(s)}(\zeta )$, \sugg{generating dual maps according to (\mathop{\mathrm{Re}} \nolimitsf{sW}), are operators} labelled by points of $\mathcal{M=}{SU}(1,1)/U(1)$. Their explicit form depends on the representation index $k$, but we will not explicitly write this dependence to avoid burdening the notation. The \emph{boundary} kernels $\hat{w}^{(\pm)}(\zeta )$ define direct and inverse projections on the set of coherent states (\mathop{\mathrm{Re}} \nolimitsf{cs11})~\cite{Kastrup:2006cr}: \begin{eqnarray} \hat{A}=\frac{2k-1}{\pi}\int \rmd\mu (\zeta ){P}_{A}(\zeta )\, |\zeta \rangle \langle \zeta |\,, \nonumber \\ \\ {P}_{A}(\zeta )=\Tr [ \hat{A} \hat{w}^{(+)}(\zeta )]\,, \qquad \qquad {Q}_{A}(\zeta )=\Tr [ \hat{A} \hat{w}^{( -)}(\zeta )]\,, \nonumber \label{P} \end{eqnarray} and $\hat{w}^{(-)}(\zeta )=|\zeta \rangle \langle \zeta |$. In~\mathop{\mathrm{Re}} \nolimitsf{appA} we show that \sugg{there is a class of $s$-parametrized kernels} that are connected to $\hat{w}^{(\pm )}(\zeta )$ through the following relations: \begin{eqnarray} \hat{w}^{(s)}(\zeta ) & = & \frac{2}{\pi}\int \rmd\mu (\zeta ^{\prime}) \int \rmd\lambda \,\lambda \tanh (\pi \lambda )\, \Phi_{k}^{\frac{1}{2}-\frac{s}{2}}(\lambda ) \, P_{-\frac{1}{2}+\rmi\lambda} (\zeta^{\prime -1} \zeta ) \hat{w}^{(+)}(\zeta ^{\prime})\,, \nonumber \label{Rww+-} \\ & = & \frac{2}{\pi}\int \rmd(\zeta ^{\prime})\int \rmd\lambda \, \lambda \tanh(\pi \lambda )\,\Phi_{k}^{-\frac{1}{2}-\frac{s}{2}}(\lambda ) \,P_{-\frac{1}{2}+\rmi\lambda}(\zeta ^{\prime -1} \zeta ) \hat{w}^{(-)}(\zeta ^{\prime}) \,, \nonumber \\ \end{eqnarray} where $\Phi_{k}(\lambda )$ is \begin{equation} \Phi_{k}(\lambda )= \frac{(2k-1)|\Gamma (2k-\frac{1}{2}+\rmi\lambda )|^{2}} {\Gamma ^{2}(2k)} \stackrel{\lambda \gg 1}{\sim} \lambda^{4k-3/2} \rme^{-\pi \lambda} \, , \label{Phi} \end{equation} and $P_{-\frac{1}{2}+i\lambda}(x)$ is the Legendre function~\cite{Erdelyi:1955aa,NIST:DLMF} with $P_{- \frac{1}{2}+\rmi\lambda}(\zeta^{\prime -1}\zeta )=P_{-\frac{1}{2}+\rmi\lambda}(\mathbf{n}\cdot \mathbf{n}^{\prime})$. \sugg{The invariant integration of the SU(1,1) covariant kernels $\hat{w}^{(\pm )}(\zeta )$ does warrant the covariance of the family $\hat{w}^{(s)}(\zeta )$.} By construction, the kernels (\mathop{\mathrm{Re}} \nolimitsf{Rww+-}) satisfy the overlap relation \begin{equation} \frac{2k-1}{4\pi}\Tr [ \hat{w}^{(s)}(\zeta ) \hat{w}^{(-s)}(\zeta^{\prime}) ] = \delta (\zeta ^{\prime},\zeta ) = \delta (\cosh \tau -\cosh \tau ^{\prime})\, \delta (\phi -\phi ^{\prime} ) \, , \label{sd} \end{equation} and the normalization conditions \begin{equation} \Tr [ \hat{w}^{(s)}(\zeta ) ]=1\,, \qquad \quad \frac{2k-1}{\pi}\int \rmd\mu(\zeta ) \, \hat{w}^{(s)}(\zeta )=\hat{\leavevmode\hbox{\small1\normalsize\kern-.33em1}} \,. 
\label{ints} \end{equation} In particular, the Wigner symbol ($s=0$) of an operator $\hat{A}$ is related to $Q$- and $P$- symbols by \begin{eqnarray} \fl \qquad \qquad {W}_{A}(\zeta ) & \equiv & \Tr [ \hat{A}\hat{w}^{(0)}(\zeta )] \nonumber \\ &=&\frac{2}{\pi}\int \rmd\mu (\zeta ^{\prime})\, {g}_{k}^{(+)}(\zeta^{\prime -1}\zeta ) {P}_{A}(\zeta ^{\prime}) = \frac{2}{\pi} \int \rmd\mu(\zeta^{\prime}) {g}_{k}^{(-)}(\zeta ^{\prime -1}\zeta ){ Q}_{A}(\zeta^{\prime})\,, \label{W+-} \end{eqnarray} where \begin{equation} {g}_{k}^{(\pm )}(\zeta ^{\prime -1}\zeta ) = \int_{0}^{\infty}\rmd\lambda \,\lambda \tanh (\pi \lambda )\; \Phi_{k}^{\pm \frac{1}{2}}(\lambda ) P_{-\frac{1}{2}+\rmi\lambda}(\mathbf{n}\cdot \mathbf{n}^{\prime}) \, . \label{gk} \end{equation} In \sugg{consequence, the Wigner symbols satisfy the normalization} \begin{equation} \frac{2k-1}{\pi}\int \rmd\mu (\zeta ){W}_{A}(\zeta )=1\,. \end{equation} The map (\mathop{\mathrm{Re}} \nolimitsf{Ws}) generated by the kernels in (\mathop{\mathrm{Re}} \nolimitsf{Rww+-}) is invertible in the standard sense: \begin{equation} \hat{A} = \frac{2k-1}{\pi} \int \rmd \mu (\zeta ) \, W_{A}^{(s)}(\zeta )\, \hat{w}^{(-s)}(\zeta )\, . \label{inverse} \end{equation} The self-duality condition \sugg{of the Wigner map} is obviously satisfied here and average values are computed in accordance with equation~(\mathop{\mathrm{Re}} \nolimitsf{sdW}): \begin{equation} \langle \hat{A}\rangle = \frac{2k-1}{\pi} \int \rmd \mu (\zeta) W_{A} ( \zeta ) W_{\rho} ( \zeta ) \, . \label{averageW} \end{equation} We note that the equations~(\mathop{\mathrm{Re}} \nolimitsf{Rww+-}) \sugg{can also be formally represented in the compact form} \begin{equation} \hat{w}^{(s)}(\zeta ) = \Phi_{k}^{\frac{1}{2} - \frac{s}{2}}(\mathcal{L}^{2}) \, \hat{w}^{(+)}(\zeta ) = \Phi_{k}^{-\frac{1}{2}-\frac{s}{2}} (\mathcal{L}^{2})\, \hat{w}^{(-)}(\zeta ), \label{opW} \end{equation} with \begin{equation} \Phi_{k}(\mathcal{L}^{2}) = - \frac{\pi \mathcal{L}^{2}} {\cos ( \pi \sqrt{1/4+\mathcal{L}^{2}})} \prod_{m=1}^{2k-2} \left[ 1-\frac{\mathcal{L}^{2}}{m(m+1)}\right] \,, \label{PiopM} \end{equation} and {$\mathcal{L}^{2}$} is the Laplace operator on the hyperboloid~\cite{Alonso:2002aa} \begin{equation} \mathcal{L}^{2}=\frac{\partial^{2}}{\partial \tau^{2}}+ \coth \tau \frac{\partial}{\partial \tau} + \frac{1}{\sinh^{2}\tau}\frac{\partial^{2}}{\partial \varphi^{2}}. \label{eq:Lphyp} \end{equation} The function ${g}_{k}^{(-)}$ \sugg{in equation~\mathop{\mathrm{Re}} \nolimitsf{gk} is singular}, as one can see using the asymptotic behavior in (\mathop{\mathrm{Re}} \nolimitsf{Phi}). This makes it inconvenient for calculations. In practice, the Wigner functions of physical states can be numerically generated only from the $P$-function; i.e., in terms of the ${g}_{k}^{(+)}$ function. \sugg{It is worth noting that the relations~(\mathop{\mathrm{Re}} \nolimitsf{Rww+-}) allow one to express the star product of $s$-parametrized symbols~\cite{Groenewold:1946aa}; i.e.,} \begin{equation} W_{fg}^{(s)}=W_{f}^{(s_{1})} \ast W_{g}^{(s_{2})} \, , \label{sp} \end{equation} \sugg{in the integral form~\cite{Brif:1999kx}} \begin{equation} W_{fg}^{(s)} = \int \rmd \mu (\zeta_{1})\rmd \mu (\zeta_{2}) L_{s,s_{1},s_{2}} ( \zeta , \zeta_{1}, \zeta_{2} ) W_{f}^{(s_{1})}(\zeta_{1}) \, W_{g}^{(s_{2})}(\zeta_{2}) \, , \label{SP} \end{equation} \sugg{where} \begin{equation} L_{s,s_{1},s_{2}} ( \zeta , \zeta_{1}, \zeta_{2} ) = \Tr [ \hat{w}^{(s)}(\zeta ) \, \hat{w}^{(s_{1})}(\zeta_{1}) \, \hat{w}^{(s_{2})}(\zeta _{2}) ]\, . 
\label{K} \end{equation} In particular, the Wigner symbol of a product of two operators can be conveniently represented in terms of a convolution of the corresponding $P$-symbols according to \begin{equation} \fl W_{fg}^{(0)}=\Phi_{k}^{-\frac{1}{2}}(\mathcal{L}^{2}) \left( \frac{2k-1}{\pi}\right)^{2} \int \rmd \mu (\zeta_{1})\rmd \mu (\zeta_{2}) P_{f}(\zeta_{1}) \, P_{g}(\zeta_{2}) \, \langle \zeta_{2}|\zeta \rangle \langle \zeta |\zeta_{1}\rangle \langle \zeta_{1}|\zeta_{2}\rangle \, . \end{equation} \section{Examples of Wigner functions} \subsection{Coherent states} The Wigner function for $SU(1,1)$ coherent states is fairly easy to obtain using equation~(\ref{W+-}), since the ${P}$-function of a coherent state $|\zeta_{0}\rangle$ is a $\delta$-function on the hyperboloid: \begin{equation} {P}_{|\zeta_{0}\rangle} (\zeta ) = \frac{4\pi}{2k-1} \delta (\zeta,\zeta_{0}) = \frac{4\pi}{2k-1} \delta (\cosh \tau -\cosh \tau_{0})\delta (\phi-\phi_{0}) \, . \end{equation} Then, the corresponding Wigner function is \begin{equation} {W}_{|\zeta_{0}\rangle}(\zeta ) = \frac{2}{2k-1} {g}_{k}^{(+)}(\zeta_{0}^{-1}\zeta ) \, . \end{equation} In the particular case of the lowest weight state $|\zeta_{0}\rangle =|k,k\rangle $ the Wigner function is \begin{equation} {W}_{|k,k\rangle} (\zeta ) = \frac{2}{2k-1} \int_{0}^{\infty} \rmd\lambda \; \lambda \tanh (\pi \lambda ) \, \Phi_{k}^{\frac{1}{2}}(\lambda ) \; P_{-\frac{1}{2}+ \rmi\lambda}(\cosh \tau )\,. \label{kkWF} \end{equation} In figure~\ref{fig:1} we plot the Wigner function of equation~(\ref{kkWF}) for the ground state $|k,k\rangle $ as a distribution on the Poincar\'e disc for two irreps, with $k=1$ and $k=5$ respectively. The distribution becomes narrower as $k$ increases. The difference in the scale is due to the normalization factor $\sim 2k-1$ appearing in~(\ref{ints}). \begin{figure} \caption{Plots of the $SU(1,1)$ Wigner function of the ground state $|k,k\rangle$ on the Poincar\'{e} disc.} \label{fig:1} \end{figure} A more interesting case is the Wigner function for the superposition of two $SU(1,1)$ coherent states: \begin{equation} |\Psi \rangle =\alpha | \zeta_{0}\rangle + \beta |\zeta_{1}\rangle \,. \end{equation} The corresponding Wigner function exhibits interference and has the form (see~\ref{appB}) \begin{equation} {W}_{|\Psi \rangle}(\zeta ) = |\alpha |^{2} {W}_{|\zeta_{0}\rangle} (\zeta ) + |\beta |^{2} {W}_{|\zeta_{1}\rangle}(\zeta )+ 2 \mathop{\mathrm{Re}} \nolimits [ \alpha \beta^{\ast} \, W_{\zeta_{0}\zeta_{1}}(\zeta ) ] \, , \end{equation} where $W_{\zeta_{0}\zeta_{1}}(\zeta )$ is \begin{equation} W_{\zeta_{0}\zeta_{1}}(\zeta )=\frac{2(1-|\zeta_{0}|^{2})^{k} (1-|\zeta_{1}|^{2})^{k}}{(2k-1) (1- \zeta_{0}\zeta_{1}^{\ast})^{2k}} {g}_{k}^{(+)} \left( \frac{2 ( 1-\zeta^{\ast} \zeta_{0}) (1-\zeta_{1}^{\ast} \zeta)}{(1-|\zeta |^{2})(1-\zeta_{0} \zeta_{1}^{\ast})} - 1 \right) . \end{equation} The Wigner function allows one to visualize the interference pattern appearing in the phase-space description of superpositions of pure states, and thus to distinguish them from mixed states. In figure~\ref{fig:2} we plot the Wigner function of even and odd superpositions of $SU(1,1)$ coherent states (cat-like states) \begin{equation} |\Psi \rangle = \frac{N}{\sqrt{2}} (|\zeta_{0}\rangle \pm |-\zeta_{0}\rangle ) \, , \label{cats} \end{equation} where $N= ( 1+\cosh^{-2k}\tau_{0} )^{-1/2}$.
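Integrals such as (\ref{kkWF}), and (\ref{Wcats}) below, are well suited to direct numerical evaluation, since $\Phi_{k}^{1/2}(\lambda )$ decays exponentially in $\lambda$. The following minimal sketch (our own illustration, not part of the derivations above; it assumes Python with the \texttt{mpmath} library and uses purely illustrative function names) evaluates the ground-state Wigner function (\ref{kkWF}):
\begin{verbatim}
# Minimal numerical sketch (illustrative only) of the ground-state
# Wigner function W_{|k,k>}(tau) of eq. (kkWF) for the irrep k.
from mpmath import mp, mpf, gamma, tanh, cosh, pi, quad, legenp, re, sqrt

mp.dps = 25

def Phi_k(lam, k):
    # explicit form of Phi_k(lambda), eq. (Phi)
    return (2*k - 1)*abs(gamma(2*k - mpf(1)/2 + 1j*lam))**2/gamma(2*k)**2

def W_ground(tau, k, lam_max=40):
    # integrand: lam tanh(pi lam) Phi_k^{1/2}(lam) P_{-1/2+i lam}(cosh tau);
    # Phi_k ~ exp(-pi lam), so a finite cutoff lam_max suffices in practice
    f = lambda lam: (lam*tanh(pi*lam)*sqrt(Phi_k(lam, k))
                     *re(legenp(-mpf(1)/2 + 1j*lam, 0, cosh(tau))))
    return 2/(2*k - 1)*quad(f, [0, lam_max])

for tau in ('0', '0.5', '1', '2'):
    print(tau, W_ground(mpf(tau), 1))
\end{verbatim}
For $k=1$ the output can be compared with the profile shown in figure~\ref{fig:1}.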
\begin{figure} \caption{Plots of the $SU(1,1)$ Wigner function of the cat states in equation~(\protect\ref{cats}).} \label{fig:2} \end{figure} The analytical expression for the Wigner function reads \begin{eqnarray} {W}_{|\Psi \rangle}(\tau ,\phi ) & = & \frac{N^{2}}{2k-1} \int_{0}^{\infty} \rmd \lambda \, \lambda \tanh ( \pi \lambda ) \, \Phi_{k}^{\frac{1}{2}}(\lambda ) \; \left[ P_{-\frac{1}{2}+\rmi\lambda} ( \cosh \xi_{+} ) \right. \nonumber \\ &+ &\left. P_{-\frac{1}{2}+\rmi \lambda} ( \cosh \xi_{-}) \pm \frac{2}{\cosh^{2k}\tau_{0}} \mathop{\mathrm{Re}} \nolimits P_{-\frac{1}{2}+\rmi \lambda}( z(\tau ,\phi )) \right] , \label{Wcats} \end{eqnarray} with \begin{eqnarray} \cosh \xi_{\pm} & = & \cosh \tau \cosh \tau_{0} \mp \cos \phi \sinh \tau \sinh \tau_{0} \, , \nonumber \\ && \\ z(\tau ,\phi ) & = & \frac{\cosh \tau - \rmi \sinh \tau_{0}\sinh \tau \sin \phi}{ \cosh \tau_{0}} \, . \nonumber \end{eqnarray} The last term in equation~(\ref{Wcats}) describes the interference pattern. We point out that this pattern becomes more pronounced (i.e., the number of oscillations increases) as the representation index $k$ grows. \subsection{Number states} \begin{figure} \caption{Plots of the $SU(1,1)$ Wigner function of the excited states on the Poincar\'{e} disc.} \label{fig:3} \end{figure} The Wigner function of the $SU(1,1)$ number states \begin{equation} |k,k+m\rangle =\sqrt{\frac{\Gamma (2k)}{m!\Gamma (m+2k)}} \hat{K} _{+}^{m}|k,k\rangle , \end{equation} is obtained in~\ref{appB} and given by \begin{eqnarray} \fl \qquad \qquad {W}_{|m\rangle}(\zeta ) & = & \frac{\Gamma (2k)} {(2k-1)\pi m! \Gamma (m+2k)} \int_{0}^{\infty} \rmd \lambda \, \lambda \tanh ( \pi \lambda ) \, \Phi_{k}^{\frac{1}{2}}(\lambda ) \nonumber \\ & \times & \int \rmd \tau^{\prime} \rmd\phi^{\prime} \delta (\tau^{\prime}) [\cosh^{4} (\tau^{\prime}/{2}) \, {\mathcal{L}}^{\prime 2} ]^{m} [ \cosh^{4k} (\tau^{\prime}/2) \, P_{-\frac{1}{2}+\rmi \lambda}(\cosh \xi ) ] , \end{eqnarray} where $\cosh \xi =\cosh \tau \cosh \tau^{\prime}-\cos (\phi-\phi^{\prime}) \sinh \tau \sinh \tau^{\prime}$ and ${\mathcal{L}}^{\prime 2}$ is the Laplace operator on the hyperboloid acting on the primed variables. The Wigner function of the first excited state is \begin{eqnarray} {W}_{|1\rangle}(\zeta ) &=&\frac{1}{( 2k-1) k} \int_{0}^{\infty} \rmd \lambda \, \lambda \tanh ( \pi \lambda ) \; \Phi_{k}^{\frac{1}{2}}(\lambda) (2k- 1/4 - \lambda^{2}) P_{-\frac{1}{2} + \rmi\lambda}( \cosh \tau ) \nonumber \\ & = &\frac{1}{( 2k-1) k} \left( 2k + \frac{\partial^{2}}{\partial\tau^{2}} + \coth \tau \frac{\partial}{\partial \tau}\right) {g}_{k}^{(+)} ( \cosh \tau ) \, . \label{W1} \end{eqnarray} Figure~\ref{fig:3} illustrates the Wigner functions of the states $|k,k+1\rangle $ and $|k,k+3\rangle $ in the representation with $k=1$. \section{Applications: $\mathfrak{su}(1,1)$ dynamics} In quantum optics the $\mathfrak{su}(1,1)$ algebra naturally appears in the analysis of the non-degenerate parametric amplifier, with \begin{equation} \hat{K}_{+}= \hat{a}^{\dagger} \hat{b}^{\dagger}, \qquad \hat{K}_{-} = \hat{a} \hat{b} , \qquad \hat{K}_{0} = \case{1}{2} ( \hat{a}^{\dagger} \hat{a} + \hat{b}^{\dagger}\hat{b} + \leavevmode\hbox{\small1\normalsize\kern-.33em1} ) \, , \label{tm} \end{equation} where $\hat{a} $ and $\hat{b}$ are the standard boson operators.
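The $\mathfrak{su}(1,1)$ commutation relations $[\hat{K}_{0},\hat{K}_{\pm}]=\pm \hat{K}_{\pm}$ and $[\hat{K}_{-},\hat{K}_{+}]=2\hat{K}_{0}$ of this two-mode realization are easy to verify numerically on a truncated Fock space; a small sketch (our own illustration, assuming Python with \texttt{numpy}):
\begin{verbatim}
# Illustrative check (not from the paper) of the two-mode realization
# K+ = a^dag b^dag, K- = a b, K0 = (a^dag a + b^dag b + 1)/2
# on a truncated two-mode Fock space.
import numpy as np

N = 12                                       # single-mode Fock cutoff
a1 = np.diag(np.sqrt(np.arange(1, N)), 1)    # single-mode annihilation operator
I1 = np.eye(N)
a, b = np.kron(a1, I1), np.kron(I1, a1)      # mode a and mode b
Kp = a.conj().T @ b.conj().T
Km = a @ b
K0 = 0.5*(a.conj().T @ a + b.conj().T @ b + np.eye(N*N))

def comm(X, Y):
    return X @ Y - Y @ X

# Truncation spoils the algebra only in the top Fock layer, so we test the
# commutators on columns (Fock states) with n_a, n_b <= N-2.
na, nb = np.meshgrid(np.arange(N), np.arange(N), indexing='ij')
keep = (na.ravel() <= N - 2) & (nb.ravel() <= N - 2)
for lhs, rhs in [(comm(K0, Kp), Kp), (comm(K0, Km), -Km), (comm(Km, Kp), 2*K0)]:
    print(np.max(np.abs((lhs - rhs)[:, keep])))   # all ~ 1e-14
\end{verbatim}
In this realization the difference of excitations between the two modes is conserved, in agreement with the decomposition into irreps labelled by $\Delta n$ discussed next.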
The coherent states (\ref{cs11}) form a convenient (but overcomplete) basis in each Hilbert space with a fixed difference $\Delta n$ of excitations between the modes $a$ and $b$. The $SU(1,1)$-irreducible subspaces are carrier spaces for irreps labelled by $k=\case{1}{2} ( 1+|\Delta n|) $. The evolution generated by Hamiltonians in the enveloping algebra of (\ref{tm}) can be suitably described as dynamics of $SU(1,1)$ quasidistributions on the hyperboloid or, equivalently, on the Poincar\'{e} disc. The phase-space evolution on the hyperboloid generated by $\mathfrak{su}(1,1) $ Hamiltonians differs significantly from the dynamics on the two-dimensional sphere, the homogeneous space for $SU(2)$: while any Hamiltonian linear in the $SU(2)$ generators is equivalent to $\hat{H} =\omega \hat{S}_{z}$, there are both compact and non-compact orbits in the case of $SU(1,1)$ systems. In general, the dynamics of an initial state $ |\psi_{0}\rangle $ induced by an operator $T_{g}$ corresponding to an irrep of an element \begin{equation} g=\left( \begin{array}{cc} \alpha & \beta \\ \beta^{\ast} & \alpha^{\ast} \end{array} \right) ,\qquad |\alpha |^{2}-|\beta |^{2}=1 \, , \end{equation} of $SU(1,1)$ leads to an appropriate transformation of the Wigner function argument \begin{equation} W_{T_{g}|\psi_{0}\rangle}(\zeta ) = W_{|\psi_{0}\rangle} \left ( \frac{-\alpha^{\ast}\zeta +\beta}{\beta^{\ast}\zeta -\alpha}\right) \, , \end{equation} as a consequence of the Wigner function covariance under group transformations~\cite{Perelomov:1986ly}. In particular, in the case of compact evolution, the Hamiltonian \begin{equation} \hat{H}=\chi \hat{K}_{0} \, , \label{K0} \end{equation} generates a rotation around the $z$-axis, and yields \begin{equation} W_{|\zeta_{0}\rangle}(\zeta |t)= W_{|\zeta_{0}\rangle} ( \rme^{\rmi\chi t}\zeta ) \, , \end{equation} or, equivalently, \begin{equation} W_{|\zeta_{0}\rangle} (\tau ,\phi |t)= W_{|\zeta_{0}\rangle}(\tau,\phi-\chi t) \, . \end{equation} Any Hamiltonian $SU(1,1)$-equivalent to that in equation~(\ref{K0}) leads to a rotation of the initial distribution along an ellipse obtained as an intersection of the hyperboloid and an inclined plane. The noncompact evolution is generated by $SU(1,1)$ Hamiltonians equivalent to \begin{equation} \hat H=\chi \hat{K}_{2} \, . \label{Hn} \end{equation} For instance, the phase-space dynamics of the state $|\zeta_{0}=\tanh \tau_{0}/2\rangle $ governed by (\ref{Hn}) leads to \begin{equation} W_{|\zeta_{0}\rangle}(\zeta |t) = W_{|\zeta_{0}\rangle}\left( \frac{\zeta\cosh \frac{\chi t}{2}+\sinh \frac{\chi t}{2}} {\zeta \sinh \frac{\chi t}{2}+\cosh \frac{\chi t}{2}} \right) \, , \end{equation} which explicitly exhibits the boost generated by (\ref{Hn}), e.g. \begin{equation} W_{|\zeta_{0}\rangle}(\tau ,\phi =0|t) = W_{|\zeta_{0}\rangle}(\tau +\chi t,\phi =0) \, . \end{equation} \section{Concluding remarks} In this work we have developed a basic and practical setup for a consistent introduction of the Wigner map for quantum systems whose $SU(1,1)$ symmetry group acts irreducibly in the corresponding Hilbert space. The Wigner function generated by the kernels (\ref{W+-}) allows one to faithfully represent states of quantum systems with underlying $SU(1,1)$ symmetry as distributions on the upper sheet of the hyperboloid or on the Poincar\'{e} disc.
In the framework of our approach, the Wigner kernel can be formally obtained both from the $Q$ and from the $P$ kernels. In a manner reminiscent of the Heisenberg-Weyl group, the transformation taking $\hat{w}^{(-)}(\zeta )$ to $\hat{w}^{(0)}(\zeta )$ is singular. Thus, a practical way of obtaining the Wigner function is from the $P$-function of the corresponding state. \ack We dedicate this work to the memory of Prof. David J. Rowe, of the University of Toronto. The work of ABK is partially supported by the Grant 254127 of CONACyT (Mexico); HdG is supported in part by NSERC of Canada; LLSS is supported by the Spanish Ministerio de Ciencia e Innovaci\'on (Grant PGC2018- 099183-B-I00). \appendix \section{Properties of $\hat w^{(s)}$} \label{appA} We start with a full set of Perelomov-type coherent states $\{|\zeta \rangle \in \mathcal{H}\}$ generated from a fiducial state $|\psi_{0}\rangle $ and labelled by coordinates $\zeta $ of $\mathcal{M}$, a homogeneous space of the dynamical symmetry group $G=SU(1,1)$. We further assume that $\mathcal{H}$ carries an irrep $\Lambda $ in the positive discrete series of $SU(1,1)$, labelled by the Bargmann index $k=\frac{1}{2},1,\frac{3}{2},2,\ldots$. Here, $\mathcal{M}=SU(1,1)/U(1)$, where $U(1)$ is the subgroup generated by $\hat{K}_{0}$. The $Q$- and $P$-kernels $\hat{w}^{\left( \pm \right)}(\zeta )$ are connected through the relation \begin{equation} \hat{w}^{(-)}(\zeta ) = \frac{2k-1}{\pi} \int \rmd\mu (\zeta^{\prime}) | \langle \zeta^{\prime}|\zeta \rangle |^{2} \; \hat{w}^{(+)}(\zeta^{\prime}) \, , \label{wwsu11} \end{equation} where $\rmd\mu (\zeta )$ is the invariant measure (\ref{eq:invmeas}). They satisfy the duality relation \begin{equation} \frac{2k-1}{4\pi}\, \Tr [ \hat{w}^{(+)}(\zeta^{\prime}) \hat{w}^{(-)}(\zeta) ] = \delta (\zeta ,\zeta^{\prime}) = \delta (\cosh \tau^{\prime}-\cosh \tau )\delta (\phi^{\prime}-\phi )\, . \label{dual} \end{equation} Following the general ideas of \cite{Figueroa:1990aa} we observe that \begin{equation} \delta (\cosh \tau -\cosh \tau^{\prime})\delta (\phi -\phi^{\prime})= \frac{1}{2\pi}\sum_{n=-\infty}^{\infty} \int \rmd\lambda \; \lambda \tanh (\pi\lambda) \, u_{n}^{\lambda}(\zeta ) u_{n}^{\lambda \ast} (\zeta^{\prime}) \; , \end{equation} where \begin{eqnarray} u_{n}^{\lambda}(\zeta ) & = & \frac{1}{2\pi}\int_{0}^{2\pi}\rmd \theta \, [ \cosh \tau -\sinh \tau \cos (\theta -\phi )]^{-\frac{1}{2}+\rmi\lambda} \rme^{ \rmi n \theta} \nonumber \\ & = & ( -1 )^{n}\frac{\Gamma (\frac{1}{2}+\rmi \lambda )} { \Gamma (\frac{1}{2}+\rmi\lambda +n)} \; P_{-\frac{1}{2}+\rmi\lambda}^{n} (\cosh \tau ) \rme^{\rmi n \phi} \, , \label{un} \end{eqnarray} are the harmonic functions on the upper sheet of the hyperboloid $\mathcal{M} ={SU}(1,1)/U(1)$. The functions $u_{n}^{\lambda}(\zeta )$ are eigenfunctions of the Laplace operator {$\mathcal{L}^{2}$} (\ref{eq:Lphyp}) on the hyperboloid, \begin{equation} {\mathcal{L}^{2}}u_{n}^{\lambda}(\zeta ) = -\left( \lambda^{2}+\frac{1}{4}\right) u_{n}^{\lambda}(\zeta )\,, \label{casimir11} \end{equation} and satisfy the following sum rule~\cite{Erdelyi:1955aa}, defining the zonal functions on ${SU}(1,1)/U(1)$: \begin{equation} \sum_{n=-\infty}^{\infty}u_{n}^{\lambda}(\zeta ) u_{n}^{\ast\lambda}(\zeta^{\prime}) = P_{-\frac{1}{2}+\rmi \lambda} (\cosh \xi ) \, , \label{Phf} \end{equation} where $\cosh \xi$ has been defined in~(\ref{nn'}).
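As a quick numerical sanity check of (\ref{un}) (our own aside, not needed for the derivation; it assumes Python with \texttt{mpmath}), the $n=0$ angular integral can be compared directly with the conical Legendre function:
\begin{verbatim}
# Check (illustrative only) that the n = 0 angular integral in eq. (un)
# reproduces the conical Legendre function P_{-1/2+i*lam}(cosh tau).
from mpmath import mp, mpf, quad, cos, cosh, sinh, pi, legenp, chop

mp.dps = 20
lam, tau = mpf('0.8'), mpf('1.3')

u0 = quad(lambda th: (cosh(tau) - sinh(tau)*cos(th))**(-mpf(1)/2 + 1j*lam),
          [0, 2*pi])/(2*pi)
print(chop(u0))
print(chop(legenp(-mpf(1)/2 + 1j*lam, 0, cosh(tau))))   # same real number
\end{verbatim}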
The harmonic functions of equation~(\ref{un}) also satisfy the orthogonality condition \begin{equation} \lambda \tanh (\pi \lambda )\int \rmd\tau \rmd\phi \,\sinh \tau \,u_{n}^{\lambda}(\zeta )u_{n^{\prime}}^{\lambda ^{\prime}\ast}(\zeta ) =2\pi \delta_{nn^{\prime}}\delta (\lambda -\lambda ^{\prime}). \end{equation} The expansion of a function $f(\zeta )$ on the hyperboloid in the basis of $ u_{n}^{\lambda}(\zeta )$ thus has the form \begin{equation} \fl f(\zeta ) = \sum_{n=-\infty}^{\infty}\int \rmd\lambda \lambda \tanh (\pi \lambda ) \,u_{n}^{\lambda}(\zeta )f_{n\lambda}\, , \qquad f_{n\lambda} = \int \rmd\mu (\zeta )\, u_{n}^{\lambda \ast}(\zeta )f(\zeta ) \, . \end{equation} The functions $u_{n}^{\lambda}(\zeta )$ are nothing but the representation of the basis elements of the principal continuous series, labelled by $-\frac{1}{2}+\rmi\lambda $~\cite{Perelomov:1986ly}, \begin{equation} \hat{K}_{0}|\lambda ,n\rangle = n|\lambda ,n\rangle \,, \qquad \hat{K}_{\pm}|\lambda ,n\rangle = \left( \pm \case{1}{2}\mp \rmi\lambda +n\right) |\lambda ,n\pm 1\rangle \,, \end{equation} with $n \in \mathbb{Z}$ and $u_{n}^{\lambda}(\zeta )=\langle \zeta |\lambda ,n\rangle $. It is easy to see that a differential operator $\hat{\Phi}_{\Lambda}(\zeta ) $, depending explicitly on the Bargmann index $k$ that labels the representation $\Lambda $ and returning the squared coherent state overlap $|\langle \zeta ^{\prime}|\zeta \rangle |^{2}$ from $ \delta (\zeta ^{\prime},\zeta )$, should be invariant under group transformations: given $\hat{\Phi}_{\Lambda}(\zeta )\delta (\zeta ^{\prime},\zeta )=|\langle \zeta ^{\prime}|\zeta \rangle |^{2}$, then, by the invariance of $|\langle \zeta ^{\prime}|\zeta \rangle |^{2}$ and $\delta (\zeta ,\zeta ^{\prime})$ under the group action, we have \begin{equation} \fl\hat{\Phi}_{\Lambda}(g\zeta )\delta (g\zeta ^{\prime},\zeta )=|\langle g\zeta ^{\prime}|\zeta \rangle |^{2}=|\langle \zeta ^{\prime}|g^{-1}\zeta \rangle |^{2}=\hat{\Phi}_{\Lambda}(\zeta )\delta (\zeta ^{\prime },g^{-1}\zeta )=\hat{\Phi}_{\Lambda}(\zeta )\delta (g\zeta ^{\prime},\zeta ), \end{equation} where $g\in SU(1,1)$. Thus, the operator $\hat{\Phi}_{\Lambda} (\zeta )\equiv \hat{\Phi}_{k} (\zeta )$ is conveniently expressed as a \emph{function} $\Phi_{k}$ of the operator $\mathcal{L}^{2}$, the differential realization of the quadratic Casimir $\mathcal{C}_{2}$ on the hyperboloid: \begin{equation} \hat{\Phi}_{k}(\zeta )=\Phi_{k}(\mathcal{L}^{2}). \end{equation} Explicitly, for the square of the scalar product of two $SU(1,1)$ coherent states in the representation labelled by $k=1/2,1,3/2,\ldots$ we have \begin{eqnarray} \fl\frac{2k-1}{4\pi}|\langle \zeta ^{\prime}|\zeta \rangle |^{2} &=&\frac{ 2k-1}{4\pi}\left( \frac{1+\cosh \xi}{2}\right) ^{-2k}=\hat{\Phi}_{k}({ \mathcal{L}^{2}})\delta (\cosh \tau -\cosh \tau ^{\prime})\delta (\phi -\phi ^{\prime}) \nonumber \\ \;\; &=&\frac{1}{2\pi}\int \rmd\lambda \;\lambda \tanh \left( \pi \lambda \right) P_{-\frac{1}{2}+\rmi\lambda}(\cosh \xi )\Phi_{k}(\lambda ). \label{Phik} \end{eqnarray} In consequence, equation~(\ref{wwsu11}) can be rewritten as \begin{equation} \hat{w}^{(-)}(\zeta )=\frac{2}{\pi}\int \rmd\mu (\zeta ^{\prime})\hat{w} ^{(+)}(\zeta ^{\prime})\int \rmd\lambda \;\lambda \tanh (\pi \lambda )P_{- \frac{1}{2}+\rmi\lambda}(\cosh \xi )\Phi_{k}(\lambda )\,.
\label{w-w+} \end{equation} The inversion of equation~(\ref{Phik}) is given by~\cite{Erdelyi:1955aa} \begin{equation} \Phi_{k}(\lambda )=\frac{2k-1}{2}\int_{1}^{\infty}\rmd x \left( \frac{1+x}{2 }\right)^{-2k} P_{-\frac{1}{2}+\rmi \lambda}(x). \end{equation} The above integral can be computed exactly, with the result \begin{equation} \Phi_{k}(\lambda )=\frac{\left( 2k-1\right) |\Gamma \left( 2k-\frac{1}{2}+ \rmi\lambda \right) |^{2}}{\Gamma^{2}(2k)}, \end{equation} and its normalization follows from equation~(\ref{Phik}): \begin{equation} \frac{2}{2k-1}\int \rmd\lambda \; \lambda \tanh ( \pi \lambda) \Phi _{k}(\lambda )=1. \end{equation} Formally, one can represent equation~(\ref{w-w+}) in the operational form \begin{equation} \hat{w}^{(-)}(\zeta )=\Phi_{k}(\mathcal{L}^{2})\hat{w}^{(+)}(\zeta ), \label{opw} \end{equation} where $\Phi_{k}(\mathcal{L}^{2})$ is given in equation~(\ref{PiopM}). Now, we can formally introduce ${s}$-parametrized kernels $\hat{w}^{(s)}(\zeta )$ related to $\hat{w}^{(\pm )}(\zeta )$ as \begin{eqnarray} \hat{w}^{(s)}(\zeta ) &=&\frac{2}{\pi}\int \rmd\mu (\zeta ^{\prime})\hat{w} ^{(+)}(\zeta ^{\prime})\int \rmd\lambda \;\lambda \tanh (\pi \lambda )P_{- \frac{1}{2}+\rmi\lambda}(\cosh \xi )\Phi_{k}^{\frac{1}{2}-\frac{s}{2} }(\lambda )\, \nonumber \\ &= &\frac{2}{\pi}\int \rmd\mu (\zeta ^{\prime})\hat{w}^{(-)}(\zeta ^{\prime })\int \rmd\lambda \;\lambda \tanh (\pi \lambda )P_{-\frac{1}{2}+\rmi\lambda }(\cosh \xi )\Phi_{k}^{-\frac{1}{2}-\frac{s}{2}}(\lambda ) \nonumber \\ \end{eqnarray} that satisfy the overlap relation \begin{equation} \frac{2k-1}{4\pi}\Tr [ \hat{w}^{(s)}(\zeta ) \hat{w}^{(-s)}(\zeta^{\prime}) ]=\delta (\zeta ^{\prime},\zeta )=\delta (\cosh \tau -\cosh \tau ^{\prime })\delta (\phi -\phi ^{\prime}). \end{equation} In particular, the self-dual Wigner kernel, $s=0$, is obtained from the $\hat{w} ^{(\pm )}(\zeta )$ kernels by \begin{eqnarray} \hat{w}^{(0)}(\zeta ) &=&\frac{2}{\pi}\int \rmd\lambda \;\lambda \tanh (\pi \lambda )\Phi_{k}^{1/2}(\lambda )\int \rmd\mu (\zeta ^{\prime})\hat{w} ^{(+)}(\zeta ^{\prime})P_{-\frac{1}{2}+\rmi\lambda}(\cosh \xi ) \nonumber \\ &=&\Phi_{k}^{1/2}(\mathcal{L}^{2})\hat{w}^{(+)}(\zeta )\, , \nonumber \label{w+} \\ && \\ \hat{w}^{(0)}(\zeta ) &=&\frac{2}{\pi}\int \rmd\lambda \;\lambda \tanh (\pi \lambda )\Phi_{k}^{-1/2}(\lambda )\int \rmd\mu (\zeta ^{\prime})\hat{w} ^{(-)}(\zeta ^{\prime})P_{-\frac{1}{2}+\rmi\lambda}(\cosh \xi ) \nonumber \\ &=&\Phi_{k}^{-1/2}(\mathcal{L}^{2})\hat{w}^{(-)}(\zeta ) \, . \nonumber \label{w-} \end{eqnarray} In this way, $\hat{w}^{(0)}(\zeta )$ automatically satisfies the self-duality condition \begin{equation} \frac{2k-1}{4\pi}\Tr [ \hat{w}^{(0)}(\zeta )\hat{w}^{(0)}(\zeta ^{\prime}) ]=\delta (\cosh \tau -\cosh \tau ^{\prime})\delta (\phi -\phi ^{\prime}) \, . \end{equation} Since the kernels $\hat{w}^{(\pm )}(\zeta )$ satisfy the normalization conditions~(\ref{ints}), one obtains from equation~(\ref{w-}) \begin{equation} \Tr [ \hat{w}^{(0)}(\zeta ) ]=\Phi_{k}^{1/2}(\mathcal{L}^{2})\Tr [ \hat{w}^{(+)}(\zeta )]=1, \end{equation} since $\Phi_{k}(\mathcal{L}^{2})\,1=1$.
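The equivalence between this explicit form of $\Phi_{k}(\lambda )$ and the operational form (\ref{PiopM}), evaluated on the Laplacian eigenvalue $-(\lambda ^{2}+1/4)$ of (\ref{casimir11}), is easy to confirm numerically; a minimal sketch (ours, assuming Python with \texttt{mpmath}):
\begin{verbatim}
# Illustrative check that the Gamma-function form of Phi_k(lambda) agrees
# with the operational form Phi_k(L^2) at L^2 = -(lambda^2 + 1/4).
from mpmath import mp, mpf, gamma, cosh, pi

mp.dps = 30

def Phi_explicit(lam, k):
    return (2*k - 1)*abs(gamma(2*k - mpf(1)/2 + 1j*lam))**2/gamma(2*k)**2

def Phi_operational(lam, k):
    L2 = -(lam**2 + mpf(1)/4)          # Laplacian eigenvalue on u_n^lambda
    val = -pi*L2/cosh(pi*lam)          # cos(pi*sqrt(1/4+L2)) = cosh(pi*lam)
    for m in range(1, int(2*k) - 1):   # product over m = 1 .. 2k-2
        val *= 1 - L2/(m*(m + 1))
    return val

for k in (1, mpf(3)/2, 2, mpf(5)/2):
    for lam in (mpf('0.3'), mpf('1'), mpf('2.7')):
        print(k, lam, Phi_explicit(lam, k) - Phi_operational(lam, k))  # ~ 0
\end{verbatim}
For $k=1$ both expressions reduce to $\pi (\lambda ^{2}+1/4)/\cosh (\pi \lambda )=|\Gamma (\frac{3}{2}+\rmi\lambda )|^{2}$.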
In addition, using the self-adjointness of $\Phi_{k}(\mathcal{L}^{2})$ one has \begin{eqnarray} \fl\frac{2k-1}{\pi}\int \rmd\mu (\zeta )\hat{w}^{(0)}(\zeta ) &=&\frac{2k-1 }{\pi}\int \rmd\mu (\zeta )\Phi_{k}^{1/2}(\mathcal{L}^{2})\hat{w} ^{(+)}(\zeta )=\frac{2k-1}{\pi}\int \rmd\mu (\zeta )\hat{w}^{(+)}(\zeta )= \hat{\leavevmode\hbox{\small1\normalsize\kern-.33em1}} \,. \nonumber \\ && \end{eqnarray} It is straightforward to obtain the average of the Wigner kernel over the coherent states; i.e., the $Q$-function of the Wigner kernel: \begin{equation} \langle \zeta ^{\prime}|\hat{w}^{(0)}(\zeta )|\zeta ^{\prime}\rangle = \frac{2}{2k-1}\int \rmd\lambda \,\lambda \tanh (\pi \lambda )P_{-\frac{1}{2}+ \rmi\lambda}(\cosh \xi )\Phi_{k}^{1/2}(\lambda ), \end{equation} which is a convergent integral. \section{Wigner functions of some number states and superpositions} \label{appB} In this Appendix we obtain the Wigner functions of the number states and of the non-diagonal projector onto coherent states. In order to obtain the Wigner function of the $SU(1,1)$ number states \begin{equation} |k,k+m\rangle =\sqrt{\frac{\Gamma (2k)}{m!\Gamma (m+2k)}} \hat{K} _{+}^{m}|k,k\rangle , \label{m} \end{equation} we notice that \begin{eqnarray} \fl \qquad \qquad \hat{K}_{+}^{m}|k,k\rangle \langle k,k|\hat{K}_{-}^{n} & = & \frac{2k-1}{\pi} \int \rmd\mu (\zeta )\, \hat{K}_{+}^{m}|\zeta \rangle \langle \zeta |\hat{K} _{-}^{n}{P}_{|k,k\rangle}(\zeta ) \nonumber \\ & = & \frac{2k-1}{\pi}\int \rmd\mu (\zeta )\left[ D_{L}^{m}(\hat{K} _{+})D_{R}^{n}(\hat{K}_{-})|\zeta \rangle \langle \zeta |\right] {P} _{|k,k\rangle}(\zeta ), \end{eqnarray} where \begin{equation} \fl D_{L}(\hat{K}_{+}) = (1-|\zeta |^{2})^{2k}\partial_{\zeta} (1-|\zeta|^{2})^{-2k} \, , \qquad D_{R}(\hat{K}_{-}) = (1-|\zeta |^{2})^{2k}\partial_{\zeta^{\ast}}(1-|\zeta |^{2})^{-2k}, \end{equation} and \begin{equation} {P}_{|k,k\rangle}(\zeta )=\frac{2}{2k-1}\frac{1}{\sinh \tau} \delta (\tau ) \end{equation} is the ${P}$-symbol for the lowest weight state $|k,k\rangle $ of the irrep $k$. In consequence, the ${P}$-function corresponding to the matrix element $ |k,k+m\rangle \langle k,k+n|$ has the form \begin{eqnarray} {P}_{mn}(\zeta )& =\frac{(-1)^{m+n}}{(1-|\zeta |^{2})^{2k-2}}{N}_{k;mn} \; \partial_{\zeta}^{m}\partial_{\zeta^{\ast}}^{n} [ (1-|\zeta|^{2})^{2k-2} \,{P }_{|k,k\rangle}(\zeta ) ] , \nonumber \label{Pnm} \\ & & \\ {N}_{k;mn}& =\frac{\Gamma (2k)}{\sqrt{m!n!\Gamma (m+2k)\Gamma (n+2k)}}\,. \nonumber \end{eqnarray} Substituting the above expression into equation~(\ref{W+-}) and integrating by parts, we obtain after simplification the Wigner symbol of $|k,k+m\rangle \langle k,k+n|$, \begin{eqnarray} {W}_{mn}(\zeta ) & = & \frac{{N}_{k;mn}}{\left( 2k-1\right) \pi} \int_{0}^{\infty}\rmd\lambda \; \lambda \tanh ( \pi \lambda ) \Phi_{k}^{ \frac{1}{2}}(\lambda ) \nonumber \\ & \times & \int \rmd\tau^{\prime}\rmd\phi^{\prime}\delta (\tau^{\prime})\partial_{\zeta^{\prime}}^{m}\partial_{\zeta ^{\prime \ast}}^{n}\left[ \cosh^{4k} (\tau^{\prime}/{2}) \, P_{-\frac{1}{2}+\rmi \lambda}(\cosh \xi )\right] , \label{Wmn} \end{eqnarray} where \begin{eqnarray} \partial_{\zeta}& =\rme^{\rmi \phi}\cosh^{2} ( \tau/2 ) \partial _{\tau} + \frac{\rmi}{2} \rme^{\rmi\phi} \coth ( \tau / 2 ) \partial_{\phi} \, , \nonumber \\ & & \\ \partial_{\zeta^{\ast}}& =\rme^{- \rmi \phi}\cosh^{2} ( \tau/2 ) \partial _{\tau} - \frac{\rmi}{2} \rme^{- \rmi\phi} \coth ( \tau / 2 ) \partial_{\phi} \, .
\nonumber \end{eqnarray} The Wigner function of the state (\ref{m}) is immediately obtained from (\ref{Wmn}). In order to compute the symbol $W_{\zeta_{0}\zeta_{1}}(\zeta )$ of the non-diagonal projector $|\zeta_{0}\rangle \langle \zeta_{1}|$ we note that \begin{eqnarray} \fl \qquad |\zeta_{0}\rangle \langle \zeta_{1}| &= &(1-|\zeta _{0}|^{2})^{k} (1-|\zeta_{1}|^{2})^{k} \nonumber \\ &\times & \sum_{m,n=0}^{\infty}\left[ \frac{\Gamma (m+2k)}{m!\Gamma (2k)} \right ]^{1/2} \left[ \frac{\Gamma (n+2k)}{n!\Gamma (2k)}\right ]^{1/2} \zeta_{0}^{m}\zeta_{1}^{\ast n}|k,k+m\rangle \langle k,k+n|. \end{eqnarray} Recalling that the $P$-symbol of the matrix element $|k,k+m\rangle \langle k,k+n|$ is given in equation~(\ref{Pnm}), we obtain the $P$-symbol of $ |\zeta_{0}\rangle \langle \zeta_{1}|$: \begin{eqnarray} P_{\zeta_{0}\zeta_{1}}(\zeta ) & = & (1-|\zeta_{0}|^{2})^{k} (1-|\zeta_{1}|^{2})^{k}(1-|\zeta |^{2})^{-2k+2} \nonumber \\ &\times&\exp (-\zeta_{0}\partial_{\zeta}- \zeta_{1}^{\ast}\partial _{\zeta^{\ast}}) [ (1-|\zeta |^{2})^{2k-2}P_{|k,k\rangle}(\zeta )] \, . \end{eqnarray} Substituting the above into equation~(\ref{W+-}) and integrating by parts yields \begin{eqnarray} \fl \qquad W_{\zeta_{0}\zeta_{1}}(\zeta ) & = & \frac{4}{( 2k-1 ) \pi} \int_{0}^{\infty}\rmd \lambda \, \lambda \tanh ( \pi \lambda )\; \Phi_{k}^{ \frac{1}{2}}(\lambda )(1-|\zeta_{0}|^{2})^{k}(1-|\zeta_{1}|^{2})^{k} \nonumber \label{ksiksi} \\ & \times & \int \rmd\mu (\zeta^{\prime})\frac{\delta (\tau^{\prime})}{\sinh \tau^{\prime}}\exp (\zeta_{0}\partial_{\zeta^{\prime}}+ \zeta_{1}^{\ast}\partial_{\zeta^{\prime \ast}}) [ (1-|\zeta^{\prime }|^{2})^{-2k}P_{-\frac{1}{2}+\rmi\lambda}(\cosh \xi ) ] , \end{eqnarray} where now \begin{equation} \cosh \xi =\frac{2|1-\zeta^{\ast}\zeta^{\prime}|^{2}}{(1-|\zeta |^{2})(1-|\zeta^{\prime}|^{2})}-1 \, . \end{equation} Integrating equation~(\ref{ksiksi}) over $\mu (\zeta^{\prime})$ yields \begin{eqnarray} \fl W_{\zeta_{0}\zeta_{1}}(\zeta ) & = & \frac{2}{2k-1}\frac{ (1-|\zeta_{0}|^{2})^{k}(1-|\zeta_{1}|^{2})^{k}} {(1-\zeta_{0}\zeta _{1}^{\ast})^{2k}} \label{Wksiksi} \nonumber \\ &\times& \int_{0}^{\infty}\rmd\lambda \lambda \tanh \left( \pi \lambda \right) \Phi_{k}^{\frac{1}{2}}(\lambda )P_{-\frac{1}{2}+\rmi\lambda}\left( \frac{2\left( 1-\zeta^{\ast}\zeta_{0}\right) \left( 1-\zeta _{1}^{\ast}\zeta \right)}{(1-|\zeta |^{2})(1-\zeta_{0}\zeta_{1}^{\ast})} -1\right) \, . \end{eqnarray} \end{document}
\begin{document} \draft \tighten \title{Impediments to mixing classical and quantum dynamics} \author{J. Caro and L.L. Salcedo} \address{ {~} \\ Departamento de F\'{\i}sica Moderna \\ Universidad de Granada \\ E-18071 Granada, Spain } \date{\today} \maketitle \thispagestyle{empty} \begin{abstract} The dynamics of systems composed of a classical sector plus a quantum sector is studied. We show that, even in the simplest cases, (i) the existence of a consistent canonical description for such mixed systems is incompatible with very basic requirements related to the time evolution of the two sectors when they are decoupled. (ii) The classical sector cannot inherit quantum fluctuations from the quantum sector. And, (iii) a coupling between the two sectors is incompatible with the requirement of physical positivity of the theory, i.e., there would be positive observables with a non positive expectation value. \end{abstract} \pacs{PACS numbers:\ \ 03.65.Bz, 03.65.Sq, 03.65.Fd} \section{Introduction} \label{sec:1} Ever since the beginnings of quantum mechanics, physical systems have been considered which are composed of a quantum mechanical sector plus another sector described in classical terms~\cite{Maddox:1995}. For instance, this issue is central in the quantum theory of measurement when the apparatus is treated classically. The same situation appears also at a less fundamental level. There are many systems in the literature which are routinely treated using a mixed quantum-classical description even if, as far as we know, they are well accounted for by quantum mechanics. Molecular theory and quantum optics are just two instances of this. The mixed description is used as a convenient approximation which greatly simplifies the treatment of such systems. In other cases some degrees of freedom are treated classically because no complete quantum theory exists for them. A typical example is the coupling of matter to gravity. In this case it is standard to use a mean field treatment (called semiclassical gravity) where a classical gravitational field obeys Einstein equations using as source the expectation value of the energy-momentum tensor of the quantum matter fields~\cite{Rosenfeld:1963}. When used in early universe cosmology, this approach leads to universes which are much too uniform as compared to present observations~\cite{Brandenberger:1985cz}. This has been attributed~\cite{Boucher:1988ua} to the fact that the mean field approach misses the secondary quantum fluctuations induced on the classical gravitational field by its coupling to the quantum matter fields, the so-called quantum backreaction~\cite{Anderson:1995tn}. Of course, in a full quantum treatment, the gravitational field would present its own primary quantum fluctuations. Such a treatment has not been pursued because of the lack of a renormalizable quantum theory of gravitation. In principle one would expect the renormalizability problem to be less severe when the gravitational field is classical, albeit with quantum backreaction; however, Ref.~\cite{Salcedo:1994sn} shows that very likely this will not be the case. There, it is shown that removing the primary quantum fluctuations of just one sector makes a formerly renormalizable theory into a non renormalizable one. The problem of mixing classical and quantum degrees of freedom has been addressed by many authors from different points of view.
A very incomplete list is ~\cite{Boucher:1988ua,Anderson:1995tn,Salcedo:1994sn,DeWitt:1962,Aleksandrov:1981,Jones:1994,Salcedo:1996jr,Diosi:1996,Prezhdo:1997gs,Diosi:1997mt}. There is no generally accepted definition of what is meant by a classical-quantum system. This is natural since, as far as we know, no such system exists in nature. In the present work a ``quantum-classical mixing'' will mean some limit case of a quantum system. The issue that we want to study is whether such a limit can actually be taken in a way which is universal and internally consistent. Universal here refers to the existence of a well-defined set of rules to be applied to any quantum-quantum system to obtain its classical-quantum version. Since our mixed systems are just degenerate cases of quantum systems, no new universal parameters should be introduced and thus our conclusions do not directly apply to approaches such as that in \cite{Diosi:1997mt}. As is well known, the Poisson bracket, which governs the classical dynamics, can be obtained as a limit of the quantum commutator by means of the Wigner transformation (see section~\ref{sec:2}). Such a classical limit preserves a number of mathematical properties of the original quantum commutator and this makes the classical dynamics internally consistent. By internally consistent we mean that the classical dynamics does not give any clue that it is just an approximation, since it displays all the correct properties that one would expect (see below in this section). Let us now consider a quantum system with two subsystems or sectors, which in general will be mutually interacting. One can ask whether it is possible to take the classical limit in just one of the sectors and still have an internally consistent dynamics for the resulting mixed quantum-classical system. After some definitions, this becomes a mathematical problem with some physical input, which will be called the semiquantization problem here. As noted above, quantum-classical mixed systems exist abundantly in the literature, where they are understood as approximations to a fully quantum dynamics. They are not meant to be consistent, so they are not under debate here. Let us clarify in what sense classical mechanics is a consistent limit of quantum mechanics and, in passing, introduce some notation. It is a common feature of both classical and quantum mechanics that the dynamics can be described in the Heisenberg picture by an evolution equation of the form \begin{equation} \frac{d A}{dt}= (A,H)+\frac{\partial A}{\partial t}\,, \label{eq:1} \end{equation} where $t$ is the time, $A$ is an arbitrary observable and $H$ is the Hamiltonian of the system. The term $\frac{\partial A}{\partial t}$ takes into account the intrinsic time dependence of $A$. On the other hand, the term $(A,H)$ describes the dynamic evolution of $A$. In the quantum case the observables are self-adjoint operators in a Hilbert space and the bracket $(\,,)$ is essentially the commutator. In the classical case the observables are real functions on the phase space and the dynamical bracket is the Poisson bracket: \begin{eqnarray} (A,B)_q &=& \frac{1}{i\hbar}[A,B]= \frac{1}{i\hbar}(AB-BA)\,,\nonumber \\ (A,B)_c &=& \{A,B\} = \sum_i\left( \frac{\partial A}{\partial x_i}\frac{\partial B}{\partial k_i} -\frac{\partial A}{\partial k_i}\frac{\partial B}{\partial x_i} \right)\,. \label{eq:2} \end{eqnarray} Mathematically, the quantum and classical brackets have a number of remarkable properties.
First, they are universal in the sense that they are independent of the particular dynamics. The latter is specified by the Hamiltonian, which in principle can be any observable of the system. Second, they are Lie brackets, that is, they are linear, antisymmetric and satisfy the Jacobi identity: \begin{eqnarray} &&(A,B)=-(B,A)\,, \nonumber \\ &&((A,B),C)+((B,C),A)+((C,A),B)=0\,. \end{eqnarray} The antisymmetry of the bracket ensures that time independent Hamiltonians are conserved. The linearity guarantees that if $A(t)$ and $B(t)$ are two observables which only have dynamical evolution (i.e., without intrinsic time dependence), and $a$ and $b$ are two real constants, the observable $C_1(t)=aA(t)+bB(t)$ is also free of intrinsic time dependence. Likewise, the Jacobi identity ensures that the observable $C_2(t)=(A(t),B(t))$ also evolves dynamically only: \begin{equation} \frac{dC_2}{dt}= (\frac{dA}{dt},B)+(A,\frac{dB}{dt}) =((A,H),B)+(A,(B,H))= ((A,B),H)= (C_2,H)\,. \end{equation} The third equality requires the Jacobi identity, since $A$, $B$ and $H$ can be arbitrary. In particular, this property ensures the preservation of the canonical relations among canonical variables. Third, for any Hamiltonian, the dynamic evolution operator $(\ ,H)$ is a derivation, that is, it satisfies Leibniz's rule: \begin{equation} (AB,H)=(A,H)B+A(B,H)\,. \end{equation} Being a derivation guarantees that the product of observables is consistent with time evolution, i.e., the observable $C_3(t)=A(t)B(t)$ is free of intrinsic time dependence if $A$ and $B$ are. In particular, this ensures that the commutation relations among canonical variables are preserved. (Note that commutation relations here means the commutator, which may or may not coincide with the dynamical Lie bracket.) Fourth, the brackets are such that the reality or hermiticity conditions on the observables (in the classical or quantum cases respectively) are preserved by time evolution. Giving up these properties would imply that some of the previous constructions are not preserved by time evolution and this would introduce an intrinsic time dependence in the dynamics. Note that this is different from the question of whether the dynamics is conservative or not; a non conservative Hamiltonian $H(t)$ introduces a privileged origin of time in the dynamics, but then the Hamiltonian $H^\prime(t)=H(t-\tau)$ defines a dynamics which is precisely the same as before except that the time is shifted by $\tau$, provided that the dynamical bracket has all the properties noted above. In the absence of any of these properties there would be a universal privileged time, universal meaning independent of the particular Hamiltonian~\cite{Salcedo:1996jr}. Three further important remarks are in order. First, the equivalence between the Schr\"odinger and Heisenberg pictures can only be proved if the bracket is a derivation, since it requires that the product of observables be preserved by time evolution. Second, when the bracket is a derivation, c-number observables are automatically free of dynamic evolution (since $(1,H)=(1^2,H)=2(1,H)=0$, the second equality coming from Leibniz's rule); otherwise this property requires an independent postulate. Third, the Lie bracket property is essential if one wants the dynamical system to carry representations of symmetry groups of transformations, using the observables as infinitesimal generators.
The operator $(\,,H)$ is just a particular case corresponding to the group of time translations; generalizing what we have already said to other transformation groups, in the absence of the Lie bracket property the dynamics would introduce intrinsic violations of rotational invariance, etc. In \cite{Salcedo:1996jr} a study of the semiquantization problem was carried out making two natural assumptions: first, that the semiquantized theory should enjoy all mathematical properties common to both quantum and classical dynamics, and second, that when the two sectors are decoupled they should evolve as if they were isolated, according to their usual quantum or classical dynamics. More precisely, the existence of a Heisenberg picture and of a canonical structure was required, plus the condition that the product of two observables be preserved by the time evolution. It was found that under these assumptions the only consistent dynamics are either purely quantum or purely classical. It was also found that removing the canonical structure condition allows other dynamics, but they are trivial in the sense that the classical variables do not inherit fluctuations from their coupling to the quantum sector, that is, there is no quantum backreaction on the classical sector. This is the case of the semiclassical dynamics commented on above, where the classical variables are coupled to the expectation values of the relevant quantum observables. In the present work, the consistency of a universal semiquantization is studied assuming only a canonical structure or assuming only physical positivity of the resulting theory. We consider the simplest systems, such as those described by position-momentum pairs or field theories of real scalar fields. In section~\ref{sec:2} we study some existing proposals of the universal type for the semiquantization problem and show that they fail to be consistent. In section~\ref{sec:3}, we find that any universal canonical semiquantization fails to fulfill some natural requirements when the two sectors are decoupled. In section~\ref{sec:4} it is found that the requirement of physical positivity of the semiquantum theory prevents the existence of quantum backreaction or even the coupling between the quantum and classical sectors. Section \ref{sec:5} summarizes our conclusions. \section{The quantum-classical bracket} \label{sec:2} The general setting is as follows. There is one quantum sector and one classical sector. We will consider only systems which are described by conjugate canonical variables of the type position and momentum, that is, Hilbert spaces of the form L$^2$(R$^n$) in the quantum case. The observables are formed out of the classical canonical variables $x_i$, $k_i$, $i=1,\dots,n_c$ and the quantum ones $q_i$, $p_i$, $i=1,\dots,n_q$. Therefore, they are functions defined on the phase space of the classical sector which take values in the operators on the Hilbert space of the quantum sector. The classical variables are commuting numbers whereas $[q_i,q_j]=[p_i,p_j]=0$, $[q_i,p_j]=i\hbar\delta_{ij}$, as usual. The standard proposal for the quantum-classical bracket is \begin{equation} (A,B)_s=(A,B)_q+ \frac{1}{2}((A,B)_c-(B,A)_c)\,. \label{eq:10} \end{equation} (The Poisson bracket $\{A,B\}$ is defined by eq.~(\ref{eq:2}) also when $A$ and $B$ are non commuting quantities.) This dynamical bracket has been proposed by various authors ~\cite{Boucher:1988ua,Aleksandrov:1981,Prezhdo:1997gs} starting from different considerations.
It should be noted, however, that \cite{Boucher:1988ua} uses a Schr\"odinger picture and so this bracket is used only to evolve the density matrix. This will be discussed further in section~\ref{sec:4}. The bracket of ~\cite{Anderson:1995tn}, $(A,B)_s^\prime=(A,B)_q+ (A,B)_c$, is similar except that it is not antisymmetric. In \cite{Prezhdo:1997gs} a Wigner representation is chosen for the quantum operators. This bracket can be obtained as follows. Let us start from a fully quantum system with two sectors. The Hilbert space will be ${\cal H}_q\otimes{\cal H}_c$ with ${\cal H}_{q,c}= {\rm L}^2({\rm R}^{n_{q,c}})$. In order to take a classical limit later, let us apply a Wigner transformation to the sector ${\cal H}_c$: \begin{equation} A(x,k;\hbar_c)= \int d^{n_c}y e^{-iy\cdot k/\hbar_c}\langle x+\frac{1}{2}y|\hat{A}|x-\frac{1}{2}y\rangle \,. \label{eq:5} \end{equation} Here $\hat{A}$ is the original operator on the full Hilbert space ${\cal H}_q\otimes{\cal H}_c$. $|x\rangle$ is a basis state with well-defined position in the space ${\cal H}_c$ only. Therefore, $A$ is an operator on ${\cal H}_q$ and a function on the phase space spanned by $x_i$ and $k_i$, $i=1,\dots,n_c$. This transformation can be inverted so that $\hat{A}$ can be recovered from $A$; thus $A$ is a faithful representation of $\hat{A}$. The representation will depend on the positive parameter $\hbar_c$ which is entirely arbitrary. The Wigner transformation naturally defines a product among functions on the phase space, namely (with obvious notation) $A*B$ is defined as the Wigner representation of $\hat{A}\hat{B}$. Of course, if $A$, $B$ are regarded as $\hbar_c$-independent functions, the operation represented by $*$ will depend on $\hbar_c$ explicitly. The commutator $[\hat{A},\hat{B}]$ is represented by $[A,B]_*=A*B-B*A$ and so the fully quantum dynamical bracket is represented by $\frac{1}{i\hbar}[A,B]_*$: \begin{equation} (\hat{A},\hat{B})_q=\frac{1}{i\hbar}[\hat{A},\hat{B}]\to \frac{1}{i\hbar}[A,B]_* \,. \end{equation} In order to obtain the dynamical bracket of the mixed quantum-classical system, it remains to take the classical limit in the sector ${\cal H}_c$. This can be done using the identity \begin{equation} e^{ix\cdot k/\hbar_c}=(2\pi\hbar_c)^{n_c} e^{i\hbar_c\partial_x\cdot\partial_k}\delta(x)\delta(k)\,, \end{equation} which allows one to express the product $*$ as \begin{equation} (A*B)(x,k)= e^{\frac{1}{2}i\hbar_c(\partial^{(A)}_x\cdot\partial^{(B)}_k -\partial^{(A)}_k\cdot\partial^{(B)}_x)}A(x,k)B(x,k)\,. \label{eq:6} \end{equation} Here, $\partial^{(A)}_x$ means derivative of the $x$ dependence in $A$ only, etc. This formula is convenient for studying the limit of small $\hbar_c$. An expansion in powers of $\hbar_c$ gives \begin{equation} A*B= AB+\frac{i\hbar_c}{2}\{A,B\}+O({\hbar_c}^2)\,. \label{eq:7} \end{equation} Therefore, the dynamical bracket takes the form \begin{equation} \frac{1}{i\hbar}[A,B]_*= \frac{1}{i\hbar}[A,B]+ \frac{1}{2}\frac{\hbar_c}{\hbar}\left(\{A,B\}-\{B,A\}\right) +\frac{\hbar_c}{\hbar}O(\hbar_c)\,. \label{eq:8} \end{equation} Taking now $\hbar_c=\hbar$ and neglecting terms of $O(\hbar)$ one gets \begin{equation} \frac{1}{i\hbar}[A,B]+ \frac{1}{2}\left(\{A,B\}-\{B,A\}\right) \,, \end{equation} which is just the quantum-classical bracket $(\,,\,)_s$ defined in eq.~(\ref{eq:10}). The idea would be that neglecting higher order terms in $\hbar_c$ corresponds to the classical limit in the sector ${\cal H}_c$.
In fact, when there is no quantum sector present, $n_q=0$, $A$ and $B$ are commuting quantities and the limit $\hbar=\hbar_c\to 0$ of $\frac{1}{i\hbar}[A,B]_*$ is well-defined and gives the Poisson bracket. On the other hand, if there is no classical sector, $n_c=0$, all terms containing $\hbar_c$ vanish (cf. eq.~(\ref{eq:6})) and the prescription reproduces the usual quantum commutator. The construction of this bracket is somewhat tricky and in fact it does not define a consistent coupling between the classical and quantum sectors. As already noted, it is not a derivation (it does not satisfy Leibniz's rule). Even if one does not insist on this requirement, the bracket $(\,,\,)_s$ does not define a canonical structure because it fails to fulfill the Jacobi identity. This is readily checked by taking three observables $A=qx$, $B=qpx$ and $C=pk^2$, where $q$, $p$ are position and momentum variables of a one dimensional quantum subsystem, and $x$, $k$ refer to the position and momentum of the classical subsystem, also one dimensional. By direct computation one finds \begin{equation} ((A,B)_s,C)_s+((B,C)_s,A)_s+((C,A)_s,B)_s=-\frac{1}{2}(i\hbar)^2\,. \end{equation} (In order to show that the Jacobi identity is violated, it is necessary to use at least two cubic operators. The identity is preserved if all the operators involved are at most quadratic in $x$, $k$, $q$ and $p$. Also, for the identity to fail at least two of the operators should be of mixed quantum-classical type.) Since the product $A*B$ is just a (faithful) representation of the ordinary product of operators, it is associative and thus the corresponding commutator $[A,B]_*$ satisfies the Jacobi identity for any value of $\hbar_c$. The violation of the Jacobi identity in $(\,,\,)_s$ comes from the truncation of the commutator at $O(\hbar)$ after taking $\hbar_c=\hbar$. Expanding the exact (untruncated) commutator in powers of $\hbar_c$ one finds \begin{equation} [A,B]_*={\cal C}_0(A,B) +\hbar_c{\cal C}_1(A,B) +\hbar_c^2{\cal C}_2(A,B)+\cdots \,. \end{equation} The coefficients ${\cal C}_n$ are independent of $\hbar_c$ by definition and can be computed using eq.~(\ref{eq:6}). In particular, the coefficients ${\cal C}_0$ and ${\cal C}_1$ can be read off from eq.~(\ref{eq:8}), being the commutator and the Poisson bracket respectively. The Jacobi identity then yields a separate identity for each power of $\hbar_c$ \begin{eqnarray} 0 &=& {\cal C}_0({\cal C}_0(A,B),C)+\hbox{c.p.} \nonumber \\ 0 &=& {\cal C}_0({\cal C}_1(A,B),C) +{\cal C}_1({\cal C}_0(A,B),C) +\hbox{c.p.} \\ 0 &=& {\cal C}_0({\cal C}_2(A,B),C) +{\cal C}_1({\cal C}_1(A,B),C) +{\cal C}_2({\cal C}_0(A,B),C) +\hbox{c.p.} \nonumber \end{eqnarray} (Here c.p. stands for cyclic permutations of $A$, $B$, and $C$.) When there is just one quantum sector, the only non vanishing coefficient is ${\cal C}_0$ and the first equation yields the Jacobi identity for the commutator. On the other hand, when there is just a classical sector, ${\cal C}_0$ vanishes and the third equation yields the Jacobi identity for the Poisson bracket (of commuting quantities). If there are two sectors ${\cal C}_0$ no longer vanishes and keeping only ${\cal C}_0+\hbar_c{\cal C}_1$, as in $(\,,\,)_s$, violates the Jacobi identity at $O({\hbar_c}^2)$. As a rule, the operation of keeping only the leading order in the expansion preserves the Jacobi identity since this can be seen as a limit case. In the purely quantum case the leading order is ${\cal C}_0$ and in the purely classical case it is ${\cal C}_1$.
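The violation quoted above for $A=qx$, $B=qpx$ and $C=pk^2$ is easy to reproduce symbolically. A minimal sketch (ours, not part of the original argument), in Python with \texttt{sympy}, represents the quantum sector by Weyl symbols, so that operator products become Moyal star products in $(q,p)$:
\begin{verbatim}
# Illustrative check of the Jacobi violation of ( , )_s for
# A = q x, B = q p x, C = p k^2 (Weyl-symbol / Moyal representation
# of the quantum sector; all names are ours).
import sympy as sp

hbar = sp.symbols('hbar', positive=True)
q, p, x, k = sp.symbols('q p x k')   # Weyl symbols: commuting variables

def star(f, g, nmax=8):
    # Moyal star product in (q,p); exact for polynomials once nmax
    # exceeds their degree
    total = sp.expand(f*g)
    for n in range(1, nmax + 1):
        for j in range(n + 1):
            df = sp.diff(f, *([q]*(n - j) + [p]*j))
            dg = sp.diff(g, *([q]*j + [p]*(n - j)))
            total += ((sp.I*hbar/2)**n/sp.factorial(n)
                      * (-1)**j*sp.binomial(n, j)*df*dg)
    return sp.expand(total)

def bracket_s(A, B):
    # ( , )_s of eq. (10): quantum commutator part plus symmetrized
    # Poisson bracket in (x,k), with operator products as star products
    quantum = (star(A, B) - star(B, A))/(sp.I*hbar)
    AB = star(sp.diff(A, x), sp.diff(B, k)) - star(sp.diff(A, k), sp.diff(B, x))
    BA = star(sp.diff(B, x), sp.diff(A, k)) - star(sp.diff(B, k), sp.diff(A, x))
    return sp.expand(quantum + (AB - BA)/2)

A, B, C = q*x, q*p*x, p*k**2
jac = (bracket_s(bracket_s(A, B), C) + bracket_s(bracket_s(B, C), A)
       + bracket_s(bracket_s(C, A), B))
print(sp.simplify(jac))   # hbar**2/2, i.e. -(1/2)*(I*hbar)**2 as in the text
\end{verbatim}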
However, in general, working with the series truncated at $O(\hbar_c^n)$ preserves the Jacobi identity only modulo $O(\hbar_c^n)$. Contrary to the opinion expressed in Ref.~\cite{Prezhdo:1997gs}, we think a dynamics leading to the bracket in eq.~(\ref{eq:10}) cannot be consistent. In our opinion, it is clear that any dynamics in a Heisenberg picture necessarily defines a concrete dynamical bracket and a consistent dynamics can only yield a consistent bracket. Therefore, stating that the resulting dynamical bracket is ``naive'' or ``simplistic'' ~\cite{Prezhdo:1997gs} does not solve the consistency problem. As follows from our previous discussion, the bracket $[\,,\,]_*$ is indeed perfectly consistent, so the bracket $(\,,\,)_s$ would enjoy the Jacobi identity if it were a true limit case of $[\,,\,]_*$, but it is not. It is simply an arbitrary prescription. This fact, as well as the fact that the Jacobi identity is a non linear relation, accounts for the failure of that bracket to be consistent. This is a good place to illustrate what we mean by an internally consistent semiquantization. The semiquantization given by $(\,,\,)_s$ is not internally consistent since, checking the Lie bracket property, one finds that it is not preserved; therefore it can be concluded, without using external information, that something is missing and that $(\,,\,)_s$ is intrinsically an approximation. As discussed above, a non-Lie dynamical bracket introduces violations of symmetries and can have a limited range of applicability only. Since $(\,,\,)_s$ violates Jacobi at second order in $\hbar_c$, it would make sense to add the second order term ${\cal C}_2$, so that Jacobi is preserved. However, new violations would appear at higher orders and, in addition, the classical sector would be less ``classical''. It is to be expected that a systematic correction of the bracket in order to exactly fulfill the Jacobi identity would end up in a quantum-quantum system (except, maybe, if only particular subclasses of observables are involved). At this point we can recall that a similar conclusion was reached by DeWitt long ago~\cite{DeWitt:1962}. He showed that consistency with the uncertainty principle in the quantum sector requires introducing systematic corrections in the system in such a way that one ends up with a fully quantum system in both sectors. Of course, there is no problem in having a mixed system if the two sectors are never coupled, but in general, only fully classical or fully quantum dynamics are consistent. His construction aimed at obtaining a consistent action functional to describe the mixed system, thus effectively implying a canonical structure. \section{Obstructions to a canonical semiquantization} \label{sec:3} The semiquantization problem, that is, the construction of a consistent dynamics for a mixed quantum-classical system, is reminiscent of the quantization problem. The quantization problem in its most naive form consists in associating to each function $A(x,k)$ on a classical phase space an operator $\hat{A}=A(q,p)$ in L$^2$(R$^n$) using the quantization rules $x_i\to q_i$, $k_i\to p_i$ and $1\to I$ (the identity operator) and choosing the ordering of the operators in such a way that $\{A,B\}\to (i\hbar)^{-1}[\hat{A},\hat{B}]$. As is known, the quantization problem posed in this form does not have a solution~\cite{VanHove:1951}; for arbitrary functions there is no way to choose the order of the operators so that the Poisson bracket goes into commutator.
The semiquantization problem can also be seen as a problem of ordering of operators, since the trouble arises because $\{A,B\}$ fails to fulfill the Jacobi identity when $A$ and $B$ are not commuting. As we have seen, the naive antisymmetrization implied by $(\,,\,)_s$ is insufficient to produce a Lie bracket in general. The naive quantization problem has a solution when restricted to the subspace of quadratic operators (namely, using $xk\to \frac{1}{2}(qp+pq)$) and likewise $(\,,\,)_s$ is a Lie bracket when restricted to the subspace of at most quadratic operators. This is because in that case the coefficients ${\cal C}_n$ vanish for $n\ge 2$. In order to solve the semiquantization problem, we could start trying different combinations of commutators and Poisson brackets, or equivalently different orderings for the operators. Instead of that, we will show that under very general conditions the problem does not have a solution within the canonical framework. This puts a strong constraint on the kind of semiquantizations one should look for. Let ${\cal A}_c$ be the set of classical observables, i.e., real functions on the phase space of the classical sector, and ${\cal A}_q$ be the set of quantum observables, i.e., operators on the Hilbert space of the quantum sector. The full set of observables is ${\cal A}= {\cal A}_c\otimes{\cal A}_q$, so a general observable will be of the form $A=\sum_{ij}C_iQ_j$ where $C_i$ and $Q_j$ are purely classical and purely quantum observables, respectively. Let $(\,,\,)$ denote the semiquantum dynamical bracket, which will be assumed to be a Lie bracket. We will consider dynamics satisfying the following postulates \begin{equation} (CQ,C^\prime) = (C,C^\prime)_cQ \,,\quad (CQ,Q^\prime) = (Q,Q^\prime)_qC \,, \label{eq:17} \end{equation} for arbitrary purely classical observables $C$, $C^\prime$ and arbitrary purely quantum observables $Q$, $Q^\prime$. These postulates can be justified as follows. In a system formed by two quantum subsystems, the observables of one sector commute with those of the other sector and so $[A_1A_2,A_1^\prime]= [A_1,A_1^\prime]A_2$ for arbitrary $A_1$, $A_1^\prime$ in one sector and $A_2$ in the other sector. If the quantum-classical system is a limit of the quantum-quantum one, the above postulates follow. Note that the postulates are free of any ordering problem. For another argument, consider that the Hamiltonian of the semiquantum system is of the form $H= C^\prime+Q^\prime$. Since the two sectors are not coupled by an interaction term, each sector should evolve separately as if it were isolated. This implies that when the Hamiltonian is purely classical, a classical observable should evolve classically and furthermore, a quantum observable must not evolve. Reversing the roles of quantum and classical, and putting this in infinitesimal form, it follows that \begin{equation} (C,C^\prime) = (C,C^\prime)_c \,,\quad (Q,Q^\prime) = (Q,Q^\prime)_q \,, \quad (Q,C)=0\,. \label{eq:18} \end{equation} These relations are weaker than our postulates. On the other hand, our postulates can be derived from these if, in addition, it is assumed that $(\,,C)$ and $(\,,Q)$ should be derivations. By themselves the axioms in eqs.~(\ref{eq:18}) are too weak to sufficiently constrain the form of the bracket. In order to be able to draw definite conclusions we will make a stronger assumption which is equivalent to our postulates in eqs.~(\ref{eq:17}).
Namely, we demand that when the Hamiltonian is classical, an observable $QC$ should evolve into $QC(t)$ where $C(t)$ is the classical evolution of $C$. In infinitesimal form this yields the first postulate. The second postulate follows similarly. This constraint on the form of the evolution of $QC$ is automatically satisfied if the quantum-classical system derives as a limit from a quantum-quantum system and therefore it is a very natural requirement. Note that there is an implicit assumption of universality in the argument, i.e. the dynamical bracket should be the same for all Hamiltonians and any observable can be a Hamiltonian. Then we can state the following theorem: {\bf Theorem 1:} Let ${\cal A}={\cal A}_c\otimes{\cal A}_q$ be of the position-momentum type in both sectors. Then, no Lie bracket $(\,,\,)$ in ${\cal A}$ can fulfill the axioms \begin{equation} (A,C) = (A,C)_c \,,\quad (A,Q) = (A,Q)_q \, \label{eq:19} \end{equation} for all $C\in{\cal A}_c$, $Q\in{\cal A}_q$ and $A\in{\cal A}$. Note that, because all observables are of the form $\sum_{ij}C_iQ_j$, these axioms are equivalent to those in eq.~(\ref{eq:17}). The bracket $(\,,\,)_s$ satisfies these axioms and thus it is a particular case. It should be remarked that the theorem only applies to systems described by position and momentum conjugate variables. Other quantum-classical mixtures, e.g., a quantum sector with a finite dimensional Hilbert space such as a spin system plus some classical sector, are not directly ruled out by this theorem. Also, the incompatibility refers to a semiquantization of the complete class of observables. As noted above, the bracket $(\,,\,)_s$, which fulfills our postulates, is a Lie bracket in the restricted subclass of observables which are at most quadratic in $q,p,x,k$. What follows in this section is devoted to the proof of this theorem. In order to prove the incompatibility stated in the theorem, let us assume that $(\,,\,)$ is a Lie bracket which satisfies our axioms. It will be sufficient to consider a system with a one-dimensional quantum sector and a one-dimensional classical sector. Also, in what follows we will take $\hbar=1$ since keeping $\hbar$ variable (but strictly positive) does not add anything to the proof. Let us consider the set of observables \begin{equation} e_r=e^c_re^q_r\,,\quad e^c_r=e^{ik_rx-ix_rk}\,,\quad e^q_r= e^{ip_rq-iq_rp}\,. \end{equation} Here $x_r$, $k_r$, $q_r$ and $p_r$ are arbitrary real numbers and $x$, $k$, $q$ and $p$ are the dynamical variables. The observables of the form $e^c_r$ form a basis of ${\cal A}_c$ and those of the form $e^q_r$ form a basis of ${\cal A}_q$. This latter statement is more clearly seen by using the form $e^q_r= e^{-\frac{1}{2}ip_rq_r} e^{ip_rq}e^{-iq_rp}$, since $e^{ip_rq}$ and $e^{-iq_rp}$ are bases of the operators which are functions of $q$ and $p$ respectively and any operator in L$^2$(R) can be normal ordered putting the $q$ at the left of the $p$. Therefore, $e_r$ defines a (linear) basis of ${\cal A}$. Let us see that the bracket can be determined up to a c-number function. Using the postulates, it is immediate that \begin{equation} (e_r,x)=ix_re_r\,,\quad (e_r,k)=ik_re_r\,,\quad (e_r,q)=iq_re_r\,,\quad(e_r,p)=ip_re_r\,. \end{equation} The Jacobi identity can be expressed as \begin{equation} \delta_C(A,B)=(\delta_C A,B)+(A, \delta_C B)\,,\quad \delta_C :=(\,, C)\,, \label{eq:22} \end{equation} for arbitrary $A$, $B$ and $C$.
Therefore, the Jacobi identity requires
\begin{eqnarray}
((e_r,e_s),x) &=& i(x_r+x_s)(e_r,e_s)\,, \quad ((e_r,e_s),k) = i(k_r+k_s)(e_r,e_s)\,, \nonumber \\
((e_r,e_s),q) &=& i(q_r+q_s)(e_r,e_s)\,, \quad ((e_r,e_s),p) = i(p_r+p_s)(e_r,e_s)\,.
\end{eqnarray}
On the other hand, $(e_r,e_s)$ will be a linear combination of the $e_t$, and the previous equations imply that $x_t=x_r+x_s$, $k_t=k_r+k_s$, $q_t=q_r+q_s$ and $p_t=p_r+p_s$. In summary,
\begin{eqnarray}
(e_r,e_s) &=& F(q_r,p_r,x_r,k_r;q_s,p_s,x_s,k_s) e^{i(k_r+k_s)x-i(x_r+x_s)k+i(p_r+p_s)q-i(q_r+q_s)p} \nonumber \\
&:=& F_{rs}e_{r+s} \,.
\label{eq:21}
\end{eqnarray}
Here $F$ is some real function which depends only on the particular bracket. The postulates are consistent with this form and correspond to
\begin{eqnarray}
F(q_r,p_r,x_r,k_r;0,0,x_s,k_s) &:=& F^c_{rs} = v_{rs}\,, \nonumber \\
F(q_r,p_r,x_r,k_r;q_s,p_s,0,0) &:=& F^q_{rs} = 2\sin(\frac{u_{rs}}{2})\,,
\label{eq:25}
\end{eqnarray}
where we have introduced the variables
\begin{equation}
u_{rs}=p_rq_s-q_rp_s\,,\quad v_{rs}= k_rx_s-x_rk_s\,.
\end{equation}
The functions $F^c$ and $F^q$ come from computing $(e^c_r,e^c_s)_c$ and $(e^q_r,e^q_s)_q$, respectively. Up to now we have imposed the Jacobi identity only when one of the operators is $x$, $k$, $q$ or $p$. The full Jacobi identity follows from considering $((e_r,e_s),e_t)$. It is immediate that the Lie bracket property can be expressed as
\begin{eqnarray}
&& F_{rs} = -F_{sr} \,,\quad \nonumber \\
&& F_{rs}F_{r+s,t}+F_{st}F_{s+t,r}+F_{tr}F_{t+r,s} = 0 \,.
\label{eq:26}
\end{eqnarray}
It is interesting to note that eqs.~(\ref{eq:26}) are valid in a purely classical or purely quantum case. So $F^c$ and $F^q$ (and, in fact, their generalization for any number of degrees of freedom) are solutions of those relations. The quantum-classical bracket of the previous section gives
\begin{equation}
F^s_{rs}= 2\sin(\frac{u_{rs}}{2}) + v_{rs}\cos(\frac{u_{rs}}{2})\,.
\end{equation}
This bracket comes from a classical expansion (but not a limit) around the quantum-quantum case:
\begin{eqnarray}
F^{qq}_{rs} &=& 2\sin(\frac{u_{rs}}{2})\cos(\frac{v_{rs}}{2}) + 2\cos(\frac{u_{rs}}{2})\sin(\frac{v_{rs}}{2}) \nonumber \\
&=& 2\sin(\frac{u_{rs}}{2} + \frac{v_{rs}}{2})\,,
\end{eqnarray}
(which, of course, is equivalent to a two-dimensional quantum case). $F^s$ satisfies the axioms, eqs.~(\ref{eq:25}), but fails to fulfill the Jacobi identity, the second of eqs.~(\ref{eq:26}). Let us show that eqs.~(\ref{eq:25}) and (\ref{eq:26}) are, in fact, incompatible. As proven in appendix~\ref{app:A}, these equations imply that $F$ can only depend on the combinations $u$ and $v$ introduced above, that is
\begin{equation}
F_{rs}=F(u_{rs},v_{rs})\,.
\label{EQ:29}
\end{equation}
This follows only from the Lie bracket property of $F$ and the fact that the boundary conditions, eqs.~(\ref{eq:25}), themselves depend only on $u$ and $v$. Using eq.~(\ref{EQ:29}), the postulates become
\begin{equation}
F(u,0)= 2\sin(u/2) \,, \quad F(0,v)= v\,,
\label{eq:31}
\end{equation}
and the Lie bracket conditions become
\begin{eqnarray}
&& F(u,v)=-F(-u,-v)\,, \nonumber \\
&& F(u_{rs},v_{rs})F(u_{rt}+u_{st},v_{rt}+v_{st})+\hbox{c.p.} =0 \,.
\label{eq:30}
\end{eqnarray}
As proven in appendix~\ref{app:A}, these Lie bracket conditions, plus $\partial_uF(0,0)=\partial_vF(0,0)=1$, only admit the solutions
\begin{equation}
F(u,v)=\frac{1}{h}\sin(hu+hv)\,,
\label{EQ:33}
\end{equation}
($h$ being an arbitrary constant) or the degenerate case $F(u,v)=u+v$; neither of these can accommodate the two postulates, eqs.~(\ref{eq:31}), for any value of $h$. This implies that the function $F$ is consistent with the Jacobi identity only if it is of purely quantum type, $F(x)=2\sin(x/2)$, or of purely classical type, $F(x)=x$ (with $x=u+v$), and does not admit mixed types. This completes the proof.

The proof of the incompatibility of the axioms can also be done by an alternative method which is constructive (but requires performing symbolic calculations with the help of a computer). We will devote the remainder of this section to a discussion of this method. Let us consider a basis of the space ${\cal A}$ with observables of the form $q^rp^sx^tk^\ell$, where $r,s,t,\ell= 0,1,\dots$. (The order $qpxk$ will be taken as the canonical order of these variables.) Each basis element can be assigned a degree given by $n=r+s+t+\ell$. Let us use the notation $C_n$, $Q_n$, $M_n$ and $A_n$ to refer to basis elements of degree $n$ which are purely classical (i.e., $r=s=0$), purely quantum ($t=\ell=0$), mixed quantum-classical ($r+s > 0$ and $t+\ell > 0$) and arbitrary, respectively. Using the axioms, the brackets of each pair of basis elements can be worked out except those of the form $(M_n,M_{n^\prime})$ (where necessarily $n,n^\prime \ge 2$). Using the Jacobi identity, as in eq.~(\ref{eq:22}), for $\xi=q,p,x,k$, yields
\begin{eqnarray}
((M_n,M_{n^\prime}),\xi) &=& ((M_n,\xi),M_{n^\prime})+ (M_n,(M_{n^\prime},\xi)) \nonumber \\
&=& (M_{n-1},M_{n^\prime})+ (M_n,M_{n^\prime-1})\,.
\end{eqnarray}
Since the knowledge of $(A,\xi)$ for $\xi=q,p,x,k$ determines $A$ completely up to an additive c-number constant, this relation allows one to determine $(M_n,M_{n^\prime})$ by induction, up to an additive constant. Such constants play a role similar to that of the function $F$ in the plane-wave basis used above. Their number increases rapidly with $n$ and $n^\prime$. We proceed by selecting values for these constants so that the Jacobi identity is fulfilled for arbitrary operators, if possible. Because the Jacobi identity is trivial unless two of the operators involved are of mixed type, only the case $\langle M,M,A\rangle$ gives information on the constants. At step 1 we consider the brackets of the form $(M_2,M_2)$, which contain 6 unknown constants. These constants are uniquely determined by imposing the Jacobi identity on the triples $\langle M_2,M_2,Q_2 \rangle$ and $\langle M_2,M_2,C_2\rangle$. The Jacobi identity for $\langle M_2,M_2,M_2\rangle$ turns out to be fulfilled automatically. From now on, these 6 constants are fixed to their unique values. At step 2, we consider $(M_2,M_3)$, which contain 48 unknowns. These constants are uniquely determined by imposing Jacobi on $\langle M_2,M_3,Q_2\rangle$ and $\langle M_2,M_3,C_2\rangle$, and again Jacobi for $\langle M_2,A_3,M_2 \rangle$ comes out automatically. The 48 unknowns are then fixed to their unique values. At step 3, the 100 unknowns in $(M_2,M_4)$ are fixed to their unique values, which are determined from $\langle M_2,M_4,Q_2\rangle$ and $\langle M_2,M_4,C_2\rangle$, and Jacobi for $\langle M_2,A_4,M_2\rangle$ is automatic. All these constants are those corresponding to the bracket $(\,,\,)_s$ of section~\ref{sec:2}. Finally, at step 4 the procedure breaks down. The 66 unknowns of $(M_3,M_3)$ are uniquely determined by $\langle M_3,Q_3,M_2\rangle$ and $\langle M_3,C_3,M_2\rangle$, but they turn out to be inconsistent with the Jacobi identity for $\langle M_3,M_3,M_2\rangle$.
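As a complement to both the analytic argument and the symbolic computation just described, the incompatibility can also be verified numerically in the plane-wave variables. The following minimal sketch (in Python; the sample points are illustrative choices of ours, not taken from the text) evaluates the Jacobi combination of the second of eqs.~(\ref{eq:26}) for the quantum-quantum solution $F^{qq}$ and for the quantum-classical bracket $F^s$: the former vanishes to machine precision while the latter does not.
\begin{verbatim}
import numpy as np

def u(r, s):
    """u_{rs} = p_r q_s - q_r p_s, for labels r = (q, p, x, k)."""
    return r[1] * s[0] - r[0] * s[1]

def v(r, s):
    """v_{rs} = k_r x_s - x_r k_s."""
    return r[3] * s[2] - r[2] * s[3]

def jacobi(F, r, s, t):
    """Left-hand side of F_{rs} F_{r+s,t} + c.p. = 0 (second of eqs. (26))."""
    return (F(u(r, s), v(r, s)) * F(u(r + s, t), v(r + s, t))
            + F(u(s, t), v(s, t)) * F(u(s + t, r), v(s + t, r))
            + F(u(t, r), v(t, r)) * F(u(t + r, s), v(t + r, s)))

F_qq = lambda uu, vv: 2.0 * np.sin(0.5 * (uu + vv))                    # quantum-quantum
F_s  = lambda uu, vv: 2.0 * np.sin(0.5 * uu) + vv * np.cos(0.5 * uu)   # bracket (,)_s

rng = np.random.default_rng(1)
for _ in range(5):
    r, s, t = rng.uniform(-2, 2, size=(3, 4))   # random labels (q, p, x, k)
    print(f"Jacobi residue: F_qq = {jacobi(F_qq, r, s, t):+.2e}"
          f"   F_s = {jacobi(F_s, r, s, t):+.2e}")
\end{verbatim}
Both functions satisfy the boundary conditions, eqs.~(\ref{eq:31}); only $F^{qq}$ gives a vanishing residue.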
\section{Quantum backreaction and positivity}
\label{sec:4}
In this section we want to discuss obstructions to mixing quantum and classical systems which are related not to a canonical structure but to the requirement of positivity. Here we no longer require the existence of a canonical structure plus a Heisenberg picture, etc. In view of this we will need a definition of what is meant by a quantum-classical system. We will demand that the quantum variables obey the usual quantum commutation relations and that the classical variables commute. Because the quantum variables do not commute, the usual proof shows that they must be subject to the uncertainty principle. They have primary quantum fluctuations. (They are primary because they exist even in the absence of a coupling to other degrees of freedom.) If the two sectors are coupled, the classical observables may have induced fluctuations, the so-called quantum backreaction. The two requirements of primary fluctuations in the quantum variables and commutation of the classical variables are just what we mean by ``quantum-classical'' mixing. We want to argue that even these weak conditions, plus another natural requirement, namely, that the mixed quantum-classical system must correspond to some limit case of a quantum-quantum system, forbid the possibility of a quantum backreaction on the classical sector. Since such secondary fluctuations are expected to appear whenever the two sectors are coupled by an interaction term, this would suggest that no consistent quantum-classical mixing exists. The requirement that the mixed system should be a limit of a full quantum system seems weak but still has nontrivial consequences: if some quantity of the quantum-quantum system is always positive, the corresponding quantity must at least be non negative in the quantum-classical system. For instance, in the purely quantum theory the variance of any observable must be non negative, i.e., $\langle A^2\rangle \ge \langle A\rangle^2$, where $\langle \,\rangle$ refers to the quantum average. The equal sign corresponds to an observable which is free of quantum fluctuations, and so it implies $\langle f(A)\rangle = f(\langle A\rangle)$ for any function $f(x)$ as well. Then the same properties must hold in the mixed quantum-classical system. Of course, they hold in the purely classical system in particular. This is equivalent to saying that whatever the measure corresponding to the fluctuations in the mixed system (induced by the quantum sector) may be, it must be non negative. As is well known in quantum mechanics, if the commutator of two observables does not vanish on some quantum state vector, at least one of the observables must present quantum fluctuations in that state. Here we will need a reciprocal of this statement:

{\bf Theorem 2:} In a purely quantum theory, let $|\psi_0\rangle$ be the normalized ground state and $A$ any observable. Then i)
\begin{equation}
0 \le -\frac{i\hbar}{2}\langle[A,\dot A]\rangle_{\psi_0}\,,
\label{eq:35}
\end{equation}
and ii) when the ground state is not degenerate, the equal sign holds if and only if $A$ is free of dispersion in $|\psi_0\rangle$.

This can be proved as follows.
Let $H$ be the Hamiltonian and $E_0$ the ground state energy, $H|\psi_0\rangle=E_0|\psi_0\rangle$; then, because $H-E_0$ is non negative and $A$ is self-adjoint, the operator $A(H-E_0)A$ is also non negative. Therefore
\begin{equation}
0\le \langle A(H-E_0)A\rangle_{\psi_0}= -\frac{1}{2}\langle[A,[A,H]]\rangle_{\psi_0}\,.
\end{equation}
Eq.~(\ref{eq:35}) then follows from the relation $i\hbar\dot A= [A,H]$. To show ii), assume that $A$ has no dispersion in the ground state, $\langle A^2\rangle_{\psi_0}=\langle A\rangle^2_{\psi_0}$; then $A|\psi_0\rangle$ is proportional to $|\psi_0\rangle$ and automatically $\langle[A,B]\rangle_{\psi_0}=0$ for any $B$. Conversely, assume that the equal sign applies in eq.~(\ref{eq:35}); then $0=\langle A(H-E_0)A\rangle_{\psi_0}$, and so $A|\psi_0\rangle$ is another ground state. If the ground state is not degenerate, $A|\psi_0\rangle$ must be proportional to $|\psi_0\rangle$ and $A$ is free of dispersion in the ground state. A corollary of this theorem is that when $A$ commutes with $\dot A$, $A$ cannot have quantum fluctuations in a non-degenerate ground state, so in some sense it is a reciprocal of the argument leading to the uncertainty principle. It has immediate consequences for the semiquantization problem. For a mixed system at zero temperature (and so in the ground state) the classical variables $x_i$ and $k_i=\dot{x}_i$ will commute with each other and, being the limit of a quantum-quantum system, they will be free of quantum fluctuations. In other words, since any induced quantum fluctuations would spoil the commutativity of the classical variables, in a quantum-classical mixing there cannot be quantum backreaction on the classical sector. It should be noted that there are actually proposals of mixed quantum-classical systems in which the classical variables commute and at the same time have secondary quantum fluctuations. Such prescriptions exist, without invalidating our conclusions above, because they violate positivity of the measure of the quantum fluctuations. That is, some positive observables fail to have a positive expectation value. A first example is the proposal in~\cite{Boucher:1988ua}. There the quantum-classical system is described in terms of a density matrix which depends on the quantum variables $q,p$ and the classical variables $x,k$. The evolution is described in the Schr\"odinger picture in the form $\dot\rho=(\rho,H)$, where $H$ is the Hamiltonian and $(\,,\,)$ is the dynamical bracket. In~\cite{Boucher:1988ua} the bracket is completely determined by imposing several natural requirements: it should reduce to the commutator or the Poisson bracket as particular cases, the evolution should preserve the hermiticity and the trace of the density matrix, and it should be invariant under classical canonical transformations and quantum unitary transformations. The result is again the bracket $(\,,\,)_s$ in eq.~(\ref{eq:10}). As noted by the authors, there is, however, one essential requirement which is violated by this construction, namely, if one starts with a positive density matrix $\rho$, its positivity is not preserved by the evolution, in general. It is noteworthy that, since the observables do not evolve, this construction does not introduce any intrinsic time dependence in the dynamics. For instance, the commutator $[q,p]=i\hbar$ is automatically time independent and the classical variables always commute.
As noted, when $(\,,H)$ is not a derivation, the Heisenberg and Schr\"odinger dynamics are no longer equivalent, and there is no contradiction with our discussion above, which refers to the Heisenberg picture. Nevertheless, because Jacobi is not satisfied, there will be problems implementing time-independent symmetry transformations, in addition to the positivity problem. Another proposal is that of~\cite{Salcedo:1994sn}. There, it is noted that the stochastic quantization program~\cite{Parisi:1981ys,Damgaard:1987rr,Namiki:1992} leads to a natural definition of a semiquantized dynamics. As may be recalled, in the stochastic quantization approach the dynamical variables evolve in a fictitious time, the simulation time, following a stochastic differential equation, the Langevin equation, which corresponds to a particular Monte Carlo method to sample the Euclidean path integral of the system. For instance, let $S[\phi]$ be the Euclidean action of a quantum field theory with $n$ scalar fields $\phi_i(x)$, $i=1,\dots,n$, in a flat four-dimensional space-time. Then the functional integral with Boltzmann weight $e^{-S/\hbar}$ is correctly sampled by the equilibrium distribution of a random walk described by the following Langevin equation~\cite{Damgaard:1987rr}
\begin{equation}
\frac{\partial\phi_i(x;\tau)}{\partial\tau} =-\frac{\delta S[\phi]}{\delta\phi_i(x;\tau)} +\sqrt{\hbar}\eta_i(x;\tau) \,.
\end{equation}
Here $\tau$ is the simulation time and $\eta_i(x;\tau)$ are independent centered Gaussian stochastic variables normalized to
\begin{equation}
\langle\!\langle\eta_i(x;\tau)\eta_{i'}(x';\tau')\rangle\!\rangle = 2\delta_{ii'}\delta(x-x')\delta(\tau-\tau')\,.
\end{equation}
The variables $\eta_i(x;\tau)$ introduce the quantum fluctuations in the system. In their absence, the fields $\phi$ would fall into a solution of the (Euclidean) classical equations of motion, $\delta S/\delta\phi_i(x)=0$. The $\hbar$ dependence of the Langevin equation suggests a natural definition for the semiquantized system~\cite{Salcedo:1994sn}, namely, to replace $\hbar$ by $\hbar_i=0,1$, where $1$ corresponds to a quantum degree of freedom and $0$ to a classical one. (We will use units $\hbar=1$ from now on.) The classical degrees of freedom will not have primary quantum fluctuations but, if they are coupled to the quantum sector, they will present induced secondary fluctuations. In order to see what consequences follow from such a proposal, let us take the example studied in \cite{Salcedo:1994sn}. Consider a system composed of two relativistic fields with a quadratic action
\begin{equation}
S(\phi_1,\phi_2)=\int d^4x\left( \frac{1}{2}(\partial\phi_1)^2+\frac{1}{2}m_1^2\phi_1^2+ \frac{1}{2}(\partial\phi_2)^2+\frac{1}{2}m_2^2\phi_2^2+g\phi_1\phi_2 \right)\,.
\end{equation}
Since the action is translationally invariant, it is convenient to use a momentum representation:
\begin{equation}
S(\phi_1,\phi_2)=\int \frac{d^4k}{(2\pi)^4} \,\frac{1}{2}\Phi^\dagger(k) M(k)\Phi(k)\,,
\end{equation}
where
\begin{equation}
\quad \Phi(k)=\pmatrix{\tilde\phi_1(k) \cr \tilde\phi_2(k)} \,, \quad M(k)=\pmatrix{k^2+m_1^2 & g \cr g & k^2+m_2^2 \cr}\,
\end{equation}
and $\tilde\phi_i(k)$ is the Fourier transform of $\phi_i(x)$. As usual, we will assume $m_1^2,m_2^2>0$ and $m_1^2m_2^2>g^2$, so that $M(k)$ is positive definite. Because the action is quadratic, the equilibrium distribution of the Langevin equation can be obtained in closed form. (A zero-dimensional numerical illustration of the same mechanism is sketched below.)
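Although the model above is a field theory, the mechanism of induced fluctuations can already be seen in a zero-dimensional toy version of the same Langevin equation, in which the two fields are replaced by two real variables. The following sketch (Python; the parameter values, variable names and step sizes are illustrative choices of ours, not taken from~\cite{Salcedo:1994sn}) integrates this toy Langevin equation with $\hbar_1=1$, $\hbar_2=0$ and compares the measured $\langle\phi_2^2\rangle$ with the stationary value obtained from the corresponding Lyapunov equation: the nominally classical variable acquires fluctuations as soon as $g\neq 0$.
\begin{verbatim}
import numpy as np

# Zero-dimensional toy of the semiquantized Langevin equation:
#   dphi_i/dtau = -dS/dphi_i + sqrt(hbar_i) * eta_i,
#   <<eta_i(t) eta_j(t')>> = 2 delta_ij delta(t - t'),
# with S = (1/2) m1^2 phi1^2 + (1/2) m2^2 phi2^2 + g phi1 phi2.
m1sq, m2sq, g = 1.0, 2.25, 1.0            # illustrative values, m1^2 m2^2 > g^2
hbar = np.array([1.0, 0.0])               # phi1 quantum, phi2 classical
M0 = np.array([[m1sq, g], [g, m2sq]])     # Hessian of the toy action

# The stationary covariance C solves the Lyapunov equation M0 C + C M0 = 2 diag(hbar);
# for hbar = (1, 0) one finds C_22 = g^2 / ((m1^2 + m2^2)(m1^2 m2^2 - g^2)).
c22_exact = g**2 / ((m1sq + m2sq) * (m1sq * m2sq - g**2))

# Euler-Maruyama integration.
rng = np.random.default_rng(0)
dt, nsteps, nburn = 1e-3, 1_000_000, 50_000
phi, acc = np.zeros(2), 0.0
for step in range(nsteps):
    noise = rng.standard_normal(2) * np.sqrt(2.0 * hbar * dt)
    phi = phi - M0 @ phi * dt + noise
    if step >= nburn:
        acc += phi[1] ** 2
c22_sim = acc / (nsteps - nburn)

print(f"<phi2^2> simulated  = {c22_sim:.3f}")
print(f"<phi2^2> stationary = {c22_exact:.3f}")
# Agreement is up to statistical and discretization errors (a few per cent).
\end{verbatim}
In the decoupled case $g=0$ the same run gives $\langle\phi_2^2\rangle=0$ identically, since with $\hbar_2=0$ no noise ever feeds into $\phi_2$.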
The connected two-point function or propagator is given in momentum space by the following matrix~\cite{Salcedo:1994sn}
\begin{equation}
W(k)= \frac{\hbar_2(k^2+m_1^2) +\hbar_1(k^2+m_2^2)}{(k^2+m_1^2) +(k^2+m_2^2)}W_Q(k) + \frac{\hbar_1-\hbar_2}{(k^2+m_1^2) +(k^2+m_2^2)}\sigma_z\,,
\end{equation}
where $\sigma_z$ refers to the Pauli matrix ${\rm diag}(1,-1)$ and $W_Q(k)$ is the inverse matrix of $M(k)$. In this formula the $\hbar_i$ are arbitrary non negative numbers. The propagator in $x$-space is
\begin{equation}
\langle T\phi_i(y)\phi_j(x)\rangle = \int\frac{d^4k}{(2\pi)^4} e^{-ik(y-x)}\,W_{ij}(k) \,.
\end{equation}
This Green's function is directly the connected one, since $\langle \phi_i(x)\rangle=0$. It can be shown~\cite{Salcedo:1994sn} that there are no connected Green's functions of three or more points, so the system is Gaussian. From the form of $W(k)$ it follows that in the fully classical case, $\hbar_i=0$, the connected two-point function vanishes, implying that the fields are free from fluctuations. On the other hand, in the fully quantum case, $\hbar_i=1$, $W$ is just $W_Q=M^{-1}$, which is the standard quantum propagator. If $\hbar_1=1$ and $\hbar_2=0$ and, in addition, $g\ne 0$, $\langle (\phi_2(x))^2\rangle$ will not vanish, and thus $\phi_2$ is subject to induced secondary fluctuations. When $g=0$ both sectors are decoupled. On the other hand, we can obtain the equal-time commutation relations of the fields by considering the large momentum limit of the propagator:
\begin{equation}
W(k) = \frac{1}{k^2} \pmatrix{ \hbar_1 & 0 \cr 0 & \hbar_2 \cr } + O(\frac{1}{k^4})\,.
\label{eq:44}
\end{equation}
This directly implies
\begin{equation}
\delta(y^0-x^0)\langle [\phi_i(y),\dot\phi_j(x)]\rangle = \hbar_i\delta_{ij}\delta(x-y)\,.
\label{eq:45}
\end{equation}
Therefore, if $\hbar_1=1$ and $\hbar_2=0$, the field $\phi_2$ will be classical, in the sense that it commutes with its conjugate momentum, even if it is subject to quantum backreaction from the quantum field $\phi_1$. We have checked that this system is a quantum-classical mixture according to our previous definition and also that there is quantum backreaction, but we still have to see whether it preserves positivity. It can be shown~\cite{Salcedo:1994sn} that the matrix $W(k)$ is positive definite for all momenta and arbitrary non negative $\hbar_i$. However, as is well known, physical positivity corresponds rather to the stronger requirement of reflection positivity in Euclidean space~\cite{Glimm:1987}. Since the theory is quadratic, it is sufficient to study the Lehmann representation of the propagator, which comes from inserting a complete set of eigenstates:
\begin{equation}
W_{ij}(k)= \int\,d\mu\,\frac{\rho_{ij}(\mu)}{k^2+\mu}\,,
\end{equation}
where the spectral density is defined as
\begin{equation}
\rho_{ij}(q^2) = (2\pi)^3\sum_n\delta^4(p_n-q) \langle 0|\phi_i(0)|n\rangle \langle n|\phi_j(0)|0\rangle \,.
\end{equation}
Reflection positivity requires $\rho(\mu)\ge 0$. For the purely quantum case we have
\begin{equation}
W_Q(k) = \frac{P_+}{k^2+m_+^2} + \frac{P_-}{k^2+m_-^2} \,,
\end{equation}
where $P_\pm$ are the two orthogonal projectors onto the normal modes, corresponding to diagonalizing $M(k)$, and $m^2_\pm={1\over 2}(m_1^2+m_2^2 \pm R)$ are their squared masses (with $R = \sqrt{(m_1^2-m_2^2)^2 +4g^2}$).
For arbitrary $\hbar_i$ it is found that
\begin{equation}
W(k) = \frac{Q_+}{k^2+m_+^2} + \frac{Q_-}{k^2+m_-^2} + \frac{Q_3}{k^2 + m_3^2}\,,
\label{eq:49}
\end{equation}
where $m_3^2=\frac{1}{2}(m_1^2+m_2^2)$, and
\begin{eqnarray}
Q_\pm &=& \left( \frac{\hbar_1+\hbar_2}{2}\pm\frac{(\hbar_1-\hbar_2)(m_1^2-m_2^2)}{2R} \right)P_\pm \,, \nonumber \\
Q_3 &=& \frac{\hbar_1-\hbar_2}{2} \left(\sigma_z -\frac{m_1^2-m_2^2}{R}(P_+-P_-)\right) \,.
\end{eqnarray}
One can see that there is an extra mode, namely, $m_3^2$. Unfortunately, whereas $Q_\pm$ are non negative, $Q_3$ is not in general, since ${\rm tr}\,(Q_3) = 0$ (so that $Q_3$, whenever it does not vanish, has one negative eigenvalue). This means that the covariance matrix $W(k)$ is positive but not reflection positive, except in the trivial cases $\hbar_1=\hbar_2$ or $g=0$. The latter case describes two non-interacting sectors, and the first case corresponds to two classical sectors if $\hbar_1=\hbar_2=0$ or two quantum sectors if $\hbar_1=\hbar_2>0$. Beyond these trivial cases, this theory does not define a Hilbert space with positive definite metric, i.e., it does not define a positive physical measure, and, for instance, one can construct operators with negative variance. In other words, the probabilistic interpretation (of which the classical case is a limit) breaks down. The theory must be rejected (or else one must work with a restricted set of observables, which in this context would be ad hoc). Mathematically, the lack of reflection positivity is a direct consequence of the commutation relations, eq.~(\ref{eq:45}). Indeed, as noted, the commutation relations are equivalent to eq.~(\ref{eq:44}), and comparing with eq.~(\ref{eq:49}) for large $k^2$, it follows that $\hbar_2= (Q_+ + Q_- + Q_3)_{22}$. Therefore, if $\phi_2(x)$ is classical and so $\hbar_2=0$, the cancellation requires $(Q_3)_{22}$ to be negative. This argument can be expected to hold on general grounds. Indeed, we have the following theorem:

{\bf Theorem 3:} A theory of relativistic scalar fields which is quadratic, reflection positive, translationally invariant and with $\langle\phi_i\rangle=0$ cannot have quantum and classical sectors unless they are decoupled.

This can be proved as follows. Under the assumptions, all information on the theory is contained in the propagator or, equivalently, in the spectral density $\rho(\mu)$. For simplicity we consider theories with one quantum field $\phi_1$ and one classical field $\phi_2$. Being a classical field means that $\phi_2$ and $\dot\phi_2$ commute at equal time. Then $\langle T\phi_2(x)\phi_2(y)\rangle$ and $\langle T\phi_2(x)\dot\phi_2(y)\rangle$ are continuous regarded as functions of $t=x_0-y_0$ at $t=0$. This implies that the function $\langle T\phi_2(x)\phi_2(y)\rangle$ is continuous with continuous first derivative at $t=0$. As a consequence its Fourier transform must be $W_{22}(k)=O(\frac{1}{k^4})$ for large $k^2$. From the Lehmann representation, this implies that $\rho_{22}(\mu)$ must average to zero (otherwise $W_{22}(k)=O(\frac{1}{k^2})$), and then reflection positivity requires $\rho_{22}=0$ everywhere. At this point we have already shown that the connected propagator of a classical field must vanish, and so the classical field cannot have secondary quantum fluctuations. This is in agreement with our Theorem 2 above. The stronger statement in Theorem 3 comes from noting that if $\rho_{22}=0$, positivity of the matrix $\rho$ requires $\rho_{12}=\rho_{21}=0$ as well, and therefore there is no mixing between the two sectors.
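The algebra behind eq.~(\ref{eq:49}) and the sign of $Q_3$ are easy to check numerically. The following sketch (Python, with the same illustrative toy parameters as above; the variable names and the momenta sampled are ours, not taken from~\cite{Salcedo:1994sn}) reconstructs $W(k)$ from the three-pole form and exhibits the negative eigenvalue of $Q_3$ responsible for the loss of reflection positivity.
\begin{verbatim}
import numpy as np

m1sq, m2sq, g = 1.0, 2.25, 1.0           # illustrative values
h1, h2 = 1.0, 0.0                        # hbar_1 (quantum), hbar_2 (classical)
M0 = np.array([[m1sq, g], [g, m2sq]])
sz = np.diag([1.0, -1.0])

# Normal modes of the mass matrix: m_-^2 <= m_+^2 and projectors P_-, P_+.
evals, evecs = np.linalg.eigh(M0)        # ascending eigenvalues
m_minus_sq, m_plus_sq = evals
P_minus = np.outer(evecs[:, 0], evecs[:, 0])
P_plus  = np.outer(evecs[:, 1], evecs[:, 1])

R = np.sqrt((m1sq - m2sq) ** 2 + 4 * g ** 2)
m3sq = 0.5 * (m1sq + m2sq)
Q_plus  = (0.5 * (h1 + h2) + (h1 - h2) * (m1sq - m2sq) / (2 * R)) * P_plus
Q_minus = (0.5 * (h1 + h2) - (h1 - h2) * (m1sq - m2sq) / (2 * R)) * P_minus
Q3 = 0.5 * (h1 - h2) * (sz - (m1sq - m2sq) / R * (P_plus - P_minus))

def W_direct(k2):
    """Propagator from the closed Langevin formula."""
    a, b = k2 + m1sq, k2 + m2sq
    WQ = np.linalg.inv(np.array([[a, g], [g, b]]))
    return (h2 * a + h1 * b) / (a + b) * WQ + (h1 - h2) / (a + b) * sz

def W_poles(k2):
    """Propagator from the three-pole decomposition, eq. (49)."""
    return (Q_plus / (k2 + m_plus_sq) + Q_minus / (k2 + m_minus_sq)
            + Q3 / (k2 + m3sq))

for k2 in (0.0, 0.7, 5.0, 100.0):        # the two expressions agree ...
    assert np.allclose(W_direct(k2), W_poles(k2))

# ... but the residue of the extra pole is not positive semi-definite:
print("eigenvalues of Q_3:", np.linalg.eigvalsh(Q3))
\end{verbatim}
For $\hbar_1=\hbar_2$ or $g=0$ the matrix $Q_3$ vanishes and the negative eigenvalue disappears, in agreement with the trivial cases mentioned above.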
\section{Summary and conclusions}
\label{sec:5}
In the present work we study the internal consistency of semiquantization schemes of the universal type. In the Introduction it was argued that the classical dynamics is an internally consistent limit of the quantum dynamics, since the Poisson bracket preserves a number of essential properties of the quantum commutator (the Lie bracket property, Leibniz's rule and hermiticity). Both dynamics, quantum and classical, are of the universal type, since they have a fixed dynamical bracket independent of the particular Hamiltonian. There it is also discussed which unacceptable consequences would follow from giving up any of the above-mentioned properties, namely, intrinsic breaking of symmetries and lack of hermiticity. In section \ref{sec:2} the standard quantum-classical dynamical bracket for a system with two sectors (each of the position-momentum type) is derived as a ``limit'' of the quantum-quantum bracket with the help of the Wigner representation. It is argued that such a bracket is not internally consistent, since it fails to satisfy both the Jacobi identity and Leibniz's rule. It is pointed out that this failure is due to the fact that such a bracket is not a true limit case but rather a truncation at second order in an expansion in $\hbar$. On the contrary, the classical limit (in all sectors) is a true limit, since it keeps only the leading order in $\hbar$, and so it preserves both the Jacobi identity and Leibniz's rule. In section \ref{sec:3} the semiquantization problem in its canonical version is studied. We point out the similarity of this problem with that of the naive quantization of classical systems, which is known not to have a solution for arbitrarily large spaces of observables. Both problems can be tied to the ordering problem of operators. Roughly speaking, the canonical semiquantization problem consists in finding a Lie bracket in the algebra of observables of the mixed quantum-classical theory which interpolates between the Poisson bracket and the quantum commutator. (Leibniz's rule, which is common to both classical and quantum dynamics, is not imposed on the semiquantum bracket, since it was already shown in \cite{Salcedo:1996jr} that such a requirement plus the Lie bracket condition only allows for purely classical or purely quantum dynamics, at least for systems of the position-momentum type.) However, in order to determine the bracket of observables of mixed type, some assumptions have to be made. Our assumptions are cast in the axioms in eq.~(\ref{eq:17}). They follow either from assuming that the quantum-classical system is a limit of a quantum-quantum system or else from natural requirements on the behavior of the two sectors when they are decoupled. Such axioms are sufficiently general as to cover the case of the standard quantum-classical bracket of section \ref{sec:2}. Theorem 1 is the main result of section \ref{sec:3}. It states that, if both sectors are of the position-momentum type (and are non trivial, i.e., they are not zero dimensional), no Lie bracket exists which satisfies the axioms. In other words, under the assumptions, there is no consistent canonical semiquantization. It would be interesting to know whether the axioms allow for consistent semiquantizations when one or both sectors are not of the position-momentum type. For instance, one can consider a particle with classical position and momentum but quantum spin.
It would also be interesting to know whether Theorem 1 can be adapted when the axioms are weakened to those in eq.~(\ref{eq:18}) or else to find what kind of consistent semiquantizations are then obtained. In section \ref{sec:4} the requirement of a canonical structure for the semiquantized system is dropped. It is only imposed that the variables in the quantum sector do not commute and those in the classical sector commute, and further that positive observables must have positive expectation values. This latter requirement follows immediately if the mixed classical-quantum system is the limit of a quantum-quantum system. Since the quantum variables do not commute, the standard argument shows that they must have primary quantum fluctuations. In section \ref{sec:4} a sort of reciprocal of the uncertainty principle is proven, Theorem 2, which implies that commuting variables cannot have fluctuations. Therefore, under the assumptions, any semiquantization scheme either does not have quantum backreaction on the classical sector or else breaks positivity. One example of the first possibility is semiclassical gravity. Two examples of the second possibility are discussed in section \ref{sec:4}. Finally, a further result, Theorem 3, is presented, which shows that, for scalar fields with quadratic Lagrangians, positivity of the theory not only forbids any quantum backreaction on the classical sector but also any coupling between the two sectors. These negative results, contained in the three theorems proven in the text, put constraints on the form of possible semiquantization approaches, especially those of universal type. A further negative result has already been noted: quantum-classical field theories fail to be renormalizable even if their corresponding quantum-quantum version is renormalizable~\cite{Salcedo:1994sn}. Nevertheless, it should be kept in mind that our conclusions only apply under the assumptions made, and so more general forms of mixed quantum-classical systems cannot be ruled out.

\section*{Acknowledgments}
This work is supported in part by funds provided by the Spanish DGICYT grant no. PB95-1204 and Junta de Andaluc\'{\i}a grant no. FQM0225.

\appendix{}\section{Proof of eqs.~(\ref{EQ:29}) and~(\ref{EQ:33})}
\label{app:A}
In order to prove eq.~(\ref{EQ:29}), let us consider the Jacobi identity, eq.~(\ref{eq:26}), with $x_t=k_t=0$. This yields
\begin{equation}
F_{rs}F^q_{r+s,t}+F^q_{st}F_{t+s,r}+F^q_{tr}F_{t+r,s}=0 \,.
\end{equation}
Let us recall that $F^q_{rs}=2\sin(u_{rs}/2)$, with $u_{rs}=p_rq_s-q_rp_s$. Using the antisymmetry of $F$, the relation can be written as
\begin{equation}
F^q_{st}F_{r,s+t}=F^q_{r+s,t}F_{rs}-F^q_{rt}F_{r+t,s} \,.
\label{eq:A1}
\end{equation}
Note that $F_{rs}$, $F_{r,s+t}$ and $F_{r+t,s}$ all have the same dependence on the classical variables $x_r,k_r,x_s,k_s$, so these variables can be treated just as fixed parameters in $F$; thus we can concentrate on the dependence on the quantum sector only and use the notation $F_{rs}=F(q_r,p_r;q_s,p_s)$. Now let us take $q_t=\lambda q_r$ and $p_t=\lambda p_r$; then $u_{tr}=0$ and $u_{r+s,t}=u_{st}$, and so $F^q_{tr}=0$ and $F^q_{r+s,t}=F^q_{st}$. Thus eq.~(\ref{eq:A1}) implies
\begin{equation}
F_{r,s+t}=F_{rs}\,,
\end{equation}
or, in other words, the quantity $F(q_r,p_r;q_s+\lambda q_r,p_s+\lambda p_r)$ is actually independent of $\lambda$. Choosing $\lambda=-p_s/p_r$ yields
\begin{equation}
F_{rs}=F(q_r,p_r;\frac{u_{rs}}{p_r},0)\,.
\end{equation}
Likewise, using an analogous argument or using antisymmetry, $F(q_r+\lambda q_s,p_r+\lambda p_s;q_s,p_s)$ is also independent of $\lambda$; thus
\begin{equation}
F_{rs}=F(0,p_r;\frac{u_{rs}}{p_r},0)\,.
\end{equation}
This implies that $F_{rs}$ depends at most on $u_{rs}$ and $p_r$. A similar argument shows that $F_{rs}$ may depend at most on $u_{rs}$ and $q_r$; therefore it depends only on $u_{rs}$. Everything can be repeated for the dependence of $F$ on the classical variables, using that $F^c_{rs}=v_{rs}$ is an antisymmetric function of $v_{rs}= k_rx_s-x_rk_s$. Thus, $F_{rs}$ depends only on $u_{rs}$ and $v_{rs}$.

Next, let us prove eq.~(\ref{EQ:33}). First of all, note that the three variables $x^u=u_{st}$, $y^u=u_{tr}$ and $z^u=u_{rs}$ are independent, i.e., the triple $(x^u,y^u,z^u)$ can take any value in R$^3$, and similarly $x^v$, $y^v$ and $z^v$ in the classical sector. Second, the structure of eqs.~(\ref{eq:30}) is two dimensional but can naturally be extended to any number of dimensions. Let us denote by $x^i$, $i=1,\dots,n$, the corresponding variables, i.e., $F=F(x)$ (in our particular case, $n=2$ with $x^1=u$ and $x^2=v$). Then the equations take the form
\begin{eqnarray}
&& F(x)=-F(-x)\,, \nonumber \\
&& F(x)F(y-z)+F(y)F(z-x)+F(z)F(x-y) =0 \,,
\label{eq:A5} \\
&& F_i(0)=1\,, \nonumber
\end{eqnarray}
where $F_i(x) := \partial_iF(x)$. The last relation comes from $\partial_uF(0,0)=\partial_vF(0,0)=1$, which follows from the postulates. (Actually, only $\partial_iF(0)\ne 0$ is essential for the present argument.) It is clear from the formulas that if $f$ is a one-dimensional solution of these equations,
\begin{equation}
F(x)=f(x^1+\cdots +x^n)
\label{eq:A6}
\end{equation}
is also a solution of the $n$-dimensional problem. Let us show that this is, in fact, the general solution. Applying $\partial^2/\partial y^i\partial z^j$ at $y=z=0$ to the Jacobi identity in eq.~(\ref{eq:A5}) yields
\begin{equation}
0=-F(x)F_{ij}(0)+F_i(0)F_j(-x)-F_j(0)F_i(x)\,;
\end{equation}
since $F_{ij}(0)$ is symmetric in $i$, $j$ while the remaining terms are antisymmetric (note that $F$ odd implies that the $F_i$ are even), it follows that $F_{ij}(0)=0$ and $F_i(x)=F_j(x)$ for all $i,j$. This implies eq.~(\ref{eq:A6}). (This can be seen in the two-dimensional case by making a change of variables to $x^1+x^2$ and $x^1-x^2$, and by induction for the higher-dimensional cases.) The form $F(x)=f(x^1+\cdots+x^n)$ is already incompatible with the axioms, since the classical sector requires $f(x)=x$ whereas the quantum sector requires $f(x)=2\sin(x/2)$. In order to find the general form of $f(x)$ it is sufficient to take the first derivative with respect to $z$ at $z=x$ and then the first derivative with respect to $y$ at $y=0$ in the Jacobi identity for $f$. This yields
\begin{equation}
0=f(x)f''(x)+f'^2(0)-f'^2(x)\,,
\end{equation}
which together with $f(-x)=-f(x)$ and $f'(0)=1$ yields $f(x)=\frac{1}{h}\sin(hx)$ or $f(x)=x$.

\begin{references}
\bibitem{Maddox:1995} J.~Maddox, Nature {\bf 373} (1995) 469.
\bibitem{Rosenfeld:1963} L.~Rosenfeld, Nucl. Phys. {\bf 40} (1963) 353.
\bibitem{Brandenberger:1985cz} R.H.~Brandenberger, Rev. Mod. Phys. {\bf 57} (1985) 1.
\bibitem{Boucher:1988ua} W.~Boucher and J.~Traschen, Phys. Rev. {\bf D37} (1988) 3522.
\bibitem{Anderson:1995tn} A.~Anderson, Phys. Rev. Lett. {\bf 74} (1995) 621.
\bibitem{Salcedo:1994sn} L.L.~Salcedo, hep-th/9410007.
\bibitem{DeWitt:1962} B.S.~DeWitt, J. Math. Phys. {\bf 3} (1962) 619.
\bibitem{Aleksandrov:1981} I.V.~Aleksandrov, Z. Naturforsch. Teil {\bf A36} (1981) 902.
\bibitem{Jones:1994} K.R.W.~Jones, Phys. Rev. {\bf A50} (1994) 1062.
\bibitem{Salcedo:1996jr} L.L.~Salcedo, Phys. Rev. {\bf A54} (1996) 3657.
\bibitem{Diosi:1996} L.~Di\'osi, Quantum Semiclass. Opt. {\bf 8} (1996) 309. \bibitem{Prezhdo:1997gs} O.V.~Prezhdo and V.V.~Kisil, Phys. Rev. {\bf A56} (1997) 162. \bibitem{Diosi:1997mt} L.~Diosi and J.J.~Halliwell, Phys. Rev. Lett. {\bf 81} (1998) 2846. \bibitem{VanHove:1951} L.~Van Hove, Mem. Acad. Roy. Belg., {\bf 26} (1951). \bibitem{Parisi:1981ys} G.~Parisi and Y.~Wu, Sci. Sin. {\bf 24} (1981) 483. \bibitem{Damgaard:1987rr} P.H.~Damgaard and H.~Huffel, Phys. Rept. {\bf 152} (1987) 227. \bibitem{Namiki:1992} M.~Namiki, I.~Ohba, K.~Okano, Y.~Yamanaka, A.K.~Kapoor, H.~Nakazato and S.~Tanaka, {\em Stochastic quantization}, Lecture notes in physics, Springer-Verlag, (Berlin, Germany, 1992). \bibitem{Glimm:1987} J.~Glimm and A.~Jaffe, {\em Quantum Physics}, Springer-Verlag (New York, USA, 1987). \end{references} \end{document}
\begin{document}
\title[Ordering Fr\'echet filters]{Comparing Fr\'echet-Urysohn filters with two pre-orders}
\author{S. Garcia-Ferreira}
\address{Centro de Ciencias Matem\'aticas, Universidad Nacional Aut\'onoma de M\'exico, Campus Morelia, Apartado Postal 61-3, Santa Maria, 58089, Morelia, Michoac\'an, M\'exico}
\email{[email protected]}
\author{J. E. Rivera-G\'omez}
\address{Centro de Ciencias Matem\'aticas, Universidad Nacional Aut\'onoma de M\'exico, Campus Morelia, Apartado Postal 61-3, Santa Maria, 58089, Morelia, Michoac\'an, M\'exico}
\email{[email protected]}
\thanks{Research of the first-named author was supported by CONACYT grant no. 176202 and PAPIIT grant no. IN-101911}
\subjclass[2000]{Primary 54A20, 54D55: secondary 54D80, 54G20}
\date{}
\dedicatory{}
\keywords{Fr\'echet-Urysohn filter, $FAN$-filter, Arens space, almost disjoint family, maximal almost disjoint family, Todor\v{c}evi\'c-Uzc\'ategui pre-order, Rudin-Keisler pre-order}
\begin{abstract}
A filter $\mathcal{F}$ on $\omega$ is called Fr\'echet-Urysohn if the space $\omega \cup \{\mathcal{F}\}$, having only one non-isolated point, is a Fr\'echet-Urysohn space, where the neighborhoods of the non-isolated point are determined by the elements of $\mathcal{F}$. In this paper, we distinguish some Fr\'echet-Urysohn filters by using two pre-orderings of filters: one is the Rudin-Keisler pre-order and the other one was introduced by Todor\v{c}evi\'c-Uzc\'ategui in \cite{tu05}. We construct an $RK$-chain of size $\mathfrak{c}^+$ which is $RK$-above every $FU$-filter. Also, we show that there is an infinite $RK$-antichain of $FU$-filters.
\end{abstract}
\maketitle

\section{Notation and preliminaries}
All filters will be taken on $\omega$ and will be assumed to be free. For an infinite set $X$, we let $[X]^{< \omega}=\{A\subseteq X:|A|< \omega\}$ and $[X]^{\omega}=\{A\subseteq X:|A|=\omega\}$. For $A, B\in [\omega]^{\omega}$, $A\subseteq^*B$ means that $A \setminus B$ is finite. If $S \in [\omega]^\omega$, we say that $S \to \mathcal{F}$ if $S \subseteq^* F$ for every $F \in \mathcal{F}$. If $\mathcal{F}$ is a filter, then $C(\mathcal{F}) = \{ S \in [\omega]^\omega : S \to \mathcal{F} \}$ is the set of all sequences converging to $\mathcal{F}$. For a filter $\mathcal{F}$, we let $\mathcal{I}_\mathcal{F} = \{\omega \setminus F:F\in \mathcal{F}\}$ ({\it the dual ideal}) and, for an ideal $\mathcal{I}$, we let $\mathcal{F}_\mathcal{I} = \{\omega \setminus I : I\in \mathcal{I}\}$ ({\it the dual filter}). If $\mathcal{F}$ is a filter and $f:\omega \to \omega$ is a function, then we define the filter $f[\mathcal{F}]=\{ F :f^{-1}(F)\in \mathcal{F}\}$. For $A \in [\omega]^\omega$, we define $\mathcal{F}_r(A) = \{ B \subseteq A : |A \setminus B| < \omega\}$. In particular, $\mathcal{F}_r(\omega) := \mathcal{F}_r$ is the {\it Fr\'echet filter}. We say that an infinite family $\mathcal{A} \subseteq [\omega]^{\omega}$ is {\it almost disjoint} ($AD$-family) if $A\cap B$ is finite for distinct $A, B \in \mathcal{A}$. The ideal generated by an $AD$-family $\mathcal{A}$ is $\mathcal{I}(\mathcal{A})=\{ X \subseteq \omega: \exists \mathcal{A}'\in[\mathcal{A}]^{< \omega}(X \subseteq^*\bigcup \mathcal{A}')\}$. An $AD$-family $\mathcal{A}$ is called {\it maximal almost disjoint} ($MAD$-{\it family}) if it is not properly contained in another $AD$-family.
More generally, if $\mathcal{B} \subseteq [\omega]^\omega$, then we say that a family $\mathcal{A}$ is {\it maximal} in $\mathcal{B}$ if $\mathcal{A} \subseteq \mathcal{B}$ and for every $B \in \mathcal{B}$ there is $A \in \mathcal{A}$ such that $|A \cap B| = \omega$. For a nonempty $\mathcal{A}\subseteq [\omega]^{\omega}$, we define $\mathcal{A}^{\bot}=\{B \in [\omega]^{\omega} : \forall A\in \mathcal{A}(|A\cap B|<\omega)\}$, $\mathcal{A}^+ = \{B \in[\omega]^{\omega}:\forall A\in \mathcal{A}(A\cap B \neq \emptyset)\}$ and $\mathcal{A}^* = \{B \in[\omega]^{\omega}: |\{ A \in \mathcal{A} : |A \cap B| = \omega \}| \geq \omega \}$. Notice that, for a filter $\mathcal{F}$, we have that $\mathcal{F}^+ = \mathcal{P}(\omega) \setminus \mathcal{I}_\mathcal{F}$ and, for an arbitrary ideal $\mathcal{I}$, $\mathcal{I}^\perp$ is always an ideal and $\mathcal{I} \subseteq \mathcal{I}^{\perp \perp}$. For each $X \in[\omega]^{\omega}$ and $\mathcal{A} \subseteq [\omega]^{\omega}$ we let $\mathcal{A}|_{X}=\{ A \cap X : A \in \mathcal{A} \ \text{and} \ |A \cap X|=\omega\}$. Observe that if $\mathcal{A}$ is an $AD$-family and $B \in \mathcal{A}^*$, then $\mathcal{A}|_B = \{ A \cap B : A \in \mathcal{A} \ \text{and} \ |A \cap B| = \omega \}$ is an $AD$-family on $B$. Now, let $\mathcal{F}$ be a filter on $\omega$ and consider the space $\xi(\mathcal{F}) = \omega\cup \{\mathcal{F}\}$ whose topology is defined as follows: all elements of $\omega$ are isolated and the neighborhoods of $\mathcal{F}$ are of the form $\{\mathcal{F}\} \cup F$ where $F \in \mathcal{F}$. One class of spaces which has been extensively studied in Topology is the following: a space $X$ is called a {\it Fr\'echet-Urysohn} space (for short, $FU$-space) if for each $x\in X$ such that $x\in cl_X A$, there is a sequence in $A$ converging to $x$.

\begin{definition}
A filter $\mathcal{F}$ is called a $FU$-{\it filter} if the space $\xi(\mathcal{F})$ is Fr\'echet-Urysohn.
\end{definition}

The ``smallest'' $FU$-filter is the Fr\'echet filter $\mathcal{F}_r$, and the countable $FAN$-filter is also an example of a $FU$-filter which does not have a countable base. By using $AD$-families, the authors of \cite{gu09} pointed out the existence of $2^\frak{c}$ pairwise non-equivalent $FU$-filters. In other terms, we have that $\mathcal{F}$ is a $FU$-filter iff for every $A \in \mathcal{F}^+$ there is $S \in [A]^\omega$ such that $S \subseteq^* F$ for all $F \in \mathcal{F}$. Two notions that will help us to distinguish $FU$-filters are the following.

\begin{definition}\label{tu}
Let $\mathcal{F}$ and $\mathcal{G}$ be two filters on $\omega$.
\begin{enumerate}
\item $\mathcal{F}\leq_{RK}\mathcal{G}$ if there is a function $f: \omega \to \omega$ such that $f[\mathcal{G}] = \mathcal{F}$ (i.e., $F\in \mathcal{F}$ iff $f^{-1}(F)\in \mathcal{G}$).
\item ({\bf \cite{tu05}}) $\mathcal{F}\leq_{TU}\mathcal{G}$ if there are $A\in \mathcal{G}^+$, $B\in \mathcal{F}$ and a bijection $f:A\to B$ such that $f[\mathcal{G}|_{A}]=\mathcal{F}|_B$.
\end{enumerate}
\end{definition}

We assert that these two relations, $\leq_{RK}$ and $\leq_{TU}$, are reflexive and transitive, but they are not antisymmetric.

\begin{definition}
Let $\mathcal{F}$ and $\mathcal{G}$ be filters on $\omega$.
\begin{enumerate}
\item $\mathcal{F}\approx\mathcal{G}$ if there is a bijection $f: \omega \to \omega$ such that $f[\mathcal{F}]= \mathcal{G}$.
\item $\mathcal{F}\approx_{RK}\mathcal{G}$ if $\mathcal{F}\leq_{RK}\mathcal{G}$ and $\mathcal{G}\leq_{RK}\mathcal{F}$.
\end{enumerate}
\end{definition}

The definition introduced in $(1)$ can be generalized as follows: if $A,B\in[\omega]^{\omega}$ and $\mathcal{F}, \mathcal{G}$ are filters on $A$ and $B$, respectively, then $\mathcal{F}$ and $\mathcal{G}$ are called {\it equivalent} if there is a bijection $f: A \to B$ such that $f[\mathcal{F}] = \mathcal{G}$. It is evident that $\mathcal{F} \approx\mathcal{G}$ implies that $\mathcal{F}\approx_{RK}\mathcal{G}$ for every pair of filters $\mathcal{F}$ and $\mathcal{G}$. However, we do not know whether the converse implication holds for the class of $FU$-filters:

\begin{question}
Are there two $FU$-filters $\mathcal{F}$ and $\mathcal{G}$ such that $\mathcal{F}\approx_{RK}\mathcal{G}$ and $\mathcal{F} \not\approx\mathcal{G}$?
\end{question}

By the symbol $\mathcal{F} <_{RK}\mathcal{G}$ we shall understand that $\mathcal{F}\leq_{RK}\mathcal{G}$ and $\mathcal{F} \not\approx\mathcal{G}$. This paper is a continuation of the work done in the article \cite{gr01}. The second section is devoted to recalling some basic properties of the $FU$-filters. We give combinatorial properties which are equivalent to the $RK$-order and the $TU$-order. These equivalences will allow us to construct $FU$-filters with some interesting properties. In the third section, we show that if $\mathcal{F}\leq_{RK} \mathcal{F}_{\mathcal{P}}$, then $\mathcal{F}$ is either relatively equivalent to the Fr\'echet filter or equivalent to $\mathcal{F}_{\mathcal{P}}$. The pre-orders $\leq_{RK}$ and $\leq_{TU}$ are compared in the fourth section. We show that $\leq_{TU} \subseteq \leq_{RK}$ in the category of $FU$-filters. We also prove that if $\mathcal{A}$ is a $NMAD$-family of size $\mathfrak{c}$ which is completely separable, then $S_{\mathcal{P}}\nleq_{TU}S_{\mathcal{A}}$ and $S_{\mathcal{P}} \leq_{RK} S_{\mathcal{A}}$. In the fifth section, we construct an $RK$-chain of size $\mathfrak{c}^+$ which is $RK$-above each $FU$-filter. The sixth section is devoted to the study of the $RK$-incomparability of $FU$-filters. We use the $\alpha_i$ properties to show the $RK$-incomparability of certain $FU$-filters. Besides, we construct an infinite $RK$-antichain of $FU$-filters.

\section{Fr\'echet-Urysohn Filters}
In order to study the $FU$-filters and their relationships we list some useful facts. Let $\mathcal{F}$ be a filter on $\omega$.
\begin{enumerate}
\item If $A\in[\omega]^{\omega}$, then $\mathcal{F}\in cl_{\xi(\mathcal{F})}A$ iff $A\in \mathcal{F}^+$.
\item $S \to \mathcal{F}$ iff $S \in \mathcal{I}_\mathcal{F}^{\perp}$.
\item $\mathcal{F}$ is a $FU$-filter iff $\mathcal{I}_\mathcal{F}^{\perp \perp} = \mathcal{I}_\mathcal{F}$.
\end{enumerate}
It is not hard to prove that if $\emptyset \neq \mathcal{D}\subseteq [\omega]^{\omega}$, then
$$
\mathcal{F}_{\mathcal{D}}=\{F\subseteq \omega:\forall D\in \mathcal{D}(D\subseteq^*F)\}
$$
is a $FU$-filter. By using this kind of filters and $AD$-families it is possible to characterize the $FU$-filters as follows:

\begin{lemma}\label{simon}{\bf \cite{si98}}
A filter $\mathcal{F}$ is a $FU$-filter iff there is an $AD$-family $\mathcal{A}$ maximal in $\mathcal{I}^\perp_{\mathcal{F}}$ such that $\mathcal{F}=\mathcal{F}_{\mathcal{A}}$.
\end{lemma}

We can see directly from this characterization that $\mathcal{A}$ is a $MAD$-family iff $\mathcal{F}_{\mathcal{A}} = \mathcal{F}_r$.
More generally, if $\mathcal{F}_{\mathcal{A}}$ is the filter determined by an $AD$-family $\mathcal{A}$ and $B\to \mathcal{F}_{\mathcal{A}}$, then there is $A\in \mathcal{A}$ such that $|A\cap B|=\omega$. Observe from Lemma \ref{simon} that for every infinite $\mathcal{D}\subseteq [\omega]^{\omega}$ we can find an $AD$-family $\mathcal{A}$ such that $\mathcal{F}_{\mathcal{D}} = \mathcal{F}_{\mathcal{A}}$. In what follows, when we write $\mathcal{F}_{\mathcal{A}}$ we shall always assume that $\mathcal{A}$ is an $AD$-family. Given a $FU$-filter $\mathcal{G}$, we say that $\mathcal{G}$ is {\it relatively equivalent} to the Fr\'echet filter iff there is $A\in[\omega]^{\omega}$ such that $A \to \mathcal{G}$ and $A \in \mathcal{G}$. In this case, we have that $\mathcal{G} = \{ G \cup E : G \in \mathcal{F}_r(A) \ \text{and} \ E \subseteq \omega \setminus A \}$. If we did not require the function $f$ to be onto in the definition of the $RK$-order, then we would have that $\mathcal{F}_r(A) \leq_{RK} \mathcal{F}_r$ for every $A \in [\omega]^{\omega}$. For our convenience, in the definition of the $RK$-order, we shall always require that the function involved be onto. This convention is based on the next theorem, which was proved in \cite{gr01}.

\begin{theorem}\label{new-RK}
Let $\mathcal{F}$ and $\mathcal{G}$ be filters such that $\mathcal{G} \neq \mathcal{F}_r$ and $\mathcal{F}$ is not relatively equivalent to the Fr\'echet filter. If $\mathcal{F} \leq_{RK} \mathcal{G}$, then there is a surjective function $g: \omega \to \omega$ such that $g[\mathcal{G}] = \mathcal{F}$.
\end{theorem}

By virtue of the previous theorem, we remark that if $\mathcal{F}\leq_{RK}\mathcal{F}_r$, then $\mathcal{F}=\mathcal{F}_r$. It was pointed out in \cite{gr01} that every filter $\mathcal{G}$ which has a nontrivial convergent sequence satisfies $\mathcal{F}_r\leq_{RK} \mathcal{G}$. Next, let us describe another construction, which has been very important in obtaining special $FU$-filters (see, for instance, the article \cite{si}): for every $AD$-family $\mathcal{A}$, we define $S_{\mathcal{A}}=\mathcal{F}_{\mathcal{I}(\mathcal{A})}$. In general, $S_{\mathcal{A}}$ is not a $FU$-filter; for instance, if $\mathcal{A}$ is a $MAD$-family, then $S_{\mathcal{A}}$ does not have any nontrivial convergent sequence. In order that $S_{\mathcal{A}}$ be a $FU$-filter we need some special $AD$-families: an $AD$-family $\mathcal{A}$ is said to be a {\it nowhere $MAD$-family} ($NMAD$-family) if for every $X \in \mathcal{I}(\mathcal{A})^+$ there is $A \in \mathcal{I}(\mathcal{A})^{\bot}\cap [X]^{\omega}$. We remark that if $\mathcal{A}$ is a $NMAD$-family, then every infinite subfamily $\mathcal{B}$ of $\mathcal{A}$ is also a $NMAD$-family.

\begin{theorem}\label{sfu} ({\bf \cite{si}})
Given an $AD$-family $\mathcal{A}$, we have that the filter $S_{\mathcal{A}}$ is a $FU$-filter iff $\mathcal{A}$ is a $NMAD$-family.
\end{theorem}

If $\mathcal{P}=\{P_n:n<\omega\}\subseteq [\omega]^{\omega}$ is an infinite partition of $\omega$ into infinite subsets, then $\mathcal{F}_{\mathcal{P}}$ is a $FU$-filter, which is known as the $FAN$-filter, and, by Theorem \ref{sfu}, $S_{\mathcal{P}}$ is a $FU$-filter as well. We also know that $S_{\mathcal{P}}$ and $\mathcal{F}_r$ are the only $FU$-filters with countable base. In what follows, when we write $S_{\mathcal{P}}$ we shall understand that $\mathcal{P}$ is an infinite partition of $\omega$ into infinite subsets.
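For instance, it is easy to check directly from the definitions (using fact $(2)$ of the list above) that a set $S\in[\omega]^{\omega}$ satisfies $S\to \mathcal{F}_{\mathcal{P}}$ iff $S$ meets only finitely many of the $P_n$ (equivalently, $S$ is contained in a finite union of members of $\mathcal{P}$), whereas $S\to S_{\mathcal{P}}$ iff $S\in \mathcal{I}(\mathcal{P})^{\bot}$, that is, iff $|S\cap P_n|<\omega$ for every $n<\omega$. In particular, no infinite set converges to both filters at once, which already hints that these two $FU$-filters behave very differently with respect to the pre-orders studied below.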
For filters of the form $S_{\mathcal{A}}$ it is very easy to determine their character, as we shall see next.

\begin{lemma}\label{char-nmad}
If $\mathcal{A}$ is an $AD$-family on $\omega$, then $\chi(S_{\mathcal{A}})=|\mathcal{A}|$.
\end{lemma}

The following combinatorial statements are equivalent to the $RK$-order and were proved in \cite[Th. 3.4]{gr01}.

\begin{theorem}\label{theoeq}
Let $\mathcal{A}$ and $\mathcal{B}$ be $AD$-families on $\omega$. The following statements are equivalent.
\begin{enumerate}
\item $\mathcal{F}_{\mathcal{A}}\leq_{RK}\mathcal{F}_{\mathcal{B}}$ via the function $f: \omega \to \omega$.
\item
\begin{enumerate}
\item $\forall F\in \mathcal{F}_{\mathcal{A}}(f^{-1}(F)\in \mathcal{F}_{\mathcal{B}})$, and
\item $\forall G\in \mathcal{F}_{\mathcal{B}}(f[G]\in \mathcal{F}_{\mathcal{A}})$.
\end{enumerate}
\item
\begin{enumerate}
\item $\forall n<\omega \forall B\in \mathcal{B}(|f^{-1}(n)\cap B|<\omega)$,
\item $\forall B\in \mathcal{B} \forall C\in \mathcal{A}^{\bot}(|f[B]\cap C|<\omega)$, and
\item $\forall S \in C(\mathcal{A}) \exists B\in \mathcal{B}(|f^{-1}(S)\cap B|=\omega)$.
\end{enumerate}
\end{enumerate}
\end{theorem}

The next result can be obtained by a slight modification of the proof of the previous theorem.

\begin{theorem}\label{theoeq2}
Let $\mathcal{A}$ and $\mathcal{B}$ be $NMAD$-families on $\omega$. The following statements are equivalent.
\begin{enumerate}
\item $S_{\mathcal{A}}\leq_{RK}S_{\mathcal{B}}$ via the function $f: \omega \to \omega$.
\item
\begin{enumerate}
\item $\forall F\in S_{\mathcal{A}}(f^{-1}(F)\in S_{\mathcal{B}})$, and
\item $\forall G\in S_{\mathcal{B}}(f[G]\in S_{\mathcal{A}})$.
\end{enumerate}
\item
\begin{enumerate}
\item $\forall I\in \mathcal{I}(\mathcal{A}) (f^{-1}(I)\in \mathcal{I}(\mathcal{B}))$, and
\item $\forall M \in \mathcal{I}(\mathcal{A})^{\bot}\cap[\omega]^{\omega} \exists R\in \mathcal{I}(\mathcal{B})^{\bot}\cap[\omega]^{\omega}(|f^{-1}(M)\cap R|=\omega)$.
\end{enumerate}
\end{enumerate}
\end{theorem}

The last two theorems will be very useful in the construction of special filters.

\section{$FAN$-filter}
In what follows, $\mathcal{P}$ will stand for an infinite partition of $\omega$ into infinite subsets. In the paper \cite{gr01}, we proved that the $FAN$-filter $\mathcal{F}_\mathcal{P}$ and the filter $S_\mathcal{P}$ are not $RK$-comparable. We know, by Lemma \ref{char-nmad}, that the only filters which are $RK$-predecessors of $S_{\mathcal{P}}$ are either $S_{\mathcal{P}}$ itself or filters that are relatively equivalent to the Fr\'echet filter. All these remarks led us to ask in \cite[Q. 5.12]{gr01} what the $RK$-predecessors of the $FAN$-filter are:

{\bf Question.} Is there an $AD$-family $\mathcal{A}$ such that $\mathcal{F}_r <_{RK} \mathcal{F}_\mathcal{A} <_{RK} \mathcal{F}_\mathcal{P}$?

We shall answer this question in the next theorem.

\begin{lemma}
Let $\mathcal{A}$ be an $AD$-family on $\omega$ and $f: \omega \to \omega$ be a surjective function such that the restriction $f|_{A}$ is finite-to-one for each $A\in \mathcal{A}$. If $\mathcal{D}_f :=\{f[A]:A\in\mathcal{A}\}$, then $\mathcal{F}_{\mathcal{D}_f}\leq_{RK}\mathcal{F}_{\mathcal{A}}$ via $f$.
\end{lemma}

\begin{proof}
We have to prove that $\mathcal{F}_{\mathcal{D}_f}=f[\mathcal{F}_{\mathcal{A}}]$. Let $F\in\mathcal{F}_{\mathcal{D}_f}$ and assume that $F\notin f[\mathcal{F}_{\mathcal{A}}]$. Then there is $A\in \mathcal{A}$ such that $A\setminus f^{-1}(F)$ is infinite.
Then we have that $f[A\setminus f^{-1}(F)]$ is infinite and $f[A\setminus f^{-1}(F)]\to \mathcal{F}_{\mathcal{D}_f}$, but $f[A\setminus f^{-1}(F)]\cap F=\emptyset$, which is impossible. Thus $F\in f[\mathcal{F}_{\mathcal{A}}]$. So, we obtain that $\mathcal{F}_{\mathcal{D}_f} \subseteq f[\mathcal{F}_{\mathcal{A}}]$. Now fix $H\in f[\mathcal{F}_{\mathcal{A}}]$ and suppose that $H\notin \mathcal{F}_{\mathcal{D}_f}$. Then there is $A\in \mathcal{A}$ such that $f[A]\setminus H$ is infinite. We know that $A\subseteq^*f^{-1}(H)$, which implies that $f[A]\subseteq^* H$, but this is a contradiction. Thus $H\in \mathcal{F}_{\mathcal{D}_f}$. This proves that $f[\mathcal{F}_{\mathcal{A}}] \subseteq \mathcal{F}_{\mathcal{D}_f}$. Therefore, $\mathcal{F}_{\mathcal{D}_f}=f[\mathcal{F}_{\mathcal{A}}]$ and hence $\mathcal{F}_{\mathcal{D}_f}\leq_{RK}\mathcal{F}_{\mathcal{A}}$ via the function $f$.
\end{proof}

\begin{lemma}
If $\mathcal{F}_{\mathcal{A}}\leq_{RK}\mathcal{F}_{\mathcal{B}}$ via the function $f$, then $\mathcal{F}_{\mathcal{D}_f} = \mathcal{F}_{\mathcal{A}}$, where $\mathcal{D}_f=\{f[B]:B\in\mathcal{B}\}$.
\end{lemma}

\begin{proof}
Observe that for each $B\in \mathcal{B}$, $f[B]\subseteq^* F$ for all $F\in \mathcal{F}_{\mathcal{A}}$. Hence, we have that $\mathcal{F}_{\mathcal{A}}\subseteq \mathcal{F}_{\mathcal{D}_f}$. Now let $G\in \mathcal{F}_{\mathcal{D}_f}$ and suppose that $G \notin \mathcal{F}_{\mathcal{A}}$. Then we can find $A\in \mathcal{A}$ such that $|A\setminus G|=\omega$. Since $A\setminus G\to \mathcal{F}_{\mathcal{A}}$, by Theorem \ref{theoeq}, there is $B\in \mathcal{B}$ such that $B\cap (f^{-1}(A)\setminus f^{-1}(G))$ is infinite, but this contradicts the fact that $B\subseteq^* f^{-1}(G)$. Thus, we must have that $G\in \mathcal{F}_{\mathcal{A}}$. Therefore, $\mathcal{F}_{\mathcal{D}_f} = \mathcal{F}_{\mathcal{A}}$.
\end{proof}

\begin{theorem}\label{theofan}
If $\mathcal{F}\leq_{RK} \mathcal{F}_{\mathcal{P}}$, then either $\mathcal{F}$ is relatively equivalent to the Fr\'echet filter or equivalent to $\mathcal{F}_{\mathcal{P}}$.
\end{theorem}

\begin{proof}
It is known that $\mathcal{F}$ is a $FU$-filter (for a proof see \cite{gr01}). Let $f:\omega \to \omega$ be a function such that $f[\mathcal{F}_{\mathcal{P}}]=\mathcal{F}$ and $\mathcal{P}=\{P_n:n<\omega\}$. Notice that $\mathcal{D}_f=\{f[P_n]:n<\omega\}$ is a cover of $\omega$ and $\mathcal{F}_{\mathcal{D}_f}=\mathcal{F}$ by the previous lemma. First assume that there is $n < \omega$ such that $|f[P_{m}] \setminus \big(\bigcup_{i<n} f[P_i] \big)| < \omega$ for all $n < m < \omega$. It is clear that $A:=\bigcup_{i\leq n} f[P_i]\in \mathcal{F}$ and, since $A\to \mathcal{F}$, we also have that $\mathcal{F}$ is relatively equivalent to the Fr\'echet filter. Now, set $Q_0=f[P_0]$ and inductively define a pairwise disjoint family $\mathcal{Q}=\{ Q_k : k < \omega\}$ of infinite subsets of $\omega$, and a strictly increasing sequence $(n_k)_{k < \omega}$ in $\omega$, so that $n_0 = 0$, $Q_{k}=f[P_{n_{k}}]\setminus \big(\bigcup_{i<k} Q_i\big)$ is infinite and $n_{k}$ is the smallest index with this property. Since each element of $\mathcal{Q}$ is contained in an element of $\mathcal{D}_f$, we have that $\mathcal{F}_{\mathcal{D}_f}\subseteq \mathcal{F}_{\mathcal{Q}}$. Let $F\in \mathcal{F}_{\mathcal{Q}}$ and fix $P_n\in \mathcal{P}$. Let $k=\min\{ i < \omega : n \leq n_i \}$. Then, by construction, we obtain that $f[P_n]\subseteq^*\bigcup_{i\leq k} Q_i\subseteq^*F$. Hence, $F \in \mathcal{F}$. Therefore, $\mathcal{F}=\mathcal{F}_{\mathcal{Q}}$.
\end{proof}

To finish this section we pose the following question.

\begin{question}
Let $\mathcal{F}$ be a $FU$-filter non-equivalent to the Fr\'echet filter. If $\mathcal{G} \leq_{RK} \mathcal{F}$ implies that $\mathcal{G}$ is either relatively equivalent to the Fr\'echet filter or equivalent to $\mathcal{F}$, must $\mathcal{F}$ be equivalent to the $FAN$-filter?
\end{question}

\section{Todor\v{c}evi\'c-Uzc\'ategui pre-order}
In this section, we shall compare the $FU$-filters by using the pre-order which was introduced in Definition \ref{tu}. The first goal is to compare this pre-order with the $RK$-order. To do that we reformulate the definition of the $TU$-order so as to make it a little bit easier to handle. Before this reformulation we need to prove a well-known property of certain filters.

\begin{lemma}\label{lemref}
Let $\mathcal{F}$ be a filter which is not relatively equivalent to the Fr\'echet filter. Then for every $A\in \mathcal{F}\setminus \mathcal{F}_r$ there is a bijection $f: A \to \omega$ such that $f[\mathcal{F}|_{A}]=\mathcal{F}$. In particular, $\mathcal{F} \approx \mathcal{F}|_A$ for all $A \in \mathcal{F} \setminus \mathcal{F}_r$.
\end{lemma}

\begin{proof}
Let $A\in\mathcal{F}\setminus \mathcal{F}_r$. Since $\mathcal{F}$ is not relatively equivalent to the Fr\'echet filter, there is $B\in \mathcal{F}\cap [A]^{\omega}$ such that $A\setminus B\in [A]^{\omega}$. Define $f:A\to \omega$ so that $f|_B$ is the identity on $B$ and $f[A\setminus B]=\omega\setminus B$ as a bijection. Let $F\in \mathcal{F}$. Then we have that
$$f^{-1}(F)=f^{-1}(F\cap B)\cup f^{-1}(F\setminus B)=(F\cap B)\cup f^{-1}(F\setminus B).$$
Since $F\cap B\in \mathcal{F}|_A$, we must have that $f^{-1}(F)\in \mathcal{F}|_A$. This shows that $\mathcal{F}\subseteq f[\mathcal{F}|_A]$. Now, fix $G\in \mathcal{F}|_A$. Notice that
$$f[G]=f[G\cap B]\cup f[G\setminus B]=(G\cap B) \cup f[G\setminus B].$$
As $G\in \mathcal{F}|_{A}$, there is $H\in \mathcal{F}$ such that $G=H\cap A$, and since $G\cap B= H\cap A \cap B\in \mathcal{F}$, we get that $f[G]\in \mathcal{F}$. Thus, $f[\mathcal{F}|_A] \subseteq \mathcal{F}$. Therefore, $f[\mathcal{F}|_{A}]=\mathcal{F}$.
\end{proof}

The next corollary conveniently reformulates the definition of the $TU$-order.

\begin{corollary}
Let $\mathcal{F}$ and $\mathcal{G}$ be two filters. Then $\mathcal{F}\leq_{TU} \mathcal{G}$ iff either
\begin{enumerate}
\item $\mathcal{F}$ is relatively equivalent to the Fr\'echet filter, or
\item there are $A\in \mathcal{G}^+$ and a bijection $f:A\to \omega$ such that $f[\mathcal{G}|_A]=\mathcal{F}$.
\end{enumerate}
\end{corollary}

In virtue of the previous corollary, we shall always assume that the element of the filter witnessing that it is a $TU$-predecessor is $\omega$ itself. However, the positive element that witnesses being a $TU$-successor cannot be replaced by an element of the filter: that is, if we use only members of the filters in the $TU$-order, we obtain the $RK$-order, as is shown in the next corollary.

\begin{corollary}
Let $\mathcal{F}$ and $\mathcal{G}$ be two filters which are not relatively equivalent to the Fr\'echet filter. Then $\mathcal{F}\leq_{RK}\mathcal{G}$ iff for each $A\in \mathcal{G}\setminus \mathcal{F}_r$ and $B\in \mathcal{F}\setminus \mathcal{F}_r$ there is a surjection $f:A\to B$ such that $f[\mathcal{G}|_{A}]=\mathcal{F}|_B$.
\end{corollary} We will see in the next theorem that the $TU$-order implies the $RK$-order whenever the $TU$-predecessor lies in the category of the $FU$-filters. Let us remark that if $A\in \mathcal{G}^+\setminus \mathcal{G}$, then $\omega\setminus A\in \mathcal{G}^+\setminus \mathcal{G}$ and $$ \mathcal{G}=\mathcal{G}|_{A}\oplus\mathcal{G}|_{\omega\setminus A}:=\{F\cup E: F\in \mathcal{G}|_{A} \ \text{ and } \ E\in \mathcal{G}|_{\omega\setminus A} \}. $$ \begin{theorem} Let $\mathcal{F}$ and $\mathcal{G}$ be two filters such that $C(\mathcal{F})\neq \emptyset$. If $\mathcal{F}\leq_{TU} \mathcal{G}$, then $\mathcal{F}\leq_{RK} \mathcal{G}$. \end{theorem} \begin{proof} Suppose that $A\in\mathcal{G}^+$ and that $f:A\to \omega$ witnesses that $f[\mathcal{G}|_A]=\mathcal{F}$. Fix $M\in C(\mathcal{F})$. Define $g:\omega\to\omega$ so that $g|_A=f$ and $g[\omega\setminus A]=M$ as a bijection. Let $F\in \mathcal{F}$. Since every element of $M$ has exactly two pre-images, we have that $g^{-1}(F)=f^{-1}(F)\cup g^{-1}(F\cap M)$. Clearly $f^{-1}(F)\in \mathcal{G}|_A$ and, since $M\subseteq^{*} F$, we have $g^{-1}(F\cap M)\cap (\omega\setminus A) \in \mathcal{F}_r(\omega\setminus A)$. By the above remark, we obtain that $g^{-1}(F)\in \mathcal{G}$. So $\mathcal{F}\subseteq g[\mathcal{G}]$. Now fix $G\in \mathcal{G}$. Then we have that $$g[G]=g[G\cap A]\cup g[G\setminus A]= f[G\cap A]\cup g[G\setminus A]. $$ Since $f[G\cap A]\in \mathcal{F}$, we get $g[G]\in \mathcal{F}$. Thus, $g[\mathcal{G}]\subseteq \mathcal{F}$. This proves that $g[\mathcal{G}]=\mathcal{F}$. Therefore, $\mathcal{F}\leq_{RK} \mathcal{G}$. \end{proof} \begin{corollary}\label{coroturk} Let $\mathcal{F}$ and $\mathcal{G}$ be two $FU$-filters. If $\mathcal{F} \leq_{TU}\mathcal{G}$, then $\mathcal{F}\leq_{RK}\mathcal{G}$. \end{corollary} In a general context, one may ask whether the implications $\leq_{RK} \Rightarrow \leq_{TU}$ and $\leq_{TU} \Rightarrow \leq_{RK}$ hold without any restrictions on the filters. We have just seen that the $TU$-order implies the $RK$-order on the class of $FU$-filters, which is also true for every pair of ultrafilters $\mathcal{U}, \mathcal{V}$ (i.e., $\mathcal{U}\leq_{TU} \mathcal{V} \Rightarrow \mathcal{U}\leq_{RK} \mathcal{V}$). Let us see, in the next examples, that both implications can fail in general: The {\it Arens filter} $\mathcal{F}_a$ is defined by an infinite partition $\mathcal{P}=\{P_n:n<\omega\}$ of $\omega$ and the Fr\'echet filter on each $P_n$ as follows: $$\mathcal{F}_a:=\{F\subseteq \omega:\{n<\omega:P_n\cap F\in \mathcal{F}_r(P_n)\}\in \mathcal{F}_r\}.$$ \noindent The filter $\mathcal{F}_a$ is sequential but it is not an $FU$-filter. Besides, we know that the filter $\mathcal{F}_a|_A$ is a copy of $\mathcal{F}_a$ for every $A\in \mathcal{F}_a^+$. Thus if $\mathcal{F}\leq_{TU} \mathcal{F}_a$, then $\mathcal{F}\approx \mathcal{F}_a$. However, the Fr\'echet filter is an $RK$-predecessor of the Arens filter via the function $f:\omega\to\omega$ defined by $f[P_n]=\{n\}$ for each $n<\omega$. This example shows that the implication $\mathcal{F}\leq_{RK}\mathcal{G}\Rightarrow \mathcal{F}\leq_{TU}\mathcal{G}$ does not hold in general. Now, we describe an example showing that the implication $\mathcal{F}\leq_{TU}\mathcal{G}\Rightarrow \mathcal{F}\leq_{RK} \mathcal{G}$ can also fail.
Choose $M, N\in [\omega]^{\omega}$ so that $M\cap N=\emptyset$ and $M\cup N=\omega$. We know that $\mathcal{F}_a \leq_{TU} \mathcal{F}_{a}(M)\oplus \mathcal{G} =\{F\cup G: F\in \mathcal{F}_a(M) \ \text{ and } \ G\in \mathcal{G}\}$, where $\mathcal{F}_{a}(M)$ is a copy of the Arens filter on $M$ and $\mathcal{G}$ is a filter on $N$. Let $\mathcal{G}$ be an arbitrary $FU$-filter and suppose that $\mathcal{F}_a\leq_{RK}\mathcal{F}_{a}(M)\oplus \mathcal{G}$ via the function $f$. Since $\mathcal{G}$ is an $FU$-filter, there is $R\in[N]^{\omega}$ such that $R\to \mathcal{F}_{a}(M)\oplus \mathcal{G}$, and then $f[R]\to \mathcal{F}_a$; but $\mathcal{F}_a$ does not have any nontrivial convergent sequence. This shows that the implication $\mathcal{F}\leq_{TU}\mathcal{G}\Rightarrow \mathcal{F}\leq_{RK}\mathcal{G}$ can fail in general. Next, we will use $FU$-filters of the form $S_{\mathcal{A}}$ to show that $\leq_{RK}\nsubseteq \leq_{TU}$. Before that we need to prove a theorem. \begin{theorem}\label{theomini} Let $\mathcal{A}$ and $\mathcal{B}$ be $NMAD$-families such that $S_{\mathcal{A}}\leq_{RK}S_{\mathcal{B}}$ via a function $f:\omega\to \omega$ which satisfies $|f^{-1}(n)|=\omega$ for all $n<\omega$. If $\mathcal{C}$ is an $NMAD$-family such that $\mathcal{B}\subseteq \mathcal{C}$, then $S_{\mathcal{A}}\leq_{RK}S_{\mathcal{C}}$ via the function $f$. \end{theorem} \begin{proof} In order to show that $f[S_{\mathcal{C}}] = S_{\mathcal{A}}$, we use clause $(3)$ of Theorem \ref{theoeq2}. By statement $(3)(a)$, we know that $f^{-1}(n)\in \mathcal{I}(\mathcal{B})$ for all $n<\omega$. Hence, for each $n < \omega$, we have that $|f^{-1}(n)\cap C|<\omega$ for every $C\in \mathcal{C}\setminus \mathcal{B}$. Let $M\in \mathcal{I}(\mathcal{A})^{\bot}$. Notice that the containment $f[S_{\mathcal{B}}]\subseteq S_\mathcal{A}$ implies that there is $N\in \mathcal{I}(\mathcal{B})^{\bot}$ such that $|f^{-1}(M)\cap N|=\omega$. Thus, we obtain that $f^{-1}(M)\in S_\mathcal{C}^{+}=\mathcal{P}(\omega)\setminus \mathcal{I}(\mathcal{C})$. There are two cases to consider. The first one is when $f^{-1}(M)=M_0\cup M_1$ where $M_0\in \mathcal{I}(\mathcal{C})$ and $M_1\in \mathcal{I}(\mathcal{C})^{\bot}\cap [\omega]^{\omega}$. This case is clearly done, since $M_1$ does the job. For the second one, assume that $f^{-1}(M)\in \mathcal{C}^{*}$. Since $\mathcal{C}$ is an $NMAD$-family, there is $N\in \mathcal{I}(\mathcal{C})^{\bot}\cap [\omega]^{\omega}$ such that $|f^{-1}(M)\cap N|=\omega$. Thus, $f[S_{\mathcal{C}}] = S_{\mathcal{A}}$. Therefore, we conclude that $S_{\mathcal{A}}\leq_{RK}S_{\mathcal{C}}$ via the function $f$. \end{proof} Let $\mathcal{A}$ be an $AD$-family on $\omega$. For each $A\in\mathcal{A}$ choose $E_A\in [\omega]^{< \omega}$ and consider one of the sets $A'=A\cup E_A$ or $A'=A\setminus E_A$. It is not difficult to show that $S_{\mathcal{A}}=S_{\mathcal{A}'}$ where $\mathcal{A}'=\{A':A\in \mathcal{A}\}$. For our convenience, without loss of generality, we shall assume that each $AD$-family $\mathcal{A}$ always contains a partition $\{A_{n}:n<\omega\}\subseteq \mathcal{A}$ of $\omega$ into infinite subsets. We show next that $S_{\mathcal{P}}$ is an $RK$-minimal filter in the realm of the filters of the form $S_{\mathcal{A}}$ where $\mathcal{A}$ is an $NMAD$-family. \begin{corollary}\label{com} $S_{\mathcal{P}}\leq_{RK}S_{\mathcal{A}}$ for every $NMAD$-family $\mathcal{A}$.
\end{corollary} \begin{proof} Let $\mathcal{A}$ be an $AD$-family on $\omega$ such that $\mathcal{A}'=\{A_n:n<\omega\}\subseteq \mathcal{A}$ is a partition of $\omega$. Enumerate $\mathcal{P}$ as $\{P_n:n<\omega\}$ and define $f:\omega\to \omega$ so that $f[A_n]=P_n$ and $|f^{-1}(n)|$ is infinite for each $n<\omega$. It is straightforward to prove that $S_{\mathcal{P}}\leq_{RK}S_{\mathcal{A}'}$ via $f$. Therefore, from Theorem \ref{theomini} we deduce that $S_{\mathcal{P}}\leq_{RK}S_{\mathcal{A}}$ via the function $f$. \end{proof} The answer to the following question would be very useful in understanding the filters of the form $S_{\mathcal{A}}$. \begin{question} Is it true that $S_{\mathcal{P}}\leq_{RK}S_{\mathcal{A}}$ for every $AD$-family $\mathcal{A}$? \end{question} We recall that an $AD$-family $\mathcal{B}$ is said to be {\it completely separable} if for every $M\in \mathcal{B}^*$ there is $B\in \mathcal{B}$ such that $B\subseteq M$. In the paper \cite{si}, P. Simon showed, in $ZFC$, the existence of a completely separable $NMAD$-family of size $\mathfrak{c}$. \begin{corollary} If $\mathcal{A}$ is an $NMAD$-family of size $\mathfrak{c}$ which is completely separable, then $S_{\mathcal{P}}\nleq_{TU}S_{\mathcal{A}}$ and $S_{\mathcal{P}} \leq_{RK} S_{\mathcal{A}}$. \end{corollary} \begin{proof} Let $\mathcal{A}$ be a completely separable $NMAD$-family of size $\mathfrak{c}$. In the article \cite{si}, the author showed that this family satisfies $|\{A\in \mathcal{A}:|M\cap A|=\omega\}|=\mathfrak{c}$ for all $M\in \mathcal{A}^*$. If $M\in S_{\mathcal{A}}^+$ and $|\{A\in \mathcal{A}:|M\cap A|=\omega\}|< \omega$, then $S_{\mathcal{A}}|_M$ is relatively equivalent to the Fr\'echet filter. If $M\in \mathcal{A}^*$, then $S_{\mathcal{A}}|_M$ has character equal to $\mathfrak{c}$. Thus, $S_{\mathcal{A}}$ does not contain a copy of $S_{\mathcal{P}}$. Therefore, $S_{\mathcal{P}}\nleq_{TU}S_{\mathcal{A}}.$ On the other hand, by Corollary \ref{com}, we have that $S_{\mathcal{P}} \leq_{RK} S_{\mathcal{A}}$. \end{proof} The behavior under the $RK$-order of the filters of the form $S_{\mathcal{A}}$, where $\mathcal{A}$ is completely separable, is not yet well understood. For instance, we do not know the answer to the following question. \begin{question} Are there two completely separable $NMAD$-families $\mathcal{A}$ and $\mathcal{B}$ such that their filters $S_{\mathcal{A}}$ and $S_{\mathcal{B}}$ are $RK$-incomparable? \end{question} In the last section, we will construct two $NMAD$-families of size $\mathfrak{c}$ whose respective filters are $RK$-incomparable. \section{Chains of $FU$-filters in the $RK$-order and $TU$-order} First, we shall describe an operation on filters that preserves the $FU$-property and produces $RK$-successors: Let $I$ be a set, $\mathcal{F}$ a (not necessarily free) filter on $I$ and $\mathcal{A} = \{ A_i : i \in I \}$ an $AD$-family. For each $i \in I$, choose a free filter $\mathcal{F}_i$ on the set $A_i$. Then we define $$ \sum_{\mathcal{F}}\mathcal{F}_i := \{ F \subseteq \omega :\{i\in I: F\cap A_i \in \mathcal{F}_i\}\in \mathcal{F}\} $$ and $$ \prod_{i \in I}\mathcal{F}_i := \{ F \subseteq \omega : \forall i \in I\,(F\cap A_i \in \mathcal{F}_i)\}. $$ Notice that if the filter on $I$ is the trivial filter $\{I\}$, then $$ \prod_{i \in I}\mathcal{F}_i = \sum_{\{I\}}\mathcal{F}_i. $$ The filter $\prod_{i \in I}\mathcal{F}_i$ is referred to as the product of the filters $\{ \mathcal{F}_i : i \in I\}$.
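To see a concrete instance of the sum construction, note that the Arens filter of the previous section fits this pattern: taking $I=\omega$, $\mathcal{F}=\mathcal{F}_r$, $A_n=P_n$ and $\mathcal{F}_n=\mathcal{F}_r(P_n)$ for each $n<\omega$, a direct comparison of the definitions gives $$ \sum_{\mathcal{F}_r}\mathcal{F}_r(P_n) = \{F\subseteq\omega : \{n<\omega : F\cap P_n\in \mathcal{F}_r(P_n)\}\in\mathcal{F}_r\} = \mathcal{F}_a. $$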
Several interesting properties of this operation on filters are contained in \cite{gr01}. It is evident that $\sum_{\mathcal{F}}\mathcal{F}_i$ is always a free filter on $\omega$ and that $F \in (\prod_{i \in I}\mathcal{F}_i)^+$ iff there is $i \in I$ such that $F \in \mathcal{F}_i^+$. Hence, we deduce that $\prod_{i \in I}\mathcal{F}_i$ is an $FU$-filter iff $\mathcal{F}_i$ is an $FU$-filter for all $i \in I$. We remark that $\sum_{\mathcal{F}}\mathcal{F}_i$ is not, in general, an $FU$-filter: for instance, the Arens filter $\mathcal{F}_a$. In this context, the $FAN$-filter is the filter $\prod_{n < \omega}\mathcal{F}_r(P_n)$ where $\{ P_n :n < \omega\}$ is a partition of $\omega$ into infinite subsets. The product of finitely many filters $\mathcal{F}_0,\dots, \mathcal{F}_n$ will be denoted by $\mathcal{F}_0 \oplus \mathcal{F}_1 \oplus \dots \oplus \mathcal{F}_n$. We point out that if $\mathcal{A} = \{ A_i : i \in I \}$ is an $AD$-family and $\mathcal{A}_i$ is an $AD$-family on $A_i$, for each $i \in I$, then $$ \prod_{i \in I}\mathcal{F}_{\mathcal{A}_i} = \mathcal{F}_{\bigcup_{i \in I}\mathcal{A}_i}. $$ To construct $RK$-up-directed chains we need the following lemma from \cite[4.2]{gr01}. \begin{lemma}\label{many-su} Let $A\in[\omega]^{\omega}$. Suppose that $\mathcal{A} = \{ A_i : i \in I \}\cup \{A\}$ is an $AD$-family and $\mathcal{A}_i$ is an $AD$-family on $\omega$, for each $i \in I$. If $f_i: \omega \to A_i$ is a bijection for every $i \in I$, and $\mathcal{B}$ is an $AD$-family on $A$, then $\mathcal{F}_{\mathcal{A}_j} \leq_{RK} \mathcal{F}_{\bigcup_{i \in I}f_i[\mathcal{A}_i]\cup \mathcal{B}}$ for all $j \in I$. \end{lemma} We remark that $\leq_{RK}$ can be replaced by $\leq_{TU}$ in the previous lemma. \begin{theorem} If $\{\mathcal{A}_{\xi}:\xi<\mathfrak{c}\}$ is a collection of $AD$-families, then there is an $AD$-family $\mathcal{C}$ such that $\mathcal{F}_{\mathcal{A}_{\xi}}<_{RK}\mathcal{F}_{\mathcal{C}}$ for all $\xi<\mathfrak{c}$. \end{theorem} \begin{proof} Fix $A\in[\omega]^{\omega}$ so that $\omega\setminus A$ is infinite and let $\{A_{\xi}:\xi<\mathfrak{c}\}$ be an $AD$-family on $\omega\setminus A$. For each $\xi<\mathfrak{c}$ choose a bijection $f_{\xi}:\omega\to A_{\xi}$. By the previous lemma we obtain that $\mathcal{F}_{\mathcal{A}_{\xi}}\leq_{RK}\mathcal{F}_{\bigcup_{\xi<\mathfrak{c}}f_{\xi}[\mathcal{A}_{\xi}]\cup \mathcal{B}}$ for all $\xi<\mathfrak{c}$ and for every $AD$-family $\mathcal{B}$ on $A$. We know that there are $2^{\mathfrak{c}}$ pairwise distinct $AD$-families on $A$, and since every filter $\mathcal{F}_{\mathcal{A}_{\xi}}$ has at most $\mathfrak{c}$-many $RK$-predecessors, we can find an $AD$-family $\mathcal{B}$ such that $\mathcal{F}_{\mathcal{B}} \nleq_{RK} \mathcal{F}_{\mathcal{A}_{\xi}}$ for all $\xi<\mathfrak{c}$. Therefore, $\mathcal{F}_{\mathcal{A}_{\xi}}<_{RK}\mathcal{F}_{\mathcal{C}}$ for all $\xi<\mathfrak{c}$, where $\mathcal{C}=\bigcup_{\xi<\mathfrak{c}}f_{\xi}[\mathcal{A}_{\xi}]\cup \mathcal{B}$. \end{proof} \begin{corollary} There is a strictly increasing $RK$-chain of $FU$-filters of size $\mathfrak{c}^+$ $RK$-above every $FU$-filter. \end{corollary} \section{$RK$-Incomparability of $FU$-filters} In this section, we construct an $RK$-antichain consisting of $FU$-filters. The authors of \cite{tu05} have proved the existence of a $TU$-antichain of size $\mathfrak{c}^+$ consisting of $FU$-filters. The next notions introduced by A. V.
Arhangel'skii in \cite{ar} will help us to distinguish several $FU$-filters. \begin{definition} Let $X$ be a space and $x\in X$. A {\it sheaf} at $x$ is a family $\{C_n:n<\omega\}$ of sequences in $X$ converging to $x$. We say that $x$ is an $\alpha_i$-{\it point} (for $i=1,2,3,4$) if for every sheaf $\{C_n:n<\omega\}$ at $x$ there is a sequence $B$ converging to $x$ such that: \begin{enumerate} \item[] ($\alpha_1$) $C_n\subseteq^*B$, for all $n<\omega$. \item[] ($\alpha_2$) $|C_n\cap B|=\omega$, for all $n<\omega$. \item[] ($\alpha_3$) $|C_n\cap B|=\omega$, for infinitely many $n<\omega$. \item[] ($\alpha_4$) $C_n\cap B\neq\emptyset$, for infinitely many $n<\omega$. \end{enumerate} The space $X$ is called an $\alpha_i$-{\it space} if every point in $X$ is an $\alpha_i$-point. In particular, a filter $\mathcal{F}$ is an $\alpha_i$-{\it filter} if its nonisolated point is an $\alpha_i$-point in the space $\xi(\mathcal{F})$, for $i=1,2,3,4$. \end{definition} It is straightforward to prove the following implications: $$ \text{first countability} \Rightarrow \alpha_1 \Rightarrow \alpha_2 \Rightarrow \alpha_3\Rightarrow \alpha_4. $$ The $FAN$-filter is a canonical example of an $FU$-filter which is not an $\alpha_4$-filter; indeed, it is well-known that a space is not an $\alpha_4$-space iff the space contains a copy of the $FAN$-space (for a proof see \cite{sw}). In the article \cite{si}, P. Simon constructed a completely separable $NMAD$-family $\mathcal{A}$ of size $\mathfrak{c}$ such that $S_{\mathcal{A}}$ is an $\alpha_4$-filter which is not an $\alpha_3$-filter. For this $AD$-family $\mathcal{A}$, it is easy to show that $\mathcal{F}_{\mathcal{A}}$ is also an $\alpha_4$-filter that is not an $\alpha_3$-filter. In the following, we shall use a standard well-known technique to construct $FU$-filters by using the Cantor tree $2^{<\omega} = \bigcup_{n < \omega}2^n$: For each $x\in 2^{\omega}$ we define $A_x=\{x|n:n<\omega\}\subseteq 2^{<\omega}$. For every infinite $X\subseteq 2^{\omega}$ we have that $\mathcal{A}_{X}=\{A_x:x\in X\}$ is an $NMAD$-family on $2^{<\omega}$ (note that two distinct branches $A_x$ and $A_y$ meet exactly in their finitely many common initial segments, so $\mathcal{A}_X$ is almost disjoint). By identifying $2^{<\omega}$ with $\omega$, the family $\mathcal{A}_{X}$ can be considered as a family of subsets of $\omega$. P. Nyikos \cite{ny1} proved that $S_{\mathcal{A}_X}$ is an $\alpha_3$-filter for all infinite $X\subseteq 2^{\omega}$. He also showed that there is $Z\subseteq 2^{\omega}$ for which $S_{\mathcal{A}_Z}$ is an $\alpha_2$-filter, but in general this assertion can fail; for instance, $S_{\mathcal{A}_{2^{\omega}}}$ is not an $\alpha_2$-filter. All of the examples of $FU$-filters given above exist in $ZFC$. Nyikos proved in \cite{ny2}, under the assumption $\omega_1=\mathfrak{b}$, that there is an $FU$-space that is an $\alpha_2$-space but fails to be an $\alpha_1$-space. In the same paper, it was proved that if $\omega_1<\mathfrak{b}$, then there is an $FU$-space which is $\alpha_1$ but not first countable. Years later, A. Dow \cite{dow} proved that the implication ``$\alpha_2\Rightarrow \alpha_1$'' holds in the Laver model, and together with J. Stepr\={a}ns \cite{dowst} he constructed a model of $ZFC$ in which every $\alpha_1$-space is a first countable space. The existence of an $\alpha_2$-space which is not an $\alpha_1$-space, and the existence of an $\alpha_1$-space which is not a first countable space, are still open problems in $ZFC$.
Now let us prove that the properties $\alpha_2$, $\alpha_3$ and $\alpha_4$ are preserved downwards by the $RK$-order. \begin{theorem}\label{theoalphas} Let $\mathcal{F}_{\mathcal{A}}$ and $\mathcal{F}_{\mathcal{B}}$ be two $FU$-filters. If $\mathcal{F}_{\mathcal{B}}$ is an $\alpha_i$-filter and $\mathcal{F}_{\mathcal{A}}\leq_{RK}\mathcal{F}_{\mathcal{B}}$, then $\mathcal{F}_{\mathcal{A}}$ is also an $\alpha_i$-filter, for each $i=2,3,4$. \end{theorem} \begin{proof} We only give a proof for the $\alpha_2$-property, since the procedure for $\alpha_3$ and $\alpha_4$ is exactly the same. Let $\{C_n:n<\omega\}\subseteq C(\mathcal{F}_{\mathcal{A}})$ be a sheaf at $\mathcal{F}_{\mathcal{A}}$ and let $f:\omega\to\omega$ be such that $f[\mathcal{F}_{\mathcal{B}}]=\mathcal{F}_{\mathcal{A}}$. By Theorem \ref{theoeq}, we can find $B_n\in\mathcal{B}$ such that $|f^{-1}(C_n)\cap B_n|=\omega$ for every $n<\omega$. Notice that $\{f^{-1}(C_n)\cap B_n:n<\omega\}$ is a sheaf at $\mathcal{F}_{\mathcal{B}}$. Since $\mathcal{F}_{\mathcal{B}}$ is an $\alpha_2$-filter, there is a sequence $B$ converging to $\mathcal{F}_{\mathcal{B}}$ such that $|B\cap (f^{-1}(C_n)\cap B_n)|=\omega$ for all $n<\omega$. We remark that $f[B]\to \mathcal{F}_{\mathcal{A}}$. Fix $n<\omega$. Let us prove that $|f[B]\cap C_n|=\omega$. Indeed, we have that $$ f[B\cap (f^{-1}(C_n)\cap B_n)]\subseteq f[B]\cap (C_n\cap f[B_n]) \subseteq f[B]\cap C_n. $$ Since $B\cap (f^{-1}(C_n)\cap B_n) \to \mathcal{F}_{\mathcal{B}}$, we have $|f[B\cap (f^{-1}(C_n)\cap B_n)]|=\omega$ and so $|f[B]\cap C_n|=\omega$. Therefore, $\mathcal{F}_{\mathcal{A}}$ is an $\alpha_2$-filter. \end{proof} For an arbitrary $NMAD$-family $\mathcal{A}$, we know that the filter $\mathcal{F}_{\mathcal{A}}$ cannot be an $\alpha_3$-filter. Thus we obtain the following corollary. \begin{corollary}\label{a} $S_\mathcal{P}$ is not an $RK$-successor of $\mathcal{F}_{\mathcal{A}}$ for any $NMAD$-family $\mathcal{A}$. \end{corollary} The next corollary is a consequence of Corollary \ref{a} and Corollary 5.6 of \cite{gr01}. \begin{corollary} $S_\mathcal{P}$ is $RK$-incomparable with every filter $\mathcal{F}_{\mathcal{A}}$ such that $|\mathcal{A}|<\mathfrak{b}$. \end{corollary} By using Corollary \ref{coroturk}, Theorem \ref{theoalphas} and some facts quoted above, we obtain the next result. \begin{corollary} $\mathcal{F}_{\mathcal{P}}\leq_{RK} \mathcal{F}$ iff $\mathcal{F}_{\mathcal{P}}\leq_{TU} \mathcal{F}$. \end{corollary} Now we show that some of the $FU$-filters already described above are $RK$-incomparable. \begin{theorem} Let $\mathcal{A}$ be a completely separable $NMAD$-family of size $\mathfrak{c}$. Then there is a set $X\subseteq 2^{\omega}$ such that $\mathcal{F}_{\mathcal{P}}$, $S_{\mathcal{A}}$ and $S_{\mathcal{A}_X}$ form an $RK$-antichain. \end{theorem} \begin{proof} Notice that there are $2^{\mathfrak{c}}$ pairwise non-homeomorphic filters of the form $S_{\mathcal{A}_X}$ with $|X|=\mathfrak{c}$. We can choose one of them satisfying $S_{\mathcal{A}_X}\nleq_{RK} S_{\mathcal{A}}$. We know that $S_{\mathcal{A}_X}$ is an $\alpha_3$-filter, that $S_{\mathcal{A}}$ is an $\alpha_4$-filter which is not an $\alpha_3$-filter, and that the $FAN$-filter $\mathcal{F}_{\mathcal{P}}$ is not an $\alpha_4$-filter.
Hence, by Theorem \ref{theoalphas}, we obtain that $$ \mathcal{F}_{\mathcal{P}}\nleq_{RK} S_{\mathcal{A}}\nleq_{RK} S_{\mathcal{A}_X} \ \text{ and } \ \mathcal{F}_{\mathcal{P}}\nleq_{RK} S_{\mathcal{A}_{X}}. $$ According to Theorem \ref{theofan}, we have that $S_{\mathcal{A}_X}\nleq_{RK}\mathcal{F}_{\mathcal{P}}$ and $S_{\mathcal{A}}\nleq_{RK}\mathcal{F}_{\mathcal{P}}$. Therefore, $\mathcal{F}_{\mathcal{P}}$, $S_{\mathcal{A}}$ and $S_{\mathcal{A}_{X}}$ are pairwise $RK$-incomparable. \end{proof} Our next task is the construction of an infinite $RK$-antichain consisting of $FU$-filters. Such filters will be of the form $S_{\mathcal{A}_X}$ for suitable sets $X\subseteq 2^{\omega}$. For our purposes it is important to record the following characterization of the convergent sequences of $S_{\mathcal{A}_X}$: {\bf Remark.} For $X\subseteq 2^{\omega}$ and $N\in[2^{<\omega}]^{\omega}$, the following statements are equivalent: \begin{enumerate} \item $N\to S_{\mathcal{A}_X}$. \item $N\in \mathcal{I}(\mathcal{A}_X)^{\bot}$. \item For all $K\in[N]^{\omega}$ there is either: \begin{enumerate} \item $x\in 2^{\omega}\setminus X$ such that $|A_x \cap K| = \omega$, or \item an infinite antichain $M$ such that $|M \cap K| = \omega$. \end{enumerate} \end{enumerate} Thus, we may consider only branches and antichains of $2^{<\omega}$. The following equivalence is a consequence of Theorem \ref{theoeq2} and the last remark. \begin{lemma}\label{lemmi} Let $X_0,X_1\subseteq 2^{\omega}$ and let $f:2^{<\omega}\to 2^{<\omega}$ be a surjection. Then $f[S_{\mathcal{A}_{X_1}}]\neq S_{\mathcal{A}_{X_0}}$ iff one of the following conditions is satisfied: \begin{enumerate} \item There is $x\in 2^{\omega}\setminus X_1$ such that $f[A_x] \nrightarrow S_{\mathcal{A}_{X_0}}$. \item There is an infinite antichain $M$ such that $f[M] \nrightarrow S_{\mathcal{A}_{X_0}}$. \item There is $y\in 2^{\omega}\setminus X_0$ such that $f^{-1}(A_y) \in \mathcal{I}(\mathcal{A}_{X_1})$. \item There is an infinite antichain $M$ such that $f^{-1}(M) \in \mathcal{I}(\mathcal{A}_{X_1})$. \end{enumerate} \end{lemma} We would like to point out that clauses (1) and (2) imply $S_{\mathcal{A}_{X_0}}\nsubseteq f[S_{\mathcal{A}_{X_1}}]$, and conditions (3) and (4) imply $f[S_{\mathcal{A}_{X_1}}]\nsubseteq S_{\mathcal{A}_{X_0}}$. We may discard any function $f$ for which there is an infinite antichain $M$ such that $|f[M]|<\omega$, since such an $f$ cannot be a witness of $RK$-comparability for any pair of $FU$-filters. Thus, in what follows, we shall always assume that $f|_M$ is finite-to-one for every infinite antichain $M$. Let us show in the next lemma that we can always extend the sets $X_0$ and $X_1$ in order to obtain witnesses for the $RK$-incomparability of the $FU$-filters associated with the extensions. \begin{lemma}\label{lemant} Let $X_0$ and $X_1$ be nonempty subsets of $2^{\omega}$ such that $|2^{\omega} \setminus (X_0 \cup X_1)| \geq \omega$, and let $f:2^{<\omega}\to 2^{<\omega}$ be a surjection such that $f|_M$ is finite-to-one for every infinite antichain $M$. Then there are $X'_0,X_1', Y_0, Y_1 \subseteq 2^{\omega}$ such that $X_0\subsetneq X_0'$, $0 < |X_0'\setminus X_0|<\omega$, $X_1\subsetneq X_1'$, $0 < |X_1'\setminus X_1|<\omega$, $0 < |Y_0|, |Y_1|< \omega$, $X_0'\cap Y_0=\emptyset = X_1'\cap Y_1$ and at least one of the following conditions holds: \begin{enumerate} \item[(a)] There is $y\in Y_1$ such that $f[A_y] \nrightarrow S_{\mathcal{A}_{X_0'}}$.
\item[(b)] There is an infinite antichain $M$ such that $f[M] \nrightarrow S_{\mathcal{A}_{X_0'}}$. \item[(c)] There is $x\in Y_0$ such that $f^{-1}(A_x) \in \mathcal{I}(\mathcal{A}_{X_1'})$. \item[(d)] There is an infinite antichain $M$ such that $f^{-1}(M) \in \mathcal{I}(\mathcal{A}_{X_1'})$. \end{enumerate} Thus, by Lemma \ref{lemmi}, we have that $f[S_{\mathcal{A}_{X_1'}}]\neq S_{\mathcal{A}_{X_0'}}$. \end{lemma} \begin{proof} We need to consider two cases: Case I. Suppose that $f[S_{\mathcal{A}_{X_1}}]\neq S_{\mathcal{A}_{X_0}}$. Notice that if the witness of this inequality is an antichain satisfying either (2) or (4) of Lemma \ref{lemmi}, then we can easily extend $X_0\subsetneq X_0'$ and $X_1\subsetneq X_1'$ arbitrarily and find $Y_0$, $Y_1$ such that $X_0'\cap Y_0=\emptyset$ and $X_1'\cap Y_1=\emptyset$. Hence, either (b) or (d) holds. Now suppose that the witness is a branch that satisfies (1). Then there are $y\in 2^{\omega}\setminus X_1$ and $x\in X_0$ such that $|f[A_y]\cap A_x|=\omega$. Define $Y_1=\{y\}$, $X_1'=X_1\cup W$ where $W\subseteq 2^{\omega}\setminus Y_1$ is a nonempty finite set, and choose $X_0'$, $Y_0$ arbitrarily such that $X_0'\cap Y_0=\emptyset$. Thus we have (a). Assume now that the witness is a branch satisfying (3). Then there is $v\in 2^{\omega}\setminus X_0$ such that $f^{-1}(A_v)\in \mathcal{I}(\mathcal{A}_{X_1})$. Define $Y_0=\{v\}$, $X_0'=X_0\cup W$ where $W\subseteq 2^{\omega}\setminus Y_0$ is a nonempty finite set, and choose $X_1'$, $Y_1$ arbitrarily such that $X_1'\cap Y_1=\emptyset$. In this case (c) is satisfied. In each case we have one of the conditions. Case II. Suppose that $f[S_{\mathcal{A}_{X_1}}]=S_{\mathcal{A}_{X_0}}$. We shall prove that $X_0$ and $X_1$ can be extended, and $Y_0$ and $Y_1$ can be found, so that the extensions satisfy either (b) or (c). \begin{enumerate} \item[(i)] Assume that there are $z\in 2^{\omega}\setminus X_0$ and a nonempty finite set $\mathcal{B}\subseteq \{A_v:v\in 2^{\omega}\}$ such that $f^{-1}(A_z)\subseteq^*\bigcup \mathcal{B}$. Notice that $X_1\setminus \{v\in 2^{\omega}: A_v\in \mathcal{B}\}\neq \emptyset$. In this case, we set $Y_0=\{z\}$, $X_0'=X_0 \cup W$ where $W\subseteq 2^{\omega}\setminus Y_0$ is a nonempty finite set, $X_1'=X_1\cup \{v\in 2^{\omega}: A_v\in \mathcal{B}\}$ and $Y_1$ a finite nonempty set such that $X_1'\cap Y_1=\emptyset$. Thus, we have (c). \item[(ii)] Now suppose that there are an infinite antichain $M$ and $x\in 2^{\omega}\setminus X_0$ such that $|f[M]\cap A_x|=\omega$. In this case, we put $X_0'=X_0\cup \{x\}$ and let $Y_0$ be a finite nonempty set such that $X_0'\cap Y_0=\emptyset$. We can extend $X_1$ arbitrarily to $X_1'$ and find a finite nonempty set $Y_1$ such that $X_1'\cap Y_1=\emptyset$. Hence, we have (b). \end{enumerate} If (i) and (ii) fail, then $f^{-1}(A_z)\in \mathcal{I}(\mathcal{A}_{2^{\omega}})^+$ for each $z\in 2^{\omega}\setminus X_0$, and for every infinite antichain $M$ we have that $f[M]\in \mathcal{I}(\mathcal{A}_{2^\omega})^{\bot}$. Hence $f[M]$ cannot meet any branch $A_y$ in an infinite set, for all $y\in 2^{\omega}$. Fix $z\in 2^{\omega}\setminus X_0$. As $f^{-1}(A_z)\in \mathcal{I}(\mathcal{A}_{2^{\omega}})^+$, the set $f^{-1}(A_z)$ contains an infinite antichain $K$. Since $f[K]$ is infinite and $f[K]\subseteq f[f^{-1}(A_z)]=A_z$, we get a contradiction with the negation of (ii). Thus, either (i) or (ii) is satisfied. \end{proof} We are ready to construct an infinite $RK$-antichain consisting of $FU$-filters of character $\mathfrak{c}$.
\begin{theorem} For every infinite cardinal $\kappa<\mathfrak{c}$, there is a family $\{X_{\alpha}:\alpha<\kappa \}\subseteq[2^{\omega}]^{\mathfrak{c}}$ such that $\{S_{\mathcal{A}_{X_{\alpha}}}:\alpha < \kappa\}$ is an $RK$-antichain. \end{theorem} \begin{proof} Let $\{f_{\beta}:\beta<\mathfrak{c}\}$ be an enumeration of all surjections $f_{\beta}:2^{<\omega}\to 2^{<\omega}$ for which $f_{\beta}|_M$ is finite-to-one for every infinite antichain $M$. By an inductive procedure, for every $\beta < \mathfrak{c}$ we shall construct, for every $\alpha<\kappa$, sets $X_{\beta}^{\alpha}\subseteq 2^{\omega}$ and $Y_{\beta}^{\alpha}\subseteq 2^{\omega}$ so that: \begin{enumerate} \item $X^{\alpha}_{\beta}\cap Y^{\alpha}_{\beta}=\emptyset$ for every $\alpha<\kappa$. \item $X^{\alpha}_{\mu}\subseteq X^{\alpha}_{\nu}$ and $Y^{\alpha}_{\mu}\subseteq Y^{\alpha}_{\nu}$ if $\mu < \nu < \mathfrak{c}$. \item For distinct $\gamma, \delta <\kappa$ one of the following conditions holds: \begin{enumerate} \item There is $x\in Y^{\gamma}_{\beta+1}$ such that $f_\beta[A_x] \nrightarrow S_{\mathcal{A}_{X^{\delta}_{\beta+1}}}$. \item There is an infinite antichain $M$ such that $f_\beta[M] \nrightarrow S_{\mathcal{A}_{X^{\delta}_{\beta+1}}}$. \item There is $y\in Y^{\delta}_{\beta+1}$ such that $f_\beta^{-1}(A_y) \in \mathcal{I}(\mathcal{A}_{X^{\gamma}_{\beta+1}})$. \item There is an infinite antichain $M$ such that $f_\beta^{-1}(M) \in \mathcal{I}(\mathcal{A}_{X^{\gamma}_{\beta+1}})$. \end{enumerate} \item $|X^{\alpha}_{\beta}|, |Y^{\alpha}_{\beta}|\leq \kappa \cdot (|\beta|+1)$ for every $\alpha<\kappa$. \end{enumerate} Choose arbitrary distinct elements $x_0, y_0\in 2^{\omega}$ and define $X_{0}^{\alpha}=\{x_0\}$ and $Y_{0}^{\alpha}=\{y_0\}$, for every $\alpha<\kappa$. Assume that for $\beta<\mathfrak{c}$ the sets $X_{\theta}^{\alpha}$ and $Y_{\theta}^{\alpha}$ have been defined for all $\theta < \beta$ and $\alpha<\kappa$ so that all of them satisfy the conditions (1), (2), (3) and (4). If $\beta<\mathfrak{c}$ is a limit ordinal, then we define $X_{\beta}^{\alpha}=\bigcup_{\theta<\beta} X_{\theta}^{\alpha}$ and $Y_{\beta}^{\alpha}=\bigcup_{\theta<\beta} Y_{\theta}^{\alpha}$ for each $\alpha < \kappa$. Now, suppose that $\beta = \theta +1$. We shall define $X_{\beta}^{\alpha}$ and $Y_{\beta}^{\alpha}$. Notice from (4) that $$ |2^{\omega}\setminus \big[\big(\bigcup_{\alpha<\kappa} X_{\theta}^{\alpha}\big) \cup \big(\bigcup_{\alpha<\kappa} Y_{\theta}^{\alpha}\big) \big]|= \mathfrak{c}. $$ Fix $\alpha < \kappa$. According to Lemma \ref{lemant}, for every $\gamma < \kappa$ we can find finite nonempty sets $B_{\gamma}^{\alpha}$, $C_{\gamma}^{\alpha}$, $D_{\gamma}^{\alpha}$ and $E_{\gamma}^{\alpha}$ so that the following conditions hold: \begin{enumerate} \item[(i)] $B_{\gamma}^{\alpha} \cup C_{\gamma}^{\alpha} \cup D_{\gamma}^{\alpha} \cup E_{\gamma}^{\alpha}\subseteq 2^{\omega}\setminus \big[\big(\bigcup_{\alpha<\kappa} X_{\theta}^{\alpha}\big) \cup \big(\bigcup_{\alpha<\kappa} Y_{\theta}^{\alpha}\big) \big].$ \item[(ii)] One of the following conditions holds: \begin{enumerate} \item There is $x\in Y^{\alpha}_{\theta}\cup D^{\alpha}_{\gamma}$ such that $f_\beta[A_x] \nrightarrow S_{\mathcal{A}_{X^{\gamma}_{\theta}\cup C^{\alpha}_{\gamma}}}$. \item There is an infinite antichain $M$ such that $f_\beta[M] \nrightarrow S_{\mathcal{A}_{X^{\gamma}_{\theta}\cup C^{\alpha}_{\gamma}}}$.
\item There is $y\in Y^{\gamma}_{\theta}\cup E^{\alpha}_{\gamma}$ such that $f_\beta^{-1}(A_y) \in \mathcal{I}(\mathcal{A}_{X^{\alpha}_{\theta}\cup B^{\alpha}_{\gamma}})$. \item There is an infinite antichain $M$ such that $f_\beta^{-1}(M) \in \mathcal{I}(\mathcal{A}_{X^{\alpha}_{\theta}\cup B^{\alpha}_{\gamma}})$. \end{enumerate} \item[(iii)] $\big(\bigcup_{\gamma\in \kappa\setminus \{\alpha\}}B_{\gamma}^{\alpha}\big)\cap \big(\bigcup_{\gamma\in \kappa\setminus \{\alpha\}}D_{\gamma}^{\alpha}\big)=\emptyset$. \item[(iv)] $\big(\bigcup_{\gamma\in \kappa\setminus \{\alpha\}}C_{\gamma}^{\alpha}\big)\cap \big(\bigcup_{\gamma\in \kappa\setminus \{\alpha\}}E_{\gamma}^{\alpha}\big)=\emptyset$. \end{enumerate} Define $$ X_{\beta}^{\alpha}=X_{\theta}^{\alpha}\cup \big(\bigcup_{\gamma\in \kappa\setminus \{\alpha\}}B_{\gamma}^{\alpha}\big) \cup \big(\bigcup_{\gamma\in\kappa\setminus \{\alpha\}} C_{\alpha}^{\gamma}\big) $$ and $$ Y_{\beta}^{\alpha}=Y_{\theta}^{\alpha}\cup \big(\bigcup_{\gamma\in \kappa\setminus \{\alpha\}}D_{\gamma}^{\alpha}\big) \cup \big(\bigcup_{\gamma\in\kappa\setminus \{\alpha\}} E_{\alpha}^{\gamma}\big). $$ Conditions (1), (2), (3) and (4) are clearly satisfied. For every $\alpha<\kappa$ define $X_{\alpha}=\bigcup_{\beta<\mathfrak{c}}X_{\beta}^{\alpha}$. Thus, by the construction and Lemma \ref{lemant} we have that, for all distinct $\alpha, \gamma <\kappa$ and every $\beta<\mathfrak{c}$, $$f_{\beta}[S_{\mathcal{A}_{X_{\alpha}}}]\neq S_{\mathcal{A}_{X_{\gamma}}}.$$ Therefore $\{S_{\mathcal{A}_{X_{\alpha}}}:\alpha<\kappa\}$ is an infinite $RK$-antichain of $FU$-filters of character $\mathfrak{c}$. \end{proof} We end the paper with the following question, which the authors could not solve. \begin{question} Is there an $RK$-antichain of $FU$-filters of size $\mathfrak{c}$? \end{question} \end{document}
\begin{document} \title{Expanding $K$-theoretic Schur $Q$-functions} \author{ Yu-Cheng Chiu\thanks{ Department of Mathematics, ETH Z\"{u}rich, \tt [email protected] } \and Eric Marberg\thanks{ Department of Mathematics, HKUST, \tt [email protected] } } \date{} \maketitle \begin{abstract} We derive several identities involving Ikeda and Naruse's $K$-theoretic Schur $P$- and $Q$-functions. Our main result is a formula conjectured by Lewis and the second author which expands each $K$-theoretic Schur $Q$-function in terms of $K$-theoretic Schur $P$-functions. This formula extends to some more general identities relating the skew and dual versions of both power series. We also prove a shifted version of Yeliussizov's skew Cauchy identity for symmetric Grothendieck polynomials. Finally, we discuss some conjectural formulas for the dual $K$-theoretic Schur $P$- and $Q$-functions of Nakagawa and Naruse. We show that one such formula would imply a basis property expected of the $K$-theoretic Schur $Q$-functions. \end{abstract} \setcounter{tocdepth}{2} \section{Introduction} This article proves some identities relating the \emph{$K$-theoretic Schur $P$- and $Q$-functions} introduced by Ikeda and Naruse in \cite{IkedaNaruse}. To motivate the definition of these power series and to frame our main results, we start by reviewing some classical background material on generating functions for shifted tableaux. Let $\lambda = (\lambda_1>\lambda_2>\dots>0)$ be a \emph{strict partition}, that is, a strictly decreasing sequence of positive integers. The \emph{shifted diagram} of $\lambda$ is the set of pairs $\mathsf{SD}_{\lambda} := \{ (i,j) \in \mathbb{Z} \times \mathbb{Z} : 0 < i \leq j< i + \lambda_i\}$. We usually refer to the elements of this set as ``positions'' or ``boxes.'' A \emph{shifted tableau} of shape $\lambda$ is a filling of $\mathsf{SD}_{\lambda}$ by positive half-integers. For any $i \in \mathbb{Z}$ let $i' :=i-\frac{1}{2}$. Then one may think of the entries of a shifted tableau as consisting of positive integers $i$ and primed numbers $i'$. A shifted tableau is \emph{semistandard} if the following conditions hold: \begin{enumerate} \item[(S1)] The entries in each row and column are weakly increasing. \item[(S2)] No unprimed number $i$ occurs more than once in a given column. \item[(S3)] No primed number $i'$ occurs more than once in a given row. \end{enumerate} Let ${\rm ShYT}_Q(\lambda)$ denote the set of semistandard shifted tableaux of shape $\lambda$. Define ${\rm ShYT}_P(\lambda) \subseteq {\rm ShYT}_Q(\lambda)$ to be the subset of tableaux also satisfying: \begin{enumerate} \item[(S4)] No primed number occurs in any diagonal position $(j,j)\in \mathsf{SD}_{\lambda}$. \end{enumerate} We refer to elements of ${\rm ShYT}_P(\lambda)$ and ${\rm ShYT}_Q(\lambda)$ as \emph{$P$-shifted} and \emph{$Q$-shifted tableaux}, respectively.
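To make the definition of the shifted diagram concrete, consider the strict partition $\lambda=(4,2,1)$, which is the shape used in the pictures below. Directly from the definition, row $i$ of $\mathsf{SD}_{\lambda}$ occupies the $\lambda_i$ consecutive columns $i,i+1,\dots,i+\lambda_i-1$, so $$\mathsf{SD}_{(4,2,1)} = \{(1,1),(1,2),(1,3),(1,4),(2,2),(2,3),(3,3)\},$$ a staircase-like array of $7$ boxes.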
We draw shifted tableaux in French notation: \[ \ytableausetup{boxsize=0.5cm,aligntableaux=center} \begin{array}{c} \begin{ytableau} \none & \none & 4 & \none\\ \none & 2 & 3' & \none\\ 1 & 2' & 3' & 3 \end{ytableau} \\[-7pt]\\ \text{a $P$-shifted tableau}\\ \text{of shape $(4,2,1)$} \end{array} \quad\quad\quad \begin{array}{c} \begin{ytableau} \none & \none & 4' & \none\\ \none & 2' & 3' & \none\\ 1 & 2' & 3' & 3 \end{ytableau} \\[-7pt]\\ \text{a $Q$-shifted tableau}\\ \text{of shape $(4,2,1)$}. \end{array} \] The \emph{weight} of a shifted tableau $T$ is the monomial $x^T := \prod_{i\geq 1} x_i^{m_i}$ where $m_i$ is the number of times that $i$ or $i'$ appears in $T$. For example, we have $x^T = x_1 x_2^2 x_3^3x_4$ for both of the shifted tableaux shown as examples above. The \emph{Schur $P$- and $Q$-functions} indexed by $\lambda$ are the power series \begin{equation} P_{\lambda} := \sum_{T\in{\rm ShYT}_P(\lambda)} x^T\quad\text{and}\quad Q_{\lambda} := \sum_{T\in{\rm ShYT}_Q(\lambda)} x^T. \end{equation} Any way of toggling the primes in the diagonal entries of a $Q$-shifted tableau results in another $Q$-shifted tableau of the same weight, so it is clear that \begin{equation}\label{easy-eq} Q_{\lambda} = 2^{\ell(\lambda)} P_{\lambda}\quad\text{where }\ell(\lambda) := |\{ i : \lambda_i>0\}| = | \{ i : (i,i) \in \mathsf{SD}_\lambda\}|. \end{equation} It is well-known that $P_\lambda$ and $Q_\lambda$ are symmetric functions of bounded degree. They were first defined in work of Schur on the projective representations of the symmetric group but have since appeared in various other contexts. We are interested in generalizations of $P_\lambda$ and $Q_\lambda$ that are similar generating functions for set-valued shifted tableaux. A \emph{set-valued shifted tableau} of shape $\lambda$ is a filling of $\mathsf{SD}_{\lambda}$ by nonempty finite subsets of $ \{ \frac{1}{2} i : 0< i \in \mathbb{Z}\} = \{ 1' < 1 < 2'<2<\dots\}$. We consider a sequence of such subsets $S_1,S_2,S_3,\dots$ to be \emph{weakly increasing} if $\max(S_i) \leq \min(S_{i+1})$ for all $i$. With this convention, we may define a set-valued shifted tableau to be \emph{semistandard} if it satisfies the same conditions (S1)-(S3) as above. We write ${\rm SetShYT}_Q(\lambda)$ for the set of all semistandard set-valued shifted tableaux of shape $\lambda$, and ${\rm SetShYT}_P(\lambda)$ for the subset of such tableaux also satisfying (S4).
We refer to elements of ${\rm SetShYT}_P(\lambda)$ and ${\rm SetShYT}_Q(\lambda)$ as \emph{set-valued $P$-shifted} and \emph{set-valued $Q$-shifted tableaux}, respectively: \[ \ytableausetup{boxsize=0.6cm,aligntableaux=center} \begin{array}{c} \begin{ytableau} \none & \none & 345 & \none\\ \none & 2 & 3' & \none\\ 1 & 2' & 2 & 3'3 \end{ytableau} \\[-7pt]\\ \text{a set-valued $P$-shifted tableau}\\ \text{of shape $(4,2,1)$} \end{array} \quad\quad \begin{array}{c} \begin{ytableau} \none & \none & 3'5 & \none\\ \none & 2' 2 & 3' & \none\\ 1 & 2' & 3' & 34 \end{ytableau} \\[-7pt]\\ \text{a set-valued $Q$-shifted tableau}\\ \text{of shape $(4,2,1)$.} \end{array} \] The \emph{weight} $x^T$ of a set-valued shifted tableau $T$ is defined in the same way as in the non-set-valued case; for both tableaux in the preceding example one has $x^T = x_1 x_2^3 x_3^4 x_4x_5$. Write $T_{ij}$ for the entry of a set-valued shifted tableau in position $(i,j)$ and define $|T| := \sum_{(i,j) \in \mathsf{SD}_{\lambda}} |T_{ij}|$ and $ |\lambda| := |\mathsf{SD}_{\lambda}|.$ Then $|T|-|\lambda|$ is the difference between the degree of $x^T$ and the size of $\mathsf{SD}_{\lambda}$. Finally, let $\beta$ be a variable that commutes with each $x_i$. The \emph{$K$-theoretic Schur $P$- and $Q$-functions} indexed by $\lambda$ are the power series in $\mathbb{Z}[\beta][[x_1,x_2,\dots]]$ given by \begin{equation}\label{GP-GQ-def} \begin{aligned} G\hspace{-0.2mm}P_{\lambda} &:= \sum_{T\in{\rm SetShYT}_P(\lambda)} \beta^{|T|-|\lambda|} x^T,\\ G\hspace{-0.2mm}Q_{\lambda} &:= \sum_{T\in{\rm SetShYT}_Q(\lambda)} \beta^{|T|-|\lambda|} x^T. \end{aligned}\end{equation} We recover $P_{\lambda}$ from $G\hspace{-0.2mm}P_{\lambda}$ and $Q_{\lambda}$ from $G\hspace{-0.2mm}Q_{\lambda}$ by setting $\beta=0$. Both $G\hspace{-0.2mm}P_\lambda$ and $G\hspace{-0.2mm}Q_\lambda$ are symmetric in the $x_i$ variables \cite[\S3.4]{IkedaNaruse} and homogeneous of degree $|\lambda| $ if we set $\deg(\beta) = -1$ and $\deg(x_i)=1$. Ikeda and Naruse introduced these functions in \cite{IkedaNaruse} for applications in $K$-theory. Specializations of $G\hspace{-0.2mm}P_\lambda$ and $G\hspace{-0.2mm}Q_\lambda$ represent the structure sheaves of Schubert varieties in the $K$-theory of the maximal isotropic Grassmannians of orthogonal and symplectic types \cite[Cor. 8.1]{IkedaNaruse}. More precisely, the $G\hspace{-0.2mm}P$- and $G\hspace{-0.2mm}Q$-functions represent Schubert classes in connective $K$-theory, so can be turned into cohomology classes or elements of the Grothendieck ring of vector bundles on setting $\beta=0$ and $\beta=1$, respectively.
The $G\hspace{-0.2mm}P$- and $G\hspace{-0.2mm}Q$-functions are also ``stable limits'' of connective $K$-theory classes of orbit closures for symplectic and orthogonal groups acting on the type A flag variety \cite{MP2020,MP2021}. For more results about these functions and various extensions, see \cite{NakagawaNaruse,NakagawaNaruse0,Naruse}. Our first main result is a $K$-theoretic analogue of equation \eqref{easy-eq}. The relevant identity is subtler than in the classical case, and was predicted as \cite[Conj. 5.15]{LM2021}. It expresses each $K$-theoretic Schur $Q$-function as a finite linear combination of $K$-theoretic Schur $P$-functions with integer coefficients. If $\lambda = (\lambda_1>\lambda_2>\dots>0)$ and $\mu = (\mu_1>\mu_2>\dots>0)$ are strict partitions with $\mu_i \leq \lambda_i$ for all $i$ then we write $\mu \subseteq \lambda$ and define $\mathsf{SD}_{\lambda/\mu} := \mathsf{SD}_{\lambda}\setminus \mathsf{SD}_{\mu}$ and $|\lambda/\mu| := |\mathsf{SD}_{\lambda/\mu}|$. We also let $\mathrm{col}(\lambda/\mu) := | \{ j : (i,j) \in \mathsf{SD}_{\lambda/\mu} \text{ for some }i\}|$ denote the number of distinct columns occupied by the positions in $\mathsf{SD}_{\lambda/\mu}$. \begin{theorem} \label{to-prove} If $\mu$ is a strict partition with $\ell(\mu)$ parts then \begin{equation}\label{q-to-p-eq} G\hspace{-0.2mm}Q_\mu = 2^{\ell(\mu)} \sum_{\lambda} (-1)^{\mathrm{col}(\lambda/\mu)} (-\beta/2)^{|\lambda/\mu| } G\hspace{-0.2mm}P_\lambda \end{equation} where the sum is over strict partitions $\lambda\supseteq \mu$ with $\ell(\lambda) = \ell(\mu)$ such that $\mathsf{SD}_{\lambda/\mu}$ is a \emph{vertical strip}, that is, a subset with at most one position in each row. \end{theorem} For example, it holds that $G\hspace{-0.2mm}Q_{(3,2)} = 4 G\hspace{-0.2mm}P_{(3,2)} + 2\beta G\hspace{-0.2mm}P_{(4,2)} - \beta^2 G\hspace{-0.2mm}P_{(4,3)}$ and $ G\hspace{-0.2mm}Q_{(n)} = 2 G\hspace{-0.2mm}P_{(n)} + \beta G\hspace{-0.2mm}P_{(n+1)}$ for all integers $n>0$. This formula applies even when $\mu = \emptyset$ is the empty partition, as then the sum has only one term indexed by $\lambda = \emptyset$, giving $G\hspace{-0.2mm}Q_\emptyset = G\hspace{-0.2mm}P_\emptyset = 1$. We prove Theorem~\ref{to-prove} in Section~\ref{res-sect}. The additional complexity in \eqref{q-to-p-eq} compared to \eqref{easy-eq} is related to the fact that in a set-valued $Q$-shifted tableau, a diagonal entry may contain both $i$ and $i'$. When this happens there is no simple way to remove all primes from the diagonal without changing the relevant weight. As a corollary, we may classify when only positive coefficients appear in the $G\hspace{-0.2mm}P$-expansion of $G\hspace{-0.2mm}Q_\mu$. Let $\mathbb{N} := \{0,1,2,\dots\}$.
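To illustrate how the signs and powers of $\beta$ in \eqref{q-to-p-eq} produce the expansions just stated, consider the one-row case $\mu=(n)$; this is merely an unpacking of the theorem, recorded for the reader's convenience. The strict partitions $\lambda\supseteq(n)$ with $\ell(\lambda)=1$ for which $\mathsf{SD}_{\lambda/\mu}$ is a vertical strip are $\lambda=(n)$ and $\lambda=(n+1)$, since any longer one-row shape would place two boxes of $\mathsf{SD}_{\lambda/\mu}$ in row $1$. The corresponding coefficients are $$2^{1}(-1)^{0}(-\beta/2)^{0} = 2 \quad\text{and}\quad 2^{1}(-1)^{1}(-\beta/2)^{1} = \beta,$$ which recovers $G\hspace{-0.2mm}Q_{(n)} = 2 G\hspace{-0.2mm}P_{(n)} + \beta G\hspace{-0.2mm}P_{(n+1)}$. Note that all coefficients here lie in $\mathbb{N}[\beta]$; the next corollary characterizes when this positivity holds in general.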
\begin{corollary}\label{positive_case} If $\mu$ is a strict partition then $G\hspace{-0.2mm}Q_\mu$ is an $\mathbb{N}[\beta]$-linear combination of $G\hspace{-0.2mm}P$-functions if and only if all distinct parts of $\mu$ differ by at least two. \end{corollary} \begin{proof} It suffices by Theorem~\ref{to-prove} to observe that there exists a strict partition $\lambda \supseteq \mu$ with $\ell(\lambda) =\ell(\mu)$ such that $\mathsf{SD}_{\lambda/\mu}$ is a vertical strip and $\mathrm{col}(\lambda/\mu) \not\equiv |\lambda/\mu| \ (\mathrm{mod}\ 2)$ if and only if $\mu_i - \mu_{i+1} = 1$ for some $i \in [\ell(\mu)-1]$. \end{proof} We also prove a few more results. Theorem~\ref{to-prove} has some enumerative consequences which we discuss in Section~\ref{bij-sect}. The $G\hspace{-0.2mm}P$- and $G\hspace{-0.2mm}Q$-functions have skew versions $G\hspace{-0.2mm}P_{\lambda/\mu}$ and $G\hspace{-0.2mm}Q_{\lambda/\mu}$, which are generating functions for set-valued tableaux of shifted skew shapes. In Section~\ref{skew-sect} we derive an extension of Theorem~\ref{to-prove} for these power series, along with some other related identities. There are also dual power series $g\hspace{-0.2mm}p_\lambda$ and $g\hspace{-0.2mm}q_\lambda$ defined by Nakagawa and Naruse \cite{NakagawaNaruse} from $G\hspace{-0.2mm}P_\lambda$ and $G\hspace{-0.2mm}Q_\lambda$ via a Cauchy identity. Section~\ref{dual-sect} contains some further results about these functions, including a dual form of Theorem~\ref{to-prove} (see Corollary~\ref{to-prove2}) and a skew Cauchy identity (see Theorem~\ref{GP-cauchy-thm}). In Section~\ref{last-sect}, we recall a conjectural formula for $g\hspace{-0.2mm}p_\lambda$ and $g\hspace{-0.2mm}q_\lambda$ from \cite{NakagawaNaruse}. We then explain a new conjectural formula for the related functions $j\hspace{-0.2mm}p_\lambda := \omega(g\hspace{-0.2mm}p_\lambda)$ and $j\hspace{-0.2mm}q_\lambda := \omega(g\hspace{-0.2mm}q_\lambda)$ obtained by applying the algebra automorphism $\omega$ that sends $ s_\lambda \mapsto s_{\lambda^\top}$. We show that these new conjectures would imply a conjecture of Ikeda and Naruse about the $G\hspace{-0.2mm}Q$-functions forming a $\mathbb{Z}[\beta]$-basis for a ring. \subsection*{Acknowledgements} This work was partially supported by grants ECS 26305218 and GRF 16306120 from the Hong Kong Research Grants Council. We thank Joel Lewis for several helpful comments. \section{Preliminaries} Fix a positive integer $n$ and continue to let $\beta, x_1,x_2,\dots$ be commuting variables. For any $f \in \mathbb{Z}[\beta][[x_1,x_2,\dots]]$ let $ f(x_1,x_2,\dots,x_n) \in \mathbb{Z}[\beta][x_1,x_2,\dots,x_n]$ be the polynomial obtained by setting $x_{n+1} = x_{n+2} = \dots=0$.
Also define \begin{equation} x\oplus y := x +y + \beta xy\quad\text{and}\quad x\ominus y :=\tfrac{x-y}{1+\beta y} .\end{equation} If $\lambda=(\lambda_1,\lambda_2,\dots)$ is a finite sequence of integers then let $x^\lambda := \prod_{i} x_i^{\lambda_i}$. Ikeda and Naruse use the following formulas as their definition of the $K$-theoretic Schur $P$- and $Q$-functions \cite[Def. 2.1]{IkedaNaruse}. They derive the set-valued tableau generating functions given in the introduction as \cite[Thm. 9.1]{IkedaNaruse}. \begin{theorem}[{See \cite{IkedaNaruse}}] \label{ik-thm} If $\lambda$ is a strict partition with $r := \ell(\lambda) \leq n$ then \[\begin{aligned} G\hspace{-0.2mm}P_\lambda(x_1,x_2,\dots,x_n) &= \frac{1}{(n-r)!} \sum_{w \in S_n} w\left( x^\lambda \prod_{i=1}^r \prod_{j=i+1}^n \frac{x_i\oplus x_j}{x_i\ominus x_j}\right), \\ G\hspace{-0.2mm}Q_\lambda(x_1,x_2,\dots,x_n) &= \frac{1}{(n-r)!} \sum_{w \in S_n} w\left( x^\lambda \prod_{i=1}^r (2+\beta x_i) \prod_{j=i+1}^n \frac{x_i\oplus x_j}{x_i\ominus x_j}\right), \end{aligned}\] where $w \in S_n$ acts on rational functions by permuting the $x_i$ variables while fixing $\beta$. \end{theorem} For classical background on the Schur $P$- and $Q$-functions, see \cite[\S{III.8}]{Macdonald}, \cite[\S5-\S9]{Stembridge1989}, or the appendix in \cite{Stembridge1997}. As $\lambda$ ranges over all strict partitions the functions $P_\lambda$ (respectively, $Q_\lambda$) are a $\mathbb{Z}$-basis for a subring of $\mathbb{Z}[[x_1,x_2,\dots]]$. The same is true of the polynomials $P_\lambda(x_1,x_2,\dots,x_n)$ (respectively, $Q_\lambda(x_1,x_2,\dots,x_n)$) if $\lambda$ ranges over strict partitions with $\ell(\lambda)\leq n$ \cite[\S{III.8}]{Macdonald}. Since $P_\lambda=G\hspace{-0.2mm}P_\lambda|_{\beta=0}$ and $Q_\lambda=G\hspace{-0.2mm}Q_\lambda|_{\beta=0}$ it follows that the $G\hspace{-0.2mm}P_\lambda$'s (respectively, the $G\hspace{-0.2mm}Q_\lambda$'s) are linearly independent over $\mathbb{Z}[\beta]$, as are the polynomials $G\hspace{-0.2mm}P_\lambda(x_1,x_2,\dots,x_n)$ (respectively, $G\hspace{-0.2mm}Q_\lambda(x_1,x_2,\dots,x_n)$) as $\lambda$ ranges over all strict partitions with at most $n$ parts. In fact, Ikeda and Naruse show that the sets $\{ G\hspace{-0.2mm}P_\lambda(x_1,x_2,\dots,x_n) : \ell(\lambda) \leq n\}$ and $\{ G\hspace{-0.2mm}Q_\lambda(x_1,x_2,\dots,x_n) : \ell(\lambda) \leq n\}$ are both $\mathbb{Z}[\beta]$-bases for subrings of $\mathbb{Z}[\beta][x_1,x_2,\dots,x_n]$ \cite[Thm. 3.1 and Prop. 3.2]{IkedaNaruse}. An analogous basis property is known to hold for the set of all formal power series $G\hspace{-0.2mm}P_\lambda$'s and is expected to hold for the $G\hspace{-0.2mm}Q_\lambda$'s; see the discussion before Corollary~\ref{last-cor}.
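The symmetrization formulas in Theorem~\ref{ik-thm} are convenient for computer experiments. The following short Python/SymPy sketch is our own illustration (it is not part of the original development, and all names appearing in it are ours): it implements both formulas for a small number of variables and checks the identity $G\hspace{-0.2mm}Q_{(1)} = 2G\hspace{-0.2mm}P_{(1)} + \beta G\hspace{-0.2mm}P_{(2)}$, a special case of the expansion $G\hspace{-0.2mm}Q_{(n)} = 2G\hspace{-0.2mm}P_{(n)} + \beta G\hspace{-0.2mm}P_{(n+1)}$ stated in the introduction, in two variables.
\begin{verbatim}
# Informal sanity check (not part of the paper): the symmetrization
# formulas for GP_lambda(x_1,...,x_n) and GQ_lambda(x_1,...,x_n) above.
from itertools import permutations
import sympy as sp

beta = sp.symbols('beta')

def oplus(a, b):   # a (+) b = a + b + beta*a*b
    return a + b + beta*a*b

def ominus(a, b):  # a (-) b = (a - b)/(1 + beta*b)
    return (a - b) / (1 + beta*b)

def symmetrize(lam, n, factor):
    # common part of both formulas: symmetrize over S_n, divide by (n-r)!
    x = sp.symbols('x1:%d' % (n + 1))
    r = len(lam)
    term = sp.prod([x[i]**lam[i] for i in range(r)])
    term *= sp.prod([factor(x[i]) for i in range(r)])
    term *= sp.prod([oplus(x[i], x[j]) / ominus(x[i], x[j])
                     for i in range(r) for j in range(i + 1, n)])
    total = sp.S(0)
    for w in permutations(range(n)):
        total += term.subs({x[i]: x[w[i]] for i in range(n)},
                           simultaneous=True)
    return sp.simplify(total / sp.factorial(n - r))

def GP(lam, n):
    return symmetrize(lam, n, lambda xi: sp.S(1))

def GQ(lam, n):
    return symmetrize(lam, n, lambda xi: 2 + beta*xi)

n = 2
assert sp.simplify(GQ([1], n) - (2*GP([1], n) + beta*GP([2], n))) == 0
\end{verbatim}
Raising $n$ or changing the partitions gives analogous checks of other instances of Theorem~\ref{to-prove}, at the cost of longer symbolic computations.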
The $G\hspace{-0.2mm}P$- and $G\hspace{-0.2mm}Q$-power series are generalizations of Ivanov's \emph{factorial $P$- and $Q$-functions} \cite{Ivanov}, which have another generalization studied by Okada in \cite{Okada}. Comparing Theorem~\ref{ik-thm} and \cite[Lem. 2.4]{Okada} suggests a common generalization of these functions which might be interesting to consider in future work. \section{Expansions}\label{res-sect} We prove Theorem~\ref{to-prove} in this section. For $m \in \mathbb{N}$ let $[m] := \{1,2,\dots,m\}$. Fix an integer $n>0$. Given a nonzero vector $\lambda \in \mathbb{N}^n$, we define rational functions \begin{equation}\label{AB-eq} \begin{aligned} A_\lambda &:=\frac{1}{r!}\sum_{w\in S_r} w\left( x^\lambda \prod_{i=1}^r \prod_{j=i+1}^n \frac{x_i\oplus x_j}{x_i\ominus x_j}\right), \\ B_\lambda &:= \frac{1}{r!}\sum_{w\in S_r} w\left( x^\lambda \prod_{i=1}^r (2+\beta x_i) \prod_{j=i+1}^n \frac{x_i\oplus x_j}{x_i\ominus x_j}\right), \end{aligned}\end{equation} where $r := \max\{ i \in [n] : \lambda_i \neq 0\}$. Note the implicit dependence on $n$ in these formulas. For convenience we also set $A_0 = B_0 = 1$. \begin{lemma}\label{key-lem1} Let $\lambda$ be a strict partition with $r:= \ell(\lambda) \leq n$. Fix $m \in [r]$. Let \[\mu := (\lambda_1 > \lambda_2 > \dots > \lambda_m) \quad\text{and}\quad \nu:= (\lambda_{m+1} > \lambda_{m+2} > \dots > \lambda_r).\] Then it holds that \[ \begin{aligned} G\hspace{-0.2mm}P_\lambda(x_1,x_2,\dots,x_n) &= \frac{1}{(n-m)!} \sum_{w\in S_n} w\left( A_\mu G\hspace{-0.2mm}P_\nu(x_{m+1},x_{m+2},\dots,x_n) \right), \\ G\hspace{-0.2mm}Q_\lambda(x_1,x_2,\dots,x_n) &= \frac{1}{(n-m)!} \sum_{w\in S_n} w\left( B_\mu G\hspace{-0.2mm}Q_\nu(x_{m+1},x_{m+2},\dots,x_n) \right). \end{aligned} \] \end{lemma} \begin{proof} Choose any polynomial $f(x) \in \mathbb{Z}[\beta][x]$ and let $Z_i := f(x_i) \prod_{j=i+1}^n \frac{x_i \oplus x_j}{x_i \ominus x_j}$ for each $i \in [r]$. By Theorem~\ref{ik-thm}, the expression \begin{equation}\label{first-expr} \frac{1}{(n-r)!}\sum_{w\in S_n} w \left( x^{\lambda} Z_1Z_2\cdots Z_r\right) \end{equation} gives $G\hspace{-0.2mm}P_\lambda(x_1,\dots,x_n) $ when $f(x) = 1$ and $G\hspace{-0.2mm}Q_\lambda(x_1,\dots,x_n) $ when $f(x)=2+\beta x$. Let $ S_{m}$ and $H_{n-m} \cong S_{n-m}$ be the subgroups of permutations in $S_n$ fixing each $i \in[n]\setminus[m]$ and $i \in [m]$ respectively. Then we can rewrite \eqref{first-expr} as \[ \tfrac{1}{(n-r)! m! (n-m)!} \sum_{w\in S_n} w \left( \sum_{(g,h)\in S_m\times H_{n-m}} gh\Bigl( x^\mu Z_1\cdots Z_m\Bigr) gh\Bigl(x^{\tilde \nu} Z_{m+1}\cdots Z_r\Bigr)\right) \] where $\tilde \nu$ is the sequence formed from $\nu$ by prepending $m$ zeros.
The subgroups $S_m$ and $H_{n-m}$ commute, and each $h \in H_{n-m}$ fixes $x^\mu Z_1\cdots Z_m$ while each $g \in S_m$ fixes $x^{\tilde \nu} Z_{m+1}\cdots Z_r$. The preceding expression is therefore equal to \[ \tfrac{1}{(n-r)! } \sum_{w\in S_n} w \left(\tfrac{1}{m!}\sum_{g \in S_m} g\Bigl( x^\mu Z_1\cdots Z_m\Bigr)\cdot \tfrac{1}{(n-m)!}\sum_{h \in H_{n-m}} h\Bigl(x^{\tilde \nu} Z_{m+1}\cdots Z_r\Bigr)\right). \] If $f(x)=1$ then the internal sums here are $ \tfrac{1}{m!}\sum_{g \in S_m} g\Bigl( x^\mu Z_1\cdots Z_m\Bigr) = A_\mu$ and $ \tfrac{1}{(n-m)!}\sum_{h \in H_{n-m}} h\Bigl(x^{\tilde \nu} Z_{m+1}\cdots Z_r\Bigr)= G\hspace{-0.2mm}P_\nu(x_{m+1},\dots,x_n)$ by Theorem~\ref{ik-thm}. This proves the first identity. The other follows by taking $f(x) = 2+\beta x$. \end{proof} For $r \in [n]$ let $\Pi^{r,n}(x) := \prod_{i=1}^r \prod_{j=i+1}^n \frac{x_i\oplus x_j}{x_i\ominus x_j}$. \begin{lemma}\label{lemma1} Choose integers $1\leq p<q \leq r$ and let $t_{pq} := (p,q) \in S_r$. Then \[ t_{pq} \left((1+\beta x_p)^{q-p} \Pi^{r,n}(x) \right) = - (1+\beta x_p)^{q-p}\Pi^{r,n}(x).\] Consequently if $f(x) \in \mathbb{Z}[\beta][x_1,x_2,\dots]$ is any polynomial fixed by $t_{pq}$ then \[\sum_{w \in S_r} w\left(f(x) (1+\beta x_p)^{q-p} \Pi^{r,n}(x) \right) = 0.\] \end{lemma} \begin{proof} The first identity is a straightforward exercise in algebra. The second claim follows since $\sum_{w \in S_r} w = \frac{1}{2} \sum_{w \in S_r} w (1+ t_{pq}) \in \mathbb{Z} S_r$. \end{proof} Choose an integer $m \in [n]$ and let $\delta :=(m,m-1,\dots,3,2,1).$ For each $i \in [m]$ let $\mathbf{e}_i := (0,\dots,0,1,0,\dots,0)$ be the standard basis vector in $\mathbb{Z}^m$. \begin{lemma}\label{cancel} Let $v \in \mathbb{N}^m$. Then $A_{\delta+v} + \beta A_{\delta + v+\mathbf{e}_i} = 0$ whenever $v_{i+1} = v_i + 1$ for some $ i\in [m-1]$ or $v_{i+2} =v_{i+1}+1=v_i+1$ for some $ i\in [m-2]$. \end{lemma} \begin{proof} First assume $v_{i+1} = v_i + 1$ for some $ i\in [m-1]$. Then $x^{\delta+v}$ is a polynomial fixed by $t_{i,i+1}$ so $ A_{\delta+v} + \beta A_{\delta + v+\mathbf{e}_i} = \frac{1}{m!} \sum_{w\in S_m} w ( x^{\delta+v} (1+\beta x_{i}) \Pi^{m,n}(x) ) =0 $ by Lemma~\ref{lemma1}. Next suppose that $v_{i+2} =v_{i+1}+1=v_i+1$ for some $ i\in [m-2]$. Let $\alpha := \delta+v-\mathbf{e}_i$.
Then we can write $\beta (A_{\delta+v} + \beta A_{\delta + v+\mathbf{e}_i})$ as
\[ \tfrac{1}{m!} \sum_{w\in S_m} w \bigl( x^{\alpha} (1+\beta x_i)^2\, \Pi^{m,n}(x) \bigr) - \tfrac{1}{m!} \sum_{w\in S_m} w \bigl( x^{\alpha} (1+\beta x_i)\, \Pi^{m,n}(x) \bigr).\]
Since $x^{\alpha}$ is fixed by $t_{i,i+2}$ and $t_{i,i+1}$, applying Lemma~\ref{lemma1} with $(p,q) = (i,i+2)$ and $(p,q)=(i,i+1)$ shows that both terms are zero. The ring of rational functions in $\beta,x_1,x_2,\dots,x_n$ is an integral domain, so $A_{\delta+v} + \beta A_{\delta + v+\mathbf{e}_i} =0$.
\end{proof}

After expanding $B_\delta$ in terms of the $A_\lambda$'s, one can apply many cancellations from Lemma~\ref{cancel}. We will use the next lemma to organize these cancellations. This lemma involves a certain directed graph $\mathcal{G}_m$ for $m\geq 2$, which we define inductively. In general, the vertex set of $\mathcal{G}_m$ consists of all nonempty subsets of $[m]$, excluding sets of the form $[i]$ for $i$ odd and including two copies of $[i]$ for $i$ even. When $m\in \{2,3\}$ the graph $\mathcal{G}_m$ is given explicitly by
\[
\mathcal{G}_2:=\boxed{\begin{tikzpicture}[baseline=(z.base)]
\pgfsetlinewidth{1bp}
\node (z) at (0, 0.5) {};
\node (a) at (0,0) {$\{1,2\}$};
\node (b) at (1,0) {$\{1,2\}$};
\node (c) at (0.5,1) {$\{2\}$};
\draw [->] (a) -- (c);
\draw [->] (b) -- (c);
\end{tikzpicture}}
\quad\text{and}\quad
\mathcal{G}_3:=\boxed{\begin{tikzpicture}[baseline=(z.base)]
\pgfsetlinewidth{1bp}
\node (z) at (0, 0.5) {};
\node (a) at (0,0) {$\{1,2\}$};
\node (b) at (1,0) {$\{1,2\}$};
\node (c) at (2,0) {$\{1,3\}$};
\node (d) at (3,0) {$\{2,3\}$};
\node (e) at (0.5,1) {$\{2\}$};
\node (f) at (2.5,1) {$\{3\}$};
\draw [->] (a) -- (e);
\draw [->] (b) -- (e);
\draw [->] (c) -- (f);
\draw [->] (d) -- (f);
\end{tikzpicture}}.
\]
Assume that $m\geq 4$ and that $\mathcal{G}_{m-2}$ and $\mathcal{G}_{m-1}$ have been constructed. Let $\mathcal{A}$ be the set of vertices $S \in \mathcal{G}_m$ with $\{m-2,m-1,m\}\subseteq S$, let $\mathcal{B}$ be the set of vertices $S \in \mathcal{G}_m$ with $m \notin S$, and let $\mathcal{C}$ be the set of remaining vertices in $\mathcal{G}_m$. All of the doubled vertices $[i] \in \mathcal{G}_m$ for $i =2,4,6,\dots$ belong to $\mathcal{A}$ or $\mathcal{B}$. The edges of $\mathcal{G}_m$ are given as follows. The three sets $\mathcal{A}$, $\mathcal{B}$, and $\mathcal{C}$ are each unions of connected components. An edge goes from $S \in \mathcal{A}$ to $T \in \mathcal{A}$ if and only if the edge $S \setminus\{m-1,m\} \to T \setminus\{m-1,m\}$ exists in $\mathcal{G}_{m-2}$. An edge goes from $S \in \mathcal{B}$ to $T \in \mathcal{B}$ if and only if the same edge $S \to T$ exists in $\mathcal{G}_{m-1}$.
The elements of $\mathcal{C}$ consist of the distinct unions $S \sqcup \{m-1,m\}$, $S \sqcup \{m-2,m\}$, and $S \sqcup \{m\}$ as $S$ ranges over all subsets of $[m-3]$, and for each $S\subseteq [m-3]$ there are edges $S \sqcup \{m-1,m\} \to S \sqcup \{m\}$ and $S \sqcup \{m-2,m\} \to S \sqcup \{m\}$.

\begin{example}
If $m=4$ then
\begin{itemize}
\item[] $\mathcal{A}$ has elements $\{1,2,3,4\}, \{1,2,3,4\}, \{2,3,4\}$;
\item[] $\mathcal{B}$ has elements $\{1,2\}, \{1,2\}, \{3\}, \{1,3\}, \{2,3\}, \{2\}$;
\item[] $\mathcal{C}$ has elements $\{1,3,4\}, \{1,2,4\}, \{1,4\}, \{3,4\}, \{2,4\}, \{4\}$;
\end{itemize}
and the graph $\mathcal{G}_4$ is
\[
{\small\boxed{\begin{tikzpicture}[baseline=(z.base),xscale=0.85]
\pgfsetlinewidth{1bp}
\node (z) at (0, 0.5) {};
\node (a) at (0,0) {$\{1,2,3,4\}$};
\node (b) at (1.8,0) {$\{1,2,3,4\}$};
\node (c) at (0.9,1) {$\{2,3,4\}$};
\draw [->] (a) -- (c);
\draw [->] (b) -- (c);
\end{tikzpicture}
\begin{tikzpicture}[baseline=(z.base),xscale=0.9]
\pgfsetlinewidth{1bp}
\node (z) at (0, 0.5) {};
\node (a) at (0,0) {$\{1,2\}$};
\node (b) at (1,0) {$\{1,2\}$};
\node (c) at (2,0) {$\{1,3\}$};
\node (d) at (3,0) {$\{2,3\}$};
\node (e) at (0.5,1) {$\{2\}$};
\node (f) at (2.5,1) {$\{3\}$};
\draw [->] (a) -- (e);
\draw [->] (b) -- (e);
\draw [->] (c) -- (f);
\draw [->] (d) -- (f);
\end{tikzpicture}
\begin{tikzpicture}[baseline=(z.base),xscale=0.9]
\pgfsetlinewidth{1bp}
\node (z) at (0, 0.5) {};
\node (a) at (0,0) {$\{1,3,4\}$};
\node (b) at (1.5,0) {$\{1,2,4\}$};
\node (c) at (2.75,0) {$\{3,4\}$};
\node (d) at (3.75,0) {$\{2,4\}$};
\node (e) at (0.75,1) {$\{1,4\}$};
\node (f) at (3.25,1) {$\{4\}$};
\draw [->] (a) -- (e);
\draw [->] (b) -- (e);
\draw [->] (c) -- (f);
\draw [->] (d) -- (f);
\end{tikzpicture}
}}.
\]
\end{example}

\begin{lemma}\label{graph}
For each $m\geq 2$ the graph $\mathcal{G}_m$ has the following properties:
\begin{itemize}
\item[(a)] Each directed edge has the form $S \sqcup \{i\} \to S$ for an integer $i$ with either $S \cap \{i,i+1\} = \{i+1\}$ or $S \cap \{i,i+1,i+2\} = \{i+2\}$.
\item[(b)] Each vertex either has indegree $2$ and outdegree $0$ or has indegree $0$ and outdegree $1$.
\end{itemize}
\end{lemma}

\begin{proof}
The explicit graphs $\mathcal{G}_2$ and $\mathcal{G}_3$ have these properties. Assume $m\geq 4$ and define the vertex subsets $\mathcal{A}$, $\mathcal{B}$, and $\mathcal{C}$ in $\mathcal{G}_m$ as above. The edges in $\mathcal{C}$ have properties (a) and (b) by definition. The edges in $\mathcal{B}$ have properties (a) and (b) by induction, since these vertices form a copy of $\mathcal{G}_{m-1}$. The edges in $\mathcal{A}$ also have the desired properties by induction, since the subgraph on these vertices is isomorphic to $\mathcal{G}_{m-2}$ via the map $S \mapsto S \setminus\{m-1,m\}$.
\end{proof}

Our last step before proving Theorem~\ref{to-prove} is to derive a simplified form of the desired identity involving the functions $A_\lambda$ and $B_\lambda$.

\begin{lemma}\label{key-lem2}
Suppose $\mu = (q,q-1,q-2,\dots,p)$ for integers $q\geq p > 0$. Then
\begin{equation}\label{kl2-eq}
B_\mu = 2^{\ell(\mu)} \sum_{\lambda} (-1)^{\mathsf{col}(\lambda/\mu)} (-\beta/2)^{|\lambda/\mu|} A_\lambda
\end{equation}
where the sum is over strict partitions $\lambda\supseteq \mu$ with $\ell(\lambda) = \ell(\mu)$ such that $\mathsf{SD}_{\lambda/\mu}$ is a vertical strip, and $\mathsf{col}(\lambda/\mu)$ is the number of columns occupied by $\mathsf{SD}_{\lambda/\mu}$.
\end{lemma}

\begin{proof}
We first prove the lemma in the case when $q=m$ and $p=1$. Then $\mu=\delta:=(m,m-1,\dots,3,2,1)$ and \eqref{kl2-eq} becomes
\begin{equation}\label{key-result-eq}
B_\delta = 2^m A_\delta - 2^m\sum_{i=1}^m (-\beta/2)^i A_{\delta + \mathbf{e}_1 + \mathbf{e}_2 + \dots + \mathbf{e}_i}.
\end{equation}
For a subset $S \subseteq [m]$, let $\mathbf{e}_S := \sum_{i \in S} \mathbf{e}_i$. It follows by expanding the definition of $B_\delta$ in \eqref{AB-eq} that \eqref{key-result-eq} is equivalent to
$\sum_{ S\subseteq [m]} 2^{m-|S|}\beta^{|S|}A_{\delta+\mathbf{e}_S} = 2^m A_\delta - \sum_{i=1}^m (-1)^i 2^{m-i}\beta^i A_{\delta + \mathbf{e}_{[i]}}$,
which we can rewrite as the identity
\begin{equation}\label{staircase_toprove}
\sum_{ S\subseteq [m]} \chi(S)\,2^{m-|S|}\beta^{|S|}A_{\delta+\mathbf{e}_S}=0
\end{equation}
where $\chi(S)$ is defined to be $2$ if $S=[i]$ for any $i \in \{2,4,6,\dots\}$, $0$ if $S= \varnothing$ or $S = [i]$ for any $i \in \{1,3,5,\dots\}$, and $1$ otherwise. By Lemma~\ref{graph}, the left-hand side of \eqref{staircase_toprove} is precisely
\[
\sum_{S \in \mathcal{G}_m}2^{m-|S|}\beta^{|S|}A_{\delta+\mathbf{e}_S}
= \sum_{\{S\to T\} \in \mathcal{G}_m}2^{m-|S|}\beta^{|S|-1}\left(A_{\delta+\mathbf{e}_T} + \beta A_{\delta+\mathbf{e}_S}\right)
\]
where the first sum is over the (sometimes repeated) vertices of the graph $\mathcal{G}_m$ and the second sum is over the edges $S \to T$ in $\mathcal{G}_m$; the two sides agree because, by property (b) in Lemma~\ref{graph}, each source vertex $S$ contributes its term to its unique outgoing edge, while the term of each sink $T$ is split equally between its two incoming edges (note that $2^{m-|S|}\beta^{|S|-1} = \tfrac{1}{2}\, 2^{m-|T|}\beta^{|T|}$ since $|S| = |T|+1$). In view of Lemma~\ref{cancel} and property (a) in Lemma~\ref{graph}, every term in the last sum is zero, so \eqref{key-result-eq} holds.
For the general identity, observe that if $\lambda$ is a strict partition with $r$ parts then $x_1x_2\cdots x_r A_\lambda = A_{\lambda + 1^r}$ and $x_1x_2\cdots x_r B_\lambda = B_{\lambda + 1^r}$, where $1^r = (1,1,\dots,1) \in \mathbb{N}^r$. Therefore, setting $m=q-p+1$ in \eqref{key-result-eq} and multiplying both sides by $(x_1x_2\cdots x_{q-p+1})^{p-1}$ gives
$B_\mu = 2^{q-p+1} A_\mu - 2^{q-p+1}\sum_{i=1}^{q-p+1} (-\beta/2)^i A_{\mu + \mathbf{e}_{[i]}}$,
which can be rewritten as \eqref{kl2-eq}.
\end{proof}

If $\lambda= (\lambda_1 \geq \dots\geq \lambda_p>0)$ and $\mu=(\mu_1\geq \dots \geq \mu_q>0)$ are partitions then let $\lambda\mu$ denote their concatenation; this will be another partition if $\lambda_p\geq \mu_1$. Given a strict partition $\mu$, define $\Lambda(\mu)$ to be the set of strict partitions $\lambda\supseteq \mu$ with $\ell(\lambda) = \ell(\mu)$ such that $\mathsf{SD}_{\lambda/\mu}$ is a vertical strip. We can now prove Theorem~\ref{to-prove}, which states that
$G\hspace{-0.2mm}Q_\mu = 2^{\ell(\mu)} \sum_{\lambda\in \Lambda(\mu)} (-1)^{\mathsf{col}(\lambda/\mu)} (-\beta/2)^{|\lambda/\mu| } G\hspace{-0.2mm}P_\lambda$.

\begin{proof}[Proof of Theorem~\ref{to-prove}]
Let $\mu=(\mu_1 > \dots >\mu_r>0)$ be a nonempty strict partition. We first prove the desired identity specialized to the variables $x_1,x_2,\dots,x_n$, so assume our fixed value of $n$ has $n\geq r>0$. Let $q = \mu_1$ and suppose $m \in [r]$ is maximal with $\mu_m = q + 1-m$. We proceed by induction on $r-m$. In the base case when $m=r$, the partition $\mu = (q,q-1,\dots,q-r+1)$ has the form considered in Lemma~\ref{key-lem2}, and the desired identity follows by applying both parts of Lemma~\ref{key-lem1} with $m=r$ (so that $\nu$ is empty) and then substituting \eqref{kl2-eq}.

It remains to deal with the inductive step. Assume $1 \leq m < r$ and set
\[\gamma := (\mu_1 > \mu_2 > \dots > \mu_m) \quad\text{and}\quad \nu:= (\mu_{m+1} > \mu_{m+2} > \dots > \mu_r).\]
Then $\gamma = (q,q-1,q-2,\dots,p)$ for $p := \mu_m$ and $\gamma_m \geq \nu_1+2$. We may assume by induction that the desired identity holds when $\mu$ is replaced by $\nu$, since this replacement transforms $r\mapsto r-m$ and $m\mapsto$ (some positive number) and so reduces the difference $r-m$. This assumption and Lemma~\ref{key-lem2} imply that $B_\gamma\, G\hspace{-0.2mm}Q_\nu(x_{m+1},\dots,x_n)$ is equal to
\[
2^{r} \sum_{\tilde\gamma \in \Lambda(\gamma)}\sum_{\tilde\nu \in \Lambda(\nu)} (-1)^{\mathsf{col}(\tilde\gamma/\gamma)+ \mathsf{col}(\tilde\nu/\nu)} (-\beta/2)^{|\tilde\gamma/\gamma| + |\tilde\nu/\nu|} A_{\tilde\gamma}\, G\hspace{-0.2mm}P_{\tilde\nu}(x_{m+1},\dots,x_n).
\]
Using both parts of Lemma~\ref{key-lem1}, we deduce that $G\hspace{-0.2mm}Q_\mu(x_1,\dots,x_n)$ is equal to
\[ 2^{r} \sum_{\tilde\gamma \in \Lambda(\gamma)}\sum_{\tilde\nu \in \Lambda(\nu)} (-1)^{\mathsf{col}(\tilde\gamma/\gamma)+ \mathsf{col}(\tilde\nu/\nu)} (-\beta/2)^{|\tilde\gamma/\gamma| + |\tilde\nu/\nu|}\, G\hspace{-0.2mm}P_{\tilde\gamma\tilde\nu}(x_1,\dots,x_n).\]
Since $\mu = \gamma\nu$ and $\gamma_m \geq \nu_1+2$, the concatenation map $(\tilde\gamma,\tilde\nu) \mapsto \tilde\gamma\tilde\nu$ is a bijection $\Lambda(\gamma)\times \Lambda(\nu)\xrightarrow{\sim} \Lambda(\mu)$, and if $\lambda = \tilde\gamma\tilde\nu$ for $(\tilde\gamma,\tilde\nu)\in \Lambda(\gamma)\times \Lambda(\nu)$ then $\mathsf{col}(\tilde\gamma/\gamma)+ \mathsf{col}(\tilde\nu/\nu) = \mathsf{col}(\lambda/\mu)$ and $|\tilde\gamma/\gamma| + |\tilde\nu/\nu| = |\lambda/\mu|$. Hence
\[ G\hspace{-0.2mm}Q_\mu(x_1,\dots,x_n) = 2^{r} \sum_{\lambda \in\Lambda(\mu)} (-1)^{\mathsf{col}(\lambda/\mu)} (-\beta/2)^{|\lambda/\mu|}\, G\hspace{-0.2mm}P_{\lambda}(x_1,\dots,x_n).\]
This even holds when $\mu=\emptyset$, so taking the limit as $n\to\infty$ gives the theorem.
\end{proof}

\section{Weight-preserving bijections}\label{bij-sect}

As the $G\hspace{-0.2mm}Q_{\lambda}$'s and $G\hspace{-0.2mm}P_{\lambda}$'s are generating functions for set-valued shifted tableaux, Theorem~\ref{to-prove} has some enumerative consequences, which we describe here.

Let $\mathcal{X} \subseteq \{1,2,3,\dots\}\times \{1,2,3,\dots\}$ be a set of positions. Given a set-valued shifted tableau $T$, define $\mathsf{unprimemax}^\mathcal{X}(T)$ to be the tableau formed from $T$ by removing the prime from the largest element of $T_{ij}$ for each $(i,j) \in \mathcal{X}$, whenever this element is primed. If $\mathcal{X} =\{(1,1),(1,2),(1,3)\}$ then
\[
\ytableausetup{boxsize=0.8cm,aligntableaux=center}
\mathsf{unprimemax}^\mathcal{X}\left( \begin{ytableau}
\none & 34' & \none\\
2'2 & 3' & 34'6'
\end{ytableau}\right)
=\begin{ytableau}
\none & 34' & \none\\
2'2 & 3 & 34'6
\end{ytableau}
\]
for example. If $T$ is semistandard and $\mathcal{X} \subseteq \{ (1,1),(2,2),(3,3),\dots \}$ then $\mathsf{unprimemax}^\mathcal{X}(T)$ is also semistandard. This property may fail if $\mathcal{X}$ is not a subset of the main diagonal (as we see in the previous example). If $\lambda$ is a strict partition and $\mathcal{X} \subseteq \{ (i,i) : i \in [\ell(\lambda)]\}$ is a set of diagonal positions, then each $T \in {\rm SetShYT}_P(\lambda)$ has exactly $2^{|\mathcal{X}|}$ preimages in ${\rm SetShYT}_Q(\lambda)$ under $\mathsf{unprimemax}^{\mathcal{X}}$.
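For a concrete illustration of this last claim in the smallest interesting case: if $\lambda = (2,1)$ and $\mathcal{X} = \{(1,1),(2,2)\}$, then the tableau
\[
\ytableausetup{boxsize=0.8cm,aligntableaux=center}
T = \begin{ytableau}
\none & 3 \\
1 & 2
\end{ytableau} \in {\rm SetShYT}_P(\lambda)
\]
has exactly $2^{|\mathcal{X}|} = 4$ preimages under $\mathsf{unprimemax}^{\mathcal{X}}$, obtained by independently choosing whether or not to prime the entry $1$ in box $(1,1)$ and the entry $3$ in box $(2,2)$. Each of the four resulting tableaux is semistandard, and applying $\mathsf{unprimemax}^{\mathcal{X}}$ to it recovers $T$.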
Given strict partitions $\lambda \supseteq \mu$, define ${\rm SetShYT}_P(\lambda : \mu)$ to be the set of semistandard set-valued shifted tableaux $T \in {\rm SetShYT}_Q(\lambda)$ with
\[ \mathsf{unprimemax}^{\mathcal{X}}(T) \in {\rm SetShYT}_P(\lambda)\text{ for }\mathcal{X} := \{ (i,i) : \lambda_i = \mu_i\}.\]
The diagonal entry in row $i$ of such a tableau can have at most one primed element if $\lambda_i = \mu_i$ and no primed elements if $\lambda_i >\mu_i$. Finally, for a strict partition $\mu$ let $\Lambda^\pm(\mu)$ be the set of strict partitions $\lambda\supseteq \mu$ with $\ell(\lambda)=\ell(\mu)$ such that $\mathsf{SD}_{\lambda/\mu}$ is a vertical strip and $(-1)^{\mathsf{col}(\lambda/\mu)+|\lambda/\mu|} = \pm 1$. Then $\Lambda(\mu) = \Lambda^+(\mu)\sqcup \Lambda^-(\mu)$, and this decomposition reflects the decomposition of the right side of Theorem~\ref{to-prove} into positive and negative terms.

\begin{corollary}\label{bijection}
For each strict partition $\mu$ there is a weight-preserving bijection
\[ {\rm SetShYT}_Q(\mu) \sqcup \bigsqcup_{\lambda \in \Lambda^-(\mu)}{\rm SetShYT}_P(\lambda : \mu) \to\bigsqcup_{\lambda \in \Lambda^+(\mu)}{\rm SetShYT}_P(\lambda:\mu). \]
\end{corollary}

\begin{proof}
The domain of the given map is indeed a disjoint union: since $\mathsf{col}(\mu/\mu)+|\mu/\mu| = 0$ we have $\mu \in \Lambda^+(\mu)$, so every $\lambda \in \Lambda^-(\mu)$ is distinct from $\mu$ and hence ${\rm SetShYT}_Q(\mu)$ is disjoint from ${\rm SetShYT}_P(\lambda : \mu)$ for all $\lambda \in \Lambda^-(\mu)$. (Observe also that $\Lambda^+(\mu)$ is never empty, while $\Lambda^-(\mu)$ is empty if and only if all parts of $\mu$ differ by at least two.)

If $\lambda \in \Lambda^\pm(\mu)$ then $\ell(\mu) - |\lambda/\mu| = |\{i : \lambda_i=\mu_i\}|$. Thus, moving the negative terms on the right side of \eqref{q-to-p-eq} to the left side, then substituting the generating functions for $G\hspace{-0.2mm}Q_\mu$ and $G\hspace{-0.2mm}P_\lambda$ in \eqref{GP-GQ-def}, and finally setting $\beta=1$ shows that
\begin{equation}\label{bij1}
\sum_{T \in {\rm SetShYT}_Q(\mu)} x^T + \sum_{\lambda\in\Lambda^-(\mu)} \sum_{T \in {\rm SetShYT}_P(\lambda)} 2^{|\{ i : \lambda_i=\mu_i\}|} x^T
\end{equation}
is equal to
\begin{equation}\label{bij2}
\sum_{\lambda\in\Lambda^+(\mu)} \sum_{T \in {\rm SetShYT}_P(\lambda)} 2^{|\{ i : \lambda_i=\mu_i\}|} x^T.
\end{equation}
We have $\sum_{T \in {\rm SetShYT}_P(\lambda)} 2^{|\{ i : \lambda_i=\mu_i\}|} x^T = \sum_{T \in {\rm SetShYT}_P(\lambda:\mu)} x^T$ for any strict partition $\lambda$, and the corollary follows by substituting this identity into \eqref{bij1} and \eqref{bij2} and equating coefficients.
\end{proof}

It is an interesting open problem to find a bijective proof of Theorem~\ref{to-prove}. One way to achieve this would be to construct an explicit map realizing Corollary~\ref{bijection}. This is easy to do when $\mu = (n)$ has only one nonzero part, in which case the bijection in Corollary~\ref{bijection} is a map
\begin{equation}\label{one-row-map}
{\rm SetShYT}_Q(n) \to {\rm SetShYT}_P(n : n) \sqcup {\rm SetShYT}_P(n+1).
\end{equation}
The set ${\rm SetShYT}_P(n : n)$ is contained in ${\rm SetShYT}_Q(n)$ and is the union of ${\rm SetShYT}_P(n)$ and the set of tableaux formed from elements of ${\rm SetShYT}_P(n)$ by adding a prime to the largest number in box $(1,1)$. A bijection \eqref{one-row-map} is given by mapping each $T \in {\rm SetShYT}_P(n : n)$ to itself, and mapping each $T \in {\rm SetShYT}_Q(n) \setminus {\rm SetShYT}_P(n : n)$ to the tableau in ${\rm SetShYT}_P(n+1)$ formed by adding $\frac{1}{2}$ to the smallest primed number $i' = i -\frac{1}{2}$ in box $(1,1)$, and then splitting this diagonal box into two adjacent boxes containing all numbers $\leq i$ and $>i$, respectively. This map is weight-preserving and would send
\[
\ytableausetup{boxsize=1cm,aligntableaux=center}
\begin{ytableau}
12'3'3 & 34 & 5'
\end{ytableau}
\quad\mapsto\quad
\begin{ytableau}
12 & 3'3 & 34 & 5'
\end{ytableau}
\]
for example. It seems difficult to generalize this idea to larger shapes. Even in the next simplest case $\mu=(3,1)$, we do not know of a straightforward way to describe a weight-preserving bijection of the form in Corollary~\ref{bijection}. In \cite[\S9.2]{IkedaNaruse}, Ikeda and Naruse derive another set of combinatorial formulas for $G\hspace{-0.2mm}P_\lambda$ and $G\hspace{-0.2mm}Q_\lambda$ as generating functions for \emph{excited Young diagrams}. One could also try to find a bijective proof of Theorem~\ref{to-prove} via these expressions.

\section{Skew analogues} \label{skew-sect}

Let $\mu \subseteq \lambda$ be strict partitions. A \emph{semistandard (skew) shifted tableau} of shape $\lambda/\mu$ is a filling of $\mathsf{SD}_{\lambda/\mu} := \mathsf{SD}_\lambda\setminus\mathsf{SD}_\mu$ by positive half-integers such that rows and columns are weakly increasing, with no primed entries repeated in a row and no unprimed entries repeated in a column. Let ${\rm ShYT}_Q(\lambda/\mu)$ be the set of all such tableaux and let ${\rm ShYT}_P(\lambda/\mu)$ be the subset in which primed entries are disallowed from diagonal positions. We define both sets to be empty if $\mu \not\subseteq\lambda$. The \emph{skew Schur $P$- and $Q$-functions} are
\begin{equation}
P_{\lambda/\mu} := \sum_{T\in{\rm ShYT}_P(\lambda/\mu)} x^T\quad\text{and}\quad Q_{\lambda/\mu} := \sum_{T\in{\rm ShYT}_Q(\lambda/\mu)} x^T
\end{equation}
where as usual $x^T := \prod_{i\geq 1} x_i^{m_i}$ with $m_i$ denoting the number of entries of $T$ equal to $i$ or $i'$.

To motivate these symmetric functions, we need some additional notation. If $f \in \mathbb{Z}[\beta][[x_1,x_2,\dots]]$, then write $f(x,y)$ for the power series $f(x_1,y_1,x_2,y_2,\dots)$, where $x_1,x_2,\dots$ and $y_1,y_2,\dots$ are separate sets of commuting variables; we also set $f(x) := f(x_1,x_2,\dots)=f$ and $f(y) := f(y_1,y_2,\dots)$. If $f$ is symmetric then specializing $f(x,y)$ to finitely many variables gives
\[ f(x_1,y_1,x_2,y_2,\dots,x_n,y_n) = f(x_1,x_2,\dots,x_n,y_1,y_2,\dots,y_n).
\]
It follows that we can write
\begin{equation}\label{skew-eq1}
P_\lambda(x,y) = \sum_{\nu } P_\nu(x) P_{\lambda/\nu}(y) \quad\text{and}\quad Q_\lambda(x,y) = \sum_{\nu } Q_\nu(x) Q_{\lambda/\nu}(y)
\end{equation}
where in both sums $\nu$ ranges over all strict partitions \cite[Eq. (8.2)]{Stembridge1989}.

Define the set ${\rm SetShYT}_Q(\lambda/\mu)$ of \emph{semistandard set-valued (skew) shifted tableaux} of shape $\lambda/\mu$ in the same way as ${\rm SetShYT}_Q(\lambda)$, just replacing references to ``fillings of $\mathsf{SD}_\lambda$'' by ``fillings of $\mathsf{SD}_{\lambda/\mu}$.'' Let ${\rm SetShYT}_P(\lambda/\mu)$ be the subset of tableaux in ${\rm SetShYT}_Q(\lambda/\mu)$ with no primed numbers in any diagonal boxes. The \emph{skew $K$-theoretic Schur $P$- and $Q$-functions} are then
\begin{equation}\label{skew-GP-GQ-def}
\begin{aligned}
G\hspace{-0.2mm}P_{\lambda/\mu} &:= \sum_{T\in{\rm SetShYT}_P(\lambda/\mu)} \beta^{|T| - |\lambda/\mu|} x^T, \\
G\hspace{-0.2mm}Q_{\lambda/\mu} &:= \sum_{T\in{\rm SetShYT}_Q(\lambda/\mu)} \beta^{|T| - |\lambda/\mu|} x^T,
\end{aligned}
\end{equation}
where $x^T$ is defined in the same way as for elements of ${\rm SetShYT}_Q(\lambda)$. When $\mu \not\subseteq\lambda$ we consider both ${\rm SetShYT}_P(\lambda/\mu)$ and ${\rm SetShYT}_Q(\lambda/\mu)$ to be empty, so that $G\hspace{-0.2mm}P_{\lambda/\mu}=G\hspace{-0.2mm}Q_{\lambda/\mu}=0$. These generalizations of $G\hspace{-0.2mm}P_\lambda$ and $G\hspace{-0.2mm}Q_\lambda$ were first defined in \cite[\S4.6]{LM2021} in the context of \emph{enriched set-valued $P$-partitions}.

The $K$-theoretic version of \eqref{skew-eq1} involves a variant of these power series. The \emph{removable boxes} of $\mu$ are the positions $(i,j) \in \mathsf{SD}_\mu$ such that $\mathsf{SD}_\mu \setminus\{(i,j)\}$ is the shifted diagram of another strict partition. Let $Rem(\mu)$ be the set of removable boxes of the strict partition $\mu$. For strict partitions $\mu\subseteq \lambda$ define
\begin{equation}
\begin{aligned}
G\hspace{-0.2mm}P_{\lambda/\!\!/ \mu} &:= \sum_{\substack{\nu\subseteq \mu,\hspace{0.5mm} \mathsf{SD}_{\mu/\nu} \subseteq Rem(\mu)} } \beta^{|\mu/\nu|}\, G\hspace{-0.2mm}P_{\lambda/\nu}, \\
G\hspace{-0.2mm}Q_{\lambda/\!\!/\mu} &:= \sum_{\substack{\nu\subseteq \mu,\hspace{0.5mm} \mathsf{SD}_{\mu/\nu} \subseteq Rem(\mu)} } \beta^{|\mu/\nu|}\, G\hspace{-0.2mm}Q_{\lambda/\nu},
\end{aligned}
\end{equation}
where in both sums $\nu$ must be a strict partition. For strict partitions $\mu \not\subseteq\lambda$ we set $G\hspace{-0.2mm}P_{\lambda/\!\!/\mu}=G\hspace{-0.2mm}Q_{\lambda/\!\!/\mu}=0$.
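For a concrete illustration, unwinding this definition in the two smallest nontrivial cases: since $Rem((1)) = \{(1,1)\}$ and $Rem((2,1)) = \{(2,2)\}$, for any strict partition $\lambda$ containing $(1)$ (respectively, $(2,1)$) we get
\[
G\hspace{-0.2mm}P_{\lambda/\!\!/(1)} = G\hspace{-0.2mm}P_{\lambda/(1)} + \beta\, G\hspace{-0.2mm}P_{\lambda}
\qquad\text{and}\qquad
G\hspace{-0.2mm}P_{\lambda/\!\!/(2,1)} = G\hspace{-0.2mm}P_{\lambda/(2,1)} + \beta\, G\hspace{-0.2mm}P_{\lambda/(2)},
\]
with the same formulas holding after replacing every ``$P$'' by ``$Q$''.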
Then the definitions of $G\hspace{-0.2mm}P_\lambda$ and $G\hspace{-0.2mm}Q_\lambda$ as set-valued shifted tableau generating functions imply that
\begin{equation}\label{skew-eq2}
\begin{aligned}
G\hspace{-0.2mm}P_\lambda(x,y) &= \sum_{\nu } G\hspace{-0.2mm}P_\nu(x)\, G\hspace{-0.2mm}P_{\lambda/\!\!/\nu}(y), \\
G\hspace{-0.2mm}Q_\lambda(x,y) &= \sum_{\nu } G\hspace{-0.2mm}Q_\nu(x)\, G\hspace{-0.2mm}Q_{\lambda/\!\!/\nu}(y),
\end{aligned}
\end{equation}
where both sums are over all strict partitions $\nu$. Since the $G\hspace{-0.2mm}P_\nu$'s and $G\hspace{-0.2mm}Q_\nu$'s are linearly independent and symmetric, these identities imply that $G\hspace{-0.2mm}P_{\lambda/\!\!/ \mu}$ and $G\hspace{-0.2mm}Q_{\lambda/\!\!/ \mu}$ are symmetric. Hence $G\hspace{-0.2mm}P_{\lambda/ \mu}$ and $G\hspace{-0.2mm}Q_{\lambda/ \mu}$ are also symmetric, as they can be written in terms of $G\hspace{-0.2mm}P_{\lambda/\!\!/ \mu}$ and $G\hspace{-0.2mm}Q_{\lambda/\!\!/ \mu}$ via inclusion-exclusion \cite[Cor. 5.7]{LM2021}. More strongly, $G\hspace{-0.2mm}Q_{\lambda/\!\!/ \mu}$ (respectively, $G\hspace{-0.2mm}P_{\lambda/\!\!/ \mu}$) is a possibly infinite $\mathbb{Z}[\beta]$-linear combination of $G\hspace{-0.2mm}Q$-functions (respectively, $G\hspace{-0.2mm}P$-functions) by \cite[Cor. 5.13]{LM2021}. Since both functions, when nonzero, are homogeneous of degree $|\lambda| - |\mu|$ if we set $\deg(\beta) =-1$ and $\deg(x_i) = 1$, it follows that
\begin{equation} \label{skew-exp-eq}
\begin{aligned}
G\hspace{-0.2mm}Q_{\lambda/\!\!/ \mu} &= \sum_\nu \widehat a_{\mu\nu}^\lambda\, \beta^{|\mu|+|\nu|-|\lambda|}\, G\hspace{-0.2mm}Q_\nu, \\
G\hspace{-0.2mm}P_{\lambda/\!\!/ \mu} &= \sum_\nu \widehat b_{\mu\nu}^\lambda\, \beta^{|\mu|+|\nu|-|\lambda|}\, G\hspace{-0.2mm}P_\nu,
\end{aligned}
\end{equation}
for unique integers $\widehat a_{\mu\nu}^\lambda, \widehat b_{\mu\nu}^\lambda \in \mathbb{Z}$, which must be zero when $|\mu|+|\nu| < |\lambda|$. It is expected that these coefficients are all nonnegative, and nonzero for only finitely many strict partitions $\nu$; see \cite[Conj. 5.14]{LM2021}.

Given strict partitions $\mu \subseteq \lambda$, we consider the statistic
\[\mathsf{overlap}(\lambda/\mu) := |\{ (i,j) \in \mathsf{SD}_{\lambda/\mu} : (i-1,j) \in \mathsf{SD}_{\lambda/\mu}\}|.\]
This quantity is closely related to $\mathsf{col}(\lambda/\mu) := | \{ j : (i,j) \in \mathsf{SD}_{\lambda/\mu}\}|$.

\begin{lemma}\label{overlap_cancel}
Let $\mu \subseteq \lambda$ be strict partitions with $\ell(\mu) = \ell(\lambda)$.
Then
\[ \sum_{\eta} (-1)^{\mathsf{col}(\lambda/\eta)} 2^{\mathsf{overlap}(\eta/\mu)} = \sum_{\gamma} (-1)^{\mathsf{col}(\gamma/\mu)} 2^{\mathsf{overlap}(\lambda/\gamma)} = \begin{cases}1 &\text{if $\mu=\lambda$,} \\ 0&\text{if $\mu \neq \lambda$,}\end{cases} \]
where the first summation is over all strict partitions $\eta$ with $\mu \subseteq \eta \subseteq \lambda$ such that $\mathsf{SD}_{\lambda/\eta}$ is a vertical strip, and the second summation is over all strict partitions $\gamma$ with $\mu \subseteq \gamma \subseteq \lambda$ such that $\mathsf{SD}_{\gamma/\mu}$ is a vertical strip.
\end{lemma}

\begin{proof}
The desired identity is clear if $\mu =\lambda$. Assume $\mu\neq \lambda$, so that $\mathsf{SD}_{\lambda/\mu}$ is nonempty. We start by showing that the first sum is zero. Suppose the rightmost box of $\mathsf{SD}_{\lambda/\mu}$ is in column $n$ and $\mathsf{SD}_{\lambda/\mu}$ contains $k>0$ boxes in this column. Choose a strict partition $\eta$ with $\mu \subseteq \eta \subseteq \lambda$ such that $\mathsf{SD}_{\lambda/\eta}$ is a vertical strip. Let $L := \{ (i,j) \in \mathsf{SD}_{\eta/\mu} : j<n\}$ and $R := \{ (i,j) \in \mathsf{SD}_{\eta/\mu} : j=n\}$, so that $\mathsf{SD}_{\eta/\mu} = L \sqcup R$. Because $\mathsf{SD}_{\lambda/\eta}$ is a vertical strip, there are only $k+1$ possibilities for $R$, which must be a set of adjacent positions at the bottom of column $n$ in $\mathsf{SD}_{\lambda/\mu}$. Moreover, when $L$ is fixed and $\eta$ varies, each of these possibilities for $R$ occurs exactly once. Now observe that if $r := |R|$ then
\[
\begin{aligned}
\mathsf{overlap}(\eta/\mu) &= | \{ (i,j) \in L : (i-1,j)\in L\} | +| \{ (i,j) \in R : (i-1,j)\in R\} | \\
& = | \{ (i,j) \in L : (i-1,j)\in L\} | +\max \{0,r-1\}
\end{aligned}
\]
and also
\[ \mathsf{col}(\lambda/\eta) = |\{ j : (i,j) \in \mathsf{SD}_{\lambda/\eta} \sqcup R \}| -\begin{cases} 1 &\text{if }r=k, \\ 0&\text{if }0\leq r < k. \end{cases} \]
Since $\mathsf{SD}_{\lambda/\eta} \sqcup R= \mathsf{SD}_{\lambda/\mu} - L$, we can rewrite the preceding identity as
\[ \mathsf{col}(\lambda/\eta) = |\{ j : (i,j) \in \mathsf{SD}_{\lambda/\mu} - L \}| + \min \{ 0, k-r-1\}.\]
By substituting these formulas and factoring out the terms depending on $L$, we deduce that the sum $\sum_{\eta } (-1)^{\mathsf{col}(\lambda/\eta)} 2^{\mathsf{overlap}(\eta/\mu)}$ is a multiple of
\begin{equation}\label{zero-sum-eq}
\sum_{r=0}^k (-1)^{\min\{0, k-r-1\}} 2^{\max\{0,r-1\}} = 2^{0} + 2^{0} + 2^{1} + \dots + 2^{k-2} - 2^{k-1} = 0
\end{equation}
and so is zero itself. A similar argument shows that the other sum in the lemma is zero.
Suppose now that the \emph{leftmost} box of $\mathsf{SD}_{\lambda/\mu}$ is in column $n$ and $\mathsf{SD}_{\lambda/\mu}$ contains $k>0$ boxes in this column. Choose a strict partition $\gamma$ with $\mu \subseteq \gamma \subseteq \lambda$ such that $\mathsf{SD}_{\gamma/\mu}$ is a vertical strip. Let $L := \{ (i,j) \in \mathsf{SD}_{\lambda/\gamma} : j=n\}$ and $R := \{ (i,j) \in \mathsf{SD}_{\lambda/\gamma} : j>n\}$, so that $\mathsf{SD}_{\lambda/\gamma} = L \sqcup R$. Because $\mathsf{SD}_{\gamma/\mu}$ is a vertical strip, there are now only $k+1$ possibilities for $L$, which must be a set of adjacent positions at the top of column $n$ in $\mathsf{SD}_{\lambda/\mu}$, and when $R$ is fixed and $\gamma$ varies, each of these possibilities occurs exactly once. Finally, if $\ell := |L|$ then we have
\[
\begin{aligned}
\mathsf{overlap}(\lambda /\gamma) &= | \{ (i,j) \in R : (i-1,j)\in R\} | +| \{ (i,j) \in L : (i-1,j)\in L\} | \\
& = | \{ (i,j) \in R : (i-1,j)\in R\} | +\max \{0,\ell-1\}
\end{aligned}
\]
and also
\[ \mathsf{col}(\gamma/\mu) = |\{ j : (i,j) \in \mathsf{SD}_{\gamma/\mu} \sqcup L\}| -\begin{cases} 1 &\text{if }\ell=k, \\ 0&\text{if }0\leq \ell < k.\end{cases} \]
Since $\mathsf{SD}_{\gamma/\mu} \sqcup L= \mathsf{SD}_{\lambda/\mu} - R$, we can rewrite the preceding identity as
\[ \mathsf{col}(\gamma/\mu) = |\{ j : (i,j) \in \mathsf{SD}_{\lambda/\mu} - R\}| + \min \{ 0, k-\ell-1\}.\]
By substituting these formulas and factoring out the terms depending on $R$, we deduce that $\sum_{\gamma} (-1)^{\mathsf{col}(\gamma/\mu)} 2^{\mathsf{overlap}(\lambda/\gamma)}$ is also a multiple of \eqref{zero-sum-eq}, as needed.
\end{proof}

\begin{remark}
For strict partitions $\lambda$ and $\mu$, define $M_{\lambda\mu}:=0$ when $\mu\not\subseteq \lambda$ or $\ell(\mu)\neq \ell(\lambda)$, and otherwise set $M_{\lambda\mu} := 2^{\mathsf{overlap}(\lambda/\mu)}$. Also define $N_{\lambda\mu} :=0$ when $\mu\not\subseteq \lambda$ or $\ell(\mu) \neq \ell(\lambda)$ or $\mathsf{SD}_{\lambda/\mu}$ is not a vertical strip, and otherwise set $N_{\lambda\mu} := (-1)^{\mathsf{col}(\lambda/\mu)}$. Lemma~\ref{overlap_cancel} asserts that the matrices $\left[M_{\lambda\mu}\right]$ and $\left[N_{\lambda\mu}\right]$ are inverses.
\end{remark}

We can now derive a skew generalization of Theorem~\ref{to-prove}.

\begin{theorem}\label{skew-to-prove}
Suppose $\nu\subseteq \mu$ are strict partitions.
Then
\[ G\hspace{-0.2mm}Q_{\mu/\!\!/ \nu} = \sum_{(\kappa,\lambda)} 2^{\ell(\mu)-\ell(\nu)+\mathsf{overlap}(\nu/\kappa)}\, (-1)^{\mathsf{col}(\lambda/\mu)}\, (-\beta/2)^{|\nu/\kappa| + |\lambda/\mu|}\, G\hspace{-0.2mm}P_{\lambda /\!\!/\kappa} \]
where the sum is over all pairs of strict partitions $(\kappa,\lambda)$ with $\kappa\subseteq \nu \subseteq \mu \subseteq \lambda$ and $\ell(\kappa) = \ell(\nu) \leq \ell(\mu) = \ell(\lambda)$ such that $\mathsf{SD}_{\lambda/\mu}$ is a vertical strip.
\end{theorem}

\begin{proof}
Expanding $G\hspace{-0.2mm}Q_\mu(x,y)$ using Theorem~\ref{to-prove} and then applying \eqref{skew-eq2} gives
\[ G\hspace{-0.2mm}Q_\mu(x,y) = \sum_\lambda \sum_{\nu} 2^{\ell(\mu)} (-1)^{\mathsf{col}(\lambda/\mu)} (-\beta/2)^{|\lambda/\mu| }\, G\hspace{-0.2mm}P_{\nu}(x)\, G\hspace{-0.2mm}P_{\lambda/\!\!/ \nu}(y) \]
where the first sum is over strict partitions $\lambda\supseteq \mu$ with $\ell(\lambda) = \ell(\mu)$ such that $\mathsf{SD}_{\lambda/\mu}$ is a vertical strip and the second sum is over all strict partitions $\nu \subseteq \lambda$. Alternatively, using Theorem~\ref{to-prove} to expand the right side of \eqref{skew-eq2} gives
\[ G\hspace{-0.2mm}Q_\mu(x,y) = \sum_{\eta } \sum_{\nu}2^{\ell(\nu)} (-1)^{\mathsf{col}(\nu/\eta)} (-\beta/2)^{|\nu/\eta| }\, G\hspace{-0.2mm}P_{\nu}(x)\, G\hspace{-0.2mm}Q_{\mu /\!\!/ \eta}(y) \]
where the first sum is over all strict partitions $\eta \subseteq \mu$ and the second sum is over strict partitions $\nu\supseteq \eta$ with $\ell(\nu) = \ell(\eta)$ such that $\mathsf{SD}_{\nu/\eta}$ is a vertical strip.\footnote{When using Theorem~\ref{to-prove} to expand $G\hspace{-0.2mm}Q_\eta(x)$ in $G\hspace{-0.2mm}Q_\mu(x,y) = \sum_\eta G\hspace{-0.2mm}Q_\eta(x)\, G\hspace{-0.2mm}Q_{\mu/\!\!/\eta}(y)$ one expects to see the factor $2^{\ell(\eta)}$, but this can be changed to $2^{\ell(\nu)}$ since $\ell(\nu) = \ell(\eta)$.} Equating the coefficients of $G\hspace{-0.2mm}P_\nu$ in these expressions gives
\[ \sum_{\eta} 2^{\ell(\nu)} (-1)^{\mathsf{col}(\nu/\eta)} (\tfrac{-\beta}{2})^{|\nu/\eta|}\, G\hspace{-0.2mm}Q_{\mu/\!\!/ \eta} = \sum_{\lambda} 2^{\ell(\mu)} (-1)^{\mathsf{col}(\lambda/\mu)} (\tfrac{-\beta}{2})^{|\lambda/\mu|}\, G\hspace{-0.2mm}P_{\lambda /\!\!/\nu} \]
where the sums are over certain strict partitions $\eta$ and $\lambda$; to be precise, assuming $\nu \subseteq \mu$, the preceding equation is equivalent to
\[ G\hspace{-0.2mm}Q_{\mu/\!\!/\nu} = \sum_{\lambda}
2^{\ell(\mu)-\ell(\nu)} (-1)^{\mathsf{col}(\lambda/\mu)} (\tfrac{-\beta}{2})^{|\lambda/\mu|}\, G\hspace{-0.2mm}P_{\lambda /\!\!/\nu} - \sum_{\eta\subsetneq\nu} (-1)^{\mathsf{col}(\nu/\eta)} (\tfrac{-\beta}{2})^{|\nu/\eta|}\, G\hspace{-0.2mm}Q_{\mu/\!\!/ \eta} \]
where the first sum is over strict partitions $\lambda \supseteq \mu$ with $\ell(\lambda) = \ell(\mu)$ such that $\mathsf{SD}_{\lambda/\mu}$ is a vertical strip and the second sum is over strict partitions $\eta \subsetneq \nu$ with $\ell(\eta) = \ell(\nu)$ such that $\mathsf{SD}_{\nu/\eta}$ is a vertical strip.

When $\nu = (m,m-1,\dots,2,1)$ for some $m \in \mathbb{N}$, the sum over $\eta$ has zero terms and the preceding formula reduces to the desired identity. Otherwise, we may assume by induction that the desired formula holds for each $G\hspace{-0.2mm}Q_{\mu/\!\!/ \eta}$. Substituting these formulas into the displayed equation gives an expression for $G\hspace{-0.2mm}Q_{\mu/\!\!/\nu}$ as a linear combination of $G\hspace{-0.2mm}P_{\lambda/\!\!/\kappa}$'s, where $\kappa$ and $\lambda$ range over all strict partitions with $\kappa\subseteq \nu \subseteq \mu \subseteq \lambda$ and $\ell(\kappa) = \ell(\nu) \leq \ell(\mu) = \ell(\lambda)$ such that $\mathsf{SD}_{\lambda/\mu}$ is a vertical strip. The coefficient of $G\hspace{-0.2mm}P_{\lambda/\!\!/\nu}$ in this expansion is $2^{\ell(\mu)-\ell(\nu)}(-1)^{\mathsf{col}(\lambda/\mu)}(\tfrac{-\beta}{2})^{|\lambda/\mu|}$, as desired. The coefficient of $G\hspace{-0.2mm}P_{\lambda/\!\!/\kappa}$ when $\kappa \subsetneq \nu\subseteq \mu \subseteq \lambda$ and $\ell(\kappa) = \ell(\nu) \leq \ell(\mu) = \ell(\lambda)$ and $\mathsf{SD}_{\lambda/\mu}$ is a vertical strip is
\[- \sum_{\eta} (-1)^{\mathsf{col}(\nu/\eta)}(\tfrac{-\beta}{2})^{|\nu/\eta|}\left(2^{\ell(\mu)-\ell\left(\eta\right)+\mathsf{overlap}(\eta/\kappa)}(-1)^{\mathsf{col}(\lambda/\mu)}(\tfrac{-\beta}{2})^{|\eta/\kappa|+|\lambda/\mu|}\right) \]
where the sum is over all strict partitions $\eta$ with $\kappa \subseteq \eta \subsetneq \nu$ such that $\mathsf{SD}_{\nu/\eta}$ is a vertical strip.
Since then $\ell(\kappa) = \ell(\eta) = \ell(\nu)$, we can rewrite this as
\[2^{\ell(\mu)-\ell(\nu)}\, (-1)^{\mathsf{col}(\lambda/\mu)}\, (\tfrac{-\beta}{2})^{|\lambda/\mu|+|\nu/\kappa|} \sum_{\eta } (-1)^{\mathsf{col}(\nu/\eta)+1}\, 2^{\mathsf{overlap}(\eta/\kappa)},\]
so it suffices to show that $\sum_{\eta} (-1)^{\mathsf{col}(\nu/\eta)+1} 2^{\mathsf{overlap}(\eta/\kappa)} = 2^{\mathsf{overlap}(\nu/\kappa)}$, where the sum is again over $\eta$ with $\kappa \subseteq \eta \subsetneq \nu$ such that $\mathsf{SD}_{\nu/\eta}$ is a vertical strip. After moving all terms to one side, this identity is half of Lemma~\ref{overlap_cancel}.
\end{proof}

We mention one corollary, which was noted in passing above. Let $\delta_m := (m,m-1,\dots,2,1)$ for $m \in \mathbb{N}$. The following recovers Theorem~\ref{to-prove} when $m=0$.

\begin{corollary}
Suppose $\mu$ is a strict partition with $\delta_m \subseteq \mu$. Then
\[ G\hspace{-0.2mm}Q_{\mu/\!\!/ \delta_m} = 2^{\ell(\mu)-m} \sum_{\lambda} (-1)^{\mathsf{col}(\lambda/\mu)} (-\beta/2)^{|\lambda/\mu|}\, G\hspace{-0.2mm}P_{\lambda /\!\!/\delta_m} \]
where the sum is over strict partitions $\lambda\supseteq \mu$ with $\ell(\lambda) = \ell(\mu)$ such that $\mathsf{SD}_{\lambda/\mu}$ is a vertical strip.
\end{corollary}

\begin{proof}
If $\nu =\delta_m$ in Theorem~\ref{skew-to-prove}, then the only strict partition $\kappa \subseteq \nu$ with $\ell(\kappa) = \ell(\nu)$ is $\kappa=\nu=\delta_m$, which has $\mathsf{overlap}(\nu/\kappa) = |\nu/\kappa| = 0$.
\end{proof}

There is some interest in determining when there are coincidences $P_{\lambda/\mu} = P_{\nu/\kappa}$ and $Q_{\lambda/\mu} = Q_{\nu/\kappa}$ among the skew Schur $P$- and $Q$-functions \cite{BW09,GillespieSalois,Salmasian}. This phenomenon is less well-understood than for skew Schur functions. One could consider the same problem for the skew $G\hspace{-0.2mm}P$- and $G\hspace{-0.2mm}Q$-functions. In particular, any equality $G\hspace{-0.2mm}P_{\lambda/\mu} = G\hspace{-0.2mm}P_{\nu/\kappa}$ would imply that $P_{\lambda/\mu} = P_{\nu/\kappa}$ (and likewise for the $Q$-functions), but it is not clear if the converse always holds. With these questions in mind, we explain one nontrivial equality between Schur $Q$-functions that generalizes to the $K$-theoretic setting.

Define the \emph{flip} of a skew shape $\lambda/\mu$ to be the skew shape $\phi(\lambda/\mu)$ whose shifted diagram $\mathsf{SD}_{\phi(\lambda/\mu)}$ is formed by reflecting $\mathsf{SD}_{\lambda/\mu}$ across a line perpendicular to the main diagonal, so that in French notation the bottom row becomes the rightmost column.
We refer to this operation as ``flipping the diagram $\mathsf{SD}_{\lambda/\mu}$'':
\[
\ytableausetup{boxsize=0.5cm,aligntableaux=center}
\begin{array}{c}
\begin{ytableau}
\none & \none & \\
\none & & \\
\none[\cdot] & \none[\cdot] & &
\end{ytableau}
\\[-7pt]\\
\lambda/\mu = (4,2,1)/(2)
\end{array}
\quad\text{flips to}\quad
\begin{array}{c}
\begin{ytableau}
\none & \none & \\
\none & & & \\
\none[\cdot] & \none[\cdot] & \none[\cdot] &
\end{ytableau}
\\[-7pt]\\
\phi(\lambda/\mu) = (4,3,1)/(3).
\end{array}
\]
The following reduces to \cite[Prop. IV.13]{DeWitt} when $\beta=0$:

\begin{proposition}\label{GQ_equal}
Let $\mu \subseteq \lambda$ be strict partitions. Then
\[G\hspace{-0.2mm}P_{\phi(\lambda/\mu)} = G\hspace{-0.2mm}P_{\lambda/\mu} \quad\text{and}\quad G\hspace{-0.2mm}Q_{\phi(\lambda/\mu)} = G\hspace{-0.2mm}Q_{\lambda/\mu}. \]
\end{proposition}

\begin{proof}
We first prove the $G\hspace{-0.2mm}Q$-identity. Suppose $T \in {\rm SetShYT}_Q(\lambda/\mu)$. Write $\min(T)$ and $\max(T)$ for the minimal and the maximal numbers appearing in any entry of $T$. Let $n:=\lceil\min(T)\rceil$ be whichever of $\min(T)$ or $\min(T)+\frac{1}{2}$ is an integer, and define $N:=\lceil\max(T)\rceil$ similarly. Now let $\phi_Q(T)$ be the set-valued shifted tableau of shape $\phi(\lambda/\mu)$ formed by flipping $T$ and then replacing each number $a$ in each set-valued entry by $n+N-\frac{1}{2}-a$. The rows and columns of $\phi_Q(T)$ are weakly increasing since the rows and columns of $T$ are weakly increasing. As exactly one of $a$ or $n+N-\frac{1}{2}-a$ is primed, the tableau $\phi_Q(T)$ does not have any unprimed numbers repeated in a column or primed numbers repeated in a row. We conclude that $\phi_Q$ defines a map ${\rm SetShYT}_Q(\lambda/\mu) \to {\rm SetShYT}_Q(\phi(\lambda/\mu))$, which is clearly invertible. Notice that the weight of $\phi_Q(T)$ is $x^{\phi_Q(T)} = \sigma(x^T)$, where $\sigma$ is the permutation with $\sigma(a) = n+N-a$ for all $n \leq a \leq N$. Since the values of $n$ and $N$ are determined by the monomial $x^T$, the symmetry of $G\hspace{-0.2mm}Q_{\lambda/\mu}$ implies that
\begin{equation}\label{calc-eq}
G\hspace{-0.2mm}Q_{\lambda/\mu} = \sum_{T\in{\rm SetShYT}_Q(\lambda/\mu)} \beta^{|\phi_Q(T)|-|\lambda/\mu|} x^{\phi_Q(T)}= G\hspace{-0.2mm}Q_{\phi(\lambda/\mu)}.
\end{equation}
The proof of the $G\hspace{-0.2mm}P$-identity is similar, except that now for $T \in {\rm SetShYT}_P(\lambda/\mu)$ we define $\phi_P(T)$ from $\phi_Q(T)$ by adding $\frac{1}{2}$ to all numbers in diagonal positions. Since all numbers in diagonal entries of $\phi_Q(T)$ are primed when $T \in {\rm SetShYT}_P(\lambda/\mu)$, we have $x^{\phi_P(T)} = x^{\phi_Q(T)}$, and it follows that $\phi_P$ is a bijection ${\rm SetShYT}_P(\lambda/\mu) \to {\rm SetShYT}_P(\phi(\lambda/\mu))$. We can therefore replace every letter ``$Q$'' in \eqref{calc-eq} by ``$P$'' to deduce that $G\hspace{-0.2mm}P_{\lambda/\mu} =G\hspace{-0.2mm}P_{\phi(\lambda/\mu)}$.
\end{proof}

It is not clear if there is a meaningful way to extend the preceding result to $G\hspace{-0.2mm}P_{\lambda/\!\!/\mu}$ and $G\hspace{-0.2mm}Q_{\lambda/\!\!/\mu}$. If one defines $\phi(\lambda/\!\!/\mu) := \tilde \lambda /\!\!/ \tilde \mu$ where $\phi(\lambda/\mu)= \tilde\lambda/\tilde \mu$, for example, then it may hold that $G\hspace{-0.2mm}P_{\phi(\lambda/\!\!/\mu)} \neq G\hspace{-0.2mm}P_{\lambda/\!\!/\mu}$ and $G\hspace{-0.2mm}Q_{\phi(\lambda/\!\!/\mu)} \neq G\hspace{-0.2mm}Q_{\lambda/\!\!/\mu}$.

\section{Dual functions}\label{dual-sect}

Let $\overline{x} := \frac{-x}{1+\beta x}$, so that $x \oplus \overline x = 0$. Nakagawa and Naruse \cite{NakagawaNaruse} define the \emph{dual $K$-theoretic Schur $P$- and $Q$-functions} $gp_\lambda$ and $gq_\lambda$ to be the unique elements of $\mathbb{Z}[\beta][[x_1,x_2,\dots]]$ indexed by strict partitions $\lambda$ such that
\begin{equation}\label{cauchy-eq}
\sum_\lambda G\hspace{-0.2mm}Q_\lambda(x)\, gp_\lambda(y) = \sum_\lambda G\hspace{-0.2mm}P_\lambda(x)\, gq_\lambda(y) = \prod_{i,j \geq 1} \frac{ 1 - \overline{x_i} y_j}{1-x_iy_j}.
\end{equation}
Both sums in this \emph{Cauchy identity} are over all strict partitions $\lambda$. The power series $gp_\lambda$ and $gq_\lambda$ are a special case of the \emph{dual universal factorial Schur $P$- and $Q$-functions} in \cite[Def. 3.2]{NakagawaNaruse}. One reason these specializations are interesting (compared to the more general ``universal'' functions) is that they have conjectural formulas as generating functions for \emph{shifted reverse plane partitions} \cite[Conj. 5.1]{NakagawaNaruse}. We will discuss this idea in Section~\ref{last-sect}.

Both $gp_\lambda$ and $gq_\lambda$ are symmetric in the $x_i$ variables and of degree $|\lambda|$ if we set $\deg(\beta) =0$ and $\deg(x_i) = 1$ \cite[\S3.2]{NakagawaNaruse}. The sets $\{ gp_\lambda :\lambda\text{ is a strict partition} \}$ and $\{gq_\lambda :\lambda\text{ is a strict partition}\}$ are $\mathbb{Z}[\beta]$-bases for subrings of $\mathbb{Z}[\beta][[x_1,x_2,\dots]]$ by \cite[Thm. 3.1]{NakagawaNaruse}, and it holds that $P_{\lambda} = gp_{\lambda}|_{\beta=0}$ and $Q_{\lambda} =gq_{\lambda}|_{\beta=0}$ \cite[\S3.2]{NakagawaNaruse}.

\begin{proposition}\label{recover-prop}
We recover $gp_\lambda$ from $gp_\lambda|_{\beta=1}$ (respectively, $gq_\lambda$ from $gq_\lambda|_{\beta=1}$) by substituting $x_i \mapsto \beta^{-1} x_i$ for all $i$ and then multiplying by $\beta^{|\lambda|}$.
As such, if we set $\deg(\beta)=\deg(x_i)=1$ then $gp_{\lambda}$ and $gq_{\lambda}$ are homogeneous of degree $|\lambda|$.
\end{proposition}

\begin{proof}
We recover the original form of $\prod_{i,j \geq 1} \tfrac{ 1 - \overline{x_i} y_j}{1-x_iy_j}$ after setting $\beta=1$ by substituting $x_i \mapsto \beta x_i$ and $y_j \mapsto \beta^{-1} y_j$ for all $i$ and $j$. It follows that if we define $gp^{(1)}_\lambda:=gp_\lambda|_{\beta=1}$ and $G\hspace{-0.2mm}Q^{(1)}_\lambda := G\hspace{-0.2mm}Q_\lambda|_{\beta=1}$ then
\[
\begin{aligned}
\sum_\lambda G\hspace{-0.2mm}Q^{(1)}_\lambda(\beta x_1, \beta x_2,\dots)\, gp^{(1)}_\lambda(\beta^{-1} y_1, \beta^{-1} y_2,\dots) &= \prod_{i,j \geq 1} \tfrac{ 1 - \overline{x_i} y_j}{1-x_iy_j} \\
&= \sum_\lambda G\hspace{-0.2mm}Q_\lambda(x)\, gp_\lambda(y).
\end{aligned}
\]
As it is clear from \eqref{GP-GQ-def} that $G\hspace{-0.2mm}Q_\lambda(x) = \beta^{-|\lambda|}G\hspace{-0.2mm}Q^{(1)}_\lambda(\beta x_1, \beta x_2,\dots)$, this equation can only hold if $gp_\lambda(y)= \beta^{|\lambda|}\, gp^{(1)}_\lambda(\beta^{-1} y_1, \beta^{-1} y_2,\dots)$, which is equivalent to what is claimed in the proposition. The identity for $gq_\lambda$ follows similarly.
\end{proof}

Theorem~\ref{to-prove} has a dual version that gives a $gp$-expansion of $gq_\lambda$:

\begin{corollary}\label{to-prove2}
If $\lambda$ is a strict partition then
\[ gq_\lambda = 2^{\ell(\lambda)} \sum_{\mu} (-1)^{\mathsf{col}(\lambda/\mu)} (-\beta/2)^{|\lambda/\mu| } gp_\mu \]
where the sum is over strict partitions $\mu \subseteq \lambda$ with $\ell(\mu) = \ell(\lambda)$ such that $\mathsf{SD}_{\lambda/\mu}$ is a vertical strip.
\end{corollary}

\begin{proof}
Expand $G\hspace{-0.2mm}Q_\mu(x)$ in $\sum_\mu G\hspace{-0.2mm}Q_\mu(x)\, gp_\mu(y) = \sum_\lambda G\hspace{-0.2mm}P_\lambda(x)\, gq_\lambda(y)$ using Theorem~\ref{to-prove} and then equate the coefficients of $G\hspace{-0.2mm}P_\lambda(x)$.
\end{proof}

For example, $gq_{(m,\dots,3,2,1)} = 2^m gp_{(m,\dots,3,2,1)}$ for any $m\in \mathbb{N}$ and $gq_{(n)} = 2 gp_{(n)} + \beta gp_{(n-1)}$ when $n \geq 2$.
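Continuing these examples, a short computation with Corollary~\ref{to-prove2}, included here for illustration, gives
\[
gq_{(3,1)} = 4\, gp_{(3,1)} + 2\beta\, gp_{(2,1)}
\qquad\text{and}\qquad
gq_{(3,2)} = 4\, gp_{(3,2)} + 2\beta\, gp_{(3,1)} - \beta^2\, gp_{(2,1)},
\]
since besides $\mu = \lambda$ the only admissible $\mu$ in these cases are $(2,1)$ for $\lambda=(3,1)$, and $(3,1)$ and $(2,1)$ for $\lambda=(3,2)$. The negative coefficient in the expansion of $gq_{(3,2)}$ reflects the fact that $(3,2) - (2,1) = (1,1)$ is not strict, as in the next corollary.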
There is also an analogue of Corollary~\ref{positive_case}:

\begin{corollary}
If $\lambda$ is a strict partition with $m$ parts then $\mathfrak{g}q_\lambda$ is an $\mathbb{N}[\beta]$-linear combination of $\mathfrak{g}p$-functions if and only if $\lambda - (m,\dots,3,2,1)$ is also strict.
\end{corollary}

\begin{proof}
One can check that if $\lambda - (m,\dots,3,2,1)$ is strict then every $\mu$ indexing the sum in Corollary~\ref{to-prove2} has $\col(\lambda/\mu) = |\lambda/\mu|$, while if $\lambda - (m,\dots,3,2,1)$ is not strict then at least one such $\mu$ has $\col(\lambda/\mu) =1$ and $|\lambda/\mu|=2$.
\end{proof}

Recall that $\widehat a_{\mu\nu}^\lambda$ and $\widehat b_{\mu\nu}^\lambda$ are the integers appearing in the respective expansions of $G\hspace{-0.2mm}Q_{\lambda\ss\mu}$ and $G\hspace{-0.2mm}P_{\lambda\ss\mu}$ in \eqref{skew-exp-eq}. These numbers are zero if $|\mu|+|\nu| < |\lambda|$.

\begin{proposition}\label{ab-prop1}
If $\mu$ and $\nu$ are strict partitions then
\begin{equation}\label{dual-exp-eq2}
\mathfrak{g}p_\mu \mathfrak{g}p_\nu = \sum_{\lambda} \widehat a_{\mu\nu}^\lambda \beta^{|\mu|+|\nu|-|\lambda|} \mathfrak{g}p_{\lambda} \text{ and } \mathfrak{g}q_\mu \mathfrak{g}q_\nu = \sum_{\lambda} \widehat b_{\mu\nu}^\lambda \beta^{|\mu|+|\nu|-|\lambda|} \mathfrak{g}q_{\lambda}
\end{equation}
where the sums are over all strict partitions $\lambda$.
\end{proposition}

\begin{proof}
This follows by a standard argument similar to the proof of \cite[Prop. 8.2]{Stembridge1989}, which is equivalent to \eqref{dual-exp-eq2} when $\beta=0$. Let $\Delta(x,y) := \prod_{i,j \geq 1} \frac{ 1 - \overline{x_i} y_j}{1-x_iy_j}$. Introduce a third set of commuting variables $z_1,z_2,z_3,\dots$. Then
\[\begin{aligned}
\sum_{\lambda}\sum_{\mu} G\hspace{-0.2mm}Q_\mu(x)G\hspace{-0.2mm}Q_{\lambda\ss \mu}(y) \mathfrak{g}p_\lambda(z) &= \sum_\lambda G\hspace{-0.2mm}Q_\lambda(x,y) \mathfrak{g}p_\lambda(z) = \Delta(x,z) \Delta(y,z) \\&= \left(\sum_\mu G\hspace{-0.2mm}Q_\mu(x) \mathfrak{g}p_\mu(z)\right)\left(\sum_\nu G\hspace{-0.2mm}Q_\nu(y) \mathfrak{g}p_\nu(z)\right)
\end{aligned}\]
by equations \eqref{skew-eq2} and \eqref{cauchy-eq}. The first identity follows by substituting the formula \eqref{skew-exp-eq} for $G\hspace{-0.2mm}Q_{\lambda\ss \mu}$ into the first expression and extracting the coefficients of $G\hspace{-0.2mm}Q_\mu(x) G\hspace{-0.2mm}Q_\nu(y)$. The second identity follows similarly.
\end{proof}

Since $G\hspace{-0.2mm}P_{\lambda\ss\mu} = G\hspace{-0.2mm}Q_{\lambda\ss\mu}=0$ when $\mu\not\subseteq\lambda$, we already know from \eqref{skew-exp-eq} that
\begin{equation}
\widehat a_{\mu\nu}^\lambda =\widehat b_{\mu\nu}^\lambda =0
\end{equation}
when $\mu \not\subseteq \lambda$ (and, as noted above, when $|\lambda| > |\mu| + |\nu|$). We also have
\begin{equation}
\widehat a_{\mu\nu}^\lambda =\widehat a_{\nu\mu}^\lambda \quad\text{and}\quad \widehat b_{\mu\nu}^\lambda =\widehat b_{\nu\mu}^\lambda
\end{equation}
by Proposition~\ref{ab-prop1} since $\mathbb{Z}[\beta][[x_1,x_2,\dots]]$ is a commutative ring.

If $\mu$ and $\nu$ are strict partitions then \cite[Props. 3.4 and 3.5]{IkedaNaruse} imply that
\begin{equation}\label{skew-exp-eq2}
\begin{aligned}
G\hspace{-0.2mm}P_\mu G\hspace{-0.2mm}P_\nu &= \sum_\lambda a_{\mu\nu}^\lambda \beta^{|\lambda|-|\mu|-|\nu|} G\hspace{-0.2mm}P_\lambda, \\
G\hspace{-0.2mm}Q_\mu G\hspace{-0.2mm}Q_\nu &= \sum_\lambda b_{\mu\nu}^\lambda \beta^{|\lambda|-|\mu|-|\nu|} G\hspace{-0.2mm}Q_\lambda,
\end{aligned}
\end{equation}
for unique integers $a_{\mu\nu}^\lambda , b_{\mu\nu}^\lambda \in \mathbb{Z}$. It is known from \cite{CTY} that the $G\hspace{-0.2mm}P$-expansion is finite with every $a_{\mu\nu}^\lambda \in \mathbb{N}$, but \emph{a priori} the coefficients $b_{\mu\nu}^\lambda$ could be nonzero for infinitely many strict partitions $\lambda$. We will discuss the problem of showing that the $G\hspace{-0.2mm}Q$-expansion is finite in Section~\ref{last-sect}. It is again clear that
\begin{equation}\label{ab-ba-eq}
a_{\mu\nu}^\lambda = a_{\nu\mu}^\lambda \quad\text{and}\quad b_{\mu\nu}^\lambda = b_{\nu\mu}^\lambda
\end{equation}
but the following property requires an argument.

\begin{proposition}\label{contain-bound-lem}
One has $a^\lambda_{\mu\nu} = b^\lambda_{\mu\nu} =0$ if $\mu \not\subseteq \lambda$ or $\nu\not \subseteq\lambda$ or $|\mu| + |\nu|>|\lambda|$.
\end{proposition}

\begin{proof}
\cite[Prop. 3.4]{IkedaNaruse} asserts that any finite product of $G\hspace{-0.2mm}P$-functions is a (possibly infinite) $\mathbb{Z}[\beta]$-linear combination of $G\hspace{-0.2mm}P$-functions, while \cite[Prop. 3.5]{IkedaNaruse} states an analogous property for the $G\hspace{-0.2mm}Q$-functions. Since $G\hspace{-0.2mm}P_\lambda$ and $G\hspace{-0.2mm}Q_\lambda$ are both homogeneous of degree $|\lambda|$ if we set $\deg(\beta)=-1$ and $\deg(x_i)=1$, it must hold that $a^\lambda_{\mu\nu} = b^\lambda_{\mu\nu} =0$ when $|\mu| + |\nu|>|\lambda|$. It remains to show that both coefficients are zero when $\mu \not\subseteq \lambda$. This follows for $a_{\mu\nu}^\lambda$ from the Littlewood--Richardson rule for the $G\hspace{-0.2mm}P_{\lambda}$'s due to Clifford, Thomas, and Yong \cite[Thm.
1.2]{CTY}: in this result, the coefficient $C^{\lambda}_{\mu\nu}$ is equal to $a_{\mu\nu}^\lambda$ by \cite[Cor. 8.1]{IkedaNaruse}.

We turn to the $b^{\lambda}_{\mu\nu}$ coefficients. When $p$ is a positive integer, there is a Pieri-type formula for the $G\hspace{-0.2mm}Q$-expansion of $G\hspace{-0.2mm}Q_\mu G\hspace{-0.2mm}Q_{(p)}$ due to Buch and Ravikumar \cite[Cor. 5.6]{BuchRavikumar}: in this result, the coefficient $c^\lambda_{\mu,p}$ is equal to $b^{\lambda}_{\mu\nu}$ for $\nu=(p)$, again by \cite[Cor. 8.1]{IkedaNaruse}. Buch and Ravikumar's formula implies that $b^{\lambda}_{\mu\nu} = 0$ if $\mu \not\subseteq\lambda$ and $\ell(\nu) \leq 1$. It follows that if $p_1,p_2,\dots,p_k$ are any positive integers then the product $G\hspace{-0.2mm}Q_\mu G\hspace{-0.2mm}Q_{(p_1)}G\hspace{-0.2mm}Q_{(p_2)}\cdots G\hspace{-0.2mm}Q_{(p_k)}$ is a possibly infinite $\mathbb{Z}[\beta]$-linear combination of $G\hspace{-0.2mm}Q_\lambda$'s indexed by strict partitions with $\lambda \supseteq \mu$. From this observation, to complete the proof it is enough to show that each $G\hspace{-0.2mm}Q_\nu$ is a possibly infinite $\mathbb{Z}[\beta]$-linear combination of products of the form $G\hspace{-0.2mm}Q_{(p_1)}G\hspace{-0.2mm}Q_{(p_2)}\cdots G\hspace{-0.2mm}Q_{(p_k)}$.

This claim is a consequence of \cite[Thm. 5.8]{NakagawaNaruse0}, which gives a formula for a more general \emph{universal factorial Hall--Littlewood $Q$-function} $H\hspace{-0.2mm}Q^{\mathbb{L}}_\nu(\mathbf{x}_n;t\hspace{0.5mm} | \hspace{0.5mm} \mathbf{b})$, essentially as a linear combination of products of other such functions indexed by one-row partitions. The notation in \cite{NakagawaNaruse0} makes it slightly nontrivial to connect this formula to our situation. However, the property needed for $G\hspace{-0.2mm}Q_\nu$ follows directly from the discussion in \cite[\S5.2.4]{NakagawaNaruse0} (in particular, from \cite[Thm. 5.10]{NakagawaNaruse0}), once one observes that the generating function
\[ G\hspace{-0.2mm}Q(u\hspace{0.5mm} |\hspace{0.5mm} \mathbf{b})^{(k)} := \frac{1}{1+\beta u} \prod_{j=1}^n \frac{1 + (u^{-1}+ \beta)x_j}{1+ (u^{-1} + \beta)\overline{x_j}} \times \prod_{j=1}^k (1 + (u^{-1}+\beta)b_j)\]
in \cite[Eq.\ (5.10)]{NakagawaNaruse0} reduces when $\mathbf{b} = \mathbf{0}$ and $n\to \infty$ to the expression below:

\begin{lemma}
When expanded as a power series in $u^{-1}$, the expression
\[ \frac{1}{1+\beta u} \prod_{j=1}^\infty \frac{ 1 + (u^{-1}+\beta) x_j}{1 + (u^{-1} + \beta) \overline{x_j}}\]
is equal to $\sum_{n \in \mathbb{Z}} G\hspace{-0.2mm}Q_{(n)} u^{-n}$ where we set $G\hspace{-0.2mm}Q_{(n)} := (-\beta)^{-n}$ for $n<0$.
\end{lemma}

\begin{proof}
This is essentially \cite[Rem. 5.11]{Hud} given \cite[Def. 3.5]{Hud}. Here is a self-contained proof explained to us by Joel Lewis. Let $\ell(T)$ denote the number of boxes in a tableau $T$.
Then $\sum_{n> 0} G\hspace{-0.2mm}Q_{(n)}t^n = \sum_{T} \beta^{|T|-\ell(T)} x^{T} t^{\ell(T)}$ where the sum is over all semistandard set-valued shifted tableaux $T$ with a nonempty one-row shape. Such a tableau $T$ is specified uniquely by the following choices:
\begin{itemize}
\item The finite set of positive integers $\{j_1< \ldots< j_k\}$ such that at least one of $j_i$ or $j_i'$ appears in some box of $T$.
\item The numbers $n_1, \ldots, n_k>0$ such that $j_i$ or $j_i'$ appears in $n_i$ boxes of $T$.
\item For each $i \in [k]$, whether the first box containing $j_i$ or $j_i'$ contains just $j_i$, just $j_i'$, or both $j_i$ and $j_i'$.
\item For each $i>1$, whether the first box containing $j_i$ or $j_i'$ contains no smaller numbers, or contains at least one of $j_{i-1}$ or $j_{i-1}'$.
\end{itemize}
For the tableau $T$ corresponding to this data, the value of $|T|$ (respectively, $\ell(T)$) depends on the numbers $n_1,\ldots,n_k$ and the choices in the third (respectively, fourth) bullet point. It follows that we can write $\sum_{n>0} G\hspace{-0.2mm}Q_{(n)} t^n$ as
\[ \sum_{\substack{ k>0 \\ 0<j_1 < \dots < j_k \\ n_1, \dots, n_k > 0 }} (2 x_{j_1} + \beta x_{j_1}^2)t \times (x_{j_1} t)^{n_1 - 1} \times \prod_{i = 2}^k (2\beta x_{j_i} + \beta^2 x_{j_i}^2)(\tfrac{t}{\beta} + 1) \times (x_{j_i} t)^{n_i - 1}. \]
Fixing $k$ and $j_1<\dots<j_k$ and summing over $n_1,\dots,n_k$ turns this into
\[ \sum_{\substack{ k>0 \\ 0<j_1 < \dots < j_k}} \frac{(2x_{j_1} + \beta x_{j_1}^2)t}{ 1 - x_{j_1} t} \times \prod_{i = 2}^k \frac{(2x_{j_i} + \beta x_{j_i}^2)(t + \beta)}{1 - x_{j_i} t}. \]
Pulling out a factor of $\frac{t}{t+\beta} = \frac{1}{1+\beta t^{-1}}$ transforms this to
\[ \sum_{\substack{S \subseteq \{1,2,\dots\} \\ 0<|S| <\infty}} \tfrac{t}{t+\beta}\prod_{j \in S} \tfrac{(2x_{j} + \beta x_{j}^2)(t + \beta)}{1 - x_{j} t} = \tfrac{1}{1+\beta t^{-1}}\left(\prod_{j=1}^\infty \left(1 + \tfrac{(2x_{j} + \beta x_{j}^2)(t + \beta)}{1 - x_{j} t} \right) - 1\right). \]
Since $1 + \frac{(2x + \beta x^2)(t + \beta )}{1 - x t} = \frac{(1 + \beta x)(1 + \beta x + xt)}{1 - xt} = \frac{1 + (t+\beta)x}{1 + (t+\beta)\overline{x}}$ we conclude that
\[\sum_{n>0} G\hspace{-0.2mm}Q_{(n)} t^n = \frac{1}{1+\beta t^{-1}}\prod_{j=1}^\infty \frac{1 + (t+\beta)x_j}{1 + (t+\beta)\overline{x_j}} - \frac{1}{1+\beta t^{-1}}.\]
The desired formula follows by replacing the formal parameter $t$ by $u^{-1}$.
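Incidentally, the rational-function identity used in the last step is easy to confirm by machine. The following few lines of sympy are a minimal sketch of such a check (an illustration only, not part of the argument; $\beta$ is written as \texttt{b}):
\begin{verbatim}
# Check: 1 + (2x + b x^2)(t + b)/(1 - x t) == (1 + (t+b)x)/(1 + (t+b)xbar),
# where xbar = -x/(1 + b x) is the "dual" variable used throughout.
import sympy as sp

x, t, b = sp.symbols('x t b')
xbar = -x / (1 + b*x)
lhs = 1 + (2*x + b*x**2)*(t + b)/(1 - x*t)
rhs = (1 + (t + b)*x) / (1 + (t + b)*xbar)
assert sp.simplify(lhs - rhs) == 0
\end{verbatim}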
\end{proof}

\noindent This completes the proof of Proposition~\ref{contain-bound-lem}.
\end{proof}

For any strict partitions $\lambda$ and $\mu$ we define
\begin{equation}\label{skew-g-def}
\mathfrak{g}q_{\lambda/\mu} := \sum_{\nu} a_{\mu\nu}^\lambda \beta^{|\lambda|-|\mu|-|\nu|} \mathfrak{g}q_\nu \text{ and } \mathfrak{g}p_{\lambda/\mu} := \sum_{\nu} b_{\mu\nu}^\lambda \beta^{|\lambda|-|\mu|-|\nu|} \mathfrak{g}p_\nu
\end{equation}
where $\nu$ ranges over all strict partitions. Proposition~\ref{contain-bound-lem} implies that these symmetric functions have bounded degree and are zero whenever $\mu\not\subseteq\lambda$. As $\mathfrak{g}p_\lambda$ and $\mathfrak{g}q_\lambda$ are homogeneous of degree $|\lambda|$ when $\deg(\beta)= 1$, it follows that $\mathfrak{g}p_{\lambda/\mu}$ and $\mathfrak{g}q_{\lambda/\mu}$ are homogeneous of degree $|\lambda|-|\mu|$ when nonzero.

\begin{proposition}
If $\lambda$ is a strict partition then
\begin{equation}\label{skew-eq3}
\mathfrak{g}p_\lambda(x,y) = \sum_\nu \mathfrak{g}p_\nu(x) \mathfrak{g}p_{\lambda/\nu}(y) \quad\text{and}\quad \mathfrak{g}q_\lambda(x,y) = \sum_\nu \mathfrak{g}q_\nu(x) \mathfrak{g}q_{\lambda/\nu}(y)
\end{equation}
where both sums are over all strict partitions $\nu$.
\end{proposition}

\begin{proof}
These identities are equivalent to the first two parts of \cite[Prop. 3.2]{NakagawaNaruse}. We show how to derive the first identity for completeness. Observe that
\[\begin{aligned}
\sum_\lambda G\hspace{-0.2mm}Q_\lambda(x) \mathfrak{g}p_\lambda(y,z) &= \Delta(x,y) \Delta(x,z) \\&= \left(\sum_\mu G\hspace{-0.2mm}Q_\mu(x) \mathfrak{g}p_\mu(y)\right)\left(\sum_\nu G\hspace{-0.2mm}Q_\nu(x) \mathfrak{g}p_\nu(z)\right) \\& = \sum_{\mu, \nu,\lambda} b_{\mu\nu}^\lambda \beta^{|\lambda|-|\mu|-|\nu|} G\hspace{-0.2mm}Q_\lambda(x) \mathfrak{g}p_\mu(y) \mathfrak{g}p_\nu(z) \\& =\sum_{\lambda} \left(G\hspace{-0.2mm}Q_\lambda(x)\sum_\mu \mathfrak{g}p_\mu(y) \mathfrak{g}p_{\lambda/\mu}(z)\right)
\end{aligned}\]
by \eqref{cauchy-eq} and \eqref{skew-g-def}. Now equate the coefficients of $G\hspace{-0.2mm}Q_\lambda(x)$.
\end{proof}

\begin{remark}
Setting $\beta=0$ transforms \eqref{skew-eq3} to \eqref{skew-eq1}, so $P_{\lambda/\mu} = \mathfrak{g}p_{\lambda/\mu}|_{\beta=0}$ and $Q_{\lambda/\mu} =\mathfrak{g}q_{\lambda/\mu}|_{\beta=0}.$ Thus $\mathfrak{g}p_{\lambda/\mu}$ and $\mathfrak{g}q_{\lambda/\mu}$ are each nonzero if and only if $\mu \subseteq\lambda$.
\end{remark}

We may now prove a dual version of Theorem~\ref{skew-to-prove}.

\begin{theorem}\label{skew-to-prove2}
Suppose $\kappa\subseteq \lambda$ are strict partitions.
Then
\[ \mathfrak{g}q_{\lambda/\kappa} = \sum_{(\nu,\mu)} 2^{\ell(\lambda)-\ell(\kappa)+\mathsf{overlap}(\nu/\kappa)} (-1)^{\col(\lambda/\mu)} (-\beta/2)^{|\nu/\kappa| + |\lambda/\mu|} \mathfrak{g}p_{\mu/\nu} \]
where the sum is over all pairs of strict partitions $(\nu,\mu)$ with $\kappa\subseteq \nu \subseteq \mu \subseteq \lambda$ and $\ell(\kappa) = \ell(\nu) \leq \ell(\mu) = \ell(\lambda)$ such that $\mathsf{SD}_{\lambda/\mu}$ is a vertical strip.
\end{theorem}

\begin{remark}
If there were a bilinear form that made $\{ \mathfrak{g}q_{\lambda/\kappa} \}$ the dual basis of $\{ G\hspace{-0.2mm}P_{\lambda/\kappa} \}$ and $\{ \mathfrak{g}p_{\mu/\nu} \}$ the dual basis of $\{ G\hspace{-0.2mm}Q_{\mu/\nu} \}$, then this result would follow immediately from Theorem~\ref{skew-to-prove}. Since the various skew functions are not even linearly independent, no such form is available and we need another argument.
\end{remark}

\begin{proof}
Expanding $\mathfrak{g}q_\lambda(x,y)$ using Corollary~\ref{to-prove2} and then applying \eqref{skew-eq3} gives
\[ \mathfrak{g}q_\lambda(x,y) = \sum_\mu \sum_{\kappa} 2^{\ell(\lambda)} (-1)^{\col(\lambda/\mu)} (-\beta/2)^{|\lambda/\mu| } \mathfrak{g}p_{\kappa}(x) \mathfrak{g}p_{\mu/ \kappa}(y) \]
where the outer sum is over all strict partitions $\mu$ with $\mu \subseteq \lambda$ and $\ell(\mu) = \ell(\lambda)$ such that $\mathsf{SD}_{\lambda/\mu}$ is a vertical strip and the inner sum is over strict partitions $\kappa$ with $\kappa \subseteq \mu$ (since $\mathfrak{g}p_{\mu/\kappa} =0$ if $\kappa\not\subseteq \mu$). Alternatively, using Corollary~\ref{to-prove2} to expand the right side of \eqref{skew-eq3} gives
\[ \mathfrak{g}q_\lambda(x,y) = \sum_{\eta} \sum_{\kappa} 2^{\ell(\kappa)} (-1)^{\col(\eta/\kappa)} (-\beta/2)^{|\eta/\kappa| } \mathfrak{g}p_{\kappa}(x) \mathfrak{g}q_{\lambda / \eta}(y) \]
where the outer sum is over all strict partitions $\eta$ with $\eta\subseteq \lambda$ (since $\mathfrak{g}q_{\lambda/\eta} =0$ when $\eta \not\subseteq \lambda$) and the inner sum over $\kappa$ with $\kappa\subseteq \eta$ and $\ell(\kappa) = \ell(\eta)$ such that $\mathsf{SD}_{\eta/\kappa}$ is a vertical strip.
Equating coefficients of $\mathfrak{g}p_\kappa$ gives
\[ \sum_\eta 2^{\ell(\kappa)} (-1)^{\col(\eta/\kappa)} (\tfrac{-\beta}{2})^{|\eta/\kappa| } \mathfrak{g}q_{\lambda / \eta} = \sum_\mu 2^{\ell(\lambda)} (-1)^{\col(\lambda/\mu)} (\tfrac{-\beta}{2})^{|\lambda/\mu| } \mathfrak{g}p_{\mu/ \kappa} \]
where the sums are over certain strict partitions $\eta$ and $\mu$; we can rewrite this as
\[ \mathfrak{g}q_{\lambda / \kappa} = \sum_{\mu} 2^{\ell(\lambda)-\ell(\kappa)} (-1)^{\col(\lambda/\mu)} (\tfrac{-\beta}{2})^{|\lambda/\mu| } \mathfrak{g}p_{\mu/ \kappa} - \sum_{\eta} (-1)^{\col(\eta/\kappa)} (\tfrac{-\beta}{2})^{|\eta/\kappa| } \mathfrak{g}q_{\lambda / \eta} \]
where the first sum is over all strict partitions $\mu$ with $\kappa \subseteq \mu \subseteq \lambda$ and $\ell(\mu) = \ell(\lambda)$ such that $\mathsf{SD}_{\lambda/\mu}$ is a vertical strip, and the second sum is over all strict partitions $\eta$ with $\kappa\subsetneq \eta \subseteq \lambda$ and $\ell(\kappa) = \ell(\eta)$ such that $\mathsf{SD}_{\eta/\kappa}$ is a vertical strip.

If $\kappa = \lambda$ then $\mathfrak{g}q_{\lambda/\kappa} = 1=\mathfrak{g}p_{\lambda/\kappa}$ as the theorem predicts. Otherwise, we may assume by induction that the desired formula holds for $\mathfrak{g}q_{\lambda/ \eta}$ with $\kappa\subsetneq \eta \subseteq \lambda$. Substituting these formulas into the equation above expands $\mathfrak{g}q_{\lambda/\kappa}$ as a linear combination of $\mathfrak{g}p_{\mu/\nu}$'s where $\mu$ and $\nu$ range over the strict partitions with $\kappa \subseteq \nu \subseteq \mu \subseteq \lambda$ and $\ell(\kappa) = \ell(\nu) \leq \ell(\mu) = \ell(\lambda)$ such that $\mathsf{SD}_{\lambda/\mu}$ is a vertical strip. The coefficient of $\mathfrak{g}p_{\mu/\kappa}$ in this expansion is $2^{\ell(\lambda)-\ell(\kappa)}(-1)^{\col(\lambda/\mu)}(\tfrac{-\beta}{2})^{|\lambda/\mu|}$ as desired.
The coefficient of $\mathfrak{g}p_{\mu/\nu}$ when $\kappa \subsetneq \nu\subseteq \mu \subseteq \lambda$ and $\ell(\kappa)=\ell(\nu)\leq \ell(\mu) = \ell(\lambda)$ and $\mathsf{SD}_{\lambda/\mu}$ is a vertical strip is the sum
\[ -\sum_{\eta} (-1)^{\col(\eta/\kappa)}(\tfrac{-\beta}{2})^{|\eta/\kappa|} \left( 2^{\ell(\lambda) - \ell(\eta)+\mathsf{overlap}(\nu/\eta)} (-1)^{\col(\lambda/\mu)} (\tfrac{-\beta}{2})^{|\nu/\eta| + |\lambda/\mu|}\right) \]
over all strict partitions $\eta$ where $\kappa \subsetneq \eta \subseteq \nu$ and $\mathsf{SD}_{\eta/\kappa}$ is a vertical strip. Every such $\eta$ has $\ell(\eta) = \ell(\kappa)$, since $\kappa \subseteq \eta \subseteq \nu$ and $\ell(\kappa)=\ell(\nu)$, so this sum can be rewritten as
\[ 2^{\ell(\lambda)-\ell(\kappa)} (-1)^{\col(\lambda/\mu)} (\tfrac{-\beta}{2})^{|\lambda/\mu|+|\nu/\kappa|} \sum_{\eta } (-1)^{\col(\eta/\kappa)+1} 2^{\mathsf{overlap}(\nu/\eta)}. \]
We see that it suffices to show that $\sum_{\eta} (-1)^{\col(\eta/\kappa)+1} 2^{\mathsf{overlap}(\nu/\eta)} = 2^{\mathsf{overlap}(\nu/\kappa)}$ where the sum is again over $\eta$ with $\kappa \subsetneq \eta \subseteq \nu$ such that $\mathsf{SD}_{\eta/\kappa}$ is a vertical strip. After rearranging terms, this identity is the second half of Lemma~\ref{overlap_cancel}.
\end{proof}

By putting everything together we can also prove a generalization of \eqref{cauchy-eq}. This gives two shifted analogues of Yeliussizov's skew Cauchy identity for \emph{symmetric Grothendieck polynomials} \cite[Thm. 5.1]{Y2019}.

\begin{theorem}\label{GP-cauchy-thm}
Let $\mu$ and $\nu$ be strict partitions.
Then
\begin{equation}\label{GP-cauchy}
\sum_{\lambda} G\hspace{-0.2mm}P_{\lambda \ss \mu}(x) \mathfrak{g}q_{\lambda / \nu}(y) = \prod_{i,j\geq 1} \frac{1-\overline{x_i} y_j}{1-x_iy_j} \sum_\kappa G\hspace{-0.2mm}P_{\nu \ss \kappa}(x) \mathfrak{g}q_{\mu / \kappa}(y)
\end{equation}
and
\begin{equation}\label{GQ-cauchy}
\sum_{\lambda} G\hspace{-0.2mm}Q_{\lambda \ss \mu}(x) \mathfrak{g}p_{\lambda / \nu}(y) = \prod_{i,j\geq 1} \frac{1-\overline{x_i} y_j}{1-x_iy_j} \sum_\kappa G\hspace{-0.2mm}Q_{\nu \ss \kappa}(x) \mathfrak{g}p_{\mu / \kappa}(y)
\end{equation}
where $\overline{x_i} := \frac{-x_i}{1+\beta x_i}$ and where $\lambda$ and $\kappa$ range over all strict partitions.\footnote{Equivalently, one can restrict $\lambda$ to the strict partitions containing both $\mu$ and $\nu$ since otherwise $G\hspace{-0.2mm}Q_{\lambda \ss \mu}(x) \mathfrak{g}p_{\lambda / \nu}(y) =0$, and one can restrict $\kappa$ to the strict partitions contained in both $\mu$ and $\nu$ since otherwise $G\hspace{-0.2mm}Q_{\nu \ss \kappa}(x) \mathfrak{g}p_{\mu / \kappa}(y)=0$.}
\end{theorem}

\begin{proof}
Let $w_1,w_2,w_3,\dots$ be a fourth set of commuting variables. The first expression in \eqref{GP-cauchy} is the coefficient of $G\hspace{-0.2mm}P_{\mu}(w)\mathfrak{g}q_{\nu}(z)$ in
\[ \sum_{\lambda} G\hspace{-0.2mm}P_\lambda(w,x) \mathfrak{g}q_\lambda(z,y) = \Delta(w,z)\Delta(w,y)\Delta(x,z)\Delta(x,y)\]
by \eqref{skew-eq2}, \eqref{cauchy-eq}, and \eqref{skew-eq3}. Since $\Delta(x,y) = \prod_{i,j\geq 1} \frac{1-\overline{x_i} y_j}{1-x_iy_j}$, to prove \eqref{GP-cauchy} it suffices to show that $\sum_\kappa G\hspace{-0.2mm}P_{\nu \ss \kappa}(x) \mathfrak{g}q_{\mu / \kappa}(y)$ is the coefficient of $G\hspace{-0.2mm}P_{\mu}(w)\mathfrak{g}q_{\nu}(z)$ in $\Delta(w,z)\Delta(w,y)\Delta(x,z)$. By \eqref{cauchy-eq} this product is equal to
\[ \sum_{\kappa}\sum_\gamma\sum_{\lambda} G\hspace{-0.2mm}P_\kappa(w) \mathfrak{g}q_\kappa(z)\cdot G\hspace{-0.2mm}P_\gamma(w) \mathfrak{g}q_\gamma(y)\cdot G\hspace{-0.2mm}P_\lambda(x) \mathfrak{g}q_\lambda(z).
\]
Once we rearrange the terms of this expression as
\[ \sum_{\kappa}\sum_\gamma\sum_{\lambda} G\hspace{-0.2mm}P_\kappa(w) G\hspace{-0.2mm}P_\gamma(w) \cdot G\hspace{-0.2mm}P_\lambda(x) \mathfrak{g}q_\gamma(y) \cdot \mathfrak{g}q_\kappa(z)\mathfrak{g}q_\lambda(z) \]
it follows by \eqref{dual-exp-eq2} and \eqref{skew-exp-eq2} that it is equal to
\[ \sum_{\kappa}\sum_\gamma\sum_{\lambda}\sum_\mu\sum_\nu \beta^{|\mu|-|\kappa|-|\gamma|} a_{\kappa\gamma}^\mu G\hspace{-0.2mm}P_\mu(w) \cdot G\hspace{-0.2mm}P_\lambda(x) \mathfrak{g}q_\gamma(y) \cdot \beta^{|\kappa|+ |\lambda|- |\nu|} \widehat b_{\kappa\lambda}^{\nu}\mathfrak{g}q_\nu(z) .\]
On rearranging the terms of this expression to be
\[ \sum_{\kappa}\sum_\gamma\sum_{\lambda}\sum_\mu\sum_\nu G\hspace{-0.2mm}P_\mu(w) \cdot \beta^{|\kappa|+ |\lambda|- |\nu|} \widehat b_{\kappa\lambda}^{\nu} G\hspace{-0.2mm}P_\lambda(x) \cdot \beta^{|\mu|-|\kappa|-|\gamma|} a_{\kappa\gamma}^\mu \mathfrak{g}q_\gamma(y)\cdot \mathfrak{g}q_\nu(z) \]
we deduce by \eqref{skew-exp-eq} and \eqref{skew-g-def} that
\[ \Delta(w,z)\Delta(w,y)\Delta(x,z) = \sum_{\kappa} \sum_\mu\sum_\nu G\hspace{-0.2mm}P_\mu(w) \cdot G\hspace{-0.2mm}P_{\nu\ss\kappa}(x)\cdot \mathfrak{g}q_{\mu/\kappa}(y) \cdot \mathfrak{g}q_\nu(z) . \]
The coefficient of $G\hspace{-0.2mm}P_{\mu}(w)\mathfrak{g}q_{\nu}(z)$ in the last expression is $\sum_\kappa G\hspace{-0.2mm}P_{\nu \ss \kappa}(x) \mathfrak{g}q_{\mu / \kappa}(y)$, so \eqref{GP-cauchy} holds. The second identity \eqref{GQ-cauchy} follows by the same argument after interchanging the symbols $G\hspace{-0.2mm}P \leftrightarrow G\hspace{-0.2mm}Q$, $\mathfrak{g}p \leftrightarrow \mathfrak{g}q$, $a \leftrightarrow b$, and $\widehat a \leftrightarrow \widehat b$.
\end{proof}

\section{Conjectural generating functions}\label{last-sect}

In this final section we discuss some conjectural formulas for $\mathfrak{g}p_{\lambda/\mu}$ and $\mathfrak{g}q_{\lambda/\mu}$ and two related dual functions.

Let $\mu \subseteq \lambda$ be strict partitions. A \emph{shifted reverse plane partition} of shape $\lambda/\mu$ is a filling of $\mathsf{SD}_{\lambda/\mu}$ by positive half-integers $\{1'<1<2'<2<\dots\}$ such that rows and columns are weakly increasing.
Examples include
\begin{equation}\label{rpp-ex}
\ytableausetup{boxsize=0.5cm,aligntableaux=center}
\begin{ytableau}
\none &\none & \none & 2 \\
\none &\none & 1' & 2 \\
\none &\none[\cdot] & 1' & 1 \\
\none[\cdot] & \none[\cdot] & \none[\cdot] & \none[\cdot] & 4'
\end{ytableau}
\quad\text{and}\quad
\begin{ytableau}
\none &\none & \none & 2' \\
\none &\none & 1' & 1 \\
\none &\none[\cdot] & 1' & 1 \\
\none[\cdot] & \none[\cdot] & \none[\cdot] & \none[\cdot] & 4
\end{ytableau}
\end{equation}
which both have shape $(5,3,2,1)/(4,1)$. The \emph{weight} of a shifted reverse plane partition $T$ is the monomial $x^{\wt_{RPP}(T)} := \prod_{i\geq 1} x_i^{c_i+r_i}$ where $c_i$ is the number of distinct columns of $T$ containing $i$ and $r_i$ is the number of distinct rows of $T$ containing $i'$. This monomial has degree $|\wt_{RPP}(T)| := \sum_{i\geq 1} (c_i+r_i)$, which may be less than $|T| := |\lambda/\mu|$.

Let $\mathcal{M}RPP_Q(\lambda/\mu)$ be the set of shifted reverse plane partitions of shape $\lambda/\mu$. Let $\mathcal{M}RPP_P(\lambda/\mu)$ denote the subset of elements in $\mathcal{M}RPP_Q(\lambda/\mu)$ whose diagonal entries are all primed. The examples in \eqref{rpp-ex} belong to $\mathcal{M}RPP_Q(5321/41)$ and $\mathcal{M}RPP_P(5321/41)$, respectively; both have weight $x_1^3 x_2 x_4$. Nakagawa and Naruse present the following conjecture in \cite{NakagawaNaruse}:

\begin{conjecture}[{\cite[Conj. 5.1]{NakagawaNaruse}}] \label{conj1}
If $\mu\subseteq \lambda$ are strict partitions then
\[ \begin{aligned}
\mathfrak{g}p_{\lambda/\mu} &= \sum_{T\in \mathcal{M}RPP_P(\lambda/\mu)} (-\beta)^{|\lambda/\mu| - |\wt_{RPP}(T)|} x^{\wt_{RPP}(T)} ,\\
\mathfrak{g}q_{\lambda/\mu} &= \sum_{T\in \mathcal{M}RPP_Q(\lambda/\mu)} (-\beta)^{|\lambda/\mu| - |\wt_{RPP}(T)|} x^{\wt_{RPP}(T)} .
\end{aligned} \]
\end{conjecture}

\begin{example}\label{conj1-ex}
If $\lambda =(2,1)$ and $\mu=\emptyset$ then $\mathcal{M}RPP_P(\lambda/\mu)$ consists of
\[ \begin{ytableau} \none[\cdot] &c' \\ a' & b \end{ytableau}, \quad \begin{ytableau} \none[\cdot] &c' \\ a' & b' \end{ytableau}, \quad \begin{ytableau} \none[\cdot] & b' \\ a' & a \end{ytableau}, \quad \begin{ytableau} \none[\cdot] &b' \\ a' & b' \end{ytableau}, \quad \begin{ytableau} \none[\cdot] & a' \\ a' & a' \end{ytableau}, \quad\text{and}\quad \begin{ytableau} \none[\cdot] & b' \\ a' & a' \end{ytableau} \]
for all positive integers $a<b<c$, so Conjecture~\ref{conj1} asserts that
\[ \begin{aligned} \mathfrak{g}p_{21} &= 2\sum_{a<b<c} x_a x_b x_c + \sum_{a<b} (x_a^2x_b + x_ax_b^2) - \beta \sum_a x_a^2 - \beta \sum_{a<b} x_ax_b = s_{21} - \beta s_2. \end{aligned}\]
Still with $\lambda =(2,1)$ and $\mu=\emptyset$, adding primes to the diagonal entries is a weight-preserving 4-to-1 map $\mathcal{M}RPP_Q(\lambda/\mu)\to \mathcal{M}RPP_P(\lambda/\mu)$, so Conjecture~\ref{conj1} also predicts that $\mathfrak{g}q_{21} =4s_{21} - 4\beta s_2$. These expressions match the definitions from \eqref{cauchy-eq}.
\end{example}

\begin{remark}
The cited conjecture \cite[Conj. 5.1]{NakagawaNaruse} only states these formulas when $\mu=\emptyset$ and $\beta=-1$. However, this special case implies the general result. In detail, if we knew the conjecture when $\beta=-1$ then we could derive the general statement using Proposition~\ref{recover-prop}. In turn, if we knew that $\mathfrak{g}p_{\lambda} = \sum_{T\in \mathcal{M}RPP_P(\lambda)} (-\beta)^{|\lambda| - |\wt_{RPP}(T)|} x^{\wt_{RPP}(T)}$, and hence that this combinatorial generating function were symmetric, then we would have
\[\begin{aligned}
\mathfrak{g}p_\lambda(x,y) &=\sum_{\substack{\nu\subseteq\lambda \\ T\in \mathcal{M}RPP_P(\nu) \\ U\in \mathcal{M}RPP_P(\lambda/\nu)}} (-\beta)^{|\lambda| - |\wt_{RPP}(T)|- |\wt_{RPP}(U)|} x^{\wt_{RPP}(T)} y^{\wt_{RPP}(U)} \\ & = \sum_{\nu \subseteq \lambda} \mathfrak{g}p_\nu(x) \sum_{U\in \mathcal{M}RPP_P(\lambda/\nu)} (-\beta)^{|\lambda/\nu| - |\wt_{RPP}(U)|} y^{\wt_{RPP}(U)}.
\end{aligned} \]
We could then derive $\mathfrak{g}p_{\lambda/\nu} = \sum_{T\in \mathcal{M}RPP_P(\lambda/\nu)} (-\beta)^{|\lambda/\nu| - |\wt_{RPP}(T)|} x^{\wt_{RPP}(T)}$ by comparing the preceding identity with \eqref{skew-eq3} and equating the coefficients of $\mathfrak{g}p_\nu$. The reductions in the $\mathfrak{g}q$-case are similar.
\end{remark}

Nakagawa and Naruse give another conjectural formula for $\mathfrak{g}q_\lambda$ as a certain Pfaffian in \cite[\S6.2]{NakagawaNaruse0}, but we will not discuss this here.

Write $\lambda^\top$ for the transpose of a partition $\lambda$. Let $\omega$ be the operator acting on (possibly infinite) $\mathbb{Z}[\beta]$-linear combinations of Schur functions $f=\sum_\lambda c_\lambda s_\lambda$ as $\omega(f) := \sum_\lambda c_\lambda s_{\lambda^\top}$. For strict partitions $\mu$ and $\lambda$ define
\begin{equation}\label{jpq-eq}
j\hspace{-0.2mm}p_{\lambda/\mu} := \omega(\mathfrak{g}p_{\lambda/\mu}) \quad\text{and}\quad j\hspace{-0.2mm}q_{\lambda/\mu} := \omega(\mathfrak{g}q_{\lambda/\mu}),
\end{equation}
setting $j\hspace{-0.2mm}p_{\lambda} := j\hspace{-0.2mm}p_{\lambda/\emptyset}$ and $j\hspace{-0.2mm}q_{\lambda} := j\hspace{-0.2mm}q_{\lambda/\emptyset}$. These are finite $\mathbb{Z}[\beta]$-linear combinations of Schur functions which are nonzero if and only if $\mu \subseteq \lambda$, since the same is true of the $\mathfrak{g}p$- and $\mathfrak{g}q$-functions. When nonzero, $j\hspace{-0.2mm}p_{\lambda/\mu}$ and $j\hspace{-0.2mm}q_{\lambda/\mu}$ are homogeneous of degree $|\lambda|-|\mu|$ under the convention that $\deg(\beta) = \deg(x_i) = 1$. As $\omega$ fixes all skew Schur $P$- and $Q$-functions \cite[Ex. 3(a), {\S}III.8]{Macdonald}, we have
\begin{equation}
P_{\lambda/\mu} = j\hspace{-0.2mm}p_{\lambda/\mu}|_{\beta=0} \quad\text{and}\quad Q_{\lambda/\mu} =j\hspace{-0.2mm}q_{\lambda/\mu}|_{\beta=0}.
\end{equation}
Additionally, since $\omega$ is an automorphism of the Hopf algebra of bounded degree symmetric power series in $\mathbb{Z}[\beta][[x_1,x_2,\dots]]$, the identity \eqref{skew-eq3} is equivalent to
\begin{equation}\label{j-eq}
j\hspace{-0.2mm}p_\lambda(x,y) = \sum_\mu j\hspace{-0.2mm}p_\mu(x) j\hspace{-0.2mm}p_{\lambda/\mu}(y) \quad\text{and}\quad j\hspace{-0.2mm}q_\lambda(x,y) = \sum_\mu j\hspace{-0.2mm}q_\mu(x) j\hspace{-0.2mm}q_{\lambda/\mu}(y)
\end{equation}
where both sums are over all strict partitions $\mu$.

We may also describe a conjectural generating function formula for $j\hspace{-0.2mm}p_{\lambda/\mu}$ and $j\hspace{-0.2mm}q_{\lambda/\mu}$. This formula appears to be new. A partition of a set $S$ is a set $\Pi$ of disjoint nonempty subsets $B \subseteq S$, called \emph{blocks}, with $S = \bigsqcup_{B \in \Pi} B$. Assume $\mu \subseteq \lambda$. We define a \emph{shifted bar tableau} of shape $\lambda/\mu$ to be a pair $T=(V,\Pi)$, where $V$ is a semistandard shifted tableau of shape $\lambda/\mu$ and $\Pi$ is a partition of $\mathsf{SD}_{\lambda/\mu}$ such that each block $B \in \Pi$ is a set of adjacent boxes containing the same entry in $V$. Because $V$ is semistandard, each of these blocks must consist of a contiguous ``bar'' within a single row or single column.
One might draw a shifted bar tableau as a picture like
\begin{equation}\label{colors-eq}
\begin{young}[14pt][c] , & ]=![cyan!75]2 & =]![cyan!75] \ynobottom & ![pink!75]3'\ynobottom \\ ]=![red!75]1 & =]![red!75] & ]=]![blue!60] 1 & ]=]![pink!75] \ynotop & ![magenta!75]3 \end{young}
\end{equation}
to represent
\[ T=(V,\Pi) = \left(\hspace{0.5mm} \begin{young}[14pt][c] , & 2 & 2 & 3'\\ 1 & 1 & 1 & 3' & 3 \end{young} ,\hspace{0.5mm} \begin{young}[14pt][c] , & ]= \cdot & =] \cdot \ynobottom & \cdot\ynobottom \\ ]= \cdot & =]\cdot & ]=]\cdot & ]=] \cdot \ynotop & \cdot \end{young}\hspace{0.5mm} \right). \]
Let $\mathcal{V}alShYTQ(\lambda/\mu)$ denote the set of all shifted bar tableaux of shape $\lambda/\mu$ and let $\mathcal{V}alShYTP(\lambda/\mu)$ be the subset of such pairs $T=(V,\Pi)$ where $V$ has no primed diagonal entries. Given $T = (V,\Pi) \in\mathcal{V}alShYTQ(\lambda/\mu)$ we set
\[ |T| := |\Pi| \quad\text{and}\quad x^T := \prod_{i\geq 1} x_i^{b_i} \]
where $b_i$ is the number of blocks of $\Pi$ whose boxes contain $i$ or $i'$ in $V$. The example $T$ shown in \eqref{colors-eq} belongs to $\mathcal{V}alShYTP(\lambda/\mu)$ for $\lambda = (5,3)$ and $\mu=\emptyset$ and has $|T| = 5$ and $x^T = x_1^2 x_2 x_3^2$. If every block in $\Pi$ has size one then $|T| = |\lambda/\mu|$ and $x^T = x^V$. If $\Pi$ has as few blocks as possible given $V$, then $x^T = x^{\wt_{RPP}(V)}$. Finally, observe that if $V$ is fixed, then the sum of $x^T$ over all $\Pi$ such that $T=(V,\Pi)$ is a shifted bar tableau is $\prod_{i \geq 1} x_i^{r_i + c_i} (x_i +1)^{m_i - r_i - c_i }$ where $r_i$ is the number of rows of $V$ containing an entry equal to $i$, $c_i$ is the number of columns of $V$ containing an entry equal to $i'$, and $m_i$ is the number of boxes of $V$ containing $i$ or $i'$.

\begin{conjecture}\label{conj2}
If $\mu\subseteq \lambda$ are strict partitions then
\[ \begin{aligned}
j\hspace{-0.2mm}p_{\lambda/\mu} &= \sum_{T\in \mathcal{V}alShYTP(\lambda/\mu)} (-\beta)^{|\lambda/\mu|-|T|} x^{T} ,\\
j\hspace{-0.2mm}q_{\lambda/\mu} &= \sum_{T\in \mathcal{V}alShYTQ(\lambda/\mu)} (-\beta)^{|\lambda/\mu| -|T|} x^{T} .
\end{aligned} \]
\end{conjecture}

As with Conjecture~\ref{conj1}, to prove this result it would suffice to assume $\mu=\emptyset$.
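To illustrate the product formula stated just before Conjecture~\ref{conj2} with a concrete instance (a quick worked check only, not needed elsewhere): for the tableau $V$ underlying \eqref{colors-eq} one has $(r_1,c_1,m_1)=(1,0,3)$, $(r_2,c_2,m_2)=(1,0,2)$, and $(r_3,c_3,m_3)=(1,1,3)$, so the sum of $x^T$ over all choices of $\Pi$ is
\[ x_1(x_1+1)^2 \cdot x_2(x_2+1) \cdot x_3^2(x_3+1), \]
and the monomial $x^T = x_1^2x_2x_3^2$ recorded for \eqref{colors-eq} is one of its terms.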
\begin{example}
If $\lambda =(2,1)$ and $\mu=\emptyset$ then $\mathcal{V}alShYTP(\lambda/\mu)$ consists of
\[ \begin{young}[14pt][c] , & ]=]![cyan!75]c \\ ]=]![red!75]a & ]=]![yellow!75]b \end{young}, \quad \begin{young}[14pt][c] , & ]=]![cyan!75]c \\ ]=]![red!75]a & ]=]![yellow!75]b' \end{young}, \quad \begin{young}[14pt][c] , & ]=]![cyan!75]b \\ ]=]![red!75]a & ]=]![yellow!75]a \end{young}, \quad \begin{young}[14pt][c] , & ]=]![cyan!75]b \\ ]=]![red!75]a & ]=]![yellow!75]b' \end{young}, \quad\text{and}\quad \begin{young}[14pt][c] , & ]=]![cyan!75]b \\ ]=![red!75]a & =]![red!75] \end{young} \]
for all positive integers $a<b<c$, so Conjecture~\ref{conj2} asserts that
\[ \begin{aligned} j\hspace{-0.2mm}p_{21} &= 2\sum_{a<b<c} x_a x_b x_c + \sum_{a<b} (x_a^2x_b + x_ax_b^2) - \beta \sum_{a<b} x_ax_b = s_{21} - \beta s_{11}. \end{aligned}\]
As in Example~\ref{conj1-ex}, there is a weight-preserving 4-to-1 map $\mathcal{V}alShYTQ(21/\emptyset)\to \mathcal{V}alShYTP(21/\emptyset)$; this is given by either removing all diagonal primes or applying
\[ \left\{\ \begin{young}[14pt][c] , & ]=]![cyan!75]b \\ ]=![red!75]a & =]![red!75] \end{young},\ \begin{young}[14pt][c] , & ]=]![cyan!75]b' \\ ]=![red!75]a & =]![red!75] \end{young},\ \begin{young}[14pt][c] , & ]=]![cyan!75]b' \ynobottom \\ ]=]![red!75]a & ]=]![cyan!75] \ynotop \end{young},\ \begin{young}[14pt][c] , & ]=]![cyan!75]b' \ynobottom \\ ]=]![red!75]a' & ]=]![cyan!75] \ynotop \end{young}\ \right\} \mapsto \left\{\ \begin{young}[14pt][c] , & ]=]![cyan!75]b \\ ]=![red!75]a & =]![red!75] \end{young}\ \right\}.\]
Thus Conjecture~\ref{conj2} also predicts that $j\hspace{-0.2mm}q_{21} =4s_{21} - 4\beta s_{11}$. These formulas are consistent with Example~\ref{conj1-ex} as $j\hspace{-0.2mm}p_{21} = \omega(\mathfrak{g}p_{21})$ and $j\hspace{-0.2mm}q_{21} = \omega(\mathfrak{g}q_{21})$.
\end{example}

\begin{remark}
One can systematically test Conjecture~\ref{conj1} by substituting into the Cauchy identity \eqref{cauchy-eq} both the set-valued generating functions for $G\hspace{-0.2mm}P_\lambda$ and $G\hspace{-0.2mm}Q_\lambda$ and the predicted reverse plane partition generating functions for $\mathfrak{g}p_\lambda$ and $\mathfrak{g}q_\lambda$, then truncating all three expressions in \eqref{cauchy-eq} to finitely many variables and finite degree, and finally checking that the resulting polynomials match. All computer calculations we have done along these lines support the conjecture.
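A much weaker, but self-contained, toy version of such a check is to confirm the closed form in Example~\ref{conj1-ex} directly. The following sympy sketch (an illustration only; the Schur polynomials are obtained from the standard Jacobi--Trudi identities $s_{21}=h_1h_2-h_3$ and $s_2=h_2$ in three variables) verifies that the reverse plane partition generating function for $\lambda=(2,1)$ equals $s_{21}-\beta s_2$:
\begin{verbatim}
import sympy as sp
from itertools import combinations

xs = sp.symbols('x1:4')          # three variables suffice in degree <= 3
b = sp.Symbol('beta')

# Generating function from Example conj1-ex, grouped by weight.
lhs = (2*sum(sp.Mul(*c) for c in combinations(xs, 3))
       + sum(u**2*v + u*v**2 for u, v in combinations(xs, 2))
       - b*sum(x**2 for x in xs)
       - b*sum(u*v for u, v in combinations(xs, 2)))

# Complete homogeneous symmetric polynomials h1, h2, h3 in three variables.
h1 = sum(xs)
h2 = sum(x**2 for x in xs) + sum(u*v for u, v in combinations(xs, 2))
h3 = (sum(x**3 for x in xs)
      + sum(u**2*v + u*v**2 for u, v in combinations(xs, 2))
      + sp.Mul(*xs))

assert sp.expand(lhs - (h1*h2 - h3 - b*h2)) == 0   # s_21 - beta*s_2
\end{verbatim}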
To test Conjecture~\ref{conj2}, one can compute the Schur polynomial expansions of the right-hand expressions in Conjectures~\ref{conj1} and \ref{conj2} restricted to finitely many variables $x_1,x_2,\dots,x_n$. If $n$ is large enough, then the corresponding expansions should be related by transposing all partition indices. We have used a computer to verify Conjecture~\ref{conj2} in this way for all strict partitions $\lambda$ with $|\lambda| \leq 6$.
\end{remark}

Conjectures~\ref{conj1} and \ref{conj2} are shifted analogues of results in \cite[\S9]{LamPyl}. One may be able to adapt the operator methods used there and in \cite{Y2019} to prove both formulas. We will not pursue this here, beyond verifying the one-row case:

\begin{proposition}
Conjectures~\ref{conj1} and \ref{conj2} hold when $\ell(\lambda) \leq 1$.
\end{proposition}

\begin{proof}
When $\ell(\lambda)=0$ the conjectures assert that $\mathfrak{g}p_\emptyset=\mathfrak{g}q_\emptyset=j\hspace{-0.2mm}p_\emptyset=j\hspace{-0.2mm}q_\emptyset =1$. This follows by comparing constant coefficients in \eqref{cauchy-eq} since $G\hspace{-0.2mm}P_\emptyset = G\hspace{-0.2mm}Q_\emptyset=1$.

Suppose $\ell(\lambda)=1$. We may assume $\mu=\emptyset$ in view of \eqref{skew-eq3} and \eqref{j-eq}. We may also assume $\beta=-1$ since if $f$ is $\mathfrak{g}p_\lambda$, $\mathfrak{g}q_\lambda$, $j\hspace{-0.2mm}p_\lambda$, $j\hspace{-0.2mm}q_\lambda$, or one of the conjectural generating functions, then by Proposition~\ref{recover-prop} we can recover $f$ from $f|_{\beta=-1}$ by substituting $x_i\mapsto -\beta^{-1} x_i$ for all $i$ and then multiplying the result by $(-\beta)^{|\lambda|}$.

Let $\overline{\mathfrak{g}p}_\lambda := \sum_{T\in \mathcal{M}RPP_P(\lambda)} x^{\wt_{RPP}(T)}$ and $\overline{\mathfrak{g}q}_\lambda := \sum_{T\in \mathcal{M}RPP_Q(\lambda)} x^{\wt_{RPP}(T)}$. We wish to show that $\mathfrak{g}p_{(n)}= \overline{\mathfrak{g}p}_{(n)}$ and $\mathfrak{g}q_{(n)}= \overline{\mathfrak{g}q}_{(n)}$ when $n$ is a positive integer. Nakagawa and Naruse assert that these identities hold \cite[Prop. 5.2]{NakagawaNaruse} but do not provide a proof. Here is an argument. The function $G\hspace{-0.2mm}P_\lambda(t)$ obtained by setting $x_1=t$ and $x_2=x_3=\dots=0$ is $t^n$ if $\lambda=(n)$ and $0$ if $\ell(\lambda)>1$.
Thus, if we set $x_1 = t$ and $x_i = 0$ for $i>1$ then \eqref{cauchy-eq} becomes
\[ \sum_{n\geq 0} t^n \mathfrak{g}q_{(n)}(y) = \prod_{j \geq 1}\tfrac{ 1 -\frac{-t}{1-t} y_j}{1-ty_j} .\]
Rewriting $y_j$ as $x_j$, we deduce that $\mathfrak{g}q_{(n)}=\mathfrak{g}q_{(n)}(x)$ is the coefficient of $t^n$ in
\[ \prod_{j \geq 1} \tfrac{ 1 -\frac{-t}{1-t} x_j}{1-tx_j} = \prod_{j\geq 1} (1 + x_j t+x_j t^2 +x_j t^3+ \dots)(1 + x_jt + x_j^2t^2 + x_j^3t^3+\dots).\]
Each way of expanding this product into monomials corresponds to a unique one-row shifted reverse plane partition $T$: if we multiply the $a$th term of $(1 + x_j t+x_j t^2 +x_j t^3+ \dots)$ with the $b$th term of $(1 + x_jt + x_j^2t^2 + x_j^3t^3+\dots)$, considering $1$ to be the $0$th term of both sums, then the resulting monomial is $x^{\wt_{RPP}(T)} t^{|T|}$ where $T$ is the one-row shifted reverse plane partition with exactly $a=a(j)$ entries equal to $j'$ and $b=b(j)$ entries equal to $j$ for each $j\geq 1$. It follows that the coefficient of $t^n$ in the above product is also $\overline{\mathfrak{g}q}_{(n)}$, so $\mathfrak{g}q_{(n)}=\overline{\mathfrak{g}q}_{(n)}$.

We have $\overline{\mathfrak{g}q}_{(1)} = 2 \overline{\mathfrak{g}p}_{(1)} = 2(x_1+x_2+ x_3+\dots)$, and if $n\geq 2$ then $\overline{\mathfrak{g}p}_{(n)}-\overline{\mathfrak{g}p}_{(n-1)} = \sum_T x^{\wt_{RPP}(T)}$ where the sum is over one-row shifted reverse plane partitions $T$ of size $n$ whose first two boxes contain distinct entries, the first of which is primed. Removing the prime from the first entry defines a weight-preserving bijection from the set of such $T$ to $\mathcal{M}RPP_Q(n) - \mathcal{M}RPP_P(n)$, so it follows that $\overline{\mathfrak{g}q}_{(n)} = 2\overline{\mathfrak{g}p}_{(n)}-\overline{\mathfrak{g}p}_{(n-1)}$ when $n\geq 2$. Since we likewise have $\mathfrak{g}q_{(1)} = 2 \mathfrak{g}p_{(1)}$ and $\mathfrak{g}q_{(n)} = 2 \mathfrak{g}p_{(n)}- \mathfrak{g}p_{(n-1)}$ when $n\geq 2$ by Corollary~\ref{to-prove2}, we deduce by induction that $\mathfrak{g}p_{(n)}=\overline{\mathfrak{g}p}_{(n)}$ for all $n$.

An \emph{(unshifted) reverse plane partition} of shape $\lambda$ is a filling of the (unshifted) diagram $\mathsf{D}_\lambda := \{ (i,j) : 1 \leq j \leq \lambda_i\}$ by positive integers, such that rows and columns are weakly increasing. The weight of such an object is defined in the same way as for shifted reverse plane partitions. As noted in \cite[Prop. 5.3]{NakagawaNaruse}, it is easy to see that $\overline{\mathfrak{g}p}_{(n)} = \sum_T x^{\wt_{RPP}(T)}$ where the sum is over all reverse plane partitions $T$ of any of the hook shapes $a1^{n-a} := (a,1,1,\dots,1)$ for $a \in [n]$; the relevant weight-preserving bijection is given by moving all primed entries in an element of $\mathcal{M}RPP_P(n)$ from the first row to the first column with primes removed. This sum is precisely $\sum_{a=1}^n g_{a1^{n-a}}$ where $g_\lambda$ is the \emph{dual stable Grothendieck polynomial} discussed, for example, in \cite[\S9.1]{LamPyl}.
An \emph{(unshifted) bar tableau} of shape $\lambda$ is defined in the same way as a shifted bar tableau, except the underlying tableau is a filling of $\mathsf{D}_\lambda$ (rather than $\mathsf{SD}_\lambda$) by positive integers (rather than positive half-integers); this is called a \emph{valued-set tableau} in \cite[\S9.8]{LamPyl}. The weight $x^T$ is the same as in the shifted case. Let $\overline{j\hspace{-0.2mm}p}_{\lambda} := \sum_{T\in \mathcal{V}alShYTP(\lambda)} x^{T}$ and $\overline{j\hspace{-0.2mm}q}_{\lambda} := \sum_{T\in \mathcal{V}alShYTQ(\lambda)} x^{T}$. Then $\overline{j\hspace{-0.2mm}p}_{(n)} = \sum_T x^{T}$ where the sum is over all bar tableaux $T$ of any of the hook shapes $a1^{n-a}$ for $a \in [n]$; the relevant weight-preserving bijection is again given by moving all primed entries in an element of $\mathcal{V}alShYTP(n)$ from the first row to the first column with primes removed. (Each primed entry comprises its own block in the first row and is assigned to its own block in the first column.) This sum is precisely $\sum_{a=1}^n j_{a1^{n-a}}$ where $j_\lambda$ is the generating function discussed in \cite[\S9.8]{LamPyl}, which has $j_\lambda = \omega(g_\lambda)$ for all partitions $\lambda$ \cite[Prop. 9.25]{LamPyl}.

Combining the last two paragraphs shows that $\overline{j\hspace{-0.2mm}p}_{(n)} = \omega(\overline{\mathfrak{g}p}_{(n)})$, so $j\hspace{-0.2mm}p_{(n)} = \omega( \mathfrak{g}p_{(n)}) = \omega(\overline{\mathfrak{g}p}_{(n)}) = \overline{j\hspace{-0.2mm}p}_{(n)}$ for all $n$.

Finally, we have $\overline{j\hspace{-0.2mm}q}_{(1)} = 2 \overline{j\hspace{-0.2mm}p}_{(1)}=2(x_1+x_2+ x_3+\dots)$, and if $n\geq 2$ then $\overline{j\hspace{-0.2mm}p}_{(n)}- \overline{j\hspace{-0.2mm}p}_{(n-1)} = \sum_T x^T$ where the sum is over one-row shifted bar tableaux $T$ of size $n$ whose first two entries are unprimed but not in the same block. Adding a prime to the diagonal entry gives a weight-preserving bijection from the set of such $T$ to $\mathcal{V}alShYTQ(n) - \mathcal{V}alShYTP(n)$, so $\overline{j\hspace{-0.2mm}q}_{(n)} = 2\overline{j\hspace{-0.2mm}p}_{(n)} - \overline{j\hspace{-0.2mm}p}_{(n-1)}$ when $n\geq 2$. Since the same formulas relate $j\hspace{-0.2mm}q_{(n)}$ to $j\hspace{-0.2mm}p_{(n)}$ by the linearity of $\omega$, we must have $j\hspace{-0.2mm}q_{(n)} = \overline{j\hspace{-0.2mm}q}_{(n)}$.
\end{proof}

Conjecture~\ref{conj2} has some consequences regarding the numbers $a^\lambda_{\mu\nu}$ and $b^\lambda_{\mu\nu}$.

\begin{theorem}\label{conj-lem}
Suppose the formula for $j\hspace{-0.2mm}q_{\lambda/\mu}$ (respectively, $j\hspace{-0.2mm}p_{\lambda/\mu}$) in Conjecture~\ref{conj2} holds. Then the coefficients in the product expansions \eqref{skew-exp-eq2} satisfy $a^\lambda_{\mu\nu} =0$ (respectively, $b^\lambda_{\mu\nu} =0$) whenever $\ell(\lambda) > \ell(\mu) + \ell(\nu)$.
\end{theorem}

\begin{proof}
Our argument is based on Yeliussizov's proof of \cite[Thm. 8.4]{Y2019}.
Since consecutive diagonal entries in a semistandard shifted tableau differ by at least one, the assumed formula in Conjecture~\ref{conj2} implies that $j\hspace{-0.2mm}q_{\nu}(x_1,\dots,x_n) \neq 0$ if and only if $\ell(\nu) \leq n$ and that $j\hspace{-0.2mm}q_{\lambda/\mu}(x_1,\dots,x_n) = 0$ whenever $\ell(\lambda) > n + \ell(\mu)$. As $j\hspace{-0.2mm}q_{\nu}|_{\beta=0} = Q_{\nu}$, the set of polynomials $\{ j\hspace{-0.2mm}q_\kappa(x_1,\dots,x_n) : \ell(\kappa) \leq n\}$ is linearly independent over $\mathbb{Z}[\beta]$ since $\{ Q_\kappa(x_1,\dots,x_n) : \ell(\kappa) \leq n\}$ is linearly independent. Applying $\omega$ to \eqref{skew-g-def} gives $j\hspace{-0.2mm}q_{\lambda/\mu} = \sum_\kappa a_{\mu\kappa}^\lambda \beta^{|\lambda| - |\mu| - |\kappa|} j\hspace{-0.2mm}q_\kappa$. Thus if $a_{\mu\nu}^\lambda \neq 0$ and $n = \ell(\nu)$ then $j\hspace{-0.2mm}q_{\lambda/\mu}(x_1,\dots,x_n) = \sum_{\ell(\kappa)\leq n} a_{\mu\kappa}^\lambda \beta^{|\lambda| - |\mu| - |\kappa|} j\hspace{-0.2mm}q_\kappa(x_1,\dots,x_n) \neq 0$. But this means that $\ell(\lambda) \leq n + \ell(\mu) = \ell(\mu) + \ell(\nu)$ as desired. The claim about $b^\lambda_{\mu\nu}$ follows by the same argument after swapping $j\hspace{-0.2mm}q\leftrightarrow j\hspace{-0.2mm}p$ and $Q\leftrightarrow P$.
\end{proof}

Ikeda and Naruse conjectured that the product expansions in \eqref{skew-exp-eq2} both have finitely many nonzero terms \cite[Conj. 3.1 and 3.2]{IkedaNaruse}. For the $G\hspace{-0.2mm}P$-functions, this follows from results in \cite{CTY}, which also establish that each coefficient $a^\lambda_{\mu\nu} \in \mathbb{N}$; for other proofs see \cite[\S4]{Hamaker}, \cite[\S1.2]{M2021}, or \cite[\S8]{PechenikYong}. The same claim for the $G\hspace{-0.2mm}Q$-functions appears still to be open, but would be a consequence of Conjecture~\ref{conj2} by the following corollary. Specifically, this corollary shows that Conjecture~\ref{conj2} implies \cite[Conj. 3.2]{IkedaNaruse}. Even given these conjectures, it is still an open problem to find a Littlewood--Richardson rule to compute $b^\lambda_{\mu\nu}$ outside the Pieri formula case $\nu=(p)$ handled in \cite{BuchRavikumar}.

\begin{corollary}\label{last-cor}
Suppose the formula for $j\hspace{-0.2mm}p_{\lambda/\mu}$ in Conjecture~\ref{conj2} holds. Then each product $G\hspace{-0.2mm}Q_\mu G\hspace{-0.2mm}Q_\nu$ is a finite $\mathbb{Z}[\beta]$-linear combination of $G\hspace{-0.2mm}Q_\lambda$'s, so the set $\{G\hspace{-0.2mm}Q_\lambda : \text{$\lambda$ is a strict partition}\}$ is a $\mathbb{Z}[\beta]$-basis for a subring of $\mathbb{Z}[\beta][[x_1,x_2,\dots]]$.
\end{corollary} \begin{proof} Theorem~\ref{conj-lem} implies that $G\hspace{-0.2mm}Q_\mu G\hspace{-0.2mm}Q_\nu = \sum_{\ell(\lambda) \leq n} b^{\lambda}_{\mu\nu}\beta^{|\lambda| - |\mu| - |\nu|} G\hspace{-0.2mm}Q_\lambda$ when $n = \ell(\mu)+ \ell(\nu)$. This must be a finite sum since the set $\{ G\hspace{-0.2mm}Q_\lambda(x_1,\dots,x_n) : \ell(\lambda) \leq n\}$ is a $\mathbb{Z}[\beta]$-basis for a subring of $\mathbb{Z}[\beta][x_1,\dots,x_n]$ by \cite[Prop. 3.2]{IkedaNaruse}. \end{proof} For strict partitions $\mu\subseteq \lambda$ we may likewise define \begin{equation}\label{JPQ-eq} J\hspace{-0.2mm}P_{\lambda/\mu} := \omega(G\hspace{-0.2mm}P_{\lambda/\mu}) \quad\text{and}\quad J\hspace{-0.2mm}Q_{\lambda/\mu} := \omega(G\hspace{-0.2mm}Q_{\lambda/\mu}) , \end{equation} setting $J\hspace{-0.2mm}P_{\lambda} := J\hspace{-0.2mm}P_{\lambda/\emptyset}$ and $J\hspace{-0.2mm}Q_{\lambda} := J\hspace{-0.2mm}Q_{\lambda/\emptyset}$. Interpreting these as combinatorial generating functions is easier than in the dual case. By \cite[Cor. 6.6]{LM2021} we have \begin{equation}\label{J-eq} J\hspace{-0.2mm}P_{\lambda/\mu} = G\hspace{-0.2mm}P_{\lambda/\mu}(\tfrac{x}{1-\beta x}) \quad\text{and}\quad J\hspace{-0.2mm}Q_{\lambda/\mu} = G\hspace{-0.2mm}Q_{\lambda/\mu}(\tfrac{x}{1-\beta x}) \end{equation} where $f(\frac{x}{1-\beta x})$ denotes the power series obtained from $f \in \mathbb{Z}[\beta][[x_1,x_2,\dots]]$ by substituting $x_i \mapsto \frac{x_i}{1-\beta x_i} = x_i + \beta x_i^2 + \beta^2 x_i^3 + \dots$ for all $i$. Using \eqref{J-eq} it is straightforward to turn the formulas \eqref{skew-GP-GQ-def} into expressions for $J\hspace{-0.2mm}P_{\lambda/\mu}$ and $J\hspace{-0.2mm}Q_{\lambda/\mu}$ as generating functions $\sum_T \beta^{|T| - |\lambda/\mu|} x^T$ for \emph{semistandard weak set-valued shifted tableaux}\footnote{A \emph{semistandard weak set-valued shifted tableau} of shape $\lambda/\mu$ is defined in the same way as a semistandard set-valued shifted tableau, except the entries of such a tableau are finite nonempty \emph{multisets} of positive half-integers.} of shape $\lambda/\mu$, with primed numbers excluded from the diagonal in the $J\hspace{-0.2mm}P$ case. See \cite[\S3]{Hamaker}, for example, where the power series denoted $K_\lambda$ is the same as $J\hspace{-0.2mm}P_{\lambda}|_{\beta=1}$.
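As a quick computational sanity check on \eqref{J-eq}, one can compare both sides in the smallest case by computer. The sketch below (not part of any argument) works in two variables with a degree truncation; it takes for granted the single-box expansions $G\hspace{-0.2mm}P_{(1)} = \sum_{\emptyset\neq S}\beta^{|S|-1}x^S$ (nonempty sets of unprimed entries) and $J\hspace{-0.2mm}P_{(1)} = \sum_{\emptyset\neq M}\beta^{|M|-1}x^M$ (nonempty multisets), which follow from the tableau descriptions above; the truncation degree and the choice of {\tt sympy} are ours.
\begin{verbatim}
# Truncated check of (J-eq) for the single-box shape (1), two variables.
import sympy as sp
from itertools import combinations_with_replacement

x = sp.symbols('x1 x2')
b = sp.Symbol('b')          # plays the role of beta
N = 4                       # keep monomials of total degree <= N

def geo(xi):                # truncated x/(1 - b*x) = x + b*x^2 + b^2*x^3 + ...
    return sum(b**(k - 1) * xi**k for k in range(1, N + 1))

def trunc(expr):            # drop monomials of total degree > N in x1, x2
    p = sp.Poly(sp.expand(expr), *x)
    return sp.expand(sum(c * x[0]**i * x[1]**j
                         for (i, j), c in p.terms() if i + j <= N))

# GP_{(1)}: one box filled by a nonempty *set* of unprimed entries from {1,2}
gp1 = x[0] + x[1] + b * x[0] * x[1]

# JP_{(1)} by direct enumeration: one box filled by a nonempty *multiset*,
# weight b^{|M|-1} x^M, truncated at multisets of size N
jp1 = sum(b**(k - 1) * sp.prod([x[i] for i in M])
          for k in range(1, N + 1)
          for M in combinations_with_replacement(range(2), k))

# (J-eq) predicts: JP_{(1)} = GP_{(1)} with x_i -> x_i/(1 - b*x_i)
rhs = trunc(gp1.subs(x[0], geo(x[0])).subs(x[1], geo(x[1])))

assert sp.expand(jp1 - rhs) == 0
print(sp.expand(jp1))
\end{verbatim}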
Since $\omega$ is linear, if we define \begin{equation} J\hspace{-0.2mm}P_{\lambda\ss\mu} := \omega(G\hspace{-0.2mm}P_{\lambda\ss\mu}) \quad\text{and}\quad J\hspace{-0.2mm}Q_{\lambda\ss\mu} := \omega(G\hspace{-0.2mm}Q_{\lambda\ss\mu}) \end{equation} then it also holds that \begin{equation}\label{JJ-eq} J\hspace{-0.2mm}P_{\lambda\ss\mu} = G\hspace{-0.2mm}P_{\lambda\ss\mu}(\tfrac{x}{1-\beta x}) \quad\text{and}\quad J\hspace{-0.2mm}Q_{\lambda\ss\mu} = G\hspace{-0.2mm}Q_{\lambda\ss\mu}(\tfrac{x}{1-\beta x}) \end{equation} and it follows from \eqref{skew-eq2} that \begin{equation}\label{skew-e4} \begin{aligned} J\hspace{-0.2mm}P_\lambda(x,y) &= \sum_{\nu } J\hspace{-0.2mm}P_\nu(x) J\hspace{-0.2mm}P_{\lambda\ss\nu}(y), \\ J\hspace{-0.2mm}Q_\lambda(x,y) &= \sum_{\nu} J\hspace{-0.2mm}Q_\nu(x) J\hspace{-0.2mm}Q_{\lambda\ss\nu}(y), \end{aligned} \end{equation} where the sums are over all strict partitions $\nu$. One can write down Cauchy identities relating each pair of $G\hspace{-0.2mm}P$/$j\hspace{-0.2mm}q$, $G\hspace{-0.2mm}Q$/$j\hspace{-0.2mm}p$, $J\hspace{-0.2mm}P$/$g\hspace{-0.2mm}q$, $J\hspace{-0.2mm}Q$/$g\hspace{-0.2mm}p$, $J\hspace{-0.2mm}P$/$j\hspace{-0.2mm}q$, and $J\hspace{-0.2mm}Q$/$j\hspace{-0.2mm}p$ functions. These provide a shifted analogue of \cite[Cor. 6.3]{Y2019}. Recall that $\overline{x_i} := \frac{-x_i}{1+\beta x_i} $ and $ \Delta(x,y) := \prod_{i,j\geq 1} \frac{1-\overline{x_i} y_j}{1-x_iy_j}.$ \begin{corollary} Let $\mu$ and $\nu$ be strict partitions. Then: \begin{enumerate} \item[(a)] $\Delta(x,-y) \sum_{\lambda} G\hspace{-0.2mm}P_{\lambda \ss \mu}( x) j\hspace{-0.2mm}q_{\lambda / \nu}( y) = \sum_\kappa G\hspace{-0.2mm}P_{\nu \ss \kappa}( x) j\hspace{-0.2mm}q_{\mu / \kappa}( y)$; \item[(b)] $\Delta(x,-y)\sum_{\lambda} G\hspace{-0.2mm}Q_{\lambda \ss \mu}( x) j\hspace{-0.2mm}p_{\lambda / \nu}( y) =\sum_\kappa G\hspace{-0.2mm}Q_{\nu \ss \kappa}( x) j\hspace{-0.2mm}p_{\mu / \kappa}( y)$; \item[(c)] $\Delta(-x,y)\sum_{\lambda} J\hspace{-0.2mm}P_{\lambda \ss \mu}( x) g\hspace{-0.2mm}q_{\lambda / \nu}( y) = \sum_\kappa J\hspace{-0.2mm}P_{\nu \ss \kappa}( x) g\hspace{-0.2mm}q_{\mu / \kappa}( y)$; \item[(d)] $\Delta(-x,y)\sum_{\lambda} J\hspace{-0.2mm}Q_{\lambda \ss \mu}( x) g\hspace{-0.2mm}p_{\lambda / \nu}( y) =\sum_\kappa J\hspace{-0.2mm}Q_{\nu \ss \kappa}( x) g\hspace{-0.2mm}p_{\mu / \kappa}( y)$; \item[(e)] $\sum_{\lambda} J\hspace{-0.2mm}P_{\lambda \ss \mu}( x) j\hspace{-0.2mm}q_{\lambda / \nu}(y) = \Delta(-x,-y) \sum_\kappa J\hspace{-0.2mm}P_{\nu \ss \kappa}( x) j\hspace{-0.2mm}q_{\mu / \kappa}( y)$; \item[(f)] $\sum_{\lambda} J\hspace{-0.2mm}Q_{\lambda \ss \mu}( x) j\hspace{-0.2mm}p_{\lambda / \nu}( y) =\Delta(-x,-y) \sum_\kappa J\hspace{-0.2mm}Q_{\nu \ss \kappa}( x) j\hspace{-0.2mm}p_{\mu / \kappa}( y)$.
\end{enumerate} As usual the sums are over all strict partitions $\lambda$ and $\kappa$. \end{corollary} \begin{proof} As $\omega$ interchanges the elementary and complete symmetric functions $e_n := s_{1^n} $ and $h_n:= s_{(n)} $, it follows that $\omega$ (extended to an automorphism of the ring of symmetric functions in the $x_i$ variables over $\mathbb{Z}[t]$) also interchanges $E(t;x) := \sum_{n\geq 0} e_n t^n = \prod_{i \geq 1} (1+x_i t)$ and $H(t;x) :=\sum_{n\geq 0} h_n t^n = \prod_{i\geq 1} \frac{1}{1-x_i t}$. The desired identities are straightforward to derive from Theorem~\ref{GP-cauchy} using this observation with \eqref{jpq-eq} and \eqref{JJ-eq}, since one has \[ \Delta(x,y) = \prod_{i \geq 1} E(-\overline{x_i}; y) H(x_i;y) \quad\text{and}\quad -\overline{x_i}|_{x_i \mapsto \frac{x_i}{1-\beta x_i}} = \frac{\frac{x_i}{1-\beta x_i}}{1+\beta \frac{x_i}{1-\beta x_i}} = x_i. \] For example, substituting $x_i \mapsto \frac{x_i}{1-\beta x_i}$ and then applying the version of $\omega$ which acts only on symmetric functions in the $y$-variables transforms Theorem~\ref{GP-cauchy} to \[ \sum_{\lambda} J\hspace{-0.2mm}P_{\lambda \ss \mu}( x) j\hspace{-0.2mm}q_{\lambda / \nu}(y) = \prod_{i \geq 1} H(x_i; y) E(\tfrac{x_i}{1-\beta x_i};y)\sum_\kappa J\hspace{-0.2mm}P_{\nu \ss \kappa}( x) j\hspace{-0.2mm}q_{\mu / \kappa}( y). \] This implies (e) as $ \prod_{i \geq 1} H(x_i; y) E(\tfrac{x_i}{1-\beta x_i};y) = \prod_{i,j\geq 1} \frac{1 + \tfrac{x_iy_j}{1-\beta x_i} }{1-x_i y_j} = \Delta(-x,-y).$ The other parts are derived in a similar way. \end{proof} \begin{thebibliography}{99} \bibitem{BW09} F. Barekat and S. van Willigenburg. ``Composition of transpositions and equality of ribbon Schur $Q$-functions''. In: \emph{Electron. J. Comb.} 16 (2009), R110. \bibitem{BuchRavikumar} A. S. Buch and V. Ravikumar. ``Pieri rules for the $K$-theory of cominuscule Grassmannians''. In: \emph{J. Reine Angew. Math.} 668 (2012), pp. 109--132. \bibitem{CTY} E. Clifford, H. Thomas, and A. Yong. ``$K$-theoretic Schubert calculus for OG$(n, 2n+1)$ and jeu de taquin for shifted increasing tableaux''. In: \emph{J. Reine Angew. Math.} 690 (2014), pp. 51--63. \bibitem{DeWitt} E. A. DeWitt. ``Identities relating Schur $s$-functions and $Q$-functions''. PhD Thesis, University of Michigan, 2012. \bibitem{GillespieSalois} M. Gillespie and K. Salois. ``Inequality of a class of near-ribbon skew Schur $Q$-functions''. Preprint (2021), {\tt arXiv:2107.14212}. \bibitem{Hamaker} Z. Hamaker, A. Keilthy, R. Patrias, L. Webster, Y. Zhang, and S. Zhou. ``Shifted Hecke insertion and the $K$-theory of OG$(n, 2n+1)$''. In: \emph{J. Combin. Theory Ser. A} 151 (2017), pp. 207--240. \bibitem{Hud} T. Hudson, T. Ikeda, T. Matsumura, and H. Naruse.
``Double Grothendieck polynomials for symplectic and odd orthogonal Grassmannians''. In: \emph{J. Algebra} 546 (2020), pp. 294--314. \bibitem{IkedaNaruse} T. Ikeda and H. Naruse. ``$K$-theoretic analogues of factorial Schur $P$- and $Q$-functions''. In: \emph{Adv. Math.} 243 (2013), pp. 22--66. \bibitem{Ivanov} V. N. Ivanov. ``Interpolation analogues of Schur $Q$-functions''. In: \emph{Zap. Nauchn. Sem. S.-Peterburg. Otdel. Mat. Inst. Steklov. (POMI)} 307 (2004), Teor. Predst. Din. Sist. Komb. i Algoritm. Metody. 10, 99--119, 281--282; translation in \emph{J. Math. Sci. (N. Y.)} 131 (2) (2005), 5495--5507. \bibitem{LamPyl} T. Lam and P. Pylyavskyy. ``Combinatorial Hopf algebras and K-homology of Grassmannians''. In: \emph{IMRN} (2007), rnm125. \bibitem{LM2021} J. B. Lewis and E. Marberg. ``Enriched set-valued P-partitions and shifted stable Grothendieck polynomials''. In: \emph{Math. Z.} 299 (2021), pp. 1929--1972. \bibitem{Macdonald} I. G. Macdonald. \emph{Symmetric Functions and Hall Polynomials}, 2nd ed., Oxford University Press, New York, 1999. \bibitem{M2021} E. Marberg. ``Shifted insertion algorithms for primed words''. Preprint (2021), {\tt arXiv:2104.11437}. \bibitem{MP2020} E. Marberg and B. Pawlowski. ``$K$-theory formulas for orthogonal and symplectic orbit closures''. In: \emph{Adv. Math.} 372 (2020), 107299. \bibitem{MP2021} E. Marberg and B. Pawlowski. ``On some properties of symplectic Grothendieck polynomials''. In: \emph{J. Pure Appl. Algebra} 225.1 (2021), 106463. \bibitem{NakagawaNaruse} M. Nakagawa and H. Naruse. ``Universal factorial Schur $P,Q$-functions and their duals''. Preprint (2018), {\tt arXiv:1812.03328}. \bibitem{NakagawaNaruse0} M. Nakagawa and H. Naruse. ``Generating functions for the universal factorial Hall-Littlewood $P$- and $Q$-functions''. Preprint (2021), {\tt arXiv:1705.04791v3}. \bibitem{Naruse} H. Naruse. ``Elementary proof and application of the generating function for generalized Hall-Littlewood functions''. In: \emph{J. Algebra} 516 (2018), pp. 197--209. \bibitem{Okada} S. Okada. ``A generalization of Schur's $P$- and $Q$-functions''. In: \emph{S\'eminaire Lotharingien de Combinatoire} 81 (2020), Article B81k. \bibitem{PechenikYong} O. Pechenik and A. Yong. ``Genomic tableaux''. In: \emph{J. Algebr. Comb.} 45 (2017), pp. 649--685. \bibitem{Salmasian} H. Salmasian. ``Equality of Schur's $Q$-functions and their skew analogues''. In: \emph{Ann. Comb.} 12 (2006), pp. 325--346. \bibitem{Stembridge1989} J. R. Stembridge. ``Shifted tableaux and the projective representations of symmetric groups''. In: \emph{Adv. Math.} 74 (1989), pp. 87--134. \bibitem{Stembridge1997} J. R. Stembridge. ``Enriched P-partitions''. In: \emph{Trans. Amer. Math. Soc.} 349.2 (1997), pp. 763--788. \bibitem{Y2019} D. Yeliussizov. ``Symmetric Grothendieck polynomials, skew Cauchy identities, and dual filtered Young graphs''. In: \emph{J. Combin. Theory Ser. A} 161 (2019), pp. 453--485. \end{thebibliography} \end{document}
\begin{document} \title{Spectral triples from stationary Bratteli diagrams\footnote{Work supported by the ANR grant {\em SubTile}.}} \begin{abstract} We define spectral triples for stationary Bratteli diagrams and study associated Dirichlet forms. We describe several examples, and emphasize the case of substitution tiling spaces, which are foliated spaces with self-similar Cantor transversals, and leaves homeomorphic to ${\mathbb R}^d$. We derive two types of Dirichlet forms for tilings: one of transversal type, and one of longitudinal type whose infinitesimal generator is similar to a Laplacian in ${\mathbb R}^d$. The spectrum of the forms is the set of continuous dynamical eigenfunctions. \end{abstract} \tableofcontents \section{Introduction} Even though noncommutative geometry \cite{Co94} was invented to describe (virtual) noncommutative spaces, it turned out also to provide new perspectives on (classical) commutative spaces. In particular, Connes' idea of spectral triples, aiming at a spectral description of geometry, has generated new concepts, or shed new light on existing ones, for topological spaces: dimension spectrum, Seeley type coefficients, spectral state, or Dirichlet forms are notions which are derived from the spectral triple, and we will talk about them here. Indeed, we study in this paper certain spectral triples for commutative algebras which are associated with stationary Bratteli diagrams, that is, with the space of infinite paths on a finite oriented graph. Such Bratteli diagrams occur in systems with self-similarity such as the tiling systems defined by substitutions. Our construction follows from earlier ones for metric spaces which go under the name ``direct sum of point pairs'' \cite{Christensen} or ``approximating graph'' \cite{KS10}, suitably adapted to incorporate the self-similar symmetry. The construction is therefore more rigid. The so-called Dirac operator $D$ of the spectral triple will depend on a parameter $\rho$ which is related to the self-similar scaling. We observe a new feature which, we believe, ought to be interpreted as a sign of self-similarity: the zeta function is periodic with purely imaginary period $\frac{2\pi i}{\log \rho}$. Correspondingly, what corresponds to the Seeley coefficients (in the case of manifolds) in the expansion of the trace of the heat kernel $e^{-tD^2}$ is here given by functions of $\log t$ which are $\frac{2\pi }{\log \rho}$-periodic. This has consequences for the usual formulae for tensor products of spectral triples. If we take the tensor product of two such triples and compare the spectral states ${\mathcal T}_{1,2}$ for the individual factors with the spectral state ${\mathcal T}$ of the tensor product, then a formula like ${\mathcal T}(A_1\otimes A_2) = {\mathcal T}_1(A_1){\mathcal T}_2(A_2)$ will not always hold, due to resonance phenomena of the involved periodicities. The heat kernel we were referring to above is the kernel of the semigroup generated by $D^2$ and hence does not involve the algebra. But the spectral triple gives rise, in principle, to other semigroups whose generators may be defined by Dirichlet forms of the type $Q(f,g) = {\mathcal T}([D,f]^*[D,g])$. Here $f,g$ are represented elements of the algebra and ${\mathcal T}$ a state. We will take for ${\mathcal T}$ the spectral state, a common choice, but not the only possible one. Pearson-Bellissard \cite{PB09}, for instance, choose the standard operator trace.
There is, however, a difficulty: it is a priori not clear what the right domain of definition of $Q$ is. As we will conclude from this work, the choice of domain is crucial and needs additional ingredients. This is why we can only discuss rigorously Dirichlet forms and Laplacians (their infinitesimal generators) in the second part of the paper, when we consider our applications. Our main application will be to the tiling space of a substitution tiling. In this case the finite oriented graph defining the spectral triple is the substitution graph. Moreover, the spectral triple is essentially described by the tensor product of two spectral triples of the above type, one for the transversal and one for the longitudinal direction. There will thus be two parameters, $\rho_{tr}$ and $\rho_{lg}$. The additional ingredients, which will allow us to define a domain for the Dirichlet form $Q$, are the dynamical eigenfunctions of the translation action on the tiling space. We have to suppose that these span the Hilbert space of $L^2$-functions on the tiling space and we are thus led, by Solomyak's theorem \cite{Sol07}, to consider Pisot substitutions. This means that the Perron-Frobenius eigenvalue of the substitution matrix is $\theta^d$, where $\theta$ is a Pisot number and $d$ the dimension of the tiling. Our main result about the Dirichlet form can then be qualitatively explained as follows: in order to have non-trivial Dirichlet forms the parameters $\rho_{tr}$ and $\rho_{lg}$ have to be fixed such that $\rho_{lg} = \theta^{-1}$ and $\rho_{tr} = |\theta'|$, where $\theta'$ is an algebraic conjugate of $\theta$, distinct from $\theta$, with maximal modulus. The modulus $ |\theta'|$ is strictly smaller than $1$ by the Pisot property and larger than $\theta^{-1}$ (equality holds only for quadratic unimodular Pisot numbers). It then follows that the Laplacian defined by the Dirichlet form can be interpreted as an elliptic operator on the maximal equicontinuous factor of the translation action on the tiling space. \paragraph{Summary of results} After a quick introduction to spectral triples we are first concerned with the properties of their zeta functions in the case that the expansion of the trace of the heat kernel $e^{-tD^2}$ is not simply an expansion into powers of $t$ but of the type \begin{equation} \label{eq-intro-heat} {\rm Tr\,}(e^{-tD^2}) \sim f(-\log t) \; t^\alpha \,, \end{equation} with $\mathrm{Re}(\alpha)<0$, and $f:{\mathbb R}_+ \rightarrow {\mathbb R}$ a bounded locally integrable function such that $\lim_{s\rightarrow 0^+} s{\mathcal L}[f](s)$ exists and is non-zero, where ${\mathcal L}$ is the Laplace transform. A non-constant $f$ in that expansion has consequences which we did not expect at first. We are led in Section~\ref{ssec-specstate} to study classes of operators in ${\mathcal B}({\mathfrak H})$ which have a compatible behavior. An operator $A\in {\mathcal B}({\mathfrak H})$ is {\it weakly regular} if there exists a bounded locally integrable function $f_A:{\mathbb R}_+ \rightarrow {\mathbb R}$, for which $\lim_{s\rightarrow 0^+} s{\mathcal L}[f_A](s)$ exists and is non-zero, such that \begin{equation} \label{eq-intro-wreg} {\rm Tr\,}(e^{-tD^2}A) \sim f_A(-\log t) \; t^\alpha \,, \end{equation} where $\alpha$ is the same as in equation~\eqref{eq-intro-heat}.
For such operators, the spectral state does not depend on the choice of a Dixmier trace and is given by \[ {\mathcal T}(A) = \lim_{s\rightarrow 0^+} \frac{{\mathcal L}[f_A](s)}{{\mathcal L}[f](s)} \,, \] where $f$ is the same as in equation~\eqref{eq-intro-heat}, see Lemma~\ref{lem-specstate}. We also define {\it strongly regular} operators, for which one has in particular $f_A = {\mathcal T}(A) f$ in equation~\eqref{eq-intro-wreg} (see Corollary~\ref{cor-streg}). Regular operators have an interesting behavior under tensor products, which we will use in the applications to tilings. If the spectral triple is a tensor product, \(({\mathcal A},{\mathfrak H},D)=({\mathcal A}_1\otimes{\mathcal A}_2,{\mathfrak H}_1\otimes{\mathfrak H}_2,D_1\otimes {\mathbf 1} + \chi \otimes D_2)\), where $\chi$ is a grading on $({\mathcal A}_1,{\mathfrak H}_1,D_1)$, then one has \[ {\mathcal T}(A_1\otimes A_2) = \lim_{s\rightarrow 0^+} \frac{ {\mathcal L}[f_{A_1}f_{A_2}](s) }{ {\mathcal L}[f_1f_2](s) }\,, \] where $f_i$ is as in \eqref{eq-intro-heat} for $D_i$, and $f_{A_i}$ as in \eqref{eq-intro-wreg} for $A_i$, for each factor $i=1,2$ of the tensor product, see Lemma~\ref{lem-prodstate}. In general, the state factorizes as \({\mathcal T}(A_1\otimes A_2)={\mathcal T}_1(A_1){\mathcal T}_2(A_2)\) only if both $A_1$ and $A_2$ are strongly regular for the individual spectral triples. Here ${\mathcal T}_i$ denotes the spectral state of \(({\mathcal A}_i,{\mathfrak H}_i,D_i)\), $i=1,2$ (see Corollary~\ref{cor-prodstrongreg}). It is easy to build examples for which this equality fails for more general operators: for example ${\mathcal T}_1(A_1)={\mathcal T}_2(A_2)=0$ and ${\mathcal T}(A_1\otimes A_2)\neq 0$. In Section~\ref{sec-ST-Bratteli} we study spectral triples associated with a stationary Bratteli diagram, that is, for the C$^*$-algebra of continuous functions on the Cantor set of (half-) infinite paths on a finite oriented graph. These depend on the matrix $A$ encoding the edges between two levels in the diagram (called here a {\em graph matrix} and assumed to be primitive), a parameter $\rho\in(0,1)$ to account for self-similar scaling, and a horizontal structure $\hat{\mathcal H}$ (a set of edges linking the edges of the Bratteli diagram). We determine the spectral information of such spectral triples. In Theorem~\ref{thm-ST} we derive the Connes distance, and show under which conditions it yields the Cantor topology on the path space. We compute the zeta-function $\zeta(z)={\rm Tr\,}(|D|^{-z})$ and the expansion of the heat-kernel. \noindent {\bf Theorem} (Theorems~\ref{thm-zeta} and~\ref{thm-heatkernel}, and Remark~\ref{rem-heatkernel} in the main text.) {\em Consider a spectral triple associated with a stationary Bratteli diagram with graph matrix $A$ and parameter $0<\rho<1$. Assume that $A$ is diagonalizable with eigenvalues $\lambda_1, \cdots,\lambda_p$. \begin{itemize} \item The zeta-function $\zeta$ extends to a meromorphic function on ${\mathbb C}$ which is invariant under the translation \(z\mapsto z+\frac{2\pi \imath}{\log \rho}\). It has only simple poles and these are at \(\frac{\log\lambda_j+2\pi \imath k}{-\log \rho}, \: k\in {\mathbb Z}, \; j=1,\ldots, p\).
In particular, the spectral dimension (abscissa of convergence of $\zeta$) is equal to $s_0=\frac{\log \lambda_{\text{\rm \tiny PF}}}{-\log \rho}$, where $\lambda_{\text{\rm \tiny PF}}$ is the Perron-Frobenius eigenvalue of $A$. The residue at the pole \(\frac{\log\lambda_j+2\pi \imath k}{-\log \rho}\) is given by $\frac{C^j_{\hat{\mathcal H}}\lambda_j}{-\log\rho}$. \item The Seeley expansion of the heat-kernel is given by \[ {\rm Tr\,}(e^{-t D^2}) = \sum_{j : |\lambda_j| > 1} C^{j}_{\hat{\mathcal H}} \, \mathfrak p_{-2\log \rho,\log \lambda_j}({-\log t}) \; t^{\frac{\log \lambda_j}{2\log\rho}} + C^{j_0}_{\hat{\mathcal H}}\frac{-\log t}{-2\log \rho} + h(t) \] where $h$ is entire, $\mathfrak p_{r,a}$ is an $r$-periodic smooth function, and $j_0$ is such that $\lambda_{j_0}=1$. \end{itemize} } \noindent The constants $C^{j}_{\hat{\mathcal H}}$ are given in (\ref{eq-CH}); they depend on the choice of horizontal edges $\hat{\mathcal H}$. The function $\mathfrak p_{r,a}$ is explicitly given in equations~\eqref{eq-fnper1} and~\eqref{eq-fnper2}, and its average over a period is \(\bar{\mathfrak p}_{r,a}=\frac{1}{r} \Gamma(\frac{a}{r})\). If $A$ is not diagonalizable then $\zeta$ has poles of higher order and the heat-kernel expansion is more involved (with powers of $\log (t)$ depending on the order of the poles), see Remark~\ref{rem-zeta} and Theorem~\ref{thm-heatkernel}. In Section~\ref{sec-tilings} we apply our findings to substitution tiling spaces $\Omega_\Phi$. We consider geometric substitutions of the simplest form, as in \cite{Grunbaum}, which are defined by a decomposition rule followed by a rescaling, i.e.\ each prototile is decomposed into smaller tiles, which, when stretched by a common factor $\theta>1$ (the dilation factor), are congruent to some original tile. The result of the substitution on a tile is called a supertile (and, by iteration, an $n$-th order supertile). If one applies only the decomposition rule one obtains smaller tiles, which we call microtiles. The substitution induces a hyperbolic action on $\Omega_\Phi$. Our approximating graph will be invariant under this action. But $\Omega_\Phi$ carries a second action, that by translation of the tilings. Although the translation action will not play a direct role in the construction of the spectral triple, it will be crucial in Section~\ref{ssec-DirForm} to define a domain for the Dirichlet forms. The approximating graph for $\Omega_\Phi$ is constructed with the help of {\em doubly} infinite paths over the substitution graph. {\em Half} infinite paths describe its canonical transversal. We use this structure to construct a spectral triple for $C(\Omega_\Phi)$ essentially as a tensor product of two spectral triples, one obtained from the substitution graph and the other from the reversed substitution graph. Indeed, the first of the two spectral triples describes the transversal and the second the longitudinal part of $\Omega_\Phi$. Since the graph matrix of the reversed graph is the transpose of the original graph matrix we will have to deal with only one set of eigenvalues $\lambda_1,\cdots,\lambda_p$. It turns out to be wise, however, to keep two dilation parameters $\rho_{tr}$ and $\rho_{lg}$ as independent parameters, although they will later be related to the dilation factor $\theta$ of the substitution. We obtain: \noindent {\bf Theorem} (Theorem~\ref{thm-STOmega} in the main text.)
{\em The spectral triple for $C(\Omega_\Phi)$ is finitely summable with spectral dimension \[ s_0 = \frac{d\log\theta}{-\log\rho_{tr}} + \frac{d\log\theta}{-\log\rho_{lg}}\,, \] which is the sum of the spectral dimensions of the triples associated with the transversal and with the longitudinal part. The zeta function $\zeta(z)$ has a simple pole at $s_0$ with positive residue. The spectral measure is equal to the unique invariant probability measure on $\Omega_\Phi$. } We discuss in Section~\ref{ssect-Pisot} the particularities of Pisot substitutions. These are substitutions for which the dilation factor $\theta$ is a Pisot number: an algebraic integer greater than $1$ all of whose Galois conjugates (other than $\theta$ itself) have modulus less than $1$. Their dynamical system $(\Omega_\Phi,{\mathbb R}^d)$ factors onto an inverse limit of $dJ$-tori, its maximal equicontinuous factor $\hat E$, where $J$ is the algebraic degree of $\theta$. The substitution induces a hyperbolic homeomorphism on that inverse limit which allows us to split the tangent space at each point into a stable and an unstable subspace, $S$ and $U$. The latter is $d$-dimensional and can be identified with the space in which the tiling lives. $S$ can be split further into eigenspaces of the hyperbolic map, namely $S = S_1+S_2$ where $S_2$ is the direct sum of eigenspaces to the Galois conjugates of $\theta$ which are next to leading in modulus, that is, have maximal modulus among the Galois conjugates which are distinct from $\theta$. This prepares the ground for the study of Dirichlet forms and Laplacians on Pisot substitution tiling spaces in Section~\ref{ssec-DirForm}. The main issue is to find a domain for the bilinear form on $C(\Omega_\Phi)$ defined by \[ Q(f,g) = {\mathcal T}\bigl( [D,\pi(f)]^* [D,\pi(g)] \bigr) \,. \] $Q$ decomposes into two forms $Q_{tr}$ and $Q_{lg}$, a transversal and a longitudinal one, which turn out to be Dirichlet forms on a suitable core, once the parameters have been fixed to $\rho_{tr}=|\theta_2|$ and $\rho_{lg}=\theta^{-1}$, where $\theta_2$ is a next-to-leading Galois conjugate of $\theta$. Our main theorem is the following: \noindent {\bf Theorem} (Theorem~\ref{thm-DirForm} in the main text.) {\em Consider a Pisot substitution tiling of ${\mathbb R}^d$ with Pisot number $\theta$ of degree $J$. Suppose that the tiling dynamical system has purely discrete dynamical spectrum. Let $\theta_2,\cdots, \theta_L$ be the subleading conjugates of $\theta$, so that in particular $\theta_j=|\theta_2|e^{\imath \alpha_j}$ for $2\le j\le L\le J$. Assume that for all $j\neq j'$ one has \( \alpha_j - \alpha_{j'} + 2\pi k + 2\pi \frac{\log|\theta_2|}{\log\theta} k' \neq 0\,, \forall k,k' \in {\mathbb Z} \,. \) Then the space of finite linear combinations of dynamical eigenfunctions is a core for $Q$ on which it is closable. Furthermore, $Q = Q_{tr} + Q_{lg}$, and $Q_{tr/lg}$ has generator $\Delta_{tr/lg}=\sum_{h\in \hat{\mathcal H}_{tr/lg}}\Delta_{tr/lg}^h$ given on an eigenfunction $f_\beta$ to eigenvalue $\beta$ by} \begin{eqnarray*} \Delta_{lg}^h f_\beta &=& -c_{lg}(2\pi)^2 \textrm{freq}(t_{s^2(h)}) \beta(a_h)^2 f_\beta ,\\ \Delta_{tr}^h f_\beta &=& -c_{tr}(2\pi)^2 \textrm{freq}(t_{s^2(h)}) \langle \widetilde{r_h}^\star,\beta \rangle^2_{{\mathbb R}^{dJ}} f_\beta . \end{eqnarray*} We explain the notation as far as possible without going into details.
A longitudinal horizontal edge $h\in \hat{\mathcal H}_{lg}$ encodes a vector of translation between two tiles in some supertile. Whereas a transversal horizontal edge $h\in\hat{\mathcal H}_{tr}$ encodes a vector $r_h$ which should be thought of as a return vector between supertiles of a given type sitting in an even larger supertile~--~hence as a large vector~--~a longitudinal horizontal edge $h\in\hat{\mathcal H}_{lg}$ stands for a vector of translation $a_h$ in a microtile between even smaller microtiles~--~so is a small vector. $t_{s^2(h)}$ is the tile (up to similarity) from which the translation encoded by $h$ starts and $\textrm{freq}(t_{s^2(h)})$ its frequency in the tiling. By definition an eigenvalue is an element $\beta\in{{\mathbb R}^d}^*$ for which there exists a function $f_\beta:\Omega_\Phi\to{\mathbb C}$ satisfying $f_\beta(\omega+a) = e^{2\pi i \beta(a)} f_\beta(\omega)$ (we write the translation action simply by $(a,\omega) \mapsto \omega+a$), but the geometric construction discussed in Section~\ref{ssect-Pisot} allows us to view $\beta$ also as an element of $S\oplus U$ so that $\beta(a) = \langle \widetilde a,\beta\rangle$ ($\langle\cdot,\cdot\rangle$ is a scalar product on $S\oplus U$). Here we wrote $\widetilde a$ for the vector in $U$ corresponding to $a$ via the identification of $U$ with the space in which the tiling lives. Finally ${}^\star:U\to S_2\subset S$ is the reduced star map. This is Moody's star map followed by a projection onto $S_2$ along $S_1$. The values for the constants $c_{tr}$ and $c_{lg}$ are given in (\ref{eq-ctr}) and (\ref{eq-clg}). The transverse Laplacian can therefore be seen as a Laplacian on the maximal equicontinuous factor $\hat E$. The longitudinal Laplacian can be written explicitly on $\Omega_\Phi$, and turns out to be a Laplacian on the leaves, namely it reads $\Delta_{lg} = c_{lg} \nabla_{lg}^\dagger {\mathcal K} \nabla_{lg}$, where $\nabla_{lg}$ is the longitudinal gradient and ${\mathcal K}$ a tensor, see equation~\eqref{eq-lgLaplace}. \section{Preliminaries for spectral triples} \label{sec-prelim} A spectral triple $({\mathcal A}, {\mathfrak H}, D)$ for a unital $C^*$-algebra ${\mathcal A}$ is given by a Hilbert space ${\mathfrak H}$ carrying a faithful representation $\pi$ of ${\mathcal A}$ by bounded operators, and an unbounded self-adjoint operator $D$ on ${\mathfrak H}$ with compact resolvent such that the set of $a\in {\mathcal A}$ for which the commutator $[D,\pi(a)]$ extends to a bounded operator on ${\mathfrak H}$ forms a dense subalgebra ${\mathcal A}_0 \subset {\mathcal A}$. The operator $D$ is referred to as the Dirac operator. In all examples here it will be assumed to be invertible, with compact inverse. The spectral triple $({\mathcal A}, {\mathfrak H}, D)$ is termed {\em even} if there exists a ${\mathbb Z} / 2$-grading operator $\chi$ on ${\mathfrak H}$ which commutes with $\pi(a)$, $a\in {\mathcal A}$, and anticommutes with $D$. We will consider here the case of commutative $C^*$-algebras, ${\mathcal A}=(C(X), \|\cdot \|_\infty)$, of continuous functions over a compact Hausdorff space $X$ with the sup-norm, so we may speak about a spectral triple for the space $X$. The spaces we consider will be far from being manifolds and our main interest lies in the differential structure defined by the spectral triple. More specifically we restrict our attention to the Laplace operator(s) defined by it.
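For readers less familiar with these notions, the following is a standard toy illustration (the two-point space; it is not one of the constructions of this paper and serves only to make the definitions concrete): with $X=\{a,b\}$, ${\mathcal A}=C(X)\cong{\mathbb C}^2$, ${\mathfrak H}={\mathbb C}^2$, $\pi(f)=\mathrm{diag}(f(a),f(b))$ and $D$ the off-diagonal matrix with entries $1/d$, one has $\|[D,\pi(f)]\|=|f(a)-f(b)|/d$, so the Connes distance $\sup\{|f(a)-f(b)| : \|[D,\pi(f)]\|\le 1\}$ recovers the parameter $d$. A minimal numerical check of the commutator norm:
\begin{verbatim}
# Standard two-point-space spectral triple (toy example, not from this paper).
import numpy as np

d = 0.7                                    # assumed "distance" parameter
D = np.array([[0.0, 1.0 / d], [1.0 / d, 0.0]])

def commutator_norm(fa, fb):
    # pi(f) = diag(f(a), f(b)); return the operator norm of [D, pi(f)]
    pif = np.diag([fa, fb])
    return np.linalg.norm(D @ pif - pif @ D, ord=2)

fa, fb = 2.0, -1.0
print(commutator_norm(fa, fb))             # equals |fa - fb| / d
print(abs(fa - fb) / d)
\end{verbatim}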
The approach to defining Laplace operators via spectral triples has been considered earlier, for fractals by \cite{GI03,GI05,La97} and for ultrametric Cantor sets and tiling spaces in \cite{PB09,JS10,KS10}. \subsection{Zeta function and heat kernel} Since the resolvent of $D$ is supposed to be compact, ${\rm Tr\,}(|D|^{-s})$ can be expressed as a Dirichlet series in terms of the eigenvalues of $|D|$.\footnote{For simplicity we suppose (as will be the case in our applications) that $ \ker(D)$ is trivial, otherwise we would have to work with ${\rm Tr\,}_{ \ker(D)^\perp }(|D|^{-s})$ or remove the kernel of $D$ by adding a finite rank perturbation.} The spectral triple is called {\em finitely summable} if the Dirichlet series is summable for some $s\in{\mathbb R}$ and hence defines a function \[ \zeta(z) = {\rm Tr\,}(|D|^{-z})\,, \] on some half plane $\{z\in{\mathbb C}: \mathrm{Re}(z)>s_0\}$ which is called the {\em zeta-function} of the spectral triple. The smallest possible value for $s_0$ in the above (the {\em abscissa of convergence} of the Dirichlet series) is called the {\em metric dimension} of the spectral triple. We call $\zeta$ {\em simple} if $\lim_{s\to s_0^+}(s-s_0)\zeta(s)$ exists. This is for instance the case if $\zeta$ can be meromorphically extended and then has a simple pole at $s_0$. We will then refer to the meromorphic extension also simply as the zeta function of the triple. Another quantity to look at is the heat kernel $e^{-tD^2}$ of the square of the Dirac operator. Thanks to the Mellin transform $$ \Gamma(s)\mu^{-2s} = \int_0^\infty e^{-t\mu^2} t^{s-1} dt,$$ where $\mu>0$ and \(\Gamma(s)=\int_0^{+\infty} e^{-t}t^{s-1}dt\) is the Gamma function, one can relate the zeta function to the heat kernel as follows: $$ \Gamma(s)\zeta(2s) = \int_0^\infty {\rm Tr\,}(e^{-tD^2}) t^{s-1} dt.$$ This of course makes sense only if $e^{-tD^2}$ is trace class for all $t>0$, which is anyway a necessary condition for finite summability. Notice that the trace class condition implies also that $s\mapsto \int_\delta^\infty {\rm Tr\,}(e^{-tD^2})t^{s-1}dt$ is holomorphic for all $\delta>0$. The above formula is particularly useful if one knows the asymptotic expansion of ${\rm Tr\,}(e^{-tD^2})$ at $t\to 0$, or only its leading term.\footnote{A function $f:{\mathbb R}^{>0}\to {\mathbb C}$ is asymptotically equivalent to $g:{\mathbb R}^{>0}\to {\mathbb R}$ at $t\to 0$, written $f\sim g$, if $|f - g|=o(|g|)$. $f=O(g)$ means that $\exists M>0,\exists \delta>0,\forall\, 0<t<\delta: |f(t)|\leq M|g(t)|$, and $f=o(g)$ means that $\forall\epsilon>0,\exists\delta>0,\forall\, 0<t<\delta: |f(t)|\leq \epsilon|g(t)|$.} It is well known that the form of the asymptotic expansion is related to the singularities of the zeta-function \cite{CoMa08,Iochum}. For instance, an expansion of the form $${\rm Tr\,}(e^{-tD^2}) = \sum_\alpha c_\alpha t^{\alpha} + h(t),$$ with $\mathrm{Re} (\alpha)<0$, $c_\alpha\in {\mathbb C}$ and $h$ a function which is bounded at $0$ (in particular without logarithmic terms like $\log t$) implies that the zeta-function has a simple pole at $-2{\alpha}$ with residue equal to $2c_\alpha/\Gamma(-{\alpha})$ and is regular at $0$ \cite{CoMa08}. We will see, however, that the situation is quite different in our case, where we have to replace $c_\alpha$ by functions that are periodic in $\log t$. Recall the Laplace transform of a function $f$ at $s$: \begin{equation} \label{eq-Laplace} {\mathcal L}[f](s):=\int_0^\infty f(x)e^{-sx}dx \,.
\end{equation} We assume therefore in the sequel that the asymptotic behaviour of the trace of the heat-kernel is given by \begin{equation} \label{eq-asym} {\rm Tr\,}(e^{-tD^2})\stackrel{t\to 0}{\sim} f(-\log t) t^{\alpha} \end{equation} where $\alpha<0$ and $f:{\mathbb R}^{\geq 0}\to {\mathbb R}$ is a bounded, locally integrable function for which $\lim_{s\to 0}s{\mathcal L}[f](s)$ exists and is different from $0$. This is the weakest assumption needed for $\zeta$ to be simple and have non-negative abscissa of convergence, and to be able to compute its residues explicitly, as the following lemma shows (see also Remark~\ref{rem-freg} for a regular example of such an $f$). \begin{lemma}\label{lem-heat-zeta} If the trace of the heat-kernel satisfies (\ref{eq-asym}) then $\zeta$ is simple, has abscissa of convergence \[s_0 = -2\alpha \quad\mbox{and}\quad \frac12\Gamma(\frac{s_0}2)\lim_{s\to s_0^+}(s-s_0)\zeta(s) = \lim_{s\to 0}s{\mathcal L}[f](s).\] If, moreover, $\mathcal L[f](s)$ admits a meromorphic extension with a simple pole at $0$ and ${\rm Tr\,}(e^{-tD^2}) - f(-\log t) t^{\alpha}=O(t^{\beta})$ (with $\beta>\alpha$) then $\zeta(s)$ has a simple pole at $s_0=-2\alpha$ and hence $\frac12\Gamma(\frac{s_0}2)\mbox{\rm Res}(\zeta,s_0)=\mbox{\rm Res}({\mathcal L}[f],0) = \lim_{s\to 0}s{\mathcal L}[f](s)$. \end{lemma} \begin{proof} We adapt the arguments of \cite{Iochum}. Let $h(t) = {\rm Tr\,}(e^{-tD^2}) - f(-\log t) t^{\alpha}$ and $M=\sup_x |f(x)|$. Then for all $\epsilon>0$ there exists $\delta\leq 1$ such that $|h(t)|\leq \epsilon M t^\alpha$ if $t<\delta$. In particular, $H_\delta(s) := \int_0^\delta h(t) t^{s-1}dt $ satisfies $|H_\delta(s)| \leq \epsilon M\frac{\delta^{\alpha+s}}{\alpha+s}$, provided $\alpha+s>0$. Now, again for $\alpha+s>0$, $$ \Gamma(s)\zeta(2s) = \int_{0}^\infty{\rm Tr\,}(e^{-tD^2})t^{s-1}dt = \int_0^\delta f(-\log t) t^{\alpha+s-1}dt + H_\delta(s) + g_\epsilon(s)$$ where \(g_\epsilon(s) = \int_{\delta}^\infty{\rm Tr\,}(e^{-tD^2})t^{s-1}dt\) is holomorphic in $s$. This shows that $\zeta(2s)$ is finite for $s>-\alpha$. Furthermore \[\lim_{s\to -\alpha^+}(\alpha+s)\int_0^\delta f(-\log t) t^{\alpha+s-1}dt = \lim_{s\to 0^+}s \int_{-\infty}^{\log \delta} f(-\tau)e^{\tau s}d\tau = \lim_{s\to 0^+}s {\mathcal L}[f](s)\] where we have used in the last equation that $\lim_{s\to 0^+}s \int_{\log \delta}^0 f(-\tau)e^{\tau s}d\tau = 0 $. Since $\epsilon>0$ is arbitrary we conclude that $$\lim_{s\to -\alpha^+}(\alpha+s)\Gamma(s)\zeta(2s) = \lim_{s\to 0^+}s {\mathcal L}[f](s).$$ Hence $s_0 = -2\alpha$ is the abscissa of convergence. Now if $h(t)$ is of order $t^\beta$ we can find $M>0$ and $\delta>0$ such that $|h(t)t^{-\beta}|\leq M$ if $0<t<\delta$. If $\beta>\alpha$ the function $t\mapsto t^{s+\beta-1}$ is integrable on $(0,\delta)$ as long as $s$ lies in a sufficiently small neighbourhood of $-\alpha$. Since $s\mapsto t^{s+\beta-1}$ is holomorphic for all $t>0$ we find that $H_\delta(s) = \int_0^\delta (h(t)t^{-\beta}) t^{s+\beta-1}dt$ is holomorphic near $s=-\alpha$, which shows that $(\alpha+s)\zeta(2s)$ is holomorphic there, too. Thus $\zeta$ has a simple pole at $-2\alpha$ and we have the above stated formula for its residue. \end{proof} \begin{rem} \label{rem-freg} If $f(\tau) = e^{ia\tau}$ then $\mathcal L[f](s) = \frac{1}{s-ia}$.
Thus if $f$ is the restriction of a periodic function of class $C^1$ then, upon using its representation as a Fourier series, we see that $s\mathcal L[f](s)$ extends to an analytic function around $0$ and $$\lim_{s\to 0}s\mathcal L[f](s) = \bar f\,,$$ the mean of $f$. \end{rem} \subsection{Spectral state} \label{ssec-specstate} Given a bounded operator $A$ on ${\mathfrak H}$ such that $|D|^{-s_0}A$ is in the Dixmier ideal we consider the expression \[ {\mathcal T}(A) = {\rm Tr\,}_\omega(|D|^{-s_0}A) / {\rm Tr\,}_\omega(|D|^{-s_0}) \] which depends a priori on the choice of Dixmier trace ${\rm Tr\,}_\omega$. With a little luck, however, $\lim_{s\to s_{0}^+} \frac1{\zeta(s)} {\rm Tr\,}(|D|^{-s}A)$ exists and then \cite{Co94} \[ {\mathcal T}(A) = \lim_{s\to s_{0}^+} \frac1{\zeta(s)} {\rm Tr\,}(|D|^{-s}A). \] We provide here a criterion for that. Note that the Mellin transform allows us to write \[ {\rm Tr\,}(|D|^{-s}A) = \frac1{\Gamma(\frac s2)} \int_0^\infty {\rm Tr\,}(e^{-t D^2} A)t^{\frac s2-1}dt.\] We call $A\in{\mathcal B}({\mathfrak H})$ {\em strongly regular} if there exists a number $c_A$ such that \[ {\rm Tr\,}(e^{-tD^2}A)-c_A {\rm Tr\,}(e^{-tD^2})=o\bigl({\rm Tr\,}(e^{-tD^2})\bigr). \] If $c_A \neq 0$ one can thus say that ${\rm Tr\,}(e^{-tD^2}A)\sim c_A {\rm Tr\,}(e^{-tD^2})$. In the context in which the heat kernel satisfies (\ref{eq-asym}) it is useful to consider the notion of {\em weakly regular} operators $A\in{\mathcal B}({\mathfrak H})$. These are operators which satisfy \begin{equation} \label{eq-wreg} {\rm Tr\,}(e^{-tD^2}A)\sim f_A(-\log t) t^\alpha \end{equation} where $\alpha$ is the same as in (\ref{eq-asym}) and $f_A:{\mathbb R}^{\geq 0}\to {\mathbb C}$ is a bounded, non-zero, locally integrable function for which $\lim_{s\to 0}s{\mathcal L}[f_A](s)$ exists. Clearly, strongly regular operators are weakly regular and $f_A=c_A f$ in this case, where $f$ is given in equation~\eqref{eq-asym} (one actually has $c_A={\mathcal T}(A)$, see Corollary~\ref{cor-streg}). \begin{lemma} \label{lem-specstate} Assume that the trace of the heat-kernel satisfies (\ref{eq-asym}) and that $A\in {\mathcal B}({\mathfrak H})$ is weakly regular, that is, satisfies (\ref{eq-wreg}). Then $\lim_{s\to s_{0}^+} \frac1{\zeta(s)} {\rm Tr\,}(|D|^{-s}A)$ exists and is equal to $${\mathcal T}(A) = \lim_{s\to 0} \frac{{\mathcal L}[f_A](s)}{{\mathcal L}[f](s)}.$$ \end{lemma} \begin{proof} Under the hypothesis, for all $\epsilon$ we can find a $\delta$ such that, if $s>s_0$, \[ \left|\int_0^\delta \big({\rm Tr\,}(e^{-t D^2} A) - f_A(-\log t) t^{\alpha}\big)t^{\frac s2-1}dt \right|\leq \epsilon \int_0^\delta |f_A(-\log t)|t^{\alpha+\frac s2-1}dt\leq \epsilon M_A \frac{\delta^{\alpha+\frac s2}}{\alpha+\frac s2} \] where $M_A$ is an upper bound for $|f_A(-\log t)|$. Since $\int_\delta^\infty {\rm Tr\,}(e^{-t D^2} )t^{\frac s2-1}dt$ and hence also $\int_\delta^\infty {\rm Tr\,}(e^{-t D^2} A)t^{\frac s2-1}dt$ are finite for all $\delta>0$ we get ($\alpha = -\frac{s_0}2$) \[\lim_{s\to s_{0}^+} \frac1{\Gamma(\frac s2)\zeta(s)}\left| \int_0^\infty {\rm Tr\,}(e^{-t D^2} A)t^{\frac s2-1}dt - \int_0^1 f_A(-\log t)t^{\frac {s-s_0}2-1}dt \right| \leq \epsilon \widetilde M_A.\] Notice that $\int_0^1 f_A(-\log t)t^{s-1}dt = {\mathcal L}[f_A](s)$.
Since $\epsilon$ was arbitrary we conclude that \[ \lim_{s\to s_{0}^+} \frac1{\zeta(s)} {\rm Tr\,}(|D|^{-s}A) = \lim_{s\to s_{0}^+} \frac{ {\mathcal L}[f_A](\frac{s-s_0}2)}{\Gamma(\frac s2)\zeta(s)} = \lim_{s\to 0} \frac{{\mathcal L}[f_A](s)}{{\mathcal L}[f](s)} . \] \end{proof} \begin{coro} \label{cor-streg} If $A\in{\mathcal B}({\mathfrak H})$ is strongly regular, then \({\rm Tr\,}(e^{-tD^2}A)\sim {\mathcal T}(A)\, {\rm Tr\,}(e^{-tD^2})\). In other words, the functions in equations~\eqref{eq-asym} and~\eqref{eq-wreg} satisfy $f_A={\mathcal T}(A) f$. \end{coro} \begin{proof} If $A$ is strongly regular, then it is also weakly regular with $f_A=c_A f$. The Laplace transform is linear so \({\mathcal L}[f_A](s)=c_A{\mathcal L}[f](s)\), and Lemma~\ref{lem-specstate} then implies $c_A={\mathcal T}(A)$. \end{proof} Order the eigenvalues of $|D|$ increasingly (without counting multiplicity) and let $F_n$ be the $n$-th eigenspace of $|D|$. \begin{coro} \label{cor-cesar} Let $A\in{\mathcal B}({\mathfrak H})$ and \(\bar A_n=\frac{{\rm Tr\,}_{{F}_n} (A|_{{F}_n})}{\dim F_n} \). If the limit \[ \bar A=\lim_{n\to\infty}\bar A_n \] exists then $A$ is strongly regular and ${\mathcal T}(A)=\bar A$. \end{coro} \begin{proof} Write \(c_n = e^{-t\mu_n^2} \dim F_n\) where $\mu_n$ is the $n$-th eigenvalue of $|D|$. One has \begin{eqnarray*} {\rm Tr\,}(e^{-tD^2} A ) - \bar A \, {\rm Tr\,}(e^{-tD^2} ) & = & \sum_{n\ge 1} (\bar A_n - \bar A) c_n \end{eqnarray*} Now fix an $\epsilon>0$, and choose an integer $N_\epsilon$ such that \(|\bar A_n -\bar A|\le \epsilon\) for all $n\ge N_\epsilon$. Then the series on the r.h.s.\ can be bounded by \((\sup_n|\bar A_n -\bar A| )\sum_{n< N_\epsilon} c_{n} + \epsilon \sum_{n\ge N_\epsilon} c_{n}\). Using \(\sum_{n\ge N_\epsilon} c_{n}\leq {\rm Tr\,}(e^{-t D^2})\) we find that for all $\epsilon$ there exists $C_\epsilon$ such that \[ \Bigl| {\rm Tr\,}(e^{-tD^2} A ) - \bar A\, {\rm Tr\,}(e^{-tD^2}) \Bigr| \leq C_{\epsilon} + \epsilon\, {\rm Tr\,}(e^{-t D^2}) . \] Since ${\rm Tr\,}(e^{-t D^2})$ tends to $+\infty$ as $t$ tends to $0$, this shows that ${\rm Tr\,}(e^{-t D^2}A)\stackrel{t\to 0}{\sim}f_A(-\log t)t^\alpha$ with $f_A = \bar{A} f$. Applying Lemma~\ref{lem-specstate} we see that ${\mathcal T}(A)=\bar{A}$. \end{proof} In the commutative case, when ${\mathcal A}=C(X)$ for a compact Hausdorff space $X$, we are particularly concerned with operators of the form $A =\pi(f)$, for $f\in C(X)$ or for any Borel-measurable function $f$ on $X$. By the Riesz representation theorem the functional $f\mapsto {\mathcal T}(\pi(f))$ gives a measure on $X$, which we call the {\em spectral measure}. \subsection{Laplacians} \label{ssec-Laplace} It is tempting to define a quadratic form by \begin{equation} \label{eq-Dirform} Q(a,b) = {\mathcal T}([D,\pi(a)]^*[D,\pi(b)]) \end{equation} on elements on which this expression makes sense. It is however not so easy to determine a domain for such a form. Our interest here lies in the commutative case, ${\mathcal A}=C(X)$, and our question is as follows: Let $\mu$ be the spectral measure on $X$ and consider the Hilbert space $L^2(X) = L^2(X,\mu)$. Notice that this is usually not the Hilbert space of the spectral triple. We embed ${\mathcal A}_0$ (continuous functions having bounded commutator with $D$) into $L^2(X)$, assuming that the measure is faithful.
Does there exist a core (a dense linear subspace of $L^2(X)$) which is contained in ${\mathcal A}_0$ and on which $Q$ is well-defined, yielding a symmetric closable quadratic form which satisfies the Markov property? If ${\mathcal D}$ is the domain of the closure of $Q$ then the general theory guarantees the existence of a positive operator $\Delta$ such that $Q(f,g) = (f,\Delta g)$, and this operator generates a Markov process on $L^2(X)$. We refer to $\Delta$ as a Laplacian. We emphasize that $\Delta$ will depend on the choice of a domain. We won't be able to give general answers here, but we look at specific examples related to self-similarity. \subsection{Direct sums} \label{ssec-sums} The direct sum of two spectral triples $({\mathcal A}_1,{\mathfrak H}_1,D_1)$ and $({\mathcal A}_2,{\mathfrak H}_2,D_2)$ is the spectral triple $({\mathcal A},{\mathfrak H},D)$ given by \[ {\mathcal A} = {\mathcal A}_1\oplus {\mathcal A}_2,\quad {\mathfrak H} = {\mathfrak H}_1\oplus{\mathfrak H}_2,\quad D = D_1\oplus D_2, \] with the direct sum representation. The zeta function $\zeta$ of the direct sum is clearly the sum of the zeta functions $\zeta_i$ of the summands and thus its abscissa of convergence $s_0$ is equal to the larger of the abscissas of the two zeta functions $\zeta_i$. Let ${\mathcal T}$ denote the spectral state of the direct sum triple and ${\mathcal T}_i$ those of the summands, and assume that all zeta-functions are simple. Then, for regular operators $A_i\in{\mathcal B}({\mathfrak H}_i)$ and $A = A_1\oplus A_2$ we have \begin{equation} \label{eq-sum-state} {\mathcal T}(A) = \frac1{c_1+c_2} \bigl( c_1{\mathcal T}_1(A_1) + c_2{\mathcal T}_2(A_2) \bigr) \end{equation} where $c_i = \lim_{s\to s_0^+} (s-s_0) \zeta_i(s)$. Notice that $c_1=0$ if the abscissa of convergence of $\zeta_1$ is smaller than that of $\zeta_2$, in which case ${\mathcal T}(A)={\mathcal T}_2(A_2)$ (and similarly with $1$ and $2$ exchanged: ${\mathcal T}(A)={\mathcal T}_1(A_1)$). Notice also that $c_1+c_2 = \lim_{s\to s_0^+} (s-s_0) \zeta(s)$. It is pretty clear that the quadratic form $Q$ used to define the Laplacian is the sum of the quadratic forms of the individual triples, again leaving questions about its domain aside. \subsection{Tensor products} \label{ssec-tensorprod} The tensor product of an even spectral triple $({\mathcal A}_1,{\mathfrak H}_1,D_1)$ with grading operator $\chi$ and another spectral triple $({\mathcal A}_2,{\mathfrak H}_2,D_2)$ is the spectral triple $({\mathcal A},{\mathfrak H},D)$ given by \[ {\mathcal A} = {\mathcal A}_1\otimes {\mathcal A}_2,\quad {\mathfrak H} = {\mathfrak H}_1\otimes{\mathfrak H}_2,\quad D = D_1\otimes 1 + \chi\otimes D_2. \] Notice that $D^2 = D_1^2\otimes 1 + 1\otimes D_2^2$. It follows that the trace of the heat kernel is multiplicative: ${\rm Tr\,}(e^{-tD^2}) = {\rm Tr\,}_{{\mathfrak H}_1}(e^{-tD_1^2})\,{\rm Tr\,}_{{\mathfrak H}_2}(e^{-tD_2^2})$. This allows one to obtain information on the zeta function, the spectral state, and the quadratic form $Q$ of the tensor product. \begin{lemma} \label{lem-prod} Suppose that ${\rm Tr\,}_{{\mathfrak H}_1}(e^{-tD_1^2})$ and ${\rm Tr\,}_{{\mathfrak H}_2}(e^{-tD_2^2})$ satisfy (\ref{eq-asym}) with $f = f_1$ and $f=f_2$, respectively. Suppose that $\lim_{s\to 0} s\mathcal L[f_1f_2](s)$ exists and is non-zero.
Then the metric dimension $s_0$ of the tensor product spectral triple is the sum of the metric dimensions of the factors, and the zeta function $\zeta$ of the tensor product is simple with \[\frac12\Gamma(\frac{s_0}2)\lim_{s\to s_0^+} (s-s_0)\zeta(s) = \lim_{s\to 0} s\mathcal L[f_1f_2](s).\] \end{lemma} \begin{proof} Due to the multiplicativity of the trace of the heat kernel we have $$ {\rm Tr\,}(e^{-tD^2}) \stackrel{t\to 0}{\sim} f_1(-\log t)f_2(-\log t) t^{\alpha_1+\alpha_2}$$ and hence the result follows from Lemma~\ref{lem-heat-zeta}. \end{proof} \begin{lemma} \label{lem-prodstate} Assume the conditions of Lemma~\ref{lem-prod}. Let $A_1\in{\mathcal B}({\mathfrak H}_1)$ and $A_2\in{\mathcal B}({\mathfrak H}_2)$ be weakly regular with functions $f_{A_1}$ and $f_{A_2}$. Then $${\mathcal T}(A_1\otimes A_2) = \lim_{s\to 0^+}\frac{{\mathcal L}[f_{A_1}f_{A_2}](s)}{{\mathcal L}[f_1f_2](s)}.$$ \end{lemma} \begin{proof} Let $\epsilon>0$ and choose $\delta$ such that $|{\rm Tr\,}_{{\mathfrak H}_i}(e^{-tD_i^2}A_i) - f_{A_i}(-\log t)t^{\alpha_i} |\leq \epsilon t^{\alpha_i}$ provided $0<t<\delta$. Then \begin{eqnarray*} \int_0^\delta {\rm Tr\,}_{\mathfrak H}(e^{-tD^2}A_1\otimes A_2)t^{s-1}dt &=& \int_0^\delta {\rm Tr\,}_{{\mathfrak H}_1}(e^{-tD^2_1}A_1){\rm Tr\,}_{{\mathfrak H}_2}(e^{-tD^2_2} A_2) t^{s-1}dt \\ & =& \int_0^\delta\big(f_{A_1}(-\log t)f_{A_2}(-\log t) + O(\epsilon)\big) t^{\alpha_1+\alpha_2+s-1}dt \\ & =& {\mathcal L}[f_{A_1}f_{A_2}](\alpha_1+\alpha_2+s) + O(\epsilon)\frac{\delta^{\alpha_1+\alpha_2+s}}{\alpha_1+\alpha_2+s} \end{eqnarray*} from which the result follows by arguments similar to the above. \end{proof} \begin{coro} \label{cor-prodstrongreg} Let $A_1$ and $A_2$ be weakly regular operators. \begin{enumerate}[(i)] \item If $A_1$ is strongly regular then ${\mathcal T}(A_1\otimes A_2) = {\mathcal T}_1(A_1){\mathcal T}({\mathbf 1} \otimes A_2) $. \item If both $A_1$ and $A_2$ are strongly regular then ${\mathcal T}(A_1\otimes A_2) = {\mathcal T}_1(A_1){\mathcal T}_2(A_2) $. \end{enumerate} \end{coro} \begin{rem} The result of the corollary says that the spectral state factorizes for tensor products of strongly regular operators. This corresponds to the formula on page~563 in \cite{Co94}. It should be noticed, however, that this factorisation is in general not valid for tensor products of weakly regular operators, since the Laplace transform of a product is not the product of the Laplace transforms. We consider below examples of this type. \end{rem} \begin{lemma} \label{lem-prodform} Let $a_i,b_i\in{\mathcal A}_i$, and set \( \{d;a,b\} = [d,\pi(a)]^*[d,\pi(b)]\). \begin{enumerate}[(i)] \item Then one has \[ Q\bigl( a_1\otimes a_2, b_1\otimes b_2 \bigr) = {\mathcal T} \bigl( \{ D_1; a_1,b_1 \} \otimes \pi(a_2^* b_2) \bigr) + {\mathcal T}\bigl( \pi(a_1^* b_1) \otimes \{ D_2; a_2,b_2 \} \bigr) \,.
\] \item If \(\pi(a_i^* b_i)\), $i=1,2$, are strongly regular then \[ Q\bigl( a_1\otimes a_2, b_1\otimes b_2 \bigr) = {\mathcal T} \bigl( \{ D_1; a_1,b_1 \} \otimes {\mathbf 1} \bigr) {\mathcal T}_2 \bigl( \pi(a_2^* b_2) \bigr) +{\mathcal T}_1\bigl( \pi(a_1^* b_1)\bigr) {\mathcal T}\bigl( {\mathbf 1} \otimes \{ D_2; a_2,b_2 \} \bigr) \,. \] \end{enumerate} \end{lemma} \begin{proof} Since $[D,\pi(a_1\otimes a_2)] = [D_1,\pi(a_1)]\otimes \pi(a_2) + \chi \pi(a_1)\otimes [D_2,\pi(a_2)]$ we get \begin{eqnarray*} [D,\pi(a_1\otimes a_2)]^*[D,\pi(b_1\otimes b_2)]&=& [D_1,\pi(a_1)]^*[D_1,\pi(b_1)]\otimes \pi(a_2^*b_2) \\ &&+\pi(a_1^*b_1) \otimes [D_2,\pi(a_2)]^*[D_2,\pi(b_2)]\\ &&+[D_1,\pi(a_1)]^*\chi\pi(b_1) \otimes \pi(a_2)^*[D_2,\pi(b_2)]\\ &&+\chi\pi(a_1^*)[D_1,\pi(b_1)] \otimes [D_2,\pi(a_2)]^*\pi(b_2) \end{eqnarray*} Since ${\rm Tr\,}(\chi A) = 0$ for any odd operator $A\in {\mathcal B}({\mathfrak H}_1)$ the contributions of the last two lines vanish under the trace, hence under ${\mathcal T}$, and we get the first statement. Point (ii) follows from Corollary~\ref{cor-prodstrongreg}. \end{proof} \section{The spectral triple associated with a stationary Bratteli diagram} \label{sec-ST-Bratteli} An oriented graph \({\mathcal G}=({\mathcal V}, {\mathcal E})\) is the data of a set of vertices ${\mathcal V}$ and a set of edges ${\mathcal E}$ with two maps, \(\xymatrix{{\mathcal E} \ar@<.3ex>[r]^{r} \ar@<-.3ex>[r]_{s} & {\mathcal V}}\), one assigning to an edge $\varepsilon$ its source vertex $s(\varepsilon)$ and the second assigning its range $r(\varepsilon)$. The {\em graph matrix} of ${\mathcal G}$ is the matrix $A$ with coefficients $A_{vw}$ equal to the number of edges which have source $v$ and range $w$. We construct a spectral triple from the following data (see Figure~\ref{fig-graph} for an illustration of the construction): \begin{enumerate} \item A finite oriented graph ${\mathcal G}=({\mathcal V},{\mathcal E})$ with a distinguished one-edge loop ${l^*}$. We suppose that the graph is strongly connected: for any two vertices $v_1,v_2$ there exists an oriented path from $v_1$ to $v_2$ and an oriented path from $v_2$ to $v_1$. This is equivalent to saying that the graph matrix $A$ is irreducible. We will further assume that $A$ is {\em primitive} (see below). Alternatively we can pick a distinguished loop made of $p>1$ edges, and reduce to the case described above by replacing the matrix $A$ by $A^p$ and considering its associated graph ${\mathcal G}^p$ instead of ${\mathcal G}$. \item A function $\hat\tau:{\mathcal E}\to{\mathcal E}$ satisfying, for all $\varepsilon\in {\mathcal E}$: \begin{enumerate} \item if $r(\varepsilon)$ is the vertex of ${l^*}$ then $\hat\tau(\varepsilon)={l^*}$, \item if $r(\varepsilon)$ is not the vertex of ${l^*}$ then $\hat\tau(\varepsilon)$ is an edge starting at $r(\varepsilon)$ and such that $r(\hat\tau(\varepsilon))$ is closer\footnote{for the combinatorial graph metric, where non-loop edges have length $1$, and loop edges length $0$.} to the vertex of ${l^*}$ in ${\mathcal G}$.
\end{enumerate} \item A symmetric subset ${\hat{\mathcal H}}={\mathcal H}({\mathcal G})$ of ${\mathcal E}\times{\mathcal E}$: \[ {\hat{\mathcal H}} \subseteq \left\{ (\varepsilon,\varepsilon') \in {\mathcal E}\times{\mathcal E} \, : \, \varepsilon\neq \varepsilon', \; s(\varepsilon)=s(\varepsilon') \right\}. \] This can be understood as a graph with vertices ${\mathcal E}$ and edges ${\hat{\mathcal H}}$, which has no loops. We fix an orientation of the edges in ${\hat{\mathcal H}}$, and write \({\hat{\mathcal H}} = {\hat{\mathcal H}}^{+} \cup {\hat{\mathcal H}}^{-} \) for the decomposition into positively and negatively oriented edges. \item A real number $\rho \in (0,1)$. \end{enumerate} {\em Notation.} We still denote the range and source maps by $r,s$ on ${\hat{\mathcal H}}$: \(\xymatrix{{\hat{\mathcal H}} \ar@<.3ex>[r]^{r} \ar@<-.3ex>[r]_{s} & {\mathcal E}}\). We allow compositions with the source and range maps from ${\mathcal E}$ to ${\mathcal V}$, which we denote by \(s^2, r^2, sr, rs\): \(\xymatrix{{\hat{\mathcal H}} \ar@<.6ex>[r]^{r^2,s^2} \ar@<.3ex>[r] \ar[r] \ar@<-.3ex>[r]_{rs,sr} & {\mathcal V}}\). See Figure~\ref{fig-graph} for an illustration with the Fibonacci matrix $A=\left(\begin{array}{cc} 1 & 1 \\ 1 & 0 \end{array}\right)$. \begin{figure} \caption{{\small The graph ${\mathcal G}$ associated with the Fibonacci matrix.}} \label{fig-graph} \end{figure} \begin{figure} \caption{{\small The stationary Bratteli diagram associated with the Fibonacci matrix.}} \label{fig-brat} \end{figure} We denote by $\Pi_n({\mathcal G})$, or simply by $\Pi_n$ if the graph is understood, the set of paths of length $n$ over ${\mathcal G}$: sequences of $n$ edges $\varepsilon_1\cdots \varepsilon_n$ such that $r(\varepsilon_i)=s(\varepsilon_{i+1})$. We also set $\Pi_0({\mathcal G})={\mathcal V}$. We extend the range and source maps to paths: if $\gamma=\varepsilon_1\cdots \varepsilon_n$ then $r(\gamma):=r(\varepsilon_n)$ and $s(\gamma):=s(\varepsilon_1)$. Also, given $\gamma= \varepsilon_1 \cdots \varepsilon_i \cdots \varepsilon_n$, we denote by $\gamma_i=\varepsilon_i$ the $i$-th edge along the path. The number of paths of length $n$ starting from $v$ and ending in $w$ is then ${A^n}_{vw}$. We require that $A$ is {\em primitive}: \(\exists N \in {\mathbb N}, \, \forall v,w, \, A^N_{vw}>0\). (For the graph ${\mathcal G}$, this means that for any two vertices $v,w$ there is at least one oriented path of length $N$ from $v$ to $w$; for the graph ${\mathcal G}^N$ it means that for any two vertices $v,w$ there is at least one oriented edge from $v$ to $w$.) Under this assumption, $A$ has a non-degenerate positive eigenvalue $\lambda_{\text{\rm \tiny PF}}$ which is strictly larger than the modulus of any other eigenvalue. This is the Perron-Frobenius eigenvalue of $A$. Let us denote by $L$ and $R$ the left and right Perron-Frobenius eigenvectors of $A$ ({\it i.e.}\ those associated with $\lambda_{\text{\rm \tiny PF}}$), normalized so that \begin{equation} \label{eq-normalization} \sum_{j}R_j = 1,\quad \sum_{j} R_j L_j = 1\, . \end{equation} Let us also write the minimal polynomial of $A$ as \(\mu_A(\lambda)=\prod_{k=1}^p (\lambda-\lambda_k)^{m_k}\) with $\lambda_1=\lambda_{\text{\rm \tiny PF}}$ and $m_1=1$.
Then from the Jordan decomposition of $A$ one can compute the asymptotics of the powers of $A$ as follows \cite{HJ94}: \begin{equation} \label{eq-An} A^n_{ij}= R_i L_j \; \lambda_{\text{\rm \tiny PF}}^n + \sum_{k=2}^p P^{(ij)}_k(n) \; \lambda_k^n \,, \end{equation} where $P^{(ij)}_k$ is a polynomial of degree $m_k$ if $n\ge m_k$, and of degree less than $m_k$ if $n<m_k$. Let $M_j$ be the algebraic multiplicity of the $j$-th eigenvalue of $A$ (hence $M_1=1$). Let $R^{j,l}$, for $1\le j \le p$ and $1\le l \le M_j$, be a basis of (right) eigenvectors of $A$: \(AR^{j,l} = \lambda_j R^{j,l}\). Let also $L^{j,l}$ be a basis of left eigenvectors of $A$ normalized so that \(R^{j,l} \cdot L^{j',l'}=\delta_{jj'} \delta_{ll'}\). So $R^{1,1}=R$ and $L^{1,1}=L$ as in equation~\eqref{eq-An}. For $1\le j \le p$, let us define \begin{equation} \label{eq-CH} C^j_{\hat{\mathcal H}} = \frac{1}{\lambda_j} \sum_{l=1}^{M_j} \sum_{\substack{v\in{\mathcal V} \\ h\in{\hat{\mathcal H}}}} R^{j,l}_v L^{j,l}_{s^2(h)}\,. \end{equation} Given ${\mathcal G}$ we consider the topological space $\Pi_\infty$ of all (one-sided) infinite paths over ${\mathcal G}$ with the standard topology. It is compact and metrisable. The set $\Pi_{\infty \ast}$ of infinite paths which eventually become ${l^*}$ forms a dense subset. \begin{rem} \label{rem-brat1}{\em This construction is equivalent to that of a {\em stationary Bratteli diagram} \cite{Bra72}: this is an infinite directed graph, with a copy of the vertices ${\mathcal V}$ at each level $n\ge 0$, and a copy of the edges ${\mathcal E}$ linking the vertices at level $n$ to those at level $n+1$ for all $n$ (there is also a root, and edges linking it down to the vertices at level $0$). So, for instance, the set $\Pi_n$ of paths of length $n$ is viewed here as the set of paths from level $0$ down to level $n$ in the diagram. See Figure~\ref{fig-brat} for an illustration (the root is represented by the hollow circle to the left). } \end{rem} Given ${\hat\tau}$ we obtain an embedding of $\Pi_n$ into $\Pi_{n+1}$ by $\varepsilon_1\cdots \varepsilon_n\mapsto \varepsilon_1\cdots \varepsilon_n{\hat\tau}(\varepsilon_n)$ and hence, by iteration, into $\Pi_{\infty \ast}\subset \Pi_\infty$. We denote the corresponding inclusion $\Pi_n\hookrightarrow \Pi_\infty$ by $\tau$. Given ${\hat{\mathcal H}}$ we define horizontal edges ${\mathcal H}_n$ between paths of $\Pi_n$, namely $(\gamma,\gamma')\in {\mathcal H}_n$ if $\gamma$ and $\gamma'$ differ only on their last edges $\varepsilon$ and $\varepsilon'$, and $(\varepsilon,\varepsilon')\in {\hat{\mathcal H}}$. For all $n$ we carry the orientation of ${\hat{\mathcal H}}$ over to ${\mathcal H}_n$. The {\em approximation graph} \(G_\tau=(V,E)\) is given by: \begin{eqnarray*} V = \bigcup_{n\ge 0} V_n\,, & V_n = \tau (\Pi_n) \subset \Pi_{\infty\ast}\,,\\ E = \bigcup_{n\ge 1} E_n\,, & E_n = \tau\times\tau({\mathcal H}_n)\,, \end{eqnarray*} together with the orientation inherited from ${\mathcal H}_n$: so we write \(E_n = E_n^+ \cup E_n^-\) for all $n$, and \(E=E^+\cup E^-\). Given $h\in{\hat{\mathcal H}}$ we denote by ${\mathcal H}_n(h),E_n(h),$ and $E(h)$ the corresponding sets of edges of type $h$.
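For illustration, consider the Fibonacci graph of Figure~\ref{fig-graph} with the maximal choice of $\hat{\mathcal H}$: here ${\mathcal E}$ consists of the loop ${l^*}$ based at a vertex which we call $1$, an edge from $1$ to $2$, and an edge from $2$ back to $1$, so $\hat{\mathcal H}$ consists of the two orientations of the unique pair of distinct edges with common source $1$. An element of ${\mathcal H}_n$ is a pair $(\gamma\varepsilon,\gamma\varepsilon')$ with $\gamma\in\Pi_{n-1}$ ending at the vertex $1$; the number of such $\gamma$ is $A^{n-1}_{11}+A^{n-1}_{21}=F_n+F_{n-1}=F_{n+1}$ (the $F_n$ being the Fibonacci numbers, $F_1=F_2=1$), so that $\#{\mathcal H}_n=\#E_n=2F_{n+1}$. The vectors $R=(\varphi^{-1},\varphi^{-2})$ and $L=(\varphi^{2}/\sqrt5,\,\varphi/\sqrt5)$ satisfy the normalization~\eqref{eq-normalization}, where $\varphi=\lambda_{\text{\rm \tiny PF}}$ is the golden mean, and equation~\eqref{eq-CH} gives \[ C^1_{\hat{\mathcal H}} = \frac{1}{\varphi}\Bigl(\sum_{v}R_v\Bigr)\,2L_1 = \frac{2\varphi}{\sqrt5}\,, \] in accordance with $\#E_n=2F_{n+1}\sim\frac{2\varphi}{\sqrt5}\,\varphi^n$.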
\begin{lemma} \label{lem-Gtau} The approximation graph $G_\tau=(V,E)$ is connected if and only if for all $\varepsilon,\varepsilon'\in{\mathcal E}$ with $s(\varepsilon)=s(\varepsilon')$ there is a path in ${\hat{\mathcal H}}$ linking $\varepsilon$ to $\varepsilon'$. Its set of vertices $V$ is dense in $\Pi_\infty$. \end{lemma} \begin{proof} Let $x,y\in V$, $x\neq y$, and let $n$ be the largest integer such that $x_i=y_i$ for $i=1, \cdots, n-1$, and $x_n\neq y_n$ (so $s(x_n)=s(y_n)$). Any path in $G_\tau$ linking $x$ to $y$ must contain a subpath linking $x_n$ to $y_n$. Hence $G_\tau$ is connected if and only if the above condition on ${\hat{\mathcal H}}$ is satisfied. The density of $V$ is clear since any base clopen set for the topology of $\Pi_\infty$, $[\gamma]=\{x\in \Pi_\infty\, :\, x_i=\gamma_i, \, i\le n\}$, $\gamma\in\Pi_n$, $n\in{\mathbb N}$, contains a point of $V$, namely $\tau(\gamma)$. \end{proof} Given an edge $e\in E$ we write $e^\text{\rm op}$ for the edge with the opposite orientation: \(s(e^\text{\rm op})=r(e), r(e^\text{\rm op})=s(e)\). Now our earlier construction \cite{KS10} yields a spectral triple from the data of the approximation graph $G_\tau$. The $C^*$-algebra is $C(\Pi_\infty)$, and it is represented on the Hilbert space $\ell^2(E)$ by: \begin{equation} \label{eq-rep} \pi(f)\varphi(e) = f(s(e))\varphi(e)\,. \end{equation} The Dirac operator is given by: \begin{equation} \label{eq-Dirac} D \varphi(e) = \rho^{-n} \varphi(e^\text{\rm op}) \,, \qquad e \in E_n\,. \end{equation} The orientation yields a decomposition of $\ell^2(E)$ into \(\ell^2(E^+) \oplus \ell^2(E^-)\). \begin{theo} \label{thm-ST} $(C(\Pi_{\infty}({\mathcal G})),\ell^2(E),D)$ is an even spectral triple with ${\mathbb Z}/2$-grading \(\chi\) which flips the orientation. Its representation is faithful. If ${\hat{\mathcal H}}$ is sufficiently large ({\it i.e.}\ satisfies the condition in Lemma~\ref{lem-Gtau}) then its spectral distance $d_s$ is compatible with the topology on $\Pi_{\infty}({\mathcal G})$, and one has: \begin{equation} \label{eq-ds} d_s(x,y)=c_{xy} \rho^{n_{xy}} + \sum_{n>n_{xy}} \bigl( b_n(x)+b_n(y) \bigr)\rho^n\,, \ \text{\rm for } x\neq y\,, \end{equation} where $n_{xy}$ is the largest integer such that $x_i=y_i$ for $i<n_{xy}$, and $b_n(z)=1$ if ${\hat\tau}(z_n) \neq z_{n+1}$ and $b_n(z)=0$ otherwise, for any $z\in\Pi_\infty$. The coefficient $c_{xy}$ is the number of edges in a shortest path in ${\hat{\mathcal H}}$ linking $x_{n_{xy}}$ to $y_{n_{xy}}$. If ${\hat{\mathcal H}}$ is maximal, \({\hat{\mathcal H}}= \left\{ (\varepsilon,\varepsilon') \in {\mathcal E}\times{\mathcal E} \, : \, \varepsilon\neq \varepsilon', \; s(\varepsilon)=s(\varepsilon') \right\}\), then $c_{xy}=1$ for all $x,y\in\Pi_\infty$. \end{theo} \begin{proof} The first statements are clear. In particular the commutator $[D,\pi(f)]$ is bounded if $f$ is a locally constant function. The representation is faithful by density of $V$ in $\Pi_\infty$. If ${\hat{\mathcal H}}$ satisfies the condition in Lemma~\ref{lem-Gtau}, then the graph $G_\tau$ is connected. It is also a metric graph, with lengths given by $\rho^n$ for all edges $e\in E_n$. By a previous result (Lemma 2.5 in \cite{KS10}) $d_s$ is an extension to $\Pi_\infty$ of this graph metric, and as $\sum_n \rho^n < + \infty$, it is continuous and given by equation~\eqref{eq-ds} (by straightforward generalizations of Lemma 4.1 and Corollary 4.2 in \cite{KS10}).
\end{proof} \subsection{Zeta function}\label{ssec-zeta} We determine the zeta function for the triple associated with the above data. \begin{theo} \label{thm-zeta} Suppose that the graph matrix is diagonalizable with eigenvalues $\lambda_j$, $j=1, \ldots, p$. The zeta function $\zeta$ extends to a meromorphic function on ${\mathbb C}$ which is invariant under the translation \(z \mapsto z+\frac{2\pi \imath}{\log \rho}\). It is given by \[ \zeta(z) = \sum_{j=1}^p \frac{C_{\hat{\mathcal H}}^j }{1-\lambda_j\rho^z} + h(z) \] where $h$ is an entire function, and $C_{\hat{\mathcal H}}^j$ is given in equation~\eqref{eq-CH}. In particular $\zeta$ has only simple poles, which are located at \(\{\frac{\log\lambda_j+2\pi \imath k}{-\log \rho}: k\in {\mathbb Z}, j=1,\ldots, p\}\), with residues given by \begin{equation} \label{eq-s0res} \mbox{\rm Res}(\zeta,\tfrac{\log\lambda_j+ 2\pi \imath k}{-\log \rho})=\frac{ C_{\hat{\mathcal H}}^j}{-\log\rho} \,. \end{equation} In particular, the metric dimension is equal to $s_0 = \frac{\log\lambda_{\text{\rm \tiny PF}}}{-\log \rho}$. \end{theo} \begin{proof} Clearly \[ \zeta(z) = \sum_{n\ge 1} \rho^{nz} \,\#E_n\,. \] The cardinality of $E_n$ can be computed by summing, over all vertices $v$ at level $n-1$ and all edges $h\in \hat{\mathcal H}$ with $s^2(h)=v$, the number of paths from level $0$ down to $v$ at level $n-1$: \[ \# E_n = \sum_{v\in {\mathcal V}} \sum_{h\in \hat{\mathcal H}} A^{n-1}_{v\, s^2(h)} \,. \] Now since $A$ is diagonalizable, the polynomials $P_{k}^{ij}$ in equation~\eqref{eq-An} are all constant and can be expressed in terms of its (right and left) eigenvectors $R^{k,l},L^{k,l}$, $1\le l\le M_k$. (These vectors were normalized so that the $R^{k,l}$ are the columns of the matrix of change of basis which diagonalizes $A$, and the $L^{k,l}$ are the rows of its inverse.) So from equation~\eqref{eq-An} we get, for all $n$, \( \# E_n = \sum_{k = 1}^p C^k_{\hat{\mathcal H}} \; \lambda_k^{n}\,, \) and hence \[ \zeta(z) = \sum_{k = 1}^p C^k_{\hat{\mathcal H}} \sum_{n\ge 1} \lambda_{k}^n \rho^{nz} = \sum_{k = 1}^p C^k_{\hat{\mathcal H}} \frac{\lambda_k\rho^z}{1-\lambda_k\rho^{z}}\,. \] Hence $\zeta$ has a simple pole at the values of $z$ for which $\rho^{z}\lambda_k = 1$, $k=1,\ldots, p$. The calculation of the residues is direct. \end{proof} The periodicity of the zeta function with purely imaginary period, whose length is determined by the factor $\rho$ alone, is a feature which distinguishes our self-similar spectral triples from the known triples for manifolds. Note also that $\zeta$ may have a (simple) pole at $0$, namely if $1$ is an eigenvalue of the graph matrix $A$. \begin{rem} \label{rem-zeta} In the general case, when $A$ is not diagonalizable, it is no longer true that the zeta function has only simple poles. Here the polynomials $P_{k}^{ij}(n)$ in equation~\eqref{eq-An} are non-constant (of degree $m_k-1>0$ for $k=2,\ldots, p$), and give rise to power terms in the sum for $\zeta(z)$ written in the proof of Theorem~\ref{thm-zeta} (one gets sums of the form \(\sum_{n\ge 1} n^a\lambda_k^n\rho^{nz}\) for integers $a\le m_k-1$). In this case $\zeta(z)$ has poles of order $m_j$ at \(z=\frac{\log\lambda_j+ 2\pi \imath k}{-\log \rho}\).
\end{rem} \subsection{Heat kernel} We derive here the asymptotic behaviour of the trace of the heat kernel ${\rm Tr\,}(e^{-t D^2})$ around $0$. We give an explicit formula when the graph matrix $A$ is diagonalizable, and explain briefly afterwards (see Remark~\ref{rem-heatkernel}) how the formula has to be corrected when this is not the case. For $r>0$, $\RMe(\alpha)>0$ and $s\in\RM$ we define \begin{equation} \label{eq-fnper1} \widetilde\mathfrak p(r,\alpha,s) = \sum_{k=-\infty}^\infty \Gamma(\alpha+\mathfrak prac{2\pi \imath k}{r})\, e^{2\pi \imath k s} \,. \end{equation} In particular, \begin{equation} \label{eq-fnper2} \mathfrak p_{r,a}(\sigma) := \mathfrak prac1r \widetilde\mathfrak p(r,\mathfrak prac{a}{r},\mathfrak prac{\sigma}{r}) \end{equation} is a periodic function of period $r$, with average over a period \(br \mathfrak p_{r,a}=\mathfrak prac{1}{r} \Gamma(\mathfrak prac{a}{r})\). \begin{theo} \label{thm-heatkernel} Consider the above spectral triple for a Bratteli diagram with graph matrix $A$ and parameter $\rho\in(0,1)$. We assume that $A$ has no eigenvalue of modulus one. Write its (generalized) eigenvalues $\lambda_j$, for $j=1, \ldots p$. Let $m_j$ be the size of the largest Jordan block of $A$ to eigenvalue $\lambda_j$, and $P_j$ the polymomial of degree $m_j$ as in equation~_{\mbox{\scriptsize\em eq}}ref{eq-An}. Then the trace of the heat kernel has the following expansion as $t\to 0^+$: \begin{equation} \label{eq-traceheat} {\rm Tr\,}(e^{-t D^2}) = \sum_{j : |\lambda_j| > 1} P_j \Bigl( \mathfrak prac{1}{-\log \rho}\mathfrak prac{d}{d s_j} \Bigr) \, \mathfrak p_{-2\log \rho, -s_j \log\rho}({-\log t}) \; t^{- \mathfrak prac{s_j}{2}} \ + h(t) \, \end{equation} where $s_j = \mathfrak prac{\log\lambda_j}{-\log \rho}$, and $h$ is a smooth function around $0$. The leading term of the expansion comes from the Perron-Frobenius eigenvalue, and one has the equivalent \begin{equation} \label{eq-heatequiv} {\rm Tr\,}(e^{-t D^2}) \ {\mathscr A}ym C^1_{{\hat{a}}t{\mathcal H}} \ \mathfrak p_{-2\log \rho,\log\lambda_{\text{\rm \tiny PF}}}(-\log t) \ t^{- \mathfrak prac{s_0}{2}} \,. \end{equation} where $s_0= \mathfrak prac{\log \lambda_{\text{\rm \tiny PF}}}{- \log \rho}$ is the spectral dimension as given in Theorem~\ref{thm-zeta}, and $C^1_{{\hat{a}}t{\mathcal H}}$ is given in equation~_{\mbox{\scriptsize\em eq}}ref{eq-CH}. \end{theo} \begin{proof} From equation~_{\mbox{\scriptsize\em eq}}ref{eq-An} and the proof of Theorem~\ref{thm-zeta} we have, \( \# E_n = \sum_{j = 1}^p P_j(n) \lambda_j^n\,, \) where $P_j$ is a polynomial of degre $m_j$, for all $n$ greater than or equal to $\max\{m_j : 0\le j \le p\}$ (and if $n<\max\{m_j : 0\le j \le p\}$, $P_j$ has to be replaced by a polynomial of degree less than or equal to $m_j$, depending on $n$). Setting $v = \rho^{-2}$ the trace of the heat kernel reads \begin{equation} \label{eq-heat-1} \sum_n \# E_n e^{-t \rho^{-2n}} = \sum_{j=1}^p \sum_{n=0}^{+\infty}P_j(n) \lambda_j^n \exp(-tv^n) + g_1(t) \\ \end{equation} where $g_1(t)$ is a smooth function around zero (the finite sum over $n<\max\{m_k : 0\le k \le p\}$ of terms correcting the formula for $\# E_n$). First consider an eigenvalue of modulus less than one: $\lambda_j$ with $|\lambda_j|<1$. We have \[ \bigl| P_j(n) \lambda_j^n e^{-tv^n} \bigr| \le \bigl| P_j(n) \lambda_j^{n/2} \bigr| |\lambda_j|^{n/2} |e^{-tv^n}| \bigr|\le c_j |\lambda_j|^{n/2} \] where $c_j$ is a constant. 
So the corresponding series in equation~_{\mbox{\scriptsize\em eq}}ref{eq-heat-1} is absolutely summable, and therefore eigenvalues of modulus less than 1 do not contribute to the singular behaviour of the trace. Hence the trace of equation~_{\mbox{\scriptsize\em eq}}ref{eq-heat-1} can be rewritten as \begin{eqnarray*} \sum_n \# E_n e^{-t \rho^{-2n}} & = &\sum_{j: |\lambda_j| > 1} \sum_{n=0}^{+\infty}P_j(n) v^{n\mathfrak prac{s_j}{2}} \exp(-tv^n) + g_2(t) \\ & = & \sum_{j: |\lambda_j| > 1} P_j(\mathfrak prac1{-\log \rho}\mathfrak prac{d}{d s_j}) \sum_{n=0}^{+\infty}v^{n \mathfrak prac{s_j}{2}} \exp(-tv^n) + g_2(t), \end{eqnarray*} where $g_2$ is a smooth function around $0$. Consider now an eigenvalue of modulus greater than one: $\lambda_j$ with $|\lambda_j|>1$. We thus have $\RMe(s_j)>0$, and we can write \[ \sum_n v^{n \mathfrak prac{s_j}{2}} e^{-t \rho^{-2n}} = \sum_{n=-\infty}^{+\infty} v^{n\mathfrak prac{s_j}{2}} \exp(-tv^n) - \sum_{n=1}^{+\infty} v^{-n \mathfrak prac{s_j}{2}}\exp(-tv^{-n}). \] The term \(\sum_{n=1}^{+\infty} v^{-n \mathfrak prac{s_j}{2}}\exp(-tv^{-n})\) being bounded at $0$ we only need to concentrate on the first sum. Clearly \(t^{\mathfrak prac{s_j}{2}}\sum_{n=-\infty}^{+\infty} v^{n\mathfrak prac{s_j}{2}} \exp(-tv^n)=f(\mathfrak prac{\log t}{\log v})\) where \[ f(s) = \sum_{n=-\infty}^\infty v^{(n+s)\mathfrak prac{s_j}{2}} \exp(-v^{n+s})\,. \] By standard arguments this series is uniformly convergent and defines a smooth $1$-periodic function $f$. It follows that the singular behaviour of $\sum_{n=0}^{+\infty} v^{n\mathfrak prac{s_j}{2}} \exp(-tv^n)$ as $t\to 0^+$ is given by $f(\mathfrak prac{\log t}{\log v}) t^{-\mathfrak prac{s_j}{2}}$. So we get that the trace reads: \begin{equation} \label{eq-heat3} \sum_n \# E_n e^{-t \rho^{-2n}} = \sum_{j: |\lambda_j| > 1} P_j(\mathfrak prac1{-\log \rho}\mathfrak prac{d}{d s_j}) f(\mathfrak prac{\log t}{\log v}) t^{-\mathfrak prac{s_j}{2}} + h(t), \end{equation} where $h$ is a smooth function around $0$. And we are left with identifying the function $f$. Its Fourier coefficients are given by \[ {\hat{a}}t f_k = \int_{-\infty}^\infty e^{\log(v) \alpha_j x} \exp(-v^x) e^{2\pi \imath k x}dx = \mathfrak prac{\Gamma(\alpha_j+\mathfrak prac{2\pi \imath k}{\log v})}{\log v}, \] so we see from equation~_{\mbox{\scriptsize\em eq}}ref{eq-fnper1} and~_{\mbox{\scriptsize\em eq}}ref{eq-fnper2} that \(f(s) = \mathfrak prac{1}{-2\log\rho}\widetilde\mathfrak p(-2\log\rho,\alpha_j,-s)\). Hence the singular term associated with $\lambda_j$ reads \( f(\mathfrak prac{\log t}{\log v}) t^{-\mathfrak prac{s_j}{2}} = \mathfrak p_{-2\log \rho, -s_j \log \rho}(-\log t)\). We substitute this back into equation~_{\mbox{\scriptsize\em eq}}ref{eq-heat3} to complete the proof of equation~_{\mbox{\scriptsize\em eq}}ref{eq-traceheat}. Clearly $s_0= \mathfrak prac{\log \lambda_{\text{\rm \tiny PF}}}{-\log \rho}$ has the greatest modulus among all the other $s_j$. Hence the leading term in the expansion comes from the Perron-Frobenius eigenvalue. Since $\lambda_1=\lambda_{\text{\rm \tiny PF}}$ is an eigenvalue, $P_1=C^1_{{\hat{a}}t {\mathcal H}}$ is the constant polynomial given in equation~_{\mbox{\scriptsize\em eq}}ref{eq-CH}, which proves equation~_{\mbox{\scriptsize\em eq}}ref{eq-heatequiv}. \end{proof} We could have determined asymptotic expansion of the trace of the heat kernel using the inverse Mellin transform of the function $\zeta(2s)\Gamma( s)$. This is, of course, a lot more complicated than the direct computations. 
When using the inverse Mellin transform of $\zeta(2s)\Gamma(s)$ one can see that the origin of the periodic function $\mathfrak p$ is directly related to the periodicity of the zeta function, and that the appearance of the term $\log t$ in the expansion of the trace of the heat kernel arises from a simple pole of $\zeta(s)$ at $s=0$, which amounts to a {\em double} pole of $\zeta(2s)\Gamma(s)$ at $s=0$. \begin{rem} \label{rem-heatkernel} If $A$ is not diagonalizable, we do not know how to compute the contributions of eigenvalues of modulus one, which is why we assumed in Theorem~\ref{thm-heatkernel} that $A$ has none. But if $A$ is diagonalizable, the contributions of such eigenvalues are easily computed. Eigenvalues of modulus one, other than one itself, do not contribute to the singular behaviour of the trace. Only the eigenvalue $\lambda_{j_0}=1$, if present in the spectrum of $A$, gives an extra term. And eigenvalues of modulus greater than one contribute as in equation~\eqref{eq-traceheat}, but with $P_j=1$ since $A$ is diagonalizable. The trace of the heat kernel in this case has the following expansion as $t\to 0^+$: \begin{equation} \label{eq-traceheatdiag} {\rm Tr\,}(e^{-t D^2}) = \sum_{j : |\lambda_j| > 1} C^{j}_{\hat{\mathcal H}} \, \mathfrak p_{-2\log \rho, -s_j \log \rho}({-\log t}) \; t^{-\frac{s_j}{2}} \ +C^{j_0}_{\hat{\mathcal H}}\frac{-\log t}{-2\log \rho} + h(t) \, \end{equation} where $s_j=\frac{\log\lambda_j}{-\log \rho}$, $C^j_{\hat{\mathcal H}}$ is given in equation~\eqref{eq-CH}, $j_0$ is the index of the eigenvalue $\lambda_{j_0}=1$ (setting $C^{j_0}_{\hat{\mathcal H}}=0$ if $A$ has no eigenvalue equal to $1$) and $h$ is a smooth function around $0$. The trace of the heat kernel therefore has the same equivalent as in equation~\eqref{eq-heatequiv}. \end{rem} \subsection{Spectral state}\label{ssec-state} There is a natural Borel probability measure on $\Pi_\infty$. Indeed, due to the primitivity of the graph matrix there is a unique Borel probability measure which is invariant under the action of the groupoid given by tail equivalence. Let us explain this. If we denote by $[\gamma]$ the cylinder set of infinite paths beginning with $\gamma$, then invariance under the above-mentioned groupoid means that $\mu([\gamma])$ depends only on the length $|\gamma|$ of $\gamma$ and its range, i.e.\ $\mu([\gamma]) = \mu(|\gamma|,r(\gamma))$. By additivity we have \[ \mu([\gamma]) = \sum_{\varepsilon:s(\varepsilon)=r(\gamma)} \mu([\gamma \varepsilon]) \] which translates into \[ \mu(n,v) = \sum_{w} A_{vw}\mu(n+1,w)\,. \] The unique solution to that equation is \[ \mu(n,v) = \lambda_{\text{\rm \tiny PF}}^{-n}R_v \] where $R$ is the {\em right} Perron-Frobenius eigenvector of the graph matrix $A$, normalized as in equation~\eqref{eq-normalization}. So if $\gamma \in \Pi_n$ is a path of length $n$, then \(\mu([\gamma]) = \lambda_{\text{\rm \tiny PF}}^{-n} R_{r(\gamma)}\). \begin{theo} \label{thm-specmeas} All operators of the form $\pi(f)$, $f\in C(\Pi_\infty)$, are strongly regular. Moreover, the measure $\mu$ defined on $f\in C(\Pi_\infty)$ by \[ \mu(f) : = {\mathcal T}(\pi(f)) \] is the unique measure which is invariant under the groupoid of tail equivalence.
\end{theo} \begin{proof} Let $f$ be a measurable function on $\Pi_\infty$, and set \[ \mu_n(f)= \mathfrak prac{{\rm Tr\,}_{E_n}\bigl(\pi(f)\vert_{E_n}\bigr)}{\# E_n}=\mathfrak prac{\sum_{e\in E_n} f(s(e))}{\# E_n}. \] To check that the sequence $(\mu_n(f))_{{\mathbb N}}$ converges it suffices to consider $f$ to be a characteristic function of a base clopen set for the (Cantor) topology of $\Pi_\infty$. Let $\gamma$ be a finite path of length $|\gamma|<n$ and denote by $\chi_\gamma$ the characteristic function on $[\gamma]$. Then $\chi_\gamma (s(e))$ is non-zero if the path $s(e)$ starts with $\gamma$. Given that the tail of the path $s(e)$ is determined by the choice function $\tau$, the number of $e\in E_n$ for which $\chi_\gamma (s(e))$ is non-zero coincides with the number of paths of length $n-|\gamma|-1$ which start at $r(\gamma)$ and end at $s(s(e)_{n-1}) = s^2(h)$, for some $h\in {\hat{a}}t{\mathcal H}$. Hence \[ \sum_{e\in E_n} \chi_\gamma (s(e)) = \sum_{h\in {\hat{a}}t\mathcal H} A^{n-|\gamma|-1}_{r(\gamma)\,s^2(h)} \,. \] As noted before in the proof of Theorem~\ref{thm-zeta}, the cardinality of $E_n$ is asymptotically $C^1_{{\hat{a}}t{\mathcal H}}\lambda_{\text{\rm \tiny PF}}^{n}$, so we have \[ \mu_n (\chi_\gamma)= \mathfrak prac{\sum_{e\in E_n} \chi_\gamma(s(e))}{\#E_n} = \lambda_{\text{\rm \tiny PF}}^{-|\gamma|-1} \mathfrak prac{1}{C^1_{{\hat{a}}t{\mathcal H}}} \sum_{h\in {\hat{a}}t\mathcal H} R_{r(\gamma)} L_{s^2(h)} \ (1 + o(1) ) \,. \] Set \(U_v = (\lambda_{\text{\rm \tiny PF}}^{-1}/C^1_{{\hat{a}}t {\mathcal H}}) \sum_{h\in {\hat{a}}t\mathcal H} R_{v} L_{s^2(h)})\). We readily check that $U$ is a (right) eigenvector of $A$ with eigenvalue $\lambda_{\text{\rm \tiny PF}}$, and since its coordinates add up to $1$, we have $U = R$. So we get \[ \mu_n(\chi_\gamma) = \lambda_{\text{\rm \tiny PF}}^{-|\gamma|} R_{r(\gamma)} (1+o(1)) \xrightarrow{n\rightarrow +\infty} \lambda_{\text{\rm \tiny PF}}^{-|\gamma|} R_{r(\gamma)} = \mu([\gamma])\,. \] Now Corollary~\ref{cor-cesar} implies that $\pi(f)$ is strongly regular and ${\mathcal T}(\pi(f))=\mu(f)$. \end{proof} Below, when we discuss in particular the transversal part of the Dirichlet forms for a substitution tiling, we will encounter operators which are weakly regular, but not strongly regular, and so, a priori, the spectral state is not multiplicative on tensor products of these. The following lemma will be useful in this case. \begin{lemma} \label{lem-stateres} Consider the above spectral triple for a Bratteli diagram with graph matrix $A$ and parameter $\rho\in(0,1)$. Let $A\in {\mathcal B}({\mathfrak H})$ be such that $br A_n stackrel{n\rightarrow \infty}{\sim} e^{\imath n\varphi}$ for some $\varphi\in(0,2\pi)$. Then \[ {\rm Tr\,}(e^{-t D^2} A) {\mathscr A}ym e^{\imath\mathfrak prac{\varphi \log t}{2\log\rho}} {C^1_{{\hat{a}}t\mathcal H}}\, \mathfrak p_{-2\log \rho,\log\lambda_{\text{\rm \tiny PF}}+\imath\varphi}(-\log t) \, t^\mathfrak prac{\log \lambda_{\text{\rm \tiny PF}}}{2\log\rho}\,. \] In particular, $A$ is weakly regular and ${\mathcal T}(A) = 0$. \end{lemma} \begin{proof} As before we set $\sigma = \mathfrak prac{\log t}{\log v}$, $v = \rho^{-2}$, $\alpha = \mathfrak prac{\log\lambda_{\text{\rm \tiny PF}}}{-2\log \rho}$ so that we have to determine the asymptotic behaviour of $\sum_{n=1}^\infty e^{\imath n \varphi} v^{\alpha(n+\sigma)} e^{-v^{n+\sigma}}$ when $\sigma\to -\infty$. 
Since $\sum_{n=-\infty}^{-1} e^{\imath n \varphi} v^{\alpha(n+\sigma)} e^{-v^{n+\sigma}}$ is absolutely convergent, the sum over ${\mathbb N}$ has the same asymptotic behaviour as the sum over ${\mathbb Z}$. Now \begin{eqnarray*} \sum_{n=-\infty}^\infty e^{\imath n \varphi} v^{\alpha(n+\sigma)} e^{-v^{n+\sigma}} & = & e^{-\imath \sigma \varphi} \sum_{n=-\infty}^\infty v^{(\alpha+\frac{\imath \varphi}{\log v})(n+\sigma)} e^{-v^{n+\sigma}}\\ &=& e^{-\imath \sigma \varphi}\, \frac1{\log v}\, \widetilde{\mathfrak p}(\log v,\tfrac{\log\lambda_{\text{\rm \tiny PF}}+\imath \varphi}{\log v},-\sigma) \end{eqnarray*} from which the first statement follows. ${\mathcal T}(A)$ is equal to the mean of the function $\sigma\mapsto e^{-\imath \sigma \varphi}\,\frac1{\log v}\,\widetilde{\mathfrak p}(\log v,\frac{\log\lambda_{\text{\rm \tiny PF}}+\imath \varphi}{\log v},-\sigma)$, which is zero since $\varphi + 2\pi k \neq 0$ for all integers $k$. \end{proof} We now consider the tensor product of two spectral triples associated with Bratteli diagrams, one with parameter $\rho<1$ and Perron-Frobenius eigenvalue $\lambda_{\text{\rm \tiny PF}}$, the other with parameter $\rho'<1$ and Perron-Frobenius eigenvalue $\lambda_{\text{\rm \tiny PF}}'$. \begin{lemma} \label{lem-resphi} Let $A$ be as in the last lemma. Then ${\mathcal T}(A\otimes {\mathbf 1}) = 0$ provided that, for all integers $k,k'$, \begin{equation} \varphi + 2\pi k + 2\pi \frac{\log \rho}{\log \rho'}k'\neq 0. \end{equation} \end{lemma} \begin{proof} By the above general results ${\mathcal T}(A\otimes {\mathbf 1})$ is equal to the mean of the function \(s\mapsto e^{\imath \frac{s\varphi}{\log v}}\,\widetilde{\mathfrak p}(\log v,\frac{\log\lambda_{\text{\rm \tiny PF}}+\imath\varphi}{\log v},\frac{-s}{\log v})\,\widetilde{\mathfrak p}(\log v',\frac{\log\lambda_{\text{\rm \tiny PF}}'}{\log v'},\frac{-s}{\log v'})\) divided by the mean of the function \(s\mapsto \widetilde{\mathfrak p}(\log v,\frac{\log\lambda_{\text{\rm \tiny PF}}}{\log v},\frac{-s}{\log v})\,\widetilde{\mathfrak p}(\log v',\frac{\log\lambda_{\text{\rm \tiny PF}}'}{\log v'},\frac{-s}{\log v'})\). The latter is always strictly greater than zero. By developing the two $\widetilde{\mathfrak p}$-functions into Fourier series one sees that the former mean is zero if $s\mapsto e^{\imath\frac{s\varphi}{\log v}}e^{-\imath\frac{2\pi k s}{\log v}}e^{-\imath \frac{2\pi k' s}{\log v'}}$ is oscillating for all $k,k'$. \end{proof} Motivated by this lemma we define: \begin{defini} \label{def-resphase} Let $\rho,\rho'\in(0,1)$. We call $\varphi\in (0,2\pi)$ a {\em non-resonant phase} (for $(\rho,\rho')$) if \begin{equation} \label{eq-resphase} \varphi + 2\pi k + 2\pi \frac{\log \rho}{\log \rho'}k'\neq 0 \,, \quad \forall k,k'\in{\mathbb Z}\,. \end{equation} \end{defini} If $\frac{\log \rho}{\log\rho'}$ is irrational and, for instance, $\varphi = 2\pi \frac{\log \rho}{\log \rho'}$, then $\varphi$ is resonant and, as follows easily from the calculation in the proof, ${\mathcal T}(A\otimes {\mathbf 1})\neq 0$. In particular, ${\mathcal T}(A\otimes {\mathbf 1})\neq {\mathcal T}_1(A){\mathcal T}_2({\mathbf 1})$. \subsection{Quadratic form}\label{ssec-form} In Section~\ref{ssec-Laplace} we considered a quadratic form which we now specialize to the context of spectral triples defined by Bratteli diagrams.
Let $\zeta$ be the zeta function, $s_0$ its abscissa of convergence, and $\mu$ the spectral measure on $\Pi_\infty$, as defined in Sections~\ref{ssec-zeta} and~\ref{ssec-state}. Equations~\eqref{eq-rep} and~\eqref{eq-Dirac} imply, for $e\in E_{n}$, \begin{equation} \label{eq-deltaef0} [D,\pi(f)] \varphi(e) = (\delta_{e} f) \, \varphi(e^\text{\rm op}) \,, \quad \text{\rm with}\quad \delta_e f = \frac{f(r(e)) -f(s(e))}{\rho^n} \,. \end{equation} Equation~\eqref{eq-Dirform} therefore becomes \begin{eqnarray}\nonumber Q(f,g) &=& \lim_{s\rightarrow s_0^+} \frac{1}{\zeta(s)} {\rm Tr\,} \bigl( |D|^{-s} [D,\pi(f)]^* [D,\pi(g)] \bigr) \\ \label{eq-Q2} &=& \lim_{s\rightarrow s_0^+} \frac{1}{\zeta(s)} \sum_{n\geq 1} \#E_n \rho^{ns} \; q_n(f,g) \end{eqnarray} with \begin{equation}\label{eq-qn} q_n(f,g) = \frac{1}{\#E_n}\sum_{e\in E_n} \; \overline{\delta_e f} \; \delta_e g\,. \end{equation} Note that $ \lim_{s\rightarrow s_0^+} \frac{1}{\zeta(s)} \sum_{n\geq 1} \#E_n \rho^{ns} = 1$. We thus have the following simple result: \begin{lemma}\label{lem-qn} $Q(f,g) = \lim_{n\to \infty} q_n(f,g)$ provided the limit exists. \end{lemma} The following can be said in general. \begin{proposi} \label{prop-form} The quadratic form $Q$ is symmetric, positive definite, and Markovian on \({\mathcal D}=\bigl\{f \in L^2_{{\mathbb R}}(\Pi_\infty,d \mu) \, : \, Q(f,f) < +\infty \bigr\}\,\). \end{proposi} \begin{proof} The form is clearly symmetric and positive definite. Let $\epsilon>0$ and consider a smooth approximation $g_\epsilon$ of the identity restricted to $[0,1]$: \(g_\epsilon(t)=t\) for \(t\in [0,1]\), \(-\epsilon \le g_\epsilon(t) \le 1+ \epsilon\) for \(t \in {\mathbb R}\), and \(0\le g_\epsilon(t) - g_\epsilon(t')\le t-t'\) for all \(t'\le t\). This last property implies that \( |\delta_{e} \ g_\epsilon \circ f | \le |\delta_{e} f | \) so that \(Q(g_\epsilon \circ f,g_\epsilon \circ f)\le Q(f,f)\). This proves that $Q$ is Markovian. \end{proof} For a precise evaluation of this quadratic form on certain domains we have, however, to consider more specific systems. \subsection{A simple example: The graph with one vertex and two edges} \label{sec-graph1} Recall that one difficulty with the Dirichlet form defined by our spectral triple (equation~\eqref{eq-Dirform} in Section~\ref{ssec-Laplace}) is that we need to specify a core for the form. We provide here an example where such a core can be suggested with the help of an additional structure. We consider the graph ${\mathcal G}$ which has one vertex and two edges, which are necessarily loops. Call one edge $0$ and the other $1$. Let ${l^*}$ be the edge $0$. Then it is clear that $\Pi_\infty$ can be identified with the set of all $\{0,1\}$-sequences, and $\Pi_{\infty \ast}$ with those sequences which eventually become $0$. We consider the spectral triple of Section~\ref{sec-ST-Bratteli} associated with the graph ${\mathcal G}$ and parameter $\rho=\frac12$. There is not much choice for the horizontal edges, ${\hat{\mathcal H}}=\{(0,1),(1,0)\}$, nor for ${\hat\tau}$: ${\hat\tau}(1) = 0$ and ${\hat\tau}(0) = 0$. We will look at this system from two different angles, justifying two different cores for the Dirichlet form. \paragraph{Group structure} The space of $\{0,1\}$-sequences carries an Abelian group structure.
In fact, if we identify $\Pi_n$ with $\ZM_{2^n}$ using $\gamma \mapsto \sum_{i=1}^n \gamma_i 2^{i-1}$ then we can write $\Pi_\infty$ as the inverse limit group $$\Pi_\infty = \lim_{\longleftarrow} \left (\ZM_{2^n}stackrel{\pi_n^{n+1}}{\longleftarrow} \ZM_{2^{n+1}}\right)$$ where $\pi_n^{n+1}$ maps $m\: mod \: 2^{n+1}$ to $m\: mod \:2^n$. It follows that $C(\Pi_\infty)$ is isomorphic to the group $C^*$-algebra of the Pontrayagin dual \[ {\hat{a}}t\Pi_\infty = \lim_{\longrightarrow} \left ({\hat{a}}t\ZM_{2^n}stackrel{{\hat{a}}t\pi_n^{n+1}}{\longrightarrow} {\hat{a}}t\ZM_{2^{n+1}}\right)\,. \] This suggest that a reasonable choice for the domain of the form would be the (algebraic) group algebra ${\mathbb C} {\hat{a}}t\Pi_\infty$ whose elements are finite linear combinations of elements of ${\hat{a}}t\Pi_\infty$. Now ${\mathbb C} {\hat{a}}t\Pi_\infty$ corresponds to the subalgebra $C_{l.c.}(\Pi_\infty)\subset C(\Pi_\infty)$ of locally constant functions. We have $\lim_{n\to \infty} q_n(f,g)=0$ if $f$ and $g$ are locally constant and so by Lemma~\ref{lem-qn} $Q$ exists on that core. However, $Q$ is identically $0$ on the domain defined by the core. So the algebraic choice of the domain which was motivated by the group structure is not very interesting. \paragraph{Embedding into $S^1$} Note that $\Pi_n$ can be identified with ${\hat{a}}t \ZM_{2^n} $ via the map \(\gamma \mapsto \exp(2\pi\imath \sum_{j=1}^n \gamma_j 2^{-j+1}(\cdot))\) so that $ \Pi_{\infty{\mathscr A}t}\cong{\hat{a}}t\Pi_\infty = \{\exp(2\pi\imath p (\cdot)):p\in\ZM[\mathfrak prac12]\cap [0,1)\}$. This suggest to view $\Pi_{\infty{\mathscr A}t}$ (via $\gamma \mapsto \exp(2\pi\imath \sum_{i\geq 1} \gamma_i 2^{-i+1})\in S^1 $) as a dense subset of $S^1$. The inclusion $\Pi_{\infty{\mathscr A}t}\hookrightarrow S^1$ extends to a continuous surjection $\eta:\Pi_\infty \to S^1$ which is almost everywhere one-to-one Furthermore the push forward of the measure $\mu$ on $\Pi_\infty$ is the Lebesgue measure on $S^1$. Hence the pull back $\eta^*$ induces an isometry between $L^2(S^1)$ and $L^2(\Pi_\infty)$. It follows that $\eta^*(C(S^1))$ is dense in $L^2(\Pi_\infty)$. This suggests to take as core for the quadratic form the pull backs of trigonometric polynomials over $S^1$. Now one sees from Equ.~\ref{eq-qn} that $\lim_{n\to \infty} q_n(f,g)$ exists, provided $\rho = \mathfrak prac12$. In fact, $\lim_{n\to \infty} q_n(f,g) = \langle \mathfrak prac{\partial f}{\partial x},\mathfrak prac{\partial g}{\partial x}\rightarrowngle$ showing that $Q$ has infinitesimal generator equal to the standard Laplacian on $S^1$. \subsection{Telescoping} There is a standard equivalence relation among Bratteli diagrams which is generated by isomorphisms and so-called telescoping. Since we are looking at stationary diagrams we consider stationary telescopings only. Then the following operations generate the equivalence relation we consider: \begin{enumerate} \item {\it Telescoping}: Given the above data built from a graph ${\mathcal G}=({\mathcal V},{\mathcal E})$, and a positive integer $p$, we consider a new graph ${\mathcal G}^p:= ({\mathcal V}^p,{\mathcal E}^p)$ with the same vertices: ${\mathcal V}^p={\mathcal V}$, and the paths of length $p$ as edges: ${\mathcal E}^p=\Pi_p({\mathcal G})$. The corresponding parameter is taken to be $\rho_p=\rho^p$. 
\item {\it Isomorphism}: Given two graphs as above ${\mathcal G}=({\mathcal V},{\mathcal E})$, ${\mathcal G}'=({\mathcal V}',{\mathcal E}')$, we say that the corresponding stationary Bratteli diagrams are isomorphic if there are two bijections ${\mathcal V} \rightarrow {\mathcal V}'$, ${\mathcal E} \rightarrow {\mathcal E}'$ which intertwine the range and source maps. We need in this case that the associated parameters be equal, and the sets of horizontal edges isomorphic (through a map which intertwines the range and source maps). \end{enumerate} We show now that this equivalence relation leaves the properties of the associated spectral triple invariant: \begin{enumerate}[(i)] \item The zeta functions are equivalent, so have the same spectral dimension: $s_0=\mathfrak prac{\log\lambda_{\text{\rm \tiny PF}}}{-\log\rho}$; \item The spectral measures are both equal to the invariant probability measure $\mu$ on $\Pi_\infty$; \item Both spectral distances generate the topology of $\Pi_\infty$ (provided ${\hat{a}}t{\mathcal H}$ is large enough as in Lemma~\ref{lem-Gtau}), and are furthermore Lipschitz equivalent. \end{enumerate} The invariance under isomorphism is trivial. We explain briefly how things work under telescoping. The horizontal edges for ${\mathcal G}^p$ are given as for ${\mathcal G}$, by the corresponding subset \[ {\hat{a}}t\mathcal H^p \subseteq \left\{ (\varepsilon,\varepsilon') \in {\mathcal E}^p\times{\mathcal E}^p \, : \, \varepsilon\neq \varepsilon', \; s(\varepsilon)=s(\varepsilon') \right\} \,, \] and so we have the identifications \[ {\mathcal H}_n^p \cong \bigcup_{i=0}^{p-1} {\mathcal H}_{np+i}\,, \qquad E_n^p\cong \bigcup_{i=0}^{p-1} E_{np+i} \] which allows us to determine the approximation graph $G_\tau^p=(V^p,E^p)$, and yields a unitary equivalence \(\ell^2(E) \cong \ell^2(E^p)\). We identify the two Hilbert spaces $\ell^2(E) \cong \ell^2(E^p)$ and the representations $\pi\cong \pi_p$, while the Dirac operators satisfy: \[ D_p = W^\dagger D W \quad \text{with } \quad W : \left\{ \begin{array}{ccl} \ell^2(E) & \rightarrow & \ell^2(E^p) \\ \delta_e & \mapsto & \rho^{-\mathfrak prac{k}{2}} \delta_e \,, \ \text{\rm for } e\in E_n, \text{\rm with } n= k \mod p\,. \end{array} \right. \] From the inequalities \( {\mathbf 1} \le W \le \rho^{-p} {\mathbf 1}\), we deduce that the zeta functions are equivalent, and that both spectral triple have the same spectral dimension $s_0$, and give rise to the same spectral measure $\mu$. By Theorem~\ref{thm-ST}, both Connes distances generate the topology of $\Pi_\infty=\Pi_\infty^p$, provided ${\hat{a}}t{\mathcal H}$ is large enough. Let us call $d_s^p$ the spectral metric associated with ${\mathcal G}^p$, with corresponding coefficients $n^p_{xy},c_{xy}^p, b^p_n$ as in equation~_{\mbox{\scriptsize\em eq}}ref{eq-ds}. Writing $n_{xy}=pn_{xy}^p +k_{xy}$, for some $k_{xy}\le p-1$, we have \[ d_s(x,y) = c_{xy} \rho^{k_{xy}} (\rho^p)^{n^p_{xy}} + \sum_{n> n^p_{xy}} (\rho^p)^n \sum_{k=0}^{p-1} \bigl( b_{np+k} (x) + b_{np+k} (y) \bigr) \rho^k \,, \] Now we see that \(b_{np+k}(z) = 1 \RMightarrow b_n^p(z)=1\), while if \(b_{np+k}(z)=0\) for all $k=0, \cdots p-1$, then $b_n^p(z)=0$ too, so that one has \( b_n^p(z) \le \sum_{k=0}^{p-1} b_{np+k}(z) \le p b_n^p(z)\). We substitute this back into the previous equation to get the Lipschitz equivalence: \[ c_p \ d_s^p (x,y) \ \le d_s(x,y) \le p \rho^p C_p \ d_s^p(x,y) \,, \] with $c_p$, $C_p$, the respective min and max of $c_{xy}/c^p_{xy}$ (which only depends on ${\mathcal H}$ and $p$). 
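For instance, for the Fibonacci graph, telescoping with $p=2$ replaces the graph matrix $A=\left(\begin{array}{cc} 1 & 1 \\ 1 & 0 \end{array}\right)$ by $A^2=\left(\begin{array}{cc} 2 & 1 \\ 1 & 1 \end{array}\right)$ and the parameter $\rho$ by $\rho^2$; the Perron-Frobenius eigenvalue becomes $\lambda_{\text{\rm \tiny PF}}^2$, and the spectral dimension is indeed unchanged: \[ \frac{\log \lambda_{\text{\rm \tiny PF}}^2}{-\log\rho^2} = \frac{\log\lambda_{\text{\rm \tiny PF}}}{-\log\rho} = s_0\,. \]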
\section{Substitution tiling spaces} \label{sec-tilings} Bratteli diagrams occur naturally in the description of substitution tilings. The path space of the Bratteli diagram defined by the substitution graph has been used to describe the transversal of such a tiling \cite{Forrest,Kel95}. As we will first show, an extended version can also be used to describe a dense subset of the continuous hull $\Omega_\Phi$ of the tiling, and we will therefore employ it, together with the construction of the previous section, to construct a spectral triple for $\Omega_\Phi$. We will then have a closer look at the Dirichlet form defined by the spectral triple. Under the assumption that the dynamical spectrum of the tiling is purely discrete we can identify a core for the Dirichlet form. We can then also compute the associated Laplacian explicitly. \subsection{Preliminaries} \label{ssec-tilingbasics} We recall the basic notions of tiling theory, namely tiles, patches, tilings of the Euclidean space ${\mathbb R}^d$, and substitutions. For a more detailed presentation, in particular of substitution tilings, we refer the reader to \cite{Grunbaum}. A {\em tile} is a compact subset of ${\mathbb R}^d$ which is homeomorphic to a ball. It possibly carries a decoration (for instance its collar). A {\em tiling} of ${\mathbb R}^d$ is a countable set of tiles $(t_i)_{i\in I}$ whose union covers ${\mathbb R}^d$ and whose interiors are pairwise disjoint. Given a tiling $T$, we call a {\em patch} of $T$ any set of tiles in $T$ which covers a bounded and simply connected set. A {\em prototile} (resp.\ {\em protopatch}) is an equivalence class of tiles (resp.\ patches) modulo translations. We will only consider tilings with finitely many prototiles and for which there are only finitely many protopatches containing two tiles (such tilings have finite local complexity, or FLC). The tilings we are interested in are constructed from a (finite) prototile set ${\mathcal A}$ and a substitution rule on the prototiles. A substitution rule is a decomposition rule followed by a rescaling, i.e.\ each prototile is decomposed into smaller tiles which, when stretched by a common factor $\theta>1$, are congruent to some prototiles. We call $\theta$ the {\em dilation factor} of the substitution. The decomposition rule can be applied to patches and to whole tilings, by simply decomposing each tile, and so can the substitution rule, provided the result of the decomposition is stretched by the factor $\theta$. We denote the decomposition rule by $\delta$ and the substitution rule by $\Phi$. In particular we have, for a tile $t$, $\delta (t+a) = \delta (t) + a$ and $\Phi (t+a) = \Phi (t) + \theta a$ for all $a \in {\mathbb R}^d$. See Figure~\ref{fig-chair} for an example in ${\mathbb R}^2$. \begin{figure} \caption{{\small A process of inflation and substitution (chair tiling).}} \label{fig-chair} \end{figure} A patch of the form $\Phi^n(t)$, for some tile $t$, is called an {\em $n$-supertile}, or $n$-th order supertile. A rescaled tile $\theta^{n} t$ will be called a {\em level $n$ tile} but also, if $n=-m<0$, an {\em $m$-microtile}, or $m$-th order microtile. A substitution defines a tiling space $\Omega_\Phi$: the set of all tilings $T$ with the property that any patch of $T$ occurs in a supertile of sufficiently high order. We will assume that the substitution is {\em primitive} and {\em aperiodic}: there exists an integer $n$ such that any $n$-supertile contains tiles of each type, and all tilings of $\Omega_\Phi$ are aperiodic.
This implies that, by inspection of a large enough but finite patch around them, the tiles of $\Omega_\Phi$ can be grouped into supertiles (one says that $\Phi$ is recognizable), so that $\delta$ and $\Phi$ are invertible. In particular, $\Phi$ is a homeomorphism of $\Omega_\Phi$ if the latter is equipped with the standard tiling metric \cite{AP}. We may suppose that the substitution {\em forces the border} \cite{Kel95}. This condition says that, given any tile $t$, its $n$-th substitute does not only determine the $n$-supertile $\Phi^n(t)$, but also all tiles that can be adjacent to it. It can be realized, for instance, by considering decorations of each type of tile, replacing ${\mathcal A}$ by the larger set of collared prototiles. There is a canonical action of ${\mathbb R}^d$ on the tiling space $\Omega_\Phi$, by translation, which makes it a topological dynamical system. Under the above assumptions, the dynamical system $(\Omega_\Phi, {\mathbb R}^d)$ is minimal and uniquely ergodic. The unique invariant and ergodic probability measure on $\Omega_\Phi$ will be denoted $\mu$. A particularity of tiling dynamical systems is that they admit natural transversals to the ${\mathbb R}^d$-action. To define such a transversal $\Xi$, we associate to each prototile a particular point, called its {\em puncture}. Each level $n$ tile being similar to a unique prototile, we may then associate to the level $n$ tile the puncture which is the image of the puncture of the prototile under the similarity. The transversal\footnote{Sometimes $\Xi$ is referred to as the {\em canonical transversal} or the {\em discrete hull}.} $\Xi$ is the subset of tilings $T\in\Omega_\Phi$ which have the puncture of one of their tiles at the origin of ${\mathbb R}^d$. The measure $\mu$ induces an invariant probability measure on $\Xi$ which gives the frequencies of the tiles and patches. \subsection{Substitution graph and the Robinson map} \label{ssec-robmap} The {\em substitution matrix} of the substitution $\Phi$ is the matrix with coefficients $A_{ij}$ equal to the number of tiles of type $t_i$ in $\Phi(t_j)$. The graph ${\mathcal G}$ of Section~\ref{sec-ST-Bratteli} underlying our constructions will here be the {\em substitution graph}: the graph whose graph matrix is the substitution matrix. More precisely, its vertices $v\in{\mathcal V}$ are in one-to-one correspondence with the prototiles, and we denote by $t_v$ the prototile corresponding to $v\in{\mathcal V}$, {\it i.e.}\ the prototile set reads \( {\mathcal A} = \{ t_v : v \in {\mathcal V}\}\). Between the vertices $u$ and $v$ there are $A_{uv}$ edges, corresponding to the $A_{uv}$ different occurrences of tiles of type $t_u$ in $\Phi(t_v)$. Here we call $u$ (or $t_u$) the source, and $v$ (or $t_v$) the range of these edges. Notice that the Perron-Frobenius eigenvalue of $A$ is the $d$-th power of the dilation factor $\theta$: \(\lambda_{\text{\rm \tiny PF}}=\theta^d\). The asymptotics of the powers of $A$ are given by equation~\eqref{eq-An} as before.
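For the chair tiling of Figure~\ref{fig-chair}, for instance, each tile is decomposed into four tiles and the dilation factor is $\theta=2$ in ${\mathbb R}^2$, so that, for any choice of (possibly collared) prototiles, $\lambda_{\text{\rm \tiny PF}}=\theta^d=4$.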
The coordinates of the left and right Perron-Frobenius eigenvectors $L,R$, are now related to the volumes and the frequencies of the prototiles as follows: for all $v\in{\mathcal V}$ we have \begin{equation} \label{eq-LR} \textrm{freq}(t_v) = R_v\,, \qquad \textrm{vol}(t_v) = L_v \, \end{equation} where $\textrm{freq}(t_v)$ is the frequency and $\textrm{vol}(t_v)$ the volume of $t_v$, the volume being normalized as in equation~_{\mbox{\scriptsize\em eq}}ref{eq-normalization} so that the average volume of a tile is $1$. Given a choice of punctures to define the transversal $\Xi$ of $\Omega_\Phi$ there is a map \[ \RMob:\Xi \to \Pi_\infty({\mathcal G}) \] onto the set of half-infinite paths in ${\mathcal G}$. Indeed, given a tiling $T\in\Xi$ (so with a puncture at the origin) and an integer $n\in {\mathbb N}$, we define: \begin{itemize} \item $v_n(T)\in {\mathcal V}$ to be the vertex corresponding to the prototile type of the tile in $\Phi^{-n}(T)$ which contains the origin; \item $\varepsilon_n(T)\in {\mathcal E}$ to be the edge corresponding to the occurrence of $v_{n-1}(T)$ in $\Phi(v_{n}(T))$. \end{itemize} Then $\RMob(T)$ is the sequence $(\varepsilon_n(T))_{n\geq 1}$. We call $\RMob$ the Robinson map as it was first defined for the Penrose tilings by Robinson, see \cite{Grunbaum}. \begin{theo}[\cite{Kel95}] \label{thm-homeo} $\RMob$ is a homeomorphism. \end{theo} We extend the above map $\RMob$ to the continuous hull $\Omega_\Phi$. The idea is simple: the definition of $\RMob$ makes sense provided the origin lies in a single tile but becomes ambiguous as soon as it lies in the common boundary of several tiles. We will therefore always assign that boundary to a unique tile in the following way. We suppose that the boundaries of the tiles are sufficiently regular so that there exist a vector $\vec v\in{\mathbb R}^d$ such that for all points $x$ of a tile $t$ either \(\exists \eta>0, \mathfrak porall \epsilon \in(0,\eta): x+\epsilon \vec v \in t\) or \(\exists \eta>0, \mathfrak porall\epsilon \in (0,\eta): x+\epsilon \vec v \notin t\). This is clearly the case for polyhedral tilings. We fix such a vector $v$. Given a prototile $t$ (a closed set) we define the half-open prototile $[t)$ as follows: \[ [t) :=\big\{ x\in t \, : \, \exists \eta>0\:\mathfrak porall \epsilon \in[0,\eta): x+\epsilon \vec v \in t \big\}. \] It follows that any tiling $T$ gives rise to a partition of ${\mathbb R}^d$ by half-open tiles. We extend the Robinson map to \[ \RMob: \Omega_\Phi\to\Pi_{-\infty,+\infty} \] where $\Pi_{-\infty,+\infty}$ is the space of bi-infinite sequences over ${\mathcal G}$ using half-open proto-tiles as follows. For $n\in {\mathbb Z}$ we define \begin{itemize} \item $v_n(T)\in {\mathcal V}$ to be the vertex corresponding to the prototile type of the half open tile in $\Phi^{-n}(T)$ which contains the origin. So $v_n(T)$ corresponds to \begin{itemize} \item the $n$-th order (half-open) supertile in $T$ containing the origin, for $n> 0$, \item $n$-th order (half-open) microtile in $\delta^{-n}(T)$ containing the origin, for $n \le 0$; \end{itemize} \item $\varepsilon_n(T)\in {\mathcal E}$ to be the edge corresponding to the occurrence of $v_{n-1}(T)$ in $\Phi(v_{n}(T))$. \end{itemize} And we set \(\RMob(T)\) to be the bi-infinite sequence $\RMob(T)=\bigl(\varepsilon_n(T)\bigr)_{n\in{\mathbb Z}}$. \begin{rem} \label{rem-brat2}{\em As in Remark~\ref{rem-brat1} we can see this construction as a Bratteli diagram, but the diagram is bi-infinite this time. 
There is a copy of ${\mathcal V}$ at each level $n\in {\mathbb Z}$ and edges of ${\mathcal E}$ between levels $n$ and $n+1$. Level $0$ corresponds to prototiles, level $1$ to supertiles and level $n>1$ to $n$-th order supertiles, while level $-1$ corresponds to microtiles and level $n<-1$ to $n$-th order microtiles. For the ``negative'' part of the diagram, we can alternatively consider the reversed substitution graph $\widetilde{\mathcal G}=({\mathcal V},\widetilde{\mathcal E})$ which is ${\mathcal G}$ with all orientations of the edges reversed. The graph matrix of $\widetilde{{\mathcal G}}$, is then the transpose of the substitution matrix: $\widetilde{A}=A^T$. So for $n\le0$, there are edges of $\widetilde {\mathcal E}$ between levels $n$ and $n-1$: there are $\widetilde{A}_{uv}=A_{vu}$ such edges linking $u$ to $v$. } \end{rem} As for Theorem~\ref{thm-homeo} one proves, using the border forcing condition, that $\RMob$ is injective. Given a path $\xi \in \Pi_{-\infty,+\infty}$, and $m < n \in {\mathbb Z}\cup\{\pm \infty\}$, we denote by \(\xi_{[m,n]}\),\(\xi_{(m,n]}\),\(\xi_{[m,n)}\) and \(\xi_{(m,n)}\), its restrictions from level $m$ to $n$ (with end points included or not). Also $\xi_n$ will denote its $n$-th edge, from level $n$ to level $n+1$, $n\in{\mathbb Z}$. We similarly define $\Pi_{m,n}$ (with end points included). For instance $\Pi_{0,+\infty}$ is simply $\Pi_\infty=\Pi_\infty({\mathcal G})$. We say that an edge $e\in{\mathcal E}$ is {\em inner} if it encodes the position of a tile $t$ in the supertile $p$ such that \(\exists\eta>0, \mathfrak porall\epsilon\in[0,\eta):t+\epsilon \vec v\in p\). This says that the occurence of $t$ in $p$ does not intersect the open part of the border of $p$. It is not true that $\RMob$ is bijective but we have the following. \begin{lemma} \label{inedges} $X:=\mbox{\rm im} \RMob$ contains the set of paths $\xi\in\Pi_{-\infty,+\infty}$ such that $\xi_{(-\infty,0]}$ has infinitely many inner edges. \end{lemma} \begin{proof} Recall the following: If $(A_n)_n$ is a sequence of subsets of ${\mathbb R}^d$ such that $A_{n+1}\subset A_{n}$ and $\mbox{diam}(A_n)\to 0$ then there exists a unique point $x\in{\mathbb R}^d$ such that $x\in \bigcap_n \overline{A_n}$. By construction, for any tiling $T$, one has $0\in \bigcap_{n\leq 0}[v_n(T))$. Hence $\xi=\RMob(T)$ whenever $\bigcap_{n\leq 0}[s(\xi_n))\neq\emptyset$, where $[s(\xi_0))$ is the standard representative for the half-open prototile of type $s(\xi_0)$ and $[s(\xi_n))$ the half-open $n$-th order microtile of type $s(\xi_n)$ in $[s(\xi_0))$ which is encoded by the path $\xi_{[n,0]}$. Suppose that $\xi_{n}$ is inner, then $[s(\xi_{n}))$ does not lie at the open border of $[r(\xi_n)=s(\xi_{n+1}))$. Hence \([s(\xi_{n}))\cap [s(\xi_{n+1}))= [s(\xi_{n})]\cap [s(\xi_{n+1}))\) where $[s(\xi_{n})]$ is the closure of $[s(\xi_n))$. Suppose that infinitely many edges of $\xi_{(-\infty,0]}$ are inner, then \[ \bigcap_{n<0: \xi_{n}\mbox{ \small inner}} [s(\xi_{n})] \subset \bigcap_{n < 0}[s(\xi_{n+1})) \] showing that the r.h.s.\ contains an element, and hence $\xi\in\mbox{\rm im}\,\RMob$. \end{proof} \begin{coro} The set $X$ is a dense and shift invariant subset of $\Pi_{-\infty,+\infty}$. \end{coro} \begin{proof} Shift invariance is clear. Denseness follows immediately from Lemma~\ref{inedges}. 
\end{proof} In particular, for $n\in{\mathbb N}$, each element of $\Pi_{-n,n}$ can be the middle part of a sequence in $\RMob(\Omega_\Phi)$, that is, for all $\gamma\in \Pi_{-n,n}$ there exists $T\in\Omega_\Phi$ such that $\RMob(T)_{[-n,n]} = \gamma$. \begin{rem} \label{rem-Robinson}{\em For $v\in{\mathcal V}$, let $\Pi_{-\infty,\infty}^v$ be the set of bi-infinite paths which pass through $v$ at level $0$, and set $X^v = X\cap \Pi_{-\infty,\infty}^v$. Then ${\mathcal R}$ yields a bijection between $\Xi_{t_v}\times [t_v)$ and $X^v$, where $t_v$ is the prototile corresponding to $v$ and $\Xi_{t_v}$ its acceptance domain (the set of all tilings in $\Xi$ which have $t_v$ at the origin). Notice that $\Pi_{-\infty,0}$ can be identified with $\Pi_\infty(\widetilde{\mathcal G})$, where $\widetilde{\mathcal G}$ is the graph obtained from ${\mathcal G}$ by reversing the orientation of its edges: one simply reads paths backwards, so follows the edges along their opposite orientations. One sees then, that the Robinson map yields a homeomorphism between $\Xi_{t_v}$ and $\Pi_{0,+\infty}^v=\Pi_\infty^v$, and a map with dense image from $[t_v)$ into $\Pi_{-\infty,0}^v=\Pi_\infty^v(\widetilde {\mathcal G})$. } \end{rem} \subsection{The transversal triple for a substitution tiling} \label{ssec-trST} Our aim here is to construct a spectral triple for the transversal $\Xi$. We apply the general construction of Section~\ref{sec-ST-Bratteli} to the substitution graph ${\mathcal G}=({\mathcal V},{\mathcal E})$. We may suppose\mathfrak pootnote{This can always be achieved by going over to a power of the substitution.} that the substitution has a fixed point $T^*$ such that ${\mathbb R}^d$ is covered by the union over $n$ of the $n$-th order supertiles of $T^*$ containing the origin. It follows that $\RMob(T^*)$ is a constant path in $\Pi_{-\infty,+\infty}({\mathcal G})$, that is, the infinite repetition of a loop edge of ${\mathcal G}$ which we choose to be $\varepsilon^*$. We then fix $\tau$, take $\rho=\rho_{tr}$ as a parameter, and choose a subset \[ {\hat{a}}t{\mathcal H}_{tr}\subset {\mathcal H}({\mathcal G})= \left\{ (\varepsilon,\varepsilon') \in {\mathcal E}\times{\mathcal E} \, : \, \varepsilon\neq \varepsilon', \; s(\varepsilon)=s(\varepsilon') \right\}\, \] which we suppose to satisfy the conditions of Lemma~\ref{lem-Gtau}: if $s(\varepsilon) = s(\varepsilon')$ there is a path of edges in ${\hat{a}}t{\mathcal H}_{tr}$ linking $\varepsilon'$ with $\varepsilon'$. The horizontal edges of level $n\in{\mathbb N}$ are then given by \[ {\mathcal H}_{{tr},n}= \Bigr\{ (\eta \varepsilon,\eta \varepsilon') \, : \, \eta \in \Pi_{n-1}({\mathcal G}), (\varepsilon,\varepsilon')\in {\hat{a}}t{\mathcal H}_{tr} \Bigr\} \subset \Pi_n({\mathcal G}) \times \Pi_n({\mathcal G}) \,. \] They define the transverse approximation graph \(G_{tr,\tau}=(V_{tr},E_{tr})\) as in Section~\ref{sec-ST-Bratteli} \begin{eqnarray*} V_{tr} = \bigcup_n V_{tr,n}\,, & V_{tr,n} = \tau (\Pi_n({\mathcal G})) \subset \Pi_\infty^{\mathscr A}t({\mathcal G})\,,\\ E_{tr} = \bigcup_n E_{tr,n}\,, & E_{tr,n} = \tau\times\tau({\mathcal H}_{tr,n})\,, \end{eqnarray*} together with the orientation inherited from ${\hat{a}}t{\mathcal H}_{tr}$: so \(E_{tr,n} = E_{tr,n}^+ \cup E_{tr,n}^-\) for all $n\in{\mathbb N}$, and \(E_{tr}=E_{tr}^+\cup E_{tr}^-\). We also write $E_n(h) = \tau\times\tau({\mathcal H}_{{tr},n}(h))$ where, if $h=(\varepsilon,\varepsilon')$, then ${\mathcal H}_{{tr},n}(h) = \{(\eta\varepsilon,\eta\varepsilon'):\eta\in\Pi_{n-1}({\mathcal G})\}$. 
By our assumption on $\hat{\mathcal H}_{tr}$ the approximation graph $G_{tr,\tau}$ is connected, and its vertices are dense in $\Pi_\infty({\mathcal G})$. An edge $h\in \hat{\mathcal H}_{tr}$ has the following interpretation: the two paths $\tau(s(h))$ and $\tau(r(h))$ have the same source vertex, say $v_0$, they differ on their first edge and then, at some minimal $n_h>0$, they come back together, coinciding for all further edges. This is a consequence of the defining property of $\tau$. Let us denote by $v_h$ the vertex at which the two paths come back together. Neglecting the part after that vertex we obtain a pair $(\gamma,\gamma')$ of paths of length $n_h$ which both start at $v_0$ and end at $v_h$. Reading the definition of the Robinson map $\RMob$ backwards we see that the pair $(\gamma,\gamma')$ describes a pair of tiles $(t,t')$ of type $v_0$ in an $n_h$-th order supertile of type $v_h$. Of importance below will be the vector $r_h\in {\mathbb R}^d$ of translation from $t$ to $t'$. The interpretation of an edge $e\in E_{tr,n}(h)$ (so an edge of type $h$) is similar, except that the paths $\tau(s(e))$ and $\tau(r(e))$ coincide up to level $n$ and meet again at level $n+n_h$. In particular $e$ describes a pair of $n$-th order supertiles $(t,t')$ of type $v_0$ in an $(n+n_h)$-th order supertile of type $v_h$. If one denotes by $r_e\in {\mathbb R}^d$ the translation vector between $t$ and $t'$ then, due to the selfsimilarity, one has:
\begin{equation}
\label{eq-transtr}
r_e= \theta^{n} r_h.
\end{equation}
See Figure~\ref{fig-chair3} for an illustration.
\begin{figure}
\caption{{\small A doubly pointed pattern associated with a horizontal arrow $e\in E_{tr,3}$.}}
\label{fig-chair3}
\end{figure}
Theorem~\ref{thm-ST} provides us with a spectral triple for the algebra $C(\Pi_\infty({\mathcal G}))$. We adapt this slightly to get a spectral triple for $C(\Xi)$. Since the $n$-th order supertiles of $T^*$ containing $0$ eventually cover ${\mathbb R}^d$, $\RMob$ identifies $\Pi_\infty^*({\mathcal G})$ with the translates of $T^*$ which belong to $\Xi$. We may thus consider the spectral triple $(C(\Xi),{\mathfrak H}_{tr},D_{tr})$ (which depends on $\rho_{tr}$ and the choices of $\tau$ and $\hat{\mathcal H}_{tr}$) with representation and Dirac operator defined as in equations~\eqref{eq-rep} and~\eqref{eq-Dirac} by:
\[
{\mathfrak H}_{tr}=\ell^2(E_{tr}) \,,\quad
\pi_{tr}(f)\varphi(e) = f\bigl( \RMob^{-1}(s(e)) \bigr)\varphi(e)\,, \quad
D_{tr} \,\varphi(e) = \rho_{tr}^{-n} \varphi(e^{\rm op})\,, \ e\in E_{tr,n}\,.
\]
We call it the {\em transverse spectral triple} of the substitution tiling. By Theorem~\ref{thm-ST} it is an even spectral triple with grading $\chi$ (which flips the orientation). Also, since $\hat{\mathcal H}_{tr}$ satisfies the hypothesis of Lemma~\ref{lem-Gtau} as noted above, the Connes distance induces the topology of $\Xi$. By Theorems~\ref{thm-zeta} and~\ref{thm-specmeas} the transversal spectral triple has metric dimension $s_{tr}=\frac{d\log(\theta)}{-\log(\rho_{tr})}$, and its spectral measure is the unique ergodic measure on $\Xi$ which is invariant under the tiling groupoid action. For $v\in{\mathcal V}$, we will also consider the spectral triple $(C(\Xi_{t_v}), {\mathfrak H}_{tr}^v, D_{tr})$ for \(\Xi_{t_v}=\RMob^{-1} \bigl(\Pi_\infty^v({\mathcal G}) \bigr)\): the acceptance domain of $t_v$ (see Remark~\ref{rem-Robinson}). We call it the {\em transverse spectral triple for the prototile $t_v$}.
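The two numerical ingredients of this construction, the metric dimension $s_{tr}$ and the scaling law \eqref{eq-transtr}, are elementary to evaluate. The following sketch (in Python) does so for illustrative values only: the pair $d=2$, $\theta=2$ corresponds to a chair-like substitution as in the figures, while $\rho_{tr}=1/2$ and the vector $r_h$ are assumed placeholder data, not quantities fixed by the text above.
\begin{verbatim}
import numpy as np

def metric_dimension(d, theta, rho):
    # s = d*log(theta) / (-log(rho)), as quoted from Theorem thm-zeta above
    return d * np.log(theta) / (-np.log(rho))

d, theta, rho_tr = 2, 2.0, 0.5          # illustrative (assumed) values
print("s_tr =", metric_dimension(d, theta, rho_tr))

# scaling of the doubly pointed patterns, eq. (eq-transtr): r_e = theta^n * r_h
r_h = np.array([1.0, 0.5])              # assumed translation vector of a base edge h
for n in range(4):
    print(n, theta**n * r_h)            # translation between the two n-th order supertiles
\end{verbatim}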
The latter is obtained from the transverse spectral triple by restriction to the Hilbert space ${\mathfrak H}_{tr}^v =\ell^2(E_{tr}^v)$, where $E_{tr}^v$ is the set of horizontal edges between paths which start at $v$. This restriction has the effect that
\[
\zeta_v = R_v\, \zeta + \text{reg.}\,,
\]
that is, up to a perturbation which is regular at $s_{tr}$, the new zeta function is $R_v=\textrm{freq}(t_v)$ times the old one. It hence has the same abscissa of convergence, $s_{tr}^v=s_{tr}$, but its residue at $s_{tr}$ is $\textrm{freq}(t_v)$ times the old one. As for $\Xi$, the Connes distance induces the topology of $\Xi_{t_v}$. Finally, the spectral measure $\mu_{tr}^v$ is the restriction to $\Xi_{t_v}$ of the invariant measure on $\Xi$, normalized so that the total measure of $\Xi_{t_v}$ is $1$. The spectral triple for $\Xi$ is actually the direct sum over $v\in{\mathcal V}$ of the spectral triples for $\Xi_{t_v}$.

We are particularly interested in the quadratic form defined formally by
\begin{multline}
Q_{tr}(f,g) = {\mathcal T}_{tr}([D_{tr},\pi_{tr}(f)]^*[D_{tr},\pi_{tr}(g)]) \\
= \lim_{s\downarrow s_{tr}} \frac{{\rm Tr\,}_{{\mathfrak H}_{tr}}\bigl(|D_{tr}|^{-s} [D_{tr},\pi_{tr}(f)]^*[D_{tr},\pi_{tr}(g)]\bigr)}{\zeta_{tr}(s)}\,.
\end{multline}
We emphasize that this expression has little meaning yet, as we have not specified a domain for this form. For example, while strongly pattern equivariant functions \cite{Kellendonk-PatEquiv} are dense, they do not form an interesting core, as $Q_{tr}$ vanishes on such functions (see the paragraph on the transversal form in Section~\ref{ssec-DirForm}).

\subsection{The longitudinal triple for a substitution tiling}
\label{ssec-lgST}
We now aim at constructing what we call the longitudinal spectral triple for the substitution tiling, which is based on the reversed substitution graph $\widetilde{\mathcal G}=({\mathcal V},\widetilde{\mathcal E})$ (${\mathcal G}$ with all orientations of the edges reversed, so with adjacency matrix $\widetilde{A}=A^T$). Set ${\widetilde \varepsilon}^* = \varepsilon^*$ and choose $\widetilde\tau$. We take $\rho=\rho_{lg}$ as a parameter, and choose a subset
\begin{eqnarray*}
\hat{\mathcal H}_{lg}\subset {\mathcal H}(\widetilde{\mathcal G}) & = & \left\{ (\widetilde \varepsilon,\widetilde \varepsilon') \in \widetilde{\mathcal E}\times\widetilde{\mathcal E} \, : \, \widetilde \varepsilon\neq \widetilde \varepsilon', \; s(\widetilde \varepsilon)=s(\widetilde \varepsilon') \right\} \\
& = & \left\{ (\varepsilon,\varepsilon') \in {\mathcal E}\times{\mathcal E} \, : \, \varepsilon\neq \varepsilon', \; r(\varepsilon)=r(\varepsilon') \right\}
\end{eqnarray*}
again satisfying the condition of Lemma~\ref{lem-Gtau}.
We denote the horizontal edges of level $n\in{\mathbb N}$ by
\[
{\mathcal H}_{{lg},n}= \Bigl\{ (\eta \widetilde \varepsilon,\eta \widetilde \varepsilon') \, : \, \eta \in \Pi_{n-1}(\widetilde{\mathcal G}), (\widetilde \varepsilon,\widetilde \varepsilon')\in \hat{\mathcal H}_{lg} \Bigr\} \subset \Pi_n(\widetilde{\mathcal G}) \times \Pi_n(\widetilde{\mathcal G}) \,,
\]
and define the longitudinal approximation graph \(G_{lg,\tau}=(V_{lg},E_{lg})\) as in Section~\ref{sec-ST-Bratteli} by
\begin{eqnarray*}
V_{lg} = \bigcup_n V_{lg,n}\,, & V_{lg,n} = \tau (\Pi_n(\widetilde{\mathcal G})) \subset \Pi_\infty^*(\widetilde{\mathcal G})\,,\\
E_{lg} = \bigcup_n E_{lg,n}\,, & E_{lg,n} = \tau\times\tau({\mathcal H}_{{lg},n})\,,
\end{eqnarray*}
together with the orientation inherited from $\hat{\mathcal H}_{lg}$: so \(E_{lg,n} = E_{lg,n}^{+} \cup E_{lg,n}^{-}\) for all $n\in{\mathbb N}$, and \(E_{lg}=E_{lg}^{+}\cup E_{lg}^-\). With these choices made, Theorem~\ref{thm-ST} provides us with a spectral triple for the algebra $C(\Pi_\infty(\widetilde{\mathcal G}))$.

A longitudinal horizontal edge $h\in\hat{\mathcal H}_{lg}$ has the following interpretation: as for the transversal horizontal edges, $\tau(s(h))$ and $\tau(r(h))$ start at a common vertex $v_0$, differ on their first edge and then come back together, coinciding from then on. To obtain their interpretation it is more useful, however, to reverse their orientation, as this is the way the Robinson map $\RMob$ was defined. Then $h=(\widetilde\varepsilon,\widetilde\varepsilon')$ with $s(\widetilde\varepsilon) = s(\widetilde\varepsilon')$ determines a pair of microtiles $(t,t')$ of type $r(\widetilde\varepsilon)$ and $r(\widetilde\varepsilon')$, respectively, in a tile of type $s(\widetilde\varepsilon)$. The remaining part of the double path $(\tau(\widetilde \varepsilon),\tau(\widetilde \varepsilon'))$ serves to fix a point in the two microtiles. Of importance is now the vector of translation $a_h$ between the two points of the microtiles. Similarly, an edge in $E_{lg,n}$ will describe a pair of $(n+1)$-th order microtiles in an $n$-th order microtile. By selfsimilarity again, the corresponding translation vector $a_e\in {\mathbb R}^d$ between the two $(n+1)$-th order microtiles satisfies
\begin{equation}
\label{eq-translg}
a_e= \theta^{-n} a_h \quad \mbox{ if } e\in E_{lg,n}(h)\,.
\end{equation}
See Figure~\ref{fig-chair4} for an illustration.
\begin{figure}
\caption{{\small A microtile pattern associated with a horizontal arrow $e\in E_{lg,2}$.}}
\label{fig-chair4}
\end{figure}
Remember from Remark~\ref{rem-Robinson} that we can identify $\Pi_\infty(\widetilde{\mathcal G}) = \Pi_{-\infty,0}({\mathcal G})$. The inverse of the Robinson map $\RMob$ also induces a map with dense image $\Pi_{-\infty,0}^v \to t_v$ which is one-to-one on the pre-image of $\Pi_{-\infty,0}^*({\mathcal G})$; we still denote this map by $\RMob^{-1}$. Hence the approximation graph for $\Pi_{-\infty,0}^v$ is also an approximation graph for $t_v$. Let $E_{lg}^v$ denote the set of edges whose corresponding paths pass through $v$ at level $0$.
We may thus adapt the above spectral triple to get the spectral triple $(C(t_v),{\mathfrak H}_{lg}^v,D_{lg})$ (which depends on $\rho_{lg}$) with representation and Dirac operator defined as in equations~\eqref{eq-rep} and~\eqref{eq-Dirac} by:
\[
{\mathfrak H}_{lg}^v=\ell^2(E_{lg}^v) \,,\quad
\pi_{lg}(f)\varphi(e) = f\bigl( \RMob^{-1}(s(e)) \bigr)\varphi(e)\,, \quad
D_{lg} \,\varphi(e) = \rho_{lg}^{-n} \varphi(e^{\rm op})\,, \ e\in E_{lg,n}^v\,.
\]
That the bounded commutator axiom is satisfied follows from the following lemma and the fact that H\"older continuous functions are dense in $C(t_v)$.
\begin{lemma}
If $f\in C(t_v)$ is H\"older continuous (w.r.t.\ the Euclidean metric $d$) with exponent $\alpha = \frac{-\log(\rho_{lg})}{\log(\theta)}$, then $[D_{lg},\pi_{lg}(f)]$ is bounded.
\end{lemma}
\begin{proof}
Suppose $f\in C(t_v)$ is H\"older continuous with exponent $\alpha = \frac{-\log(\rho_{lg})}{\log(\theta)}$, that is, $\left|\frac{f(x)-f(y)}{d(x,y)^\alpha}\right|\leq C$ for some $C>0$ and all $x,y\in t_v$, $x\neq y$. Then
\[
\|[D,\pi(f)]\| = \sup_n\sup_{e\in E_{lg,n}} \left|\frac{f(\RMob^{-1} (r(e)))-f(\RMob^{-1}(s(e)))}{d(\RMob^{-1} (r(e)),\RMob^{-1}(s(e)))^\alpha}\right| \frac{d(\RMob^{-1} (r(e)),\RMob^{-1}(s(e)))^\alpha}{\rho_{lg}^n}.
\]
This expression is finite: the first factor is bounded by $C$, by self-similarity there exists $C'>0$ such that $d(\RMob^{-1} (r(e)),\RMob^{-1}(s(e)))\leq C'\theta^{-n}$, and $\alpha$ has been chosen so that $\theta^{-n\alpha}\rho_{lg}^{-n} = 1$.
\end{proof}
We refer to this spectral triple $(C(t_v),{\mathfrak H}_{lg}^v,D_{lg})$ as the {\em longitudinal spectral triple for the prototile $t_v$}. It should be noted that, although the map $\RMob^{-1}$ is continuous, the topologies of $t_v$ and of $\Pi_{-\infty,0}^v$ are quite different, and so the Connes distance of this spectral triple does not induce the topology of $t_v$. By Theorems~\ref{thm-zeta} and~\ref{thm-specmeas} the longitudinal spectral triple has metric dimension $s_{lg}=\frac{ d\log(\theta)}{-\log(\rho_{lg})}$ for all $v$; what depends on $v$ is the residue of the zeta function. In fact, as compared to the zeta function of the full triple it has to be rescaled: \(\zeta_{lg}^v= (L_v/\sum_u L_u)\, \zeta_{lg}\). The spectral measure $\mu_{lg}^v$ is easily seen to be the normalized Lebesgue measure on $t_v$, as the groupoid of tail equivalence acts by partial translations. Similarly to the transversal case we are interested in the quadratic form defined formally by
\begin{multline}
Q_{lg}^v(f,g) = {\mathcal T}_{lg}^v([D_{lg},\pi_{lg}(f)]^*[D_{lg},\pi_{lg}(g)]) \\
= \lim_{s\downarrow s_{lg}} \frac{{\rm Tr\,}_{{\mathfrak H}_{lg}^v}\bigl(|D_{lg}|^{-s} [D_{lg},\pi_{lg}(f)]^*[D_{lg},\pi_{lg}(g)]\bigr)}{\zeta_{lg}^v(s)}\,.
\end{multline}
Again, this expression has little meaning yet, as we have not specified a domain for this form.

\subsection{The spectral triple for $\Omega_\Phi$}
\label{ssec-SThull}
We now combine the above triples to get a spectral triple \((C(\Omega_\Phi),{\mathfrak H},D)\) for the whole tiling space $\Omega_\Phi$.
The graphs ${\mathcal G}$ and $\widetilde{\mathcal G}$ have the same set of vertices ${\mathcal V}$, so we notice from Remark~\ref{rem-Robinson} that the identification
\[
\Pi_{-\infty,+\infty}({\mathcal G}) = \bigcup_{v\in{\mathcal V}} \Pi_{-\infty,0}^v({\mathcal G}) \times \Pi_{0,+\infty}^v({\mathcal G}) = \bigcup_{v\in{\mathcal V}} \Pi_{\infty}^v(\widetilde{\mathcal G}) \times \Pi_{\infty}^v({\mathcal G})
\]
suggests constructing the triple for $\Omega_\Phi$ as a direct sum of tensor product spectral triples related to the transversal and the longitudinal parts. In fact, $\Pi_{-\infty,0}^v({\mathcal G}) \times \Pi_{0,+\infty}^v({\mathcal G})$ is dense in $t_v\times \Xi_{t_v}$ (see Remark~\ref{rem-Robinson}) and so we can use the tensor product construction for spectral triples to obtain a spectral triple for $C(t_v\times \Xi_{t_v})\cong C(t_v)\otimes C(\Xi_{t_v})$ from the two spectral triples considered above. Furthermore, the $C^*$-algebra $C(\Omega_\Phi)$ is a subalgebra of $\bigoplus_{v\in{\mathcal V}} C(t_v\times \Xi_{t_v})$, and so the direct sum of the tensor product spectral triples for the different tiles $t_v$ provides us with a spectral triple for $C(\Omega_\Phi)$:
\begin{equation}
\label{eq-STOmega}
{\mathfrak H}=\bigoplus_{v\in{\mathcal V}} {\mathfrak H}_{tr}^v \otimes {\mathfrak H}_{lg}^v\,, \quad
\pi=\bigoplus_{v\in{\mathcal V}} \pi_{tr}^v\otimes \pi_{lg}^v\,, \quad
D= \bigoplus_{v\in{\mathcal V}} \bigl( D_{tr}^v\otimes {\mathbf 1} + \chi \otimes D_{lg}^v \bigr)\,,
\end{equation}
where $\chi$ is the grading of the transversal triple (which flips the orientations in $E_{tr}$). The representation of a function $f\in C(\Omega_\Phi)$ then reads
\begin{equation}
\label{eq-COmega}
\pi(f) = \sum_{v\in{\mathcal V}} f_{tr}^v \otimes f_{lg}^v\,, \qquad \text{\rm with} \quad f_{tr}^v = \pi_{tr}^v(f) \in C(\Xi_{t_v})\,, \ f_{lg}^v = \pi_{lg}^v(f) \in C(t_v)\,.
\end{equation}
From the results in Section~\ref{ssec-tensorprod} we now get all the spectral information of \((C(\Omega_\Phi),{\mathfrak H},D)\). To formulate our results more concisely let us call $A\in {\mathcal B}({\mathfrak H})$ {\em non resonant} if \(\overline{A}_n \stackrel{n\rightarrow \infty}{\sim} c_A e^{\imath n \varphi}\) for some $c_A>0$ and some non resonant $\varphi\in (0,2\pi)$ (Definition~\ref{def-resphase}).
\begin{theo}
\label{thm-STOmega}
The above is a spectral triple for $C(\Omega_\Phi)$. Its spectral dimension is
\[
s_0= s_{tr}+s_{lg} = \frac{d\log\theta}{-\log\rho_{tr}} + \frac{d\log\theta}{-\log\rho_{lg}}\,,
\]
and its zeta function $\zeta(z)$ has a simple pole at $s_0$ with strictly positive residue. Moreover, for $A = \oplus_v A_{tr}^v\otimes A_{lg}^v$ such that, for each $v$, either both $A_{tr}^v$ and $A_{lg}^v$ are strongly regular, or one is strongly regular and the other is the sum of a strongly regular and a non resonant part, we have
\begin{equation}
\label{eq-decspecstate}
{\mathcal T}(A) = \sum_{v\in{\mathcal V}} \mbox{\rm freq}(t_v) \mbox{\rm vol}(t_v) \; {\mathcal T}_{tr}^v(A_{tr}^v) {\mathcal T}_{lg}^v(A_{lg}^v) \,.
\end{equation}
In particular, the spectral measure is the unique invariant ergodic probability measure $\mu$ on $\Omega_\Phi$.
\end{theo}
\begin{proof}
Consider first the triple for the matchbox $t_v\times \Xi_{t_v}$, that is, the tensor product spectral triple for $C(t_v)\otimes C(\Xi_{t_v})$.
Applying Lemma~\ref{lem-prod} we obtain the value $s_0^v = \frac{d\log\theta}{-\log\rho_{tr}} + \frac{d\log\theta}{-\log\rho_{lg}}$ for the abscissa of convergence of its zeta function $\zeta^v$. In particular, this value does not depend on $v$. Furthermore,
\[
\lim_{s\to s_0^+}(s-s_0)\zeta^v(s) = \frac{\textrm{freq}(t_v)\textrm{vol}(t_v)}{\sum_u \textrm{vol}(t_u)}\,
\frac{\sum_{k=-\infty}^\infty \Gamma\bigl(\frac{d \log \theta +2\pi i k}{-2\log (\rho_{tr})}\bigr) \Gamma\bigl(\frac{d \log \theta -2\pi i k}{-2\log (\rho_{lg})}\bigr) } {2\Gamma(\frac{s_0}2) \log(\rho_{tr}) \log(\rho_{lg})}.
\]
The above number is in fact a strictly positive real number, as it is, up to a positive factor, the mean of two strictly positive periodic functions. It follows that the abscissa of convergence of the zeta function $\zeta$ of the direct sum of the above triples is equal to the common value $s_0=s_0^v$. From this we can now determine the spectral state with the help of Lemma~\ref{lem-prodstate} and \eqref{eq-sum-state}. If $A_{tr}^v$ and $A_{lg}^v$ are both strongly regular then, by Corollary~\ref{cor-prodstrongreg}, \({\mathcal T}(A_{tr}^v\otimes A_{lg}^v) =n_v{\mathcal T}_{tr}^v(A_{tr}^v){\mathcal T}_{lg}^v(A_{lg}^v)\), with the factor \(n_v=\textrm{freq}(t_v)\textrm{vol}(t_v)\) because the states are normalized. If, say, $A_{tr}^v$ is strongly regular and $A_{lg}^v=A_{lg,sreg}^v+A_{lg,nres}^v$ is the sum of a strongly regular and a non resonant part, then
\begin{eqnarray*}
{\mathcal T}(A_{tr}^v\otimes A_{lg}^v) & = & {\mathcal T}(A_{tr}^v \otimes A_{lg,sreg}^v) + {\mathcal T}(A_{tr}^v \otimes A_{lg,nres}^v) \\
& = & n_v{\mathcal T}_{tr}^v(A_{tr}^v){\mathcal T}_{lg}^v(A_{lg,sreg}^v) + \textrm{freq}(t_v){\mathcal T}_{tr}^v(A_{tr}^v) {\mathcal T}({\mathbf 1} \otimes A_{lg,nres}^v) \\
& = & n_v {\mathcal T}_{tr}^v(A_{tr}^v){\mathcal T}_{lg}^v(A_{lg,sreg}^v) + 0\\
& = & n_v{\mathcal T}_{tr}^v(A_{tr}^v){\mathcal T}_{lg}^v(A_{lg,sreg}^v) + n_v{\mathcal T}_{tr}^v(A_{tr}^v){\mathcal T}_{lg}^v(A_{lg,nres}^v) \\
& = & n_v{\mathcal T}_{tr}^v(A_{tr}^v){\mathcal T}_{lg}^v(A_{lg}^v)\,,
\end{eqnarray*}
where the second line follows by Corollary~\ref{cor-prodstrongreg}, the third by Lemma~\ref{lem-resphi}, and the fourth by Lemma~\ref{lem-stateres} (the state of a non resonant operator vanishes: \({\mathcal T}_{lg}^v(A_{lg,nres}^v)=0\)). The argument is the same if $A_{lg}^v$ is strongly regular and $A_{tr}^v$ is the sum of a strongly regular and a non resonant part. Hence we get in both cases
\[
{\mathcal T}(A) = {\mathcal T}\Bigl( \sum_v A_{tr}^v \otimes A_{lg}^v \Bigr) = \sum_v n_v {\mathcal T}_{tr}^v(A_{tr}^v){\mathcal T}_{lg}^v(A_{lg}^v) \,.
\]
Since $n_v=\textrm{freq}(t_v)\textrm{vol}(t_v)$ is the $\mu$-measure of the matchbox $t_v\times \Xi_{t_v}$ we see that the spectral measure coincides with $\mu$.
\end{proof}

\subsection{Pisot substitutions}
\label{ssect-Pisot}
Recall that we consider here aperiodic primitive FLC substitutions which admit a fixed point tiling. Their dilation factor $\theta$ is necessarily an algebraic integer. There is a dichotomy: either the dynamical system defined by the tilings is weakly mixing (which means that there are no non-trivial eigenvalues of the translation action), or the dilation factor $\theta$ of the substitution is a Pisot number, that is, an algebraic integer greater than $1$ all of whose Galois conjugates have modulus strictly smaller than $1$. In the second case the substitution is called a Pisot substitution.
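Whether a concrete dilation factor is a Pisot number can be checked directly from its minimal polynomial: one computes all roots and verifies that exactly one of them lies outside the closed unit disk while all others lie strictly inside. The following sketch (in Python) implements this test; the three polynomials are standard examples chosen only for illustration and are not tied to any particular substitution discussed in this paper.
\begin{verbatim}
import numpy as np

def is_pisot(coeffs, tol=1e-9):
    """coeffs: coefficients of a monic integer polynomial, highest degree first."""
    roots = sorted(np.roots(coeffs), key=abs, reverse=True)
    theta, others = roots[0], roots[1:]
    return abs(theta) > 1 + tol and all(abs(r) < 1 - tol for r in others)

examples = [("x^2 - x - 1 (golden mean)",      [1, -1, -1]),
            ("x^3 - x^2 - x - 1 (Tribonacci)", [1, -1, -1, -1]),
            ("x^2 - x - 3 (not Pisot)",        [1, -1, -3])]
for name, c in examples:
    print(name, "->", is_pisot(c))
\end{verbatim}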
Let us recall here some of the relevant results. We denote by $\|x\|$ the distance of $x\in{\mathbb R}$ to the closest integer. A proof of the following theorem can be found in \cite[Chap.~VIII, Thm.~I]{Cassels}.
\begin{theo}[Pisot, Vijayaraghavan]\label{thm-Pisot}
Let $\theta>1$ be a real algebraic number and $\alpha\neq 0$ another real number such that $\|\alpha\theta^n\|\stackrel{n\to\infty}{\longrightarrow} 0$. Then we have
\begin{itemize}
\item $\theta$ is a Pisot number. We denote by $\{\theta_j:1\leq j\leq J\}$ the conjugates of $\theta$, with $\theta_1=\theta$ and $|\theta_{j+1}|\leq|\theta_j|$.
\item $\alpha\in{\mathbb Q}[\theta]$, i.e.\ $\alpha=p_\alpha(\theta)$ for some polynomial $p_\alpha$ with rational coefficients.
\item There exists $n_0$ such that for $n\geq n_0$
$$\sum_{j=1}^Jp_\alpha(\theta_j)\theta_j^n\in {\mathbb Z}.$$
If $\theta$ is unimodular then $n_0=0$.
\end{itemize}
\end{theo}
We note that $p_\alpha(\theta_j)\neq 0$ for all $j$, since otherwise $p_\alpha$ would be divisible by the minimal polynomial of $\theta$, implying that also $p_\alpha(\theta)= 0$. We assume throughout this work that $\theta$ is irrational, so that there is at least one other conjugate ($J>1$). Note that $\theta^{-1}$ is a polynomial (of degree $J-1$) in $\theta$ with coefficients in $\frac{1}{N}{\mathbb Z}$, where $N$ is the constant term of the minimal polynomial of $\theta$; $\theta$ is unimodular precisely if $N = \pm 1$.

Recall that a dynamical eigenfunction to the eigenvalue $\beta\in{{\mathbb R}^d}^*$ is a measurable function $f_\beta:\Omega_\Phi\to{\mathbb C}$ which satisfies \(f_\beta(\omega+t)=e^{2\pi\imath \beta(t)} f_\beta(\omega)\) for almost all $\omega\in \Omega_\Phi$ (w.r.t.\ the ergodic measure $\mu$) and all $t\in {\mathbb R}^d$. If $f_\beta$ can be chosen continuous then $\beta$ is also called a continuous eigenvalue. The set of eigenvalues forms a group which we denote $E$. We call a vector $r\in{\mathbb R}^d$ a return vector (to tiles) if it is a vector between the punctures of two tiles of the same type in some tiling $T\in\Omega_\Phi$.
\begin{theo}[\cite{Sol07}]
Consider a substitution with dilation factor $\theta$. The following are equivalent.
\begin{enumerate}
\item $\beta$ is an eigenvalue of the translation action.
\item $\beta$ is an eigenvalue of the translation action with continuous eigenfunction.
\item For all return vectors $r$ one has $\|\beta(r) \theta^n\|\longrightarrow 0$.
\end{enumerate}
\end{theo}
In particular, we may assume that all eigenfunctions are continuous, and if there are non-zero eigenvalues then $\theta$ must be a Pisot number and for all return vectors (to tiles) $r$ and large enough $n$ we have
\begin{equation}
\label{eq-Pisot}
\exp(2\pi\imath \beta(r)\theta^n) = \exp\Bigl(-2\pi\imath \sum_{j=2}^Jp_{\beta(r)}(\theta_j)\theta_j^n \Bigr)\,.
\end{equation}
We would like to give a geometric interpretation of the values of $p_{\beta(r)}(\theta_j)$. To better illustrate this we consider first only the unimodular situation and explain what changes in the general case later. The following theorem can be found in \cite{BK}.
\begin{theo}\label{thm-BK}
Consider a $d$-dimensional substitution as above with dilation factor $\theta$. If $\theta$ is a unimodular Pisot number of degree $J$, then the group $E$ of eigenvalues is a dense subgroup of ${{\mathbb R}^d}^*$ of rank $dJ$.
\end{theo}
Recall for instance from \cite{BK} that the maximal equicontinuous factor of $(\Omega_\Phi,{\mathbb R}^d)$ can be identified with $\hat E$, the Pontryagin dual of the group of eigenvalues $E$ equipped with the discrete topology. Its ${\mathbb R}^d$-action is induced by translation of eigenfunctions, namely $\alpha_r:\hat E\to \hat E$, $r\in{\mathbb R}^d$, acts as $(\alpha_r(\chi))(\beta) = e^{2\pi\imath \beta(r)} \chi(\beta)$, $\chi\in\hat E$, $\beta\in E$. The factor map $\pi:\Omega_\Phi\to \hat E$ is dual to the embedding of the eigenfunctions in $C(\Omega_\Phi)$. We may choose an element $T^*\in\Omega_\Phi$ and consider $\pi(T^*)$ as the neutral element in $\hat E$.

$E$ is a free abelian group of rank $dJ$. We can therefore identify it with a regular lattice in a $dJ$-dimensional Euclidean space $W$ equipped with a scalar product $\langle\cdot,\cdot\rangle$, and $\hat E$ with $W/E^{rec}$ via the map $W/E^{rec}\ni \xi \mapsto e^{2\pi\imath \langle\xi,\cdot\rangle}\in \hat E$ ($E^{rec}$ is the reciprocal lattice). With this description of $\hat E$ the dual pairing $\hat E\times E \to {\mathbb C}$ becomes $W/E^{rec}\times E \ni (\xi,\beta) \mapsto e^{2\pi\imath \langle\xi,\beta\rangle}\in {\mathbb C}$. Furthermore $e^{2\pi\imath \langle\cdot,\beta\rangle}$, $\beta\in E$, is an eigenfunction of the ${\mathbb R}^{dJ}$-action by translation on $W/E^{rec}$ to the eigenvalue $\langle\cdot,\beta\rangle\in {{\mathbb R}^{dJ}}^*$. Finally, the action of $\alpha_r$ becomes $(\alpha_r(\xi))(\beta) = e^{2\pi\imath \beta(r)} \xi(\beta)$, and hence $\alpha_r(0)$ is the unique element of $ W/E^{rec}$ satisfying $\beta(r) - \langle\alpha_r(0),\beta\rangle\in{\mathbb Z}$ for all $\beta\in E$. Now $r\mapsto\alpha_r(0)$ is continuous and locally free. It follows that the map $r\mapsto \alpha_r(0)$ lifts to a linear embedding of ${\mathbb R}^d$ into the universal cover $W$ of $\hat E$. We denote the lift of $\alpha_r(0)$ by $\widetilde r$. The vector $\widetilde r$ is thus defined by
\[
\beta(r) = \langle\widetilde r,\beta\rangle,\quad \forall \beta\in E\,.
\]
The image of the embedding, which we denote by $U$, is simply the lift of the orbit of $\pi(T^*)$. The acting group ${\mathbb R}^d$, or equivalently the orbit of $\pi(T^*)$, can therefore be identified with a subspace $U$ of the Euclidean space $W$. Our next aim is to construct a complementary subspace $S$ to $U$.

The endomorphism on ${{\mathbb R}^d}^*$ which is dual to the linear endomorphism $\theta{\mathbf 1}$ on ${\mathbb R}^d$ preserves the group of eigenvalues $E\subset {{\mathbb R}^d}^*$. It therefore restricts to a group endomorphism of $E$. We denote this restriction by $\varphi$. With respect to a basis of $E$, $\varphi$ is thus an integer $dJ\times dJ$ matrix. Denote by $\varphi_{\mathbb R}$ its linear extension to $W$ and by $\varphi^t_{\mathbb R}$ the transpose\footnote{Although taking the transpose is a dualization we have not returned to the original map $\theta{\mathbf 1}$, since the two dualizations are w.r.t.\ two different dual pairings.} of $\varphi_{\mathbb R}$. Then we have
$$\langle\varphi^t_{\mathbb R}(\widetilde r),\beta\rangle=\langle\widetilde r,\varphi_{\mathbb R}(\beta)\rangle=\varphi(\beta)(r) = \theta\beta(r) = \theta\langle\widetilde r,\beta\rangle\,,$$
showing that $\varphi^t_{\mathbb R}(\widetilde r)=\theta \widetilde r$.
We know also from \cite{BK} that $\varphi_{\mathbb R}$ has eigenvalues $\theta_j$, $1\leq j\leq J$, each with multiplicity $d$. It follows that $U$ is the eigenspace of $\varphi^t_{\mathbb R}$ to the eigenvalue $\theta$, and so, by the Pisot property, it is the full unstable subspace of $\varphi^t_{\mathbb R}$. We let $S$ be the stable subspace of $\varphi^t_{\mathbb R}$.

\paragraph{Example:} Before we continue the description we provide the example of the Fibonacci substitution tiling. Here $\theta=\frac{1+\sqrt{5}}2$ is the golden mean and $E$ is the rank $2$ subgroup ${\mathbb Z}[\theta]\subset{\mathbb R}^*$. We choose the basis $\{\theta,1\}$ for $E$. Then $\varphi$ has matrix elements $\left(\begin{array}{cc}1 &1\\ 1& 0\end{array}\right)$. $\varphi_{\mathbb R}$ has, of course, the same matrix expression. It follows that $U$ is the subspace of $W={\mathbb R}^2$ generated by the vector $(\theta,1)$ and $S$ is the subspace generated by $(-\theta^{-1},1)$. It is no coincidence that this is the cut \& project setup for the Fibonacci tiling, except that we do not have a window.

We return to the geometric description of the polynomials. Let $r$ be a return vector. We choose a lattice basis $\{\beta_{\nu}\}_{\nu=1,\cdots,dJ}$ for $E$ and let $\{\beta^{\nu}\}_{\nu=1,\cdots,dJ}$ be the dual basis for $E^{rec}$. We can express $\widetilde r$ in the dual basis. Since $\langle\widetilde r,\beta_\nu\rangle =\beta_\nu(r)$ we have
\[
\widetilde r = \sum_{\nu=1}^{dJ} \beta_\nu(r) \beta^\nu\,.
\]
By Theorem~\ref{thm-Pisot} there exist polynomials $p_\nu$ with rational coefficients such that
\[
\beta_\nu(r) = p_\nu(\theta).
\]
In particular, the vector with coefficients $(p_\nu(\theta))$ is an eigenvector of $\varphi^t_{\mathbb R}$ to the eigenvalue $\theta$. The eigenvalue equation $\sum_\mu (\varphi^t_{\nu\mu}-\theta\delta_{\nu\mu})p_\mu(\theta)=0$ is a set of $dJ$ polynomial equations with rational coefficients, and hence it is satisfied for all conjugates $\theta_j$ whenever it is satisfied for $\theta$. Thus $(p_\nu(\theta_j))$ is an eigenvector of $\varphi^t_{\mathbb R}$ to the eigenvalue $\theta_j$. If $j>1$ this vector thus lies in the stable subspace $S$ of $\varphi^t_{\mathbb R}$. We may thus define the following {\em star map} ${}^*:\{\widetilde r : r\mbox{ is a return vector}\}\subset U\to S$,
\begin{equation}\label{eq-star}
r\mapsto \widetilde r ^* = \sum_{j=2}^J\sum_{\nu=1}^{dJ} p_\nu(\theta_j) \beta^\nu\,.
\end{equation}
The map $\widetilde r\mapsto \widetilde r^*$ is actually Moody's star map. Indeed, $W$ decomposes into the direct sum of vector spaces $W= S\oplus U$ and contains $E^{rec}$ as a regular lattice. Let $\pi_U$ and $\pi_S$ be the projections onto $U$ and $S$, resp., with kernel $S$ and $U$, resp. Recall that $\sum_{j=1}^J p_\nu(\theta_j) \in {\mathbb Z}$ (we assumed that $\theta$ is unimodular). This can be reinterpreted as $\widetilde r + \widetilde r^*\in E^{rec}$. Since $S$ intersects $E^{rec}$ only in the origin\footnote{$\varphi^t$ preserves the intersection $E^{rec}\cap S$, which is hence invariant under a strictly contracting map, and since the intersection is uniformly discrete it must be $\{0\}$.}, $\widetilde r^*$ is the unique vector $w\in S$ which satisfies $\widetilde r + w \in E^{rec}$. Stated differently, $\widetilde r^*=\pi_S \circ \pi_U^{-1}(\widetilde r)$ is the image under $\pi_S$ of a preimage in $E^{rec}$ of $\widetilde r$ under $\pi_U$.
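In the Fibonacci example the decomposition $W=U\oplus S$ and the star map can be checked numerically. The sketch below (in Python) reproduces only the data given above; the lattice point $\xi$ is an assumed example. It verifies that $(\theta,1)$ and $(-\theta^{-1},1)$ are eigenvectors of $\varphi^t$ and computes, for $\xi\in E^{rec}\cong{\mathbb Z}^2$, the components $\pi_U(\xi)$ and $\pi_S(\xi)$; by the discussion above, $\pi_S(\xi)$ is the star image of $\widetilde r=\pi_U(\xi)$.
\begin{verbatim}
import numpy as np

theta = (1 + np.sqrt(5)) / 2               # golden mean
u = np.array([theta, 1.0])                 # spans U (unstable eigenline, eigenvalue theta)
s = np.array([-1.0 / theta, 1.0])          # spans S (stable eigenline, eigenvalue 1 - theta)
B = np.column_stack([u, s])

phi_t = np.array([[1.0, 1.0], [1.0, 0.0]])
assert np.allclose(phi_t @ u, theta * u)
assert np.allclose(phi_t @ s, (1 - theta) * s)

def star_decomposition(xi):
    """xi: a lattice point of E^rec ~ Z^2.  Returns (pi_U(xi), pi_S(xi))."""
    a, b = np.linalg.solve(B, xi)
    return a * u, b * s

xi = np.array([2.0, 1.0])                  # an assumed lattice point, for illustration only
r_tilde, r_star = star_decomposition(xi)
print("pi_U(xi) =", r_tilde, "  pi_S(xi) =", r_star)
print("sum check:", np.allclose(r_tilde + r_star, xi))
print("contraction on S:", np.linalg.norm(phi_t @ r_star) < np.linalg.norm(r_star))
\end{verbatim}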
Let $S_2$ be the subspace of $S$ generated by the subleading conjugates of $\theta$, i.e.\ the eigenspace of $\varphi^t_{\mathbb R}$ to the eigenvalues $\theta_2,\cdots,\theta_L$, which are characterised by the property that they all have the same modulus: $|\theta_j|=|\theta_2|$ for all $2\leq j\leq L\leq J$. We define the {\em reduced star map} ${}^\star:U\to S_2$ by
\begin{equation}\label{eq-redstar}
r\mapsto \widetilde r ^\star = \sum_{j=2}^L\sum_{\nu=1}^{dJ} p_\nu(\theta_j) \beta^\nu\,.
\end{equation}
By linearity we get
\begin{equation}\label{eq-geom}
\sum_{j=2}^L p_{\beta(r)}(\theta_j) = \langle\widetilde r^\star,\beta\rangle\,,
\end{equation}
which yields the geometric interpretation of the polynomials $p_{\beta(r)}$ we looked for.
\begin{rem}{\rm
If $\theta$ is not unimodular the arguments are essentially the same, except that one has to work with inverse limits. The basic differences are that $n_0$ might be strictly larger than $0$ in Theorem~\ref{thm-Pisot} and that $\varphi$ is no longer invertible. In fact, in the non-unimodular case Theorem~\ref{thm-BK} has to be modified in the following way \cite{BK}: there exists a dense rank $dJ$ subgroup $F$ such that $E = \lim_\to (F,\varphi)$. In particular, $\hat E$ is an inverse limit of $dJ$-tori. Now one can construct the Euclidean space $W$ with its stable and unstable subspaces under $\varphi^t_{\mathbb R}$ as above, but for $F$ instead of $E$. Then $\hat E$ corresponds to the subgroup $E^{rec}:=\bigcup_{n\geq 0} {\varphi^t_{\mathbb R}}^{-n}( F^{rec})\subset W$.\footnote{Note that $E^{rec}$ is defined by this union; it is not the reciprocal lattice of $E$, which would not make sense, as $E$ is not a regular lattice in $W$.} With these re-interpretations, the map $r\mapsto \widetilde r^*$ is the same, namely $\widetilde r$ is the lift of $\alpha_r(0)$, where $\alpha_r$ is the action of $r\in{\mathbb R}^d$ on the torus $\hat F$, and $\widetilde r^*$ is the unique vector $w\in S$ which satisfies $\widetilde r + w \in E^{rec}$.
} \end{rem}

\subsection{Dirichlet forms}
\label{ssec-DirForm}
The goal of this section is to investigate when the formal expression for the Dirichlet forms can be made rigorous. We will see that, apart from trivial cases, this fixes the values of $\rho_{tr}$ and $\rho_{lg}$. The study is technical. We explain the steps of the derivation, which are similar for both forms, but we do not give all the straightforward but tedious details of the technical estimates.

We start by specializing the formal expression of the Dirichlet form given in equations~\eqref{eq-Q2} and~\eqref{eq-qn} to our set-up. According to equation~\eqref{eq-COmega}, we view the representation of $h\in C(\Omega_\Phi)$ as the sum of elementary functions \( \pi(h)= \sum_{v\in{\mathcal V}} h_{tr}^v \otimes h_{lg}^v\). By Theorem~\ref{thm-specmeas}, any such $h_{\alpha}^v$ is a strongly regular operator on ${\mathfrak H}_{\alpha}^v$.
Hence, by Lemma~\ref{lem-prodform}, we can decompose the form as follows
\[
Q(f,g) = {\mathcal T}\bigl( [D,\pi(f)]^* [D,\pi(g)] \bigr) = Q_{lg}(f,g) + Q_{tr}(f,g)\,,
\]
with
\begin{subequations}
\label{eq-DirForm-lgtr}
\begin{equation}
\label{eq-DirForm-lg}
Q_{lg}(f,g) = \sum_{v\in{\mathcal V}} \textrm{freq}(t_v) \, {\mathcal T}\bigl( {\mathbf 1}\otimes [D_{lg}^v,f_{lg}^v]^* [D_{lg}^v,g_{lg}^v] \bigr)\, {\mathcal T}_{tr}^v\bigl( f_{tr}^{v*} g_{tr}^v \bigr)
\end{equation}
\begin{equation}
\label{eq-DirForm-tr}
Q_{tr}(f,g) = \sum_{v\in{\mathcal V}} \textrm{vol}(t_v) \, {\mathcal T}\bigl( [D_{tr}^v,f_{tr}^v]^* [D_{tr}^v,g_{tr}^v] \otimes {\mathbf 1} \bigr)\, {\mathcal T}_{lg}^v \bigl( f_{lg}^{v*} g_{lg}^v \bigr)
\end{equation}
\end{subequations}
We are going to show in the next two paragraphs that, for $f,g$ in a suitable core, the operators \([D_{lg}^v,f_{lg}^v]^* [D_{lg}^v,g_{lg}^v]\) are strongly regular, and the operators \([D_{tr}^v,f_{tr}^v]^* [D_{tr}^v,g_{tr}^v]\) are the sums of a strongly regular and a non resonant part. Hence by Corollary~\ref{cor-prodstrongreg} and Lemma~\ref{lem-resphi} we get the following decomposition of the forms:
\begin{subequations}
\label{eq-DirForm-lgtr2}
\begin{equation}
\label{eq-DirForm-lg2}
Q_{lg}(f,g) = \sum_{v\in{\mathcal V}} n_v \, Q_{lg}^v(f_{lg}^v,g_{lg}^v) \int_{\Xi_{t_v}} f_{tr}^{v*} g_{tr}^v \,d \mu_{tr}^v \,,
\end{equation}
\begin{equation}
\label{eq-DirForm-tr2}
Q_{tr}(f,g) = \sum_{v\in{\mathcal V}} n_v \, Q_{tr}^v(f_{tr}^v,g_{tr}^v) \int_{t_v} f_{lg}^{v*} g_{lg}^v \,d \mu_{lg}^v \,,
\end{equation}
\end{subequations}
where \(n_v= \textrm{vol}(t_v)\textrm{freq}(t_v)\) is the normalization factor in equation~\eqref{eq-decspecstate}, $d\mu_{tr}^v$ is the normalized invariant measure on $\Xi_{t_v}$ (by the results of Section~\ref{ssec-trST}), and $d \mu_{lg}^v$ the normalized Lebesgue measure on $t_v$ (by the results of Section~\ref{ssec-lgST}). Each of the forms $Q_{lg}^v$ and $Q_{tr}^v$ is of the type given in Section~\ref{ssec-form}; we only have to substitute the expression
\begin{equation}
\label{eq-deltaef}
\delta_e^\alpha f = \frac{f\circ\RMob^{-1}(r(e)) -f\circ \RMob^{-1}(s(e))}{\rho_\alpha^n}
\end{equation}
into $q_{\alpha,n}^v$ in equation~\eqref{eq-qn} for $Q_{\alpha}^v$, $\alpha\in\{tr,lg\}$ and $v\in{\mathcal V}$. It follows from Proposition~\ref{prop-form} that the forms $Q_{tr}^v$ and $Q_{lg}^v$ are symmetric, positive definite, and Markovian on the domains \( {\mathcal D}_{tr}^v=\bigl\{f \in L^2_{{\mathbb R}}(\Xi_{t_v},d \mu_{tr}^v) \, : \, Q_{tr}^v(f,f) < +\infty \bigr\} \) and \( {\mathcal D}_{lg}^v=\bigl\{f \in L^2_{{\mathbb R}}({t_v},d \mu_{lg}^v) \, : \, Q_{lg}^v(f,f) < +\infty \bigr\} \), respectively.

\paragraph{The longitudinal form}
Let us first look at the longitudinal part $Q_{lg}^v$, which is simpler. We show that $q^v_{lg,n}(f,g)$ has a limit for suitable $f,g$. This will prove that \([D_{lg}^v,f_{lg}^v]^* [D_{lg}^v,g_{lg}^v]\) is strongly regular by Corollary~\ref{cor-cesar}, and will imply the decomposition of $Q_{lg}$ given in equation~\eqref{eq-DirForm-lg2}. We wish to adapt the parameter $\rho_{lg}$ so as to obtain a non-trivial form with a core which is dense in $C^1(t_v)$.
Given an edge $h\in\hat{\mathcal H}_{lg}$, let us denote by $a_h\in {\mathbb R}^d$ the translation vector between the punctures of the microtiles associated with $s(h)$ and $r(h)$ in the decomposition of the tile associated with $s^2(h)=s(r(h))$. If $a_e \in {\mathbb R}^d$ denotes the corresponding vector for $e\in E_{lg,n}^v(h)$, equation~\eqref{eq-translg} gives $a_e=\theta^{-n}a_h$. For $n$ large we can thus approximate (uniformly, by uniform continuity of the derivative of $f$)
\[
\delta^{lg}_{e} f \simeq \left( \frac{\theta^{-1}}{\rho_{lg}} \right)^{n} \ (a_h \cdot \nabla) f(x_e)\,, \quad \text{\rm where} \quad x_e = \RMob^{-1}(s(e))\,.
\]
Choosing $f,g\in C^2_{\mathbb R}(t_v)$, we can substitute the above approximation in $q_{lg,n}^v(f,g)$, up to an error term uniform in $e$, which gives
\[
q_{lg,n}^v(f,g) \simeq \left( \frac{\theta^{-1}}{\rho_{lg}}\right)^{2n} \frac{1}{\#E_{lg,n}^v} \sum_{h\in \hat{\mathcal H}_{lg}} \sum_{e \in E_{lg,n}^v(h)} (a_h \cdot \nabla) f(x_e) \; (a_h \cdot \nabla) g(x_e) \,,
\]
where $E_{lg,n}^v(h)$ is the set of edges of type $h$ in $E_{lg,n}^v$. In order to estimate the above sum, we decompose $t_v$ into boxes. Assuming that $n$ is large, we choose an integer $l=l_{n}$ such that \(1\ll l\ll n\), and consider the boxes \(B_\gamma=\RMob^{-1}([\gamma])\) for \(\gamma \in \Pi_{l}^v(\widetilde{\mathcal G})\), the set of paths of length $l$ which start at $v$ at level $0$ (here $[\gamma]$ is the set of infinite paths which agree with $\gamma$ from level $0$ to level $l$). The idea is that $B_\gamma$ should be large enough to allow us to make averages over it, yet small enough for continuous functions to be approximately constant on it: \(F(x_e) \simeq F(x_\gamma)\), for some $x_\gamma\in B_\gamma$, and all $e$ for which $x_{e}\in B_\gamma$. We have
\[
q_{lg,n}^v(f,g) \simeq \left( \frac{\theta^{-1}}{\rho_{lg}}\right)^{2n} \sum_{h\in \hat{\mathcal H}_{lg}} \sum_{\gamma\in\Pi_{l}^v} \frac{\#E_{lg,n}^v(h,\gamma) }{\#E_{lg,n}^v} \ (a_h \cdot \nabla) f(x_{\gamma}) \; (a_h \cdot \nabla) g(x_{\gamma}) \,,
\]
for some $x_\gamma\in B_\gamma$, and where $E_{lg,n}^v(h,\gamma)$ stands for the set of edges of type $h$ whose associated (range and source) infinite paths agree with $\gamma$ from level $0$ to level $l$. Using the estimate of the powers of the substitution matrix in equation~\eqref{eq-An} (note that we use the {\em transpose} of $A$ for $\widetilde{\mathcal G}$ here) we have
\[
\frac{\#E_{lg,n}^v(h,\gamma) }{\#E_{lg,n}^v} = \frac{L_{r(\gamma)} R_{s^2(h)}\lambda_{\text{\rm \tiny PF}}^{n-1-l} (1+o(1))}{L_v\sum_{h'\in\hat{\mathcal H}_{lg}}R_{s^2(h')}\lambda_{\text{\rm \tiny PF}}^{n-1} (1+o(1))} = c_{lg} \,\textrm{freq}(t_{s^2(h)}) \, \mu_{lg}^v(B_\gamma) \;(1+o(1))
\]
with
\begin{equation}\label{eq-clg}
c_{lg}= \Bigl( \sum_{h\in\hat{\mathcal H}_{lg}}R_{s^2(h)} \Bigr)^{-1}.
\end{equation}
The last step uses the approximation \(\mu_{lg}^v(B_\gamma)F(x_\gamma) \simeq \int_{B_\gamma} F(x)\, d\mu_{lg}^v\) to obtain
\[
\left( \frac{\theta^{-1}}{\rho_{lg}}\right)^{-2n} q_{lg,n}^v(f,g) \simeq q^v_{lg}(f,g) := c_{lg} \sum_{h\in\hat{\mathcal H}_{lg}} \textrm{freq}(t_{s^2(h)}) \int_{t_v} (a_h \cdot \nabla) f \; (a_h \cdot \nabla) g \; d \mu_{lg}^v\,.
\]
Hence, if $\rho_{lg}=\theta^{-1}$, the sequence $q_{lg,n}^v(f,g)$ converges to $q_{lg}^v(f,g)$, and therefore by Corollary~\ref{cor-cesar} we have \(Q_{lg}^v(f,g)=q_{lg}^v(f,g)\). We now have to compute $Q_{lg}$ from equation~\eqref{eq-DirForm-lg2}, summing up the $Q_{lg}^v$ over $v\in{\mathcal V}$. Notice that by the decomposition of the representation of a function as $\pi(h)=\sum_{v\in{\mathcal V}} h_{tr}^v\otimes h_{lg}^v$, we have
\[
\sum_{v\in{\mathcal V}} n_v \, \int_{t_v} (a_h \cdot \nabla) f_{lg}^v \; (a_h \cdot \nabla) g_{lg}^v \; d \mu_{lg}^v \int_{\Xi_{t_v}} f_{tr}^v g_{tr}^v\, d\mu_{tr}^v = \int_{\Omega_\Phi} (a_h \cdot \nabla_{lg} )f \; (a_h\cdot \nabla_{lg}) g \; d\mu\,,
\]
where $\nabla_{lg}$ is the {\em longitudinal gradient} on $\Omega_\Phi$: it takes derivatives along the leaves of the foliation, so on the representation of $C(\Omega_\Phi)$ it reads simply $\nabla_{lg}={\mathbf 1}\otimes \nabla_{{\mathbb R}^d}$. Define the operator on $L^2_{{\mathbb R}}(\Omega_\Phi, d\mu)$:
\begin{equation}
\label{eq-lgLaplace}
\Delta_{lg} = c_{lg} \nabla_{lg}^\dagger {\mathcal K} \nabla_{lg} \,, \quad \text{\rm with} \quad
{\mathcal K} = \sum_{h\in\hat{\mathcal H}_{lg}} \textrm{freq}(t_{s^2(h)})\, a_h \otimes a_h\,.
\end{equation}
We thus have
\begin{equation}
Q_{lg} (f,f) = \left\{ \begin{array}{ll}
\langle f, \; \Delta_{lg} \; f\rangle_{ L^2_{{\mathbb R}}(\Omega_\Phi, d\mu)} & \mbox{if } \rho_{lg} =\theta^{-1} \\
0 & \mbox{if } \rho_{lg} > \theta^{-1} \\
+\infty & \mbox{if } \rho_{lg} < \theta^{-1}
\end{array}\right. \,, \quad \text{\rm for all } f\in C_{lg}^2(\Omega_\Phi)\,,
\end{equation}
where $C_{lg}^2(\Omega_\Phi)$ is the space of longitudinally $C^2$ functions on $\Omega_\Phi$. So we see that for $\rho_{lg}\ge \theta^{-1}$, $\Delta_{lg}$ is essentially self-adjoint on the domain $C_{lg}^2(\Omega_\Phi)$, and therefore the form $Q_{lg}$ is closable. For $\rho_{lg}<\theta^{-1}$ the form is not closable.

\paragraph{The transversal form}
We show that $q_{tr,n}^v(f,g)$ decomposes, for suitable $f,g$, into two pieces: one which has a limit, and one which oscillates with a phase which will be assumed to be non resonant (Definition~\ref{def-resphase}). This will prove that \([D_{tr},f_{tr}]^* [D_{tr},g_{tr}]\) is the sum of a strongly regular and a non resonant part, and will imply, by Corollary~\ref{cor-prodstrongreg} and Lemma~\ref{lem-stateres}, the decomposition of $Q_{tr}$ claimed in equation~\eqref{eq-DirForm-tr2}. We wish to adapt the parameter $\rho_{tr}$ so as to obtain a non-trivial form with a core which is dense in $C(\Xi_{t_v})$.

One might be tempted to consider functions which are transversally locally constant to define its core. However, it quickly becomes clear that $Q_{tr}$ vanishes on such functions. Indeed, if $f\in C(\Omega_\Phi)$ is transversally locally constant, then there exists $n_f\in {\mathbb N}$ such that $\RMob^{-1}(\Pi_{0,n_f}({\mathcal G}))$ gives a partition of $\Xi$ on which $f$ is constant. So we see from equation~\eqref{eq-deltaef} that $\delta^{tr}_{e}f = 0$ for all $e\in E_{tr,n}$, $n\ge n_f$, and hence $Q_{tr}(f,f)=0$. We will therefore consider a different core, namely the space generated by the eigenfunctions of the action. We assume that these form a dense set of $L^2(\Omega_\Phi)$, which is equivalent to the tiling being pure point diffractive. Consider an eigenfunction to the eigenvalue $\beta$, $f_\beta:\Omega_\Phi\to{\mathbb C}$.
By definition $f_\beta$ satisfies \(f_\beta(\omega+r)=e^{2\pi\imath \beta(r)} f_\beta(\omega)\) for all $\omega\in \Omega_\Phi$ and $r\in {\mathbb R}^d$. Given $v\in{\mathcal V}$, $h\in \hat{\mathcal H}_{tr}$, and any $e\in E_{tr,n}^{v}(h)$, we have \(\RMob^{-1}(r(e)) = \RMob^{-1}(s(e)) + r_e= \RMob^{-1}(s(e)) + \theta^n r_h\) by equation~\eqref{eq-transtr}. Then equation~\eqref{eq-deltaef} gives
\[
\delta^{tr}_{e}f_\beta = \rho_{tr}^{-n}\,\bigl(e^{2\pi \imath \theta^n \beta(h)} - 1\bigr)\, f_\beta\circ\RMob^{-1}(s(e))
\]
and hence
\begin{multline*}
q_{tr,n}^v(f_\beta,f_{\beta'}) = \frac{1}{\# E_{tr,n}^v} \sum_{h\in \hat{\mathcal H}_{tr}} \frac{\bigl(e^{-2\pi \imath\beta(h)\theta^n} - 1\bigr)\bigl( e^{2\pi \imath\beta'(h)\theta^n} - 1\bigr)}{\rho_{tr}^{2n}} \\
\sum_{e \in E_{tr,n}^v(h)} \overline{f_\beta\circ\RMob^{-1}(s(e))} f_{\beta'}\circ\RMob^{-1}(s(e))\,.
\end{multline*}
As for the longitudinal form, we average over boxes which partition $\Xi_{t_v}$: \(B_\gamma= \RMob^{-1}([\gamma])\), for \(\gamma \in \Pi_{0,l}({\mathcal G})\), and $1\ll l\ll n$, and we get
\[
q_{tr,n}^v(f_\beta,f_{\beta'}) \simeq c_{tr} \sum_{ h\in \hat{\mathcal H}_{tr}} \frac{\bigl(e^{-2\pi \imath\beta(h)\theta^n} - 1\bigr)\bigl( e^{2\pi \imath\beta'(h)\theta^n} - 1\bigr)}{\rho_{tr}^{2n}} \textrm{freq}(t_{s^2(h)}) \int_{\Xi_{t_v}} \overline{f_\beta} f_{\beta'}\, d\mu_{tr}^v
\]
with
\begin{equation}\label{eq-ctr}
c_{tr}= \Bigl( \sum_{h\in\hat{\mathcal H}_{tr}}L_{s^2(h)}\Bigr)^{-1}.
\end{equation}
In view of equation~\eqref{eq-DirForm-tr2} let us consider
\[
q_{tr,n}(f,g) = \sum_{v\in{\mathcal V}} n_v \; q_{tr,n}^v (f_{tr}^v,g_{tr}^v) \int_{t_v} \overline{f_{lg}^v} g_{lg}^v\, d\mu_{lg}^v \,.
\]
Summing these terms over $v\in{\mathcal V}$, the integrals combine to \(\int_{\Omega_\Phi} \overline{f_\beta} f_{\beta'}\, d\mu = \delta_{\beta \beta'} \|f_\beta\|^2\) by orthogonality of the eigenfunctions. Hence we get
\[
q_{tr,n}(f_\beta,f_{\beta'}) \simeq c_{tr} \delta_{\beta\beta'} \|f_{\beta}\|^2 \sum_{ h\in \hat{\mathcal H}_{tr}} \textrm{freq}(t_{s^2(h)}) \left| \frac{e^{2\pi \imath\beta(h)\theta^n} - 1}{\rho_{tr}^{n}} \right|^2 \,.
\]
We now use the results of Section~\ref{ssect-Pisot}: using equation~\eqref{eq-Pisot}, expanding the exponential, and neglecting terms proportional to $ \left(\frac{|\theta_j|}{\rho_{tr}}\right)^{2n} $ against $ \left(\frac{|\theta_2|}{\rho_{tr}}\right)^{2n}$ if $|\theta_j|<|\theta_2|$, we approximate
\[
\left| \frac{e^{2\pi \imath\beta(h)\theta^n} - 1}{\rho_{tr}^{n}} \right|^2 \simeq (2\pi)^2\left|\sum_{j=2}^J \frac{p_{\beta(h)}(\theta_j)\theta_j^n}{\rho_{tr}^{n}} \right|^2 \simeq (2\pi)^2\left(\frac{|\theta_2|}{\rho_{tr}}\right)^{2n} \sum_{j,j'=2}^L \overline{p_{\beta(h)}(\theta_j)} p_{\beta(h)}(\theta_{j'}) e^{\imath (\alpha_j-\alpha_{j'})n}\,.
\]
Here $L$ is such that $|\theta_j|=|\theta_2|$ for $2\le j \le L$ and we wrote $\theta_j = |\theta_2| e^{\imath\alpha_j}$. The second approximation is justified by the fact that $p_{\beta(h)}(\theta_{j})\neq 0$ for all $j$. We assume now that the phases are non resonant (Definition~\ref{def-resphase}):
\[
\alpha_j - \alpha_{j'} + 2\pi k + 2\pi \frac{\log\rho_{tr}}{\log\rho_{lg}} k' \neq 0\,, \quad \forall k,k' \in {\mathbb Z}
\]
for all $j\neq j'$.
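This non resonance condition cannot be verified by a finite computation, but a finite search already rules out the obvious resonances. The following sketch (in Python) scans a window of integers $k,k'$ for a given collection of phases $\alpha_j$ and a given ratio $\log\rho_{tr}/\log\rho_{lg}$; the numerical values used below are placeholders and have to be replaced by the data of the substitution at hand.
\begin{verbatim}
import itertools
import numpy as np

def resonances(alphas, ratio, K=50, tol=1e-8):
    """Return all (j, j', k, k') with |k|,|k'| <= K for which
    alpha_j - alpha_j' + 2*pi*k + 2*pi*ratio*k' is numerically zero."""
    hits = []
    for j, jp in itertools.permutations(range(len(alphas)), 2):
        for k, kp in itertools.product(range(-K, K + 1), repeat=2):
            if abs(alphas[j] - alphas[jp] + 2*np.pi*k + 2*np.pi*ratio*kp) < tol:
                hits.append((j, jp, k, kp))
    return hits

a = 2.0                                   # assumed phase of theta_2 = |theta_2| e^{i a}
alphas = [a, 2*np.pi - a]                 # a complex conjugate pair of sub-leading roots
ratio = np.log(0.7) / np.log(0.4)         # assumed value of log(rho_tr)/log(rho_lg)
print(resonances(alphas, ratio))          # an empty list: no resonance found in this window
\end{verbatim}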
This non resonance assumption implies that the oscillating parts of \([D_{tr},f_{tr}]^* [D_{tr},g_{tr}]\) are non resonant, and hence by Lemma~\ref{lem-resphi} do not contribute to ${\mathcal T}\bigl([D_{tr},f_{tr}]^* [D_{tr},g_{tr}] \otimes {\mathbf 1}\bigr)$ in equation~\eqref{eq-DirForm-tr}. Concerning the non oscillating parts, that is the terms with $j=j'$, we clearly see that they vanish in the limit $n\to +\infty$ if $\rho_{tr} > |\theta_2|$ and that they tend to $+\infty$, as $n\rightarrow +\infty$, if $\rho_{tr} < |\theta_2|$. Finally, if $ \rho_{tr}=|\theta_2|$ then the non oscillating part of \(q_{tr,n}(f_\beta,f_{\beta})\) converges to
\[
- c_{tr} (2\pi)^2 \|f_{\beta}\|^2 \sum_{ h\in \hat{\mathcal H}_{tr}}\textrm{freq}(t_{s^2(h)})\sum_{j=2}^L |p_{\beta(h)}(\theta_j)|^2 \,.
\]
Define the operator $\Delta_{tr}$ on the linear space of dynamical eigenfunctions by
\begin{equation}
\label{eq-trLaplace}
\Delta_{tr} f_\beta = - c_{tr} (2\pi)^2 \sum_{ h\in\hat{\mathcal H}_{tr}}\textrm{freq}(t_{s^2(h)})\sum_{j=2}^L |p_{\beta(h)}(\theta_j)|^2 \ f_\beta \,.
\end{equation}
Then, on the space of dynamical eigenfunctions the transversal form is given by
\[
Q_{tr}(f_\beta,f_\beta) = \left\{ \begin{array}{ll}
\langle f_\beta,\Delta_{tr} f_\beta\rangle_{L^2(\Omega_\Phi,d\mu)} & \mbox{if } \rho_{tr} = |\theta_2| \\
0 & \mbox{if } \rho_{tr} > |\theta_2| \\
+\infty & \mbox{if } \rho_{tr} < |\theta_2|
\end{array}\right. \,.
\]
Clearly, $Q_{tr}$ is closable but trivial if $\rho_{tr}>|\theta_2|$, whereas $Q_{tr}$ is not closable if $\rho_{tr}<|\theta_2|$.

\paragraph{Main result}
We summarize here our results about the Dirichlet forms. For a Pisot number $\theta$ of degree $J>1$, we denote by $\theta_j$, $j=2,\cdots, J$, the other Galois conjugates, in decreasing order of modulus. We write the sub-leading conjugates in the form $\theta_j=|\theta_2|e^{\imath \alpha_j}$, $2\leq j\leq L$, where $\alpha_j\in [0,2\pi)$. In particular, $|\theta_j|<|\theta_2|$ for $j>L$.
\begin{theo}
\label{thm-DirForm}
Consider a Pisot substitution tiling of ${\mathbb R}^d$ with Pisot number $\theta$ of degree $J>1$. Assume that for all $j\neq j'\leq L$ one has
\begin{equation}
\label{eq-phasePisot}
\alpha_j - \alpha_{j'} + 2\pi k + 2\pi \frac{\log|\theta_2|}{\log\theta} k' \neq 0\,, \quad \forall k,k' \in {\mathbb Z} \,.
\end{equation}
Set $\rho_{lg} = \theta^{-1}$ and $\rho_{tr}=|\theta_2|$. If the dynamical spectrum is purely discrete then the set of finite linear combinations of dynamical eigenfunctions is a core for $Q$ on which it is closable. Furthermore, $Q = Q_{tr} + Q_{lg}$, and $Q_{tr/lg}$ has generator $\Delta_{tr/lg}=\sum_{h\in \hat{\mathcal H}_{tr/lg}}\Delta_{tr/lg}^h$ given by
\begin{eqnarray*}
\Delta_{lg}^h f_\beta &=& -c_{lg}(2\pi)^2 \mbox{\em freq}(t_{s^2(h)})\, \beta(a_h)^2 f_\beta\,,\\
\Delta_{tr}^h f_\beta &=& -c_{tr}(2\pi)^2 \mbox{\em freq}(t_{s^2(h)})\, \langle \widetilde{r_h}^\star,\beta\rangle^2 f_\beta\,,
\end{eqnarray*}
where \(c_{lg}= \bigl( \sum_{h\in\hat{\mathcal H}_{lg}}R_{s^2(h)} \bigr)^{-1}\) and \(c_{tr}=\bigl( \sum_{h\in\hat{\mathcal H}_{tr}}L_{s^2(h)}\bigr)^{-1}\).
\end{theo}
\begin{proof}
We have calculated the explicit formulas for $\Delta_{lg}^h$ and $\Delta_{tr}^h$ above. It remains to insert \eqref{eq-geom} into \eqref{eq-trLaplace}. Eigenfunctions to distinct eigenvalues are orthogonal and hence, by assumption, form an orthogonal basis for the Hilbert space.
Standard arguments then show that $\Delta^h_{tr/lg}$ is essentially self-adjoint, and hence that the form is closable.
\end{proof}
Notice that the ratio $\log(\theta)/\log(|\theta_2|)$ in equation~\eqref{eq-phasePisot} is irrational unless $\theta$ is a Pisot number of degree $J=3$ and unimodular \cite{Wald}.

\subsection{Geometric interpretation of the Laplacians}
We provide an interpretation of the Laplacians as differential operators on the maximal equicontinuous factor $\hat E$ of the tiling system. Since the dynamical spectrum is pure point and all eigenfunctions are continuous, the factor map $\pi:\Omega_\Phi\to\hat E$ becomes an isomorphism between $L^2(\Omega_\Phi,\mu)$ and $L^2(\hat E,\eta)$, where $\eta$ is the normalized Haar measure on $\hat E$. The Dirichlet form $Q$ can therefore also be regarded as a form on $L^2(\hat E,\eta)$.

We again consider first the simpler unimodular case, in which $\hat E$ is a $dJ$-torus. Each point in $\hat E$ has a tangent space which we may identify with $U\oplus S$, the direct sum of the spaces tangent to the unstable and the stable direction of $\varphi^t_{\mathbb R}$, respectively. Now the directional derivative at $x$ along $u\in U\oplus S$ is given by
$$ (\langle u,\nabla\rangle f_\beta)(x)= \frac{d}{dt} f_\beta(x+t u)\Big|_{t=0} = 2\pi\imath \langle u,\beta\rangle f_\beta(x).$$
We thus have
\begin{eqnarray*}
\Delta_{lg}^h &=& c_{lg}\,\textrm{freq}(t_{s^2(h)})\, \langle\widetilde a_h,\nabla\rangle^2 ,\\
\Delta_{tr}^h &=& c_{tr}\,\textrm{freq}(t_{s^2(h)})\, \langle\widetilde{r_h}^*,\nabla\rangle^2 .
\end{eqnarray*}
In the non-unimodular case one obtains essentially the same: $\hat E$ is an inverse limit of tori $\hat F\cong W/F^{rec}$ w.r.t.\ the map $\varphi^t_{\mathbb R}$, $\hat E =\lim_\leftarrow (W/F^{rec},\varphi^t_{\mathbb R})$. An element of $\hat E$ is a sequence $(x_n)_n$ of elements $x_n\in W/F^{rec}$ satisfying $x_n = \varphi^t_{\mathbb R}(x_{n+1})$. The action $\alpha_r$ of $r\in{\mathbb R}^d$ on $\hat E$ is thus $\alpha_r((x_n)_n) = (x_n+{\varphi^t_{\mathbb R}}^{-n}(r))_n$. The continuous functions on $\hat E$ are the direct limit of continuous functions on the tori w.r.t.\ the pull back of $\varphi^t_{\mathbb R}$, $C(\hat E) =\lim_\rightarrow (C(W/F^{rec}),{\varphi^t_{\mathbb R}}^*)$. The elements of $C^\infty (\hat E)$ are approximated by sequences $(f_n)_n$ of elements $f_n\in C^\infty(W/F^{rec})$ which are eventually $0$, modulo the equivalence relation identifying $(0,\cdots,0,f,-{\varphi^t_{\mathbb R}}^*f,0,\cdots )$ with $0$. It follows that the definition of the directional derivative is compatible with the above equivalence relation, so that the expression $\langle u,\nabla \rangle(f_n)_n$ makes sense. In particular, since each eigenfunction corresponds to an element of the form $(0,\cdots,0,f_\beta,0,\cdots )$, $\beta\in F$, we obtain the same formula as above: $ (\langle u,\nabla\rangle f_\beta)(x)= 2\pi\imath \langle u,\beta\rangle f_\beta(x)$, and hence also the same formulae for $\Delta_{lg}^h$ and $\Delta_{tr}^h$. This shows that the generators of the Dirichlet forms can be expressed as elliptic second order differential operators on $\hat E$ with constant coefficients.
\end{document}
\begin{document}
\title{\huge \bf Asymptotic behaviour of a nonlinear parabolic equation with gradient absorption and critical exponent}
\author{
\Large Razvan Gabriel Iagar\,\footnote{Departamento de Matem\'aticas, Universidad Aut\'onoma de Madrid, Campus de Cantoblanco, E--28049 Madrid, Spain. \textit{e-mail:} [email protected]},\\[4pt]
\Large Philippe Lauren\c cot\,\footnote{Institut de Math\'ematiques de Toulouse, CNRS UMR~5219, Universit\'e de Toulouse, F--31062 Toulouse Cedex 9, France. \textit{e-mail:} [email protected]},\\ [4pt]
\Large Juan Luis V{\'a}zquez\,\footnote{Departamento de Matem\'aticas, Universidad Aut\'onoma de Madrid, Campus de Cantoblanco, E--28049 Madrid, Spain. Also affiliated with ICMAT, Madrid. \textit{e-mail:} [email protected]}
}
\date{}
\maketitle
\begin{abstract}
We study the large-time behaviour of the solutions of the evolution equation involving nonlinear diffusion and gradient absorption,
$$ \partial_t u - \Delta_p u + |\nabla u|^q=0\,. $$
We consider the problem posed for $x\in \mathbb{R}^N $ and $t>0$ with nonnegative and compactly supported initial data. We take the exponent $p>2$ which corresponds to slow $p$-Laplacian diffusion. The main feature of the paper is that the exponent $q$ takes the critical value $q=p-1$ which leads to interesting asymptotics. This is due to the fact that in this case both the Hamilton-Jacobi term $ |\nabla u|^q$ and the diffusive term $\Delta_p u$ have a similar size for large times. The study performed in this paper shows that a delicate asymptotic equilibrium happens, so that the large-time behaviour of the solutions is described by a rescaled version of a suitable self-similar solution of the Hamilton-Jacobi equation $|\nabla W|^{p-1}=W$, with logarithmic time corrections. The asymptotic rescaled profile is a kind of sandpile with a cusp on top, and it is independent of the space dimension.
\end{abstract}
\noindent {\bf AMS Subject Classification:} 35B40, 35K65, 35K92, 49L25.
\medskip
\noindent {\bf Keywords:} Nonlinear parabolic equations, $p$-Laplacian equation, gradient absorption, asymptotic patterns, Hamilton-Jacobi equation, viscosity solutions.
\newpage
\section{Introduction and main results}\label{sect.intro}
In this paper we deal with the Cauchy problem associated to the diffusion-absorption equation:
\begin{equation} \label{a1}
\partial_t u - \Delta_p u + |\nabla u|^{q} = 0\,, \quad (t,x)\in Q\,,
\end{equation}
posed in $Q:= (0,\infty)\times\mathbb{R}^N$ with initial data
\begin{equation}\label{a2}
u(0,x) = u_0(x)\ge 0\,, \quad x\in\mathbb{R}^N\,,
\end{equation}
where the $p$-Laplacian operator is defined as usual by \ $ \Delta_p u := \mbox{ div }\left( |\nabla u|^{p-2}\ \nabla u \right). $ To be specific we take $p>2$, which implies finite speed of propagation, and we consider nonnegative weak solutions $u\ge 0$ with compactly supported initial data $u_0$ such that
\begin{equation} \label{a3}
u_0\in W^{1,\infty}(\mathbb{R}^N)\,, \;\; u_0\ge 0\,, \;\; \mbox{ supp }(u_0) \subset B(0,R_0)\,, \;\; u_0\not\equiv 0\,,
\end{equation}
for some $R_0>0$. Known properties of the equation ensure that the corresponding solutions will be compactly supported with respect to the space variable for every time $t>0$.
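Before turning to the analysis, let us mention that the behaviour of \eqref{a1} in the critical case is easy to explore numerically. The following sketch (in Python) is a crude explicit finite-difference scheme in one space dimension; all grid parameters are assumed for illustration only, the scheme plays no role in the proofs, and its stability is not analysed here.
\begin{verbatim}
import numpy as np

p = 3.0
q = p - 1.0
N, L = 400, 20.0                         # grid points and half-width of the interval
dx = 2 * L / N
x = np.linspace(-L, L, N + 1)
u = np.maximum(0.0, 1.0 - np.abs(x))     # compactly supported initial datum
dt = 1e-4                                # small explicit time step

def step(u):
    du = np.diff(u) / dx                 # gradient at the cell interfaces
    flux = np.abs(du) ** (p - 2) * du    # p-Laplacian flux |u_x|^{p-2} u_x
    out = u.copy()
    grad = (u[2:] - u[:-2]) / (2 * dx)   # centred gradient for the absorption term
    out[1:-1] += dt * (np.diff(flux) / dx - np.abs(grad) ** q)
    return np.maximum(0.0, out)          # solutions are nonnegative

for _ in range(20000):                   # evolve up to t = 2
    u = step(u)
mask = u > 1e-12
print("sup norm:", u.max(), " support ~ [", x[mask].min(), ",", x[mask].max(), "]")
\end{verbatim}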
The goal of the paper is to describe in detail the asymptotic behaviour of the solutions as $t\to\infty$. The equation \eqref{a1} has been studied by various authors for different values of the parameters $p\ge 2$ and $q>1$ as a model of linear or nonlinear diffusion with gradient-dependent absorption, see \cite{BKL04,BGK04,GL07,Gi05,La08} for the semilinear case $p=2$, and \cite{ATU04,BtL08,LV07,Sh04a} for the quasilinear case $p>2$. It has been shown that the large-time behaviour of this initial-value problem depends on the relative influence of the diffusion and absorption terms and leads to a classification into the following ranges of $q$:

\noindent (i) when $q>q_2:=p-N/(N+1)$ the large time behaviour is purely diffusive and the first-order absorption term disappears in the limit $t\to\infty$; this is a case of asymptotic simplification in the sense of \cite{V91}.

\noindent (ii) For $q_1:=p-1<q<q_2$ there is a behaviour given by a certain balance of diffusion and absorption in the form of a self-similar solution, its existence being established in \cite{Sh04a}; there is no asymptotic simplification;

\noindent (iii) for $1<q<p-1$ the two last authors have recently shown in \cite{LV07} that the main term is the absorption term, leading to a separate-variables asymptotic behaviour, with diffusion playing a secondary role. We thus have asymptotic simplification, now with absorption as the dominating effect.

The two critical cases $q=q_2$ and $q=q_1$ represent limit behaviours, and as is often the case in such situations, they give rise to interesting dynamics due to the curious interaction of two effects of similar power. Such situations usually lead to phenomena called \textsl{resonances} in mechanics, with interesting non-trivial mathematical analysis. Such interesting behaviour has been shown in particular in \cite{GL07} for $q=q_2$, in the linear case $p=2$, with the result that logarithmic factors modify the purely diffusive behaviour found for $q>q_2$. A similar situation is expected to be met when $p>2$ and $q=q_2$.

We devote this paper to describe the other limit case $q=q_1=p-1$ when $p>2$, the latter condition guaranteeing that $q>1$. In that case the diffusion and the first order term have similar asymptotic size and logarithmic corrections appear in the asymptotic rates. The mathematical analysis that we perform below is strongly tied to a good knowledge of the expansion of the support of the solution, or in other words, the location of the free boundary, which happens to be approximately a sphere of radius $|x|\sim C\log t$ for large times $t$. From now on, we assume that
$$ q =q_1=p-1\,. $$

\subsection{Bounds in suitable norms}
Studying the large time behaviour of solutions and interfaces of our problem relies on suitable and very precise estimates. The time expansion of the support and the time decay of solutions to the Cauchy problem \eqref{a1}-\eqref{a2}, with non-negative and compactly supported initial data, have been recently investigated in \cite{BtL08}.
The following results are proved:
\begin{proposition}\label{pra1}
Under the above assumptions on the equation and data, the Cauchy problem \eqref{a1} has a unique non-negative viscosity solution
$$ u\in\mathcal{BC}([0,\infty)\times\mathbb{R}^N)\cap L^\infty(0,\infty;W^{1,\infty}(\mathbb{R}^N)) $$
which satisfies:
\begin{equation} \label{a4}
0 \le u(t,x) \le \|u_0\|_\infty\,, \quad (t,x)\in Q\,,
\end{equation}
\begin{equation}\label{a4b}
\|\nabla u(t)\|_\infty \le \|\nabla u_0\|_\infty\,, \quad t\ge 0\,,
\end{equation}
\begin{equation} \label{a5}
\mbox{ supp }(u(t)) \subset B(0,C_1\ \log{t}) \;\;\mbox{ for all }\;\; t\ge 2\,,
\end{equation}
together with the following norm estimates
\begin{eqnarray}
\label{a6} \|u(t)\|_1 & \le & C_2\ t^{-1/(p-2)}\ (\log{t})^{(p(N+1)-2N-1)/(p-2)} \;\;\mbox{ for all }\;\; t\ge 2\,, \\
\label{a7} \|u(t)\|_\infty & \le & C_2\ t^{-1/(p-2)}\ (\log{t})^{(p-1)/(p-2)} \;\;\mbox{ for all }\;\; t\ge 2\,, \\
\label{a8} \|\nabla u(t)\|_\infty & \le & C_2\ t^{-1/(p-2)}\ (\log{t})^{1/(p-2)} \;\;\mbox{ for all }\;\; t\ge 2\,,
\end{eqnarray}
for some positive constants $C_1$ and $C_2$ depending only on $p$, $N$, and $u_0$.
\end{proposition}
Here and below, $\mathcal{BC}([0,\infty)\times\mathbb{R}^N)$ denotes the space of bounded and continuous functions on $[0,\infty)\times\mathbb{R}^N$ and $\Vert \cdot\Vert_r$ denotes the $L^r(\mathbb{R}^N)$-norm for $r\in [1,\infty]$. As we shall see, these bounds will be very useful in the sequel. The well-posedness of \eqref{a1}-\eqref{a2} and the properties \eqref{a4}, \eqref{a5}, and \eqref{a6} are established in \cite[Theorems~1.1 \&~1.6, Corollary~1.7]{BtL08}, while \eqref{a7} and \eqref{a8} follow from \eqref{a6} and \cite[Proposition~1.4]{BtL08}. We will also use the notation $r_{+}=\max\{r,0\}$ for the positive part of the real number $r$.

\subsection{Main results}\label{Sect.main}

We describe next the main contribution of this paper. As already mentioned, our goal is to study the asymptotic behaviour of the solution $u$ of the resonant problem \eqref{a1} with $p>2$ and $q=p-1$, and with compactly supported and nonnegative initial data. Moreover, since the equation has the property of finite speed of propagation, it is natural to raise the question about how the interface and the support of the solution expand in time. We also answer this question in the present paper.

\medskip

\noindent \textbf{Asymptotic behaviour.} The main result is the following:
\begin{theorem}\label{asympt.main}
Let $u$ be the solution of the Cauchy problem \eqref{a1}-\eqref{a2}, with $u_0$ as in \eqref{a3}. Then, $u$ decays in time like \ $O(t^{-1/(p-2)}(\log t)^{(p-1)/(p-2)})$ \ and the support spreads in space like \ $O(\log t)$ as $t\to\infty$.
More precisely, we have the limit:
\begin{equation}\label{main.asympt}
\lim_{t\to\infty}\sup_{x\in\mathbb{R}^N}\left| \frac{c_p\, t^{1/(p-2)}}{(\log{t})^{(p-1)/(p-2)}}\ u\left(t,x\right) - \left(1-\frac{(p-2)|x|}{\log\,t}\right)_{+}^{(p-1)/(p-2)} \right| = 0\,,
\end{equation}
with the precise constant
\begin{equation*}
c_p=(p-2)^{1/(p-2)}(p-1)^{(p-1)/(p-2)}.
\end{equation*}
\end{theorem}

In the proof, the expression of the asymptotic profile is obtained after a somewhat involved rescaling of $u$ and $x$, in the form of the uniform limit
\begin{equation}\label{main.asympt2}
\frac{t^{1/(p-2)}}{(\log{t})^{(p-1)/(p-2)}}\ u\left(t,x \right) \to (p-2)^{-p/(p-2)}\ W((p-2)x/\log t),
\end{equation}
where the function
\begin{equation}
W(x):=\left(\frac{p-2}{p-1}\ (1-|x|)_{+}\right)^{(p-1)/(p-2)}
\end{equation}
is the unique viscosity solution to the stationary form of the rescaled problem, namely:
\begin{equation}\label{eqlim}
|\nabla W|^{p-1}-W=0 \ \mbox{ in } \ B(0,1)\,, \quad W=0 \ \mbox{ on } \ \partial B(0,1), \quad W>0 \ \mbox{ in } \ B(0,1).
\end{equation}
Let us notice that, as usual in resonance cases, the limit profile is not a self-similar solution, but it introduces logarithmic corrections to a self-similar, separate-variables profile (which in our case is $t^{-1/(p-2)}(p-2)^{-p/(p-2)}W((p-2)x)$). The uniqueness of $W$ as a viscosity solution of \eqref{eqlim} is very important in the proof and follows from \cite{I87}.

In consonance with \eqref{main.asympt}, we show that the shape of the support of $u(t)$ gets closer to a ball while expanding as time goes by. This is in sharp contrast with the situation described in \cite{LV07} for \eqref{a1} in the intermediate range $q\in (1,p-1)$, $p>2$, where the positivity set stays bounded and can have a very general shape. When $q=p-1$, the diffusion thus acts in three directions: the scaling is different, the support grows unboundedly with time, and the geometry of the positivity set simplifies. Another remarkable consequence of the interplay between diffusion and absorption is the fact that the asymptotic profile is radially symmetric and does not depend on the space dimension.

We devote Section~\ref{Sect.scaling2} to the proof of Theorem~\ref{asympt.main}. For the proof, we use a precise estimate for the propagation of the positivity set, that is described below. Another tool is the existence of a large family of subsolutions having a special, explicit form and allowing for an argument with viscosity solutions to finish the proof.

\medskip

\noindent \textbf{Propagation of the positivity set.} We denote the positivity set and its maximal expansion radius by
\begin{equation}
\mathcal{P}_{u}(t):=\{x\in\mathbb{R}^N: \ u(t,x)>0\}, \qquad \gamma(t)=\sup\{|x|: \ x\in \mathcal{P}_{u}(t)\}
\end{equation}
respectively. Then:
\begin{theorem}\label{main.posit}
Under the running notations and assumptions, we have:
\begin{equation}
\lim\limits_{t\to\infty}\frac{\gamma(t)}{\log\,t}=\frac{1}{p-2}.
\end{equation}
Moreover, the free boundary of $u$ has the same speed of expansion in any given direction $\omega\in\mathbb{R}^N$ with $|\omega|=1$.
\end{theorem}

In fact, we give more precise estimates for the expansion of the positivity region, obtained via comparison with some well-chosen traveling waves. The proof of Theorem~\ref{main.posit} is performed in Section~\ref{sect.posit}.

\medskip

\noindent \textbf{Two scalings.} In order to prove the two theorems, we have to perform two different scaling steps. The first scaling, described in formula (\ref{a9}) below, is the natural one corresponding to standard scaling invariance; such a scaling has also been used in \cite{LV07} in the case $q\in(1,p-1)$ to obtain the correct scale of the solutions. But for $q=p-1$, we observe that a phenomenon of grow-up appears, which is typical for resonance cases: the effect of the resonance implies that the rescaled solution does not stabilize in time; on the contrary, it grows and becomes unbounded in infinite time. That is why we need a second scaling, given by the new variable $y$ and the new unknown function $w$ defined in (\ref{def.y}) and (\ref{def.w}), which is less natural but turns out to be adapted to our problem: it takes into account the logarithmic corrections (suggested by the a priori estimates of Proposition~\ref{pra1}, which turn out to be sharp), and it is adapted to the size of the grow-up phenomenon; thus, in the rescaled variables we can describe the actual shape and behaviour of the solution.

\section{Scaling variables I}\label{Sect.Scaling1}

We recall that $p>2$ and $q=p-1$. We introduce a first set of \textit{self-similar} variables; we keep the space variable $x$ and introduce a logarithmic time
\begin{equation}
\tau:=\frac{1}{p-2}\,\ln{(1+(p-2)t)},
\end{equation}
as well as the new unknown function $v=v(\tau,x)$ defined by
\begin{equation} \label{a9}
u(t,x) = (1+(p-2)t)^{-1/(p-2)}\ v\left( \tau, x \right)\,, \quad (t,x)\in [0,\infty)\times\mathbb{R}^N\,.
\end{equation}
Clearly, $v$ solves the rescaled equation
\begin{equation}
\partial_\tau v - \Delta_p v + |\nabla v|^q -v = 0\,, \quad (\tau,x)\in Q\,, \label{a10}
\end{equation}
with the same initial condition
\begin{equation} \label{a11}
v(0) = u_0\,, \quad x\in\mathbb{R}^N\,.
\end{equation}
We next translate the a priori bounds (\ref{a6}), (\ref{a7}), and (\ref{a8}) in terms of the rescaled function $v$: there is $C_3>0$ depending only on $p$, $N$, and $u_0$ such that
\begin{equation} \label{a12}
\frac{\Vert v(\tau)\Vert_1}{\tau^{(p(N+1)-2N-1)/(p-2)}} + \frac{\Vert v(\tau)\Vert_\infty}{\tau^{(p-1)/(p-2)}} + \frac{\Vert \nabla v(\tau)\Vert_\infty}{\tau^{1/(p-2)}} \le C_3 \;\;\mbox{ for }\;\; \tau\ge 1\,.
\end{equation}

\subsection{The positivity set: time monotonicity}

We define the positivity set $\mathcal{P}_v(\tau)$ of the function $v$ at time $\tau\ge 0$ by
\begin{equation}\label{b1}
\mathcal{P}_v(\tau):= \left\{ x\in\mathbb{R}^N : \;\; v(\tau,x)>0 \right\}\,.
\end{equation}
\begin{proposition}\label{prb1}
For $\tau_1\in [0,\infty)$ and $\tau_2\in (\tau_1,\infty)$ we have
\begin{equation}\label{b2}
\mathcal{P}_v(\tau_1) \subseteq \mathcal{P}_v(\tau_2) \quad\mbox{ and }\quad \bigcup_{\tau\ge 0} \mathcal{P}_v(\tau)=\mathbb{R}^N\,.
\end{equation}
In addition, for each $x\in\mathbb{R}^N$ there are $T_x\ge 0$ and $\varepsilon_x>0$ such that
\begin{equation}\label{b3}
v(\tau,x) \ge \varepsilon_x\ \tau^{(p-1)/(p-2)} \;\;\mbox{ for }\;\; \tau\ge T_x\,.
\end{equation}
\end{proposition}

The proof relies on the availability of suitable subsolutions which we describe next.

\begin{lemma}\label{leb2}
Define two positive real numbers $R_p$ and $T_p$ by
$$ R_p := \frac{p-2}{2^p (p-1)} \quad\mbox{ and }\quad T_p := \frac{2(p-1)}{p-2}\ \left( 2 + 2^{p-1} (N+p-2) \right)\,. $$
If $R\in (0,R_p]$ and $T\ge T_p$, the function $s_{R,T}$ given by
$$ s_{R,T}(\tau,x) := \frac{p-2}{R (p-1)}\ (T+\tau)^{(p-1)/(p-2)}\ \left( R^2 - \frac{|x|^2}{(T+\tau)^2} \right)_+^{(p-1)/(p-2)}, \quad (\tau,x)\in Q\,, $$
is a (viscosity) subsolution to \eqref{a10}.
\end{lemma}

\noindent\textbf{Proof.} We have $s_{R,T}(\tau,x)= (T+\tau)^{(p-1)/(p-2)}\ \sigma(\xi)$ with $\xi:=x/(T+\tau)$ and $\sigma(\xi):= (p-2)\ \left( R^2 - |\xi|^2 \right)_+^{(p-1)/(p-2)} /(R(p-1))$. Since $p-1>p-2>0$, we observe that $\sigma$ and $|\nabla\sigma|^{p-2} \nabla\sigma$ both belong to $\mathcal{C}^1(\mathbb{R}^N)$. Therefore,
$$ L(\tau,x) := R\ (T+\tau)^{-(p-1)/(p-2)}\ \left\{ \partial_\tau s_{R,T} - \Delta_p s_{R,T} + \left| \nabla s_{R,T}\right|^{p-1} - s_{R,T} \right\} $$
is well-defined for $(\tau,x)\in [0,\infty)\times\mathbb{R}^N$ and
\begin{eqnarray*}
L(\tau,x) & = & \frac{R}{T+\tau}\ \left\{ \frac{p-1}{p-2}\ \sigma(\xi) - \xi\cdot\nabla\sigma(\xi) - \Delta_p \sigma(\xi) \right\} + R\ \left| \nabla \sigma(\xi)\right|^{p-1} - R\ \sigma(\xi) \\
& = & \left( R^2 - |\xi|^2 \right)_+^{(p-1)/(p-2)}\ \left\{ \frac{1}{T+\tau}\ \left( 1 + 2^{p-1} (N+p-2)\ \frac{|\xi|^{p-2}}{R^{p-2}} \right) \right\} \\
& + & \left( R^2 - |\xi|^2 \right)_+^{(p-1)/(p-2)}\ \left\{ \frac{2}{T+\tau}\ \frac{|\xi|^2}{R^2-|\xi|^2}\ \left( 1 - \frac{2^{p-1} (p-1)}{p-2}\ \frac{|\xi|^{p-2}}{R^{p-2}} \right) \right\} \\
& + & \left( R^2 - |\xi|^2 \right)_+^{(p-1)/(p-2)}\ \left\{ 2^{p-1} \frac{|\xi|^{p-1}}{R^{p-2}} - \frac{p-2}{p-1} \right\} \\
& \le & \left( R^2 - |\xi|^2 \right)_+^{(p-1)/(p-2)}\ \left\{ \frac{1+2^{p-1} (N+p-2)}{T} + 2^{p-1} R - \frac{p-2}{p-1} \right\} \\
& + & \left( R^2 - |\xi|^2 \right)_+^{(p-1)/(p-2)}\ \left\{ \frac{2}{T+\tau}\ \frac{|\xi|^2}{R^2-|\xi|^2}\ \left( 1 - \frac{2^{p-1} (p-1)}{p-2}\ \frac{|\xi|^{p-2}}{R^{p-2}} \right)_+ \right\} \,.
\end{eqnarray*}
We next note that
$$ 1 - \frac{2^{p-1} (p-1)}{p-2}\ \frac{|\xi|^{p-2}}{R^{p-2}}\le 0 \quad\mbox{ if }\quad |\xi|\ge \frac{R}{2}\,, $$
so that the last term of the right-hand side of the previous inequality vanishes unless $|\xi|<R/2$, in which case $|\xi|^2/(R^2-|\xi|^2)\le 1/3$; it is therefore bounded from above by $2\left( R^2 - |\xi|^2 \right)_+^{(p-1)/(p-2)}/(3T)$.
Consequently, owing to the choice of $R$ and $T$,
\begin{eqnarray*}
L(\tau,x) & \le & \left( R^2 - |\xi|^2 \right)_+^{(p-1)/(p-2)}\ \left\{ \frac{1+2^{p-1} (N+p-2)}{T_p} + 2^{p-1} R_p - \frac{p-2}{p-1} + \frac{2}{3T_p} \right\} \\
& \le & 0\,,
\end{eqnarray*}
whence the claim. \qed

\medskip

\noindent\textbf{Proof of Proposition~\ref{prb1}.} (i) Fix $\tau_1\ge 0$ and $x_1\in\mathcal{P}_v(\tau_1)$. Owing to the continuity of $x\longmapsto v(\tau_1,x)$ there are $\delta>0$ and $r_1>0$ such that $v(\tau_1,x)\ge\delta$ for $x\in B(x_1,r_1)$. Take now $R>0$ small enough such that $R<\min{\{r_1,R_p\}}$ and satisfying
$$ R< \frac{r_1}{T_p+\tau_1} \quad\mbox{ and }\quad \frac{p-2}{p-1}\ (T_p+\tau_1)^{(p-1)/(p-2)}\ R^{p/(p-2)} \le \delta\,, $$
the parameters $R_p$ and $T_p$ being defined in Lemma~\ref{leb2}. Then we have $s_{R,T_p}(\tau_1,x-x_1) = 0 \le v(\tau_1,x)$ \ if \ $|x-x_1|\ge R\ (T_p+\tau_1)$, while
$$ s_{R,T_p}(\tau_1,x-x_1) \le \frac{p-2}{R(p-1)}\ (T_p+\tau_1)^{(p-1)/(p-2)}\ R^{(2p-2)/(p-2)} \le \delta \le v(\tau_1,x) $$
if $|x-x_1|\le R\ (T_p+\tau_1)$ as $R(T_p+\tau_1)\le r_1$. Moreover, if $\tau_2>\tau_1$, $\tau\in [\tau_1,\tau_2]$ and $x\in\partial B(x_1,R (T_p+\tau_2))$, then $s_{R,T_p}(\tau,x-x_1)=0 \le v(\tau,x)$. Recalling that $s_{R,T_p}$ is a subsolution to \eqref{a10} by Lemma~\ref{leb2}, we infer from the comparison principle that $s_{R,T_p}(\tau,x-x_1) \le v(\tau,x)$ for $(\tau,x)\in [\tau_1,\tau_2]\times B(x_1,R (T_p+\tau_2))$. As $s_{R,T_p}(\tau,x-x_1)=0\le v(\tau,x)$ for $\tau\in [\tau_1,\tau_2]$ and $x\not\in B(x_1,R (T_p+\tau_2))$ we actually have $s_{R,T_p}(\tau,x-x_1) \le v(\tau,x)$ for $(\tau,x)\in [\tau_1,\tau_2]\times\mathbb{R}^N$. Since $\tau_2>\tau_1$ is arbitrary and neither $R$ nor $T_p$ depend on $\tau_2$, we end up with
\begin{equation}\label{bs31}
s_{R,T_p}(\tau,x-x_1) \le v(\tau,x)\,, \qquad (\tau,x)\in [\tau_1,\infty)\times\mathbb{R}^N\,.
\end{equation}
A first consequence of \eqref{bs31} is that, if $\tau_2>\tau_1$, then $v(\tau_2,x_1)\ge s_{R,T_p}(\tau_2,0)>0$ so that $x_1$ also belongs to $\mathcal{P}_v(\tau_2)$. Next, given $x\in\mathbb{R}^N$, we have $x\in B(x_1,R(T_p+\tau))$ for $\tau$ large enough and it follows from \eqref{bs31} that $v(\tau,x)\ge s_{R,T_p}(\tau,x-x_1) > 0$ for $\tau$ large enough. Consequently, $x$ belongs to $\mathcal{P}_v(\tau)$ for $\tau$ large enough which proves the second assertion of \eqref{b2}.

\medskip

\noindent (ii) Consider $x_0\in\mathbb{R}^N$. According to \eqref{b2} there is $\tau_0$ large enough such that $x_0\in\mathcal{P}_v(\tau_0)$. Arguing as in the proof of \eqref{b2}, we may find $r_0$ small enough (depending on $x_0$) such that $s_{r_0,T_p}(\tau,x-x_0) \le v(\tau,x)$ for $(\tau,x)\in [\tau_0,\infty)\times\mathbb{R}^N$. Consequently,
$$ v(\tau,x_0) \ge \frac{p-2}{r_0 (p-1)}\ (T_p+\tau)^{(p-1)/(p-2)}\ r_0^{(2p-2)/(p-2)} \ge \frac{p-2}{p-1}\ r_0^{p/(p-2)}\ \tau^{(p-1)/(p-2)}\,, $$
which gives the lower bound \eqref{b3}. \qed

\medskip

\begin{corollary}\label{cor:bfbsb}
Assume that $u_0(0)>0$.
Then there is $r_*>0$ such that
\begin{equation}\label{spirou}
v(\tau,x) \ge \frac{(p-2)}{r_* (p-1)} (1+\tau)^{(p-1)/(p-2)} \left( r_*^2 - \frac{|x|^2}{(1+\tau)^2} \right)_+^{(p-1)/(p-2)}\,, \quad (\tau,x)\in Q\,.
\end{equation}
\end{corollary}

\noindent\textbf{Proof.} Arguing as in the proof of \eqref{b2} and using the positivity of $u_0(0)$, we may find $r_*>0$ small enough such that $s_{r_*,T_p}(\tau,x)\le v(\tau,x)$ for $(\tau,x)\in Q$. Since $T_p>1$, the previous inequality implies \eqref{spirou}. \qed

\subsection{Eventual radial symmetry}

We prove the following classical monotonicity lemma, see \cite[Proposition~2.1]{AC83} for instance.

\begin{lemma}\label{leb4}
If $x\in\mathbb{R}^N$ and $r>0$ satisfy $|x|>2R_0$ and $r< |x| - 2 R_0$, then
\begin{equation}
v(\tau,x) \le \inf_{|y|=r} v(\tau,y) \quad\mbox{ for }\quad \tau\ge 0\,.
\end{equation}
Here, $R_0$ is the radius of the ball containing the support of $u_0$ in \eqref{a3}.
\end{lemma}

\noindent\textbf{Proof.} The proof relies on Alexandrov's reflection principle. Let $(x,r)\in\mathbb{R}^N\times (0,\infty)$ fulfil the assumptions of Lemma~\ref{leb4} and consider $y\in\mathbb{R}^N$ such that $|y|=r$. Let $H$ be the hyperplane of points of $\mathbb{R}^N$ which are equidistant from $x$ and $y$, namely
$$ H := \left\{ z\in\mathbb{R}^N\ : \ \left\langle z - \frac{x+y}{2} , x-y \right\rangle = 0 \right\}\,. $$
Introducing
$$ H_- := \left\{ z\in\mathbb{R}^N\ : \ \left\langle z - \frac{x+y}{2} , x-y \right\rangle \le 0 \right\} $$
and
$$ \tilde{v}(\tau,z) := v\left( \tau , z - 2\ \left\langle z - \frac{x+y}{2} , x-y \right\rangle\ \frac{x-y}{|x-y|^2} \right)\,, \qquad (\tau,z)\in Q\,, $$
it readily follows from the rotational and translational invariance of \eqref{a10} that $\tilde{v}$ also solves \eqref{a10}. In addition, $y\in H_-$ and $\mathcal{P}_v(0) \subseteq B(0,R_0) \subseteq H_-$ by \eqref{a3}. Now, on the one hand, if $z\in H_-$, then
$$ z - 2\ \left\langle z - \frac{x+y}{2} , x-y \right\rangle\ \frac{x-y}{|x-y|^2} \not\in H_- $$
and $\tilde{v}(0,z)=0\le v(0,z)$. On the other hand, if $z\in H = \partial H_-$ and $\tau\ge 0$, we clearly have $\tilde{v}(\tau,z)=v(\tau,z)$. We are then in a position to apply the comparison principle to \eqref{a10} on $(0,\infty)\times H_-$ and conclude that
\begin{equation}\label{b5}
\tilde{v}(\tau,z) \le v(\tau,z)\,, \qquad (\tau,z)\in [0,\infty)\times H_-\,.
\end{equation}
Recalling that $y\in H_-$, we infer from \eqref{b5} that $v(\tau,y)\ge\tilde{v}(\tau,y)=v(\tau,x)$ for $\tau\ge 0$, which is the expected result. \qed

\medskip

\begin{remark}
Although Lemma~\ref{leb4} will not be used in the main proofs, it is an interesting result for the qualitative theory, since it shows that the dynamics symmetrizes the solution.
\end{remark}

\section{Propagation of the positivity set}\label{sect.posit}

We next turn to the speed of expansion of the positivity set $\mathcal{P}_v$ of $v$ and put
\begin{equation}\label{bs32}
\varrho(\tau) := \sup{\left\{ |x|\ : \ x \in \mathcal{P}_v(\tau) \right\}}\,,
\end{equation}
so that $\mathcal{P}_v(\tau)\subseteq B(0,\varrho(\tau))$ for $\tau\ge 0$. The purpose of this section is to prove that the expansion radius $\varrho(\tau)$ of $\mathcal{P}_v(\tau)$ is asymptotically equal to $\tau$, in other words,
\begin{equation*}
\lim\limits_{\tau\to\infty}\frac{\varrho(\tau)}{\tau}=1,
\end{equation*}
and, more precisely, to prove Theorem \ref{main.posit}. The proof relies on the existence of ``nice'' traveling wave solutions of \eqref{a10}, which may be used as subsolutions and supersolutions for the Cauchy problem \eqref{a10}-\eqref{a11}. The construction of such traveling waves is inspired by the technique used in the so-called KPP problems, \cite{KPP}, which has generated a wide literature; see e.\,g., \cite{Ar80}, \cite{VaTube} for applications to porous media, and \cite{QRV} for blow-up problems. We thus begin with a phase-plane analysis, proving the existence of the desired traveling waves.

\subsection{Traveling wave analysis for $N=1$}\label{sect.TW}

We look for traveling waves of the form
\begin{equation*}
v(\tau,x)=f(z), \quad z=x-c\tau, \ c>0,
\end{equation*}
solving \eqref{a10} in dimension $N=1$. Then, the profile $f$ solves the ordinary differential equation:
\begin{equation}\label{OdeTW}
-cf'-\left(|f'|^{p-2}f'\right)'+|f'|^{p-1}-f=0.
\end{equation}
We are actually only interested in traveling waves which present an interface, that is, such that $f$ vanishes for $z$ sufficiently large. As we shall see below, the profile $f$ is non-monotone in general, but it is nonnegative and decreasing near the interface. We transform \eqref{OdeTW} into a first order system by introducing the notation $U=f$ and $V=-f'$. We arrive at the following system
\begin{equation}\label{syst1}
\left\{\begin{array}{ll}(p-1)|V|^{p-2}U'=-(p-1)|V|^{p-2}V, \\ (p-1)|V|^{p-2}V'=-cV-|V|^{p-1}+U, \end{array}\right.
\end{equation}
where the common factor $(p-1)|V|^{p-2}$ has no influence on the orbits (since we work with $dV/dU$) and can be removed after a change of the time variable. We perform next the phase-plane analysis of the system \eqref{syst1}.

\medskip

\noindent \textbf{Local analysis in the plane}. The system \eqref{syst1} has a unique critical point, $P=(0,0)$, and the Jacobian matrix $J(0,0)$ at this point is given by
\begin{equation*}
J(0,0)=\left( \begin{array}{cc} 0 & 0 \\ 1 & -c \\ \end{array} \right)
\end{equation*}
with eigenvalues $\lambda_1=0$ and $\lambda_2=-c$, and corresponding eigenvectors $e_1=(c,1)$ and $e_2=(0,1)$. By a careful analysis, we notice that the center manifold at $P$ is tangent to $e_1$, and is asymptotically stable. It follows that $P$ is a stable node for every $c>0$. There is a unique orbit entering $P$ and tangent to $e_2$, forming the stable manifold; its local behaviour is $U(z)\sim C(-z)^{(p-1)/(p-2)}$ as $z\to 0$, hence this orbit contains all the traveling waves with velocity $c$ and having an interface.
By standard theory (see, e.g., \cite{Pe}), all the other orbits approach the center manifold, tangent to $e_1$, and present an exponential decay, but no interface: $U(z)\sim e^{-cz}$ as $z\to\infty$.

\medskip

\noindent \textbf{Local analysis at infinity}. We investigate the behaviour of the system when $U$ is very large. For monotone traveling waves, we make the following inversion of the plane:
\begin{equation*}
Z=\frac{1}{U}, \quad W=\frac{|V|^{p-2}V}{U},
\end{equation*}
and we are interested in the local behaviour near $Z=0$. After straightforward calculations, \eqref{syst1} becomes the new system:
\begin{equation}\label{inftysyst}
\left\{\begin{array}{ll} Z'=Z^{(2p-3)/(p-1)}W|W|^{-(p-2)/(p-1)},\\ W'=Z^{(p-2)/(p-1)}|W|^{p/(p-1)}-cZ^{(p-2)/(p-1)}W|W|^{-(p-2)/(p-1)}+1-|W|.\end{array}\right.
\end{equation}
We find two critical points with $Z=0$, namely $Q_1=(0,1)$ and $Q_2=(0,-1)$. We will analyze only $Q_1$, i.e. the decreasing traveling waves. Let us also remark that, in the second equation of \eqref{inftysyst}, the terms with $Z$ are dominated by $1-|W|$ near $Q_1$ and $Q_2$, hence we can study the local behaviour by using the approximate equation with only $1-|W|$ in the right-hand side. The linearization near $Q_1$ has eigenvalues $\lambda_1=0$ and $\lambda_2=-1$, and the center manifold, which is tangent to the line $W=1$, is unstable. Hence, the point $Q_1$ behaves like a saddle, and the orbits which are interesting for our study are the orbits going out of $Q_1$. These orbits are tangent to $W=1$, and in the original system they satisfy $U\sim V^{p-1}$, hence, by integration,
\begin{equation*}
U(z)\sim |z|^{(p-1)/(p-2)}, \quad \mbox{as} \ z\to-\infty,
\end{equation*}
and are decreasing. The local analysis around $Q_2$ is similar, but not interesting for our goals. Let us notice that not all solutions passing through a point in the first quadrant come from $Q_1$. Indeed, the orbits touching the curve $U=cV+V^{p-1}$ change monotonicity as functions $V=V(U)$, hence they have previously reached the axis $V=0$, meaning a change of monotonicity of $f=f(z)$, and they enter through this change into the first quadrant. Analyzing the curve $U=cV+V^{p-1}$, we observe that it connects in the phase-plane the points $P=(0,0)$ and $Q_1$, being tangent at $Q_1$ to the line $W=1$. In particular, there exist non-monotone solutions, and these are precisely the objects we are interested in.

\medskip

\noindent \textbf{Global behaviour}. This is now not difficult to establish, by merging the previous local analysis with the following important remarks:

\noindent (a) The evolution of the system \eqref{syst1} with respect to the parameter $c$ is monotone. Indeed, we calculate:
\begin{equation*}
\frac{{\rm d}}{{\rm d} c}\left(\frac{{\rm d} V}{{\rm d} U}\right)=\frac{1}{(p-1)|V|^{p-2}}>0.
\end{equation*}

\noindent (b) There exists an explicit family of traveling wave solutions with speed $c=1$:
\begin{equation}\label{expl.TW}
f_{1,K}(z)=\left(\frac{p-2}{p-1}\right)^{(p-1)/(p-2)}(K-z)_{+}^{(p-1)/(p-2)}\,, \quad K\ge 0\,.
\end{equation}
This function is obviously decreasing and presents an interface at $z=K$.
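\medskip

For the reader's convenience, we record why \eqref{expl.TW} indeed provides a traveling wave with speed $c=1$: for $z<K$, a direct computation gives
\begin{equation*}
f_{1,K}'(z) = -\left(\frac{p-2}{p-1}\right)^{1/(p-2)} (K-z)^{1/(p-2)}\,,
\end{equation*}
so that $|f_{1,K}'|^{p-1} = f_{1,K}$ and $|f_{1,K}'|^{p-2} f_{1,K}' = -f_{1,K}$. Inserting these two identities into \eqref{OdeTW} reduces the equation to $(1-c)\, f_{1,K}'=0$, which holds precisely when $c=1$.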
It is immediate to check that this orbit satisfies $U=V^{p-1}$, hence it comes from the point $Q_1$ along its center manifold, and it enters $P$, being the unique orbit entering $P$ and tangent to the eigenvector $e_2=(0,1)$ (unique for $c=1$), as discussed above.

\noindent (c) Moreover, the vectors of the direction field of \eqref{syst1} over the curve $U=V^{p-1}$ (which gives the explicit orbit \eqref{expl.TW}) all point to the same side of the curve. Indeed, the normal vector to this curve is $(1,-(p-1)V^{p-2})$ and we calculate:
\begin{equation*}
(1,-(p-1)V^{p-2})\cdot(-(p-1)V^{p-1},-cV-V^{p-1}+U)=(p-1)(c-1)V^{p-1}.
\end{equation*}
For $c=1$ we obtain the explicit trajectory, and for $c<1$, the above scalar product is negative, hence all these vectors point to the side opposite to the normal vector $(1,-(p-1)V^{p-2})$. For $c>1$, they all point to the same side as the normal vector. Since we are interested only in traveling waves with an interface, we analyze only the unique (for $c$ fixed) orbit entering $P=(0,0)$ tangent to $e_2=(0,1)$. For $c=1$, it is explicit and connects $P$ and $Q_1$ in the first quadrant. We draw the phase-plane for $c=1$ in Figure~\ref{figure1} below; it is clear that the explicit connection does not change sign or monotonicity.

\medskip

\begin{figure}[ht!]
\begin{center}
\includegraphics[width=8cm]{PhasePlaneILV2.eps}
\end{center}
\caption{Phase portrait around the origin for $c=1$. Experiment for $p=3$, $N=2$.}\label{figure1}
\end{figure}

\medskip

By remarks (a) and (c) above, it follows that for $c<1$, this unique orbit disconnects from $Q_1$, hence it has to cross at some point the curve $U=cV+V^{p-1}$ (which still connects $P=(0,0)$ and $Q_1$); as explained before, this orbit previously had a change of sign (crossing the axis $U=0$) and then a change of monotonicity (crossing the axis $V=0$). In particular, we can say that the explicit orbit \eqref{expl.TW} is a separatrix between the monotone and the non-monotone orbits. We draw the local phase portrait for $c<1$, around the origin, in Figure~\ref{figure2} below. We gather the discussion above in the following result.

\begin{lemma}\label{TWdim1}
(i) For any $c\in(0,1)$ and $K\ge 0$, there exists a unique traveling wave solution $\overline{f}_{c,K}(z)=\overline{f}_{c,K}(x-c\tau)$ of \eqref{a10} in dimension $N=1$, having an interface at $z=K$ (that is, $\overline{f}_{c,K}(z)=0$ for $z\ge K$) and moving with speed $c$. In addition, $\overline{f}_{c,K}(z)=\overline{f}_{c,0}(z-K)$ for $z\in\mathbb{R}$.

\noindent (ii) For $c=1$ and for any $K\ge 0$, there exists a unique nonnegative traveling wave $f_{1,K}(z)=f_{1,K}(x-\tau)$ of \eqref{a10} in dimension $N=1$ with interface at $z=K$, having the explicit formula:
\begin{equation}\label{expl.TW2}
f_{1,K}(x-\tau)=\left(\frac{p-2}{p-1}\right)^{(p-1)/(p-2)}(K+\tau-x)_{+}^{(p-1)/(p-2)}.
\end{equation}
Here again, $f_{1,K}(z)=f_{1,0}(z-K)$ for $z\in\mathbb{R}$.

\noindent (iii) For any $c>1$ and $K\ge 0$, there exists a unique traveling wave solution $f_{c,K}=f_{c,K}(x-c\tau)$ of \eqref{a10} in dimension $N=1$ with interface at $z=K$ and moving with speed $c$. Moreover, $f_{c,K}$ is nonnegative and non-increasing, and $f_{c,K}(z)=f_{c,0}(z-K)$ for $z\in\mathbb{R}$.
\end{lemma}

\begin{figure}
\begin{center}
\includegraphics[width=8cm]{PhasePlaneILV1.eps}
\end{center}
\caption{Phase portrait around the origin for $c<1$. Experiment for $p=3$, $N=2$, $c=0.9$.}\label{figure2}
\end{figure}

\noindent \textbf{Compactly supported subsolutions for $0<c<1$.} We are looking for nonnegative and compactly supported subsolutions traveling with any speed $0<c<1$. These subsolutions are constructed in the following way: from the analysis above, we know that, given $c\in (0,1)$ and $K\ge 0$, there are two points $z_{c,K}\in (-\infty,K)$ and $\tilde{z}_{c,K}\in (z_{c,K},K)$ such that
$$ z_{c,K} := \inf{\left\{ z\in (-\infty,K)\ :\ \overline{f}_{c,K}>0 \;\mbox{ in }\; (z,K) \right\}}>-\infty\,, $$
and
$$ \overline{f}_{c,K}'>0 \;\mbox{ in }\; (z_{c,K},\tilde{z}_{c,K}) \;\;\;\mbox{ and }\;\;\; \overline{f}_{c,K}'<0 \;\mbox{ in }\; (\tilde{z}_{c,K},K)\,. $$
We then define
\begin{equation}\label{subsol}
f_{c,K}(z)=\left\{\begin{array}{ll}\overline{f}_{c,K}(z), & \hbox{for} \ z_{c,K}\le z\le K, \\ 0, & \hbox{elsewhere}.\end{array}\right.
\end{equation}
In other words, we consider the positive hump of the graph of $\overline{f}_{c,K}$ located between its last change of sign and the interface. It is immediate to check that $f_{c,K}$ is a compactly supported subsolution to \eqref{a10} in dimension $N=1$, and that it has an increasing part until reaching its maximum at $\tilde{z}_{c,K}$, and then decreases to the interface point $K$. In the sequel, the notation $f_{c,K}$ will designate these subsolutions if $0<c<1$ and the solutions to \eqref{a10} in dimension $N=1$ given by Lemma~\ref{TWdim1} if $c\ge1$.

\subsection{Construction of subsolutions in dimension $N\ge 1$}\label{TWN}

We turn to equation \eqref{a10} posed in dimension $N\ge 1$, for which we aim at constructing some special subsolutions having an interface that moves outwards in all directions with a given velocity $c<1$. The construction is based on the traveling waves $f_{c,K}$ identified in the previous subsection. The first attempt is to try the form $V(\tau,x)=f_{c,K}(|x|-c\tau)$, $c\in (0,1)$, which satisfies:
\begin{eqnarray*}
& & \partial_{\tau} V-\Delta_{p}V+|\nabla V|^{p-1}-V \\
& = & -cf_{c,K}'-\left(|f_{c,K}'|^{p-2}f_{c,K}'\right)'+|f_{c,K}'|^{p-1}-f_{c,K}-\frac{N-1}{|x|}|f_{c,K}'|^{p-2}f_{c,K}'\\
& \leq & -\frac{N-1}{|x|}|f_{c,K}'|^{p-2}f_{c,K}'\,.
\end{eqnarray*}
Thus, $V$ is a subsolution of \eqref{a10} in the region of $Q$ where $f_{c,K}'\geq 0$. We therefore have to modify the profile in the decreasing part of $f_{c,K}$ and we proceed as follows.

\medskip

\noindent \textbf{Traveling wave solutions to a modified equation in dimension $N=1$}. For $\alpha\in (0,1/2)$, we consider the following perturbation of \eqref{a10}:
\begin{equation}\label{a10m}
\partial_{\tau} \zeta -\partial_x \left( |\partial_x \zeta|^{p-2} \partial_x \zeta \right) + |\partial_x \zeta|^{p-1} -\alpha\,|\partial_x \zeta|^{p-2}\partial_x \zeta-\zeta=0\,, \quad (\tau,x)\in (0,\infty)\times\mathbb{R}\,,
\end{equation}
and look for traveling wave solutions $\zeta(\tau,x)=g(x-c\tau)$.
Then, $g$ solves
\begin{equation}\label{eq.modif}
-cg'-\left(|g'|^{p-2}g'\right)'+|g'|^{p-1}-\alpha\,|g'|^{p-2}g'-g=0.
\end{equation}
The phase-plane analysis for \eqref{eq.modif} is similar to that of \eqref{OdeTW}, with the difference that an extra term $-\alpha\,|V|^{p-2}V$ appears in the right-hand side of the second equation in \eqref{syst1}. This is only reflected in the analysis at infinity, where the point $Q_1$ changes into $(0,1/(1+\alpha))$ and the explicit separatrix is obtained for $c=1/(1+\alpha)<1$. In particular, we have the following analogue of Lemma~\ref{TWdim1}~(i).

\begin{lemma}\label{TWMdim1}
For any $\alpha>0$ sufficiently small, $c\in(0,1/(1+\alpha))$ and $K\ge 0$, there exists a unique traveling wave solution $g_{c,K,\alpha}(z)=g_{c,K,\alpha}(x-c\tau)$ of \eqref{a10m} having an interface at $z=K$ and moving with speed $c$. In addition, $g_{c,K,\alpha}(z)=g_{c,0,\alpha}(z-K)$ for $z\in\mathbb{R}$ and there are two points $z_{c,K,\alpha}\in (-\infty,K)$ and $\tilde{z}_{c,K,\alpha}\in (z_{c,K,\alpha},K)$ such that
$$ z_{c,K,\alpha} := \inf{\left\{ z\in (-\infty,K)\ :\ g_{c,K,\alpha}>0 \;\mbox{ in }\; (z,K) \right\}}>-\infty\,, $$
and
$$ g_{c,K,\alpha}'>0 \;\mbox{ in }\; (z_{c,K,\alpha},\tilde{z}_{c,K,\alpha}) \;\;\;\mbox{ and }\;\;\; g_{c,K,\alpha}'<0 \;\mbox{ in }\; (\tilde{z}_{c,K,\alpha},K)\,. $$
\end{lemma}

Setting
$$ M_{c,\alpha} := \sup_{z\in [z_{c,0,\alpha},0]}{\{ g_{c,0,\alpha}(z)\}}\,, $$
we notice that
\begin{equation}\label{gaston}
z_{c,K,\alpha}=z_{c,0,\alpha}+K\,, \quad \tilde{z}_{c,K,\alpha}=\tilde{z}_{c,0,\alpha}+K\,, \quad \sup_{z\in [z_{c,K,\alpha},K]}{\{ g_{c,K,\alpha}(z)\}} = M_{c,\alpha}\,.
\end{equation}
If we now put $V(\tau,x)=g_{c,K,\alpha}(|x|-c\tau)$, we calculate and find that
\begin{equation*}
\partial_{\tau} V-\Delta_{p}V+|\nabla V|^{p-1}-V=\left(\alpha-\frac{N-1}{|x|}\right)\ \left( |g_{c,K,\alpha}'|^{p-2}g_{c,K,\alpha}' \right)(|x|-c\tau)\,,
\end{equation*}
and it is a subsolution where $g_{c,K,\alpha}'\leq 0$ and $\alpha\geq(N-1)/|x|$. Matching these two conditions turns out to be possible, as we show now. Fix $c\in (1/2,1)$ and $\alpha_c:=(1-c)/(1+c)$ and define
\begin{equation}\label{jeanne}
\tau_0(c) := \max{\left\{ \frac{2(N-1)}{\alpha_c} - 2 \tilde{z}_{c,0,\alpha_c} , - \frac{\tilde{z}_{c,0,\alpha_c}}{c} \right\}} > \frac{2(N-1)}{\alpha_c}\,,
\end{equation}
the point $\tilde{z}_{c,0,\alpha_c}\in (-\infty,0)$ being defined in Lemma~\ref{TWMdim1}. Then $c<1/(1+\alpha_c)$ and, for $K\ge 0$, $\tau\ge \tau_0(c)$, and $|x|\ge \tilde{z}_{c,K,\alpha_c} + c\tau=\tilde{z}_{c,0,\alpha_c}+K+c\tau$, we have
$$ \frac{N-1}{|x|} \le \frac{N-1}{\tilde{z}_{c,0,\alpha_c}+c\tau_0(c)} \le \frac{2(N-1)}{2\tilde{z}_{c,0,\alpha_c}+\tau_0(c)} \le \alpha_c\,, $$
and
\begin{eqnarray*}
g_{c,K,\alpha_c}'(|x|-c\tau) < 0 & \mbox{ if } & \tilde{z}_{c,K,\alpha_c} + c\tau \le |x| < K+c\tau\,, \\
g_{c,K,\alpha_c}'(|x|-c\tau) = 0 & \mbox{ if } & K + c\tau \le |x|\,.
\end{eqnarray*}
Consequently, for $c\in (1/2,1)$, $\alpha_c=(1-c)/(1+c)$, and $K>0$, the function $V$ defined by $V(\tau,x)=g_{c,K,\alpha_c}(|x|-c\tau)$ is a subsolution to \eqref{a10} for $\tau\ge \tau_0(c)$ and $|x|\ge \tilde{z}_{c,K,\alpha_c} + c\tau$. Observing that any positive constant is a subsolution to \eqref{a10}, we construct a compactly supported subsolution $v_{c,K}$ to \eqref{a10} by setting
\begin{equation}\label{subs.dimN}
v_{c,K}(\tau,x):=\left\{\begin{array}{ll} M_{c,\alpha_c} & \quad \hbox{if} \ 0\leq|x|<\tilde{z}_{c,K,\alpha_c} + c\tau\,,\\ & \\ g_{c,K,\alpha_c}(|x|-c\tau) & \quad \hbox{if} \ |x|>\tilde{z}_{c,K,\alpha_c} + c\tau\,, \end{array}\right.
\end{equation}
for $\tau\ge \tau_0(c)$. It is easy to check that the function $v_{c,K}$ is a subsolution to \eqref{a10} in $[\tau_0(c),\infty)\times\mathbb{R}^N$. It will be used for comparison from below, as indicated in the next subsection.

\subsection{Proof of Theorem~\ref{main.posit}}\label{subsect.mainpos}

We conclude the proof of Theorem~\ref{main.posit} by a comparison argument, using the subsolutions and supersolutions constructed in the previous subsections. Before that, we identify a class of solutions of \eqref{a10} that is representative of the general solutions. We say that a function $V=V(\tau,x)$ is \emph{radially non-increasing} if $V(\tau,\cdot)$ is radially symmetric for all $\tau$, and it is non-increasing in the radial variable $r:=|x|$. For example, the subsolutions $v_{c,K}$ are radially non-increasing. The next results show that the class of radially non-increasing solutions of \eqref{a10} is sufficient for our aims.

\begin{lemma}\label{rad.noninc}
Let $u_0=u_0(r)$ be a radially non-increasing function satisfying \eqref{a3}. Then, the solution $v$ of \eqref{a10} with initial condition $u_0$ is also radially non-increasing.
\end{lemma}

\noindent\textbf{Proof.} The radial symmetry of the solution $v$ follows from the invariance of the equation \eqref{a10} with respect to rotations. We now write the equation satisfied by $\xi=\partial_r v$, obtained by differentiating \eqref{a10} with respect to $r$:
$$ \partial_\tau\xi-\partial_r^2(|\xi|^{p-2}\xi)-\frac{N-1}{r}\partial_r(|\xi|^{p-2}\xi)+\frac{N-1}{r^2}|\xi|^{p-2}\xi+(p-1)|\xi|^{p-3}\xi \partial_r\xi-\xi=0, $$
which is a parabolic equation (of porous medium type) and satisfies a maximum principle. Since $0$ is a solution to the above equation, the derivative $\xi=\partial_r v$ remains nonpositive if it is initially nonpositive, and it follows that $v$ is radially non-increasing. \qed

\medskip

We are now in a position to end the proof of Theorem~\ref{main.posit} for radially non-increasing initial data. More precisely, we have the following upper and lower bounds for the edge $\varrho\,(\tau)$, defined in \eqref{bs32}, of the support of $v(\tau)$.

\begin{lemma}\label{leb3}
Let $u_0=u_0(r)$ be a radially non-increasing function satisfying \eqref{a3} and denote the solution of \eqref{a10} with initial condition $u_0$ by $v$.
For any $c\in (1/2,1)$, there exists $\tau_1(c)>0$ such that, for any $\tau\geq\tau_1(c)$, we have:
\begin{equation}\label{b4}
1+c(\tau-\tau_1(c)) \leq\varrho\,(\tau) \le R_0 + \frac{p-1}{p-2}\ \Vert u_0\Vert_\infty^{(p-2)/(p-1)} + \tau\,.
\end{equation}
In particular, we obtain that $\varrho(\tau)/\tau\to1$ as $\tau\to\infty$.
\end{lemma}

\noindent\textbf{Proof.} The upper bound follows by comparison with the explicit traveling wave solutions \eqref{expl.TW2}. More precisely, we define
\begin{equation}\label{bs41}
R_1:= R_0 + \frac{p-1}{p-2}\ \Vert u_0\Vert_\infty^{(p-2)/(p-1)}
\end{equation}
and consider the function $\overline{v}(\tau,x)=f_{1,R_1}(x_1-\tau)$, which is a solution of \eqref{a10} by Lemma~\ref{TWdim1}. If $x=(x_i)_{1\le i \le N}\in\mathbb{R}^N$ is such that $x_1\ge R_0$, then $|x|\ge R_0$ and $u_0(x)=0\le\overline{v}(0,x)$ while, if $x_1\le R_0$,
\begin{eqnarray*}
u_0(x) & \le & \Vert u_0\Vert_\infty \le \left( \frac{p-2}{p-1} \right)^{(p-1)/(p-2)}\ \left( R_1 - R_0 \right)^{(p-1)/(p-2)} \\
& \le & \left( \frac{p-2}{p-1} \right)^{(p-1)/(p-2)}\ \left( R_1 - x_1 \right)^{(p-1)/(p-2)} = \overline{v}(0,x)\,.
\end{eqnarray*}
The comparison principle then entails that $v(\tau,x)\le \overline{v}(\tau,x)$ for $(\tau,x)\in [0,\infty)\times\mathbb{R}^N$, from which we conclude that $\mathcal{P}_v(\tau) \subseteq \left\{ x\in\mathbb{R}^N\ : \ x_1\le R_1+\tau \right\}$. Owing to the rotational invariance of \eqref{a10}, we actually have $\mathcal{P}_v(\tau) \subseteq \left\{ x\in\mathbb{R}^N\ : \ \langle x , \omega \rangle \le R_1+\tau \right\}$ for every $\omega\in\mathbb{S}^{N-1}$ and $\tau\ge 0$, and thus
\begin{equation}\label{fantasio}
\mathcal{P}_v(\tau)\subseteq B(0,R_1+\tau)\,.
\end{equation}
The lower bound follows from comparison with the subsolutions constructed in \eqref{subs.dimN}. Fix $c\in (1/2,1)$ and put $r_1:=1+c\tau_0(c)$, $\tau_0(c)$ being defined by \eqref{jeanne}. Since $v(\tau)$ is radially non-increasing for all $\tau\ge 0$ by Lemma~\ref{rad.noninc}, we infer from Proposition~\ref{prb1} that, for $x\in B(0,r_1)$ and $\tau\ge T_{r_1}$,
$$ v(\tau,x) \ge v\left( \tau, \frac{r_1 x}{|x|} \right) \ge \varepsilon_{r_1}\ \tau^{(p-1)/(p-2)}\,. $$
Define $\tau_1(c)$ by
$$ \tau_1(c) := \max{\left\{ \tau_0(c), T_{r_1} , \left( \frac{M_{c,(1-c)/(1+c)}}{\varepsilon_{r_1}} \right)^{(p-2)/(p-1)}\right\}}\,, $$
so that the previous inequality and the properties of $v_{c,1}$ defined in \eqref{subs.dimN} guarantee that
$$ v(\tau_1(c),x) \ge M_{c,(1-c)/(1+c)} \ge v_{c,1}(\tau_0(c),x)\,, \quad x\in B(0,r_1)\,. $$
Since $v_{c,1}(\tau_0(c),x)=0$ for $x\not\in B(0,r_1)$, we also have $v(\tau_1(c),x) \ge v_{c,1}(\tau_0(c),x)$ for $x\not\in B(0,r_1)$. Recalling that $v_{c,1}$ is a subsolution to \eqref{a10} in $(\tau_0(c),\infty)\times\mathbb{R}^N$, we infer from the comparison principle that
\begin{equation}\label{compTW}
v(\tau+\tau_1(c),x)\geq v_{c,1}(\tau+\tau_0(c),x), \quad (\tau,x)\in Q\,.
\end{equation}
Consequently, $v(\tau+\tau_1(c),x)>0$ if $x\in B(0,r_1+c\tau)$, whence
\begin{equation}\label{lebrac}
B(0,1+c(\tau+\tau_0(c)-\tau_1(c))) \subset \mathcal{P}_v(\tau)\,, \quad \tau\ge \tau_1(c)\,.
\end{equation}
This readily implies that
$$ \varrho(\tau) \ge 1+c(\tau+\tau_0(c)-\tau_1(c)) \ge 1+c(\tau-\tau_1(c))\,, \quad \tau\ge \tau_1(c)\,. $$
In particular, we deduce from \eqref{fantasio} and \eqref{lebrac} that
\begin{equation*}
\liminf\limits_{\tau\to\infty}\frac{\varrho\,(\tau)}{\tau}\ge c \;\;\;\mbox{ for any }\;\;\; c\in (1/2,1) \;\;\;\mbox{ and }\;\;\; \limsup\limits_{\tau\to\infty}\frac{\varrho\,(\tau)}{\tau}\le 1\,,
\end{equation*}
which implies that $\varrho(\tau)/\tau\to1$ as $\tau\to\infty$. \qed

\medskip

Undoing the rescaling and coming back to the original notation with $t=(e^{(p-2)\tau}-1)/(p-2)$ and $\gamma(t)=\varrho(\tau)$, we obtain the result of Theorem~\ref{main.posit} for radially non-increasing initial data. The extension to arbitrary initial data satisfying \eqref{a3} is performed in Section~\ref{sec:aid}. Moreover, we notice that the speed is the same in any direction $\omega\in\mathbb{S}^{N-1}$, as stated.

\section{Proof of Theorem~\ref{asympt.main}}\label{Sect.scaling2}

\subsection{Scaling variables~II}\label{subsec4.1}

According to Proposition~\ref{prb1}, as $\tau\to\infty$ the solution $v$ to \eqref{a10}, \eqref{a11} expands in space and grows unboundedly in time. In order to take these phenomena into account, we introduce next a further scaling of the space variable
\begin{equation}\label{def.y}
y:= \frac{x}{1+\tau} \,,
\end{equation}
together with the new unknown function $w=w(\tau,y)$ defined by
\begin{equation}\label{def.w}
v(\tau,x) =(1+\tau)^{(p-1)/(p-2)}\ w\left(\tau,\frac{x}{1+\tau} \right)\,, \quad (\tau,x)\in [0,\infty)\times\mathbb{R}^N\,.
\end{equation}
It follows from \eqref{a10} and \eqref{a11} that $w$ solves
\begin{equation}\label{c1}
\partial_{\tau}w -\frac{1}{1+\tau}\left(\Delta_p w + y \cdot \nabla w -\frac{p-1}{p-2}\,w\right)+|\nabla w|^{p-1} -w = 0\,, \quad (\tau,y)\in Q\,,
\end{equation}
with the same initial condition
\begin{equation}\label{c2}
w(0) = u_0\,, \quad y\in\mathbb{R}^N\,.
\end{equation}
Throughout this section we assume that $u_0$ is radially non-increasing, besides \eqref{a3}. In particular, $u_0(0)>0$. We gather several properties of $w$ in the next lemma.

\begin{lemma}\label{lec1}
There is a positive constant $C_4$ depending only on $p$, $N$, and $u_0$ such that
\begin{equation} \label{c3}
\Vert w(\tau)\Vert_1 + \Vert w(\tau)\Vert_\infty + \Vert \nabla w(\tau)\Vert_\infty \le C_4 \,, \quad \tau\ge 0\,,
\end{equation}
\begin{equation} \label{c3b}
w(\tau,y) \ge \frac{1}{C_4} \left( r_*^2 - |y|^2 \right)_+^{(p-1)/(p-2)}\,, \quad (\tau,y)\in Q\,,
\end{equation}
the radius $r_*$ being defined in Corollary~\ref{cor:bfbsb}.
Moreover,
\begin{equation}\label{c4}
\mathcal{P}_w(\tau):= \left\{ y\in\mathbb{R}^N : \;\; w(\tau,y)>0 \right\} \subseteq B\left(0, 1 + \frac{R_1}{1+\tau}\right)
\end{equation}
for $\tau\ge 0$, where $R_1$ is defined by \eqref{bs41}. In addition, for any $c\in (1/2,1)$, we have
\begin{equation}\label{c4b}
B\left( 0, c - \frac{\tau_1(c)}{1+\tau} \right)\subset \mathcal{P}_{w}(\tau) \quad {\rm for } \ \tau\geq \tau_1(c)\,,
\end{equation}
the time $\tau_1(c)>0$ being defined in Lemma~\ref{leb3}.
\end{lemma}

\noindent\textbf{Proof.} The estimates \eqref{c3} and \eqref{c3b} readily follow from \eqref{a12} and \eqref{spirou}, while \eqref{c4} is a consequence of \eqref{fantasio}. The assertion about the ball $B(0,c-\tau_1(c)/(1+\tau))$ follows from \eqref{lebrac}.\qed

\medskip

At this point, \eqref{c1} indicates that, as $\tau\to\infty$, $w(\tau)$ behaves like the solution $\tilde{w}$ of the Hamilton-Jacobi equation $\partial_\tau \tilde{w} + |\nabla\tilde{w}|^{p-1} - \tilde{w}=0$ in $Q$, which is known to converge to a stationary solution uniquely determined by the limit of the support of $\tilde{w}(\tau)$ as $\tau\to\infty$, see, e.g., \cite[Theorem~A.2]{La08}. As an intermediate step, we thus have to identify the limit of the support of $w(\tau)$ as $\tau\to\infty$. Thanks to \eqref{c4}, we already know that it is included in $B(0,1)$, but the information in \eqref{c4b} is yet too weak to exclude the vanishing of $w(\tau)$ outside a ball of radius smaller than one. To complete the proof of Theorem~\ref{asympt.main} for radially non-increasing initial data, we show first that the asymptotic limit is supported exactly in the ball $B(0,1)$. Then we use a viscosity technique, the same as the one used in the previous paper \cite{LV07}, to establish the convergence to the expected stationary solution.

\subsection{Proof of Theorem~\ref{asympt.main}: $N=1$}\label{subsec4.2}

We first consider the one-dimensional case $N=1$ and divide the proof into several technical steps.

\medskip

\noindent \textbf{Step 1. A special family of subsolutions}. Given $c\in (1/2,1)$, we have
$$ v(\tau,x)\ge v_{c,1}(\tau+\tau_0(c)-\tau_1(c),x)\,, \quad (\tau,x)\in [\tau_1(c),\infty)\times\mathbb{R}\,, $$
by \eqref{compTW}, the times $\tau_0(c)$ and $\tau_1(c)$ being defined in \eqref{jeanne} and Lemma~\ref{leb3}, respectively. Then,
\begin{equation}\label{compTW2}
w(\tau,y) \ge w_c(\tau,y) := \frac{1}{(1+\tau)^{(p-1)/(p-2)}}\ v_{c,1}(\tau+\tau_0(c)-\tau_1(c),y(1+\tau))
\end{equation}
for $(\tau,y)\in [\tau_1(c),\infty)\times\mathbb{R}$.

\medskip

\noindent \textbf{Step 2. An explicit family of supersolutions}. Let us introduce the following family of functions:
\begin{equation}\label{FR1}
F_{R}(\tau,y)=\left(\frac{p-2}{p-1}\right)^{(p-1)/(p-2)} \left(\frac{\tau+R}{\tau+1}-|y|\right)_{+}^{(p-1)/(p-2)}\,, \quad (\tau,y)\in Q\,.
\end{equation}
We easily obtain by direct calculation that $F_R$ is a classical solution of \eqref{c1} for $y\neq 0$, and for all parameter values $R\ge 0$.
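\medskip

For the reader's convenience, we sketch this verification (recall that $N=1$ in this subsection; the shorthands $m$ and $h$ below are introduced only for this computation). For $y>0$ inside the support of $F_R$, set $m:=(p-1)/(p-2)$ and $h(\tau,y):=\frac{\tau+R}{\tau+1}-y$, so that $F_R=\left(\frac{p-2}{p-1}\right)^m h^m$ and
\begin{equation*}
\partial_y F_R = -\left(\frac{p-2}{p-1}\right)^{1/(p-2)} h^{1/(p-2)}\,, \qquad |\partial_y F_R|^{p-1}=F_R\,, \qquad |\partial_y F_R|^{p-2}\partial_y F_R = -F_R\,,
\end{equation*}
whence $\Delta_p F_R = -\partial_y F_R$. Using $\partial_\tau h = (1-R)/(1+\tau)^2$ and $1-y-h=(1-R)/(1+\tau)$, the left-hand side of \eqref{c1} evaluated at $F_R$ equals
\begin{equation*}
m\left(\frac{p-2}{p-1}\right)^m h^{m-1}\ \left[ \frac{1-R}{(1+\tau)^2} - \frac{1-y-h}{1+\tau} \right] = 0\,.
\end{equation*}
The case $y<0$ follows since $F_R$ is even in $y$, and outside its support $F_R$ vanishes identically.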
However, near $y=0$, it is only a supersolution both in the weak and the viscosity sense. The latter is straightforward to verify using the definition of viscosity subsolutions and supersolutions with jets, as in the classical survey \cite{CIL92}. Let us mention at this point that these functions can be used in a comparison argument to give an alternative proof of \eqref{c4}.

\begin{remark}\label{rem:nat}
This family of functions arises naturally if we think about asymptotics. Indeed, as already mentioned, we formally expect that the asymptotic profiles of \eqref{c1} should be given by solutions of the stationary Hamilton-Jacobi equation
\begin{equation}\label{limit}
|\nabla \tilde{w}|^{p-1}-\tilde{w}=0,
\end{equation}
supported in some ball $B(0,R)$, that is
\begin{equation*}
H_{R}(y):=\left(\frac{p-2}{p-1}\right)^{(p-1)/(p-2)}\left(R-|y|\right)_{+}^{(p-1)/(p-2)}\,, \quad y\in\mathbb{R}\,.
\end{equation*}
Making then the ``ansatz'' that, for large times, the solution of \eqref{c1} should behave in a similar way as its limit, we write
\begin{equation*}
w(\tau,y)\sim \left(\frac{p-2}{p-1}\right)^{(p-1)/(p-2)}\left(C(\tau)-|y|\right)_{+}^{(p-1)/(p-2)}\,.
\end{equation*}
Integrating the resulting ordinary differential equation for $C(\tau)$, we arrive at the family of explicit exact profiles $F_R$ given by \eqref{FR1}.
\end{remark}

\medskip

\noindent \textbf{Step 3. Constructing suitable subsolutions.} We now face the problem of finding suitable subsolutions with similar behaviour. Since the $F_R$'s are classical solutions to \eqref{c1} except at $y=0$, we expect to be able to construct also a family of subsolutions based on them. To this end, we consider the ``damped'' family $F_{R,\vartheta,\beta}$ defined by
\begin{equation}\label{marsupilami}
F_{R,\vartheta,\beta}(\tau,y) := \vartheta\ \left( \frac{p-2}{p-1} \right)^{(p-1)/(p-2)} \left( \frac{\beta(\tau+R)}{\tau+1} - |y| \right)_+^{(p-1)/(p-2)}\,, \quad (\tau,y)\in Q\,,
\end{equation}
for parameters $R\in (0,1)$, $\vartheta\in (0,1]$, and $\beta\in (1/2,1]$. Observe that, since $(p-1)/(p-2)>1$, $F_{R,\vartheta,\beta}$ and $|\nabla F_{R,\vartheta,\beta}|^{p-2} \nabla F_{R,\vartheta,\beta}$ both belong to $\mathcal{C}^1([0,\infty)\times(\mathbb{R}\setminus\{0\}))$.
For $\vartheta\in (0,1)$, $\beta\in (1/2,1]$, $\tau>0$ and $y\neq0$, we calculate
\begin{equation*}
\begin{split}
\partial_\tau F_{R,\vartheta,\beta}&-\frac{1}{1+\tau}\left(\Delta_{p}F_{R,\vartheta,\beta}+y\cdot\nabla F_{R,\vartheta,\beta}-\frac{p-1}{p-2}F_{R,\vartheta,\beta}\right)+|\nabla F_{R,\vartheta,\beta}|^{p-1}-F_{R,\vartheta,\beta}\\
&=\vartheta \beta \frac{1-R}{(1+\tau)^2} F_{R,1,\beta}^{1/(p-1)} - \frac{\vartheta}{1+\tau} \left( \vartheta^{p-2} - \frac{\beta (\tau+R)}{\tau+1} \right)\ F_{R,1,\beta}^{1/(p-1)} - \vartheta (1-\vartheta^{p-2}) F_{R,1,\beta}\\
&=\vartheta \left( \frac{\beta-\vartheta^{p-2}}{1+\tau} - (1-\vartheta^{p-2}) F_{R,1,\beta}^{(p-2)/(p-1)} \right)\ F_{R,1,\beta}^{1/(p-1)}\\
&\le \vartheta(1-\vartheta^{p-2}) F_{R,1,\beta}^{1/(p-1)} \left[\frac{1}{1+\tau}- \frac{p-2}{p-1}\left(\frac{\beta(\tau+R)}{\tau+1}-|y|\right)\right].
\end{split}
\end{equation*}
Analyzing the sign of the last expression and taking into account that $\vartheta\in(0,1)$, we obtain that $F_{R,\vartheta,\beta}$ has the following properties:
\begin{equation}\label{pim}
\begin{minipage}{10cm}
$F_{R,\vartheta,\beta}$ is a classical subsolution to \eqref{c1} in \\ $\{ (\tau,y)\in Q \ : \ \tau\ge\tau_2(R,\beta)\,, \ 0<|y|\le K_{R,\beta}(\tau) \}$
\end{minipage}
\end{equation}
with
\begin{equation}\label{pam}
\tau_2(R,\beta) := \frac{p-1}{\beta (p-2)} - R \;\;\mbox{ and }\;\; K_{R,\beta}(\tau) := \frac{\beta(\tau+R)}{\tau+1} - \frac{p-1}{p-2}\ \frac{1}{\tau+1}\,,
\end{equation}
and
\begin{equation}\label{poum}
F_{R,\vartheta,\beta} \;\mbox{ vanishes for }\; |y|\geq\frac{\beta(\tau+R)}{\tau+1} \;\mbox{ and }\; \tau\ge 0\,.
\end{equation}
Let us notice here that both the edge of the support of $F_{R,\vartheta,\beta}$ and the constant $K_{R,\beta}(\tau)$, where the behaviour changes, do not depend on $\vartheta$. While the two properties \eqref{pim} and \eqref{poum} are suitable for our purpose, the function $F_{R,\vartheta,\beta}$ does not behave in a suitable way near $y=0$ (where it is a viscosity supersolution) and in an asymptotically small region near the edge of its support (where it is a classical supersolution). However, we already have a positive bound from below for $w$ in a small neighbourhood of $y=0$ by \eqref{c3b}, which allows us to remedy the first bad property of $F_{R,\vartheta,\beta}$. More precisely, we infer from \eqref{c3b} that
$$ w(\tau,y) \ge C_5 := \frac{1}{C_4} \left( \frac{3r_*^2}{4} \right)^{(p-1)/(p-2)}>0\,, \quad (\tau,y)\in [0,\infty)\times B(0,r_*/2)\,, $$
whence
\begin{equation}\label{ld15}
w(\tau,y) \ge \vartheta \ge F_{R,\vartheta,\beta}(\tau,y) \,, \quad (\tau,y)\in [0,\infty)\times B(0,r_*/2)\,,
\end{equation}
provided that
\begin{equation}\label{ld16}
0<\vartheta < \min{\{1,C_5\}}\,.
\end{equation}
Consider next
$$ \tau\ge\tau_2(R,\beta) \;\;\mbox{ and }\;\; K_{R,\beta}(\tau)\le |y| \le \frac{\beta(\tau+R)}{\tau+1}\,.
$$
Then
\begin{eqnarray}
F_{R,\vartheta,\beta}(\tau,y) & \le & \vartheta\ \left( \frac{p-2}{p-1} \right)^{(p-1)/(p-2)}\ \left( \frac{p-1}{p-2}\ \frac{1}{1+\tau} \right)^{(p-1)/(p-2)} \nonumber \\
& = & \frac{\vartheta}{(1+\tau)^{(p-1)/(p-2)}}\,.\label{ld17}
\end{eqnarray}
Now, if $c\in (\beta,1)$, we have
$$ |y| (1+\tau) \le \beta (\tau+R) \le \tilde{z}_{c,1,(1-c)/(1+c)} + c (\tau+\tau_0(c)-\tau_1(c)) $$
as soon as
\begin{equation}\label{ld18}
\tau\ge \tau_3(c,R,\beta) := \frac{\beta R+c(\tau_1(c)-\tau_0(c))-\tilde{z}_{c,1,(1-c)/(1+c)}}{c-\beta}\,.
\end{equation}
In that case,
$$ w_c(\tau,y) = \frac{1}{(1+\tau)^{(p-1)/(p-2)}}\ v_{c,1}(\tau+\tau_0(c)-\tau_1(c),y(1+\tau)) = \frac{M_{c,(1-c)/(1+c)}}{(1+\tau)^{(p-1)/(p-2)}} $$
according to the properties \eqref{subs.dimN} of $v_{c,1}$. Recalling \eqref{compTW2} and \eqref{ld17} we realize that
\begin{equation}\label{ld19}
F_{R,\vartheta,\beta}(\tau,y) \le w_c(\tau,y) \le w(\tau,y)\,, \quad K_{R,\beta}(\tau)\le |y| \le \frac{\beta(\tau+R)}{\tau+1}\,,
\end{equation}
provided
\begin{equation}\label{ld20}
c\in (\beta,1)\,, \quad \vartheta<\min{\{1, M_{c,(1-c)/(1+c)}\}}\,, \quad \tau\ge \max{\{ \tau_1(c) , \tau_2(R,\beta), \tau_3(c,R,\beta)\}}\,.
\end{equation}
After this preparation, we are in a position to establish a positive lower bound for $w$ on the ball $B(0,1-\varepsilon)$ for any $\varepsilon\in (0,1/4)$. Indeed, we fix $\varepsilon\in (0,1/4)$, choose $c=1-\varepsilon$, $R=\beta=1-2\varepsilon$, and define
$$ \tau_4(\varepsilon) := \max{\left\{ \frac{\tau_1(1-\varepsilon)}{\varepsilon} , \tau_2(1-2\varepsilon,1-2\varepsilon), \tau_3(1-\varepsilon,1-2\varepsilon,1-2\varepsilon) \right\}}\,. $$
As $\tau_4(\varepsilon)>\tau_1(1-\varepsilon)/\varepsilon$, \eqref{c4b} guarantees that $B(0,1-2\varepsilon)\subset \mathcal{P}_w(\tau_4(\varepsilon))$ and there is thus $m_\varepsilon\in (0,1)$ such that
\begin{equation}\label{ld21}
w(\tau_4(\varepsilon),y)\ge m_\varepsilon\,, \quad y\in B(0,1-2\varepsilon)\,.
\end{equation}
Now, for $\vartheta\in (0,1)$ satisfying
\begin{equation}\label{ld22}
0<\vartheta<\min{\{ m_\varepsilon, C_5, M_{1-\varepsilon,\varepsilon/(2-\varepsilon)} \}}
\end{equation}
we infer from \eqref{pam}, \eqref{ld15}, \eqref{ld16}, \eqref{ld19}, \eqref{ld20}, and \eqref{ld21} that
$$ F_{1-2\varepsilon,\vartheta,1-2\varepsilon}(\tau,y) \le w(\tau,y)\,, \quad |y|\in\left\{ \frac{r_*}{2} , K_{1-2\varepsilon,1-2\varepsilon}(\tau) \right\}\,, \quad \tau\ge \tau_4(\varepsilon)\,, $$
and
$$ F_{1-2\varepsilon,\vartheta,1-2\varepsilon}(\tau_4(\varepsilon),y) \le \vartheta \le m_\varepsilon \le w(\tau_4(\varepsilon),y)\,, \quad \frac{r_*}{2} \le |y| \le K_{1-2\varepsilon,1-2\varepsilon}(\tau_4(\varepsilon)) \le 1-2\varepsilon\,.
$$
It then follows from \eqref{c1}, \eqref{pim}, and the comparison principle that
$$ F_{1-2\varepsilon,\vartheta,1-2\varepsilon}(\tau,y) \le w(\tau,y)\,, \quad \frac{r_*}{2} \le |y| \le K_{1-2\varepsilon,1-2\varepsilon}(\tau) \,, \quad \tau\ge \tau_4(\varepsilon)\,. $$
Recalling \eqref{poum}, \eqref{ld15}, and \eqref{ld19}, we have thus established that
\begin{equation}\label{ld23}
F_{1-2\varepsilon,\vartheta,1-2\varepsilon}(\tau,y) \le w(\tau,y)\,, \quad (\tau,y)\in [\tau_4(\varepsilon),\infty)\times\mathbb{R}\,,
\end{equation}
for all $\vartheta\in (0,1)$ satisfying \eqref{ld22}.
\medskip \noindent \textbf{Step 4. Positive bound from below.} For $\varepsilon\in (0,1/4)$, fix $\vartheta_\varepsilon\in (0,1)$ satisfying \eqref{ld22}. According to \eqref{ld23}, we have, for $\tau\ge\tau_4(\varepsilon)+1$ and $y\in B(0,1-3\varepsilon)$,
\begin{eqnarray*}
w(\tau,y) & \ge & \vartheta_\varepsilon \left( \frac{p-2}{p-1} \right)^{(p-1)/(p-2)}\ \left( \frac{(1-2\varepsilon) (\tau+1-2\varepsilon)}{\tau+1} - |y| \right)_+^{(p-1)/(p-2)} \\
& \ge & \vartheta_\varepsilon \left( \frac{p-2}{p-1} \right)^{(p-1)/(p-2)}\ \left( \frac{\varepsilon (\tau-1+4\varepsilon)}{\tau+1} \right)_+^{(p-1)/(p-2)}\\
& \ge & \mu_\varepsilon:=\vartheta_\varepsilon \left( \frac{2(p-2) \varepsilon^2}{p-1} \right)^{(p-1)/(p-2)}>0\,.
\end{eqnarray*}
We have thus proved that, for all $\varepsilon\in (0,1/4)$, there are $\mu_\varepsilon>0$ and $\tau_5(\varepsilon):=\tau_4(\varepsilon)+1$ such that
\begin{equation}\label{ld24}
0 < \mu_\varepsilon \le w(\tau,y)\,, \quad (\tau,y)\in [\tau_5(\varepsilon),\infty)\times B(0,1-3\varepsilon)\,.
\end{equation}
\medskip \noindent \textbf{Step 5. Convergence. Viscosity argument.} To complete the proof, we use an argument relying on the theory of viscosity solutions in a similar way as in the paper \cite{LV07} for the subcritical case of \eqref{a1} with $q\in(1,p-1)$. We thus employ the technique of half-relaxed limits \cite{BlP88} in the same fashion as in \cite[Section~3]{Ro01} and \cite{LV07}. To this end, we pass to logarithmic time and introduce the new variable $s:=\log(1+\tau)$ along with the new unknown function
$$ w(\tau,y) = \omega(\log{(1+\tau)} , y)\,, \quad (\tau,y)\in [0,\infty)\times\mathbb{R}\,. $$
Then, $\partial_\tau w(\tau,y)=e^{-s} \partial_s\omega(s,y)$ and it follows from \eqref{c1} and \eqref{c2} that $\omega$ solves
\begin{equation}\label{log-time}
e^{-s} \left( \partial_s \omega - \Delta_p \omega - y \cdot \nabla \omega + \frac{p-1}{p-2}\ \omega \right) + \vert\nabla \omega \vert^{p-1} - \omega = 0\,, \quad (s,y)\in Q\,,
\end{equation}
with initial condition $\omega(0)=u_0$. We readily infer from Lemma~\ref{lec1} that
\begin{eqnarray}
& & \Vert \omega(s)\Vert_1 + \Vert \omega(s)\Vert_\infty + \Vert \nabla \omega(s)\Vert_\infty \le C_4 \,, \quad s\ge 0\,, \label{cvld20} \\
& & \omega(s,y) = 0 \;\;\;\mbox{ for }\;\;\; s\ge 0 \;\;\;\mbox{ and }\;\;\; |y|\ge 1+R_1 e^{-s}\,.
\label{cvld21}
\end{eqnarray}
We next introduce the half-relaxed limits
$$ \omega_*(y) := \liminf_{(\sigma,z,\lambda) \to (s,y,\infty)}{\omega(\lambda+\sigma,z)} \quad\mbox{ and }\quad \omega^*(y) := \limsup_{(\sigma,z,\lambda) \to (s,y,\infty)}{\omega(\lambda+\sigma,z)}, $$
for $(s,y)\in Q$, which are well-defined according to the uniform bounds in \eqref{cvld20} and indeed do not depend on $s>0$. Then, the definition of $\omega_*$ and $\omega^*$ clearly ensures that
\begin{equation} \label{c6}
0 \le \omega_*(y) \le \omega^*(y) \;\;\mbox{ for }\;\; y\in\mathbb{R} \,,
\end{equation}
while the uniform bounds \eqref{cvld20} and the Rademacher theorem warrant that $\omega_*$ and $\omega^*$ both belong to $W^{1,\infty}(\mathbb{R})$. Finally, by Proposition~\ref{pert} applied to \eqref{log-time}, $\omega_*$ and $\omega^*$ are viscosity supersolution and subsolution, respectively, to the Hamilton-Jacobi equation
\begin{equation} \label{c7}
H(\zeta,\nabla\zeta) :=|\nabla\zeta|^{p-1} - \zeta = 0 \;\;\;\mbox{ in }\;\;\; \mathbb{R}\,.
\end{equation}
Our aim is now to show that $\omega_*\ge \omega^{*}$ in $\mathbb{R}$ (which implies that $\omega_*=\omega^{*}$ by \eqref{c6}). Since $\omega^{*}$ and $\omega_*$ are subsolution and supersolution to \eqref{c7}, respectively, such an inequality would follow from a comparison principle, which cannot be applied yet without further information on $\omega^{*}$ and $\omega_*$. We actually need to prove the following two facts:
\begin{itemize}
\item[(a)] $\omega_*(y)=\omega^*(y)=0$ if $|y|\ge 1$,
\item[(b)] $\omega^{*}(y)\geq \omega_*(y)>0$ if $y\in B(0,1)$,
\end{itemize}
and then to follow the technique used in \cite{LV07} to conclude that $\omega_*=\omega^*$ and identify the limit. To prove assertion~(a), let us take $y\in\mathbb{R}$ with $|y|>1$. We then deduce from \eqref{cvld21} that there exists $s_1(y)>0$ such that $\omega(s,y)=0$ for $s\ge s_1(y)$. Pick sequences $(\sigma_n)_{n\geq1}$, $(\lambda_n)_{n\geq1}$, and $(z_n)_{n\geq1}$ such that $\sigma_n\to0$, $\lambda_n\to\infty$, $z_n\to y$, and $\omega(\sigma_n+\lambda_n,z_n)\to \omega^{*}(y)$. On the one hand, there exists $n_1(y)>0$ such that $\sigma_n+\lambda_n>s_1(y)$ for any $n\geq n_1(y)$; hence $\omega(\sigma_n + \lambda_n,y) = 0$ for any $n\geq n_1(y)$. On the other hand, we can write
$$ |\omega(\sigma_n+\lambda_n,z_n)-\omega(\sigma_n+\lambda_n,y)|\leq|y-z_n|\|\nabla \omega(\sigma_n+\lambda_n)\|_{\infty}\leq C_{4}|y-z_n|\to0, $$
hence $\omega^{*}(y)=0=\omega_{*}(y)$ for any $y\in\mathbb{R}$ with $|y|>1$. In addition, since $\omega^{*}$ and $\omega_{*}$ are continuous, it follows that $\omega^{*}=\omega_{*}=0$ also for $|y|=1$, hence assertion~(a). To prove assertion~(b), let us take $y\in B(0,1)$. Then, there exists $\varepsilon\in (0,1/4)$ such that $y\in B(0,1-4\varepsilon)$.
Since $1-3\varepsilon>1-4\varepsilon$, there is $r_2(y)>0$ such that $B(y,r_2(y))\subset B(0,1-3\varepsilon)$, and we deduce from \eqref{ld24} that there exists $s_2(\varepsilon):=\log{(\tau_5(\varepsilon)+1)}>0$ such that $\omega(s,z)\ge\mu_\varepsilon$ for any $s\geq s_2(\varepsilon)$ and $z\in B(y,r_2(y))$. We now pick sequences $(\sigma_n)_{n\geq1}$, $(\lambda_n)_{n\geq1}$, and $(z_n)_{n\geq1}$ such that $\sigma_n\to0$, $\lambda_n\to\infty$, $z_n\to y$, and $\omega(\sigma_n+\lambda_n,z_n)\to \omega_{*}(y)$. Then there exists again $n_2(y)>0$ such that $\sigma_n+\lambda_n>s_2(\varepsilon)$ and $z_n\in B(y,r_2(y))$ for any $n\geq n_2(y)$. Consequently, $\omega(\sigma_n + \lambda_n,z_n) \geq \mu_\varepsilon$ for any $n\geq n_2(y)$. This readily implies that $\omega^{*}(y)\geq \omega_{*}(y)\geq\mu_\varepsilon>0$, hence (b) is proved. We follow the lines of \cite{LV07} and introduce
\begin{equation}
W_{*}(y)=\frac{p-1}{p-2}\ \omega_{*}(y)^{(p-2)/(p-1)}, \quad W^{*}(y)=\frac{p-1}{p-2}\ \omega^{*}(y)^{(p-2)/(p-1)},
\end{equation}
for any $y\in B(0,1)$. From Proposition~\ref{eik}, it follows that $W_{*}$ and $W^{*}$ are respectively viscosity supersolution and subsolution of the eikonal equation
$$ |\nabla \zeta|=1 \quad \mbox{in} \ B(0,1), $$
with boundary conditions $W^{*}(y)=W_{*}(y)=0$ for $|y|=1$, and are both positive in $B(0,1)$. Using the comparison principle of Ishii \cite{I87}, we find that $W^{*}(y)\leq W_{*}(y)$, hence they coincide by \eqref{c6}. It follows that $\omega_{*}=\omega^{*}=W$ in $B(0,1)$, where $W$ is the viscosity solution to \eqref{eqlim}
$$ |\nabla W|^{p-1} - W = 0 \quad\mbox{ in }\quad B(0,1)\,, \qquad W =0 \quad\mbox{ on }\quad \partial B(0,1)\,, $$
which is actually explicit and given by
$$ W(x):=\left(\frac{p-2}{p-1}\ (1-|x|)_{+}\right)^{(p-1)/(p-2)}, $$
as stated in Theorem~\ref{asympt.main}. In addition, the equality $\omega_*=\omega^{*}$ and \eqref{cvld21} entail the convergence of $\omega(s)$ as $s\to \infty$ towards $W$ in $L^\infty(\mathbb{R})$ by Lemma~4.1 in \cite{Bl94} or Lemma~V.1.9 in \cite{BdCD97}. We end the proof by rephrasing the two scaling steps, thereby arriving at \eqref{main.asympt}. \qed
\subsection{Proof of Theorem~\ref{asympt.main}: $N\ge 2$}\label{subsec4.3}
We now prove Theorem~\ref{asympt.main} for radially non-increasing initial data for the problem posed in dimension $N\ge 2$. We follow the same steps as in dimension $N=1$, and we only indicate below the main differences that appear. These differences are mainly given by the appearance of the new term
\begin{equation}\label{newterm}
\frac{N-1}{r}|\partial_r w|^{p-2}\partial_r w \,, \quad r=|y|\,,
\end{equation}
in the radial form of the $p$-Laplacian term. As we shall see, performing carefully the same steps as for dimension $N=1$, we find that this term does not change anything in an essential way. We follow the same division into steps as the case $N=1$.
\medskip \noindent \textbf{Step 1.} Thanks to the construction performed in Section~\ref{TWN}, this step is the same as in dimension $N=1$.
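For later use in Step~2, let us also recall the standard formula for the $p$-Laplacian of a radially symmetric function $w=w(r)$, $r=|y|$, from which the extra term \eqref{newterm} originates:
\begin{equation*}
\Delta_p w = \partial_r\left( |\partial_r w|^{p-2}\,\partial_r w \right) + \frac{N-1}{r}\,|\partial_r w|^{p-2}\,\partial_r w\,, \qquad r>0\,.
\end{equation*}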
\medskip \noindent \textbf{Step 2.} Due to the appearance of the extra term \eqref{newterm} in the radial form of the equation \eqref{c1}, we check by direct calculation that, in dimension $N\ge 2$, the function $F_R$ given by formula \eqref{FR1} is now a strict supersolution to \eqref{c1} in $Q$. Indeed, for $y\ne 0$,
$$ \partial_\tau F_R -\frac{1}{1+\tau}\left(\Delta_{p}F_R+y\cdot\nabla F_R-\frac{p-1}{p-2}F_R\right)+|\nabla F_R|^{p-1}-F_R =\frac{N-1}{(1+\tau) |y|} F_R\,. $$
Moreover, its singularity at $y=0$ is now stronger. This seems to introduce a new difficulty, but we will see that it can be handled by the same perturbation techniques. Let us note at this point that $F_R$ can be used for upper bounds in the same way as in the case $N=1$, and that $F_R$ still solves the limit Hamilton-Jacobi equation \eqref{limit}.
\medskip \noindent \textbf{Step 3.} In order to construct subsolutions starting from the family of functions $F_R$, we follow again the ideas of the case $N=1$. The calculations differ at some points. We consider again the damped family $F_{R,\vartheta,\beta}$ defined in \eqref{marsupilami} for $R\in (0,1)$, $\vartheta\in (0,1)$, and $\beta\in (1/2,1]$. For $y\neq0$ we have
\begin{equation*}
\begin{split}
Y&:=\partial_\tau F_{R,\vartheta,\beta}-\frac{1}{1+\tau}\left(\Delta_{p}F_{R,\vartheta,\beta}+y\cdot\nabla F_{R,\vartheta,\beta}-\frac{p-1}{p-2}F_{R,\vartheta,\beta}\right)+|\nabla F_{R,\vartheta,\beta}|^{p-1}-F_{R,\vartheta,\beta}\\
&= \vartheta F_{R,1,\beta}^{1/(p-1)} \left[\frac{\beta-\vartheta^{p-2}}{1+\tau} + \frac{(N-1)\vartheta^{p-2}}{(1+\tau) |y|} F_{R,1,\beta}^{(p-2)/(p-1)} - (1-\vartheta^{p-2}) F_{R,1,\beta}^{(p-2)/(p-1)} \right].
\end{split}
\end{equation*}
At this point, we further assume that $|y|>r_*/2$, the radius $r_*$ being defined in Corollary~\ref{cor:bfbsb}, and that
\begin{equation}\label{shortofidea}
\vartheta^{p-2} \le \frac{(1-\beta) r_*}{2(N-1)}\,.
\end{equation}
Since $F_{R,1,\beta}\le 1$, we obtain
\begin{equation*}
\begin{split}
Y & \le \vartheta F_{R,1,\beta}^{1/(p-1)} \left[\frac{\beta-\vartheta^{p-2}}{1+\tau} + \frac{2(N-1)\vartheta^{p-2}}{(1+\tau) r_*} - (1-\vartheta^{p-2}) F_{R,1,\beta}^{(p-2)/(p-1)} \right]\\
& \le \vartheta(1-\vartheta^{p-2}) F_{R,1,\beta}^{1/(p-1)} \left[\frac{1}{1+\tau}- \frac{p-2}{p-1}\left(\frac{\beta(\tau+R)}{\tau+1}-|y|\right)\right]\,,
\end{split}
\end{equation*}
from which we conclude that
\begin{equation}\label{stillshortofidea}
\begin{minipage}{10cm}
$F_{R,\vartheta,\beta}$ is a classical subsolution to \eqref{c1} in \\
$\{ (\tau,y)\in Q \ : \ \tau\ge\tau_2(R,\beta)\,, \ (r_*/2)<|y|\le K_{R,\beta}(\tau) \}$\,,
\end{minipage}
\end{equation}
where $\tau_2(R,\beta)$ and $K_{R,\beta}(\tau)$ are still given by \eqref{pam}. We now proceed as in the one-dimensional case to establish \eqref{ld23} for all $\vartheta\in (0,1)$ satisfying \eqref{ld22} along with
$$ \vartheta^{p-2} \le \frac{\varepsilon r_*}{N-1}\,, $$
for \eqref{shortofidea} to be satisfied.
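Indeed, with the choice $\beta=1-2\varepsilon$ made in Step~3 of the one-dimensional case, a direct computation gives
\begin{equation*}
\frac{(1-\beta)\,r_*}{2(N-1)} = \frac{2\varepsilon\,r_*}{2(N-1)} = \frac{\varepsilon\,r_*}{N-1}\,,
\end{equation*}
so that the above smallness condition on $\vartheta^{p-2}$ is precisely \eqref{shortofidea} for this choice of $\beta$.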
\medskip \noindent \textbf{Steps 4 \& 5.} The final steps of the proof are similar to the one-dimensional case. \qed
\section{Arbitrary initial data}\label{sec:aid}
So far, we have proved Theorems~\ref{asympt.main} and~\ref{main.posit} for radially non-increasing initial data satisfying \eqref{a3}. We now extend these two results to general initial data satisfying \eqref{a3}.
\medskip \noindent\textbf{Proof of Theorems~\ref{asympt.main} and~\ref{main.posit}.} Since $u_0\not\equiv 0$, there are $x_0\in\mathbb{R}^N$, $r_0>0$, and $\eta_0>0$ such that $u_0(x)\ge 2\eta_0$ for $x\in B(x_0,r_0)$. Then, there exists a radially non-increasing initial condition $\tilde{u}_0$ satisfying \eqref{a3}, but with support in $B(0,r_0)$, such that $\|\tilde{u}_0\|_\infty\le\eta_0$ and $\tilde{u}_0(x) \le u_0(x-x_0)$ for $x\in\mathbb{R}^N$. Similarly, there is a radially non-increasing initial condition $\tilde{U}_0$ satisfying \eqref{a3}, but with support in $B(0,\tilde{R}_0)$ for some $\tilde{R}_0>R_0$, such that $\tilde{U}_0(x)\ge \|u_0\|_\infty$ for $x\in B(0,R_0)$. Denoting by $\tilde{u}$ and $\tilde{U}$ the solutions to \eqref{a1} with initial conditions $\tilde{u}_0$ and $\tilde{U}_0$, respectively, the comparison principle and the translational invariance of \eqref{a1} ensure that
\begin{equation}\label{prunelle}
\tilde{u}(t,x+x_0) \le u(t,x) \le \tilde{U}(t,x)\,, \quad (t,x)\in Q\,.
\end{equation}
Moreover, since
$$ \left| \left( 1 - \frac{(p-2)|x+x_0|}{\log{t}}\right)_+^{(p-1)/(p-2)} - \left( 1 - \frac{(p-2)|x|}{\log{t}} \right)_+^{(p-1)/(p-2)} \right| \le \frac{(p-1)|x_0|}{\log{t}}\,, $$
and Theorems~\ref{asympt.main} and~\ref{main.posit} apply to both $\tilde{u}$ and $\tilde{U}$, the expected results follow from \eqref{prunelle}. \qed
\section*{Appendix. Some results about viscosity solutions}
\setcounter{section}{7}
We state, for the sake of completeness, some standard results in the theory of viscosity solutions that we use in the proof of Theorem~\ref{asympt.main}. The first one concerns the ``viscosity'' limit of a family of small perturbations and can be found in \cite[Theorem~4.1]{Bl94}.
\begin{proposition}\label{pert}
Let $u_{\varepsilon}$ be a viscosity subsolution (resp. a viscosity supersolution) of the equation
$$ H_{\varepsilon}(x,u_{\varepsilon},\nabla u_{\varepsilon},D^{2}u_{\varepsilon})=0 \quad \mbox{in} \ \mathbb{R}^N, $$
where $H_{\varepsilon}$ is uniformly bounded in all variables and degenerate elliptic. Suppose that $\{u_{\varepsilon}\}$ is a uniformly bounded family of functions. Then
\begin{equation}
u^{*}(x):=\limsup\limits_{(y,\varepsilon)\to(x,0)}u_{\varepsilon}(y)
\end{equation}
is a subsolution of the equation
\begin{equation}
H_{*}(x,u,\nabla u,D^2u)=0.
\end{equation}
In the same way,
$$ u_{*}(x):=\liminf\limits_{(y,\varepsilon)\to(x,0)}u_{\varepsilon}(y) $$
is a supersolution of $ H^{*}(x,u,\nabla u,D^2u)=0$. Here, $H_{*}$ and $H^{*}$ are constructed in the same way as $u_{*}$ and $u^{*}$.
\end{proposition}
In other words, this result can be applied to asymptotically small perturbations of a known equation, as we do in Section~\ref{Sect.scaling2}.
We also use the following result:
\begin{proposition}\label{eik}
Let $u\in C(\Omega)$ be a viscosity solution of
\begin{equation}
H(x,u,\nabla u)=0 \quad \mbox{in} \ \Omega,
\end{equation}
where $\Omega\subset\mathbb{R}^N$ and $H$ is a continuous function. If $\Phi\in C^{1}(\mathbb{R})$ is an increasing function, then $v=\Phi(u)$ is a viscosity solution of
\begin{equation}
H\left(x,\Phi^{-1}(v(x)),(\Phi^{-1})'(v(x))\nabla v(x)\right)=0.
\end{equation}
\end{proposition}
The same result holds true for subsolutions and supersolutions and can be found in \cite{Bl94}. In particular, we use this result in order to pass from the Hamilton-Jacobi equation $|\nabla u|^{p-1}-u=0$ to the standard eikonal equation $|\nabla v|=1$. Finally, we also use the (now standard) comparison principle for viscosity subsolutions and supersolutions of the eikonal equation, that can be found in \cite{I87}.
\bigskip
\textsc{Acknowledgements}. The first and the third author are supported by the Spanish Projects MTM2005-08760-C02-01 and MTM2008-06326-C02-01. JLV was partially supported by the ESF Programme ``Global and geometric aspects of nonlinear partial differential equations''. This work was initiated while the second author enjoyed the support and hospitality of the Departamento de Matem\'aticas of the Universidad Aut\'onoma de Madrid.
\bibliographystyle{plain}
\begin{thebibliography}{1}
\bibitem{ATU04} D. Andreucci, A.~F.~Tedeev, and M. Ughi, \emph{The Cauchy problem for degenerate parabolic equations with source and damping}, Ukrainian Math. Bull. \textbf{1} (2004), 1--23.
\bibitem{Ar80} D. G. Aronson, \emph{Density-dependent interaction-diffusion systems. Dynamics and modelling of reactive systems} (Proc. Adv. Sem., Math. Res. Center, Univ. Wisconsin, Madison, Wis., 1979), 161--176, Publ. Math. Res. Center, Univ. Wisconsin \textbf{44}, Academic Press, New York--London, 1980.
\bibitem{AC83} D. G. Aronson and L. Caffarelli, \emph{The initial trace of a solution of the porous medium equation}, Trans. Amer. Math. Soc. \textbf{280} (1983), 351--366.
\bibitem{BdCD97} M.~Bardi and I.~Capuzzo-Dolcetta, \emph{Optimal Control and Viscosity Solutions of Hamilton-Jacobi-Bellman Equations}, Systems Control Found. Appl., Birkh\"auser, Boston, 1997.
\bibitem{BlP88} G. Barles and B. Perthame, \emph{Exit time problems in optimal control and vanishing viscosity method}, SIAM J. Control Optim. \textbf{26} (1988), 1133--1148.
\bibitem{Bl94} G. Barles, \emph{Solutions de Viscosit\'e des Equations d'Hamilton-Jacobi}, Math\'ematiques \& Applications \textbf{17}, Springer-Verlag, Berlin, 1994.
\bibitem{BtL08} J.-Ph. Bartier and Ph. Lauren\c cot, \emph{Gradient estimates for a degenerate parabolic equation with gradient absorption and applications}, J. Funct. Anal. \textbf{254} (2008), 851--878.
\bibitem{BKL04} S. Benachour, G. Karch, and Ph. Lauren\c cot, \emph{Asymptotic profiles of solutions to viscous Hamilton-Jacobi equations}, J. Math. Pures Appl. (9) \textbf{83} (2004), 1275--1308.
\bibitem{BGK04} P. Biler, M. Guedda, and G. Karch, \emph{Asymptotic properties of solutions of the viscous Hamilton-Jacobi equation}, J. Evolution Equations \textbf{4} (2004), 75--97.
\bibitem{CIL92} M. G.~Crandall, H. Ishii, and P.-L.
Lions, \emph{User's guide to viscosity solutions of second order partial differential equations}, Bull. Amer. Math. Soc. (N.S.) \textbf{27} (1992), 1--67.
\bibitem{GL07} Th. Gallay and Ph. Lauren\c cot, \emph{Asymptotic behavior for a viscous Hamilton-Jacobi equation with critical exponent}, Indiana Univ. Math. J. \textbf{56} (2007), 459--479.
\bibitem{Gi05} B.~Gilding, \emph{The Cauchy problem for $u_t=\Delta u+| \nabla u|^q$, large-time behaviour}, J. Math. Pures Appl. (9) \textbf{84} (2005), 753--785.
\bibitem{I87} H. Ishii, \emph{A simple, direct proof of uniqueness for solutions of the Hamilton-Jacobi equations of eikonal type}, Proc. Amer. Math. Soc. \textbf{100} (1987), 247--251.
\bibitem{KPP} A. N. Kolmogorov, I. G. Petrovskii, and N. S. Piskunov, \emph{A study of a diffusion equation coupled with the growth in the amount of a material, and its application to a biological problem}, Byull. Moskov. Gos. Univ., Sect. A \textbf{1} (1937), 1--26.
\bibitem{La08} Ph. Lauren\c cot, \emph{Large time behaviour for diffusive Hamilton-Jacobi equations}, in ``Topics in Mathematical Modeling'', M.~Bene\v{s} \& E.~Feireisl (eds.), Lect. Notes Ne\v cas Center Math. Modeling \textbf{4}, Matfyzpress, Praha, 2008, pp.~95--168.
\bibitem{LV07} Ph. Lauren\c cot and J. L. V\'azquez, \emph{Localized non-diffusive asymptotic patterns for nonlinear parabolic equations with gradient absorption}, J. Dynamics Differential Equations \textbf{19} (2007), 985--1005.
\bibitem{Pe} L. Perko, \emph{Differential Equations and Dynamical Systems}, Texts in Applied Mathematics \textbf{7}, Springer-Verlag, New York, 1991.
\bibitem{QRV} F. Quir\'os, J. D. Rossi, and J. L. V\'azquez, \emph{Thermal avalanche for blow-up solutions of semilinear heat equations}, Comm. Pure Appl. Math. \textbf{57} (2004), 59--98.
\bibitem{Ro01} J.-M. Roquejoffre, \emph{Convergence to steady states or periodic solutions in a class of Hamilton-Jacobi equations}, J. Math. Pures Appl. (9) \textbf{80} (2001), 85--104.
\bibitem{Sh04a} Peihu Shi, \emph{Self-similar singular solution of a $p$-Laplacian evolution equation with gradient absorption term}, J. Partial Differential Equations \textbf{17} (2004), 369--383.
\bibitem{V91} J. L. V\'azquez, \emph{Singular solutions and asymptotic behaviour of nonlinear parabolic equations}, International Conference on Differential Equations, Vol. 1, 2 (Barcelona, 1991), 234--249, World Sci. Publ., River Edge, NJ, 1993.
\bibitem{VaTube} J. L. V\'azquez, \emph{Porous medium flow in a tube. Travelling waves and KPP behaviour}, Comm. Contemporary Math. \textbf{9} (2007), 731--751.
\end{thebibliography}
\end{document}
\begin{document} \title{Classical multiparty computation using quantum resources} \author{Marco Clementi$^{1,2}$, Anna Pappa$^{3,4}$, Andreas Eckstein$^1$, Ian~A.~Walmsley$^1$, Elham Kashefi$^{3,5}$, Stefanie Barz$^{1,6}$} \affiliation{$^1$~Clarendon Laboratory, Department of Physics, University of Oxford, United Kingdom\\ $^2$~Department of Physics, University of Pavia, Italy\\ $^3$~School of Informatics, University of Edinburgh, United Kingdom\\ $^4$~Department of Physics, University College London, United Kingdom\\ $^5$~LIP6 - CNRS, Universit\'e Pierre et Marie Curie, Paris, France\\ $^6$~ Institute for Functional Matter and Quantum Technologies and Center for Integrated Quantum Science and Technology IQST, University of Stuttgart, Germany} \date{\today}
\begin{abstract}
In this work, we demonstrate a new way to perform classical multiparty computing amongst parties with limited computational resources. Our method harnesses quantum resources to increase the computational power of the individual parties. We show how a set of clients restricted to linear classical processing are able to jointly compute a non-linear multivariable function that lies beyond their individual capabilities. The clients are only allowed to perform classical XOR gates and single-qubit gates on quantum states. We also examine the type of security that can be achieved in this limited setting. Finally, we provide a proof-of-concept implementation using photonic qubits that allows four clients to compute a specific example of a multiparty function, the pairwise AND.
\end{abstract}
\maketitle
\section{Introduction}
The ability to communicate and perform computations between parties in a network has become the cornerstone of the modern information age. As more and more parties with limited resources become connected in wide-scale distributed systems, a critical need is to develop efficient protocols for multiparty computations (MPC), both in terms of communication load and computing capability~\cite{Yao1982,Damgard2006,Bogetoft2009,Saia2015}. One approach to efficiently performing MPC is by exploiting quantum resources. It has been shown that measurements on specific types of entangled states (GHZ states~\cite{Greenberger1989}), when controlled by a linear computer, are sufficient to compute non-linear universal functions~\cite{Anders2009}. Based on that result, it has been demonstrated that MPC under specific assumptions (use of a trusted party, restricted adversaries) is secure, by virtue of the quantum correlations of a GHZ state~\cite{Louko2010}. Similar results have recently been shown in a client-server scenario, where a client restricted to linear (XOR) operations is enabled to securely delegate the computation of a universal boolean function to a quantum server~\cite{Dunjko2014, Barz2016}. The idea behind all these protocols is to use quantum resources in order to compute classical functions more efficiently, without having to build a fully-fledged quantum computer.
\begin{figure} \caption{\label{fig:Figure1}} \end{figure}
In this work, we propose a new way of computing non-linear multivariable functions using only linear classical computing and limited manipulation of quantum information. We examine the scenario where a number of clients want to jointly compute a boolean function of their inputs. We consider that the clients have limited computing capabilities, namely access to linear (XOR) functionalities.
We show how quantum resources can enable such limited clients to securely compute non-linear functions, the complexity of which lies beyond their computing capability. Since access to XOR gates alone is not sufficient for universal classical computing, the clients' computational power is enhanced by means of manipulation of quantum resources provided by a server. To demonstrate this boost of computational capabilities using quantum resources, we will focus on a particular example of classical non-linear multiparty computation (the pairwise AND function) that requires as little as one single qubit of communication between the clients. Due to the low quantum communication cost required for the evaluation of this function, the proposed protocol can be used as a building block for more complex computations. The basic idea of our approach is shown in Fig.~\ref{fig:Figure1}. A quantum server generates a single qubit that is sent through a chain of clients. Each of the clients applies a rotation on the received quantum state according to their classical input. The quantum state is then sent back to the server, which performs a measurement to obtain the result of the computation. Our protocol is designed in such a way that the input of each client remains hidden from the other clients and from the server. Furthermore, the result of the computation remains hidden from the server and is sent back to the clients in an encrypted fashion, meaning that the server performs the computation without learning anything about the result.
\section{Theory}
Our aim is to compute a non-linear boolean function $f(x_1,\dots,x_n)$ on input bits $x_i\in\{0,1\}$. We focus on a particular example of a basic multivariable boolean function, the pairwise AND:
\begin{eqnarray}~\label{eqn:function}
f(x_1,\dots,x_n)&=& \bigoplus_{j=1}^{n-1} \Big( x_{j+1}\cdot \big(\bigoplus_{i=1}^j x_i\big )\Big )
\end{eqnarray}
The addition and multiplication are, respectively, the XOR operation and the logical AND operation over the field $\mathbb{F}_2$. If the function in Eqn.~\ref{eqn:function} were linear, then a change in the assignment of one of the variables would either always change the value of the function or would never change it. However, it is easy to verify that the function at hand does not follow this rule, and as a non-linear function, it cannot be computed using only linear operations in $\mathbb{F}_2$, such as XOR, but necessitates the use of non-linear operations like NAND. Now let us define by $U=R_y(\pi/2)$ the rotation around the $y$-axis of the Bloch sphere (i.e. $R_y(\theta)= e^{-i\theta\sigma_y/2 }$). Then the following equation can be used to compute the function $f=f(x_1,\dots,x_n)$ in Eqn.~\ref{eqn:function}:
\begin{equation} \label{wopadding1}
(U^\dagger)^\globalxor{x}U^{x_n}\dotsm U^{x_2}U^{x_1}\ket{0}=\ket{f}
\end{equation}
The fact that Eqn.~\ref{wopadding1} uses only linear processing and operations on a single qubit to compute a non-linear function demonstrates the computational power that quantum resources can provide. Eqn.~\ref{wopadding1} can also be thought of in the clients-server setting described in Fig.~\ref{fig:Figure1}, where each client $C_i$ has an input bit $x_i$ and performs an operation on the received qubit before forwarding it to the next client.
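As a consistency check, consider the smallest non-trivial case of two clients, for which the pairwise AND reduces to the logical AND, $f(x_1,x_2)=x_1\cdot x_2$. Eqn.~\ref{wopadding1} can then be verified directly:
\begin{equation*}
(U^\dagger)^{x_1\oplus x_2}\,U^{x_2}U^{x_1}\ket{0}=
\begin{cases}
\ket{0}, & x_1=x_2=0,\\
U^\dagger U\ket{0}=\ket{0}, & x_1\oplus x_2=1,\\
U^2\ket{0}=R_y(\pi)\ket{0}=\ket{1}, & x_1=x_2=1,
\end{cases}
\end{equation*}
so that the returned qubit encodes $x_1\cdot x_2$ in all four cases.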
By adding extra rotations $V=R_y(\pi)$ around the $y$-axis, we can transform Eqn.~\ref{wopadding1} in the following way:
\begin{equation} \label{wopadding}
(U^\dagger)^\globalxor{x}\, \underbrace{V^{r_n}U^{x_n}}_{\mathcal{C}_n} \dotsm \underbrace{V^{r_2}U^{x_2}}_{\mathcal{C}_2}\, \underbrace{V^{r_1}U^{x_1}}_{\mathcal{C}_1}\, \ket{0}= \ket{r \oplus f}
\end{equation}
where $r_i\in \{0,1\}$ for $i=1,\dots,n$, and $r=\bigoplus_i r_i$. As we will see in the following sections, this extra $V$ operation will provide some layer of security on top of the computational boost of the clients' power, in the case where there are dishonest participants.
\begin{figure} \caption{\label{fig:Figure2}} \end{figure}
\paragraph{\bf{The protocol.}} The server generates a single qubit in the state $\ket{0}$ that is sent to client $C_1$. $C_1$ applies $V^{r_1}U^{x_1}$ on the received qubit, according to input bit $x_1$ and a randomly selected bit $r_1$, and sends the qubit on to the second client $C_2$, who applies $V^{r_2}U^{x_2}$; this procedure continues until all the clients have applied their gates to the qubit (see Figure \ref{fig:Figure2}). The last operation $U^\dagger$ depends on the global XOR of the clients' inputs, computed using a classical routine described below, and can be applied by any client. The resulting state $\ket{r \oplus f}$ contains the value of the function up to a random bit flip $r$ (due to Eqn.~\ref{wopadding}). The qubit is then sent back to the server, where a measurement is performed in the computational basis and the outcome $r \oplus f$ is announced. The clients then locally compute the XOR of the random bits of the other clients and perform the last XOR operation $f=r\oplus (r\oplus f)$ to retrieve the result of the computation. For the computation of the global XOR of both the inputs and the random bits, we consider that the clients run a classical routine that involves using their local XOR boxes to share their classical information among them. During the XOR routine, we assume that the clients communicate with each other via secure classical channels that have been established by classical or QKD algorithms.
\paragraph{\bf{The XOR routine.}} For $i,j=1,\dots,n$, each client $C_j$ uses his local XOR box to choose random bits $x_j^i,~r_j^i\in\{0,1\}$, such that $x_j=\bigoplus_{i=1}^nx_j^i$ and $r_j=\bigoplus_{i=1}^nr_j^i$, and sends $x_j^i$ and $r_j^i$ to client $C_i$. Each client $C_i$ then uses his local XOR box to compute $\tilde{x}_i=\bigoplus_{j=1}^n x_j^i$ and $\tilde{r}_i=\bigoplus_{j=1}^n r_j^i$. When the designated client needs to perform the operation $U^\dagger$, the rest of the clients send $\tilde{x}_i$ to that client, who uses his local box to compute the global XOR (since $\bigoplus_{i=1}^n x_i=\bigoplus_{i=1}^n \tilde{x}_i$). At the end of the protocol, when the server announces the value of the measurement $r\oplus f$, all clients broadcast $\tilde{r}_i$, so that all clients can compute the value $r$. Of course, a sequential announcement by the clients would give the last client the ability to learn the output of the computation first and then abort the protocol. More complicated ways of secret-sharing values and broadcast channels using threshold schemes could be used instead, but that would defeat the purpose of this paper, which is to show how simple manipulation of quantum states can boost the computational power of limited clients.
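The correctness of the XOR routine rests on a simple reordering of the XOR sums: since XOR is associative and commutative,
\begin{equation*}
\bigoplus_{i=1}^n \tilde{x}_i=\bigoplus_{i=1}^n\bigoplus_{j=1}^n x_j^i=\bigoplus_{j=1}^n\bigoplus_{i=1}^n x_j^i=\bigoplus_{j=1}^n x_j\,,
\end{equation*}
and the analogous identity holds for the bits $\tilde{r}_i$ and $r$. At the same time, any proper subset of the shares $x_j^1,\dots,x_j^n$ of an input $x_j$ is uniformly distributed and independent of $x_j$ (standard XOR secret sharing), so the routine itself reveals nothing about the individual inputs beyond the quantities that are eventually broadcast.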
\paragraph{\bf{Security.}} As mentioned, the goal of this work is to demonstrate how quantum information can increase the computational abilities of parties in a client-server setting; however, the introduction of $V$ rotations in Eqn.~\ref{wopadding} allows for some level of security in a passive adversarial model. More explicitly, we assume that both the server and the clients are interested in completing the computation, and will therefore act according to the protocol; they might however leak some information to an attacker that gains access to their records. We therefore assume that the server sends single-qubit states $\ket{0}$ during the protocol, rather than multiple copies of the same qubit or parts of entangled states, but might leak the classical result of the measurement to an eavesdropper. The need to use single copies of quantum states in our protocol is also what distinguishes this work from the previous single-client single-server protocol~\cite{Barz2016}, where using \textit{cobits} (i.e. systems capable of being in a coherent superposition of two states) was sufficient for secure classical computing. The privacy of the secret input bits of the clients is maintained against someone who acquires a copy of the server's data, since all information that the latter can retrieve is equivalent to the sequence of gates applied, which is in turn equal to $V^{r\oplus f}$. Since the term in the exponent represents padded information, the server cannot retrieve more information than that contained in $r\oplus f$, which is indeed the expected outcome of the measurement. Furthermore, security against dishonest clients is also maintained, even if we allow them to prepare quantum states and perform measurements on the received states. This is again due to the $V$ rotation that is chosen uniformly at random and performed by all honest clients on the qubit. To see this more clearly, we examine the case when the first honest client in the chain, $C_i$, applies his rotation on the received qubit. We can assume without loss of generality that the qubit is prepared by the dishonest clients in the $XZ$ plane, since all rotations by the honest clients are done in that plane, and therefore any component outside the plane will not be affected. The honest operation on any pure state $\ket{\psi}$ in the $XZ$ plane results in the totally mixed state:
\begin{equation*}
\frac{1}{2}\sum_{r_i}V^{r_i}U^{x_i}\ketbra{\psi}{\psi}(U^\dagger)^{x_i}(V^\dagger)^{r_i}=\frac{1}{2}\,U^{x_i}\mathbb{I}_2(U^\dagger)^{x_i}
\end{equation*}
which ensures that no information is leaked to the next clients. As in the case of the server, however, we need to restrict the clients' malicious behavior to sending single-qubit states, or equivalently to require that the honest rotation is applied to a single qubit. Finally, the client responsible for the last $U^{\dagger}$ rotation will unavoidably learn the parity of the inputs of the rest of the clients, but as long as at least two clients are honest, this is enough to guarantee the secrecy of the individual inputs.
\paragraph{\bf{Efficiency and comparison to previous protocols.}} A common way to perform multiparty computations is to express the desired circuits as a sequence of smaller gates, for example two-input universal gates. Previous work~\cite{Dunjko2014} can therefore be re-interpreted as a protocol that computes the NAND of the inputs of two clients.
However, a straightforward extension of this to a multivariable function would prove very costly, requiring one qubit, up to two $R_y$ rotations, and several rounds of classical communication to compute the necessary XORs, for each AND evaluation in the function. By just looking at the quantum communication needed in the new protocol (which requires a single qubit to compute the pairwise AND), we observe an immediate gain in efficiency. Furthermore, a straightforward implementation of a construction based on \cite{Dunjko2014} guarantees no security for the inputs of the parties, since the XORs necessary for the application of $U^\dagger$ are on two bits; therefore, the client who performs the latter unavoidably learns the input of the other client. Finally, previous studies of boolean function evaluation in the measurement-based quantum computation model~\cite{Hoban2011} required an $(n+1)$-extended GHZ state to compute the pairwise AND function of Eqn.~\ref{eqn:function}, while to compute other boolean functions (e.g.\ the $n$-tuple AND function), the resource state should have $2^n-1$ qubits. In contrast, the presented protocol does not require any entanglement in the quantum state, and uses only one qubit to compute the pairwise AND function, while for the $n$-tuple AND function, it requires at most $n-1$ qubits (one qubit for each AND operation), giving an exponential decrease in the number of qubits used.
\begin{figure} \caption{Experimental scheme. The server generates heralded, horizontally polarized photons, which are sent to the clients' side. Each client uses a pair of half-wave plates for applying the gates $V^{r_i}$ and $U^{x_i}$.\label{fig:Figure3}} \end{figure}
\section{Experiment and Results}
We implement the protocol using polarisation-encoded photonic qubits with $\ket{0}$ ($\ket{1}$) being the horizontal (vertical) polarisation state. Single photons are generated by pumping a waveguided periodically poled Potassium Titanium Oxide Phosphate crystal with a mode-locked Ti:Sapphire laser ($\tau=200\,$fs, $\lambda=775\,$nm, 250 kHz repetition rate). After spectral filtering, we obtain pairs of photons at 1547~nm (horizontal polarisation) and 1553~nm (vertical polarisation), each with 2 nm spectral bandwidth (FWHM). The photons are detected using InGaAs avalanche photodetectors (APD)~\cite{Eckstein2011, Harder2013}.
\begin{figure*} \caption{\label{Figure4}} \end{figure*}
Using this source, the server generates heralded single photons in the state $\ket{0}$, which are sent to the clients' side via 15\,m-long polarisation-maintaining (PM) fibres. Each client $\mathcal{C}_i$ has access to a series of half-wave plates (HWPs) for implementing the quantum gates $U^{x_i}$ and $V^{r_i}$ (see Fig.~\ref{fig:Figure3}):
\begin{equation}
C_i=\hwp{\piover{4}\cdot r_i} \hwp{-\piover{8}\cdot x_i}
\label{client_hwp}
\end{equation}
where $\hwp{\theta}$ is a HWP with optical axis rotated by $\theta$. In order to demonstrate all the features of function $f$, we choose to implement a setup with four clients. This could be extended straightforwardly to a scheme with an arbitrary number of clients. The overall unitary evolution of the system is then described by the following sequence of operators:
\begin{equation} \label{client_side_hwp}
\underbrace{\hwp{0}\hwp{\frac{\pi}{8} \cdot \oplus_i x_i}}_{final\,rotation}\, \underbrace{C_4\,C_3\,C_2\,C_1}_{client\,chain} \ket{0},
\end{equation}
up to a global phase factor.
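As a consistency check, adopting one common convention for the Jones matrix of a half-wave plate at angle $\theta$ and omitting global phases, the wave-plate pairs of Eqn.~\ref{client_hwp} reproduce the client operations:
\begin{equation*}
\hwp{\theta} \doteq \begin{pmatrix} \cos 2\theta & \sin 2\theta \\ \sin 2\theta & -\cos 2\theta \end{pmatrix},\qquad
\hwp{0}\,\hwp{-\piover{8}} = R_y(\pi/2)=U,\qquad
\hwp{\piover{4}}\,\hwp{0} = R_y(\pi)=V,
\end{equation*}
while $\hwp{0}^2=\mathbb{I}_2$ and $\hwp{\piover{4}}\,\hwp{-\piover{8}}=R_y(3\pi/2)=VU$, so that $C_i=V^{r_i}U^{x_i}$ for all four combinations of $(x_i,r_i)$.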
For the purpose of our demonstration, $\hwp{0}$ can be omitted, as it has no effect on the correctness of the result. Finally, single photons are coupled into another PM fibre and sent back to the server. Here, they are measured in the computational basis using a polarisation splitter (extinction ratio $>$60\,dB) and two APDs connected to the output arms. We performed measurements on all 32 possible sequences of the input bits $x_i$. For each sequence, all possible combinations of the padding bits $r_i$ have been tested. Fig.~\ref{Figure4}a shows statistics of the results for a subset of input configurations. The average probability of finding the correct result was measured to be $(99.53\,\pm\,0.03)\%$, where we assumed Poissonian statistics for the errors. Imperfections arise from state preparation, polarisation manipulation and polarisation measurements, and dark counts. Fig.~\ref{Figure4}b shows results for the same input, but averaged over all combinations of random bits $r_i$, resulting in a flat distribution. The values we obtain for the average outcome of the computation lie between $(49.95\,\pm\,0.03)\%$ and $(50.06\,\pm\,0.03)\%$ with an average of $(50.00\,\pm\,0.03)\%$. These values are computed from the raw counts corrected by the coupling efficiencies. This shows that the server could not infer any information from the outcomes of its measurements. The main limiting factors for the correctness of the result are the uncertainty in the wave-plate positioning and the polarisation crosstalk introduced by the PM fibre connectors. Fig.~\ref{Figure4}c shows the long-time stability of our system: we repeated the same computation several times over a time interval of 13 hours and studied the drift in our experiment. The average correctness over this time was $99.43\,\%$ with a standard deviation of $0.08\,\%$. The correctness decreases from $(99.52\,\pm 0.02)\%$ to $(99.27\pm 0.06)\%$; the drop in probability is caused by drifts in the coupling to the fibres and polarisation drifts.
\paragraph{\bf{Security of implementation.}} In addition to the theoretical security aspects discussed above, in our implementation we choose the wave-plate settings in such a way that there is no phase shift between the states $\ket{0}$ and $\ket{1}$ that could leak information about the inputs. As already discussed in~\cite{Barz2016}, global phase shifts could leak information if the server, for example, sends part of an entangled state. However, this approach would require an interferometrically stable setup, which is an unlikely condition for a real-life implementation. Furthermore, the protocol requires the use of single qubits and a single-shot implementation in order to be secure. For the purpose of computing statistics for our proof-of-principle demonstration, we averaged over several runs of the experiment that used the same input settings. We note, however, that this would leak information about the inputs or the result to a malicious party; therefore, in a realistic implementation, single-shot experiments would be required.
\section{Conclusion}
In this work, we demonstrate a novel way to perform non-linear classical multiparty computations by exploiting single qubits and access to restricted linear processes. This is done through studying a specific boolean function that can be thought of as a building block for more complex computations.
Even though the main focus of this work is the boosting of the computational capabilities of limited clients manipulating single qubits, by introducing some extra rotations we can guarantee security under assumptions on the adversarial behavior of the participants. In this setting, the classical data obtained during the protocol do not leak any information, given that the adversaries act in a restricted way. Since the goal was to keep the clients' quantum capabilities as limited as possible, it would defeat the purpose of this study to allow them to perform any check on the correct behavior of the server or the other clients. If we would consider a setting where the clients are enhanced with quantum measurement devices, security of the protocol could be increased by checking the mean photon number (however see \cite{Sajeed2015} for a discussion on attacks and countermeasures on commercial devices). Our work also offers many avenues for further research. For example, are there more simple non-linear functions like the one presented here that can be used as subroutines for larger computation protocols? And more generally, what is the most efficient way to perform complex computations when we have access to limited quantum and classical resources? Finally, surprisingly enough, this boosting of computational power is possible with the use of single qubits, and without the need of the type of contextuality mentioned in~\cite{Raussendorf2013}, opening a discussion on whether some other form of contextuality is relevant in this setting. \begin{thebibliography}{14} \makeatletter \providecommand \@ifxundefined [1]{ \@ifx{#1\undefined} } \providecommand \@ifnum [1]{ \ifnum #1\expandafter \@firstoftwo \else \expandafter \@secondoftwo \fi } \providecommand \@ifx [1]{ \ifx #1\expandafter \@firstoftwo \else \expandafter \@secondoftwo \fi } \providecommand \natexlab [1]{#1} \providecommand \enquote [1]{``#1''} \providecommand \bibnamefont [1]{#1} \providecommand \bibfnamefont [1]{#1} \providecommand \citenamefont [1]{#1} \providecommand \href@noop [0]{\@secondoftwo} \providecommand \href [0]{\begingroup \@sanitize@url \@href} \providecommand \@href[1]{\@@startlink{#1}\@@href} \providecommand \@@href[1]{\endgroup#1\@@endlink} \providecommand \@sanitize@url [0]{\catcode `\\12\catcode `\$12\catcode `\&12\catcode `\#12\catcode `\^12\catcode `\_12\catcode `\%12\relax} \providecommand \@@startlink[1]{} \providecommand \@@endlink[0]{} \providecommand \url [0]{\begingroup\@sanitize@url \@url } \providecommand \@url [1]{\endgroup\@href {#1}{\urlprefix }} \providecommand \urlprefix [0]{URL } \providecommand \Eprint [0]{\href } \providecommand \doibase [0]{http://dx.doi.org/} \providecommand \selectlanguage [0]{\@gobble} \providecommand \bibinfo [0]{\@secondoftwo} \providecommand \bibfield [0]{\@secondoftwo} \providecommand \translation [1]{[#1]} \providecommand \BibitemOpen [0]{} \providecommand \bibitemStop [0]{} \providecommand \bibitemNoStop [0]{.\EOS\space} \providecommand \EOS [0]{\spacefactor3000\relax} \providecommand \BibitemShut [1]{\csname bibitem#1\endcsname} \let\auto@bib@innerbib\@empty \bibitem [{\citenamefont {Yao}(1982)}]{Yao1982} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {A.~C.}\ \bibnamefont {Yao}},\ }in\ \href@noop {} {\emph {\bibinfo {booktitle} {Proceedings of the 23rd Annual Symposium on Foundations of Computer Science}}},\ \bibinfo {series and number} {SFCS '82}\ (\bibinfo {publisher} {IEEE Computer Society},\ \bibinfo {year} {1982})\ pp.\ \bibinfo {pages} 
\begin{document} \title[Equilibrium fluctuations for totally asymmetric interacting particles]{Equilibrium fluctuations for totally asymmetric interacting particle systems} \author[Kohei Hayashi]{Kohei Hayashi} \address{Graduate School of Mathematical Sciences, The University of Tokyo, Komaba, Tokyo 153-8914, Japan.} \email{[email protected]} \keywords{KPZ equation, stochastic Burgers equation, interacting particle systems, $q$-TASEP} \subjclass[2000]{60K35, 60H15} \maketitle \newtheorem{definition}{Definition}[section] \newtheorem{theorem}{Theorem}[section] \newtheorem{lemma}[theorem]{Lemma} \newtheorem{corollary}[theorem]{Corollary} \newtheorem{proposition}[theorem]{Proposition} \newtheorem{remark}{Remark}[section] \newtheorem{assumption}{Assumption}[section] \newtheorem{example}{Example}[section] \makeatletter \renewcommand{\theequation}{ \thesection.\arabic{equation}} \@addtoreset{equation}{section} \makeatother \makeatletter \renewcommand{A}{A} \makeatother \newcounter{num} \newcommand{\Rnum}[1]{\setcounter{num}{#1}\Roman{num}}
\begin{abstract} We study equilibrium fluctuations for a class of totally asymmetric zero-range type interacting particle systems. As a main result, we show that the density fluctuation field of our process converges to the stationary energy solution of the stochastic Burgers equation. As a special case, the microscopic system we consider is related to $q$-totally asymmetric simple exclusion processes ($q$-TASEPs), and our scaling limit corresponds to letting the quantum parameter $q$ tend to one. \end{abstract}
\section{Introduction}
In this paper, we are interested in the Kardar-Parisi-Zhang (KPZ) equation, a stochastic partial differential equation for an unknown function $h = h (t, x ) $, $ ( t , x ) \in [ 0, \infty ) \times \mathbb{R} $, of the form \begin{equation} \label{eq:KPZintro} \partial_t h = \nu \partial_x^2 h + \lambda (\partial_x h )^2 + \sqrt{D} \dot{W} ( t, x ) . \end{equation} Here $\nu, D > 0 $ and $\lambda \in \mathbb{R}$ are constants and $\dot{W} (t, x ) $ is the space-time white noise. Equivalently, we focus on its tilt $u = \partial_x h$, which satisfies the stochastic Burgers equation (SBE) \begin{equation} \label{eq:SBEintro} \partial_t u = \nu \partial_x^2 u + \lambda \partial_x u^2 + \sqrt{D} \partial_x \dot{W} ( t, x ) . \end{equation} Throughout this paper, we only consider the one-dimensional setting. The KPZ equation was introduced in \cite{kardar1986dynamic} as a model describing random interface evolution. The main interest of this paper is the universality of interface growth. Before discussing this universality in detail, we mention the solution theory of the KPZ equation. Looking at the SBE \eqref{eq:SBEintro}, the solution $u$ is typically expected to have the same regularity as the space-time white noise. In particular, it takes values in the space of distributions and thus the non-linear term $\partial_x u^2$ cannot be defined naively. For this reason, the equations \eqref{eq:SBEintro} and \eqref{eq:KPZintro} are called singular equations. One way around this difficulty is to use the Cole-Hopf transformation $Z = \exp ((\lambda /\nu)h )$. The transformed process $Z$ then satisfies the stochastic heat equation with multiplicative noise \begin{equation} \label{eq:SHEintro} \partial_t Z = \nu \partial_x^2 Z + \frac{\lambda \sqrt{D}}{\nu} Z\dot{W} (t, x ) .
\end{equation} Now we can give a meaning to the solution of \eqref{eq:SHEintro} in a classical way and then the solution to KPZ equation can be defined by $h = (\nu / \lambda) \log Z$, which is called the Cole-Hopf solution. However, such a good transformation is restrictive and a solution theory which directly give a meaning to singular stochastic differential equations is preferable. To prove well-posedness of \eqref{eq:KPZintro} itself without using the Cole-Hopf transformation, a renormalization procedure which roughly subtract ``$-\infty$'' from the singular term is needed. Such a renormalization is conducted in a mathematically rigorous way in \cite{hairer2013solving} for the first time, and well-posedness of KPZ equation is proved there. And then the solution theory is generalized as the regularity structure theory in \cite{hairer2014theory} covering more wide range of singular stochastic partial differential equations. On the other hand, the paper \cite{gubinelli2015paracontrolled} introduced the notion of paracontrolled calculus and then global well-posedness of KPZ equation is shown in \cite{gubinelli2017kpz} based on paracontrolled calculus. Both solution theories are established as generalization of rough path theory, and particularly the solutions are constructed based on a pathwise approach rather than a probabilistic one. For a probabilistic construction, though restricted on the stationary case, the notion of energy solution is introduced in \cite{gonccalves2014nonlinear} as a martingale problem formulation and existence of the solution is shown. Then uniqueness of energy solution is proved in \cite{gubinelli2018energy}. The main interest of this paper is to derive \eqref{eq:KPZintro} as an equation which describes macroscopic interface evolution, by taking scaling limits of microscopic models. Until now, several microscopic models from which KPZ equation is derived by scaling limits are known. In particular, for microscopic models under equilibrium state, the notion of energy solution gives us a robust way to derive KPZ equation as scaling limits. Here we briefly review results on the universality of KPZ equation for stationary models. (See \cite{corwin2012kardar} for progress in this decades containing also non-stationary cases.) As to stationary case, \cite{bertini1997stochastic} is a celebrating result, which proved that density fluctuation of simple exclusion processes with weak asymmetric jump rates converges to the Cole-Hopf solution of SBE. After that, \cite{gonccalves2014nonlinear} generalized the result of \cite{bertini1997stochastic} to wider class of jump rates and remarkably they established a robust way to derive KPZ equation without using Cole-Hopf transformation: \cite{gonccalves2015stochastic} for interacting particle systems containing zero-range processes, \cite{diehl2017kardar} for a system of stochastic differential equations and \cite{jara2019scaling} for the Sasamoto-Spohn model, which is originally introduced in \cite{sasamoto2009superdiffusivity}. Other important class from which KPZ equation is derived is directed polymers, which is introduced in \cite{huse1985pinning} and mathematically analyzed in \cite{imbrie1988diffusion} for the first time. As to the stationary case, recently \cite{jara2020stationary} derived the stochastic Burgers equation from free-energy fluctuation of the stationary O'Connell-Yor model (\cite{o2001brownian}). 
On the other hand, a some relation between the O'Connell-Yor polymer and an interacting particle system is pointed out: the $q$-deformation of totally asymmetric simple exclusion process ($q$-TASEP, in short) with parameter $q \in (0, 1)$ is introduced in \cite{borodin2014macdonald} and moreover it is proved that the $q$-TASEP in some sense converges to the O'Connell-Yor polymer as $q \to 1$. (See also \cite{borodin2014duality}.) From this degeneration result, it is expected that the stochastic Burgers equation can also be derived by scaling limits of $q$-TASEPs. In this paper we consider a class of totally asymmetric interacting particle systems where particles on one dimensional lattice move only to one direction, containing $q$-TASEP model as a special case. As a main result, we show that the stochastic Burgers equation is derived from our model. \section{Main results} \label{sec:model} \subsection{Stationary energy solution of KPZ/SBE} In the sequel, we write $\mathbb{R}_+ \coloneqq [ 0, \infty ) $. Let $\nu, D > 0 $ and $\lambda \in \mathbb{R}$ be fixed constants and consider $(1 + 1 )$-dimensional KPZ equation \begin{equation} \label{KPZ} \partial_t h = \nu \partial_x^2 h + \lambda (\partial_x h)^2 + \sqrt{D} \dot{W} (t, x ) \quad \text{ in } \mathbb{R}_+ \times \mathbb{R} . \end{equation} Then recall that the tilt $u = \partial_x h $ satisfies the stochastic Burgers equation \begin{equation} \label{SBE} \partial_t u = \nu \partial_x^2 u + \lambda \partial_x u^2 + \sqrt{D} \partial_x \dot{W} (t, x ) \quad \text{ in } \mathbb{R}_+ \times \mathbb{R} . \end{equation} As a preliminary we recall the notion of \textit{stationary energy solution}. The same formulation can be applied for KPZ equation \eqref{KPZ} so that we focus only on the stochastic Burgers equation \eqref{SBE}. Now we begin with the definition of stationarity. \begin{definition} We say that an $\mathcal{S}^\prime (\mathbb{R})$-valued process $u = \{ u_t : t \in [0,T] \} $ satisfies condition \textbf{(S)} if for all $t \in [0,T]$, the random variable $u_t$ has the same distribution as space white noise with variance $D/(2\nu)$. \end{definition} For a process $u = \{ u_t: t \in [0,T]\}$ satisfying the condition \textbf{(S)}, we define \[ \mathcal{A}^\varepsilon_{ s, t } (\varphi ) = \int_s^t \int_{\mathbb{R} } u_r (\iota_\varepsilon (x; \cdot) )^2 \partial_x \varphi (x ) dx dr . \] for every $0 \le s < t \le T $, $\varphi \in \mathcal{S} (\mathbb{R} ) $ and $\varepsilon > 0 $. Here we defined the function $\iota_\varepsilon (x ; \cdot ) : \mathbb{R} \to \mathbb{R} $ by $\iota_{ \varepsilon } (x ; y) = \varepsilon^{ - 1 } \mathbf{1}_{ [ x , x + \varepsilon ) } (y) $ for each $x \in \mathbb{R} $. \begin{definition} Let $u = \{ u_t :t \in [0,T]\}$ be a process satisfying the condition \textbf{(S)}. We say that the process $u$ satisfies the energy estimate if there exists a constant $\kappa > 0$ such that: \begin{itemize} \item[\textbf{(EC1)}] For any $\varphi \in \mathcal{S} (\mathbb{R} )$ and any $0 \le s < t \le T$, \begin{equation*} \mathbb{E}_n \bigg[ \bigg| \int_s^t u_r (\partial_x^2 \varphi ) dr \bigg|^2 \bigg] \le \kappa (t- s ) \| \partial_x \varphi \|^2_{ L^2(\mathbb{R} ) } . 
\end{equation*} \item[\textbf{(EC2)}] For any $\varphi \in \mathcal{S} (\mathbb{R} )$, any $0 \le s < t \le T$ and any $0 < \delta < \varepsilon < 1 $, \begin{equation*} \mathbb{E}_n \big[ \big| \mathcal{A}^\varepsilon_{ s, t } (\varphi ) - \mathcal{A}^\delta_{ s, t } (\varphi ) \big|^2 \big] \le \kappa \varepsilon (t- s ) \| \partial_x \varphi \|^2_{ L^2(\mathbb{R} ) } . \end{equation*} \end{itemize} \end{definition}
Then the following result is proved in \cite{gonccalves2014nonlinear}.
\begin{proposition} \label{nonlinear} Assume $\{ u_t : t \in [0, T ] \} $ satisfies the conditions \textbf{(S)} and \textbf{(EC2)}. Then there exists an $\mathcal{S}^\prime (\mathbb{R} )$-valued process $\{ \mathcal{A}_t : t \in [0, T ] \} $ with continuous trajectories such that \[ \mathcal{A}_t (\varphi ) = \lim_{ \varepsilon \to 0 } \mathcal{A}^\varepsilon_{ 0, t } (\varphi) \] in $L^2 $ for every $t \in [0, T ] $ and $\varphi \in \mathcal{S} (\mathbb{R} ) $. \end{proposition}
In view of this proposition, thinking of the singular term $\partial_x u^2 $ as being given by this quantity, we can define a solution of \eqref{SBE} as follows.
\begin{definition} \label{def:energysol} We say that an $\mathcal{S}^\prime(\mathbb{R})$-valued process $u=\{u (t, \cdot) : t\in [0,T] \}$ is a stationary energy solution of the stochastic Burgers equation \eqref{SBE} if
\begin{enumerate}
\item The process $u$ satisfies the conditions \textbf{(S)}, \textbf{(EC1)} and \textbf{(EC2)}.
\item For all $\varphi \in \mathcal{S} (\mathbb{R} )$, the process \[ u_t ( \varphi ) - u_0 (\varphi ) - \nu \int_0^t u_s (\partial_x^2 \varphi ) ds - \mathcal{A}_t (\varphi ) \] is a martingale with quadratic variation $D \| \partial_x \varphi \|^2_{ L^2 (\mathbb{R} ) } t $, where $\mathcal{A}$ is the process obtained in Proposition \ref{nonlinear}.
\item For all $\varphi \in \mathcal{S} (\mathbb{R} )$, writing $\hat{ u }_t = u_{ T - t } $ and $\hat{ \mathcal{A} }_t = - (\mathcal{A}_T - \mathcal{A}_{ T- t })$, the process \[ \hat{ u }_t ( \varphi ) - \hat{ u }_0 (\varphi ) - \nu \int_0^t \hat{ u }_s (\partial_x^2 \varphi ) ds - \hat{ \mathcal{A} }_t (\varphi ) \] is a martingale with quadratic variation $D \| \partial_x \varphi \|^2_{ L^2 (\mathbb{R} ) } t $.
\end{enumerate}
\end{definition}
It is known that there exists a unique-in-law stationary energy solution of \eqref{SBE}: existence was shown in \cite{gonccalves2014nonlinear} and uniqueness was proved in \cite{gubinelli2018energy}.
\subsection{Model and result} Throughout this paper we write $\mathbb{N} = \{ 1, 2, \ldots \}$ and $\mathbb{Z}_+ = \{ 0, 1, \ldots \}$. Let $\mathscr{X} = \mathbb{Z}_+^{ \mathbb{Z} } $ be the configuration space; we consider Markov processes taking values in $\mathscr{X}$. We denote elements of $\mathscr{X} $ by Greek letters $\eta = \{ \eta_j : j \in \mathbb{Z} \}$, where $\eta_j $ denotes the number of particles at site $j \in \mathbb{Z}$. Let $c:\mathbb{Z}_+ \to \mathbb{R}_+$ be such that $c(0)=0$ and take $p_n, q_n \in [0,1]$ satisfying $p_n + q_n =1$. Then the zero-range process is the Markov process with generator \[ f (\eta) \mapsto n^2 \sum_{j \in \mathbb{Z}} p_n c(\eta_j) \nabla_{j,j-1} f(\eta) + n^2 \sum_{j \in \mathbb{Z}} q_n c(\eta_j) \nabla_{j,j+1} f(\eta) \] acting on each local function $f : \mathscr{X} \to \mathbb{R}$.
Here $\nabla_{ j , j + 1 } f ( \eta ) = f ( \eta^{ j, j + 1 } ) - f (\eta) $ and $\eta^{ j , j + 1 } $ denotes the configuration after a particle jumps from a site $j $ to $j + 1 $ if there exists at least one particle on the site $j$: \[ \eta^{ j , j + 1 }_k = \begin{cases} \begin{aligned} & \eta_j -1 && \text{ if } k= j , \\ & \eta_{ j + 1} + 1 && \text{ if } k= j + 1 , \\ & \eta_k &&\text{ otherwise.} \end{aligned} \end{cases} \] The factor $n^2$ is needed to obtain non trivial limit under diffusive scaling. In \cite{gonccalves2015stochastic}, the stochastic Burgers equation is derived in weakly asymmetric regime where $q_n -p_n = O(n^{-1/2})$ as $n$ tends to infinity. Instead, we consider totally asymmetric regime where $q_n - p_n = O(1)$ assuming also the jump rate function $c$ depends on $n$ in an appropriate manner. To simplify the notation, we set $p_n = 0$ and $q_n =1$ in the sequel. Moreover, let $g $ be a positive function on $\mathbb{R}_+$ satisfying the following condition. \begin{assumption} \label{ass:regularity} Assume the function $g \in C^4_b ( \mathbb{R}_+ : \mathbb{R}_+ ) $ is strictly increasing where $C^4_b$ denotes the family of $C^4$-smooth functions whose all derivatives are bounded, and satisfies $g (0) = 0 $ and $g^\prime(0) > 0$. \end{assumption} For the function $g$ satisfying Assumption \ref{ass:regularity}, we write $g_n (k ) = n^{ 1/2 } g (n^{ - 1/2 } k ) $ for each $k \in \mathbb{Z}_+ $ and consider the zero-range process with jump rate $c = g_n$ in the above. In other words, we define an operator $L_n$ acting on each local fucntion $f : \mathscr{X} \to \mathbb{R}$ by \[ L_n f (\eta ) = n^{ 2 } \sum_{ j \in \mathbb{Z} } g_n ( \eta_j ) \nabla_{ j , j + 1 } f (\eta ) , \] and hereafter we consider a Markov process $\eta^n = \{ \eta^n (t ) : t \ge 0 \} $ on $\mathscr{X} $ with infinitesimal generator $L_n $. See Section 2.6 in \cite{kipnis1998scaling} about construction of zero-range processes on infinite volume space where monotonicity of jump rate is postulated. For any probability measure $\mu $ on $\mathscr{X} $, let $\mathbb{P}^n_\mu $ be the distribution of $\eta^n $ on $D ( \mathbb{R}_+ : \mathscr{X} ) $ starting form the initial distribution $\mu $ where $D ( \mathbb{R}_+ : \mathscr{X} ) $ denotes the space of right-continuous processes with left-limits taking values in $\mathscr{X} $ endowed with the Skorohod topology. Next we prepare a family of invariant measures of the process $\eta^n $ which are parametrized by density. First for each $\alpha > 0 $, let $\overline{\nu}_\alpha $ be a probability measure on $\mathscr{X} $ whose common marginal is given by \[ \overline{\nu}_\alpha (\eta_j = k ) = \frac{1}{ Z_n(\alpha) } \frac{ \alpha^k }{ g_n!(k )} , \quad Z_n (\alpha) = \sum_{ k \ge 0 } \frac{ \alpha^k }{ g_n!(k) } \] where we defined $g_n ! (k ) = g_n (k) \cdots g_n (1 ) $ for each $k \in \mathbb{N}$ and $g_n ! (0) = 1 $. Let $\alpha^*_n$ be the radius of convergence of the partition function $Z_n(\alpha)$. \begin{assumption} \label{ass:density} Assume that $Z_n(\alpha)$ diverges as $\alpha$ converges to $\alpha^*_n$ for each $n$. \end{assumption} For each $\rho > 0 $, we choose $\Phi_n = \Phi_n (\rho ) $ so that $E_{ \overline{\nu}_{ \Phi_n } } [ \eta_j ] = \rho $ for each $j \in \mathbb{Z}$. This is possible according to Assumption \ref{ass:density}. In this case we have $\Phi_n (\rho ) = E_{\nu^n_\rho}[g_n (\eta )] $ and it is easily verified that \begin{equation} \label{eq:Phi} \lim_{n \to \infty } \Phi_n (\rho) = g^\prime (0) \rho . 
\end{equation} An example of function $g$ satisfying Assumptions \ref{ass:regularity} and \ref{ass:density} will be given in subsection \ref{subsec:qtasep}. Hereafter we simply write $\nu^n_\rho = \overline{\nu}_{ \Phi_n (\rho)} $. Then, it is straightforward that the measure $\nu^n_\rho$ satisfies the detailed balance condition and thus $\nu^n_\rho$ is invariant for the process $\eta^n $. To be concerned with equilibrium fluctuations, we only consider the situation when the process starts form these invariant measures. We write $\mathbb{P}_n = \mathbb{P}^n_{ \nu^n_\rho } $ and write the expectation with respect to $\mathbb{P}_n $ by $\mathbb{E}_n $. Now we state our main result. For any given constant $ T > 0$, we define density fluctuation field $\{ \mathcal{X}^n_t : t \in [0, T ] \} $ with values on $D ([0, T ], \mathcal{S}^\prime (\mathbb{R} ) ) $ whose action on any test function $\varphi \in \mathcal{S} (\mathbb{R} ) $ is given by \begin{equation} \label{fluctuation} \mathcal{X}^{ n }_t (\varphi ) = \frac{1}{ \sqrt{n } } \sum_{ j \in \mathbb{Z} } ( \eta^{ n }_j (t ) - \rho ) \varphi \bigg( \frac{ j - f_n t }{ n } \bigg) \end{equation} where $f_n = f_n (g, \rho) = b_2 n^2 + b_1 n^{ 3 /2 } + b_0 n $ with \begin{equation} \label{eq:framing} \begin{aligned} & b_2 = g^{ \prime } ( 0 ) , \quad b_1 = \frac{ 1 }{ 2 } g^{ \prime \prime } (0) ( 1 + 2 \Phi_n (\rho) ) , \\ & b_0 = \frac{g^{(3)}(0)}{6g^\prime(0)} (1 + 6\Phi_n(\rho) + 3\Phi_n(\rho)^2) - \frac{ g^{ \prime \prime } (0)^2 }{ 4g^\prime(0)^2 } (1 + 10 \Phi_n(\rho) + 9\Phi_n(\rho)^2) . \end{aligned} \end{equation} Here $\rho = E_{ \nu_\rho } [ \eta (j ) ]$ stands for the density which is conserved for each process $\eta^n = \{ \eta^n (t) : t \ge 0 \}$. The main result of this paper is the following. \begin{theorem} \label{mainthm} Let $\mathcal{X}^n_t $ be the density fluctuation field defined by \eqref{fluctuation} for each zero-range process $\eta^n = \{ \eta^n_t : t\in [0, T ] \} $. Then the process $\{ \mathcal{X}^n_t : t \in [0, T] \} $ which takes values in $D ([0, T ] , \mathcal{S}^\prime (\mathbb{R})) $ converges in distribution to a unique stationary energy solution $\{ u ( t, \cdot ) : t \in [0, T ] \} $ of the stochastic Burgers equation \begin{equation} \label{SBEthm} \partial_t u = \frac{ 1 }{ 2 } g^\prime (0 ) \partial_x^2 u - \frac{ 1 }{2} g^{\prime \prime } (0 ) \partial_x u^2 + \sqrt{ g^\prime ( 0 ) \rho } \partial_x \dot{W} (t, x ) . \end{equation} \end{theorem} \subsection{Relation to \texorpdfstring{$q$}{q}-TASEP} \label{subsec:qtasep} As we mentioned before, our zero-range process is related in some way to a $q$-deformed version of totally asymmetric simple exclusion process ($q$-TASEP), which is originally introduced in \cite{borodin2014macdonald}. The dynamics of $q$-TASEP is described as follows. Fix a parameter $q \in [ 0, 1 )$. Let $\mathscr{X}_0 = \{ 0, 1 \}^{ \mathbb{Z} } $ be a configuration space with exclusion constraint where similarly to zero-range process we denote each element in $\mathscr{X}_0 $ by Greek letters like $\xi = \{ \xi_j : j \in \mathbb{Z} \} \in \mathscr{X}_0 $. Here $\xi_j$ denotes the occupation number on a site $j$: at each site at most one particle can exist and there is a particle on the site $j$ if $\xi_j = 1$ while there is no particle on that site if $\xi_j = 0$. 
Then $q$-TASEP is a process taking values in $\mathscr{X}_0 $ whose infinitesimal generator is given by \[ \mathcal{L}_q f (\xi ) = \sum_{ j \in \mathbb{Z} } (1 - q^{ \xi_j } ) \nabla_{ j , j + 1 } f (\xi) \] for each real-valued function $f $ on $\mathscr{X}_0 $. Here $\nabla_{ j , j + 1 } f (\xi) = f (\xi^{ j , j + 1 } ) - f (\xi ) $ and $\xi^{ j , j+ 1 } $ denotes the configuration after a particle on site $j $ jumps to site $ j + 1 $ if possible, similarly to the zero-range case. Note that $q$-TASEP is a kind of exclusion process, which can also be interpreted as a zero-range process in the following way. Indeed, let $X (t) = \{ (\cdots, x_0(t) , x_1(t), \cdots) \in \mathbb{Z}^{\mathbb{Z} } : x_j (t) \le x_{j+1} (t), j \in \mathbb{Z} \} $ denote the family of positions of the particles at time $t$. Sites other than $\{ x_j(t) : j \in \mathbb{Z} \} $ are empty, and we let $\eta_j (t ) = x_{j} (t) - x_{j-1} (t) - 1 $ denote the number of empty sites between $x_{j-1}(t) $ and $x_{j} (t) $. Then, taking $q = q_n = \exp ( - n^{- 1/2 } ) $, the process $\eta^n (t) = \{ \eta_j (n^{5/2} t) : j \in \mathbb{Z} \}$ ($t \ge 0$), which takes values in $\mathscr{X}$, is a zero-range process with generator $L_n $ and $g (x ) = 1- e^{ - x } $. In this case, the product $q$-geometric distribution whose common marginal is given by \[ \overline{ \nu }^n_\alpha (\eta_j = k ) = ( \alpha/\sqrt{n} ;q_n)_\infty \frac{ (\alpha/\sqrt{n} )^k }{ (q_n ; q_n)_k } , \quad k \in \mathbb{Z}_+ \] is invariant for the dynamics. Here $(a ; q )_\infty = \prod_{ k = 0 }^\infty (1 - a q^k ) $ and $ ( a ; q )_n = (a ;q)_\infty /(aq^n;q)_\infty $ are the $q$-Pochhammer symbols. Note that the measure $\overline{\nu}^n_\alpha $ is a probability measure by the $q$-binomial theorem \[ \sum_{ k = 0 }^\infty x^k \frac{(a; q )_k }{ (q ;q )_k} = \frac{(ax; q )_\infty}{(x; q)_\infty} \] for all $| x | < 1$ and $| q | < 1 $, applied with $a = 0 $. Moreover, one can easily check that the function $g$ satisfies Assumptions \ref{ass:regularity} and \ref{ass:density}, and that $\overline{\nu}^n_\alpha $ converges in law to the product Poisson distribution with parameter $\alpha$ as $n$ tends to infinity, as expected.
\section{Proof outline} \label{sec:outline} In this section, we give an outline of the proof of our main theorem (Theorem \ref{mainthm}). Recall the fluctuation fields $\{ \mathcal{X}^n_t : t \in [0, T ] \} $ defined in \eqref{fluctuation}. We write $\varphi^n_j = \varphi^n_j (t) = \varphi ( ( j - f_n t ) / n ) $ for simplicity and define the discrete derivative operators $\nabla^n $ and $\Delta^n $ by \begin{equation} \label{eq:discder} \nabla^n \varphi^n_j = \frac{n}{2} (\varphi^n_{ j + 1 } - \varphi^n_{j-1} ) , \quad \Delta^n \varphi^n_j = n^2 (\varphi^n_{ j + 1 } + \varphi^n_{ j -1 } - 2 \varphi^n_j ) . \end{equation} Moreover, let $L^{ * }_n $ be the $L^2 (\nu^n_\rho ) $-adjoint operator of $L_n $, which acts on each $f : \mathscr{X} \to \mathbb{R} $ as \[ L^{ * }_n f (\eta ) = n^2 \sum_{ j \in \mathbb{Z} } g_n (\eta_j ) \nabla_{ j , j -1 } f (\eta ) . \] Then we define the symmetric and anti-symmetric parts of the operator $L_n $ by $S_n = (L_n + L^*_n )/ 2 $ and $A_n = (L_n - L^*_n ) / 2 $, respectively. We begin with a martingale decomposition associated with the Markov process.
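Let us first recall, in a form sufficient for our purposes, the general fact underlying this decomposition: if $\{ \eta (t) : t \ge 0 \}$ is a Markov process with generator $L$ and $F ( t , \eta )$ is a function in the domain of the generator which is smooth in $t$, then
\[
F ( t , \eta (t) ) - F ( 0 , \eta (0) ) - \int_0^t ( \partial_s + L ) F ( s , \eta (s) ) \, ds
\]
is a mean-zero martingale. Below we apply this with $L = L_n$ and $F ( s , \eta ) = n^{ - 1/2 } \sum_{ j \in \mathbb{Z} } ( \eta_j - \rho ) \varphi ( ( j - f_n s ) / n )$, so that $F ( s , \eta^n (s) ) = \mathcal{X}^n_s ( \varphi )$.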
By Dynkin's martingale formula, for each test function $\varphi \in \mathcal{S} (\mathbb{R}) $, \begin{equation} \label{eq:mart} \mathcal{M}^n_t (\varphi ) = \mathcal{X}^n_t (\varphi) - \mathcal{X}^n_0 (\varphi) - \int_0^t (\partial_s + L_n ) \mathcal{X}^n_s (\varphi) ds \end{equation} is a mean-zero martingale with quadratic variation \begin{equation} \label{qv} \begin{aligned} \langle \mathcal{M}^n (\varphi ) \rangle_t & =\int_0^t \big( L_n ( \mathcal{X}^n_s (\varphi ) )^2 - 2 \mathcal{X}^n_s (\varphi ) L_n \mathcal{X}^n_s (\varphi ) \big) ds \\ & = \frac{1}{n} \int_0^t \sum_{j \in \mathbb{Z} } g_n (\eta^n_j (s) ) \big[ n ( \varphi^n_{j+1} (s) - \varphi^n_j (s) ) \big]^2 ds , \end{aligned} \end{equation} which is expected to converge to $g^\prime(0) \rho \| \partial_x \varphi \|^2_{L^2 (\mathbb{R} ) } t $ as $n$ tends to infinity in view of \eqref{eq:Phi}. Next, to decompose the additive functional into symmetric and anti-symmetric parts, we write $L_n = S_n + A_n $ and split the third term of right-hand side of \eqref{eq:mart} into sum of \begin{equation} \label{eq:symm} \begin{aligned} \mathcal{S}^n_t (\varphi) = \int_0^t S_n \mathcal{X}^n_s (\varphi ) ds = \frac{1}{ 2\sqrt{n} } \int_0^t \sum_{ j \in \mathbb{Z} } g_n (\eta^n_j (s) ) \Delta^n \varphi^n_j (s) ds \end{aligned} \end{equation} and \begin{equation} \label{eq:anti-symm} \begin{aligned} \mathcal{B}^n_t (\varphi) & = \int_0^t ( \partial_s + A_n ) \mathcal{X}^n_s (\varphi ) ds \\ & = \frac{1}{ \sqrt{n} } \int_0^t \sum_{ j \in \mathbb{Z} } \big[ n g_n (\eta^n_j (s) ) \nabla^n \varphi^n_j (s) - \frac{f_n}{n} (\eta^n_j (s) - \rho ) \partial_x \varphi^n_j (s) \big] ds . \end{aligned} \end{equation} By this line we obtain a decomposition \begin{equation} \label{decomposition} \mathcal{X}^n_t (\varphi ) = \mathcal{X}^n_0 (\varphi ) + \mathcal{S}^n_t (\varphi ) + \mathcal{B}^n_t (\varphi ) + \mathcal{M}^n_t (\varphi ) . \end{equation} In the sequel, we show tightness of each term in the decomposition \eqref{decomposition} and characterize limiting points. Before consider in detail, let us roughly see the convergence of these terms. For that purpose, a Taylor expansion of $g_n $ in occupation variables \begin{equation} \label{Taylor} g_n (k) = n^{ 1/ 2 } g (n^{ - 1/ 2} k ) = g^\prime (0) k + \frac{ 1 }{ 2} g^{\prime \prime } (0 ) k^2 n^{-1/2} + O (n^{ - 1 } ) \end{equation} plays an important role. We begin with the symmetric part. Taking only the leading term of $g_n $, the process $\mathcal{S}^n_t (\varphi ) $ can be replaced by $(g^\prime (0)/2) \mathcal{X}^n_t (\Delta^n \varphi ) $ and thus one can show that this term converges to the viscosity term in the equation \eqref{SBE} tested against each function $\varphi $. Next, for the anti-symmetric part, we see that the continuous derivative can be replaced by the discrete one. Indeed, let \[ E^n_t (\varphi) = \frac{1}{\sqrt{n}} \int_0^t \sum_{j\in \mathbb{Z}} \frac{f_n}{n} (\eta^n_j (s)-\rho) (\partial_x \varphi^n_j (s) - \nabla^n \varphi^n_j (s)) ds . \] Recall the definition of the discrete derivative given in \eqref{eq:discder}. Then the mean-value theorem yields $|\partial_x \varphi^n_j (t) - \nabla^n \varphi^n_j (s) |= O(n^{-2}) $. Hence by the Schwarz inequality, we have that \[ \mathbb{E}_n \bigg[\sup_{0 \le t \le T} |E^n_t (\varphi)|^2 \bigg] \le T \frac{f_n^2}{n^3} \int_0^T \sum_{j \in \mathbb{Z}} \mathbb{E}_n \big[ (\eta^n_j(s)-\rho)^2 \big] (\partial_x \varphi^n_j (s)-\nabla^n \varphi^n_j (s))^2 ds \le C\frac{T^2}{n^2} . 
\] Now we expand $ n g_n(\eta_j) - n^{-1}f_n(\eta_j -\rho ) $ in occupation variable $\eta_j $ with the help of the expansion \eqref{Taylor}. Then, choosing the framing $f_n$ carefully, the linear terms cancel and one can expect the leading part has order two. Then $\mathcal{B}^n_t (\varphi ) $ can roughly be written by a quadratic functional of the fluctuation field, which give rise to the non-linear term in the limiting equation \eqref{SBEthm}. This is clarified in the following manner. Let us define random variables $W_j$ and its centered version $\overline{W}_j$ by \begin{equation} \label{eq:wdef} W_j (t) = g_n (\eta^n_j (t ) ) / g^\prime (0) , \quad \overline{W }_j (t)= W_j (t) - \Phi_n (\rho ) / g^\prime (0) . \end{equation} Moreover, we define a modified process $\tilde{\mathcal{B} }^n_\cdot \in D ( [0, T ], \mathcal{S}^\prime (\mathbb{R} ) ) $ by \begin{equation} \label{modified} \tilde{ \mathcal{B} }^n_t ( \varphi ) = \frac{ g^{ \prime \prime } (0) }{ 2 } \int_0^t \sum_{ j \in \mathbb{Z} } \overline{W}_{ j -1 } (s) \overline{W}_j (s) \nabla^n \varphi^n_j (s) ds \end{equation} for each test function $\varphi \in \mathcal{S} (\mathbb{R}) $. Then we have the following result. \begin{lemma} \label{antisymm} We have that \[ \limsup_{ n \to \infty } \mathbb{E}_n \bigg[ \sup_{ 0 \le t \le T } \big| \mathcal{B}^n_t (\varphi ) - \tilde{ \mathcal{B} }^n_t (\varphi ) \big|^2 \bigg] = 0 . \] \end{lemma} Lemma \ref{antisymm} is a consequence of an expansion of $W_j - \eta_j $ (see \eqref{expansion}), which can be deduced by a similar way as for the O'Connell-Yor polymer model given in \cite{jara2020stationary}. The proof of Lemma \ref{antisymm} is postponed to Section \ref{sec:expansion}. \section{The second-order Boltzmann-Gibbs principle} \label{sec:BG2} For each $\ell \in \mathbb{N}$ we denote a centered local average of $W_j $'s by $\overrightarrow{W}^\ell_j = \ell^{ - 1 } \sum_{ k = 0, \ldots, \ell -1 } \overline{W}_{ j + k } $. In addition, let $\tau_j $ denotes the canonical shift: $\tau_j \eta_i = \eta_{ i + j } $. We define \begin{equation} \label{Q} \mathcal{Q}^n_\rho (\ell ; t ) = \frac{ g^{ \prime \prime } (0) }{ 2 } \bigg( \big( \overrightarrow{W}^\ell_0 (t) \big)^2 - \frac{ \sigma^2_n (\rho ) }{ \ell } \bigg) \end{equation} where $\sigma_n^2 (\rho) = \mathrm{Var}_{\nu^n_\rho}[W_0]$. Then the following result is central to demonstrate the main theorem. \begin{theorem}[Second-order Boltzmann-Gibbs principle] \label{BG2} We have that \[ \begin{aligned} \mathbb{E}_n \bigg[ \sup_{ 0 \le t \le T } \bigg| \tilde{ \mathcal{B} }^n_t (\varphi ) - \int_0^t \sum_{ j \in \mathbb{Z} } \tau_j \mathcal{Q}^n_\rho (\ell; s ) \nabla^n \varphi^n_j (s) ds \bigg|^2 \bigg] \le C \bigg( \frac{ \ell }{ n^2 } + \frac{ T }{ \ell^{ 2 } } \bigg) \int_0^T \sum_{ j \in \mathbb{Z} } (\nabla^n \varphi^n_j ( t ) )^2 dt . \end{aligned} \] \end{theorem} \subsection{Preliminaries} To give a proof Theorem \ref{BG2}, we prepare for some basic tools. For each local $L^2 (\nu^n_\rho ) $ function $F$, we define its $H^{1, n } $-norm by $\| F \|^2_{ 1 , n } = \langle F , - S_n F \rangle_{ L^2 (\nu^n_\rho ) } $, which is explicitly represented as \[ \| F \|^2_{1 , n } = \frac{ n^2}{2} \sum_{ j \in \mathbb{Z}, \, | j - j^\prime | = 1 } E_{ \nu^n_\rho } [g_n (\eta_j ) (\nabla_{ j ,j^\prime } F (\eta))^2 ] . 
\] Moreover we define the $H^{-1 , n } $-norm through the variational formula \[ \begin{aligned} \| F \|^2_{- 1, n } = \sup_{ f \in L^2 (\nu^n_\rho ), \, \text{local} } \big\{ 2 \langle F , f \rangle_{ L^2 (\nu^n_\rho ) } - \| f \|^2_{1, n } \big\} . \end{aligned} \] Then the following Kipnis-Varadhan inequality holds true. \begin{proposition}[Kipnis-Varadhan inequality] \label{KV} Let $F : [0, T ] \to L^2 (\nu^n_\rho ) $ be a function such that $E_{\nu^n_\rho} [F(t,\cdot ) ] = 0 $ for each $t \in [0, T]$. Then there exists a positive constant $C$ such that \[ \mathbb{E}_n \bigg[ \sup_{ 0 \le t \le T } \bigg| \int_0^t F (s, \eta^n (s) ) ds \bigg|^2 \bigg] \le C \int_0^T \| F(t, \cdot ) \|^2_{ - 1, n } dt . \] \end{proposition} Moreover, the following integration-by-parts formula is a key ingredient. \begin{lemma}[Integration by parts] \label{IBP} Let $f $ be any real-valued local $L^2 (\nu^n_\rho ) $-function on $\mathscr{X} $. Then for each $j \in \mathbb{Z} $ we have the identity \[ E_{ \nu^n_\rho } [ f ( \eta ) ( W_j - W_{ j + 1 } ) ] = - E_{ \nu^n_\rho } [\nabla_{ j , j + 1 } f (\eta ) W_j ] . \] \end{lemma} \begin{proof} First we observe that the invariant measure $\nu^n_\rho $ satisfies the identity \[ \nu^n_\rho (\eta^{ j , j + 1 } ) = \frac{ g_n ( \eta_j ) }{ g_n (\eta_{ j + 1} + 1 ) } \nu^n_\rho (\eta) \] for each $j \in \mathbb{Z} $ and each configuration $\eta \in \mathscr{X} $ such that $\eta_j > 0 $. From this identity, for any local function $f$ a change of variables yields \[ \begin{aligned} E_{ \nu^n_\rho } [ f (\eta) g_n (\eta_{ j + 1 }) ] = \sum_{\eta \in \mathscr{X} } f (\eta^{ j , j + 1 } ) g_n (\eta_{ j + 1 } + 1 ) \frac{ g_n (\eta_j ) }{ g_n (\eta_{j + 1} + 1 ) } \nu^n_\rho (\eta) = E_{ \nu^n_\rho} [ f (\eta^{ j , j + 1 } ) g_n (\eta_j ) ] , \end{aligned} \] where configurations with $\eta_j = 0$ do not contribute to either side since $g_n (0) = 0$. Now subtract $E_{ \nu^n_\rho } [ f (\eta) g_n (\eta_j) ] $ from both sides and divide by $g^\prime (0)$ to complete the proof. \end{proof} \subsection{Proof of Theorem \ref{BG2}} To show Theorem \ref{BG2}, we use the decomposition \[ \begin{aligned} \frac{g^{\prime \prime}(0)}{2} \overline{ W }_{j - 1 } \overline{ W }_j - \tau_j \mathcal{Q}^n_\rho (\ell ) = \frac{ g^{ \prime \prime } (0) }{ 2 } \bigg( \overline{ W }_{ j - 1 } ( \overline{ W}_j - \overrightarrow{W}^\ell_j ) + \overrightarrow{ W }^\ell_j ( \overline{W}_{ j - 1 } - \overrightarrow{W}^\ell_j ) + \frac{ \sigma^2_n ( \rho ) }{ \ell } \bigg) . \end{aligned} \] Then the following Lemmas \ref{oneblock} and \ref{BGmain} finish the proof of the second-order Boltzmann-Gibbs principle. \begin{lemma} \label{oneblock} There exists a positive constant $C$ such that \[ \begin{aligned} \mathbb{E}_n \bigg[ \sup_{ 0 \le t \le T } \bigg| \int_0^t \sum_{ j \in \mathbb{Z} } \overline{ W }_{ j -1 } ( s ) ( \overline{W}_j ( s ) - \overrightarrow{W}^\ell_j ( s ) ) \varphi_j (s) ds \bigg|^2 \bigg] \le C \frac{ \ell }{ n^2 } \int_0^T \sum_{ j \in \mathbb{Z} } ( \varphi_j (t ) )^2 dt .
\end{aligned} \] \end{lemma} \begin{proof} Recalling the definition of the local average, we first observe \[ \begin{aligned} \sum_{ j \in \mathbb{Z} } \overline{ W }_{ j -1 } ( \overline{W}_j - \overrightarrow{W}^\ell_j ) \varphi_j & = \sum_{ j \in \mathbb{Z} } \overline{ W }_{ j -1 } \sum_{ i = 0 }^{ \ell -2 } ( W_{ j + i } - W_{ j + i +1 } ) \psi_{ i + 1} \varphi_j \\ & = \sum_{ k \in \mathbb{Z} } F_k ( W_k - W_{k + 1} ) \end{aligned} \] where $\psi_i = ( \ell - i ) / \ell $ and $F_k = \sum_{ i = 0 , \ldots, \ell - 2 } \overline{ W }_{ k - i - 1 } \psi_{i + 1 } \varphi_{ k - i } $, and in the second identity we let $k = j + i $ to rearrange the sum. Here we note that the local function $F_j $ is invariant under the action $\sigma_{ j , j + 1 } $. We fix any local, $L^2 (\nu^n_\rho ) $-function $f $. Then, according to the integration by parts formula (Lemma \ref{IBP}), we have \[ \begin{aligned} 2 \bigg\langle \sum_{ j \in \mathbb{Z} } \overline{ W }_{ j - 1 } ( \overline{W}_j - \overrightarrow{W}^\ell_j ) \varphi_j , f \bigg\rangle_{ L^2 (\nu^n_\rho ) } &= 2 \sum_{ j \in \mathbb{Z} } E_{ \nu^n_\rho } [ F_j ( W_j - W_{ j + 1 } ) f ] \\ & = - 2 \sum_{ j \in \mathbb{Z} } E_{ \nu^n_\rho } [ W_j (\nabla_{ j , j + 1 } f ) F_j ] . \end{aligned} \] By Young's inequality, we notice that the last display can be absolutely bounded by \[ \frac{n^2}{2} \sum_{ j \in \mathbb{Z} } E_{\nu^n_\rho } [ g_n (\eta_j ) (\nabla_{ j , j + 1 } f (\eta) )^2 ] + \frac{2}{ n^2 g^\prime (0)^2 } \sum_{ j \in \mathbb{Z} } E_{ \nu^n_\rho } [ g_n (\eta_j ) ] E_{\nu^n_\rho } [ F_j^2] \] where for the second term we noted that $F_j $ is independent of $\eta_j $. Moreover, note that the first term is bounded by $\| f \|^2_{1,n}$. Then, since $\sum_{ j } E_{ \nu_\rho } [F^2_j ] \le C \ell \sum_{j } \varphi^2_j $ we conclude with the help of the Kipnis-Varadhan inequality (Proposition \ref{KV}) that \[ \mathbb{E}_n \bigg[ \sup_{ 0 \le t \le T } \bigg| \int_0^t \sum_{ j \in \mathbb{Z} } \big[ \overline{ W }_{ j -1 } ( s) ( \overline{ W }_j (s ) - \overrightarrow{W}^\ell_j (s) ) \varphi_j (s) \big] ds \bigg|^2 \bigg] \le C \frac{ \ell }{ n^2 } \int_0^T \sum_{ j \in \mathbb{Z} } ( \varphi_j (t ) )^2 dt \] for some $C > 0 $. Hence we complete the proof. \end{proof} \begin{lemma} \label{BGmain} There exists a positive constant $C$ such that \[ \begin{aligned} & \mathbb{E}_n \bigg[ \sup_{ 0 \le t \le T } \bigg| \int_0^t \sum_{ j \in \mathbb{Z} } \big[ \overrightarrow{ W }^\ell_j (s) ( \overline{W}_{j -1} (s) - \overrightarrow{W}^\ell_j (s ) ) + \frac{ \sigma^2_n ( \rho ) }{ \ell } \big] \varphi_j (s) ds \bigg|^2 \bigg] \\ & \quad \le C \bigg( \frac{ \ell }{ n^2 } + \frac{ T }{\ell^2 } \bigg) \int_0^T \sum_{ j \in \mathbb{Z} } \varphi_j (t )^2 dt . \end{aligned} \] \end{lemma} \begin{proof} Recalling the definition of local averages, we have \[ \begin{aligned} \sum_{ j \in \mathbb{Z} } \overrightarrow{ W }^\ell_j ( \overline{W}_{j - 1 } - \overrightarrow{ W }^\ell_j ) \varphi_j = \sum_{ j \in \mathbb{Z} } \overrightarrow{ W }^\ell_j \sum_{ i = 0 }^{ \ell - 1 } ( \overline{W}_{ j + i - 1 } - \overline{W}_{ j + i } ) \psi_i \varphi_j \end{aligned} \] where $\psi_i = ( \ell - i ) / \ell $. Then we fix any local function $f : \mathscr{X} \to \mathbb{R} $ and apply integration by parts (Lemma \ref{IBP}). First we consider the case $1 \le i \le \ell - 1 $. To make notations simple, we write $W_j^+ = g_n (\eta_j + 1 ) / g^\prime (0) $ and $W^-_j = g_n (\eta_j -1 )/ g^\prime (0) $. 
Then noting \[ \begin{aligned} \nabla_{ j + i -1 , j + i } \overrightarrow{ W }^\ell_j = \ell^{ - 1 } ( W^-_{ j + i -1 } - W_{ j + i -1} + W^+_{ j + i } - W_{ j + i } ) \end{aligned} \] provided $\eta_{ j + i - 1 } > 0$, we have \[ \begin{aligned} & E_{ \nu^n_\rho } \big[ \overrightarrow{ W }^\ell_j ( \overline{W}_{ j + i - 1} - \overline{W}_{ j + i } ) f \big] \\ &\quad = - E_{ \nu^n_\rho } [ (\sigma_{ j + i - 1 , j + i } \overrightarrow{ W }^\ell_j ) ( \nabla_{ j + i - 1 , j + i } f ) W_{ j + i -1 } ] - E_{ \nu^n_\rho } [ f ( \nabla_{ j + i -1 , j + i } \overrightarrow{ W }^\ell_j ) W_{ j + i - 1 } ] . \end{aligned} \] For the first term in the right-hand side, we can use Young's inequality and then apply the Kipnis-Varadhan inequality to get a bound of order $\ell /n^2 $ by the same calculation as in Lemma \ref{oneblock}. For the second term, to make use a similar procedure, we decompose \[ \begin{aligned} & W_{j + i - 1 } ( W^-_{ j + i -1 } - W_{ j + i -1} + W^+_{ j + i } - W_{ j + i } ) \\ & \quad = - ( W_{ j + i -1 } W_{ j + i } - W_{ j + i -1 } W^-_{ j + i - 1 } ) + ( W_{ j + i -1 } W^+_{ j + i } - W_{ j + i }^2 ) + ( W_{ j + i } ^2 - W_{ j + i - 1 }^2 ) . \end{aligned} \] Then one can find that \[ \begin{aligned} & E_{ \nu^n_\rho } [ f ( \nabla_{ j + i -1 , j + i } \overrightarrow{ W }^\ell_j ) W_{ j + i - 1 } ] \\ & \quad = - \ell^{ -1 } E_{ \nu^n_\rho } [ W_{ j + i - 1 } W^-_{ j + i - 1} ( \nabla_{ j + i - 1, j + i } f ) ] + \ell^{ -1 } E_{ \nu^n_\rho } [ W_{ j + i }^2 (\nabla_{ j + i, j + i - 1 } f ) ] \\ & \qquad + \ell^{ - 1 } E_{ \nu^n_\rho } [ f (W_{ j + i }^2 - W_{ j + i - 1 }^2 ) ] . \end{aligned} \] From the first two terms we deduce the $\ell /n^2 $-bound with the help of the Kipnis-Varadhan inequality and thus hereafter we may only consider the third term. On the other hand, we consider the case $i = 0 $. Similarly to the above, a Leibniz rule for the derivative operator $\nabla_{j-1, j}$ yields \begin{equation*} \begin{aligned} & E_{ \nu^n_\rho } \big[ \overrightarrow{ W }^\ell_j ( \overline{W}_{ j - 1} - \overline{W}_j ) f \big] \\ & \quad = - E_{ \nu^n_\rho } [ (\sigma_{ j - 1 , j } \overrightarrow{ W }^\ell_j ) ( \nabla_{ j -1 , j } f ) W_{ j -1 } ] - E_{ \nu^n_\rho } [ f ( \nabla_{ j -1 , j } \overrightarrow{ W }^\ell_j ) W_{ j - 1 } ] . \end{aligned} \end{equation*} Then the first term in the last display gives an $\ell /n^2$-bound so that we may focus on the second term. Moreover, note that \[ \begin{aligned} \nabla_{ j -1 , j } \overrightarrow{ W }^\ell_j = \ell^{ - 1 } ( W^+_j - W_j ) \end{aligned} \] provided $\eta_{j -1 } > 0 $. To take advantage of a similar calculation as the case $i \ge 1$, we use a decomposition \[ \begin{aligned} W_{ j - 1 } ( W^+_j - W_j ) = (W_{j-1} W_j^+ - W_j^2) + (W_j^2 - W_{j-1} W_j) . \end{aligned} \] Then we have that \begin{equation} \label{i0} \begin{aligned} E_{ \nu^n_\rho } [ f ( \nabla_{ j -1 , j } \overrightarrow{ W }^\ell_j ) W_{ j - 1 } ] = \ell^{-1} E_{\nu^n_\rho}[W_j^2 (\nabla_{j,j-1}f)] + \ell^{-1} E_{\nu^n_\rho} [W_j (W_j -W_{j-1})f] . \end{aligned} \end{equation} From the first term, we get an extra $\ell /n^2 $ factor to bound them and thus we only consider the second term. 
By this line, we obtained \[ \begin{aligned} & \mathbb{E}_n \bigg[ \sup_{ 0 \le t \le T } \bigg| \int_0^t \sum_{ j \in \mathbb{Z} } \big[ \overrightarrow{ W }^\ell_j (s) ( \overline{W}_{j -1} (s) - \overrightarrow{W}^\ell_j (s ) ) + \frac{ \sigma^2_n ( \rho ) }{ \ell } \big] \varphi_j (s) ds - \mathcal{R}^n_t (\varphi) \bigg|^2 \bigg] \\ & \quad \le C \frac{ \ell }{ n^2 } \int_0^T \sum_{ j \in \mathbb{Z} } \varphi_j (t )^2 dt \end{aligned} \] where \[ \mathcal{R}^n_t (\varphi) = \frac{1}{\ell} \int_0^t \sum_{j \in \mathbb{Z}} \bigg[ - \sum_{i=1}^{\ell-1} (W^2_{j+i}-W^2_{j+i-1})\psi_i - W_j (W_j - W_{j-1}) + \sigma_n^2(\rho) \bigg] \varphi^n_j (s )ds . \] Now our task is to estimate $\mathcal{R}^n$ directly. Here we notice that \[ \sum_{i=1}^{\ell-1} (W^2_{j+i} - W^2_{j+i-1})\psi_i = (\overrightarrow{W^2})^\ell_j - ( W^2_j - E_{\nu_\rho}[W^2_j] ) \] where $(\overrightarrow{ W^2 })^\ell_j = \ell^{ - 1} \sum_{ i = 0, \ldots, \ell- 1 } ( W^2_{j+i} - E_{\nu_\rho}[W^2_{j+i}] ) $ is centered local averages defined similarly for $W_j$. Then by Schwarz's inequality and stationarity, we have \[ \begin{aligned} \mathbb{E}_n \bigg[ \sup_{ 0 \le t \le T} \bigg| \int_0^t \frac{ 1 }{ \ell } \sum_{ j \in \mathbb{Z} } (\overrightarrow{W^2})^\ell_j (s) \varphi_j (s) ds \bigg|^2 \bigg] & \le \frac{ T }{ \ell^2 } \mathbb{E}_n \bigg[ \sup_{ 0 \le t \le T } \int_0^t \bigg( \sum_{ j \in \mathbb{Z} } (\overrightarrow{W^2})^\ell_j (s) \varphi_j (s) ds \bigg)^2 \bigg] \\ & \le \frac{ T }{ \ell^2 } \int_0^T \mathbb{E}_n \bigg[ \bigg( \sum_{ j \in \mathbb{Z} } (\overrightarrow{W^2})^\ell_j (s) \varphi_j (s) \bigg)^2 \bigg] ds . \end{aligned} \] However, noting $(\overrightarrow{W^2})^\ell_j $ and $(\overrightarrow{W^2})^\ell_k $ are independent if $| j - k | \ge \ell $, we have that \[ \begin{aligned} \mathbb{E}_n \bigg[ \bigg( \sum_{ j \in \mathbb{Z} } (\overrightarrow{W^2})^\ell_j \varphi_j \bigg)^2 \bigg] & = \mathbb{E}_n \bigg[ \bigg( \sum_{ k = 0 }^{ \ell - 1 } \sum_{ j \in \mathbb{Z} } (\overrightarrow{W^2})^\ell_{ \ell j + k } \varphi_{ \ell j + k } \bigg)^2 \bigg] \\ & \le \ell \sum_{ k = 0 }^{ \ell -1 } \mathbb{E}_n \bigg[ \bigg( \sum_{ j \in \mathbb{Z} } (\overrightarrow{W^2})^\ell_{ \ell j + k } \varphi_{ \ell j + k } \bigg)^2 \bigg] \\ & = \ell \sum_{ k = 0 }^{ \ell - 1 } \sum_{ j \in \mathbb{Z} } \mathbb{E}_n \big[ \big( (\overrightarrow{W^2})^\ell_{ \ell j + k } \big)^2 \big] \varphi_{ \ell j + k }^2 \le C \sum_{ k = 0 }^{ \ell -1 } \sum_{ j \in \mathbb{Z} } \varphi_{ \ell j + k }^2 \end{aligned} \] for some $C > 0 $ since $\mathbb{E}_n [ ( (\overrightarrow{W^2})^\ell_j )^2 ] \le C \ell^{ - 1 } $ for each $j $. On the other hand, since random variables $W_j^2-E_{\nu_\rho} [W^2_j]$ and $W_j(W_j - W_{j-1}) - \sigma_n^2(\rho)$ are centered, Schwarz's inequality bring us the desired estimate. Hence combining all the estimates we complete the proof. \end{proof} \section{Tightness} \label{sec:tightness} Recall the martingale decomposition \eqref{decomposition}. Note that according to Lemma \ref{antisymm} for the anti-symmetric part, we may consider the modified process $\{ \tilde{ \mathcal{B} }^n_t : t \in [0,T] \} $ defined by \eqref{modified} instead of the original process $\{ \mathcal{B}^n_t : t \in [0,T] \} $. We show tightness of each process as follows. 
\begin{lemma} \label{tightness} The sequences $\{ \mathcal{X}^n_t : t \in [0, T ] \}_{ n \in \mathbb{N} } $, $\{ \mathcal{M}^n_t : t \in [0, T ] \}_{ n \in \mathbb{N} } $, $\{ \mathcal{S}^n_t : t \in [0, T ] \}_{ n \in \mathbb{N} } $ and $\{ \tilde{\mathcal{B}}^n_t : t \in [0, T ] \}_{ n \in \mathbb{N} } $, when the processes start from the invariant measure $\nu^n_\rho $, are tight in the uniform topology on $D ([0, T ] , \mathcal{S}^\prime (\mathbb{R} ) ) $. \end{lemma} To prove tightness of a sequence of processes, the following criteria are helpful. \begin{proposition}[Mitoma's criterion, \cite{mitoma1983tightness}] \label{Mitoma} A sequence of $\mathcal{S}^\prime (\mathbb{R} ) $-valued processes $\{ \mathcal{Y}^n_t : t \in [0, T ] \}_{ n \in \mathbb{N} } $ with trajectories in $D ([0, T ] , \mathcal{S}^\prime (\mathbb{R} ) ) $ is tight with respect to the Skorohod topology if and only if the sequence $\{ \mathcal{Y}^n_t (\varphi ) : t \in [0, T ] \}_{ n \in \mathbb{N} } $ of real-valued processes is tight with respect to the Skorohod topology of $D ([0, T ] , \mathbb{R} ) $ for any $\varphi \in \mathcal{S} ( \mathbb{R} ) $. \end{proposition} \begin{proposition}[Aldous' criterion] \label{Aldous} A sequence $\{ X^n_t : t \in [0, T ] \}_{ n \in \mathbb{N} } $ of real-valued processes is tight with respect to the Skorohod topology of $D ([0, T ] , \mathbb{R} ) $ if the following two conditions hold. \begin{enumerate} \item The sequence of real-valued random variables $\{ X^n_t \}_{ n \in \mathbb{N} } $ is tight for any $ t\in [0, T ] $. \item For any $\varepsilon > 0$, \[ \lim_{ \delta \to 0 } \limsup_{ n \to \infty } \sup_{ \gamma \le \delta } \sup_{ \tau \in \mathcal{T}_T } \mathbb{P}_n \big( | X^n_{ \tau + \gamma } - X^n_\tau | > \varepsilon \big) = 0 , \] where $\mathcal{T}_T$ is the set of stopping times bounded by $T $, with the convention $X^n_{ \tau + \gamma } = X^n_T$ if $ \tau + \gamma > T$. \end{enumerate} \end{proposition} The rest of this section is devoted to the proof of Lemma \ref{tightness}. With the help of Mitoma's criterion \cite{mitoma1983tightness}, it suffices to show tightness of the sequences $\{ \mathcal{X}^n_t (\varphi) : t \in [0, T ] \}_{ n \in \mathbb{N} } $, $\{ \mathcal{S}^n_t (\varphi) : t \in [0, T ] \}_{ n \in \mathbb{N} } $, $\{ \tilde{\mathcal{B}}^n_t (\varphi) : t \in [0, T ] \}_{ n \in \mathbb{N} } $ and $\{ \mathcal{M}^n_t (\varphi) : t \in [0, T ] \}_{ n \in \mathbb{N} } $ with respect to the uniform topology on $D ([0, T ] , \mathbb{R} ) $ for any given test function $\varphi \in \mathcal{S} (\mathbb{R} ) $. Moreover, one can notice that the sequence of random variables $\{ \mathcal{X}^n_0 (\varphi ) \}_{ n \in \mathbb{N}} $ converges to a mean-zero normal random variable with variance $\rho \| \varphi \|^2_{L^2(\mathbb{R})}$, which in particular shows that the sequence $\{ \mathcal{X}^n_0 \}_{n \in \mathbb{N} } $ is tight. Hence from here on we focus on tightness of the martingale, symmetric and anti-symmetric parts. \subsection{Martingale part} First we consider the martingale term. Recall that the quadratic variation of the martingale $\mathcal{M}^n_t (\varphi ) $ is given by \eqref{qv}.
Then for any stopping time $\tau \in \mathcal{T}_T $ we have \[ \begin{aligned} \mathbb{P}_{ n } \big( | \mathcal{M}^n_{ \tau + \gamma } (\varphi) - \mathcal{M}^n_\tau (\varphi) | > \varepsilon \big) & \le \varepsilon^{- 2 } \mathbb{E}_{n } \big[ | \mathcal{M}^n_{\tau + \gamma } (\varphi) - \mathcal{M}^n_\tau (\varphi) |^2 \big] \\ & \le \varepsilon^{ - 2 } \mathbb{E}_{ n } \bigg[ \int_\tau^{\tau + \gamma } \frac{1}{n} \sum_{ j \in \mathbb{Z} } g_n (\eta^n_j (s)) \big[ n ( \varphi^n_{j+1} (s) - \varphi^n_j (s) ) \big]^2 ds \bigg] \\ & \le C \varepsilon^{-2} \int_\tau^{\tau + \gamma} \frac{1}{n} \sum_{j \in \mathbb{Z} } \big[ n ( \varphi^n_{j+1} (s) - \varphi^n_j (s) ) \big]^2 ds \end{aligned} \] since $E_{\nu^n_\rho } [g_n (\eta_0)] = \Phi_n (\rho) $ is bounded uniformly in $n$ by \eqref{eq:Phi}. The last term is at most $C \varepsilon^{-2} \gamma $ times a Riemann sum for $\| \partial_x \varphi \|^2_{ L^2 (\mathbb{R} ) } $ and hence vanishes as $\gamma $ tends to zero for each $\varepsilon $, so that the second condition of Aldous' criterion (Proposition \ref{Aldous}) is satisfied. On the other hand, the first condition can be easily verified since the estimate $ \mathbb{E}_{n } [ \mathcal{M}^n_t (\varphi )^ 2] \le C t \| \partial_x \varphi \|^2_{ L^2 (\mathbb{R} ) } $ ensures that the sequence $\{ \mathcal{M}^n_t (\varphi ) \}_{n \in \mathbb{N} } $ is uniformly bounded in $L^2 (\mathbb{P}_{n } ) $. Hence tightness of the martingale term is proved. \subsection{Symmetric part} Next we show tightness of $\mathcal{S}^n $. Recalling the definition \eqref{eq:symm}, we have the expression \[ \mathcal{S}^n_t (\varphi ) = \frac{1}{ 2\sqrt{n} } \int_0^t \sum_{ j \in \mathbb{Z} } ( g_n ( \eta^n_j (s ) ) - \Phi_n (\rho) ) \Delta^n \varphi^n_j (s) ds , \] where we used the fact that the summation of $\Delta^n \varphi^n_j $ over $\mathbb{Z} $ equals zero. Noting $E_{ \nu^n_\rho } [ g_n (\eta_j ) ] = \Phi_n (\rho) $, by Schwarz's inequality and stationarity, we have that \[ \begin{aligned} \mathbb{E}_{ n } \big[ \big( \mathcal{S}^n_t (\varphi ) - \mathcal{S}^n_s (\varphi ) \big)^2 \big] & \le \frac{ t - s }{4 n } \mathbb{E}_n \bigg[ \int_s^t \bigg( \sum_{ j \in \mathbb{Z} } ( g_n (\eta^n_j ( r )) - \Phi_n (\rho ) ) \Delta^n \varphi^n_j (r ) \bigg)^2 dr \bigg] \\ & \le \frac{(t -s)^2}{ 4 } E_{\nu^n_\rho } \big[ ( g_n ( \eta_j ) - \Phi_n (\rho) )^2 \big] \sup_{ s \le r \le t } \bigg( \frac{ 1 }{ n } \sum_{ j \in \mathbb{Z} } (\Delta^n \varphi^n_j (r) )^2 \bigg) \\ & \le C (t - s )^2 \| \partial^2_{ x } \varphi \|^2_{ L^2 (\mathbb{R} ) } \end{aligned} \] for every $s, t \in [0 , T] $ such that $s \le t $. Therefore by the Kolmogorov-Chentsov criterion we conclude that the sequence $\{ \mathcal{S}^n_t (\varphi ) : t \in [0, T ] \}_{ n \in \mathbb{N} } $ is tight with respect to the uniform topology of $C ([0, T ] , \mathbb{R} ) $ and any limit point has $\alpha $-H\"{o}lder continuous trajectories with $\alpha < 1 /2 $. \subsection{Anti-symmetric part} Finally we consider the anti-symmetric part. By the second-order Boltzmann-Gibbs principle (Theorem \ref{BG2}) and stationarity, we have \[ \mathbb{E}_n \bigg[ \bigg| \tilde{\mathcal{B}}^n_t (\varphi) - \tilde{\mathcal{B}}^n_s (\varphi) - \int_s^t \sum_{ j \in \mathbb{Z} } \tau_j \mathcal{Q}^n_\rho (\ell ; r ) \nabla^n \varphi^n_j (r ) dr \bigg|^2 \bigg] \le C \| \partial_x \varphi \|^2_{ L^2 (\mathbb{R} ) } \bigg( \frac{ (t- s ) \ell }{ n } + \frac{ (t- s )^2 n }{ \ell^2 } \bigg) .
\] On the other hand, by rearranging the sum as $\sum_{j} \tau_j \mathcal{Q} = \sum_{ i = 0, \ldots, \ell -1 } \sum_{ j } \tau_{ j \ell + i } \mathcal{Q} $ and recalling that $\tau_j \mathcal{Q}$ and $\tau_k \mathcal{Q}$ are independent if $| j - k | \ge \ell $, we can show by an $L^2$-computation that \[ \mathbb{E}_n \bigg[ \bigg| \int_s^t \sum_{ j \in \mathbb{Z} } \tau_j \mathcal{Q}^n_\rho (\ell ; r ) \nabla^n \varphi^n_j (r ) dr \bigg|^2 \bigg] \le C \| \partial_x \varphi \|^2_{ L^2 (\mathbb{R} ) } \frac{ (t -s )^2 n }{ \ell } . \] Here we used $\mathbb{E}_n [\mathcal{Q}^n_\rho (\ell ; t )^2] = O (\ell^{-2 })$. Now we show tightness. First we consider the case $ t-s \ge 1/n^2 $. Then taking the scaling parameter $\ell $ proportional to $(t -s )^{ 1 / 2 } n $, we obtain \[ \mathbb{E}_n \big[ \big| \tilde{ \mathcal{B} }^n_t (\varphi) - \tilde{ \mathcal{B} }^n_s (\varphi) \big|^2 \big] \le C \| \partial_x \varphi \|^2_{ L^2 (\mathbb{R} ) } (t -s )^{ 3 /2 } . \] Next we turn to the short-time regime $t- s \le 1/ n^2 $. Recalling the definition of $\tilde{\mathcal{B}}^n_t $, we have by a direct estimate \[ \mathbb{E}_n \big[ \big| \tilde{ \mathcal{B} }^n_t (\varphi) - \tilde{ \mathcal{B} }^n_s (\varphi) \big|^2 \big] \le C \| \partial_x \varphi \|^2_{ L^2 (\mathbb{R} ) } (t -s )^2 n \le C \| \partial_x \varphi \|^2_{ L^2 (\mathbb{R} ) } (t -s )^{3 / 2} . \] Therefore, combining the above estimates we conclude that the sequence $\{ \tilde{\mathcal{B}}^n_t (\varphi ) : t \in [0, T] \} $ is tight according to the Kolmogorov-Chentsov criterion. \section{Identification of the limit point} \label{sec:limitpt} Again recall the martingale decomposition \eqref{decomposition}. We proved in Section \ref{sec:tightness} that the sequences $\{ \mathcal{X}^n_t : t \in [0, T] \}_{ n \in \mathbb{N} } $, $\{ \mathcal{M}^n_t : t \in [0, T] \}_{ n \in \mathbb{N} } $, $\{ \mathcal{S}^n_t : t \in [0, T] \}_{ n \in \mathbb{N} } $ and $\{ \tilde{ \mathcal{B} }^n_t : t \in [0, T] \}_{ n \in \mathbb{N} } $ are tight in $D ([0, T ] , \mathcal{S}^\prime (\mathbb{R} ) ) $, so that there exist processes $\mathcal{X}$, $\mathcal{M}$, $\mathcal{S}$ and $\tilde{ \mathcal{B} }$ such that \[ \begin{aligned} \lim_{ n \to \infty } \mathcal{X}^n = \mathcal{X}, \quad \lim_{ n \to \infty } \mathcal{M}^n = \mathcal{M}, \quad \lim_{ n \to \infty } \mathcal{S}^n = \mathcal{S}, \quad \lim_{ n \to \infty } \tilde{ \mathcal{B} }^n = \tilde{ \mathcal{B} } \end{aligned} \] in distribution along some subsequence, which is still denoted by $n $. In the sequel, we characterize the limiting processes. \subsection{Martingale part} We decompose the quadratic variation of the martingale part, which is given by \eqref{qv}, as \[ \begin{aligned} \langle \mathcal{M}^n (\varphi ) \rangle_t & = \frac{1}{n} \int_0^t \sum_{j \in \mathbb{Z} } g^\prime (0) \rho \big[ n ( \varphi^n_{j+1} (s) - \varphi^n_j (s) ) \big]^2 ds \\ & \quad + \frac{1}{n} \int_0^t \sum_{j \in \mathbb{Z} } [ \Phi_n (\rho) - g^\prime (0) \rho ] \big[ n ( \varphi^n_{j+1} (s) - \varphi^n_j (s) ) \big]^2 ds \\ & \quad + \frac{1}{n} \int_0^t \sum_{j \in \mathbb{Z} } [ g_n ( \eta^n_j (s) ) - \Phi_n ( \rho ) ] \big[ n ( \varphi^n_{j+1} (s) - \varphi^n_j (s) ) \big]^2 ds . \end{aligned} \] Then it is easy to show that $\langle \mathcal{M}^n (\varphi ) \rangle_t $ converges to $g^\prime (0) \rho \| \partial_x \varphi \|^2_{L^2 (\mathbb{R} ) } t $ in $L^2 (\mathbb{P}_n )$ as $n$ tends to infinity.
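Let us sketch this elementary computation. The first term above is deterministic and, being a Riemann sum, satisfies
\[
\frac{1}{n} \int_0^t \sum_{j \in \mathbb{Z} } g^\prime (0) \rho \big[ n ( \varphi^n_{j+1} (s) - \varphi^n_j (s) ) \big]^2 ds \xrightarrow[n \to \infty]{} g^\prime (0) \rho \int_0^t \int_{\mathbb{R}} ( \partial_x \varphi (x) )^2 dx \, ds = g^\prime (0) \rho \, \| \partial_x \varphi \|^2_{ L^2 (\mathbb{R} ) } \, t ;
\]
the second term vanishes deterministically by \eqref{eq:Phi}; and the third term vanishes in $L^2 (\mathbb{P}_n )$ since, by stationarity and the independence of the occupation variables under $\nu^n_\rho$, its second moment is bounded by a constant multiple of $t^2 n^{-2} \sup_{0 \le s \le t} \sum_{j \in \mathbb{Z}} [ n ( \varphi^n_{j+1} (s) - \varphi^n_j (s) ) ]^4 = O (t^2 / n )$, provided the variance of $g_n (\eta_0 )$ under $\nu^n_\rho $ is bounded uniformly in $n$, which we take for granted here. Note also that the resulting noise strength $D = g^\prime (0) \rho$, combined with the viscosity $\nu = g^\prime (0) / 2$ appearing in \eqref{SBEthm}, gives $D / (2 \nu ) = \rho$, in agreement with condition \textbf{(S)}.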
Then, in a similar way to \cite{gonccalves2015stochastic}, we can show that the limiting process $\{ \mathcal{M}_t (\varphi) : t \in [0,T] \} $ is a martingale with quadratic variation $g^\prime (0) \rho \| \partial_x \varphi \|^2_{L^2 (\mathbb{R})} t$. \subsection{Symmetric part} Recalling that $g_n $ approaches a linear function as $n$ tends to infinity, we decompose $\mathcal{S}^n$ as \[ \begin{aligned} \mathcal{S}^n_t (\varphi ) = \frac{g^\prime (0) }{2\sqrt{n}} \int_0^t \sum_{j \in \mathbb{Z}} \eta^n_j (s) \Delta^n \varphi^n_j (s) ds + \frac{1}{2\sqrt{n}} \int^t_0 \sum_{j \in \mathbb{Z} } [g_n (\eta^n_j (s) ) - g^\prime(0) \eta^n_j (s) ] \Delta^n \varphi^n_j (s) ds . \end{aligned} \] The second term converges to zero in $ L^2 (\mathbb{P}_n ) $ as $n$ tends to infinity. Thus the tightness of $\mathcal{X}^n_\cdot $ immediately shows \[ \mathcal{S}_t (\varphi ) = \frac{ g^\prime (0) }{2} \int_0^t \mathcal{X}_s (\partial_x^2 \varphi ) ds . \] \subsection{Anti-symmetric part} We are now in a position to identify the limit of the anti-symmetric part $\tilde{ \mathcal{B} }$. Here we define a modified version of the fluctuation field by \[ \tilde{ \mathcal{X} }^n_t (\varphi ) = \frac{1}{ \sqrt{n} } \sum_{ j \in \mathbb{Z} } \overline{W}_j (t) \varphi^n_j (t) \] for each $\varphi \in \mathcal{S} (\mathbb{R}) $. Recall the definition of $\mathcal{Q}^n_\rho (\ell ; t ) $ given in \eqref{Q} and the function $\iota_{\varepsilon } (x ; \cdot) : \mathbb{R} \to \mathbb{R} $ defined by $\iota_\varepsilon (x; y ) = \varepsilon^{ - 1 } \mathbf{1}_{ [ x, x + \varepsilon ) } (y) $ for each $x \in \mathbb{R}$. Hereafter, abusing notation, we denote the integer part of $\varepsilon n $ by the same symbol $\varepsilon n$. Then one can notice that $\tilde{ \mathcal{X} }^n_t (\iota_{\varepsilon } ( \frac{ j - f_n t }{ n } ; \cdot ) ) = \sqrt{n} \overrightarrow{W}^\ell_j (t) $ with $\ell = \varepsilon n$, for each $j \in \mathbb{Z} $, so that \[ \sum_{ j \in \mathbb{Z} } \tau_j \mathcal{Q}^n_\rho (\varepsilon n ; t ) \nabla^n \varphi^n_j (t ) = \frac{ g^{ \prime \prime } (0) }{ 2 n } \sum_{ j \in \mathbb{Z} } \big( \tilde{ \mathcal{X} }^n_t ( { \textstyle \iota_{ \varepsilon } ( \frac{ j- f_n t }{ n } ; \cdot ) } ) \big)^2 \nabla^n \varphi^n_j (t ) , \] where we also used that $\sum_{ j \in \mathbb{Z} } \nabla^n \varphi^n_j (t ) = 0 $. Then, letting $n $ tend to infinity, we obtain the limit \[ \frac{g^{\prime \prime }(0)}{2} \mathcal{A}^\varepsilon_{ s,t } (\varphi ) = \lim_{ n \to \infty } \int_s^t \sum_{ j \in \mathbb{Z} } \tau_j \mathcal{Q}^n_\rho (\varepsilon n ; r ) \nabla^n \varphi^n_j (r ) dr \] in $L^2 $. Note that such a limiting procedure does not hold immediately since the function $\iota_\varepsilon (x; \cdot) $ is not in $\mathcal{S} (\mathbb{R})$. However, we can approximate it by functions in $\mathcal{S} (\mathbb{R})$ and we can justify the convergence (see \cite{gonccalves2014nonlinear} for details). By Theorem \ref{BG2}, we have \[ \mathbb{E}_n \bigg[ \bigg| \tilde{ \mathcal{B} }^n_t (\varphi ) - \tilde{\mathcal{B}}^n_s (\varphi ) - \int_s^t \sum_{ j \in \mathbb{Z} } \tau_j \mathcal{Q}^n_\rho (\ell ; r ) \nabla^n \varphi^n_j (r) dr \bigg|^2 \bigg] \le C \| \partial_x \varphi \|^2_{ L^2 (\mathbb{R} ) }\bigg( \frac{ (t -s ) \ell }{ n } + \frac{ (t-s )^2 n }{ \ell^2 } \bigg) .
\] Now taking $\ell = \varepsilon n $ and then letting $n \to \infty $, we obtain \begin{equation} \label{B-A} \mathbb{E}_n \bigg[ \bigg| \tilde{ \mathcal{B} }_t (\varphi ) - \tilde{\mathcal{B}}_s (\varphi ) - \frac{g^{\prime \prime}(0)}{2}\mathcal{A}^\varepsilon_{ s , t } (\varphi ) \bigg|^2 \bigg] \le C \varepsilon (t - s ) \| \partial_x \varphi \|^2_{ L^2 (\mathbb{R} ) } , \end{equation} from which we get the condition \textbf{(EC2)} with the help of the triangle inequality. Hence Proposition \ref{nonlinear} gives the existence of the limit $\mathcal{A}_t (\varphi ) = \lim_{ \varepsilon \to 0 } \mathcal{A}^\varepsilon_{0, t } (\varphi ) $ for each $\varphi \in \mathcal{S} (\mathbb{R} ) $. In addition, the estimate \eqref{B-A} ensures that $\tilde{ \mathcal{B} } = (g^{\prime \prime}(0)/2) \mathcal{A}$. Moreover, we show that condition \textbf{(EC1)} also holds. For that purpose, it suffices to check that \[ \mathbb{E}_n \bigg[ \bigg| \int_0^t \tilde{ \mathcal{ X } }^n_s (\partial_x^2 \varphi ) ds \bigg|^2 \bigg] \le C t \| \partial_x^2 \varphi \|^2_{ L^2 (\mathbb{R}) } . \] Furthermore, by summation by parts and the smoothness of the test function $\varphi $, it is enough to verify that \[ \mathbb{E}_n \bigg[ \bigg| \int_0^t \sqrt{n } \sum_{ j \in \mathbb{Z} } (\overline{W}_{j - 1 } (s ) - \overline{W}_j (s) ) \nabla^n \varphi^n_j (s) ds \bigg|^2 \bigg] \le C t \| \partial_x \varphi \|^2_{ L^2 (\mathbb{R}) } . \] This follows from an $H^{ - 1 , n } $-estimate as in Section \ref{sec:BG2}. Indeed, by the integration-by-parts formula given in Lemma \ref{IBP}, we have \[ \begin{aligned} 2 \bigg\langle \sqrt{n } \sum_{ j \in \mathbb{Z} } (\overline{W}_{j - 1 } - \overline{W}_j ) \nabla^n \varphi^n_j , f \bigg\rangle_{ L^2 (\nu^n_\rho ) } = - 2 \sqrt{n} E_{ \nu^n_\rho } \bigg[ \sum_{ j\in \mathbb{Z} } W_{ j - 1 } \nabla^n \varphi^n_j \nabla_{ j-1 , j } f \bigg] . \end{aligned} \] By Young's inequality, recalling the definition of $W_j$, the absolute value of the last display is bounded above by \[ \frac{n^2}{2} \sum_{ j \in \mathbb{Z} } E_{ \nu^n_\rho } [ g_n (\eta_j) (\nabla_{ j , j + 1 } f )^2 ] + \frac{2}{g^\prime (0) n}\sum_{ j \in \mathbb{Z} } E_{ \nu^n_\rho } [ W_{ j - 1 } (\nabla^n \varphi^n_j )^2 ] . \] The first term is bounded by $\|f \|^2_{ 1 , n } $, so we conclude that condition \textbf{(EC1)} holds by the Kipnis-Varadhan inequality. Finally we note that all the above estimates also hold for the reversed process $\{ \mathcal{X}^n_{T-t} : t \in [0, T] \}$ and thus condition (3) of Definition \ref{def:energysol} is satisfied. In this way it is proved that the limiting process $\mathcal{X} $ is the energy solution of the stochastic Burgers equation \eqref{SBEthm}. \section{Proof of Lemma \ref{antisymm}} \label{sec:expansion} Finally, in this section we give the proof of Lemma \ref{antisymm} by showing how to choose the framing in a suitable way. Recall the definition of $W_j $ given in \eqref{eq:wdef}. Then by a Taylor expansion we have that \[ W_j = \eta_j + \frac{ g^{\prime \prime} (0 ) }{ 2 g^\prime (0 ) } \frac{ \eta_j^2 }{ \sqrt{n } } + \frac{ g^{ (3) } (0) }{ 6 g^\prime (0) } \frac{ \eta_j^3 }{ n } + O (n^{ - 3 /2 } ) . \] By Assumption \ref{ass:regularity}, a direct $L^2$ computation enables us to estimate the remainder term $O(n^{-3/2})$ as follows.
\[ \mathbb{E}_n \bigg[ \sup_{ 0 \le t \le T } \bigg| \sqrt{n} \int_0^t \sum_{ j \in \mathbb{Z} } ( O(n^{ - 3 /2 }) - E_{ \nu^n_\rho } [ O (n^{-3/2} ) ] ) \nabla^n \varphi^n_j (s) ds \bigg|^2 \bigg] \le C \frac{T^2}{n} \| \partial_x \varphi \|^2_{ L^2 (\mathbb{R} ) } , \] which vanishes as $n $ tends to infinity. In particular, the remainder term can be neglected. In the sequel, we write the difference $W_j - \eta_j $ in terms of the variables $W_j$. For that purpose, note that \[ \frac{a}{\sqrt{n}} W_j \eta_j + \frac{b}{n} W_j \eta_j^2 = a \frac{\eta_j^2}{\sqrt{n}} + \bigg( a \frac{ g^{ \prime \prime } (0 ) }{ 2 g^\prime (0 ) } + b \bigg) \frac{\eta_j^3}{n} + O (n^{-3/2}) . \] We choose the constants $a$ and $b$ so that the leading terms on the right-hand side of the above display coincide with those of $W_j - \eta_j $. Then we have \[ W_j - \eta_j = \frac{ g^{\prime \prime } (0) }{ 2 g^\prime (0) } \frac{ W_j \eta_j }{ \sqrt{n } } + \bigg( \frac{ g^{ (3) } (0 ) }{ 6 g^\prime (0) } - \frac{ g^{ \prime \prime } (0 )^2 }{ 4 g^\prime (0)^2 } \bigg) \frac{ W_j \eta_j^2 }{ n } + O (n^{ - 3/2 }) . \] Next we notice the following identities: \[ \begin{aligned} & \tilde{L}_n ( \eta_j^2 - \eta_j ) = 2 ( W_{ j -1 } - W_j ) \eta_j + 2 W_j , \\ & \tilde{L}_n \big( \eta_j^3 + \frac{3}{2} \eta_j^2 + \frac{ 5 }{ 2 } \eta_j \big) = 3 (W_{ j -1 } - W_j ) \eta_j^2 + 6 W_{ j - 1 } \eta_j + 3 W_j \end{aligned} \] where $\tilde{L}_n = ( n^2 g^\prime (0 ) )^{ -1} L_n $. Here we have the following result. \begin{lemma} \label{lem:range} Let $G$ be a local function such that $E_{\nu^n_\rho } [g(\eta_j ) G(\eta)] $ is finite for any $j \in \mathbb{Z} $, and write $G_j = \tau_j G$. Then we have \[ \mathbb{E}_n \bigg[ \sup_{0 \le t \le T } \bigg| \int_0^t \sum_{ j \in \mathbb{Z} } n^{-2} L_n G_j (\eta^n (s) ) \nabla^n \varphi^n_j (s) ds \bigg|^2 \bigg] \le \frac{C}{n^2} \int_0^T \sum_{j \in \mathbb{Z}} ( \nabla^n \varphi^n_j (s) )^2 ds . \] \end{lemma} \begin{proof} By the definition of the adjoint operator $L^*_n $, for any local $L^2 (\nu^n_\rho ) $ function $f$, we have that \[ \begin{aligned} \bigg\langle \sum_{j \in \mathbb{Z}} n^{-2} L_n G_j \nabla^n \varphi^n_j , f \bigg\rangle_{L^2(\nu^n_\rho)} & = \sum_{j \in \mathbb{Z}} \langle G_j \nabla^n \varphi^n_j, n^{-2} L_n^* f \rangle_{L^2(\nu^n_\rho)} \\ & = \sum_{\substack{ j, k \in \mathbb{Z}, \\ k \in \mathrm{supp}G_j \cup (\mathrm{supp}G_j +1 ) } } E_{ \nu^n_\rho } [ G_j (\eta ) \nabla^n \varphi^n_j g(\eta_k ) \nabla_{ k,k- 1 } f (\eta)] . \end{aligned} \] On the other hand, by Young's inequality, twice the last display is bounded by \[ \begin{aligned} & 2n^{-2} (| \mathrm{supp}G_j | + 1 ) \sum_{\substack{ j, k \in \mathbb{Z}, \\ k \in \mathrm{supp}G_j \cup (\mathrm{supp}G_j+1 ) } } E_{ \nu^n_\rho } [ g(\eta_k ) G_j (\eta )^2 ](\nabla^n \varphi^n_j)^2 \\ & \quad + \frac{n^2}{2} \sum_{ k \in \mathbb{Z} } E_{ \nu^n_\rho } [ g(\eta_k ) ( \nabla_{ k,k- 1 } f (\eta) )^2 ] . \end{aligned} \] Since the second term is bounded by $\| f \|^2_{1, n } $, we complete the proof by using the Kipnis-Varadhan inequality. \end{proof} Lemma \ref{lem:range} ensures that any quantity contained in the range of $\tilde{L}_n $ is small, and thus we can replace $W_{ j } \eta_j $ and $W_j \eta_j^2 $ by expressions in $W_{ j -1 } \eta_j $ and $W_{j -1 } \eta_j^2 $ without any trouble.
Moreover, again by a Taylor expansion, we have \[ \frac{ W_{ j - 1 } W_j }{ \sqrt{n} } = \frac{ W_{ j -1 } \eta_j }{ \sqrt{n} } + \frac{ g^{\prime \prime } (0) }{ 2 g^\prime ( 0 ) } \frac{ W_{ j -1 } \eta_j^2 }{ n } + O ( n^{ - 3/2 } ) . \] Hence by this line we obtain \begin{equation} \label{eq:expansion1} \begin{aligned} W_j - \eta_j =& \bigg( \frac{ g^{\prime \prime} (0) }{ 2 g^\prime (0) \sqrt{ n } } + \frac{ g^{ (3 ) } (0) }{ 3 g^\prime (0) n } - \frac{ g^{\prime \prime} (0)^2 }{ 2 g^\prime (0)^2 n } \bigg) W_{ j - 1 } W_j + \bigg( \frac{ g^{\prime \prime} (0) }{ 2 g^\prime (0) \sqrt{ n } } + \frac{ g^{ (3 ) } (0) }{ 6 g^\prime (0) n } - \frac{ g^{\prime \prime} (0)^2 }{ 4 g^\prime (0)^2 n } \bigg) W_j \\ & + \bigg( \frac{ g^{ (3 ) } (0) }{ 6 g^\prime (0) n } - \frac{ 3 g^{\prime \prime} (0)^2 }{ 4 g^\prime (0)^2 n } \bigg) W_{j - 1 } \eta_j^2 + R_j \end{aligned} \end{equation} where $R_j $ is a negligible error term. Finally we just need to estimate the order-three term. Since the coefficient of $W_{j-1} \eta^2_j$ in the above identity has order $O(n^{-1})$, we can replace $ \eta_j $ by $ W_j $ by using the same identity. To calculate further, we use identities \[ \begin{aligned} \tilde{L}_n (\eta_{ j - 1 } \eta_j \eta_{ j + 1} ) & = W_{ j -2} W_j W_{ j + 1 } + W_{ j - 1 }^2 W_{ j + 1} +W_{ j - 1} W_j^2 -3 W_{ j -1 }W_j W_{ j + 1 } \\ & \quad - W_{ j - 1 } W_{ j } - W_{ j - 1 } W_{ j + 1} + E^{(1)}_{ j } , \\ \tilde{L}_n (\eta_{ j - 1 }^2 \eta_{ j + 1} ) & = 2 W_{ j -2} W_{j - 1 } W_{ j + 1 } + W_{ j - 1}^2 W_j - 3 W_{ j - 1 }^2 W_{ j + 1} \\ & \quad + W_{ j - 2 } W_{ j + 1 } + W_{ j - 1 } W_{j + 1 } + E^{(2)}_{ j } , \\ \tilde{L}_n (\eta_{ j - 1 }^2 \eta_j ) & = 2 W_{ j -2} W_{j - 1 } W_{ j } - 3 W_{ j - 1 }^2 W_{ j } + W_{ j - 1}^3 \\ & \quad + W_{j-2} W_{j} + W_{ j - 1 } W_{ j} - 2 W_{j-1}^2 + W_{j-1} + E^{(3)}_{ j } ,\\ \tilde{L}_n \eta_{ j }^3 & = 3 W_{ j -1 } W_{ j }^2 - 3 W_j^3 \\ & \quad + 3 W_{ j -1 } W_j + 3 W_{ j }^2 + W_{j-1}- W_j + E^{( 4 ) }_{ j } \end{aligned} \] with some remainder terms $E^{(k)}_j $, which do not affect the limit. Here note that we can shift index of each term since for any local function $G$ we have that \[ \begin{aligned} & \mathbb{E}_n \bigg[ \sup_{0\le t \le T } \bigg| \frac{1}{\sqrt{n}} \int_0^t \sum_{j \in \mathbb{Z}} (\tau_j G -\tau_{j-1}G)(s)\nabla^n \varphi^n_j(s) ds \bigg|^2 \bigg] \\ & \quad = \mathbb{E}_n \bigg[ \sup_{0\le t \le T } \bigg| \frac{1}{\sqrt{n}} \int_0^t \sum_{j \in \mathbb{Z}} \tau_j G(s) (\nabla^n \varphi^n_j - \nabla^n \varphi^n_{j+1})(s) ds \bigg|^2 \bigg] \le C \frac{T^2}{n} E_{\nu^n_\rho}[G^2] \| \partial_x^2 \varphi \|^2_{L^2(\mathbb{R})} \end{aligned} \] where in the last inequality we used Schwarz's inequality. Moreover for order-two terms, we have that \[ \begin{aligned} \tilde{L}_n (\eta_{ j - 1 } \eta_j ) & = W_{ j -2 } W_{j} +W_{j-1}^2 - 2W_{ j - 1 } W_j - W_{j-1} + E^{(5)}_{ j } , \\ \tilde{L}_n (\eta_{ j - 1 } \eta_{ j + 1} ) & = W_{ j -2} W_{ j + 1 } - 2 W_{ j - 1 } W_{ j + 1} + W_{ j - 1} W_j + E^{(6)}_{ j } , \\ \tilde{L}_n \eta_j^2 & = 2 W_{ j - 1} W_j - 2 W_j^2 + W_{j-1} + W_j + E^{(7)}_{ j } . \end{aligned} \] From these identities and shifting indices, we may replace $W_{j-2}W_{j}$, $W_{j-2}W_{j+1}$ and $W_{j-1}W_{j+1}$ by $W_{j-1} W_j$, and $W_{j-1}^2$ and $W_j^2$ by $W_{j-1}W_j + W_j$, respectively. 
As summary, we can represent the order-three term $W_{ j - 1 } W_j^2 $ as \begin{equation} \label{eq:expansion2} \begin{aligned} 10 W_{ j - 1 } W_j^2 = & - 2 W_{ j - 2 } W_{ j - 1 } W_j - 6 W_{ j - 2 } W_{ j - 1 } W_{ j + 1 } - 9 W_{ j - 2 } W_{ j } W_{ j + 1 } + 27 W_{ j - 1 } W_j W_{ j + 1 } \\ & + 10 W_{ j - 1 } W_{ j } + E_j^{(8)} . \end{aligned} \end{equation} Hence from now on, we are concerned with order-three term with distinct indices. For that purpose, we first show the following estimate. \begin{lemma} \label{lem:order3} We have that \[ \begin{aligned} & \mathbb{E}_n \bigg[ \sup_{0 \le t \le T} \bigg| \frac{1}{\sqrt{n}} \int_0^t \sum_{j \in \mathbb{Z} } \big\{ \overline{W}_{j-1}(s) \overline{W}_j(s) \overline{W}_{j+1}(s) - (\overrightarrow{W}^\ell_{j+1}(s) )^3 \big\} \nabla^n \varphi^n_j (s) ds \bigg|^2 \bigg] \\ & \quad \le C \bigg( \frac{T \ell}{n^2} + \frac{T^2}{\ell^2} \bigg) \| \partial_x \varphi \|^2_{L^2(\mathbb{R})} . \end{aligned} \] \end{lemma} \begin{proof} We use the decomposition \[ \begin{aligned} & \overline{W}_{j-1} \overline{W}_j \overline{W}_{j+1} - (\overrightarrow{W}^\ell_{j+1} )^3 \\ & \quad = \overline{W}_{j-1} \bigg( \overline{W}_j \overline{W}_{j+1} - (\overrightarrow{W}^\ell_{j+1})^2 + \frac{\sigma_n^2(\rho) }{\ell} \bigg) \\ & \qquad + (\overrightarrow{W}_{j+1}^\ell)^2 (\overline{W}_{j-1} - \overrightarrow{W}^\ell_j) + (\overrightarrow{W}_{j+1}^\ell)^2 (\overrightarrow{W}_j - \overrightarrow{W}^\ell_{j+1}) . \end{aligned} \] For the first term in the right hand side, noting $\overline{W}_{j-1}$ is independent of the quantity inside the parenthesis, we can conduct the same procedure as Theorem \ref{BG2}, which gives the bound $C (T \ell n^{-2} + T^2 \ell^{-2}) \| \partial_x \varphi \|^2_{L^2(\mathbb{R})}$. In addition, the third term can be treated by a direct $L^2$-estimate and we obtain the bound $CT^2\ell^{-2} \| \partial_x \varphi \|^2_{L^2(\mathbb{R})}$. For the second term, note that \[ (\overrightarrow{W}_{j+1}^\ell)^2 (\overline{W}_{j-1} - \overrightarrow{W}^\ell_j) = (\overrightarrow{W}_{j+1}^\ell)^2 \sum_{i=0}^{\ell-1} \psi_i (W_{j+i-1} - W_{j+i}) \] where $\psi_i = (\ell-i)/\ell$. Then the integration-by-parts formula (Lemma \ref{IBP}) yields \[ \begin{aligned} & E_{\nu^n_\rho} [f(\overrightarrow{W}_{j+1}^\ell)^2 (W_{j+i-1} - W_{j+i})] \\ & \quad = - E_{\nu^n_\rho} [\sigma_{j+i-1, j+i}(\overrightarrow{W}_{j+1}^\ell)^2 (\nabla_{j+i-1, j+i} f) W_{j+i-1}] - E_{\nu^n_\rho} [f (\nabla_{j+i-1, j+i}(\overrightarrow{W}_{j+1}^\ell)^2) W_{j+i-1}] \end{aligned} \] for any local function $f$. However, by Young's inequality, we have that \begin{equation} \label{eq:estimateorder3} \begin{aligned} & 2 \bigg\langle \sum_{j \in \mathbb{Z} } \sum_{i=0}^{\ell-1} F^\ell_{i,j } \psi_i \nabla^n \varphi^n_j , f \bigg\rangle_{L^2(\nu^n_\rho)} \\ & \quad \le \frac{n^2 g^\prime(0)}{2\ell}\sum_{j \in \mathbb{Z}} \sum_{i=0}^{\ell-1} E_{\nu^n_\rho} [W_{j+i-1} (\nabla_{j+i-1, j+i}f)^2] \\ & \qquad + \frac{2 \ell}{n^2 g^\prime(0)} \sum_{j \in \mathbb{Z}} \sum_{i=0}^{\ell-1} E_{\nu^n_\rho}[W_{j+i-1} \big(\sigma_{j+i-1, j+i}(\overrightarrow{W}_{j+1}^\ell)^2\big)^2] (\nabla^n \varphi^n_j )^2 \end{aligned} \end{equation} where we set \[ F^\ell_{i,j} = (\overrightarrow{W}_{j+1}^\ell)^2 (W_{j+i-1} - W_{j+i}) + (\nabla_{j+i-1, j+i}(\overrightarrow{W}_{j+1}^\ell)^2) W_{j+i-1} . 
\] Since the first term in the right hand side of \eqref{eq:estimateorder3} is bounded by $\| f \|^2_{1,n }$ and \[ E_{\nu^n_\rho}[W_{j+i-1} \big(\sigma_{j+i-1, j+i}(\overrightarrow{W}_{j+1}^\ell)^2\big)^2] = O(\ell^{-2}) , \] the Kipnis-Varadhan inequality yields \[ \mathbb{E}_n \bigg[ \sup_{0 \le t \le T} \bigg| \frac{1}{\sqrt{n}} \int_0^t \sum_{j \in \mathbb{Z}} \sum_{i=0}^{\ell-1} F^\ell_{i,j}(s) \psi_i \nabla^n \varphi^n_j (s) ds \bigg|^2 \bigg] \le C \frac{T }{n^2} \| \partial_x \varphi \|^2_{L^2(\mathbb{R})}. \] Therefore, it remains to estimate \[ \mathbb{E}_n \bigg[ \sup_{0 \le t \le T} \bigg| \frac{1}{\sqrt{n}} \int_0^t \sum_{j \in \mathbb{Z}} \sum_{i=0}^{\ell-1} (\nabla_{j+i-1, j+i}(\overrightarrow{W}_{j+1}^\ell(s))^2) W_{j+i-1}(s) \psi_i \nabla^n \varphi^n_j (s) ds \bigg|^2 \bigg] . \] Here we note that \[ \begin{aligned} \nabla_{j+i-1, j+i} (\overrightarrow{W}^\ell_{j+1})^2 = \ell^{-1} \overrightarrow{W}^\ell_{j+1} \nabla_{j+i-1, j+i} Z_{i,j} - \ell^{-2} (\nabla_{j+i-1, j+i} Z_{i,j} )^2 \end{aligned} \] provided $\eta_{j+i-1} > 0$ where $Z_{i,j} = W_{j+i-1}\mathbf{1}_{i \ge 1} + W_{j+i} \mathbf{1}_{i \ge 0}$. The second term gives small factor by a direct $L^2$ computation so that we may consider only the first term. Moreover, a change of variable yields \[ \begin{aligned} & E_{\nu^n_\rho} [ f \overrightarrow{W}^\ell_{j+1} \nabla_{j+i-1, j+i} Z_{i,j} W_{j+i-1} ] \\ & \quad = E_{\nu^n_\rho} [ \nabla_{j+i-1, j+i} (f \overrightarrow{W}^\ell_{j+1} ) Z_{i,j} W_{j+i} ] - E_{\nu^n_\rho} [ f \overrightarrow{W}^\ell_{j+1} ( W_{j+i-1} - W_{j+i} ) Z_{i,j} ] \\ & \quad = E_{\nu^n_\rho} [ (\nabla_{j+i-1, j+i} f) (\sigma_{j+i-1, j+i} \overrightarrow{W}^\ell_{j+1} ) Z_{i,j} W_{j+i} ] - E_{\nu^n_\rho} [ f (\nabla_{j+i-1, j+i} \overrightarrow{W}^\ell_{j+1} ) Z_{i,j} W_{j+i} ] \\ & \qquad - E_{\nu^n_\rho} [ f \overrightarrow{W}^\ell_{j+1}(W_{j+i-1}-W_{j+i}) Z_{i,j}] . \end{aligned} \] The first two terms can be estimated by the Kipnis-Varadhan inequality as the above and thus we may only consider the last term in the right-hand side. However, note that \[ \begin{aligned} & \frac{1}{\ell} \sum_{i=0}^{\ell-1} \overrightarrow{W}^\ell_{j+1}Z_{i,j}(W_{j+i-1}-W_{j+i}) \psi_i \\ & \quad = \frac{1}{\ell} \overrightarrow{W}^\ell_{j+1} (W^2_{j-1} - E_{\nu^n_\rho}[W^2_{j-1}] - \overrightarrow{(W^2)}^\ell_j ) - \frac{\ell-1}{\ell^2} W_j(W_j- W_{j+1}) - \frac{1}{\ell} W_{j-1} (W_{j-1} - W_j) . \end{aligned} \] All terms in the right-hand side can be treated by a direct $L^2$ computation, which gives the bound $C T^2 \ell^{-2} \|\partial_x \varphi\|^2_{L^2(\mathbb{R})}$. Hence we obtained the desired estimate and complete the proof. \end{proof} Now we see that the order-three terms with distinct indices do not affect the limit if they are centered. When $T \ge 1/n^2$, taking $\ell \sim \sqrt{T}n$, the bound in Lemma \ref{lem:order3} becomes $CT^{3/2} n^{-1} \|\partial_x \varphi \|^2_{L^2(\mathbb{R})}$. Moreover, since $E[(\overrightarrow{W}^\ell_j)^6]=O(\ell^{-3})$, we have that \[ \mathbb{E}_n \bigg[ \sup_{0 \le t \le T} \bigg| \frac{1}{\sqrt{n}} \int_0^t \sum_{j \in \mathbb{Z} } (\overrightarrow{W}^\ell_{j+1}(s) )^3 \nabla^n \varphi^n_j (s) ds \bigg|^2 \bigg] \le C \frac{T^2}{\ell^2} \| \partial_x \varphi \|^2_{L^2(\mathbb{R})} \le C \frac{T^{3/2}}{n} \| \partial_x \varphi \|^2_{L^2(\mathbb{R})} . 
\] Combining both estimates, we obtain the bound \[ \mathbb{E}_n \bigg[ \sup_{0 \le t \le T} \bigg| \frac{1}{\sqrt{n}} \int_0^t \sum_{j \in \mathbb{Z} } \overline{W}_{j-1}(s) \overline{W}_{j}(s) \overline{W}_{j+1}(s) \nabla^n \varphi^n_j (s) ds \bigg|^2 \bigg] \le C \frac{T^{3/2}}{n} \| \partial_x \varphi \|^2_{L^2(\mathbb{R})} . \] On the other hand, for the case $T < 1/n^2$, a direct $L^2$ computation yields \[ \begin{aligned} & \mathbb{E}_n \bigg[ \sup_{0 \le t \le T} \bigg| \frac{1}{\sqrt{n}} \int_0^t \sum_{j \in \mathbb{Z} } \overline{W}_{j-1}(s) \overline{W}_{j}(s) \overline{W}_{j+1}(s) \nabla^n \varphi^n_j (s) ds \bigg|^2 \bigg] \\ & \quad \le C T^2 \| \partial_x \varphi \|^2_{L^2(\mathbb{R})} \le C \frac{T^{3/2}}{n} \| \partial_x \varphi \|^2_{L^2(\mathbb{R})} . \end{aligned} \] We can control $\overline{W}_{j-2}\overline{W}_{j-1}\overline{W}_{j}$, $\overline{W}_{j-2}\overline{W}_{j-1}\overline{W}_{j+1}$ and $\overline{W}_{j-2}\overline{W}_{j}\overline{W}_{j+1}$ in the same way. Now we return to the expansion of $W_j - \eta_j$ in terms of the $W_j$'s. In the expansion \eqref{eq:expansion1}, we can make the indices of the order-three terms distinct by using \eqref{eq:expansion2}. Moreover, note that when replacing $W_j$ by its centered version $\overline{W}_j = W_j - \Phi_n(\rho)$, terms of the form $(1/n) \overline{W}_{j_1} \overline{W}_{j_2}$ with $j_1\neq j_2$ and constants do not affect the limit. In other words, in \eqref{eq:expansion1} and \eqref{eq:expansion2}, we can replace $W_{j-1}W_j W_{j+1}$ and $W_{j-1}W_j$ by $\overline{W}_{j-1}\overline{W}_{j}\overline{W}_{j+1} + 3\Phi_n(\rho)^2 W_j$ and $\overline{W}_{j-1}\overline{W}_{j} + 2\Phi_n(\rho) W_j$, respectively. As a consequence, $W_j - \eta_j $ can be expanded as \begin{equation} \label{expansion} \frac{ g^{ \prime \prime } (0) }{ 2 g^\prime (0) } \frac{ \overline{W}_{ j -1 } \overline{ W}_j }{ \sqrt{n} } + \bigg( \frac{b_1}{ \sqrt{n} } + \frac{b_0}{n} \bigg) \eta_j \end{equation} plus asymptotically negligible terms. Here the constants $b_0$ and $b_1$ are of the form \eqref{eq:framing}. This expansion immediately completes the proof of Lemma \ref{antisymm}. \section*{Acknowledgments} The author would like to thank Makiko Sasada and Hayate Suda for fruitful comments and discussions. \end{document}
\begin{document} \title{Convergence of Langevin-Simulated Annealing algorithms with multiplicative noise} \begin{abstract} We study the convergence of Langevin-Simulated Annealing type algorithms with multiplicative noise, i.e. for $V : \mathbb{R}^d \to \mathbb{R}$ a potential function to minimize, we consider the stochastic differential equation $dY_t = - \sigma \sigma^\top \nabla V(Y_t) dt + a(t)\sigma(Y_t)dW_t + a(t)^2\Upsilon(Y_t)dt$, where $(W_t)$ is a Brownian motion, where $\sigma : \mathbb{R}^d \to \mathcal{M}_d(\mathbb{R})$ is an adaptive (multiplicative) noise, where $a : \mathbb{R}^+ \to \mathbb{R}^+$ is a function decreasing to $0$ and where $\Upsilon$ is a correction term. This setting can be applied to optimization problems arising in Machine Learning; allowing $\sigma$ to depend on the position brings faster convergence in comparison with the classical Langevin equation $dY_t = -\nabla V(Y_t)dt + \sigma dW_t$. The case where $\sigma$ is a constant matrix has been extensively studied; however, little attention has been paid to the general case. We prove the convergence for the $L^1$-Wasserstein distance of $Y_t$ and of the associated Euler scheme $\bar{Y}_t$ to some measure $\nu^\star$ which is supported by $\argmin(V)$ and give rates of convergence to the instantaneous Gibbs measure $\nu_{a(t)}$ of density $\propto \exp(-2V(x)/a(t)^2)$. To do so, we first consider the case where $a$ is a piecewise constant function. We recover the classical schedule $a(t) = A\log^{-1/2}(t)$. We then prove the convergence for the general case by bounding the Wasserstein distance to the piecewise constant case using ergodicity properties. \end{abstract} \keywords{Stochastic Optimization, Langevin Equation, Simulated Annealing, Neural Networks} \MSC{62L20, 65C30, 60H35} \section{Introduction} Langevin-based algorithms are used to solve optimization problems in high dimension and have gained much interest in relation with Machine Learning. The Langevin equation is a Stochastic Differential Equation (SDE) which consists in a gradient descent with noise. More precisely, let $V : \mathbb{R}^d \rightarrow \mathbb{R}^+$ be a coercive potential function; then the associated Langevin equation reads $$ dX_t = -\nabla V(X_t)dt + \sigma dW_t , \ t \ge 0,$$ where $(W_t)$ is a $d$-dimensional Brownian motion and where $\sigma > 0$. Under standard assumptions, the invariant measure of this SDE is the Gibbs measure of density proportional to $e^{-2V(x)/\sigma^2}$ and for small enough $\sigma$, this measure concentrates around $\argmin(V)$ \cite{dalalyan2014} \cite{bras2021}. Adding a small noise to the gradient descent makes it possible to explore the space and to escape from traps such as local minima or saddle points appearing in non-convex optimization problems \cite{lazarev1992} \cite{dauphin2014}. This noise may also be interpreted as coming from the approximation of the gradient in stochastic gradient descent algorithms. Such methods have recently been brought back into the spotlight with Stochastic Gradient Langevin Dynamics (SGLD) algorithms \cite{welling2011} \cite{li2015}, especially for deep learning and the calibration of large artificial neural networks, which is a high-dimensional non-convex optimization problem.
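To fix ideas, the Euler discretization of this Langevin equation with step $\gamma$ is the update $x_{k+1} = x_k - \gamma \nabla V(x_k) + \sigma \sqrt{\gamma}\, \xi_{k+1}$ with $\xi_{k+1} \sim \mathcal{N}(0, I_d)$, which is the basic building block of SGLD. The following minimal Python sketch (ours, for illustration only; none of the function names come from the cited references) runs this update on a toy quadratic potential.
\begin{verbatim}
import numpy as np

def langevin_step(x, grad_V, gamma, sigma, rng):
    # One unadjusted Langevin update:
    # x_{k+1} = x_k - gamma * grad V(x_k) + sigma * sqrt(gamma) * N(0, I_d)
    return x - gamma * grad_V(x) + sigma * np.sqrt(gamma) * rng.standard_normal(x.shape)

# Toy example: V(x) = |x|^2 / 2, so grad V(x) = x.
rng = np.random.default_rng(0)
x = np.full(5, 10.0)
for _ in range(2000):
    x = langevin_step(x, lambda y: y, gamma=1e-2, sigma=0.5, rng=rng)
# After many steps, x is approximately distributed according to the Gibbs
# measure proportional to exp(-2 V(x) / sigma^2) = exp(-|x|^2 / sigma^2).
print(np.round(x, 3))
\end{verbatim}
In stochastic gradient descent algorithms, $\nabla V(x_k)$ would be replaced by a noisy estimate computed on a mini-batch of data.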
The Langevin-simulated annealing SDE is the Langevin equation where the noise parameter is slowly decreasing to $0$, namely \begin{equation} \label{eq:intro:1} dX_t = -\nabla V(X_t) dt + a(t) \sigma dW_t , \ t \ge 0, \end{equation} where $a : \mathbb{R}^+ \to \mathbb{R}^+$ is non-increasing and converges to 0. The idea is that the "instantaneous" invariant measure $\nu_{a(t)\sigma}$ which is the Gibbs measure of density $\propto \exp(-2V(x)/(a(t)^2\sigma^2))$ converges itself to $\argmin(V)$. This method indeed shares similarities with the original simulated annealing algorithm \cite{laarhoven1987}, which builds a Markov chain from the Gibbs measure using the Metropolis-Hastings algorithm and where the parameter $\sigma$, interpreted as a temperature, slowly decreases to zero over the iterations. In \cite{chaing1987} and \cite{royer1989} is shown that choosing $a(t) = A \log^{-1/2}(t)$ for some $A>0$ in \eqref{eq:intro:1} guarantees the convergence of $X_t$ to $\nu^\star$ defined as the limit measure of $(\nu_{a(t)})$ as $t \to \infty$ and which is supported by $\argmin(V)$. \cite{miclo1992} proves again the convergence of the SDE using free energies inequalities. These studies deeply rely on some Poincar\'e and log-Sobolev inequalities and require the following assumptions on the potential function: $$ \lim_{|x| \rightarrow \infty} V(x) = \lim_{|x| \rightarrow \infty} |\nabla V(x)| = \infty \quad \text{and} \quad \forall x \in \mathbb{R}^d, \ \Delta V(x) \le C + |\nabla V(x)|^2 .$$ \cite{zitt2008} proves that the convergence still holds under weaker assumptions, in particular where the gradient of the potential is not coercive, using weak Poincaré inequalities. In \cite{gelfand-mitter} is proved the convergence of the associated stochastic gradient descent algorithm. All these results are established in the so-called additive case, i.e. they highly rely on the fact that $\sigma$ is constant, whereas little attention has been paid to the multiplicative case, i.e. where $\sigma : \mathbb{R}^d \to \mathcal{M}_d(\mathbb{R}) $ is not constant and depends on $X_t$. Allowing $\sigma$ to be adaptive and to depend on the position highly extends the range of applications of Langevin algorithms and such adaptive algorithms are already widely used by practitioners and prove to be faster than non-adaptive algorithms and competitive with standard non-Langevin algorithms or even faster. See Section \ref{subsec:practitioner} where various specifications for $\sigma(x)$ that can be found in the Stochastic Optimization literature are briefly presented, and Section \ref{sec:experiments} where we show results of simulations of the training of an artificial neural network for various choices of $\sigma$. However, to our knowledge, a general result of convergence for Langevin algorithms with multiplicative noise is yet to be proved. \cite[Proposition 2.5]{pages2020} gives a general formula on $b$ and $\sigma$ so that the associated Gibbs measure is still the invariant measure of the SDE $dX_t = b(X_t)dt + \sigma(X_t)dW_t$; a simple example of acceleration of convergence using non-constant $\sigma$ is then given in \cite[Section 2.4]{pages2020}. More generally, \cite{ma2015} gives a characterization of any SGMCMC (Stochastic Gradient Markov Chain Monte Carlo) algorithm with multiplicative noise and with the corresponding Gibbs measure as a target. 
In practice, the matrix $\sigma$ is often chosen so that $\sigma \sigma^\top \simeq (\nabla^2 V)^{-1}$, but approximations are needed because of the high dimension of the matrix (e.g. only considering diagonal matrices). Still, our results also hold for non-diagonal $\sigma$, which opens the way to algorithms with such $\sigma$. In this paper, we consider the following SDE: \begin{align*} & dY_t = -(\sigma \sigma^\top \nabla V)(Y_t)dt + a(t) \sigma(Y_t) dW_t + \left(a^2(t) \left[\sum_{j=1}^d \partial_j(\sigma \sigma^\top)(Y_t)_{ij} \right]_{1 \le i \le d}\right) dt \\ & a(t) = \frac{A}{\sqrt{\log(t)}}, \end{align*} where the expression of the drift comes from \cite[Proposition 2.5]{pages2020} and where the second drift term is interpreted as a correction term so that $\nu_{a(t)}$ is still the "instantaneous" invariant measure. This last term boils down to $0$ if $\sigma$ is constant. The aim of this paper is to prove the convergence for the $L^1$-Wasserstein distance of the law of $Y_t$ to $\nu^\star$ in the setting adopted in \cite{pages2020}, assuming in particular the uniform convexity of the potential outside a compact set and the ellipticity and the boundedness of $\sigma$. We also prove the convergence of the corresponding Euler-Maruyama scheme with decreasing steps. Considering the convexity condition only outside a compact set is in fact quite different from the globally convex setting and turns out to be more demanding. This setting often appears in optimization problems (see Section \ref{sec:neural_networks}), where a characteristic set - the compact set - contains the interesting features of the model with traps such as local minima, and where outside of this set the loss function is coercive and convex. We give classic examples of neural networks where this setting applies. We adopt a \textit{domino strategy} as in \cite{pages2020}, inspired by proofs of weak error expansion of discretization schemes of diffusion processes, see \cite{talay1990} and \cite{bally1996}. In \cite{pages2020}, the convergence of the Euler-Maruyama scheme $\bar{X}$ with decreasing steps $(\gamma_n)$ of an ergodic and homogeneous SDE $X$ with non-constant $\sigma$ to the invariant measure of $X$ is proved. It then appears that the multiplicative case is much more demanding than the additive case. For a function $f : \mathbb{R}^d \to \mathbb{R}$, the \textit{domino strategy} consists in a step-by-step decomposition of the weak error to produce an upper bound as follows: \begin{align} | \mathbb{E}f(\bar{X}_{\Gamma_n}^x) - \mathbb{E}f(X_{\Gamma_n}^x)| & = | \bar{P}_{\gamma_1} \circ \cdots \circ \bar{P}_{\gamma_n} f(x) - P_{\Gamma_n}f(x) | \nonumber \\ \label{eq:domino_strategy} & \le \sum_{k=1}^n \left|\bar{P}_{\gamma_1} \circ \cdots \circ \bar{P}_{\gamma_{k-1}} \circ (\bar{P}_{\gamma_k}-P_{\gamma_k})\circ P_{\Gamma_n-\Gamma_k}f(x) \right|, \end{align} where $P$ and $\bar{P}$ are the transition kernels associated to $X$ and $\bar{X}$ respectively and where $\Gamma_n=\gamma_1+\cdots+\gamma_n$. Then two terms appear: first the "error" term, for large $k$, where the error is controlled by classic weak and strong bounds on the error of an Euler-Maruyama scheme, and the "ergodic" term, for small $k$, where the ergodicity of $X$ is used.
However, we cannot directly apply this strategy of proof to our problem since we consider a non-homogeneous SDE $Y$, so we proceed as follows: we consider instead the SDE $X$ where the coefficient $a(t)$ is non-increasing and piecewise constant and where the successive plateaux $[T_{n-1},T_{n})$ of $a$ are increasingly larger time intervals. On each plateau we obtain a homogeneous and uniformly elliptic SDE with an invariant Gibbs distribution $\nu_{a_n}$, where $a_n$ is the constant value of $a$ on $[T_{n-1},T_{n})$, to which a \textit{domino strategy} can be applied. This ellipticity fades with time since $a_n$ goes to $0$ and we need to carefully control its impact on the way the diffusion $X$ gets close to its "instantaneous" invariant Gibbs distribution $\nu_{a_n}$. To this end we have to refine several one-step weak error results from \cite{pages2020} and ergodic bounds from \cite{wang2020}. Doing so, we derive by induction an upper bound for the distance between $X_t$ and $\nu^\star$ after each plateau and prove that a coefficient $(a(t))$ of order $\log^{-1/2}(t)$ is a sufficient and generally necessary condition for convergence. Using this result, we then prove the convergence of $Y_t$ and its Euler-Maruyama scheme $\bar{Y}_t$ by bounding the distance between $X_t$ and $Y_t$ and between $X_t$ and $\bar{Y}_t$. We also consider the "Stochastic Gradient case", i.e. where the true gradient cannot be computed exactly and where a noise, which is a sequence of increments of a martingale, is added to the gradient. This case was treated in \cite{gelfand-mitter} in the additive setting. The process $X$ is used as a tool for the proof of the convergence of $Y_t$; however, the convergence of $X_t$ to $\nu^\star$ is also of independent interest since the "plateau" method is also used by practitioners. We also establish a convergence rate which is somehow limited by $\mathcal{W}_1(\nu_{a(t)},\nu^\star)$, which is of order $a(t)$ under the assumption that $\argmin(V)$ is finite and that $\nabla^2 V$ is positive definite at every element of $\argmin(V)$; if $\argmin(V)$ is still finite but $\nabla^2 V$ is not positive definite at every element of $\argmin(V)$, and if we assume instead that all the elements of $\argmin(V)$ are strictly polynomial minima, then the rate is of order $a(t)^\delta$ for some $\delta \in (0,1)$ \cite{bras2021}. We pay particular attention to the non-definite case, since it was pointed out in \cite{sagun2016} and \cite{sagun2017} that for some optimization problems arising in Machine Learning, the Hessian of the loss function at the end of the training turns out to be extremely singular. Indeed, as the dimension of the parameter which is used to minimize the loss function is large and as the neural network can be over-parametrized, many eigenvalues of the Hessian matrix are close to zero. However, this subject is still new in the Stochastic Optimization literature and needs more theoretical background. Still, we give sharper bounds on the rate of convergence of the $L^1$-Wasserstein distance between $X$ or $Y$ and $\nu_{a(t)}$, as in practice the optimization procedure stops at some (large) $t$ and the target distribution is actually $\nu_{a(t)}$ instead of $\nu^\star$. In a forthcoming paper, we shall prove the convergence in total variation distance. In this last case, the domino strategy is more complex to implement and requires regularization lemmas, as in \cite{pages2020} which studies the convergence for both distances. The article is organized as follows.
In Section \ref{sec:main-results} we first give the setting and assumptions of the problem we consider. This setting is taken from \cite{pages2020}. We then state our main results of convergence as well as convergence rates. In Section \ref{sec:optimization} we show how this setting applies to some classic optimization problems arising in Machine Learning and present several general choices for $\sigma$ that are used in practice. In Section \ref{sec:langevin} we consider the case where the coefficient $a$ is constant and give convergence rates to the invariant measure taking into account the ellipticity parameter. We also give preliminary lemmas for the rate of convergence of $\nu_{a_n}$ to $\nu^\star$. In Section \ref{sec:X} we prove the convergence of the solution of the SDE where $a$ is piecewise constant, by "plateaux". Using the dependence on $a$ of the rate of convergence to the invariant measure in the ergodic case, we prove the convergence to $\argmin(V)$. In Section \ref{sec:Y} we then prove the convergence of the SDE in the case where $a$ is not piecewise constant but continuously decreasing. This is done by bounding the Wasserstein distance with the "plateau" case and revisiting the lemmas for strong and weak errors from \cite{pages2020}. In Section \ref{sec:bar-Y} and Section \ref{sec:bar-X} we also prove the convergence for the corresponding Euler-Maruyama schemes. The proofs actually follow the same strategy as the previous ones. In Section \ref{sec:experiments} we present experiments of training of neural networks using various specifications for $\sigma$; the algorithms with multiplicative $\sigma$ prove to be faster than the algorithm with constant $\sigma$. \textsc{Notations} We endow the space $\mathbb{R}^d$ with the canonical Euclidean norm denoted by $| \boldsymbol{\cdot} |$ and we denote $\langle \cdot, \cdot \rangle$ the associated canonical inner product. For $x \in \mathbb{R}^d$ and for $R>0$, we denote $\textbf{B}(x,R) = \lbrace y \in \mathbb{R}^d : \ |y-x| \le R \rbrace$. For $M \in (\mathbb{R}^d)^{\otimes k}$, we denote by $\|M\|$ its operator norm, i.e. $\|M\| = \sup_{u \in \mathbb{R}^{d\times k}, \ |u|=1} M \cdot u$. If $M : \mathbb{R}^d \to (\mathbb{R}^d)^{\otimes k}$, we denote $\|M\|_\infty = \sup_{x \in \mathbb{R}^d} \|M(x)\|$. For $f :\mathbb{R}^d \rightarrow \mathbb{R}$ such that $\min_{\mathbb{R}^d}(f)$ exists, we denote $\text{argmin}(f) = \left\lbrace x \in \mathbb{R}^d : \ f(x) = \min_{\mathbb{R}^d}(f) \right\rbrace$. We say that $f$ is coercive if $\lim_{|x| \rightarrow \infty} f(x) = + \infty$. If $f$ is Lipschitz continuous, we denote by $[f]_{\text{Lip}}$ its Lipschitz constant. For $k \in \mathbb{N}$ and if $f$ is $\mathcal{C}^k$, we denote by $\nabla ^k f : \mathbb{R}^d \rightarrow (\mathbb{R}^d)^{\otimes k}$ its differential of order $k$. For a random vector $X$, we denote by $[X]$ its law. We denote the $L^p$-Wasserstein distance between two distributions $\pi_1$ and $\pi_2$ on $\mathbb{R}^d$: $$ \mathcal{W}_p(\pi_1, \pi_2) = \inf \left\lbrace \left(\int_{\mathbb{R}^d} |x-y|^p \pi(dx,dy) \right)^{1/p} : \ \pi \in \mathcal{P}(\pi_1,\pi_2) \right\rbrace ,$$ where $\mathcal{P}(\pi_1,\pi_2)$ stands for the set of probability distributions on $(\mathbb{R}^d \times \mathbb{R}^d, \mathcal{B}or(\mathbb{R}^d)^{\otimes 2})$ with respective marginal laws $\pi_1$ and $\pi_2$.
For $p=1$, let us recall the Kantorovich-Rubinstein representation of the Wasserstein distance of order 1 \cite[Equation (6.3)]{villani2009}: $$ \mathcal{W}_1(\pi_1,\pi_2) = \sup \left\lbrace \int_{\mathbb{R}^d} f(x) (\pi_1-\pi_2)(dx) : \ f : \mathbb{R}^d \to \mathbb{R}, \ [f]_{\text{Lip}} = 1 \right\rbrace .$$ For $x \in \mathbb{R}^d$, we denote by $\delta_x$ the Dirac mass at $x$. For $x$, $y \in \mathbb{R}^d$ we denote $(x,y) = \lbrace ux + (1-u)y, \ u \in [0,1] \rbrace$ the geometric segment between $x$ and $y$. For $u$, $v \in \mathbb{R}$, we define $u \ \text{mod}(v) = u - v \lfloor u/v \rfloor$. If $u_n$ and $v_n$ are two real-valued sequences, we write $u_n \asymp v_n$ meaning that $u_n = O(v_n)$ and $v_n = O(u_n)$. In this paper, we use the notation $C$ to denote a positive real constant, which may change from line to line. The constant $C$ depends on the parameters of the problem: the coefficients of the SDE, the choice of $A$ in $a(t) = A \log^{-1/2}(t)$, the upper bound $\bar{\gamma}$ on the decreasing steps, but $C$ does not depend on $t$ nor $x$. \section{Assumptions and main results} \label{sec:main-results} \subsection{Assumptions} Let $V : \mathbb{R}^d \rightarrow (0,+\infty)$ be a $\mathcal{C}^2$ potential function such that $V$ is coercive and \begin{equation} \label{eq:def_A} (x \mapsto |x|^2 e^{-2V(x)/A^2}) \in L^1(\mathbb{R}^d) \text{ for some } A>0. \end{equation} Then $V$ admits a minimum on $\mathbb{R}^d$. Moreover, let us assume that \begin{align} V^\star :=\min_{\mathbb{R}^d} V >0, \quad \argmin(V) = \lbrace x_1^\star, \ldots, x_{m^\star}^\star \rbrace, \quad \forall \ i =1,\ldots,m^\star, \ \nabla^2 V(x_i^\star) >0, \owntag[eq:min_V]{$\mathcal{H}_{V1}$} \end{align} i.e. $\min_{\mathbb{R}^d} V$ is attained at a finite number $m^\star$ of points and in each point the Hessian matrix is positive definite. We then define for $a \in (0,A]$ the Gibbs measure $\nu_{a}$ of density : \begin{equation} \label{eq:def_nu} \nu_a(dx) = \mathcal{Z}_{a} e^{-2(V(x)-V^\star)/a^2} dx , \quad \mathcal{Z}_{a} = \left( \int_{\mathbb{R}^d} e^{-2(V(x)-V^\star)/a^2} dx \right)^{-1} \end{equation} Following \cite[Theorem 2.1]{hwang1980}, the measure $\nu_a$ converges weakly to $\nu^\star$ as $a \to 0$, where $\nu^\star$ is the weighted sum of Dirac measures: \begin{equation} \nu^\star = \left(\sum_{j=1}^{m^\star} \left(\det \nabla^2 V(x_j^\star) \right)^{-1/2} \right)^{-1} \sum_{i=1}^{m^\star} \left(\det \nabla^2 V(x_i^\star)\right)^{-1/2} \delta_{x_i^\star}. \end{equation} We consider the following Langevin SDE in $\mathbb{R}^d$: \begin{align} \label{eq:def_Y} & Y_0^{x_0} = x_0 \in \mathbb{R}^d, \\ & dY_t^{x_0} = b_{a(t)}(Y_t^{x_0})dt + a(t) \sigma(Y_t^{x_0}) dW_t, \nonumber \end{align} where, for $a\ge 0$, the drift $b_a$ is given by \begin{equation} \label{eq:def_b} b_a(x) = -(\sigma \sigma^\top \nabla V)(x) + a^2 \left[\sum_{j=1}^d \partial_j(\sigma \sigma^\top)_{ij}(x) \right]_{1 \le i \le d} =: -(\sigma \sigma^\top \nabla V)(x) + a^2 \Upsilon(x), \end{equation} where $W$ is a standard $\mathbb{R}^d$-valued Brownian motion defined on a probability space $(\Omega, \mathcal{A}, \mathbb{P})$, where $ \sigma : \mathbb{R}^d \to \mathcal{M}_{d}(\mathbb{R})$ is $C^2$ and \begin{equation} \label{eq:def_a} a(t) = \frac{A}{\sqrt{\log(t+e)}} \end{equation} where $A$ is defined in \eqref{eq:def_A} and with $\log(e)=1$. 
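For concreteness, the following short Python sketch (our own illustration, not part of any cited implementation) evaluates the drift $b_a$ of \eqref{eq:def_b}, approximating the correction term $\Upsilon_i(x) = \sum_{j=1}^d \partial_j(\sigma\sigma^\top)_{ij}(x)$ by central finite differences; \texttt{sigma} and \texttt{grad\_V} are user-supplied callables.
\begin{verbatim}
import numpy as np

def upsilon(sigma, x, h=1e-5):
    # Correction term Upsilon_i(x) = sum_j d/dx_j (sigma sigma^T)_{ij}(x),
    # approximated here by central finite differences (illustration only).
    d = x.size
    ups = np.zeros(d)
    for j in range(d):
        e = np.zeros(d)
        e[j] = h
        S_plus = sigma(x + e) @ sigma(x + e).T
        S_minus = sigma(x - e) @ sigma(x - e).T
        ups += (S_plus[:, j] - S_minus[:, j]) / (2.0 * h)
    return ups

def drift_b(x, a, grad_V, sigma):
    # b_a(x) = -(sigma sigma^T grad V)(x) + a^2 * Upsilon(x), cf. the definition above.
    S = sigma(x) @ sigma(x).T
    return -S @ grad_V(x) + a**2 * upsilon(sigma, x)
\end{verbatim}
In practice one may prefer automatic differentiation, or a closed-form expression of $\Upsilon$ when $\sigma$ is diagonal.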
This equation corresponds to a gradient descent on the potential $V$ with preconditioning $\sigma$ and multiplicative noise; the second term in the drift \eqref{eq:def_b} is a correction term (see \cite[Proposition 2.5]{pages2020}) which is zero for constant $\sigma$. We make the following assumptions on the potential $V$: \begin{align} \lim_{|x| \rightarrow + \infty} V(x) = + \infty, \ \ |\nabla V|^2 \le CV \ \text{ and } \sup_{x \in \mathbb{R}^d} || \nabla^2 V(x)|| < + \infty , \owntag[eq:V_assumptions]{$\mathcal{H}_{V2}$} \end{align} which implies in particular that $V$ has at most quadratic growth. Let us also assume that \begin{align} \sigma \text{ is bounded and Lipschitz continuous,} \ \nabla^2 \sigma \text{ is bounded}, \ \nabla(\sigma\sigma^\top) \nabla V \text{ is bounded}, \owntag[eq:sigma_assumptions]{$\mathcal{H}_\sigma$} \end{align} and that $\sigma$ is uniformly elliptic, i.e. \begin{equation} \label{eq:ellipticity} \exists \ubar{\sigma}_0 > 0, \ \forall x \in \mathbb{R}^d, \ (\sigma \sigma^\top) (x) \ge \ubar{\sigma}_0^2 I_d . \end{equation} Assumptions \eqref{Eq:eq:V_assumptions} and \eqref{Eq:eq:sigma_assumptions} imply that $\Upsilon$ is also bounded and Lipschitz continuous and that $b_a$ is Lipschitz continuous uniformly in $a \in [0,A]$. Let $[b]_{\text{Lip}}$ be the minimal constant such that: $$ \forall a \in [0,A], \ b_a \text{ is } [b]_{\text{Lip}} \text{-Lipschitz continuous} .$$ We make the non-uniform dissipative (or convexity) assumption outside of a compact set: there exist $\alpha_0 >0$ and $R_0 >0$ such that \begin{align} \forall x, y \in \textbf{B}(0,R_0)^c, \ \left\langle \left( \sigma \sigma^\top \nabla V\right)(x)-\left(\sigma \sigma^\top \nabla V \right) (y), \ x-y \right\rangle \ge \alpha_0 |x-y|^2. \owntag[eq:V_confluence]{$\mathcal{H}_{cf}$} \end{align} Taking $y \in \textbf{B}(0,R_0)^c$ fixed, letting $|x| \to \infty$ and using the boundedness of $\sigma$, \eqref{Eq:eq:V_confluence} implies that $|\nabla V|$ is coercive. Using \eqref{Eq:eq:V_assumptions} and the boundedness of $\sigma$, there exists $C>0$ (depending on $A$) such that: $$ \forall a \in [0,A], \ 1 + |b_a(x)| \le CV^{1/2}(x) .$$ Let $(\gamma_n)_{n \ge 1}$ be a non-increasing sequence of positive steps. We define $\Gamma_n := \gamma_1 + \cdots + \gamma_n$ and for $t \ge 0$: $$ N(t) := \min \lbrace k \ge 0 : \ \Gamma_{k+1} > t \rbrace = \max \lbrace k \ge 0 : \ \Gamma_k \le t \rbrace .$$ We make the classical assumptions on the step sequence, namely \begin{equation} \gamma_n \downarrow 0, \quad \sum_{n \ge 1} \gamma_n = + \infty \quad \text{and} \quad \sum_{n \ge 1} \gamma_n^2 < + \infty \owntag[eq:gamma_assumptions]{$\mathcal{H}_{\gamma1}$} \end{equation} and we also assume that \begin{align} \varpi := \limsup_{n \to \infty} \frac{\gamma_n - \gamma_{n+1}}{\gamma_{n+1}^2} < \infty . \owntag[eq:gamma_assumptions_2]{$\mathcal{H}_{\gamma2}$} \end{align} For example, if $\gamma_n = \gamma_1/n^\alpha$ with $\alpha \in (1/2,1)$ then $\varpi = 0$; if $\gamma_n = \gamma_1/n$ then $\varpi = 1/\gamma_1$. In Stochastic Gradient algorithms, the true gradient is measured with a zero-mean noise $\zeta$, whose law only depends on the current position.
That is, let us consider a family of random fields $(\zeta_n(x))_{x \in \mathbb{R}^d, n \in \mathbb{N}}$ such that for every $n \in \mathbb{N}$, $(\omega, x) \in \Omega \times \mathbb{R}^d \mapsto \zeta_n(x,\omega)$ is measurable and for all $x \in \mathbb{R}^d$, the law of $\zeta_n(x)$ only depends on $x$ and $(\zeta_n(x))_{n \in \mathbb{N}}$ is an i.i.d. sequence independent of $W$. We make the following assumptions: \begin{equation} \label{eq:zeta_assumptions} \forall x \in \mathbb{R}^d, \ \forall p \ge 1, \ \mathbb{E}[\zeta_{1}(x)] = 0 \quad \text{and} \quad \mathbb{E}[|\zeta_{1}(x)|^p] \le C_p V^{p/2}(x). \end{equation} We then consider the Euler-Maruyama scheme with decreasing steps associated to $(Y_t)$: \begin{align} \label{eq:def_Y_bar} & \bar{Y}_0^{x_0} = x_0, \quad \bar{Y}_{\Gamma_{n+1}}^{x_0} = \bar{Y}_{\Gamma_n} + \gamma_{n+1} \left(b_{a(\Gamma_{n})}(\bar{Y}^{x_0}_{\Gamma_{n}}) + \zeta_{n+1}(\bar{Y}_{\Gamma_n}^{x_0}) \right) + a(\Gamma_{n}) \sigma(\bar{Y}_{\Gamma_n}^{x_0})(W_{\Gamma_{n+1}} - W_{\Gamma_n}), \end{align} We extend $\bar{Y}^{x_0}_{\boldsymbol{\cdot}}$ on $\mathbb{R}^+$ by considering its genuine continuous interpolation: \begin{equation} \label{eq:def_Y_bar_genuine} \forall t \in [\Gamma_n, \Gamma_{n+1}), \ \bar{Y}^{x_0}_{t} = \bar{Y}^{x_0}_{\Gamma_n} + (t-\Gamma_n) \left(b_{a(\Gamma_n)}(\bar{Y}^{x_0}_{\Gamma_n}) + \zeta_{n+1}(\bar{Y}_{\Gamma_n}^{x_0}) \right) + a(\Gamma_n) \sigma(\bar{Y}^{x_0}_{\Gamma_n}) (W_t - W_{\Gamma_n}) . \end{equation} \subsection{Main results} We now state our main results. \begin{theorem} \label{thm:main} \begin{enumerate}[label=(\alph*)] \item Let $Y$ be defined in \eqref{eq:def_Y}. Assume \eqref{Eq:eq:min_V}, \eqref{Eq:eq:V_assumptions}, \eqref{Eq:eq:sigma_assumptions}, \eqref{eq:ellipticity} and \eqref{Eq:eq:V_confluence}. If $A$ is large enough, then for every $x_0 \in \mathbb{R}^d$, $$ \mathcal{W}_1([Y_t^{x_0}],\nu^\star) \underset{t \rightarrow \infty}{\longrightarrow} 0. $$ More precisely, for every $t >0$: $$ \mathcal{W}_1([Y_t^{x_0}],\nu^\star) \le C \max(1+|x_0|,V(x_0))a(t) $$ and for every $\alpha \in (0,1)$ we have $$ \mathcal{W}_1([Y_t^{x_0}],\nu_{a(t)}) \le C \max(1+|x_0|,V(x_0))t^{-\alpha} . $$ \item Let $\bar{Y}$ be defined in \eqref{eq:def_Y_bar}. Assume \eqref{Eq:eq:min_V}, \eqref{Eq:eq:V_assumptions}, \eqref{Eq:eq:sigma_assumptions}, \eqref{eq:ellipticity} and \eqref{Eq:eq:V_confluence}. Assume furthermore \eqref{Eq:eq:gamma_assumptions} and \eqref{Eq:eq:gamma_assumptions_2}, that $V$ is $\mathcal{C}^3$ with $\|\nabla^3 V\| \le CV^{1/2}$ and that $\sigma$ is $\mathcal{C}^3$ with $\|\nabla^3(\sigma \sigma^\top) \| \le CV^{1/2}$. If $A$ is large enough then for every $x_0 \in \mathbb{R}^d$, $$ \mathcal{W}_1([\bar{Y}_t^{x_0}],\nu^\star) \underset{t \rightarrow \infty}{\longrightarrow} 0. $$ More precisely, for every $t >0$: $$ \mathcal{W}_1([\bar{Y}_t^{x_0}],\nu^\star) \le C\max(1+|x_0|,V^2(x_0)) a(t) ,$$ and for every $\alpha \in (0,1)$ we have $$ \mathcal{W}_1([\bar{Y}_t^{x_0}],\nu_{a(t)}) \le C\max(1+|x_0|,V^2(x_0))t^{-\alpha} . $$ \end{enumerate} \end{theorem} \begin{remark} In particular, if $\argmin V = \lbrace x^\star \rbrace$ is reduced to a point, we can rewrite the conclusions of Theorem \ref{thm:main} as $\|Y_t^{x_0} - x^\star\|_1 \to 0$ and $\|\bar{Y}_t^{x_0} - x^\star\|_1 \to 0$ respectively and so on. 
\end{remark} \subsection{The degenerate case} In this subsection we consider the case where some of the $\nabla^2 V(x_i^\star)$'s may not be positive definite but where the $x_i^\star$'s are strictly polynomial minima, i.e. $V(x)-V(x_i^\star)$ is bounded below in a neighbourhood of $x_i^\star$ by a non-negative polynomial function vanishing only at $x_i^\star$. This case can be treated in a similar way using the change of variable given in \cite{bras2021}. First, let us restate the results from \cite[Theorem 4]{bras2021}. To simplify, let us assume that $\argmin(V)$ is reduced to a point. \begin{theorem} \label{thm:athreya} Assume that $V$ is $C^{2p}$ with $p \ge 2$, is coercive, that $\argmin(V) = \lbrace x^\star \rbrace$, that $e^{-AV} \in L^1(\mathbb{R}^d)$ for some $A>0$ and that $x^\star$ is a strictly polynomial minimum of order $2p$, i.e. $p$ is the smallest integer such that $$ \exists r >0, \ \forall h \in \textbf{B}(x^\star,r)\setminus \lbrace 0 \rbrace, \ \sum_{k=0}^{2p} \frac{1}{k!} \nabla^k V(x^\star) \cdot h^{\otimes k} > 0 .$$ Assume also the technical hypothesis \cite[(8)]{bras2021} if $p \ge 5$. Then there exist $B \in \mathcal{O}_d(\mathbb{R})$, $\alpha_1, \ \ldots, \ \alpha_d \in \lbrace 1/2, \ldots, 1/(2p) \rbrace$ and a polynomial function $g : \mathbb{R}^d \to \mathbb{R}$ which is not constant in any of its variables such that $$ \forall h \in \mathbb{R}^d, \ \frac{1}{s}[ V(x^\star + B \cdot (s^{\alpha_1}h_1,\ldots, s^{\alpha_d}h_d)) - V(x^\star)] \underset{s \to 0}{\longrightarrow} g(h) .$$ Moreover assume that $g$ is coercive. Then if $Z_s \sim \nu_{\sqrt{2s}}$, $$ \left( \frac{(B^{-1}\cdot (Z_s-x^\star))_1}{s^{\alpha_1}}, \cdots, \frac{(B^{-1}\cdot (Z_s-x^\star))_d}{s^{\alpha_d}} \right) \overset{\mathscr{L}}{\longrightarrow} Z \quad \text{ as } s \to 0,$$ where $Z$ has density proportional to $\exp(-g)$. \end{theorem} \begin{theorem} \label{thm:non_definite} Let us make the same assumptions as in Theorem \ref{thm:athreya} and assume that $V^\star > 0$. Assume furthermore \eqref{Eq:eq:V_assumptions}, \eqref{Eq:eq:sigma_assumptions}, \eqref{eq:ellipticity} and \eqref{Eq:eq:V_confluence}. Assume also \eqref{Eq:eq:gamma_assumptions} and \eqref{Eq:eq:gamma_assumptions_2}, that $V$ is $\mathcal{C}^3$ with $\|\nabla^3 V\| \le CV^{1/2}$ and that $\sigma$ is $\mathcal{C}^3$ with $\|\nabla^3(\sigma \sigma^\top) \| \le CV^{1/2}$. Let us denote $\alpha_{\min} := \min(\alpha_1,\ldots,\alpha_d)$. Then for every $\alpha \in (0,1)$ we have $$ \mathcal{W}_1([Y_t^{x_0}],\nu_{a(t)}) \le C \max(1+|x_0|,V(x_0))t^{-\alpha} \quad \textup{ and } \quad \mathcal{W}_1([\bar{Y}_t^{x_0}],\nu_{a(t)}) \le C \max(1+|x_0|,V^2(x_0))t^{-\alpha} $$ and $$ \mathcal{W}_1([Y_t^{x_0}],\nu^\star) \le C \max(1+|x_0|,V(x_0))a(t)^{2\alpha_{\min}} \textup{ and } \mathcal{W}_1([\bar{Y}_t^{x_0}],\nu^\star) \le C \max(1+|x_0|,V^2(x_0))a(t)^{2\alpha_{\min}}. $$ \end{theorem} The proof is given in the Supplementary Material. \section{Application to optimization problems} \label{sec:optimization} \subsection{Potential function associated to a Neural Regression Problem} \label{sec:neural_networks} The setting described in Section \ref{sec:main-results} can first be applied to convex optimization problems where the potential function $V$ has quadratic growth as $|x| \rightarrow \infty$.
Classical examples are least-squares regression and logistic regression with quadratic regularization, that is: $$ \min_{x \in \mathbb{R}^d} \frac{1}{M} \sum_{i=1}^M \log(1+e^{-v_i \langle u_i, x \rangle}) + \frac{\lambda}{2} |x|^2 ,$$ where $v_i \in \lbrace -1, +1 \rbrace$ and $u_i \in \mathbb{R}^d$ are the data samples associated with a binary classification problem and where $\lambda >0$ is the regularization parameter. We now consider a scalar regression problem with a fully connected neural network with quadratic regularization. Let $\varphi : \mathbb{R} \rightarrow \mathbb{R}$ be the sigmoid function. To simplify the proofs, we may consider instead a smooth function approximating the sigmoid function such that $\varphi'$ has compact support. Let $K \in \mathbb{N}$ be the number of layers and for $k=1$, $\ldots$, $K$, let $d_k \in \mathbb{N}$ be the size of the $k^{\text{th}}$ layer with $d_K=1$. For $u \in \mathbb{R}^{d_{k-1}}$ and for $\theta \in \mathcal{M}_{d_k,d_{k-1}}(\mathbb{R})$, we define $\varphi_{\theta}(u) := [\varphi([\theta \cdot u]_i)]_{1 \le i \le d_k}$. The output of the neural network is \begin{align*} & \psi : \mathcal{M}_{d_1,d_0}(\mathbb{R}) \times \cdots \times \mathcal{M}_{d_K,d_{K-1}}(\mathbb{R}) \times \mathbb{R}^{d_0} \to \mathbb{R} \\ & \psi (\theta_1,\ldots,\theta_K,u) = \psi(\theta,u) = \varphi_{\theta_K} \circ \ldots \circ \varphi_{\theta_1}(u). \end{align*} Let $u_i \in \mathbb{R}^{d_0}$ and $v_i \in \mathbb{R}$ be the data samples for $1 \le i \le M$. The objective is $$ \underset{\theta_1, \ldots, \theta_K}{\text{minimize}} \quad V(\theta) := \frac{1}{2M} \sum_{i=1}^M (\psi(\theta_1, \ldots, \theta_K,u_i) -v_i)^2 + \frac{\lambda}{2}|\theta|^2 ,$$ where $\theta = (\theta_1, \ldots, \theta_K)$ and where $\lambda > 0$. \begin{proposition} Consider a neural network with a single layer: $\psi(\theta,u) = \varphi(\langle \theta, u \rangle)$. Assume that the data $u$ and $v$ are bounded and that $u$ admits a continuous density. Then $V$ satisfies \eqref{Eq:eq:V_assumptions} and for some $R_0, \ \alpha_0 >0$, \begin{equation} \label{eq:NN_condition} \forall x, \ y \in \textbf{B}(0,R_0)^c, \ \langle \nabla V(x) - \nabla V(y), x-y \rangle \ge \alpha_0|x-y|^2 . \end{equation} \end{proposition} \begin{proof} Note that $\varphi$, $\varphi'$ and $\varphi''$ are bounded. The function $\psi$ is bounded, so $$ 2V(\theta) = \int (\varphi(\langle \theta, u \rangle) - v)^2 P(du,dv) + \lambda|\theta|^2 \sim \lambda|\theta|^2 \quad \text{ as } |\theta| \rightarrow \infty ,$$ so $V$ is coercive. Moreover, we have \begin{align*} \nabla V = \int u \varphi'(\langle \theta, u \rangle) (\varphi(\langle \theta, u \rangle) - v) P(du,dv) + \lambda \theta \end{align*} so $\nabla V(\theta) \sim \lambda\theta$ as $|\theta| \rightarrow \infty$ and $|\nabla V|^2 \le CV$. Then, let us assume that the support of $\varphi'$ is included in $[-1,1]$, that $u$ has its values in $\textbf{B}(0,1)$ and $v$ in $[-1,1]$. Then the set $ \lbrace u \in \textbf{B}(0,1) : \ |\langle \theta,u \rangle| < 1 \rbrace $ has Lebesgue measure no larger than $C/|\theta|$, so $$ \left\|\nabla^2 \int (\varphi(\langle \theta, u \rangle) - v)^2 P(du,dv) \right\| \le C/|\theta|,$$ so outside the compact set $\lbrace |\theta| \le 2C/\lambda \rbrace$, we have $\nabla ^2 V \ge (\lambda/2) I_d$ in the sense of quadratic forms, which guarantees \eqref{eq:NN_condition}. \end{proof} However, we cannot directly extend this proposition to multi-layer neural networks.
Nevertheless, if we consider that the training stops when a parameter becomes too large and if we replace $\psi(\theta,u)$ by $\psi(\phi(\theta),u)$, where $\phi : \mathbb{R} \to \mathbb{R}$ is a smooth approximation of $x \mapsto \min(x,R)\mathds{1}_{x \ge 0} + \max(x,-R)\mathds{1}_{x<0}$ with $R>0$ large, and where $\phi$ is applied coordinate by coordinate in order to avoid over-fitting, then the resulting potential $V$ with quadratic regularization satisfies \eqref{Eq:eq:V_assumptions} and \eqref{eq:NN_condition}. \subsection{Practitioner's corner: choices for $\sigma$} \label{subsec:practitioner} In this section we briefly present general choices for the non-constant matrix $\sigma$ that are often used in the Stochastic Optimization and Machine Learning literature. \cite{welling2011} introduced the Stochastic Gradient Langevin Dynamics (SGLD) algorithm with a constant preconditioning matrix $\sigma$. \cite{dauphin2015} adapted the well-known Newton method, which consists in considering $\sigma \sigma^\top = (\nabla^2 V)^{-1}$, to SGLD. Since the Hessian matrix may be too large in practice, since inverting it is computationally costly and since it may not be positive definite at every point, it is suggested to consider instead $|\text{diag}((\nabla^2V))^2|^{-1/2}$. However, computing high-order derivatives may be cumbersome; \cite{simsekli2016} adapts to SGLD the quasi-Newton method \cite{nocedal2006}, which approximates the Hessian matrix, yielding the Stochastic Quasi-Newton Langevin algorithm. \cite{duchi2011} and \cite{li2015} give algorithms where the choice for $\sigma$ is $\sigma \simeq \text{diag}((\lambda + |\nabla V|)^{-1})$, where $\lambda >0$ guarantees numerical stability. The idea of using geometry has been explored in \cite{patterson2013}, where $\sigma^{-2}$ defines the local curvature of a Riemannian manifold, giving the Stochastic Gradient Riemannian Langevin Dynamics algorithm, where $\sigma$ is equal to $\mathcal{I}_x^{-1/2}$, $\mathcal{I}_x$ being the Fisher information matrix, or to some other choice (see \cite[Table 1]{patterson2013}) as $\mathcal{I}_x$ may be intractable. \cite{ma2015} extends the previous algorithm to Hamiltonian Monte Carlo methods, where a momentum variable is added in order to take into account the "inertia" of the trajectory, yielding the Stochastic Gradient Riemannian Hamiltonian Monte Carlo method. Allowing the matrix $\sigma$ to depend on the position yields a faster convergence; we refer to the previous references, where the simulations show that these new methods greatly improve on classical stochastic gradient algorithms. In particular, we refer to the simulations in \cite[Figure 2]{simsekli2016}, \cite[Figure 2]{patterson2013} and \cite[Figure 3]{ma2015} where the different methods based on multiplicative noise are compared. \section{Langevin equation with constant time coefficient} \label{sec:langevin} In this section, we consider the following $\mathbb{R}^d$-valued homogeneous SDE: \begin{align} \label{eq:X_SDE} & X_0^x = x \in \mathbb{R}^d, \quad dX_t^x = b_a(X_t^x)dt + a\sigma(X_t^x)dW_t, \end{align} with $a \in(0,A]$ and where $b_a$ is defined in \eqref{eq:def_b}. The drift is specified in such a way that the Gibbs measure $\nu_{a}$ defined in \eqref{eq:def_nu} is the unique invariant distribution of $(X_t^x)$ (see \cite[Proposition 2.5]{pages2020}).
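As a concrete illustration (ours, not taken from the cited references), the following sketch simulates \eqref{eq:X_SDE} by an Euler-Maruyama scheme with a diagonal multiplicative $\sigma$ in the spirit of the preconditioner $\text{diag}((\lambda + |\nabla V|)^{-1})$ of Section \ref{subsec:practitioner}, applied coordinate-wise; for brevity the correction term $a^2\Upsilon$ of the drift $b_a$ is neglected here (it vanishes for constant $\sigma$; see the finite-difference sketch above for how to include it).
\begin{verbatim}
import numpy as np

def em_plateau(x0, grad_V, a, gamma=1e-2, n_steps=20000, lam=1.0, rng=None):
    # Euler-Maruyama simulation of the homogeneous SDE with constant a and
    # diagonal sigma(x) = diag((lam + |grad V(x)|)^{-1}) (coordinate-wise).
    # Sketch only: the a^2 * Upsilon correction term of b_a is neglected.
    rng = np.random.default_rng() if rng is None else rng
    x = np.array(x0, dtype=float)
    for _ in range(n_steps):
        g = grad_V(x)
        s = 1.0 / (lam + np.abs(g))   # diagonal entries of sigma(x)
        # drift -(sigma sigma^T grad V)(x) plus diffusion a * sigma(x) dW
        x = x - gamma * (s**2) * g + a * s * np.sqrt(gamma) * rng.standard_normal(x.shape)
    return x

# Toy usage on the double-well potential V(x) = sum((x_i^2 - 1)^2):
x_T = em_plateau(np.full(3, 2.0), lambda x: 4.0 * x * (x**2 - 1.0), a=0.3)
print(np.round(x_T, 3))
\end{verbatim}
Running the same loop on successive plateaux with decreasing values of $a$ mimics the piecewise constant schedule $(a_n)$ studied below.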
\subsection{Exponential contraction property} We now prove contraction properties of the SDE \eqref{eq:X_SDE} under the uniform convex setting on the whole $\mathbb{R}^d$ or outside a compact set \eqref{Eq:eq:V_confluence}. If the uniform dissipative assumption holds on $\mathbb{R}^d$ then we have the following contraction property. \begin{proposition} Let $Z$ be the solution of \begin{align*} & Z_0^x = x \in \mathbb{R}^d, \quad dZ_t^x = b^Z(Z_t^x)dt + \sigma^Z(Z_t^x) dW_t, \end{align*} where the coefficients $b^Z$ and $\sigma^Z$ are Lipschitz continuous. Assume the uniform convexity i.e. there exists $\alpha >0$ such that \begin{equation} \label{eq:uniform_dissipative} \forall x, y \in \mathbb{R}^d, \langle b^Z(x)-b^Z(y), \ x-y \rangle + \frac{1}{2}|| \sigma^Z(x)-\sigma^Z(y) ||^2 \le - \alpha |x-y|^2 . \end{equation} Then: $$ \forall x,y \in \mathbb{R}^d, \ \mathcal{W}_1\left(\left[Z^x_t\right], \left[Z^y_t\right]\right) \le C|x-y|e^{-\alpha t} .$$ \end{proposition} \begin{proof} By the It\={o} lemma, $t \mapsto e^{2\alpha t}\left|Z^x_t - Z^y_t\right|^2$ is a super-martingale, so $$ \mathbb{E} \left|Z^x_t - Z^y_t\right|^2 \le e^{-2\alpha t}|x-y|^2 ,$$ which yields the desired result. \end{proof} This proposition can be applied to $X$ under the assumption $$ \forall x, y \in \mathbb{R}^d, \langle b_a(x)-b_a(y), \ x-y \rangle + \frac{a^2}{2}|| \sigma(x)-\sigma(y) ||^2 \le - \alpha |x-y|^2, $$ which may be hard to check because of the dependence in $a$. In \cite[Corollary 2.4]{pages2020} is proved that this contraction property is still true under the uniform convexity outside a compact set \eqref{Eq:eq:V_confluence}. We make this statement more precise by expliciting the dependence in $a$. \begin{theorem} \label{thm:confluence} Under the assumption \eqref{Eq:eq:V_confluence}, \begin{enumerate}[label=(\alph*)] \item For every $x$, $y \in \mathbb{R}^d$, \begin{align} \mathcal{W}_1\left(\left[X^x_t\right], \left[X^y_t\right]\right) \le C e^{C_1/a^2} |x-y|e^{-\rho_a t}, \quad \rho_a := e^{-C_2/a^2} \owntag[eq:W_confluence]{$\mathcal{P}_{cf}$} \end{align} where the constants $C$, $C_1$, $C_2$ do not depend on $a$. \item For every $x \in \mathbb{R}^d$, \begin{align} \label{eq:confluence_nu_a} & \mathcal{W}_1\left(\left[X^x_t\right],\nu_a\right) \le C e^{C_1/a^2} e^{-\rho_a t}\mathcal{W}_1(\delta_x, \nu_a). \end{align} \end{enumerate} \end{theorem} \begin{proof} $(a)$ We refine the proof of \cite[Theorem 2.6]{wang2020} to enhance the dependence of the constants in the parameter $a$. 
First we remark, as in the proof of \cite[Corollary 2.4]{pages2020} (Section 4.5 therein), that since $a \in (0,A]$ and $\sigma\sigma^\top$ is bounded, Assumption (2.17) of \cite{wang2020}, which states that there exist constants $K_1$, $K_2$ and $r_0>0$ such that, with $\ubar{\sigma} := \sqrt{\sigma \sigma^\top - \ubar{\sigma}_0^2 I_d}$, \begin{align} & \frac{a^2}{2}\|\ubar{\sigma}(x)-\ubar{\sigma}(y)\|^2 - \frac{|a^2(\sigma(x)-\sigma(y))^\top (x-y)|^2}{2|x-y|^2} + \langle b_a(x) - b_a(y), x-y \rangle \nonumber \\ & \quad \le \left( (K_1+K_2)\mathds{1}_{|x-y|\le r_0}-K_2 \right)|x-y|^2, \ x, \ y \in \mathbb{R}^d, \label{eq:wang_2.17} \end{align} holds as soon as there exist positive constants $\widetilde{K}_1$, $\widetilde{K}_2$ and $R_1$ such that $$ \forall x, y \in \mathbb{R}^d, \ \langle b_a(x) - b_a(y), x-y \rangle \le \widetilde{K}_1 \mathds{1}_{|x-y|\le R_1} - \widetilde{K}_2|x-y|^2 ,$$ which is, up to changing the positive constants, equivalent to $$ \forall x, y \in \mathbb{R}^d, \ \langle b_0(x) - b_0(y), x-y \rangle \le \widetilde{K}_1 \mathds{1}_{|x-y|\le R_1} - \widetilde{K}_2|x-y|^2 ,$$ which is in turn equivalent to \eqref{Eq:eq:V_confluence}. Then we repeat the argument leading to (4.3) in \cite{wang2020}. Using the ellipticity assumption \eqref{eq:ellipticity}, we rewrite the SDE as $$ dX_t = b_a(X_t) dt + a(\ubar{\sigma}(X_t)dW^1_t + \ubar{\sigma}_0 dW^2_t) ,$$ where $\ubar{\sigma} \ge 0$ and where $(W^1_t)$ and $(W^2_t)$ are two independent Brownian motions in $\mathbb{R}^d$ (which can be expressed in terms of $W$). For $x \ne y$, let $X^x$ be the solution of this SDE with $X_0=x$ and let $Y^y$ solve the following coupled SDE with $Y^y_0 = y$: $$ dY^y_t = b_a(Y^y_t)dt + a\ubar{\sigma}(Y^y_t)dW^1_t + a\ubar{\sigma}_0 \left( dW^2_t - 2 \frac{\langle X^x_t-Y^y_t, dW^2_t\rangle(X^x_t-Y^y_t)}{|X^x_t-Y^y_t|^2} \right) .$$ The process $Y^y$ is thus obtained by reflecting, at every instant $t$, the $W^2$-component of the noise with respect to the hyperplane orthogonal to $X^x_t-Y^y_t$ (reflection coupling). This SDE has a unique solution up to the coupling time $$ T_{x,y} := \inf \lbrace t \ge 0 : \ X^x_t = Y^y_t \rbrace ,$$ and for $t \ge T_{x,y}$ we set $Y^y_t = X^x_t$. Then $Y^y$ has the same distribution as $X^y$, i.e. it is a weak solution of \eqref{eq:X_SDE} with starting value $y$, and it follows from \eqref{eq:wang_2.17} and from the It\=o formula applied to $|X^x-Y^y|$ that for every $0 \le u \le t \le T_{x,y}$, $$ |X^x_t-Y^y_t| - |X^x_u-Y^y_u| \le M_t - M_u + \int_u^t \left((K_1 + K_2)\mathds{1}_{|X^x_s-Y^y_s|\le r_0} - K_2 \right)|X^x_s-Y^y_s|ds, $$ where $$ M_t = \int_0^t \frac{a\langle 2 \ubar{\sigma}_0\, dW^2_s + (\ubar{\sigma}(X^x_s) - \ubar{\sigma}(Y^y_s))dW^1_s, \ X^x_s-Y^y_s \rangle }{|X^x_s-Y^y_s|} $$ is a true martingale whose bracket process satisfies \begin{equation} \label{eq:wang:M_inequality} \langle M \rangle_t \ge 4 a^2 \ubar{\sigma}_0^2 t .
\end{equation} We now set, as in the proof of (4.3) in \cite{wang2020}, $$ p_t := \left|X^x_t-Y^y_t\right| \quad \text{and} \quad \bar{p}_t := \varepsilon p_t + 1 - e^{-Np_t},$$ where $$ N := \frac{r_0}{a^2\ubar{\sigma}_0^2}(K_1+K_2) \quad \text{ and } \quad \varepsilon := Ne^{-Nr_0} .$$ Then we have: $$ \varepsilon p_t \le \bar{p}_t \le (N+\varepsilon)p_t, \quad \text{and} \quad \forall r \in [0,r_0), \ \frac{2N^2}{r(\varepsilon e^{Nr}+N)} \ge \frac{K_1+K_2}{a^2\ubar{\sigma}_0^2} .$$ Then using \eqref{eq:wang:M_inequality} we derive for all $0 \le u \le t \le T_{x,y}$: \begin{align*} & \bar{p}_t - \bar{p}_u \le \int_u^t (\varepsilon + Ne^{-Np_s})dM_s + \int_u^t (\varepsilon + Ne^{-Np_s})\left((K_1+K_2)\mathds{1}_{p_s\le r_0} - K_2 - \frac{2N^2 a^2 \ubar{\sigma}_0^2}{p_s(\varepsilon e^{Np_s}+N)} \right) p_s ds \\ & \quad \le \tilde{M}_t - \tilde{M}_u - K_2 \int_u^t (\varepsilon+Ne^{-Np_s})p_s ds \le \tilde{M}_t - \tilde{M}_u - \varepsilon K_2 \int_u^t p_s ds \le \tilde{M}_t - \tilde{M}_u - \frac{\varepsilon K_2}{N+\varepsilon} \int_u^t \bar{p}_s ds. \end{align*} Hence $$ \mathbb{E}[\bar{p}_t - \bar{p}_u] = \mathbb{E}[(\bar{p}_t - \bar{p}_u) \mathds{1}_{t \le T_{x,y}}] \le - \frac{\varepsilon K_2}{N+\varepsilon} \int_u^t \mathbb{E}\bar{p}_s ds ,$$ so that $$ \frac{d}{dt} \mathbb{E}[\bar{p}_t] \le - \frac{\varepsilon K_2}{N+\varepsilon} \mathbb{E}[\bar{p}_t] $$ and then $$ \mathbb{E} \bar{p}_t \le \bar{p}_0 e^{-\frac{\varepsilon K_2}{N+\varepsilon} t}. $$ Noting that $\bar{p}_0 \le (N+\varepsilon)|x-y|$, we have $$ \mathbb{E} p_t \le \frac{N+\varepsilon}{\varepsilon}|x-y| e^{-\frac{\varepsilon K_2}{N+\varepsilon} t} ,$$ so that $$ \mathcal{W}_1\left(\left[X^x_t\right],\left[X^y_t\right]\right) \le \frac{N+\varepsilon}{\varepsilon}|x-y|e^{-\frac{\varepsilon K_2}{N+\varepsilon}t} \le C e^{C_1/a^2} |x-y| e^{-e^{-C_2/a^2} t} .$$ \noindent $(b)$ As $\nu_a$ is the invariant distribution of the diffusion \eqref{eq:X_SDE}, using \eqref{Eq:eq:W_confluence} and the convexity of the Wasserstein distance, we have \begin{align*} \mathcal{W}_1\left(\left[X^x_t\right],\nu_a\right) & \le \int_{\mathbb{R}^d} \mathcal{W}_1\left(\left[X^x_t\right],\left[X^y_t\right]\right) \nu_a(dy) \le C e^{C_1/a^2} e^{-\rho_a t} \int_{\mathbb{R}^d} |x-y| \nu_a(dy) \\ & \le C e^{C_1/a^2}e^{-\rho_a t} \mathcal{W}_1(\delta_x, \nu_a) . \end{align*} \end{proof} \subsection{Time schedule and Wasserstein distance between Gibbs measures} For $C_{(T)}>0$ and for $\beta>0$, let us define the time schedule that will be used for the plateau SDE in the next section: \begin{equation} \label{eq:def_T_n} T_n := C_{(T)}n^{1+\beta}, \end{equation} and by a slight abuse of notation we define \begin{equation} \label{eq:def_a_n} a_n := a(T_n) = \frac{A}{\sqrt{\log(T_n+e)}} \quad \text{and} \quad \rho_n := \rho_{a_n} = e^{-C_2/a_n^2}. \end{equation} \begin{lemma} \label{lemma:app:a_n_diff} The sequence $a_n = A \log^{-1/2}(T_n+e)$ satisfies \begin{equation} \label{eq:a_n_diff} 0 \le a_n - a_{n+1} \asymp (n \log^{3/2}(n))^{-1}. \end{equation} \end{lemma} \begin{proof} One straightforwardly checks that \begin{align*} a_n - a_{n+1} \sim -\frac{d}{dn}\left( \frac{A}{\sqrt{\log(C_{(T)}n^{1+\beta}{+}e)}} \right) = \frac{A(1+\beta)}{2\log^{3/2}(C_{(T)}n^{1+\beta}{+}e)\left(n{+}e/(C_{(T)}n^\beta)\right)} \asymp \frac{1}{n\log^{3/2}(n)}. \end{align*} \end{proof} We prove the following result, which will be useful to study the convergence of the plateau SDE. \begin{proposition} \label{prop:W_nu} Let $\nu_a$, $a \in (0,A]$, be the Gibbs measure defined in \eqref{eq:def_nu}.
Assume that $V$ is coercive, that $(x \mapsto |x|^2 e^{-2V(x)/A^2}) \in L^1(\mathbb{R}^d)$ and \eqref{Eq:eq:min_V}. Then for every $n \in \mathbb{N}$, $$ \mathcal{W}_1(\nu_{a_n},\nu_{a_{n+1}}) \le \frac{C}{n \log^{3/2}(n)} .$$ Moreover, for every $s$, $t \in [a_{n+1},a_n]$, we have $$ \mathcal{W}_1(\nu_s,\nu_t) \le \frac{C}{n \log^{3/2}(n)} .$$ \end{proposition} The proof of this proposition is given in the Supplementary Material. It relies on the following lemma. \begin{lemma} \label{lemma:acceptance_rejection} Let $\mu$ and $\nu$ be two probability distributions on $\mathbb{R}^d$ with respective densities $f$ and $g$ and with finite moments of order $p$. Assume that there exists $M \ge 1$ such that $f \le Mg$. Then $$ \mathcal{W}_p(\mu,\nu)^p \le \mathbb{E}|X-Y|^p - \frac{1}{M}\mathbb{E}|X-\tilde{X}|^p,$$ where $X, \tilde{X} \sim \mu$, $Y \sim \nu$ and $X$, $\tilde{X}$, $Y$ are mutually independent. \end{lemma} \begin{proof} We define a coupling of $\mu$ and $\nu$ inspired by acceptance-rejection sampling as follows. Let $X \sim \mu$, $Y \sim \nu$ and $U \sim \mathcal{U}([0,1])$ be independent, and let $$X'= Y \mathds{1}\lbrace U \le f(Y)/(Mg(Y))\rbrace + X \mathds{1}\lbrace U > f(Y)/(Mg(Y))\rbrace .$$ Then, adapting the proof of the acceptance-rejection method, $X' \sim \mu$, so that $(X',Y)$ is a coupling of $(\mu,\nu)$ and $\mathcal{W}_p(\mu,\nu)^p \le \mathbb{E}|X'-Y|^p$. Moreover: \begin{align*} \mathbb{E}|X'-Y|^p & = \mathbb{E}|Y-X|^p \mathds{1}\lbrace U > f(Y)/(Mg(Y))\rbrace \\ & = \int_{(\mathbb{R}^d)^2} |y-x|^p \left(1 - \frac{f(y)}{Mg(y)}\right) f(x)g(y)dxdy \\ & = \int_{(\mathbb{R}^d)^2} |y-x|^p f(x)g(y)dxdy - \frac{1}{M} \int_{(\mathbb{R}^d)^2} |y-x|^p f(x)f(y)dx dy \\ & = \mathbb{E}|X-Y|^p - \frac{1}{M}\mathbb{E}|X-\tilde{X}|^p. \end{align*} \end{proof} \begin{lemma} \label{lemma:W_nu_a_nu_star} We have \begin{equation} \mathcal{W}_1(\nu_{a_n}, \nu^\star) \le Ca_n . \end{equation} \end{lemma} \begin{proof} First let us prove that $\mathcal{W}_1(\nu_a,\nu^\star) \to 0$ as $a \to 0$. By Proposition \ref{prop:W_nu} and using that $$ \textstyle \sum_{n \ge 2} (n\log^{3/2}(n))^{-1}< \infty ,$$ $(\nu_{a_n})$ is a Cauchy sequence in $(L^1(\mathbb{R}^d),\mathcal{W}_1)$, so it converges in $\mathcal{W}_1$, hence also weakly, to some limit measure $\widetilde{\nu}$. Since, on the other hand, $\nu_a$ weakly converges to $\nu^\star$ as $a \to 0$, we get $\widetilde{\nu} = \nu^\star$. Moreover, $\mathcal{W}_1(\nu_{a_n},\nu^\star)$ is bounded by the tail of the above series, which is of order $\log^{-1/2}(n)$, i.e. of order $a_n$. \end{proof} \section{Plateau case} \label{sec:X} We define $(X_t)$ as the solution of the following SDE, whose coefficients are piecewise constant with respect to time; $X$ is then said to evolve "by plateaux": \begin{align} \label{eq:def_X} & X_0^{x_0} = x_0, \quad dX_t^{x_0} = b_{a_{k+1}}(X_t^{x_0})dt + a_{k+1} \sigma(X_t^{x_0})dW_t, \quad t \in [T_k,T_{k+1}], \end{align} where $b_a$ is defined in \eqref{eq:def_b}, $(T_n)$ is defined in \eqref{eq:def_T_n} and $(a_n)$ is defined in \eqref{eq:def_a_n}. We note that although the coefficients are not continuous, the process $(X_t^{x_0})$ is well defined as it is the continuous concatenation of the solutions of the equations on the intervals $[T_k, T_{k+1}]$. More generally, we define $(X^{x,n}_t)$ as the solution of \begin{align*} X^{x,n}_0 &= x, \quad dX_t^{x,n} = b_{a_{k+1}}(X_t^{x,n})dt + a_{k+1} \sigma(X_t^{x,n})dW_t, \quad t \in [T_k-T_n,T_{k+1}-T_n], \ k \ge n, \end{align*} i.e. $(X_t^{x,n})$ has the law of $(X_{T_n+t})_{t \ge 0}$ conditionally on $X_{T_n}=x$. We have $X_{t}^x = X_{t}^{x,0}$.
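As a quick numerical illustration, the schedule \eqref{eq:def_T_n} and \eqref{eq:def_a_n} is straightforward to evaluate; the sketch below (Python; \texttt{C\_T}, \texttt{A}, \texttt{beta}, \texttt{C2} stand for $C_{(T)}$, $A$, $\beta$, $C_2$ and their numerical values are arbitrary placeholders) also returns the contraction rate $\rho_n$ of Theorem \ref{thm:confluence}, and makes visible how slowly $a_n$ decreases, which is why the lower bounds on $A$ in the results below involve $C_1$ and $C_2$.
\begin{verbatim}
import numpy as np

def schedule(n, C_T=1.0, A=5.0, beta=0.5, C2=1.0):
    # Plateau time T_n = C_T * n^(1+beta), noise level a_n = A / sqrt(log(T_n + e))
    # and contraction rate rho_n = exp(-C2 / a_n^2), cf. (eq:def_T_n)-(eq:def_a_n).
    T_n = C_T * n ** (1.0 + beta)
    a_n = A / np.sqrt(np.log(T_n + np.e))
    rho_n = np.exp(-C2 / a_n ** 2)
    return T_n, a_n, rho_n

# Example: print the first few plateau times, noise levels and rates.
for n in range(1, 6):
    print(n, schedule(n))
\end{verbatim}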
\begin{theorem} \label{thm:conv_X} Let $X$ be defined in \eqref{eq:def_X}. If \begin{equation} \label{eq:hyp_A} A > \max \left( \sqrt{(1+\beta^{-1})C_2}, \sqrt{(1+\beta)C_1} \right), \end{equation} where $C_1$ and $C_2$ are given in \eqref{Eq:eq:W_confluence}, then for every $x_0 \in \mathbb{R}^d$: $$ \mathcal{W}_1([X^{x_0}_t], \nu^\star) \underset{t \rightarrow \infty}{\longrightarrow} 0.$$ More precisely, for $t \ge 0$ we have $$ \mathcal{W}_1([X^{x_0}_t], \nu^\star) \le Ca(t)(1+|x_0|). $$ Moreover, for every $C' < C_{(T)}$ and for all $n$ large enough (depending on $C'$), on the time schedule $(T_n)$ we have $$ \mathcal{W}_1([X^{x_0}_{T_{n}}], \nu_{a_{n}}) \le Cn^{-1+(\beta+1)C_1/A^2} e^{-(C')^{1-C_2/A^2}(\beta+1)n^{\beta-(\beta+1)C_2/A^2}}(1+|x_0|) $$ and we have \begin{equation*} \mathcal{W}_1([X^{x_0}_t],\nu_{a(t)}) \le \frac{C(1+|x_0|)}{t^{(\beta+1)^{-1}-C_1/A^2} \log^{3/2}(t)}. \end{equation*} \end{theorem} \begin{proof} For fixed $x \in \mathbb{R}^d$ and using Theorem \ref{thm:confluence} we have: $$ \mathcal{W}_1( [X^{x,n}_{T_{n+1}-T_n} ], \nu_{a_{n+1}}) \le C e^{C_1/a_{n+1}^2} e^{-\rho_{a_{n+1}} (T_{n+1}-T_n)} \mathcal{W}_1(\delta_x,\nu_{a_{n+1}}). $$ Integrating in $x$ with respect to the law of $X^{x_0}_{T_n}$ (and using the existence of an optimal coupling, see for example \cite[Proposition 1.3]{wang2012}) then yields: \begin{equation} \label{eq:proof:X:1} \mathcal{W}_1([X^{x_0}_{T_{n+1}}], \nu_{a_{n+1}}) \le C e^{C_1/a_{n+1}^2} e^{-\rho_{a_{n+1}} (T_{n+1}-T_n)} \left( \mathcal{W}_1([X^{x_0}_{T_n}],\nu_{a_n}) + \mathcal{W}_1(\nu_{a_n},\nu_{a_{n+1}}) \right) . \end{equation} Iterating this relation yields \begin{align} \mathcal{W}_1([X^{x_0}_{T_{n+1}}], \nu_{a_{n+1}}) & \le \mu_{n+1} \mathcal{W}_1(\nu_{a_n},\nu_{a_{n+1}}) + \mu_{n+1} \mu_n \mathcal{W}_1(\nu_{a_{n-1}}, \nu_{a_n}) + \cdots + \mu_{n+1} \cdots \mu_1 \mathcal{W}_1(\nu_{a_0},\nu_{a_1}) \nonumber \\ & \quad + \mu_{n+1} \cdots \mu_1 \mathcal{W}_1(\delta_{x_0}, \nu_{a_0}), \label{eq:proof:X:2} \end{align} where \begin{align} \mu_n & := C e^{C_1/a_n^2}e^{-\rho_{a_n}(T_n-T_{n-1})} = C (T_n+e)^{C_1/A^2} e^{-(T_n+e)^{-C_2/A^2}(T_n-T_{n-1})} \nonumber \\ & \le C(C_{(T)}n^{\beta+1}+e)^{C_1/A^2} e^{-(C_{(T)}n^{\beta+1}+e)^{-C_2/A^2}C_{(T)}(\beta+1)(n-1)^\beta} \nonumber \\ & \le Cn^{(\beta+1)C_1/A^2} e^{-(C')^{1-C_2/A^2}(\beta+1)n^{\beta-(\beta+1)C_2/A^2}}, \label{eq:def_mu} \end{align} where we have used \eqref{eq:def_T_n} and where the last inequality holds for large enough $n$. Note that $\mu_n$ is bounded by a sequence of the form $n^\delta \exp(-Ln^\eta)$, which is $o(n^{-\ell})$ for every $\ell \ge 0$. Owing to \eqref{eq:hyp_A}, we have $\beta - (\beta+1)C_2/A^2 >0$. On the other hand, if $Z \sim \nu_{a_0}$ then $\mathcal{W}_1(\delta_{x_0}, \nu_{a_0}) = \mathbb{E}|x_0 - Z| \le |x_0| + \mathbb{E}|Z|$.
Plugging this into \eqref{eq:proof:X:2} and using that $\mu_n \to 0$ so is bounded and smaller than $1$ for $n$ large enough and then $(\mu_{n-1} \cdots \mu_k)_{1 \le k \le n-1}$ is bounded ; using Proposition \ref{prop:W_nu} and that $\sum_{n} (n\log^{3/2}(n))^{-1} < \infty$ yields \begin{align*} \mathcal{W}_1([X^{x_0}_{T_{n+1}}], \nu_{a_{n+1}}) & \le \mu_{n+1} \mathcal{W}_1(\nu_{a_n},\nu_{a_{n+1}}) + C\mu_{n+1}\mu_n \left( \mathcal{W}_1(\nu_{a_{n-1}}, \nu_{a_n}) + \cdots + \mathcal{W}_1(\nu_{a_0}, \nu_{a_{1}}) \right) \\ & \quad + C\mu_{n+1}\mu_n\mathcal{W}_1(\delta_{x_0},\nu_{a_0}) \\ & \le \mu_{n+1} \mathcal{W}_1(\nu_{a_n},\nu_{a_{n+1}}) + C\mu_{n+1} \mu_n + C\mu_{n+1}\mu_n (1+|x_0|) \\ & \le C \frac{\mu_{n+1}}{n\log^{3/2}(n)}(1+|x_0|) \le C \mu_{n+1} a_{n+1}(1+|x_0|), \end{align*} where we used that $\mu_n = o(\mathcal{W}_1(\nu_{a_n},\nu_{a_{n+1}}))$. Then using Lemma \ref{lemma:W_nu_a_nu_star} we have $$ \mathcal{W}_1([X^{x_0}_{T_{n+1}}],\nu^\star) \le \mathcal{W}_1([X^{x_0}_{T_{n+1}}], \nu_{a_{n+1}}) + \mathcal{W}_1(\nu_{a_{n+1}}, \nu^\star) \le Ca_n (1+|x_0|) ,$$ where we used once again $\mu_n \to 0$. Now, let us prove that $\mathcal{W}_1([X^{x_0}_t],\nu^\star) \to 0$ as $t \to \infty$. For $t \in [0,T_{n+1}-T_n)$ we integrate \eqref{eq:confluence_nu_a} with respect to the law of $X^{x_0}_{T_n}$, giving \begin{align} \mathcal{W}_1([X^{x_0}_{T_n+t}], \nu_{a_{n+1}}) & \le Ce^{C_1 a_{n+1}^{-2}} e^{-\rho_{a_{n+1}} t} \mathcal{W}_1([X^{x_0}_{T_n}], \nu_{a_{n+1}}) \nonumber \\ & \le Ce^{C_1 a_{n+1}^{-2}}\left( \mathcal{W}_1([X^{x_0}_{T_n}], \nu_{a_n}) + \mathcal{W}_1(\nu_{a_n}, \nu_{a_{n+1}}) \right) \nonumber \\ & \le Ce^{C_1 a_{n+1}^{-2}} \mathcal{W}_1(\nu_{a_n}, \nu_{a_{n+1}})(1+|x_0|) \nonumber \\ & \le \frac{C(1+|x_0|)}{n^{1-(\beta + 1)C_1/A^2} \log^{3/2}(n)}. \label{eq:proof:X_t_nu} \end{align} Now, for $t \ge 0$, let $n$ be such that $t \in [T_n,T_{n+1})$. Then $(n+1)\ge t^{1/(\beta+1)}$ and \begin{align} \mathcal{W}_1([X^{x_0}_{t}], \nu_{a(t)}) \le \mathcal{W}_1([X^{x_0}_{t}], \nu_{a_{n+1}}) + \mathcal{W}_1(\nu_{a_{n+1}}, \nu_{a(t)}) \le \frac{C(1+|x_0|)}{t^{(\beta+1)^{-1}-C_1/A^2} \log^{3/2}(t)}, \label{eq:proof:X_t_nu:2} \end{align} where we used the second claim of Proposition \ref{prop:W_nu}. Furthermore owing to \eqref{eq:hyp_A} we have $(\beta + 1)C_1/A^2 < 1$, so that $$ \mathcal{W}_1([X^{x_0}_{T_n+t}],\nu^\star) \le \mathcal{W}_1([X^{x_0}_{T_n+t}], \nu_{a_{n+1}}) + \mathcal{W}_1(\nu_{a_{n+1}},\nu^\star) \le Ca_n(1+|x_0|). $$ \end{proof} \begin{remark} We find again the classic schedule $a(t)$ of order $\log^{-1/2}(t)$. If for example we choose instead $a_n = \log(T_n)^{-(1+\varepsilon)/2}$ for some $\varepsilon > 0$, then we obtain $$ \log(\mu_1 \cdots \mu_n) = n\log(C) + \frac{C_1}{A^2} \sum_{k=1}^n \log^{1+\varepsilon}(T_k) - \sum_{k=1}^n \frac{T_k-T_{k-1}}{T_k^{\log^\varepsilon(T_k) C_2/A^2}} . $$ Hence, as $T_n - T_{n-1} = o(T_n^{\log^\varepsilon(T_n) C_2/A^2})$, $\mu_1 \cdots \mu_n$ does not converge to $0$ whatever the value of $A>0$ is. \end{remark} \section{Continuously decreasing case} \label{sec:Y} We now consider $(Y_t)$ solution to \eqref{eq:def_Y} i.e. the Langevin equation where the time coefficient $a(t)$ before $\sigma$ is continuously decreasing. More generally, since $Y$ is solution to a non-homogeneous SDE, we define for every $x \in \mathbb{R}^d$ and for every fixed $u \ge 0$: \begin{align} \label{eq:def_Y:2} Y_{0,u}^{x} & = x, \quad dY_{t,u}^{x} = b_{a(t+u)}(Y_{t,u}^{x})dt + a(t+u) \sigma(Y_{t,u}^{x}) dW_t, \end{align} so that $Y^x = Y^x_{\cdot, 0}$. 
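As a small complement, the continuously decreasing noise level and the instantaneous coefficients of \eqref{eq:def_Y:2} can be evaluated as follows (Python sketch; consistently with \eqref{eq:def_a_n} we take $a(t) = A\log^{-1/2}(t+e)$, \texttt{grad\_V}, \texttt{Sigma} and \texttt{Upsilon} are placeholder callables, and the drift is written in the form $-\sigma\sigma^\top\nabla V + a^2\Upsilon$ that appears in the proofs below, the precise definition of $\Upsilon$ being part of \eqref{eq:def_b} and not reproduced here).
\begin{verbatim}
import numpy as np

def a_cont(t, A=5.0):
    # Continuously decreasing noise level a(t) = A / sqrt(log(t + e)).
    return A / np.sqrt(np.log(t + np.e))

def coefficients_Y(y, t, u, grad_V, Sigma, Upsilon, A=5.0):
    # Instantaneous coefficients of (eq:def_Y:2) at elapsed time t after the shift u:
    # drift b_{a(t+u)}(y) and diffusion matrix a(t+u) * Sigma(y).
    a = a_cont(t + u, A)
    S = Sigma(y)
    drift = -S @ S.T @ grad_V(y) + a ** 2 * Upsilon(y)
    return drift, a * S
\end{verbatim}
The only difference with the plateau process $X$ of Section \ref{sec:X} is that the latter freezes this level at $a_{k+1}$ on each interval $[T_k,T_{k+1})$.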
We define the kernel $P^Y_{t,u}$ associated with $Y$ between the times $u$ and $u+t$ by $P^Y_{t,u} f(x) = \mathbb{E}[f(Y^x_{t,u})]$ for all measurable $f : \mathbb{R}^d \to \mathbb{R}^+$. We also consider $X$ as defined in \eqref{eq:def_X} and the associated kernel $P^{X,n}_t$, defined by $P^{X,n}_t f(x) = \mathbb{E}[f(X^{x,n}_t)]$ for every measurable $f: \mathbb{R}^d \to \mathbb{R}^+$. \subsection{Boundedness of the potential} \begin{lemma} \label{lemma:D.1a:cont} Let $p>0$. Then there exists $C>0$ such that for every $n \ge 0$, for every $u \ge 0$ and for every $x \in \mathbb{R}^d$: $$ \sup_{t \ge 0} \mathbb{E} V^p(X_t^{x,n}) \le CV^p(x) \ \text{ and } \ \sup_{t \ge 0} \mathbb{E} V^p(Y^x_{t,u}) \le C V^p(x) .$$ \end{lemma} \begin{proof} By the It\=o lemma, we have for $k \ge n$ and for $t \in [T_k-T_n,T_{k+1}-T_n)$: \begin{align*} dV^p(X^{x,n}_t) & = pV^{p-1}(X^{x,n}_t)\nabla V(X^{x,n}_t)^\top \left( -\sigma \sigma^\top (X^{x,n}_t) \nabla V(X^{x,n}_t) + a_{k+1}^2 \Upsilon(X^{x,n}_t)\right) dt \\ & \quad + pV^{p-1}(X^{x,n}_t)\nabla V(X^{x,n}_t)^\top a_{k+1} \sigma(X^{x,n}_t) dW_t \\ & \quad + \frac{p \, a_{k+1}^2}{2} \left(V^{p-1}(X^{x,n}_t)\,\mathrm{Tr}\!\left(\nabla^2V(X^{x,n}_t)\,\sigma \sigma^\top(X^{x,n}_t)\right) + (p-1)V^{p-2}(X^{x,n}_t)\,\big|\sigma^\top(X^{x,n}_t)\nabla V(X^{x,n}_t)\big|^2\right) dt. \end{align*} Using the facts that $(a_k)$, $\Upsilon$, $\sigma$, $\nabla^2 V$ are bounded, that $|\nabla V| \le CV^{1/2}$, that $V$ and $|\nabla V|$ are coercive and that $\sigma \sigma^\top \ge \ubar{\sigma}_0^2 I_d$, there exists $R>0$ such that if $|X^{x,n}_t| \ge R$ then the coefficient of $dt$ in the last equation is bounded above by \begin{align*} & pV^{p-1}(X^{x,n}_t) \nabla V(X^{x,n}_t)^\top \left( -\ubar{\sigma}_0^2 \nabla V(X^{x,n}_t) + C \right) + CV^{p-1}(X^{x,n}_t)\|\sigma\|_\infty^2 \le 0, \end{align*} so that $$ \mathbb{E}[V^p(X^{x,n}_t)] \le \max\left(\sup_{|z|\le R} V^p(z) , V^p(x) \right) .$$ The proof is the same for $Y$, replacing $a_{k+1}$ by $a(t)$. \end{proof} \subsection{Strong and weak error bounds} In this subsection we adapt the weak and strong error bounds of \cite{pages2020}, paying attention to the dependence on $a_n$. \begin{lemma} \label{lemma:3.4.b:Y} Let $p \ge 1$ and let $\bar{\gamma} > 0$. There exists $C>0$ such that for all $n \ge 0$ and all $u$, $t\ge 0$ such that $u \in [T_n,T_{n+1}]$, $u+t \in [T_{n},T_{n+1}]$ and $t \le \bar{\gamma}$, $$|| X_{t}^{x,n} - Y_{t,u}^x ||_p \le C \sqrt{t} (a_n - a_{n+1}). $$ \end{lemma} \begin{proof} We first consider the case $p \ge 2$.
Noting that $a_{n+1} \le a(u+s) \le a_{n}$ for all $s \in [0,t]$ and using Lemma \ref{lemma:BDG} in the Appendix, keeping in mind that $b_a = b_0 + a^2 \Upsilon$, we have \begin{align*} \| X_t^{x,n} - Y_{t,u}^x \|_p & \le \left\| \int_0^t (b_{a_{n+1}}(X_s^{x,n}) - b_{a(u+s)}(Y_{s,u}^x)) ds \right\|_p + \left\| \int_0^t (a_{n+1} \sigma(X_s^{x,n}) {-} a(u+s)\sigma(Y_{s,u}^x)) dW_s \right\|_p \\ & \le [b]_{\text{Lip}} \int_0^t ||X_s^{x,n} - Y_{s,u}^x||_p ds + \int_0^t ||a_{n+1}^2 \Upsilon(X_s^{x,n}) - a(u+s)^2 \Upsilon(Y_{s,u}^x)||_p ds \\ & \quad + C^{BDG}_p a_{n+1} [\sigma]_{\text{Lip}}\left( \int_0^t ||X_s^{x,n} - Y_{s,u}^x||^2_p ds \right)^{1/2} + \left|\left| \int_0^t \sigma(Y_{s,u}^x)(a_{n+1} {-} a(u{+}s)) dW_s \right|\right|_p \\ & \le [b]_{\text{Lip}} \int_0^t ||X_s^{x,n} - Y_{s,u}^x||_p ds + ||\Upsilon||_\infty (a_n^2 - a_{n+1}^2)t + a_{n+1}^2 [\Upsilon]_{\text{Lip}} \int_0^t ||X_s^{x,n} - Y_{s,u}^x||_p ds \\ & \quad + C^{BDG}_p a_{n+1} [\sigma]_{\text{Lip}}\left( \int_0^t ||X_s^{x,n} - Y_{s,u}^x||^2_p ds \right)^{1/2} + ||\sigma||_\infty ||W_1||_p \sqrt{t} (a_n - a_{n+1}), \end{align*} where we used the generalized Minkowski inequality. Set $\varphi(t) := \sup_{0 \le s \le t} ||X_s^{x,n} - Y_{s,u}^x||_p$ and $\psi(t) := ||\Upsilon||_\infty (a_n^2 - a_{n+1}^2)t + ||\sigma||_\infty ||W_1||_p \sqrt{t} (a_n - a_{n+1})$. Both functions are non-decreasing and $$ \varphi(t) \le \psi(t) + ([b]_{\text{Lip}} + a_{n+1}^2[\Upsilon]_{\text{Lip}}) \int_0^t ||X_s^{x,n} - Y_{s,u}^x||_p ds + C^{BDG}_p a_{n+1} [\sigma]_{\text{Lip}}\left( \int_0^t ||X_s^{x,n} - Y_{s,u}^x||^2_p ds \right)^{1/2} .$$ Moreover, for every $\alpha>0$: $$ \left( \int_0^t \varphi(s)^2 ds\right)^{1/2} \le \sqrt{\varphi(t)}\sqrt{\int_0^t \varphi(s) ds} \le \frac{\alpha}{2} \varphi(t) + \frac{1}{2\alpha} \int_0^t \varphi(s) ds.$$ Taking $\alpha = \left(C^{BDG}_p a_{n+1} [\sigma]_{\text{Lip}}\right)^{-1}$ yields: $$ \varphi(t) \le 2 \psi(t) + \left(2[b]_{\text{Lip}} + 2a_{n+1}^2[\Upsilon]_{\text{Lip}} + (C^{BDG}_p a_{n+1}[\sigma]_{\text{Lip}})^2\right) \int_0^t \varphi(s) ds .$$ So the Gronwall lemma yields, for every $t \in [0,\bar{\gamma}]$, $$ \varphi(t) \le 2 e^{(2[b]_{\text{Lip}} + 2a_{n+1}^2[\Upsilon]_{\text{Lip}} + (C^{BDG}_p a_{n+1}[\sigma]_{\text{Lip}})^2)\bar{\gamma}} \psi(t) ,$$ which completes the proof for $p \ge 2$, noting that $a_n^2 - a_{n+1}^2 \le 2 a_n (a_n - a_{n+1}) = o(a_n-a_{n+1})$. If $p \in [1,2)$, the inequality remains true since $\| \cdot \|_p \le \| \cdot \|_2$. \end{proof} \begin{lemma} \label{lemma:3.4.a} Let $p \ge 1$ and let $\bar{\gamma} > 0$. There exists a real constant $C \ge 0$ such that for all $n \ge 0$, $$ \forall t \in [0, \bar{\gamma}], \ ||X_t^{x,n} - x||_p \le CV^{1/2}(x)\sqrt{t} .$$ \end{lemma} \begin{proof} We perform a proof similar to that of Lemma \ref{lemma:3.4.b:Y}. For $p \ge 2$ we have \begin{align*} & ||X_t^{x,n} - x||_p \le \left\| \int_0^t b_{a_{n+1}}(X_s^{x,n}) ds \right\|_p + \left\| \int_0^t a_{n+1} \sigma(X_s^{x,n}) dW_s \right\|_p \\ & \le t|b_{a_{n+1}}(x)| + A\|\sigma\|_\infty \|W_1\|_p \sqrt{t} + [b]_{\text{Lip}} \int_0^t ||X_s^{x,n} - x||_p ds + A[\sigma]_{\text{Lip}} C_p^{BDG} \left(\int_0^t ||X_s^{x,n} - x||_p^2 ds \right)^{1/2}. \end{align*} From here we use the Gronwall lemma as in the proof of Lemma \ref{lemma:3.4.b:Y}. For $p \in [1,2)$, we have $\| \cdot \|_p \le \| \cdot \|_2$. \end{proof} \begin{proposition} \label{prop:3.5:Y} Let $\bar{\gamma}>0$.
There exists $C>0$ such that for every $g : \mathbb{R}^d \to \mathbb{R}$ being $\mathcal{C}^2$, for every $\gamma \in (0,\bar{\gamma}]$, every $n \ge 0$ and every $u \ge 0$ such that $u \in [T_n,T_{n+1}]$ and $u+\gamma \in [T_n,T_{n+1}]$: \begin{align*} & | \mathbb{E}\left[g(Y^x_{\gamma,u})\right] - \mathbb{E}\left[g(X^{x,n}_\gamma)\right]| \le C \gamma (a_n - a_{n+1}) \Phi_g(x) \\ \text{with } \ & \Phi_g(x) = \max\left(|\nabla g(x)|, \left|\left|\sup_{\xi \in (X^{x,n}_\gamma, Y_{\gamma,u}^x)} || \nabla^2 g(\xi) || \right|\right|_2, V^{1/2}(x)\left|\left|\sup_{\xi \in (x, X_\gamma^{x,n})} || \nabla^2 g(\xi) || \right|\right|_2 \right) . \end{align*} \end{proposition} \begin{proof} By the second order Taylor formula, for every $y$, $z \in \mathbb{R}^d$: $$ g(z) - g(y) = \langle \nabla g(y) | z-y \rangle + \int_0^1 (1-s)\nabla^2 g(sz + (1-s)y) ds (z-y)^{\otimes 2} .$$ Applying this expansion with $y = X^{x,n}_\gamma$ and $z=Y_{\gamma,u}^x$ yields: \begin{align} \mathbb{E}[g(Y_{\gamma,u}^x) - g(X^{x,n}_\gamma)] & = \langle \nabla g(x) | \mathbb{E}[Y_{\gamma,u}^x - X^{x,n}_\gamma] \rangle + \mathbb{E}[\langle \nabla g(X^{x,n}_\gamma) - \nabla g(x), Y_{\gamma,u}^x - X^{x,n}_\gamma \rangle] \nonumber \\ \label{eq:3.5:Y:proof} & \quad + \int_0^1 (1-s)\mathbb{E}\left[\nabla^2 g(s Y_{\gamma,u}^x + (1-s)X^{x,n}_\gamma)(Y_{\gamma,u}^x-X_\gamma^{x,n})^{\otimes 2} \right] ds . \end{align} The first term is bounded by $|\nabla g(x)| \cdot |\mathbb{E}[Y_{\gamma,u}^x - X_\gamma^{x,n}]|$, with \begin{align*} |\mathbb{E}[Y_{\gamma,u}^x - X_\gamma^{x,n}]| & = \left|\mathbb{E} \left[ \int_0^\gamma (b_{a(s+u)}(Y_{s,u}^x) {-} b_{a(s+u)}(X_s^{x,n})) ds\right] + \mathbb{E} \left[ \int_0^\gamma (b_{a(u+s)}(X_s^{x,n}) {-} b_{a_{n+1}}(X_s^{x,n})) ds\right]\right| \\ & \le C[b]_{\text{Lip}} (a_n - a_{n+1}) \int_0^\gamma \sqrt{s}ds + ||\Upsilon||_\infty \gamma (a_n^2 - a_{n+1}^2) \le C \gamma (a_n - a_{n+1}), \end{align*} where we used Lemma \ref{lemma:3.4.b:Y}. Using Lemma \ref{lemma:3.4.a} and Lemma \ref{lemma:3.4.b:Y} again, the second term in the right hand side of \eqref{eq:3.5:Y:proof} is bounded by $$ C \left|\left|\sup_{\xi \in (x, X_\gamma^{x,n})} || \nabla^2 g(\xi) || \right|\right|_2 \sqrt{\gamma}V^{1/2}(x) \sqrt{\gamma} (a_n - a_{n+1}). $$ Using Lemma \ref{lemma:3.4.b:Y}, the third term is bounded by $$ \frac{1}{2} C\gamma (a_n - a_{n+1})^2 \left|\left|\sup_{\xi \in (X_\gamma^{x,n}, Y_{\gamma,u}^x)} || \nabla^2 g(\xi) || \right|\right|_2 .$$ \end{proof} \begin{proposition} \label{prop:3.6:Y} Let $T$, $\bar{\gamma}>0$. There exists $C>0$ such that for every Lipschitz continuous function $f : \mathbb{R}^d \to \mathbb{R}$ and every $t \in (0,T]$, for all $n \ge 0$, for all $\gamma < \bar{\gamma}$ and every $u \in [T_n,T_{n+1}]$ such that $u+t+\gamma \in [T_n,T_{n+1}]$, $$ \left| \mathbb{E}\left[P_t^{X,n} f(Y_{\gamma,u}^x)\right] - \mathbb{E}\left[P_t^{X,n} f(X_\gamma^{x,n})\right] \right| \le C a_{n+1}^{-2}(a_n-a_{n+1}) [f]_{\textup{Lip}} \gamma t^{-1/2} V(x). $$ \end{proposition} \begin{proof} We apply Proposition \ref{prop:3.5:Y} to $g_t := P^{X,n}_t f$ with $t >0$. Following \cite[Proposition 3.2(b)]{pages2020} while paying attention to the dependence in the ellipticity parameter $a$, we have \begin{align*} \Phi_{g_t}(x) & \le C[f]_{\text{Lip}} a_{n+1}^{-2} t^{-1/2} \max\left(V^{1/2}(x), \left|\left|\sup_{\xi \in (X^{x,n}_\gamma, Y_{\gamma,u}^x)} V^{1/2}(\xi) \right|\right|_2, V^{1/2}(x)\left|\left|\sup_{\xi \in (x, X_\gamma^{x,n})} V^{1/2}(\xi) \right|\right|_2 \right). 
\end{align*} But following \eqref{Eq:eq:V_assumptions}, $\nabla V/V^{1/2}$ is bounded so $V^{1/2}$ is Lipschitz continuous and then \begin{align*} & \left|\left|\sup_{\xi \in (x, X_\gamma^{x,n})} V^{1/2}(\xi) \right|\right|_2 \le \left|\left| V^{1/2}(x) + [V^{1/2}]_{\text{Lip}} |X_\gamma^{x,n}-x| \right|\right|_2 \le CV^{1/2}(x) \\ & \left|\left|\sup_{\xi \in (X^{x,n}_\gamma, Y_{\gamma,u}^x)} V^{1/2}(\xi) \right|\right|_2 \le \left|\left| V^{1/2}(x) + [V^{1/2}]_{\text{Lip}} \max(|X^{x,n}_\gamma-x|, |Y^x_{\gamma,u}-x|) \right|\right|_2 \le CV^{1/2}(x), \end{align*} where we used Lemmas \ref{lemma:3.4.a} and \ref{lemma:3.4.b:Y}. We thus obtain the desired result. \end{proof} \subsection{Proof of Theorem \ref{thm:main}.(a)} \label{subsec:proof_Y} \begin{figure} \caption{\textit{Intervals for the domino strategy.} \label{fig:domino} \end{figure} More precisely, we prove that for all $\beta >0$, if \begin{equation} \label{eq:hyp_A_Y} A > \max\left(\sqrt{(\beta+1)(2C_1+C_2)}, \sqrt{(1+\beta^{-1})C_2} \right) , \end{equation} then $$ \mathcal{W}_1([Y^{x_0}_t], \nu_{a(t)}) \le \frac{C\max(1+|x_0|,V(x_0))}{\log^{3/2}(t)t^{(1+\beta)^{-1}-(2C_1+C_2)/A^2}} .$$ \begin{proof} We apply the \textit{domino strategy} \eqref{eq:domino_strategy}. Let us fix $T \in (0,T_1)$ and $\gamma \in (0,T_1-T)$. Here $\gamma$ is not linked to any Euler-Maruyama scheme but is an auxiliary tool for the proof. Let $n \ge 0$ and let $f : \mathbb{R}^d \to \mathbb{R}$ be Lipschitz continuous. We divide the two intervals $[T_n, T_{n+1}-T]$ and $[T_{n+1}-T, T_{n+1}]$ into smaller intervals of size $\gamma$ (see Figure \ref{fig:domino}) and for $x \in \mathbb{R}^d$ using the semi-group property of $P^{X,n}$ on $[T_n,T_{n+1})$ we write: \begin{align*} & \left| \mathbb{E}f(X_{T_{n+1}-T_n}^{x,n}) - \mathbb{E}f(Y_{T_{n+1}-T_n,T_n}^{x})\right| \\ & \quad \le \sum_{k=1}^{\lfloor(T_{n+1}-T_n-T)/\gamma\rfloor} \left| P^Y_{(k-1)\gamma,T_n} \circ (P^Y_{\gamma,T_n+(k-1)\gamma} - P^{X,n}_{\gamma}) \circ P^{X,n}_{T_{n+1}-T_n-k\gamma} f(x) \right| \\ & \quad \quad + \sum_{k=\lfloor(T_{n+1}-T_n-T)/\gamma\rfloor+1}^{\lfloor(T_{n+1}-T_n)/\gamma\rfloor-1} \left| P^Y_{(k-1)\gamma,T_n} \circ (P^Y_{\gamma,T_n+(k-1)\gamma} - P^{X,n}_{\gamma}) \circ P^{X,n}_{T_{n+1}-T_n-k\gamma} f(x) \right| \\ & \quad \quad + \left| P^Y_{\gamma(\lfloor (T_{n+1}-T_n)/\gamma \rfloor-1) ,T_n} \circ (P^Y_{\gamma + (T_{n+1}-T_n) \text{mod} \gamma, T_n + \gamma(\lfloor (T_{n+1}-T_n)/\gamma \rfloor-1)} - P^{X,n}_{\gamma + (T_{n+1}-T_n) \text{mod}(\gamma)}) f(x) \right| \\ & \quad =: (a) + (b) + (c). \end{align*} The term $(a)$ is the "ergodic term", for which the exponential contraction from Theorem \ref{thm:confluence} can be exploited. The terms $(b)$ and $(c)$ are the "error terms" where we bound the error on intervals of length no larger than $T$. The term $(c)$ is a remainder term due to the fact that $T_{n+1}-T_n$ is generally not a multiple of $\gamma$. 
$\bullet$ \textbf{Term $(a)$ :} It follows from Theorem \ref{thm:confluence} and Lemma \ref{lemma:3.4.b:Y} that \begin{align*} & |(P^Y_{\gamma,T_n+(k-1)\gamma} - P^{X,n}_{\gamma}) \circ P^{X,n}_{T_{n+1}-T_n-k\gamma} f(x) | \\ & \quad = |\mathbb{E}P^{X,n}_{T_{n+1}-T_n-k\gamma} f(X_{\gamma}^{x,n}) - \mathbb{E}P^X_{T_{n+1}-T_n-k\gamma,n} f(Y_{\gamma,T_n+(k-1)\gamma}^x)| \\ & \quad \le C e^{C_1 a_{n+1}^{-2}} e^{-\rho_{n+1}(T_{n+1}-T_n-k\gamma)} [f]_{\text{Lip}} \mathbb{E}|X_{\gamma}^{x,n} - Y_{\gamma,T_n+(k-1)\gamma}^x| \\ & \quad \le C e^{C_1 a_{n+1}^{-2}} e^{-\rho_{n+1}(T_{n+1}-T_n-k\gamma)} [f]_{\text{Lip}} \sqrt{\gamma} (a_n - a_{n+1}) \end{align*} Integrating with respect to $P^Y_{(k-1)\gamma,T_n}$ and summing up yields \begin{align*} (a) & \le Ce^{C_1 a_{n+1}^{-2}} [f]_{\text{Lip}} \sqrt{\gamma} (a_n - a_{n+1}) \frac{e^{-\rho_{n+1} T} - e^{-\rho_{n+1} (T_{n+1}-T_n)}}{e^{\gamma\rho_{n+1}}-1} \\ & \le Ce^{C_1 a_{n+1}^{-2}} [f]_{\text{Lip}} \sqrt{\gamma} (a_n - a_{n+1})(\gamma \rho_{n+1})^{-1} . \end{align*} $\bullet$ \textbf{Term $(b)$:} Applying Proposition \ref{prop:3.6:Y} yields: $$ |(P^Y_{\gamma, T_n+(k-1)\gamma} - P^{X,n}_{\gamma}) \circ P^{X,n}_{T_{n+1}-T_n-k\gamma} f(x) | \le C a_{n+1}^{-2}(a_n-a_{n+1})[f]_{\text{Lip}} \frac{\gamma}{\sqrt{T_{n+1}-T_n-k\gamma}} V(x) .$$ Integrating with respect to $P^Y_{(k-1)\gamma,T_n}$ and using Lemma \ref{lemma:D.1a:cont} which guarantees that $ P^Y_{(k-1)\gamma,T_n} V(x) \le C V(x) $ and summing with respect to $k$ implies $$ (b) \le C a_n^{-2}(a_n-a_{n+1})[f]_{\text{Lip}} \gamma V(x) \sum_{k=1}^{\lceil T/\gamma \rceil} (k\gamma)^{-1/2} \le C a_n^{-2}(a_n-a_{n+1})[f]_{\text{Lip}} T^{1/2} V(x) .$$ $\bullet$ \textbf{Term $(c)$:} Noting that $\gamma + (T_{n+1} - T_n) \ \text{mod} (\gamma) \le 2\gamma$, Lemma \ref{lemma:3.4.b:Y} yields $$ (c) \le C[f]_{\text{Lip}}\sqrt{\gamma}(a_n-a_{n+1}). $$ Now we sum up the terms $(a)$, $(b)$ and $(c)$. Since $\gamma$ is constant we have: $$ \left| \mathbb{E}f(X_{T_{n+1}-T_n}^{x,n}) - \mathbb{E}f(Y_{T_{n+1}-T_n,T_n}^{x})\right| \le C e^{C_1 a_{n+1}^{-2}}(a_n-a_{n+1})\rho_{n+1}^{-1}[f]_{\text{Lip}}V(x) ,$$ so that for all $x \in \mathbb{R}^d$, \begin{equation} \label{eq:proof_Y:2} \mathcal{W}_1([X_{T_{n+1}-T_n}^{x,n}], [Y_{T_{n+1}-T_n,T_n}^x]) \le Ce^{C_1 a_{n+1}^{-2}}(a_n-a_{n+1})\rho_{n+1}^{-1}V(x) . \end{equation} Temporarily setting $x_n := X^{x_0}_{T_n}$ and $y_n := Y^{x_0}_{T_n}$, we derive \begin{align*} & \mathcal{W}_1([X^{x_0}_{T_{n+1}}],[Y^{x_0}_{T_{n+1}}]) = \mathcal{W}_1([X_{T_{n+1}-T_n}^{x_n,n}], [Y_{T_{n+1}-T_n,T_n}^{y_n}]) \\ & \quad \quad \quad \quad \le \mathcal{W}_1([X_{T_{n+1}-T_n}^{x_n,n}],[X_{T_{n+1}-T_n}^{y_n,n}]) + \mathcal{W}_1([X_{T_{n+1}-T_n}^{y_n,n}], [Y_{T_{n+1}-T_n,T_n}^{y_n}]) \\ & \quad \quad \quad \quad \le Ce^{C_1 a_{n+1}^{-2}} e^{-\rho_{n+1} (T_{n+1}-T_n)} \mathcal{W}_1([X^{x_0}_{T_n}],[Y^{x_0}_{T_n}]) + Ce^{C_1 a_{n+1}^{-2}}(a_n-a_{n+1})\rho_{n+1}^{-1}\mathbb{E}V(Y^{x_0}_{T_n}), \end{align*} where we used Theorem \ref{thm:confluence} and \eqref{eq:proof_Y:2}. We then apply Lemma \ref{lemma:D.1a:cont} which guarantees that $(\mathbb{E}V(Y^{x_0}_{T_n}))_n$ is bounded by $CV(x_0)$. Let us denote $$\lambda_n := Ce^{C_1 a_n^{-2}}(a_{n-1}-a_n) \rho_n^{-1} = Ce^{(C_1+C_2) a_n^{-2}}(a_{n-1}-a_n).$$ Owing to \eqref{eq:hyp_A_Y} we have $\lambda_n \to 0$. 
Iterating this relation and using $(\mu_n)$ defined in \eqref{eq:def_mu} yields, as in the proof of Theorem \ref{thm:conv_X}: \begin{align*} \mathcal{W}_1([X^{x_0}_{T_{n+1}}],[Y^{x_0}_{T_{n+1}}]) & \le CV(x_0) \left(\lambda_{n+1} + \mu_{n+1} \lambda_n + \mu_{n+1} \mu_n \lambda_{n-1} + \cdots + \mu_{n+1} \cdots \mu_2 \lambda_1 \right) \\ & \le CV(x_0) \left(\lambda_{n+1} + \mu_{n+1}\left(\lambda_n + \cdots + \lambda_1\right) \right) \\ & \le CV(x_0) \left(\lambda_{n+1} + n\mu_{n+1} \right). \end{align*} But following \eqref{eq:def_mu} one checks that $n\mu_{n+1} = o(\lambda_{n+1})$ so that $$ \mathcal{W}_1([X^{x_0}_{T_{n+1}}],[Y^{x_0}_{T_{n+1}}]) \le CV(x_0) \lambda_{n+1} \le \frac{CV(x_0)}{\log^{3/2}(n+1)(n+1)^{1-(\beta+1)(C_1+C_2)/A^2}} .$$ Moreover, owing to \eqref{eq:hyp_A_Y} and combining with Theorem \ref{thm:conv_X} we get \begin{align*} \mathcal{W}_1([Y^{x_0}_{T_n}],\nu_{a_n}) & \le \mathcal{W}_1([Y^{x_0}_{T_n}],[X^{x_0}_{T_n}]) + \mathcal{W}_1([X^{x_0}_{T_n}],\nu_{a_n}) \le \frac{C\max(1+|x_0|,V(x_0))}{\log^{3/2}(n)n^{1-(\beta+1)(C_1+C_2)/A^2}} \end{align*} and as the right-hand side of these inequalities is $o(a_n)$, we derive $$ \mathcal{W}_1([Y^{x_0}_{T_n}],\nu^\star) \le \mathcal{W}_1([Y^{x_0}_{T_n}],[X^{x_0}_{T_n}]) + \mathcal{W}_1([X^{x_0}_{T_n}],\nu^\star) \le Ca_n \max(1+|x_0|,V(x_0)) .$$ $\bullet$ \textbf{Convergence for $t \to \infty$:} Now let us prove that $\mathcal{W}_1([Y^{x_0}_{t}],\nu^\star) \to 0$ as $t \to \infty$. As before, let $T>0$. For $t \ge T$, we perform the same domino strategy, replacing $T_{n+1}$ by $T_n + t$ and considering the intervals $[T_n,T_n+t-T]$ and $[T_n+t-T,T_n+t]$. For $t < T$, we only consider the terms $(b)$ and $(c)$ and we replace $T$ by $t$ in $(b)$. Doing so we obtain $$ \mathcal{W}_1([X^{x,n}_t],[Y^{x}_{t,T_n}]) \le Ce^{C_1 a_{n+1}^{-2}}(a_n-a_{n+1})\rho_{n+1}^{-1}V(x). $$ Thus, as before: \begin{align*} \mathcal{W}_1([X^{x_0}_{T_n+t}],[Y^{x_0}_{T_n+t}]) & \le Ce^{C_1 a_{n+1}^{-2}}\mathcal{W}_1([X^{x_0}_{T_n}],[Y^{x_0}_{T_n}]) + Ce^{C_1 a_{n+1}^{-2}}(a_n-a_{n+1})\rho_{n+1}^{-1}V(x_0) \\ & \le \frac{CV(x_0)}{\log^{3/2}(n)n^{1-(\beta+1)(2C_1+C_2)/A^2}}. \end{align*} Owing to \eqref{eq:hyp_A_Y} we have $1-(\beta+1)(2C_1+C_2)/A^2>0$, so that, using \eqref{eq:proof:X_t_nu}, $$ \mathcal{W}_1([Y^{x_0}_{T_n+t}],\nu_{a_{n+1}}) \le \mathcal{W}_1([Y^{x_0}_{T_n+t}],[X^{x_0}_{T_n+t}]) + \mathcal{W}_1([X^{x_0}_{T_n+t}],\nu_{a_{n+1}}) \le \frac{C\max(1+|x_0|,V(x_0))}{\log^{3/2}(n)n^{1-(\beta+1)(2C_1+C_2)/A^2}} .$$ We then prove the bound for $\mathcal{W}_1([Y^{x_0}_t], \nu_{a(t)})$ in the same way as for \eqref{eq:proof:X_t_nu:2}, using the second claim of Proposition \ref{prop:W_nu}. \end{proof} \section{Continuously decreasing case: the Euler-Maruyama scheme} \label{sec:bar-Y} \begin{figure} \caption{\textit{Decrease of the noise coefficient $a$ in the plateau and non-plateau cases.}} \label{fig:a_decreasing} \end{figure} We now consider $(\bar{Y}_n)$, the Euler-Maruyama scheme of $(Y_t)$ with steps $(\gamma_n)$ defined in \eqref{eq:def_Y_bar}, together with its genuine (continuous-time) interpolation defined in \eqref{eq:def_Y_bar_genuine}.
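Before giving the precise definitions, the following schematic one-step update may help fix ideas (Python sketch; the authoritative definition is \eqref{eq:def_Y_bar}, which is not reproduced here, \texttt{grad\_V\_hat} stands for a stochastic estimate of $\nabla V$, e.g. a minibatch gradient, whose error plays the role of the perturbation $\zeta_{k+1}$ subject to \eqref{eq:zeta_assumptions}, and \texttt{Sigma}, \texttt{Upsilon} are the coefficient functions entering $b_a$).
\begin{verbatim}
import numpy as np

def euler_step(y, k, Gamma, gamma, grad_V_hat, Sigma, Upsilon, A=5.0, rng=None):
    # One update between Gamma_k and Gamma_{k+1}:
    # y <- y + gamma_{k+1} * (b_{a(Gamma_k)}(y) + zeta_{k+1}(y))
    #        + a(Gamma_k) * Sigma(y) * (W_{Gamma_{k+1}} - W_{Gamma_k}),
    # where the estimation error of grad_V_hat plays the role of zeta_{k+1}.
    rng = rng if rng is not None else np.random.default_rng()
    a = A / np.sqrt(np.log(Gamma[k] + np.e))      # a(Gamma_k)
    S = Sigma(y)
    drift = -S @ S.T @ grad_V_hat(y) + a ** 2 * Upsilon(y)
    dW = np.sqrt(gamma[k + 1]) * rng.standard_normal(y.shape)
    return y + gamma[k + 1] * drift + a * S @ dW
\end{verbatim}
Here \texttt{Gamma[k]} and \texttt{gamma[k+1]} denote $\Gamma_k$ and the step size $\gamma_{k+1}$.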
As with \eqref{eq:def_Y:2}, we define more generally for every $n \ge 0$, $(\bar{Y}^x_{t,\Gamma_n})_{t\ge 0}$, first at times $\Gamma_k-\Gamma_n$, $k \ge n$, by \begin{align*} \bar{Y}^x_{0,\Gamma_n} = x, \quad \bar{Y}^x_{\Gamma_{k+1}-\Gamma_n,\Gamma_n} & = \bar{Y}^x_{\Gamma_k -\Gamma_n, \Gamma_n} + \gamma_{k+1} \left(b_{a(\Gamma_k)}(\bar{Y}_{\Gamma_k-\Gamma_n,\Gamma_n}^{x}) + \zeta_{k+1}(\bar{Y}_{\Gamma_k-\Gamma_n,\Gamma_n}^{x}) \right) \\ & \quad + a(\Gamma_k)\sigma(\bar{Y}_{\Gamma_k-\Gamma_n,\Gamma_n}^{x})(W_{\Gamma_{k+1}} - W_{\Gamma_k}), \end{align*} then at every time $t$ by the genuine interpolation on the intervals $([\Gamma_k-\Gamma_n, \Gamma_{k+1}- \Gamma_n))_{k \ge n}$ as before. In particular $\bar{Y}^x = \bar{Y}^x_{\cdot,0}$. Still more generally, we define $\bar{Y}^x_{t,u}$ for $u \in (\Gamma_n, \Gamma_{n+1})$ as \begin{equation*} \bar{Y}^x_{0,u} = x, \quad \bar{Y}^x_{t,u} = \left\lbrace \begin{array}{ll} x + t\left(b_{a(u)}(x) + \zeta_{n+1}(x)\right) + a(u)\sigma(x)(W_{u+t}-W_{u}) & \text{ if } t \in (0, \Gamma_{n+1}-u] \\ \bar{Y}^{\bar{Y}^x_{\Gamma_{n+1}-u,u}}_{t-(\Gamma_{n+1}-u),\Gamma_{n+1}} & \text{ if } t > \Gamma_{n+1}-u . \end{array} \right. \end{equation*} For $n$, $k \ge 0$, for $u \in [\Gamma_k,\Gamma_{k+1})$ and $\gamma \in [0,\Gamma_{k+1}-u]$, let $P^{\bar{Y}}_{\gamma,u}$ be the transition kernel associated with $\bar{Y}_{\cdot,u}$ between the times $0$ and $\gamma$, i.e. for all measurable $f : \mathbb{R}^d \to \mathbb{R}^+$, $P^{\bar{Y}}_{\gamma,u}f(x) = \mathbb{E}[f(\bar{Y}^x_{\gamma,u})]$. \subsection{Boundedness of the potential} \begin{lemma} \label{lemma:D.1a} Let $p \ge 1/2$. There exists a constant $C >0$ such that for every $k \ge 0$, for every $u \in [\Gamma_k, \Gamma_{k+1})$ and for every $x \in \mathbb{R}^d$: $$ \sup_{n \ge k+1} \mathbb{E} V^p(\bar{Y}_{\Gamma_n-u,u}^x) \le CV^p(x). $$ \end{lemma} \begin{proof} We rework the proof of Lemma 2(b) in \cite{lamberton2002}. Without loss of generality, assume that $u = \Gamma_k$. To simplify the notation, we define $\widetilde{y}_n := \bar{Y}^x_{\Gamma_n-\Gamma_k,\Gamma_k}$ for $n \ge k$ and $\Delta \widetilde{y}_{n+1} := \widetilde{y}_{n+1} - \widetilde{y}_n$. The Taylor formula applied to $V^p$ between $\widetilde{y}_n$ and $\widetilde{y}_{n+1}$ yields, for some $\xi_{n+1} \in (\widetilde{y}_n, \widetilde{y}_{n+1})$ and with $\nabla^2 (V^p) = p(V^{p-1} \nabla^2 V + (p-1)V^{p-2} \nabla V \nabla V^T)$: \begin{align*} V^p(\widetilde{y}_{n+1}) & = V^p(\widetilde{y}_n) + pV^{p-1}(\widetilde{y}_n) \langle \nabla V(\widetilde{y}_n), \Delta\widetilde{y}_{n+1} \rangle + \frac{1}{2}\nabla^2 (V^p)(\xi_{n+1}) \cdot (\Delta \widetilde{y}_{n+1})^{\otimes 2} \\ & = V^p(\widetilde{y}_n) + pV^{p-1}(\widetilde{y}_n) \nabla V(\widetilde{y}_n)^T \cdot \big( -\gamma_{n+1}\sigma \sigma^\top (\widetilde{y}_n)\nabla V(\widetilde{y}_n) + \gamma_{n+1} a^2(\Gamma_n)\Upsilon(\widetilde{y}_n) \\ & \quad + \gamma_{n+1} \zeta_{n+1}(\widetilde{y}_n) + \sqrt{\gamma_{n+1}} a(\Gamma_n) \sigma (\widetilde{y}_n)U_{n+1}\big) + \frac{1}{2}\nabla^2 (V^p)(\xi_{n+1}) \cdot (\Delta \widetilde{y}_{n+1})^{\otimes 2}, \end{align*} where $U_{n+1} \sim \mathcal{N}(0,I_d)$.
Moreover using \eqref{Eq:eq:V_assumptions}, $\sqrt{V}$ is Lipschitz continuous so \begin{equation} \label{eq:potential_bounded_proof} \textstyle \mathbb{E} \big[\sup_{z \in (\widetilde{y}_n, \widetilde{y}_{n+1})} V^{1/2}(z) | \widetilde{y}_1,\ldots,\widetilde{y}_n \big] \le V^{1/2}(\widetilde{y}_n) + [\sqrt{V}]_{\text{Lip}} \mathbb{E}[|\widetilde{y}_{n+1} - \widetilde{y}_n||\widetilde{y}_1,\ldots,\widetilde{y}_n] \le CV^{1/2}(\widetilde{y}_n), \end{equation} and in particular $$ \mathbb{E}[\|\nabla^2(V^p)(\xi_{n+1})\| |\widetilde{y}_1,\ldots,\widetilde{y}_n] \le C\|\nabla^2(V^p)(\widetilde{y}_n)\| .$$ Moreover using that $\nabla^2 V$ is bounded and that $|\nabla V| \le CV^{1/2}$ we have $$ \|\nabla^2(V^p)(\widetilde{y}_n)\| \le C\|(V^{p-1} \nabla^2 V + V^{p-2} \nabla V \nabla V^T)(\widetilde{y}_n)\| \le CV^{p-1}(\widetilde{y}_n). $$ Then using the facts that $a$, $\Upsilon$, $\sigma$, $\nabla^2 V$ are bounded and that $\gamma_n^2 = o(\gamma_n)$, that $V$, $\nabla V$ are coercive and $\sigma \sigma^\top \ge \ubar{\sigma}_0^2 I_d$ and \eqref{eq:zeta_assumptions}, there exists $R>0$ and $N \in \mathbb{N}$ such that if $|\widetilde{y}_n| \ge R$ and $n \ge N$ then \begin{align*} & \mathbb{E}[ V^p(\widetilde{y}_{n+1}) - V^p(\widetilde{y}_n) | \widetilde{y}_1,\ldots,\widetilde{y}_n ] \\ & \quad \le pV^{p-1} \nabla V(\widetilde{y}_n)^T \cdot \left( -\gamma_{n+1} \ubar{\sigma}_0^2 (\widetilde{y}_n)\nabla V(\widetilde{y}_n) + C\gamma_{n+1} \right) \\ & \quad \quad + C\|\nabla^2 (V^p)(\widetilde{y}_n) \| \cdot \left( \gamma_{n+1}^2\|\sigma\|_\infty^4 |\nabla V(\widetilde{y}_n)|^2 + C\gamma_{n+1}^2 + C\gamma_{n+1}^2 V(x) + C\gamma_{n+1}\mathbb{E}|\mathcal{N}(0,I_d)|^2 \right) \\ & \quad \le C\gamma_{n+1}V^{p-1}(\widetilde{y}_n) \left[ |\nabla V(\widetilde{y}_n)|\big(- |\nabla V(\widetilde{y}_n)| + 1\big) + \gamma_{n+1}(|\nabla V(\widetilde{y}_n)|^2 +1) + 1 \right] \le 0 . \end{align*} On the other side, if $|\widetilde{y}_n| \le R$ then $$ \mathbb{E}[|V^p(\widetilde{y}_{n+1}) - V^p(\widetilde{y}_n)|| \widetilde{y}_1,\ldots,\widetilde{y}_n] \le C \gamma_{n+1} \textstyle \sup_{|x| \le R} V^p(x) .$$ Moreover for $n \in \lbrace k, \ldots, N \rbrace$ using \eqref{eq:potential_bounded_proof} we have $$ \mathbb{E}[|V^p(\widetilde{y}_{n+1}) - V^p(\widetilde{y}_n)| | \widetilde{y}_1,\ldots,\widetilde{y}_n] \le C V^p(\widetilde{y}_n)$$ so that $$ \textstyle \sup_{k \le n \le N+1} \mathbb{E}[V^p(\widetilde{y}_n)] \le C^{N-k} V^p(x) .$$ Finally we obtain $$ \textstyle \sup_{n \ge k} \mathbb{E}[V^p(\widetilde{y}_n)] \le C V^p(x) .$$ \end{proof} \subsection{Strong and weak error bounds for the Euler-Maruyama scheme} \begin{lemma} \label{lemma:3.4.b:Y:bar} Let $p \ge 1$. There exists $C>0$ such that for every $n$, $k \ge 0$, for every $u \in [\Gamma_k,\Gamma_{k+1})$ and every $t>0$ such that $u \in [T_n,T_{n+1}]$, $t \le \Gamma_{k+1}-u$ and $u+t \in [T_{n},T_{n+1}]$, $$|| X_t^{x,n} - \bar{Y}_{t,u}^x ||_p \le C \left(V^{1/2}(x)t + \sqrt{t} (a_n - a_{n+1})\right). $$ \end{lemma} \begin{proof} As in the proof of Lemma \ref{lemma:3.4.b:Y}, if $p \ge 2$ we have \begin{align*} & || X_t^{x,n} - \bar{Y}_{t,u}^x ||_p \le [b]_{\text{Lip}} \int_0^t ||X_s^{x,n} - x||_p ds + ||\Upsilon||_\infty (a_n^2 - a_{n+1}^2)t + a_{n+1}^2 [\Upsilon]_{\text{Lip}} \int_0^t ||X_s^{x,n} - x||_p ds \\ & \quad \quad \quad + \|\zeta_1(x)\|_p t + C^{BDG} a_{n+1} [\sigma]_{\text{Lip}}\left( \int_0^t ||X_s^{x,n} - x||^2_p ds \right)^{1/2} + ||\sigma||_\infty ||W_1||_p \sqrt{t} (a_n - a_{n+1}). 
\end{align*} Plugging Lemma \ref{lemma:3.4.a} and \eqref{eq:zeta_assumptions} into this inequality yields: \begin{align*} || X_t^{x,n} - \bar{Y}_{t,u}^x ||_p & \le CV^{1/2}(x)t^{3/2} + ||\Upsilon||_\infty (a_n^2 - a_{n+1}^2)t + CV^{1/2}(x)t + C \sqrt{t} (a_n - a_{n+1}), \end{align*} which completes the proof for $p \ge 2$. If $p \in [1,2)$, we remark that $\| \cdot \|_p \le \| \cdot \|_2$. \end{proof} \begin{proposition} \label{prop:3.5:Y:bar} For every $g : \mathbb{R}^d \to \mathbb{R}$ being $\mathcal{C}^3$, for every $n$, $k \ge 0$ and every $u \in [\Gamma_k, \Gamma_{k+1})$ such that $u \in [T_n,T_{n+1}]$, $\gamma \le \Gamma_{k+1}-u$ and $u +\gamma \in [T_n,T_{n+1}]$: \begin{align*} | \mathbb{E}\left[g(\bar{Y}^x_{\gamma,u})\right] - \mathbb{E}\left[g(X^{x,n}_{\gamma})\right]| & \le C V^{1/2}(x)\left(V^{1/2}(x) \gamma^2 + \gamma (a_n - a_{n+1})\right) \bar{\Phi}_{g,1}(x) \\ & \quad + C V(x)\left(V^{1/2}(x) \gamma^2 + \gamma^{3/2} (a_n - a_{n+1})\right)\bar{\Phi}_{g,2}(x) , \end{align*} with \begin{align*} & \bar{\Phi}_{g,1}(x) = \max\left(|\nabla g(x)|, ||\nabla^2 g(x)||, \left|\left|\sup_{\xi \in (X_\gamma^{x,n}, \bar{Y}_{\gamma,u}^x)} || \nabla^2 g(\xi) || \right|\right|_2 \right), \\ & \bar{\Phi}_{g,2}(x) = \left|\left| \sup_{\xi \in (x, X^{x,n}_\gamma)} ||\nabla^3 g(\xi)|| \right|\right|_4. \end{align*} \end{proposition} The proof is given in the Supplementary Material. \begin{proposition} \label{prop:3.6:Y:bar} Let $T >0$. There exists $C > 0$ such that for every Lipschitz continuous function $f$ and every $t \in (0,T]$, for all $n$, $k \ge 0$, for all $u \in [\Gamma_k, \Gamma_{k+1})$, for all $\gamma$ such that $\Gamma_k \in [T_n,T_{n+1}]$, $\gamma \le \Gamma_{k+1}-u$ and $u+t+\gamma \in [T_n,T_{n+1}]$, \begin{align*} & \left| \mathbb{E}\left[P_t^{X,n} f(\bar{Y}_{\gamma,u}^x)\right] - \mathbb{E}\left[P_t^{X,n} f(X_{\gamma}^{x,n})\right] \right| \\ & \quad \le C[f]_{\textup{Lip}} V^{2}(x) \cdot \left(a_{n+1}^{-2}t^{-1/2}\left(\gamma^2 + (a_n-a_{n+1}) \gamma\right) + a_{n+1}^{-3}t^{-1}\left(\gamma^2 + \gamma^{3/2}(a_n-a_{n+1})\right) \right). \end{align*} \end{proposition} \begin{proof} The proof is the same as for Proposition \ref{prop:3.6:Y}. \end{proof} \subsection{Proof of Theorem \ref{thm:main}.(b)} \label{subsec:proof_Y_bar} More precisely, we prove that for all $\beta>0$, if \begin{equation} \label{eq:hyp_A_Y_bar} A > \max\left(\sqrt{(\beta+1)(2C_1+C_2)}, \sqrt{(1+\beta^{-1})C_2} \right) , \end{equation} then $$ \mathcal{W}_1([\bar{Y}^{x_0}_t], \nu_{a(t)}) \le \frac{C\max\left(1+|x_0|,V^2(x_0)\right)}{t^{(1+\beta)^{-1}-(2C_1+C_2)/A^2}} .$$ \begin{proof} We apply the same \textit{domino strategy} as in Section \ref{subsec:proof_Y}. Let $n \ge 0$ and let $f : \mathbb{R}^d \to \mathbb{R}$ be Lipschitz continuous. Let us denote $$\gamma^{\text{init}} := \Gamma_{N(T_n)+1}-T_n \le \gamma_{N(T_n)+1} \quad \text{ and } \quad \gamma^{\text{end}} := T_{n+1}-\Gamma_{N(T_{n+1})} \le \gamma_{N(T_{n+1})+1}. 
$$ For $x \in \mathbb{R}^d$ we write: \begin{align*} & \left| \mathbb{E}f(X_{T_{n+1}-T_n}^{x,n}) - \mathbb{E}f(\bar{Y}_{T_{n+1}-T_n,T_n}^{x})\right| \le \left|(P^{\bar{Y}}_{\gamma^{\text{init}},T_n} - P^{X,n}_{\gamma^{\text{init}}}) \circ P^{X,n}_{T_{n+1}-\Gamma_{N(T_n)+1}} f(x)\right| \\ & + \sum_{k=N(T_n)+2}^{N(T_{n+1}-T)} \left| P^{\bar{Y}}_{\gamma^{\text{init}},T_n} \circ P^{\bar{Y}}_{\gamma_{N(T_n)+2},\Gamma_{N(T_n)+1}} \circ \cdots \circ P^{\bar{Y}}_{\gamma_{k-1},\Gamma_{k-2}} \circ (P^{\bar{Y}}_{\gamma_{k},\Gamma_{k-1}} - P^{X,n}_{\gamma_k}) \circ P^{X,n}_{T_{n+1} - \Gamma_k} f(x) \right| \\ & + \sum_{k=N(T_{n+1}-T)+1}^{N(T_{n+1})-1} \left| P^{\bar{Y}}_{\gamma^{\text{init}},T_n} \circ P^{\bar{Y}}_{\gamma_{N(T_n)+2},\Gamma_{N(T_n)+1}} \circ \cdots \circ P^{\bar{Y}}_{\gamma_{k-1},\Gamma_{k-2}} \circ (P^{\bar{Y}}_{\gamma_{k},\Gamma_{k-1}} - P^{X,n}_{\gamma_k}) \circ P^{X,n}_{T_{n+1} - \Gamma_k} f(x) \right| \\ & {+} \left| P^{\bar{Y}}_{\gamma^{\text{init}},T_n} {\circ} P^{\bar{Y}}_{\gamma_{N(T_n)+2},\Gamma_{N(T_n)+1}} {\circ} \cdots {\circ} P^{\bar{Y}}_{\gamma_{N(T_{n+1})-1},\Gamma_{N(T_{n+1})-2}} {\circ} (P^{\bar{Y}}_{\gamma^{\text{end}}+\gamma_{N(T_{n+1})},\Gamma_{N(T_{n+1})-1}} {-} P^{X,n}_{\gamma^{\text{end}}+\gamma_{N(T_{n+1})}}) f(x)\right| \\ & =: (c^{\text{init}}) + (a) + (b) + (c^{\text{end}}). \end{align*} $\bullet$ \textbf{Term $(a)$:} we have \begin{align*} & |(P^{\bar{Y}}_{\gamma_k,\Gamma_{k-1}} - P^{X,n}_{\gamma_k}) \circ P^{X,n}_{T_{n+1} - \Gamma_k} f(x)| \\ & = |P^{\bar{Y}}_{\gamma_k,\Gamma_{k-1}} \circ P^{X,n}_{T/2} \circ P^{X,n}_{T_{n+1}-\Gamma_k - T/2} f(x) - P^{X,n}_{\gamma_k,n} \circ P^{X,n}_{T/2} \circ P^{X,n}_{T_{n+1}-\Gamma_k - T/2} f(x)| \\ & \le | \mathbb{E} P^{X,n}_{T_{n+1}-\Gamma_k-T/2}(\Xi_k^x) - \mathbb{E}P^{X,n}_{T_{n+1}-\Gamma_k-T/2}(\bar{\Xi}_k^x)| \le Ce^{Ca_{n+1}^{-2}} e^{-\rho_{n+1}(T_{n+1}-\Gamma_k-T/2)}[f]_{\text{Lip}}\mathbb{E}|\Xi^x_k - \bar{\Xi}^x_k|, \end{align*} where $\Xi^x_k$ and $\bar{\Xi}^x_k$ are any random vectors with laws $\left[X_{T/2}^{X^{x,n}_{\gamma_k},n}\right]$ and $\left[X_{T/2}^{\bar{Y}^x_{\gamma_k,\Gamma_{k-1}},n}\right]$ respectively and where we used Theorem \ref{thm:confluence} to get the last inequality. Thus, it follows from the definition of the Wasserstein distance that $$ |(P^{\bar{Y}}_{\gamma_k,\Gamma_{k-1}} - P^{X,n}_{\gamma_k}) \circ P^{X,n}_{T_{n+1} - \Gamma_k} f(x)| \le Ce^{Ca_{n+1}^{-2}}e^{-\rho_{n+1}(T_{n+1}-\Gamma_k)}[f]_{\text{Lip}} \mathcal{W}_1\left(X_{T/2}^{X^{x,n}_{\gamma_k},n}, X_{T/2}^{\bar{Y}^x_{\gamma_k,\Gamma_{k-1}},n}\right) .$$ On the other hand, the Kantorovich-Rubinstein representation of the $L^1$-Wasserstein distance (see \cite[Equation (6.3)]{villani2009}) reads \begin{align*} \mathcal{W}_1\left(X_{T/2}^{X^{x,n}_{\gamma_k},n}, X_{T/2}^{\bar{Y}^x_{\gamma_k,\Gamma_{k-1}},n}\right) & = \sup_{[g]_{\text{Lip}}=1} \mathbb{E}\left[ g\left(X_{T/2}^{X^{x,n}_{\gamma_k},n}\right) - g\left(X_{T/2}^{\bar{Y}^x_{\gamma_k,\Gamma_{k-1}},n}\right) \right] \\ & = \sup_{[g]_{\text{Lip}}=1} \mathbb{E}\left[ P^{X,n}_{T/2}g(X^{x,n}_{\gamma_k}) - P^{X,n}_{T/2}g(\bar{Y}^x_{\gamma_k,\Gamma_{k-1}}) \right]. 
\end{align*} It follows from Proposition \ref{prop:3.6:Y:bar} and using $[g]_{\text{Lip}}=1$ that $$ \mathbb{E}\left[ P^{X,n}_{T/2}g(X^{x,n}_{\gamma_k}) - P^{X,n}_{T/2}g(\bar{Y}^x_{\gamma_k,\Gamma_{k-1}}) \right] \le Ca_{n+1}^{-3}\left(\gamma_k^2 +(a_n-a_{n+1}) \gamma_k \right)V^2(x), $$ so that $$ |(P^{\bar{Y}}_{\gamma_k,\Gamma_{k-1}} - P^{X,n}_{\gamma_k}) \circ P^{X,n}_{T_{n+1} - \Gamma_k} f(x)| \le Ce^{C_1 a_{n+1}^{-2}}e^{-\rho_{n+1}(T_{n+1}-\Gamma_k)}[f]_{\text{Lip}} a_{n+1}^{-3}\left(\gamma_k^2 + (a_n-a_{n+1}) \gamma_k\right) V^2(x) .$$ Finally, integrating with respect to $P^{\bar{Y}}_{\gamma^{\text{init}},T_n} \circ P^{\bar{Y}}_{\gamma_{N(T_n)+2},\Gamma_{N(T_n)+1}} \circ \cdots \circ P^{\bar{Y}}_{\gamma_{k-1},\Gamma_{k-2}}$ yields: \begin{align*} & \left| P^{\bar{Y}}_{\gamma^{\text{init}},T_n} \circ P^{\bar{Y}}_{\gamma_{N(T_n)+2},\Gamma_{N(T_n)+1}} \circ \cdots \circ P^{\bar{Y}}_{\gamma_{k-1},\Gamma_{k-2}} \circ (P^{\bar{Y}}_{\gamma_k,\Gamma_{k-1}} - P^{X,n}_{\gamma_k}) \circ P^{X,n}_{T_{n+1} - \Gamma_k} f(x) \right| \\ & \quad \le Ce^{C_1 a_{n+1}^{-2}}e^{-\rho_{n+1}(T_{n+1}-\Gamma_k)}[f]_{\text{Lip}} a_{n+1}^{-3}\left(\gamma_k^2 + (a_n-a_{n+1}) \gamma_k\right) \left(\sup_{\ell \ge N(T_n)+1} \mathbb{E} V^2(\bar{Y}_{\gamma^{\text{init}} + \Gamma_\ell - \Gamma_{N(T_n)+1},T_n}^x)\right) \\ & \quad \le Ce^{C_1 a_{n+1}^{-2}}e^{-\rho_{n+1}(T_{n+1}-\Gamma_k)}[f]_{\text{Lip}} a_{n+1}^{-3}\left(\gamma_k^2 + (a_n-a_{n+1}) \gamma_k \right) V^2(x), \end{align*} where we used Lemma \ref{lemma:D.1a}. Now, summing up over $k$ yields: \begin{align*} (a) & \le Ca_{n+1}^{-3}e^{C_1 a_{n+1}^{-2}}e^{-\rho_{n+1} T_{n+1}}[f]_{\text{Lip}} V^2(x) \sum_{k=N(T_n)+2}^{N(T_{n+1}-T)} ((a_n-a_{n+1}) + \gamma_k) \gamma_k e^{\rho_{n+1} \Gamma_k} \\ & \le Ca_{n+1}^{-3}e^{C_1 a_{n+1}^{-2}}e^{-\rho_{n+1} T_{n+1}}[f]_{\text{Lip}} ((a_n-a_{n+1}) + \gamma_{N(T_n)}) V^2(x) \sum_{k=N(T_n)+2}^{N(T_{n+1}-T)} \gamma_k e^{\rho_{n+1} \Gamma_{k-1}} \\ & \le Ca_{n+1}^{-3}e^{C_1 a_{n+1}^{-2}}e^{-\rho_{n+1} T_{n+1}}[f]_{\text{Lip}}((a_n-a_{n+1}) + \gamma_{N(T_n)})V^2(x) \int_{T_n}^{T_{n+1}-T} e^{\rho_{n+1} u} du \\ & \le Ca_{n+1}^{-3}e^{C_1 a_{n+1}^{-2}}[f]_{\text{Lip}}((a_n-a_{n+1}) + \gamma_{N(T_n)})V^2(x) \rho_{n+1}^{-1} \\ & \le Ca_{n+1}^{-3}e^{C_1 a_{n+1}^{-2}}[f]_{\text{Lip}}(a_n-a_{n+1})V^2(x) \rho_{n+1}^{-1}, \end{align*} where we used that $(e^{\rho_{n+1} \gamma_k})_{n,k \ge 0}$ is bounded and, for the last inequality, Lemma \ref{lemma:app:gamma:1}.
We obtain likewise $$ (c^{\text{init}}) \le Ce^{C_1 a_{n+1}^{-2}}e^{-\rho_{n+1}(T_{n+1}-T_n)}[f]_{\text{Lip}}a_{n+1}^{-3}(a_n-a_{n+1})\gamma_{N(T_n)+1} V^2(x) .$$ $\bullet$ \textbf{Term $(b)$:} Applying Proposition \ref{prop:3.6:Y:bar} yields: \begin{align*} (b) & \le C a_{n+1}^{-3} \left(\gamma_{N(T_{n+1}-T)} + \sqrt{\gamma_{N(T_{n+1}-T)}}(a_n-a_{n+1})\right) [f]_{\text{Lip}} V^2(x) \sum_{k=N(T_{n+1}-T)+1}^{N(T_{n+1})-1} \frac{\gamma_k}{T_{n+1}-\Gamma_k} \\ & \quad + C a_{n+1}^{-2} \left(\gamma_{N(T_{n+1}-T)} + (a_n-a_{n+1})\right) [f]_{\text{Lip}} V^2(x) \sum_{k=N(T_{n+1}-T)+1}^{N(T_{n+1})-1} \frac{\gamma_k}{\sqrt{T_{n+1}-\Gamma_k}} \\ & \le C a_{n+1}^{-3} \left(\gamma_{N(T_{n+1}-T)} + \sqrt{\gamma_{N(T_{n+1}-T)}}(a_n-a_{n+1})\right) [f]_{\text{Lip}} V^2(x) \int_{T_{n+1}-T}^{T_{n+1}-\gamma_{N(T_{n+1})}} \frac{1}{T_{n+1}-u} du \\ & \quad + C a_{n+1}^{-2} \left(\gamma_{N(T_{n+1}-T)} + (a_n-a_{n+1})\right) [f]_{\text{Lip}} V^2(x) \int_{T_{n+1}-T}^{T_{n+1}-\gamma_{N(T_{n+1})}} \frac{1}{\sqrt{T_{n+1}-u}} du \\ & \le C a_{n+1}^{-3} \left(\gamma_{N(T_{n+1}-T)} + \sqrt{\gamma_{N(T_{n+1}-T)}}(a_n-a_{n+1})\right) [f]_{\text{Lip}} V^2(x) \log(1/\gamma_{N(T_{n+1})}) \\ & \quad + C a_{n+1}^{-2} (a_n-a_{n+1}) [f]_{\text{Lip}} V^2(x). \end{align*} Using Lemma \ref{lemma:app:gamma:2} in Appendix, $\sqrt{\gamma_{N(T_{n+1}-T)}} \log(1/\gamma_{N(T_{n+1})}) \le C \sqrt{\gamma_{N(T_{n+1})}} \log(1/\gamma_{N(T_{n+1})}) \to 0$ and using Lemma \ref{lemma:app:gamma:1} we also have $$ \gamma_{N(T_{n+1}-T)} \log(1/\gamma_{N(T_{n+1})}) \le C\gamma_{N(T_{n+1})}^{1-\varepsilon} = o\left( n^{-1-\beta'} \right) = o(a_n - a_{n+1})$$ where $\beta'>0$ for small enough $\varepsilon$. So that $$ (b) \le C a_{n+1}^{-3} (a_n-a_{n+1}) [f]_{\text{Lip}} V^2(x) .$$ $\bullet$ \textbf{Term $(c^{\text{end}})$:} Using Lemma \ref{lemma:3.4.b:Y:bar} and $\gamma^{\text{end}} \le \gamma_{N(T_{n+1})+1} \le \gamma_{N(T_n)}$ yields: \begin{align*} |(P^{\bar{Y}}_{\gamma^{\text{end}}+\gamma_{N(T_{n+1})},\Gamma_{N(T_{n+1})-1}} - P^{X,n}_{\gamma^{\text{end}}+\gamma_{N(T_{n+1})}}) f(x)| \le C[f]_{\text{Lip}} \left(\sqrt{\gamma_{N(T_n)}}(a_n-a_{n+1})+\gamma_{N(T_n)}\right) V^{1/2}(x). \end{align*} Then we integrate with respect to $P^{\bar{Y}}_{\gamma^{\text{init}},T_n} \circ P^{\bar{Y}}_{\gamma_{N(T_n)+2},\Gamma_{N(T_n)+1}} \circ \cdots \circ P^{\bar{Y}}_{\gamma_{k-1},\Gamma_{k-2}}$ and apply Lemma \ref{lemma:D.1a}. 
$\bullet$ So we have finally that $| \mathbb{E}f(X_{T_{n+1}-T_n}^{x,n}) - \mathbb{E}f(\bar{Y}_{T_{n+1}-T_n,T_n}^{x})|$ is bounded by $$ C a_{n+1}^{-3} [f]_{\text{Lip}}(a_n-a_{n+1}) e^{C_1 a_{n+1}^{-2}}\rho_{n+1}^{-1} V^2(x) ,$$ which implies that, for every $x \in \mathbb{R}^d$, $$ \mathcal{W}_1([X_{T_{n+1}-T_n}^{x,n}], [\bar{Y}_{T_{n+1}-T_n,T_n}^x]) \le C a_{n+1}^{-3} (a_n-a_{n+1}) e^{C_1 a_{n+1}^{-2}}\rho_{n+1}^{-1} V^2(x) .$$ We integrate this inequality with respect to the laws of $X^{x_0}_{T_n}$ and $\bar{Y}_{T_n}^{x_0}$ and obtain, temporarily setting $x_n := X^{x_0}_{T_n}$ and $\bar{y}_n := \bar{Y}_{T_n}^{x_0}$, \begin{align*} \mathcal{W}_1([X^{x_0}_{T_{n+1}}] & , [\bar{Y}^{x_0}_{T_{n+1}}]) = \mathcal{W}_1([X_{T_{n+1}-T_n}^{x_n,n}], [\bar{Y}_{T_{n+1}-T_n,T_n}^{\bar{y}_n}]) \\ & \le \mathcal{W}_1([X_{T_{n+1}-T_n}^{x_n,n}], [X_{T_{n+1}-T_n}^{\bar{y}_n,n}]) + \mathcal{W}_1([X_{T_{n+1}-T_n}^{\bar{y}_n,n}], [\bar{Y}_{T_{n+1}-T_n,T_n}^{\bar{y}_n}]) \\ & \le Ce^{C_1 a_{n+1}^{-2}} e^{-\rho_{n+1} (T_{n+1}-T_n)} \mathcal{W}_1([X^{x_0}_{T_n}], [\bar{Y}_{T_n}^{x_0}]) + Ca_{n+1}^{-3} (a_n-a_{n+1}) e^{C_1 a_{n+1}^{-2}}\rho_{n+1}^{-1} \mathbb{E} V^2(\bar{Y}_{T_n}^{x_0}) \\ & \le Ce^{C_1 a_{n+1}^{-2}} e^{-\rho_{n+1} (T_{n+1}-T_n)} \mathcal{W}_1([X^{x_0}_{T_n}], [\bar{Y}_{T_n}^{x_0}]) + Ca_{n+1}^{-3} (a_n-a_{n+1}) e^{C_1 a_{n+1}^{-2}}\rho_{n+1}^{-1}V^2(x_0) \\ & =: \mu_{n+1} \mathcal{W}_1([X^{x_0}_{T_n}], [\bar{Y}_{T_n}^{x_0}]) + v_{n+1}V^2(x_0), \end{align*} where $\mu_n$ is defined in \eqref{eq:def_mu} and where we used again Lemma \ref{lemma:D.1a}. We use Lemma \ref{lemma:app:a_n_diff} to bound $(a_n - a_{n+1})$ and owing to \eqref{eq:hyp_A_Y_bar} we have $v_n \to 0$, so is bounded. We iterate this inequality and obtain \begin{align*} \mathcal{W}_1([X_{T_{n+1}}^{x_0}],[\bar{Y}_{T_{n+1}}^{x_0}]) & \le CV^2(x_0) \left(v_{n+1} + \mu_{n+1} v_n + \mu_{n+1} \mu_n v_{n-1} + \cdots + \mu_{n+1} \cdots \mu_2 v_1 \right) \\ & \le CV^2(x_0) \left( v_{n+1} + Cn\mu_{n+1} \right). \end{align*} But following \eqref{eq:def_mu} we have $n \mu_n = O(v_n)$ so that $$ \mathcal{W}_1([X_{T_{n+1}}^{x_0}],[\bar{Y}_{T_{n+1}}^{x_0}]) \le CV^2(x_0) v_{n+1} \le \frac{CV^2(x_0)}{(n+1)^{1-(\beta+1)(C_1+C_2)/A^2}} .$$ Moreover, owing to \eqref{eq:hyp_A_Y_bar} and combining with Theorem \ref{thm:conv_X} we get $$ \mathcal{W}_1([\bar{Y}^{x_0}_{T_n}],\nu_{a_n}) \le \mathcal{W}_1([\bar{Y}^{x_0}_{T_n}],[X^{x_0}_{T_n}]) + \mathcal{W}_1([X^{x_0}_{T_n}],\nu_{a_n}) \le \frac{C\max(1+|x_0|,V^2(x_0))}{n^{1-(\beta+1)(C_1+C_2)/A^2}} $$ and $$ \mathcal{W}_1([\bar{Y}^{x_0}_{T_n}],\nu^\star) \le \mathcal{W}_1([\bar{Y}^{x_0}_{T_n}],[X^{x_0}_{T_n}]) + \mathcal{W}_1([X^{x_0}_{T_n}],\nu^\star) \le Ca_n \max(1+|x_0|,V^2(x_0)) .$$ Finally, to prove that $\mathcal{W}_1([\bar{Y}^{x_0}_t],\nu^\star) \rightarrow 0$ as $t \to \infty$, we conclude as in the end of Section \ref{subsec:proof_Y}. \end{proof} \section{Convergence of the Euler-Maruyama scheme with plateau} \label{sec:bar-X} In this section, we consider the Euler-Maruyama scheme for $(X_t)$, that is \begin{align*} \bar{X}_0^{x_0} = x_0, \quad \bar{X}_{\Gamma_{k+1}}^{x_0} = \bar{X}_{\Gamma_k} + \gamma_{k+1} b_{a_{n+1}}(\bar{X}_{\Gamma_k}) + a_{n+1} \sigma(\bar{X}_{\Gamma_k})(W_{\Gamma_{k+1}} - W_{\Gamma_k}) \end{align*} for $k \in \lbrace N(T_n), \ldots, N(T_{n+1}) - 1 \rbrace$. We also define as in Section \ref{sec:bar-Y} the genuine time-continuous scheme and the Euler-Maruyama scheme for $(X^{x,n}_t)_t$ so that $\bar{X}^{x_0,0} = \bar{X}^x$. 
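Schematically, the only change with respect to the sketch of the previous section is that the noise level is frozen at $a_{n+1}$ on the whole plateau $[T_n,T_{n+1})$ instead of being updated at every grid time $\Gamma_k$, and that no perturbation $\zeta$ appears; the following Python helper (with \texttt{C\_T}, \texttt{A}, \texttt{beta} standing for $C_{(T)}$, $A$, $\beta$) returns this frozen level.
\begin{verbatim}
import numpy as np

def plateau_level(t, C_T=1.0, A=5.0, beta=0.5):
    # Frozen noise level a_{n+1} for t in [T_n, T_{n+1}), where T_n = C_T * n^(1+beta)
    # and a_n = A / sqrt(log(T_n + e)).
    n = 0
    while C_T * (n + 1) ** (1.0 + beta) <= t:
        n += 1                                   # after the loop, T_n <= t < T_{n+1}
    T_next = C_T * (n + 1) ** (1.0 + beta)
    return A / np.sqrt(np.log(T_next + np.e))    # a_{n+1} = a(T_{n+1})
\end{verbatim}
Replacing $a(\Gamma_k)$ by \texttt{plateau\_level}$(\Gamma_k)$ in the one-step update sketched in the previous section (and using the exact gradient instead of a noisy estimate) gives the update of $\bar{X}$ above.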
Although we already proved the convergence of the Euler-Maruyama scheme for $(Y_t)$, we shall also prove the convergence of the present scheme, since this algorithm is also used by practitioners within the framework of batch methods. \begin{theorem} \label{thm:main:3} Assume \eqref{Eq:eq:min_V}, \eqref{Eq:eq:V_assumptions}, \eqref{Eq:eq:sigma_assumptions}, \eqref{eq:ellipticity} and \eqref{Eq:eq:V_confluence}. Assume furthermore \eqref{Eq:eq:gamma_assumptions} and \eqref{Eq:eq:gamma_assumptions_2}, that $V$ is $\mathcal{C}^3$ with $\|\nabla^3 V\| \le CV^{1/2}$ and that $\sigma$ is $\mathcal{C}^3$ with $\|\nabla^3(\sigma \sigma^\top)\| \le CV^{1/2}$. Then for large enough $A>0$ and for every $x_0 \in \mathbb{R}^d$, $$ \mathcal{W}_1([\bar{X}^{x_0}_t], \nu^\star) \underset{t \rightarrow \infty}{\longrightarrow} 0 .$$ \end{theorem} The proof of this theorem is given in the Supplementary Material. \section{Experiments} \label{sec:experiments} In this section, we compare the performance of adaptive Langevin-Simulated Annealing algorithms with vanilla SGLD, that is, the Langevin algorithm with constant (additive) $\sigma$\footnote{Our code is available at \url{https://github.com/Bras-P/langevin-simulated-annealing}.}. We train an artificial neural network on the MNIST dataset \cite{mnist}, which is composed of grayscale images of size $28 \times 28$ of handwritten digits (from 0 to 9). The goal is to recognize the handwritten digit, i.e. to classify the images; 60000 images are used for training and 10000 images are used for testing. We consider a feedforward neural network with two hidden dense layers with 128 units each and with ReLU activation. For the adaptive Langevin algorithms, we choose the function $\sigma$ as a diagonal matrix given by the square root of the preconditioner of RMSprop \cite{li2015}, Adam \cite{adam} and Adadelta \cite{adadelta} respectively (see also Section \ref{subsec:practitioner}), giving the algorithms L-RMSprop, L-Adam and L-Adadelta. The results are given in Figure \ref{fig:MNIST:langevin} and in Table \ref{table:MNIST:langevin}. As pointed out in the literature (see the references in Section \ref{subsec:practitioner}), the preconditioned Langevin algorithms show significant improvement compared with the vanilla SGLD algorithm. The convergence is faster and they achieve a lower error on the test set. We also display the value of the loss function on the training set during training to show that the better performance of the preconditioned algorithms is not due to some overfitting effect. \begin{figure} \caption{\textit{Performance of preconditioned Langevin algorithms compared with vanilla SGLD on the MNIST dataset. The values of the hyperparameters are $a(n) = A \log^{-1/2} \label{fig:MNIST:langevin} \end{figure} \begin{table} \centering \begin{tabular}{ccccc} \hline Algorithm & SGLD & L-RMSprop & L-Adam & L-Adadelta \\ \hline Best accuracy & 95.24 \% & 96.94 \% & 97.60 \% & 97.63 \% \end{tabular} \caption{\textit{Best accuracy on the MNIST test set after 10 epochs.}} \label{table:MNIST:langevin} \end{table} We also compare the preconditioned Langevin algorithms with their respective non-Langevin counterparts. For shallow neural networks, adding an exogenous noise does not seem to significantly improve the performance of the optimization algorithm.
However, for deep neural networks, which are highly non-linear and whose loss function has many local minima, the Langevin version is competitive with the currently widely used non-Langevin algorithms and can even lead to improvement. The results are given in Figure \ref{fig:MNIST:deep}, where we used a deep neural network with 20 hidden layers with 32 units each and with ReLU activation.

\begin{figure}
\caption{\textit{Side-by-side comparison of optimization algorithms with their respective Langevin counterparts for the training of a deep neural network on the MNIST dataset. We display the performance of SGD for reference. The values of the hyperparameters are $a(n) = A \log^{-1/2}
\label{fig:MNIST:deep}
\end{figure}

In order to understand how sensitive these methods are to poor initialization, we run an experiment on the previous deep neural network where all the weights are initialized to zero, as in \cite[Section 4.1]{neelakantan2015}. We plot the accuracy on the test set in Figure \ref{fig:MNIST:zeros}. We observe that the non-Langevin optimizer needs some time before escaping from the neighbourhood of the initial point, whereas in its Langevin version the Gaussian noise allows it to rapidly escape from highly degenerate saddle points of the loss.

\begin{figure}
\caption{\textit{Performance of the Adam optimizer compared with its Langevin version at the beginning of the training of a deep neural network on the MNIST dataset with poor initialization. We record the accuracy on the test set 10 times per epoch.}}
\label{fig:MNIST:zeros}
\end{figure}

\appendix
\section{Appendix}

\begin{lemma} \label{lemma:BDG}
Let $Z$ and $\widetilde{Z}$ be two continuous diffusion processes. Then for all $t \ge 0$ and for all $p \ge 2$:
$$ \left\| \int_0^t (\sigma(Z_s) - \sigma(\widetilde{Z}_s)) dW_s \right\|_p \le C^{BDG}_p [\sigma]_{\textup{Lip}} \left(\int_0^t \| Z_s - \widetilde{Z}_s \|_p^2 ds\right)^{1/2} ,$$
where $C^{BDG}_p$ is a constant which only depends on $p$.
\end{lemma}

\begin{proof}
It follows from the generalized Minkowski and the Burkholder-Davis-Gundy inequalities that
$$ \left\| \int_0^t (\sigma(Z_s) - \sigma(\widetilde{Z}_s)) dW_s \right\|_p \le C^{BDG}_p [\sigma]_{\text{Lip}} \left\| \int_0^t |Z_s - \widetilde{Z}_s|^2 ds \right\|_{p/2}^{1/2} \le C^{BDG}_p [\sigma]_{\text{Lip}} \left( \int_0^t \|Z_s - \widetilde{Z}_s\|^2_p ds \right)^{1/2} .$$
\end{proof}

We now give some results on the step sequence $(\gamma_n)$ associated to the Euler-Maruyama scheme. Let us recall that the sequence $(T_n)$ is defined in \eqref{eq:def_T_n}.

\begin{lemma} \label{lemma:app:tauber:1}
Let $(u_n)$ be a positive and non-increasing sequence such that $\sum_n u_n < \infty$. Then $u_n = o(n^{-1})$.
\end{lemma}

\begin{proof}
We have $Nu_{2N} \le \sum_{n=N}^{2N} u_n \to 0$ as $N \to \infty$.
\end{proof}

\begin{lemma} \label{lemma:app:gamma:1}
We have
\begin{equation}
\gamma_{N(T_n)} = o\left(n^{-(1+\beta)}\right).
\end{equation}
\end{lemma}

\begin{proof}
Using the previous lemma, $\gamma_n = o(n^{-1/2})$, so that $\Gamma_n = o(n^{1/2})$, hence $x^2 = o(N(x))$ as $x \rightarrow \infty$, and thus $ \gamma_{N(T_n)} = o\left(N(T_n)^{-1/2}\right) = o\left(n^{-(1+\beta)}\right).$
\end{proof}

\begin{lemma} \label{lemma:app:gamma:2}
The sequence $(\gamma_{N(T_{n+1}-T)}/\gamma_{N(T_{n+1})})$ is bounded.
\end{lemma}

\begin{proof}
Using \eqref{Eq:eq:gamma_assumptions_2}, we have for $\varpi'>\varpi$ and for large enough $k$, $(\gamma_k-\gamma_{k+1})/\gamma_{k+1}^2 \le \varpi'$, so that $\gamma_k/\gamma_{k+1} \le 1 + \varpi' \gamma_{k+1}$ and then
\begin{align*}
\log\left(\frac{\gamma_{N(T_{n+1}-T)}}{\gamma_{N(T_{n+1})}}\right) & = \sum_{k=N(T_{n+1}-T)}^{N(T_{n+1})-1} \log\left(\frac{\gamma_k}{\gamma_{k+1}}\right) \le C \sum_{k=N(T_{n+1}-T)}^{N(T_{n+1})-1} \gamma_k = C\left(\Gamma_{N(T_{n+1})}-\Gamma_{N(T_{n+1}-T)}\right) \\
& \le C(T_{n+1}-(T_{n+1}-T)) = CT,
\end{align*}
which proves the claim.
\end{proof}

\input{langevin-multiplicative.bbl}

\section{Supplementary Material}

\subsection{Proof of Proposition \ref{prop:W_nu}}

\begin{proof}
We have
$$\frac{\nu_{a_{n+1}}(x)}{\nu_{a_n}(x)} = \frac{\mathcal{Z}_{a_{n+1}}}{\mathcal{Z}_{a_n}} e^{-2(V(x)-V^\star)(a_{n+1}^{-2} - a_{n}^{-2})} \le \frac{\mathcal{Z}_{a_{n+1}}}{\mathcal{Z}_{a_n}} =: M_n.$$
We now consider $(P_i)_{1 \le i \le m^\star}$ a partition of $\mathbb{R}^d$ such that for all $i$, $x_i^\star \in \mathring{P_i}$. Let us prove that for all $1 \le i \le m^\star$,
\begin{equation} \label{eq:Z_a_equivalent:3}
\mathcal{Z}_{a,i}^{-1} := \int_{\mathbb{R}^d} e^{-2(V(x)-V^\star)/a^2} \mathds{1}_{x \in P_i} dx \underset{a \to 0}{\sim} a^d \int_{\mathbb{R}^d} e^{-x^\top \nabla^2 V(x_i^\star) x} dx .
\end{equation}
Let $r>0$; let us consider $\widetilde{V}_i$ defined as
$$ \widetilde{V}_i(x) = \left\lbrace \begin{array}{ll} V(x) & \text{ if } x \in \textbf{B}(x_i^\star,r) \\ |x-x_i^\star|^2 + V^\star & \text{ otherwise} . \end{array} \right. $$
We also define $\widetilde{\mathcal{Z}}_{a,i}^{-1} := \int_{\mathbb{R}^d} e^{-2(\widetilde{V}_i(x)-V^\star)/a^2}\mathds{1}_{x \in P_i}dx$. Then, owing to $V^\star >0$ and \eqref{Eq:eq:V_assumptions},
\begin{equation} \label{eq:Z_a_equivalent:2}
\forall x \in \mathbb{R}^d, \ C|x-x_i^\star|^2 \le \widetilde{V}_i(x) - V^\star \le C'|x-x_i^\star|^2
\end{equation}
and then
$$ \widetilde{\mathcal{Z}}_{a,i}^{-1} = a^d \int_{\mathbb{R}^d} e^{-2(\widetilde{V}_i(ax+x_i^\star)-V^\star)/a^2} \mathds{1}_{x \in a^{-1}(P_i-x_i^\star)} dx \underset{a \rightarrow 0}{\sim} a^d \int_{\mathbb{R}^d} e^{-x^\top \nabla^2 V(x_i^\star) x} dx ,$$
where we get the equivalence by dominated convergence; the domination comes from \eqref{eq:Z_a_equivalent:2}. Then
\begin{align*}
& \mathcal{Z}_{a,i}^{-1} - \widetilde{\mathcal{Z}}_{a,i}^{-1} = \int_{\textbf{B}(x_i^\star,r)^c} e^{-2(V(x)-V^\star)/a^2}\mathds{1}_{x \in P_i}dx - \int_{\textbf{B}(x_i^\star,r)^c} e^{-2(\widetilde{V}_i(x)-V^\star)/a^2}\mathds{1}_{x \in P_i}dx =: I_1 - I_2, \\
& I_2 = a^d \int_{\textbf{B}(0,r/a)^c} e^{-2|x|^2} \mathds{1}_{x \in a^{-1}(P_i-x_i^\star)} dx \le a^d \int_{\textbf{B}(0,r/a)^c} e^{-2|x|^2}dx = o(a^d) = o\left(\widetilde{\mathcal{Z}}_{a,i}^{-1}\right).
\end{align*}
Moreover, using \cite[Proposition 1]{bras2021} we have $\mathcal{Z}_{a,i} I_1 \to 0$ as $a\to 0$, so that
$$ \mathcal{Z}_{a,i}^{-1} = \widetilde{\mathcal{Z}}_{a,i}^{-1} + o\left(\mathcal{Z}_{a,i}^{-1}\right) + o\left( \widetilde{\mathcal{Z}}_{a,i}^{-1}\right) \sim \widetilde{\mathcal{Z}}_{a,i}^{-1} ,$$
which proves \eqref{eq:Z_a_equivalent:3} and then
\begin{align} \label{eq:Z_a_equivalent}
\mathcal{Z}_a^{-1} \underset{a \rightarrow 0}{\sim} a^d \sum_{i=1}^{m^\star} \int_{\mathbb{R}^d} e^{-x^\top \nabla^2 V(x_i^\star) x} dx .
\end{align}
We now prove that
\begin{equation} \label{eq:app:2_a_diff}
\mathcal{Z}_{a_n}^{-1} - \mathcal{Z}_{a_{n+1}}^{-1} \le C a_{n+1}^{d-1}(a_n-a_{n+1}) .
\end{equation} Indeed, by convexity we have for all $z \in \mathbb{R}$ \begin{align} \label{eq:app:exp_x_2} \left|e^{-2z/a_{n}^2} - e^{-2z/a_{n+1}^2} \right| & \le 2e^{-2z/a_{n}^2}z \left|\frac{1}{a_n^2} - \frac{1}{a_{n+1}^2}\right| \le 4 e^{-2z/a_{n}^2} \frac{z}{a_{n+1}^2} \frac{(a_n-a_{n+1})}{a_n}. \end{align} and then \begin{align*} & \mathcal{Z}_{a_n,i}^{-1} - \mathcal{Z}_{a_{n+1},i}^{-1} \\ & \quad = a_{n+1}^d \int_{\mathbb{R}^d} \left(e^{-2(V(a_{n+1}x+x_i^\star)-V^\star)/a_n^2} \mathds{1}_{x \in a_{n+1}^{-1} (P_i-x_i^\star)} - e^{-2(V(a_{n+1}x+x_i^\star)-V^\star)/a_{n+1}^2}\mathds{1}_{x \in a_{n+1}^{-1} (P_i-x_i^\star)}\right) dx \\ & \quad \le 4a_{n+1}^{d-1}(a_n - a_{n+1}) \underbrace{\int_{\mathbb{R}^d} e^{-2(V(a_{n+1}x+x_i^\star)-V^\star)/a_{n}^2} \frac{V(a_{n+1}x+x_i^\star)-V^\star}{a_{n+1}^2}\mathds{1}_{x \in a_{n+1}^{-1} (P_i-x_i^\star)} dx}_{:= I_3}. \end{align*} Let us also define $$ \widetilde{I}_3 := \int_{\mathbb{R}^d} e^{-2(\widetilde{V}_i(a_{n+1}x+x_i^\star)-V^\star)/a_{n}^2} \frac{\widetilde{V}_i(a_{n+1}x+x_i^\star)-V^\star}{a_{n+1}^2}\mathds{1}_{x \in a_{n+1}^{-1} (P_i-x_i^\star)} dx .$$ Then $\widetilde{I_3}$ converges by dominated convergence and $|I_3 - \widetilde{I}_3|$ is bounded by \begin{align*} & \left|\int_{\mathbb{R}^d} \left(e^{-2(V(a_{n+1}x+x_i^\star)-V^\star)/a_{n}^2} \frac{V(a_{n+1}x+x_i^\star)-V^\star}{a_{n+1}^2} - e^{-2(\widetilde{V}_i(a_{n+1}x+x_i^\star)-V^\star)/a_{n}^2} \frac{\widetilde{V}_i(a_{n+1}x+x_i^\star)-V^\star}{a_{n+1}^2}\right) \right. \\ & \quad \quad \quad \mathds{1}_{x \in a_{n+1}^{-1} (P_i-x_i^\star)} dx \Big| \\ & \quad \le a_{n+1}^{-d-2} \int_{\textbf{B}(x_i^\star,r)^c} e^{-2(V(x)-V^\star)/a_{n}^2} (V(x)-V^\star) \mathds{1}_{x \in P_i} dx \\ & \quad \quad + \int_{\textbf{B}(0,r/a_{n+1})^c} e^{-2(\widetilde{V}_i(a_{n+1}x+x_i^\star)-V^\star)/a_{n}^2} \frac{\widetilde{V}_i(a_{n+1}x+x_i^\star)-V^\star}{a_{n+1}^2} \mathds{1}_{x \in a_{n+1}^{-1} (P_i-x_i^\star)} dx. \end{align*} The second integral converges to $0$ by dominated convergence by similar arguments as for $I_2$. Moreover we have for every $x \in \textbf{B}(x_i^\star,r)^c \cap P_i$, $V(x)-V^\star \ge \varepsilon$ for some $\varepsilon > 0$ and then for $n$ such that $a_n \le A/\sqrt{2}$: \begin{align*} & a_{n+1}^{-d-2} \int_{\textbf{B}(x_i^\star,r)^c} e^{-2(V(x)-V^\star)/a_{n}^2} (V(x)-V^\star) \mathds{1}_{x \in P_i} dx \\ & \quad \le Ca_{n+1}^{-d-2} \int_{\textbf{B}(x_i^\star,r)^c} e^{-2(V(x)-V^\star)/a_{n}^2} |x-x_i^\star|^2 \mathds{1}_{x \in P_i} dx \\ & \quad \le Ca_{n+1}^{-d-2} e^{-\varepsilon/a_n^2} \int_{\textbf{B}(x_i^\star,r)^c} e^{-(V(x)-V^\star)/a_{n}^2} |x-x_i^\star|^2 \mathds{1}_{x \in P_i} dx \\ & \quad \le Ca_{n+1}^{-d-2} e^{-\varepsilon/a_n^2} \int_{\mathbb{R}^d} e^{-2(V(x)-V^\star)/A^2} |x-x_i^\star|^2 dx \underset{n \to \infty}{\longrightarrow} 0, \end{align*} where we used that $(x \mapsto |x|^2 e^{-2(V(x)-V^\star)/A^2}) \in L^1(\mathbb{R}^d)$. Then we obtain that $I_3$ converges to $\widetilde{I}_3$, which proves \eqref{eq:app:2_a_diff}. Then we have $$ 1 - M_n^{-1} = \frac{\mathcal{Z}_{a_n}^{-1}-\mathcal{Z}_{a_{n+1}}^{-1}}{\mathcal{Z}_{a_n}^{-1}} \le C\frac{a_n-a_{n+1}}{a_n} \le \frac{C}{n\log(n)}. 
$$ On the other hand, if $X \sim \nu_{a_{n+1}}$, $\tilde{X} \sim \nu_{a_{n+1}}$, $Y \sim \nu_{a_{n}}$ and $X$, $\tilde{X}$ and $Y$ are mutually independent then \begin{align*} & \left| \mathbb{E}|X-Y| - \mathbb{E}|X - \tilde{X}| \right| \\ & = \Big| a_{n+1}^d \mathcal{Z}_{a_n} a_{n+1}^d \mathcal{Z}_{a_{n+1}} \sum_{i,j=1}^{m^\star} \int \int a_{n+1}|x - y| e^{-2(V(a_{n+1}x+x_i^\star)-V^\star)/a_{n+1}^2} e^{-2(V(a_{n+1}y+x_i^\star)-V^\star)/a_{n}^2} \\ & \qquad \qquad \qquad \qquad \qquad \qquad \qquad \mathds{1}_{x \in a_{n+1}^{-1} (P_i-x_i^\star)} \mathds{1}_{y \in a_{n+1}^{-1} (P_j-x_i^\star)} dxdy \\ & \quad \quad - (a_{n+1}^d \mathcal{Z}_{a_{n+1}})^2 \sum_{i,j=1}^{m^\star} \int \int a_{n+1} |x - y| e^{-2(V(a_{n+1}x+x_i^\star)-V^\star)/a_{n+1}^2} e^{-2(V(a_{n+1}y+x_i^\star)-V^\star)/a_{n+1}^2} \\ & \qquad \qquad \qquad \qquad \qquad \qquad \qquad \mathds{1}_{x \in a_{n+1}^{-1} (P_i-x_i^\star)} \mathds{1}_{y \in a_{n+1}^{-1} (P_j-x_i^\star)} dxdy \Big| \\ & \quad = a_{n+1}^{2d+1}\mathcal{Z}_{a_{n+1}} \sum_{i,j=1}^{m^\star} \int \int |x-y| e^{-2(V(a_{n+1}x+x_i^\star)-V^\star)/a_{n+1}^2} \\ & \quad \quad \cdot \left|\mathcal{Z}_{a_n} e^{-2(V(a_{n+1}y+x_i^\star)-V^\star)/a_{n}^2} - \mathcal{Z}_{a_{n+1}} e^{-2(V(a_{n+1}y+x_i^\star)-V^\star)/a_{n+1}^2} \right| \mathds{1}_{x \in a_{n+1}^{-1} (P_i-x_i^\star)} \mathds{1}_{y \in a_{n+1}^{-1} (P_j-x_i^\star)} dxdy \\ & \quad \le a_{n+1}\left(a_{n+1}^{2d}\mathcal{Z}_{a_{n+1}}^2\right) \sum_{i,j=1}^{m^\star} \int \int |x-y| e^{-2(V(a_{n+1}x+x_i^\star)-V^\star)/a_{n+1}^2} \\ & \quad \quad \quad \quad \cdot \left|e^{-2(V(a_{n+1}y+x_i^\star)-V^\star)/a_{n}^2} - e^{-2(V(a_{n+1}y+x_i^\star)-V^\star)/a_{n+1}^2} \right| \mathds{1}_{x \in a_{n+1}^{-1} (P_i-x_i^\star)} \mathds{1}_{y \in a_{n+1}^{-1} (P_j-x_i^\star)} dxdy \\ & \quad \quad + a_{n+1} \left(a_{n+1}^{2d}\mathcal{Z}_{a_{n+1}}^2\right) \sum_{i,j=1}^{m^\star} \int \int |x-y| e^{-2(V(a_{n+1}x+x_i^\star)-V^\star)/a_{n+1}^2} e^{-2(V(a_{n+1}y+x_i^\star)-V^\star)/a_{n}^2} \\ & \quad \quad \quad \quad \cdot \left|1 - \frac{\mathcal{Z}_{a_{n}}}{\mathcal{Z}_{a_{n+1}}}\right| \mathds{1}_{x \in a_{n+1}^{-1} (P_i-x_i^\star)} \mathds{1}_{y \in a_{n+1}^{-1} (P_j-x_i^\star)} dxdy. \end{align*} So using \eqref{eq:app:exp_x_2}, dominated convergence as for the proof of \eqref{eq:Z_a_equivalent}, \eqref{eq:Z_a_equivalent} itself with \eqref{eq:a_n_diff} and the bound for $1-\mathcal{Z}_{a_n}/\mathcal{Z}_{a_{n+1}} = 1-M_n^{-1}$ we have \begin{align*} & \limsup_{n \to \infty}\left[ n\log^{3/2}(n) \left| \mathbb{E}|X-Y| - \mathbb{E}|X - \tilde{X}| \right| \right] \\ & \quad \le C \sum_{i=1}^{m^\star} \int \int |x-y| e^{-x^\top \nabla^2 V(x_i^\star) x} e^{-y^\top \nabla^2 V(x_i^\star) y} \left(1+y^\top \nabla^2 V(x_i^\star) y\right) dx dy. \end{align*} So that using Lemma \ref{lemma:acceptance_rejection} and the fact that $\mathbb{E}|X-\tilde{X}|$ is of order $a_n$ we have \begin{align*} \mathcal{W}_1(\nu_{a_n},\nu_{a_{n+1}}) & \le \mathbb{E}|X-Y| - \frac{1}{M_n}\mathbb{E}|X-\tilde{X}| \le \mathbb{E}|X-Y| - \mathbb{E}|X - \tilde{X}| + \frac{C}{n\log(n)}\mathbb{E}|X-\tilde{X}| \\ & \le \frac{C}{n\log^{3/2}(n)}. \end{align*} The proof for the second claim is similar. 
\end{proof}

\subsection{Proof of Proposition \ref{prop:3.5:Y:bar}}

\begin{proof}
As in the proof of \cite[Proposition 3.5]{pages2020}, we split $ | \mathbb{E}[g(\bar{Y}^x_{\gamma,u})] - \mathbb{E}\left[g(X^{x,n}_{\gamma})\right]|$ into four terms $A_1$, $A_2$, $A_3$ and $A_4$. By the Taylor formula, for every $y$, $z \in \mathbb{R}^d$,
$$ g(z)-g(y)=\langle \nabla g(y)|z-y \rangle +\int_0^1 (1-\theta)\nabla^2g\left(\theta z+(1-\theta)y \right)d\theta (z-y)^{\otimes 2}. $$
For a given $x\in\mathbb{R}^d$, it follows that
\begin{align*}
g(z)-g(y) & = \langle \nabla g(x)|z-y \rangle + \langle \nabla g(y)-\nabla g(x)|z-y \rangle +\int_0^1 (1-\theta)\nabla^2g\left(\theta z+(1-\theta)y \right) (z-y)^{\otimes 2}d\theta\\
&=\langle\nabla g(x)|z-y \rangle +\langle \nabla^2 g(x) (y-x) |z-y \rangle\\
&\quad + \int_0^1(1-\theta)\nabla^3 g(\theta y+(1-\theta)x)(y-x)^{\otimes 2} (z-y)d\theta\\
&\quad + \int_0^1 (1-\theta)\nabla^2 g \left(\theta z+(1-\theta)y \right)d\theta (z-y)^{\otimes 2},
\end{align*}
where we write $\theta$ for the Taylor integration variable to avoid confusion with the time $u$.
Applying this expansion with $y=X^{x,n}_{\gamma}$ and $z=\bar{Y}^x_{\gamma,u}$ yields:
\begin{align*}
\mathbb{E}[&g(\bar{Y}^x_{\gamma,u})-g(X^{x,n}_{\gamma})]=\underbrace{\langle \nabla g(x)|\mathbb{E}[\bar{Y}^x_{\gamma,u} -X^{x,n}_{\gamma}]\rangle}_{=:A_1} + \underbrace{\mathbb{E}\left[\langle \nabla^2 g(x) (X^{x,n}_{\gamma}-x) |\bar{Y}^x_{\gamma,u}-X^{x,n}_{\gamma} \rangle \right]}_{=:A_2}\\
&+\underbrace{\mathbb{E}\left[\int_0^1(1-\theta)\nabla^3 g(\theta X^{x,n}_{\gamma}+(1-\theta)x) (X^{x,n}_{\gamma}-x)^{\otimes 2} (\bar{Y}^x_{\gamma,u}-X^{x,n}_{\gamma})d\theta\right]}_{=:A_3}\\
& +\underbrace{\int_0^1 (1-\theta) \mathbb{E} \left[\nabla^2g\left(\theta\bar{Y}^x_{\gamma,u}+(1-\theta)X^{x,n}_{\gamma} \right) (\bar{Y}^x_{\gamma,u}-X^{x,n}_{\gamma})^{\otimes 2}\right]d\theta}_{=:A_4}.
\end{align*}

$\bullet$ \textbf{Term} $A_1$: The term $A_1$ is bounded by $|\nabla g(x)| \cdot |\mathbb{E}[\bar{Y}^x_{\gamma,u}-X^{x,n}_\gamma]|$, with
\begin{align*}
\mathbb{E}[\bar{Y}^x_{\gamma,u}-X^{x,n}_\gamma] & = \mathbb{E}\left[ \int_0^\gamma (b_{a(u)}(x)-b_{a(u)}(X^{x,n}_s))ds \right] + \mathbb{E}\left[ \int_0^\gamma (b_{a(u)}(X^{x,n}_s) - b_{a_{n+1}}(X^{x,n}_s))ds \right] \\
& =: A_{11} + A_{12}.
\end{align*}
We have $|A_{12}| \le \gamma ||\Upsilon||_\infty (a_n^2 - a_{n+1}^2)$ and
\begin{align*}
|A_{11}| & = \left| \int_0^\gamma \int_0^s \mathbb{E}\left[\nabla b_{a(u)}(X^{x,n}_v)b_{a(u)}(X^{x,n}_v) + \frac{1}{2} \nabla^2 b_{a(u)}(X^{x,n}_v)a_{n+1}^2\sigma \sigma^\top (X^{x,n}_v) \right] dv \right| \\
& \le C\gamma^2 \sup_{v \in [0,\gamma]} \mathbb{E}[V^{1/2}(X^{x,n}_v)] \le C\gamma^2 V^{1/2}(x),
\end{align*}
where we used that $|\nabla b_a| \le C$ and $\|\nabla^2 b_a \| \le CV^{1/2}$ because we assumed $\|\nabla^3 V\| \le CV^{1/2}$ and $\|\nabla^3(\sigma \sigma^\top) \| \le CV^{1/2}$.

$\bullet$ \textbf{Term} $A_2$: We have:
$$ |A_2| \le \sum_{1\le i,j \le d}|\partial_{ij}g(x)| |\mathbb{E}[(X^{x,n}_\gamma-x)_i(X_\gamma^{x,n}-\bar{Y}^x_{\gamma,u})_j]| $$
and we have
$$ \mathbb{E}[(X^{x,n}_\gamma-x)_i(X_\gamma^{x,n}-\bar{Y}^x_{\gamma,u})_j] = \mathbb{E}[(X^{x,n}_\gamma-\bar{Y}^x_{\gamma,u})_i(X_\gamma^{x,n}-\bar{Y}^x_{\gamma,u})_j] + \mathbb{E}[(\bar{Y}^x_{\gamma,u}-x)_i(X_\gamma^{x,n}-\bar{Y}^x_{\gamma,u})_j].
$$
Using Lemma \ref{lemma:3.4.b:Y:bar}, the first term of the right-hand side is bounded by $C(V^{1/2}(x)\gamma + \sqrt{\gamma}(a_n-a_{n+1}))^2$ and in the second term we write $(\bar{Y}^x_{\gamma,u}-x)_i = \left(\gamma b_{a(u)}(x) + \gamma \zeta_{k+1}(x) + a(u)\sigma(x)W_\gamma \right)_i$ and we have
$$ |\mathbb{E}[(\gamma b_{a(u)}(x) + \gamma \zeta_{k+1}(x))_i(X_\gamma^{x,n}-\bar{Y}^x_{\gamma,u})_j]| \le \gamma V^{1/2}(x) (V^{1/2}(x)\gamma + \sqrt{\gamma}(a_n-a_{n+1})) $$
and, using that the increments of $\zeta$ and $W$ are independent,
$$ |\mathbb{E}[\left(a(u)\sigma(x)W_\gamma \right)_i (\gamma \zeta_{k+1}(x))_j]| = 0 $$
and, using the It\^o isometry:
\begin{align*}
& \left| \mathbb{E}\left[ \left(a(u)\sigma(x)W_\gamma \right)_i \left( \int_0^\gamma(b_{a_{n+1}}(X^{x,n}_s) - b_{a_{n+1}}(x) + b_{a_{n+1}}(x) - b_{a(u)}(x))_j ds \right.\right.\right. \\
& \quad \left.\left.\left. + \int_0^\gamma ((a_{n+1}\sigma(X^{x,n}_s) - a_{n+1}\sigma(x) + a_{n+1}\sigma(x) - a(u)\sigma(x))dW_s)_j \right) \right] \right| \\
& \le C[b]_{\text{Lip}} \int_0^\gamma ||W_\gamma||_2 ||X^{x,n}_s -x||_2 ds + C(a_n^2-a_{n+1}^2)||W_\gamma||_1 \gamma ||\Upsilon||_\infty \\
& \quad + C\left|\sum_{k=1}^d \int_0^\gamma \mathbb{E}[\sigma_{ik}(x)(\sigma_{jk}(X^{x,n}_s)-\sigma_{jk}(x))] ds \right| + C(a_n - a_{n+1})\mathbb{E}[W_\gamma^2] \\
& \le CV^{1/2}(x)\gamma^2 + C(a_n-a_{n+1})\gamma^{3/2} + CV^{1/2}(x)\gamma^2 + C(a_n-a_{n+1})\gamma,
\end{align*}
where we used an argument similar to the one for $A_{11}$ to bound the third term, using that $\nabla \sigma$ and $\nabla^2 \sigma$ are bounded.

$\bullet$ \textbf{Term} $A_3$: Using the threefold Cauchy-Schwarz inequality, Lemma \ref{lemma:3.4.a} and Lemma \ref{lemma:3.4.b:Y:bar}, $A_3$ is bounded by
$$ C \left|\left| \sup_{\xi \in (x, X^{x,n}_\gamma)} ||\nabla^3 g(\xi)|| \right|\right|_4 V(x)\gamma \left(V^{1/2}(x)\gamma + \sqrt{\gamma}(a_n-a_{n+1})\right) .$$

$\bullet$ \textbf{Term} $A_4$: Using Lemma \ref{lemma:3.4.b:Y:bar}, $A_4$ is bounded by
$$ C \left(V^{1/2}(x) \gamma + \sqrt{\gamma} (a_n - a_{n+1})\right)^2 \left|\left|\sup_{\xi \in (X_\gamma^{x,n}, \bar{Y}_{\gamma,u}^x)} || \nabla^2 g(\xi) || \right|\right|_2 .$$
\end{proof}
The remaining parts of the proof are the same as in the positive definite case.
\end{proof}

\subsection{Proof of Theorem \ref{thm:main:3}}

To prove Theorem \ref{thm:main:3}, we proceed as for the proof of Theorem \ref{thm:main}. In the following, for $\gamma>0$ we denote by $(\bar{X}^{x,n,\gamma}_t)_{t \in [0,\gamma]}$ the Euler-Maruyama scheme over one step with coefficient $a_{n+1}$. We first recall \cite[Lemma 3.4(b), Proposition 3.5(a)]{pages2020}, which give bounds for the weak and strong errors of the one-step Euler-Maruyama scheme that do not depend on the ellipticity parameter $a_n$.

\begin{lemma} \label{lemma:3.4.b:X:bar}
Let $p \ge 1$ and let $\bar{\gamma}>0$. There exists $C>0$ such that for every $n \ge 0$, for every $\gamma \in (0,\bar{\gamma}]$ and every $t \in [0,\gamma]$:
$$ ||X_t^{x,n} - \bar{X}_t^{x,n,\gamma}||_p \le CV^{1/2}(x)t .$$
\end{lemma}

\begin{proposition} \label{prop:3.5.a:X:bar}
Let $\bar{\gamma}>0$. Then for every $\mathcal{C}^3$ function $g : \mathbb{R}^d \to \mathbb{R}$ and for every $0 \le \gamma \le \gamma' \le \bar{\gamma}$:
$$ \left| \mathbb{E}\left[g(\bar{X}^{x,n,\gamma'}_\gamma)\right] - \mathbb{E}\left[g(X^{x,n}_\gamma)\right] \right| \le C V^{3/2}(x) \gamma^2 \Phi_g(x) ,$$
where
$$ \Phi_g(x) = \max \left( |\nabla g(x)|, || \nabla^2 g(x) ||, \left|\left| \sup_{\xi \in (X^{x,n}_\gamma, \bar{X}^{x,n,\gamma'}_\gamma)} ||\nabla^2 g(\xi)|| \right|\right|_2, \left|\left| \sup_{\xi \in (x, X^{x,n}_\gamma)} ||\nabla^3 g(\xi)|| \right|\right|_4 \right) .$$
\end{proposition}

\begin{proposition} \label{prop:3.6:X:bar}
Let $T$, $\bar{\gamma}>0$. Then for every Lipschitz continuous function $f : \mathbb{R}^d \to \mathbb{R}$, for every $n \ge 0$ and every $t \in (0,T]$ and every $0 \le \gamma \le \gamma' \le \bar{\gamma}$:
$$ \left| \mathbb{E}\left[P_t f(\bar{X}_\gamma^{x,n,\gamma'})\right] - \mathbb{E}\left[P_t f(X_\gamma^{x,n})\right] \right| \le C a_n^{-3} [f]_{\textup{Lip}} \gamma^2 t^{-1} V^2(x). $$
\end{proposition}

\begin{proof}
The proof is the same as in \cite[Proposition 3.6]{pages2020}. When applying \cite[Proposition 3.2(b)]{pages2020}, we remark that the lowest exponent of $\ubar{\sigma}_0$ is $-3$.
\end{proof}

Moreover, by the same proof as in Lemma \ref{lemma:D.1a} we get
$$ \textstyle \sup_{m \ge k+1} \mathbb{E} V^p(\bar{X}_{\Gamma_m-\Gamma_k}^{x,n}) \le CV^p(x). $$
We now prove Theorem \ref{thm:main:3}.

\begin{proof}
Let us write:
$$ \mathcal{W}_1([\bar{X}_{T_n}^{x_0}],\nu^\star) \le \mathcal{W}_1([\bar{X}_{T_n}^{x_0}], [X_{T_n}^{x_0}]) + \mathcal{W}_1([X_{T_n}^{x_0}],\nu^\star). $$
Temporarily setting $\bar{x}_n := \bar{X}_{T_n}^{x_0}$ and $x_n := X_{T_n}^{x_0}$, we have
\begin{align*}
\mathcal{W}_1([\bar{X}_{T_{n+1}}^{x_0}], [X_{T_{n+1}}^{x_0}]) & = \mathcal{W}_1([\bar{X}_{T_{n+1}-T_n}^{\bar{x}_{n},n}], [X_{T_{n+1}-T_{n}}^{x_{n},n}]) \\
& \le \mathcal{W}_1([\bar{X}_{T_{n+1}-T_n}^{\bar{x}_{n},n}], [X_{T_{n+1}-T_{n}}^{\bar{x}_{n},n}]) + \mathcal{W}_1([X_{T_{n+1}-T_{n}}^{\bar{x}_{n},n}], [{X}_{T_{n+1}-T_{n}}^{x_{n},n}]),
\end{align*}
and we find a bound on the first term using the same proof as in \cite[Section 4.2]{pages2020}. For $x \in \mathbb{R}^d$, we split $| \mathbb{E} f(\bar{X}_{T_{n+1}-T_n}^{x,n}) - \mathbb{E} f(X_{T_{n+1}-T_n}^{x,n}) |$ into three terms $(a)$, $(b)$ and $(c)$. We pay attention, however, to the dependence on $a_n$ when applying Lemma \ref{lemma:3.4.b:X:bar}, Proposition \ref{prop:3.6:X:bar} and Theorem \ref{thm:confluence}.
We then have: \begin{align*} & (c) \le C [f]_{\text{Lip}} \gamma_{N(T_n)} V^{1/2}(x), \\ & (b) \le Ca_{n+1}^{-3} \gamma_{N(T_n)} \log\left(\frac{T + ||\gamma||_\infty}{\gamma_{N(T_n)}}\right), \\ & (a) \le Ca_{n+1}^{-3} e^{C_1 a_{n+1}^{-2}} V(x) \gamma_{N(T_n)} \rho_{n+1}^{-1}. \end{align*} Then we establish a recursive relation and prove the convergence as in the proof of Theorem \ref{thm:main}. \end{proof} \end{document}
\begin{document}
\begin{center} {\large \bf The combinatorics of open covers (II)} \end{center}
\begin{center} Winfried Just\footnote{partially supported by NSF grant DMS-9312363}, Arnold W. Miller, Marion Scheepers\footnote{partially supported by NSF grant DMS-95-05375},\\ and Paul J. Szeptycki \end{center}
\begin{abstract}
We continue to investigate various diagonalization properties for sequences of open covers of separable metrizable spaces introduced in Part I. These properties generalize classical ones of Rothberger, Menger, Hurewicz, and Gerlits-Nagy. In particular, we show that most of the properties introduced in Part I are indeed distinct. We characterize two of the new properties by showing that they are equivalent to saying that all finite powers have one of the classical properties above (the Rothberger property in one case and the Menger property in the other). We consider for each property the smallest cardinality of a metric space which fails to have that property. In each case this cardinal turns out to equal another well-known cardinal less than the continuum. We also disprove (in ZFC) a conjecture of Hurewicz which is analogous to the Borel conjecture. Finally, we answer several questions from Part I concerning partition properties of covers.
\footnote{ AMS Classification: 03E05 04A20 54D20 $\;$ Key words: Rothberger property $C^{\prime\prime}$, Gerlits-Nagy property $\gamma$-sets, Hurewicz property, Menger property, $\gamma$--cover, $\omega$--cover, Sierpi\'nski set, Lusin set. }
\end{abstract}
\begin{center}{\bf Introduction} \end{center}
Many topological properties of spaces have been defined or characterized in terms of the properties of open coverings of these spaces. Popular among such properties are the properties introduced by Gerlits and Nagy \cite{G-N}, Hurewicz \cite{Hu}, Menger \cite{Me} and Rothberger \cite{Ro}. These are all defined in terms of the possibility of extracting from a given sequence of open covers of some sort, an open cover of some (possibly different) sort. In Scheepers \cite{S} it was shown that when one systematically studies the definitions involved and inquires whether other natural variations of the defining procedures produce any new classes of sets which have mathematically interesting properties, an aesthetically pleasing picture emerges. In \cite{S} the basic implications were established. It was left open whether these were the only implications.

Let $X$ be a topological space. By a ``cover'' for $X$ we always mean ``countable open cover''. Since we are primarily interested in separable metrizable (and hence Lindel\"of) spaces, the restriction to countable covers does not lead to a loss of generality. A cover ${\cal U}$ of $X$ is said to be
\begin{enumerate}
\item{{\em large $\lm$} if for each $x$ in $X$ the set $\{U\in {\cal U}:x\in U\}$ is infinite;}
\item{{\em an $\omega$--cover $\om$} if $X$ is not in ${\cal U}$ and for each finite subset $F$ of $X$, there is a set $U\in{\cal U}$ such that $F\subset U$;}
\item{{\em a $\gamma$--cover $\ga$} if it is infinite and for each $x$ in $X$ the set $\{U\in {\cal U}:x\not\in U\}$ is finite.}
\end{enumerate}
We shall use the symbols $\op$, $\lm$, $\om$ and $\ga$ to denote the collections of all open, large, $\omega$-- and $\gamma$--covers of $X$, respectively. Let ${\cal A}$ and ${\cal B}$ each be one of these four classes.
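For orientation, here is a simple illustration of these notions. Take $X=\mathbb{R}$ and ${\cal U}=\{(-n,n):n\geq 1\}$. Then ${\cal U}$ is simultaneously large, an $\omega$--cover and a $\gamma$--cover of $X$: no member equals $\mathbb{R}$, every finite $F\subset\mathbb{R}$ is contained in $(-n,n)$ once $n$ is large enough, and every $x$ belongs to all but finitely many of the intervals. By contrast, the open cover $\{(k-1,k+1):k\in\mathbb{Z}\}$ of $\mathbb{R}$ is neither large nor an $\omega$--cover: each point belongs to at most two of its members, and the two-element set $\{0,5\}$ is contained in no single member.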
We consider the following three ``procedures'', $\sone$, $\sfin$ and $\ufin$, for obtaining covers in ${\cal B}$ from covers in ${\cal A}$:
\begin{enumerate}
\item{$\sone({\cal A},{\cal B})$: For a sequence $({\cal U}_n:n=1,2,3,\dots)$ of elements of ${\cal A}$, select for each $n$ a set $U_n\in{\cal U}_n$ such that $\{U_n:n=1,2,3,\dots\}$ is a member of ${\cal B}$;}
\item{$\sfin({\cal A},{\cal B})$: For a sequence $({\cal U}_n:n=1,2,3,\dots)$ of elements of ${\cal A}$, select for each $n$ a finite set ${\cal V}_n\subset{\cal U}_n$ such that $\cup_{n=1}^{\infty}{\cal V}_n$ is an element of ${\cal B}$;}
\item{$\ufin({\cal A},{\cal B})$: For a sequence $({\cal U}_n:n=1,2,3,\dots)$ of elements of ${\cal A}$, select for each $n$ a finite set ${\cal V}_n\subset{\cal U}_n$ such that $\{\cup{\cal V}_n:n=1,2,3,\dots\}$ is a member of ${\cal B}$ or\footnote{This is similar to the $*$ convention of \cite{S}.} there exists an $n$ such that $\cup{\cal V}_n=X$.}
\end{enumerate}
For ${\sf G}$ one of these three procedures, let us say that a space has property ${\sf G}({\cal A},{\cal B})$ if for every sequence of elements of ${\cal A}$, one can obtain an element of ${\cal B}$ by means of procedure ${\sf G}$. Letting ${\cal A}$ and ${\cal B}$ range over the set $\{\op, \lm, \om, \ga \}$, we see that for each ${\sf G}$ there are potentially sixteen classes of spaces of the form ${\sf G}({\cal A},{\cal B})$. Each of our properties is monotone decreasing in the first coordinate and increasing in the second, hence we get the following diagram (see figure \ref{sfinfig}) for ${\sf G}=\sfin$.
\begin{figure}
\caption{Basic diagram for $\sfin$}
\label{sfinfig}
\end{figure}
It is also easily checked that $\sfin(\lm,\om)$ and $\sfin(\op,\lm)$ are impossible for nontrivial $X$. Hence the five classes in the lower left corner are eliminated. The same follows for the stronger property $\sone$. In the case of $\ufin$ note that $\ufin(\op,\cdot)$ is equivalent to $\ufin(\ga,\cdot)$, because given an open cover $\{U_n: n\in\omega\}$ we may replace it by the $\gamma$-cover $\{\cup_{i<n} U_i: n\in\omega\}$. This means the diagram of $\ufin$ reduces to any of its rows.

Now clearly $\sone$ implies $\sfin$. Also it is clear that $\sfin(\ga,{\cal A})\rightarrow\ufin(\ga,{\cal A})$ for ${\cal A}=\ga,\om,\op$. The implication
$$\sfin(\ga,\lm)\rightarrow\ufin(\ga,\lm)$$
is also true, but takes a little thought, since when we take finite unions we might not get distinct sets. To prove it, assume ${\cal U}_n$ are $\gamma$--covers of $X$ with no finite subcover. Applying $\sfin$ we get a sequence of finite ${\cal V}_n\subseteq {\cal U}_n$ such that for every $x$ there exist infinitely many $n$ such that $x\in \cup {\cal V}_n$. But since the ${\cal U}_m$'s have no finite subcover we can inductively choose finite ${\cal W}_n$ with
$${\cal V}_n\subseteq {\cal W}_n\subseteq {\cal U}_n$$
and $\cup{\cal W}_n\not=\cup{\cal W}_m$ for all $m<n$. Hence $\{\cup{\cal W}_n:n=1,2,3,\ldots\}$ is a large cover of $X$.

In the three dimensional diagram of figure~\ref{3dim} the double lines indicate that the two properties are equivalent. The proof of these equivalences can be found in either Scheepers \cite{S} or section~\ref{equiv} of this paper. After removing duplications we obtain figure \ref{cshl}.
\begin{figure}
\caption{Full 3d diagram}
\label{3dim}
\end{figure}
For this diagram, we have provided four examples $\{C,S,H,L\}$ which show that practically no other implications can hold.
$C$ is the Cantor set ($2^\omega$), $S$ is a special Sierpi\'nski set such that $S+S$ can be mapped continuously onto the irrationals, $L$ is a special Lusin set such that $L+L$ can be mapped continuously onto the irrationals, and $H$ is a generic Lusin set. Thus the only remaining problems are:
\begin{problem} Is $\ufin(\ga,\om)=\sfin(\ga,\om)$? \end{problem}
\begin{problem} And if not, does $\ufin(\ga,\ga)$ imply $\sfin(\ga,\om)$? \end{problem}
All of our examples are subsets of the real line, but only one of them (the Cantor set) is a ZFC example. Thus, the following problem arises:
\begin{problem} Are there ZFC examples of (Lindel\"of) topological spaces which show that none of the arrows in figure \ref{cshl} can even be consistently reversed? \end{problem}
The paper is organized as follows: In section \ref{equiv} we prove the equivalences of our properties indicated in figure \ref{3dim}. We prove that $\sone(\ga,\ga)=\sfin(\ga,\ga)$ and $\sfin(\lm,\lm)=\sfin(\ga,\lm)$. The other equivalences are either trivial or were proved in Scheepers \cite{S}. In section~\ref{examp} we present the four examples C,S,H,L indicated in figure \ref{cshl}. In section~\ref{pres} we study the preservation of these families under the taking of finite powers and other operations. In section~\ref{card} we study for each of these eleven families the cardinal
$$\non({\cal X})=\min\{|X|: X\notin {\cal X}\}.$$
We show that each is equal to either $\goth b$, $\goth d$, $\goth p$, or the covering number of the meager ideal $\covmeag$. We also show that ${\goth r}=\non(\split(\lm,\lm))$ and ${\goth u}=\non(\split(\om,\om))$. ($\split({\cal A},{\cal B})$ holds iff every infinite cover from ${\cal A}$ can be split into two covers from ${\cal B}$). In section \ref{hur} we give a ZFC counterexample to a conjecture of Hurewicz by showing that there exists an uncountable set of reals in $\ufin(\ga,\ga)$ which is not $\sigma$-compact. We also show that any $\ufin(\ga,\ga)$ set which does not contain a perfect set is perfectly meager. In section \ref{ramsey} we consider other properties from Scheepers \cite{S} and settle some questions about Ramsey-like properties of covers that were left open in \cite{S}. We show that $\sone(\om,\om)$ implies ${\sf Q}(\om,\om)$ and hence
$$\sone(\om,\om) = {\sf P}(\om,\om)+{\sf Q}(\om,\om).$$
We also show that $\om\to \lceil\om\rceil^2_2$ is equivalent to $\sfin(\om,\om)$.
\begin{figure}
\caption{After removing equivalent classes}
\label{cshl}
\end{figure}

\section{Equivalences}\label{equiv}

In this section we show $\sone(\ga,\ga) = \sfin(\ga,\ga)$ and $\sfin(\ga,\lm)=\sfin(\lm,\lm)$. The equivalence of $\sone(\om,\ga)$ with the $\gamma$-set property (every $\omega$-cover contains a $\gamma$-cover) was shown by Gerlits and Nagy \cite{G-N}. But it is easy to see that $\sfin(\om,\ga)$ implies the $\gamma$-set property and hence $\sone(\om,\ga)=\sfin(\om,\ga)$. In Scheepers \cite{S} (Cor 6) it was shown that $\sone(\ga,\lm)=\sone(\ga,\op)$. All of the other equivalences are either to the Rothberger property $C^{\prime\prime}$ or the Menger property. For the Menger property, in Scheepers \cite{S} (Cor 5) it was shown that $\sfin(\ga,\lm)=\ufin(\ga,\op)$. We have $\sfin(\ga,\lm)=\sfin(\lm,\lm)$ by Theorem \ref{mengerst1}; note also that $\sfin(\op,\op)$ easily follows from $\ufin(\ga,\op)$, and hence all nine classes (see figure \ref{3dim}) are equivalent to the Menger property.
In \cite{S} (Thm 17) it was shown that all five classes (see figure \ref{3dim}) are equivalent to the Rothberger property ($C^{\prime\prime}$).
\begin{theorem}\label{sone_eq_sfin} $\sone(\ga,\ga) = \sfin(\ga,\ga)$. \end{theorem}
\par\noindent{\bf Proof:}\par
Note that the class $\sone(\ga,\ga)$ is contained in the class $\sfin(\ga,\ga)$. The difficulty with showing that these two classes are in fact equal is as follows: when we are allowed to choose finitely many elements per $\gamma$--cover, we are allowed to also pick no elements; for $\sone(\ga,\ga)$ we are required to choose an element per $\gamma$--cover.

Let $X$ be a space which has property $\sfin(\ga,\ga)$, and for each $n$ let ${\cal U}_n$ be a $\gamma$--cover of $X$, enumerated bijectively as $(U^n_1,U^n_2,U^n_3,...)$. For each $n$ define ${\cal V}_n$ to be $\{V^n_1,V^n_2,V^n_3,...\}$, where
\[V^n_k= U^1_k\cap U^2_k\cap U^3_k\cap \dots\cap U^n_k. \]
For each $n$, ${\cal V}_n$ is a $\gamma$--cover: fix $n$. For each $x$ and for each $i\in\{1,\dots,n\}$ there exists an $N_i$ such that $x$ is in $U^i_m$ for all $m>N_i$. But then $x$ is in $V^n_m$ for all $m> \max\{N_i:i=1,\dots,n\}$.

Now apply $\sfin(\ga,\ga)$ to $({\cal V}_n : n=1,2,\dots)$. We get a sequence
$$({\cal W}_n:n\in\omega)$$
such that ${\cal W}_n$ is a finite subset of ${\cal V}_n$ for each $n$, and such that $\cup_{n=1}^{\infty}{\cal W}_n$ is a $\gamma$--cover of $X$. Choose an increasing sequence $n_1<n_2<\dots<n_k<\dots$ such that for each $j$, ${\cal W}_{n_j}\setminus\cup_{i<j}{\cal W}_{n_i}$ is nonempty. This is possible because each ${\cal W}_n$ is finite, while the union of these sets, being a $\gamma$--cover of $X$, is infinite. For each $j$, choose $m_j$ such that $V^{n_j}_{m_j}$ is an element of ${\cal W}_{n_j}\setminus\cup_{i<j}{\cal W}_{n_i}$. Then $\{V^{n_k}_{m_k}: k=1,2,\dots\}$ is a $\gamma$--cover of $X$. For each $n$ in $(n_k,n_{k+1}]$ we define $p_n=m_{k+1}$. Then $\{U^n_{p_n}: n=1,2,\dots\}$ is a $\gamma$--cover of $X$, since for $n\in(n_k,n_{k+1}]$ we have $V^{n_{k+1}}_{m_{k+1}}\subseteq U^n_{p_n}$.
\nopagebreak\par\noindent\nopagebreak$\blacksquare$\par
\begin{theorem}\label{mengerst1} $\sfin(\ga,\lm)=\sfin(\lm,\lm)$. \end{theorem}
\par\noindent{\bf Proof:}\par
Since $\ga\subseteq\lm$, it is clear that $\sfin(\lm,\lm)$ implies $\sfin(\ga,\lm)$. We prove the other implication. Assume $\sfin(\ga,\lm)$ and let $({\cal U}_n:n\in\omega)$ be a sequence of large covers of $X$. Without loss of generality we may assume that for every finite $F\subseteq \bigcup_{n\in\omega}{\cal U}_n$, ${\cal U}_k\cap F=\emptyset$ for all but finitely many $k$. (This can be accomplished by throwing out finitely many elements from each ${\cal U}_n$.) For each $n$ enumerate ${\cal U}_n$ bijectively as $(U^n_k:k\in\omega)$, and define
$$V^n_m =\bigcup\{U^n_i: i<m\}.$$
Since each ${\cal V}_n=(V^n_m:m\in\omega)$ is a nondecreasing open cover of $X$, either there exists $m_n$ such that $V_{m_n}^n=X$ or ${\cal V}_n$ is a $\gamma$-cover. So there must be an infinite set $A$ for which one or the other always occurs. Suppose ${\cal V}_n$ is a $\gamma$-cover for every $n\in A$. Apply $\sfin(\ga,\lm)$ to obtain ${\cal W}_n$, a finite subset of ${\cal V}_n$, such that $\bigcup\{{\cal W}_n:n\in A\}$ is a large cover of $X$. Let ${\cal P}_n$ be a finite subset of ${\cal U}_n$ such that every element of ${\cal W}_n$ is a union of elements of ${\cal P}_n$. Since ${\cal P}_n$ is disjoint from all but finitely many of the ${\cal U}_k$, it follows that $\bigcup\{{\cal P}_n: n \in A\}$ is a large cover of $X$.
In the case that $V_{m_n}^n=X$ for every $n\in A$ just take ${\cal P}_n=\{U^n_i: i<m_n\}$ and the same argument works.
\nopagebreak\par\noindent\nopagebreak$\blacksquare$\par

\section{Examples}\label{examp}

\subsec{The Cantor set C}
It is easy to check that every $\sigma$--compact space (the union of countably many compact sets) belongs to $\ufin(\ga,\ga)$ and $\sfin(\om,\om)$. We also show that the Cantor set, $2^\omega$, is not in the class $\sone(\ga,\lm)$.

For the sake of conciseness, let us introduce the following notion. An open cover ${\cal U}$ of a topological space $X$ is a {\em $k$--cover} iff there is for every $k$--element subset of $X$ an element of ${\cal U}$ which covers that set.
\begin{lemma}\label{cptomeg} Let $k$ be a positive integer. Every $\omega$--cover of a compact space contains a finite subset which is a $k$--cover for the space. \end{lemma}
\par\noindent{\bf Proof:}\par
Let ${\cal U}$ be an $\omega$--cover of the compact space $X$ and let $k$ be a positive integer. Then the set ${\cal V} = \{U^k:U\in{\cal U}\}$ of $k$--th powers of elements of ${\cal U}$ is a collection of open subsets of $X^k$, and it is a cover of $X^k$ since ${\cal U}$ is an $\omega$--cover of $X$. Since $X$ is compact, so is $X^k$. Thus there is a finite subset of ${\cal V}$ which covers $X^k$, say $\{U^k_1,\dots,U^k_n\}$. But then $\{U_1,\dots,U_n\}$ is a $k$--cover of $X$.
\nopagebreak\par\noindent\nopagebreak$\blacksquare$\par
\begin{theorem} Every $\sigma$--compact topological space is a member of both the class ${\sfin}(\om,\om)$ and $\ufin(\ga,\ga)$. \end{theorem}
\par\noindent{\bf Proof:}\par
Let $X$ be a $\sigma$--compact space, and write $X=\cup_{n\in\omega}K_n$ where
$$K_0\subseteq K_1\subseteq \dots \subseteq K_n\subseteq \dots$$
is a sequence of compact subsets of $X$. Let $({\cal U}_n:n\in\omega)$ be a sequence of $\omega$--covers of $X$. For each $n$ apply Lemma \ref{cptomeg} to the $\omega$--cover ${\cal U}_n$ of the space $K_n$, to find a finite subset ${\cal V}_n$ of ${\cal U}_n$ which is an $n$--cover of $K_n$. Then $\cup_{n\in\omega}{\cal V}_n$ is an $\omega$--cover of $X$: given a finite set $F\subseteq X$, choose $n\geq |F|$ so large that $F\subseteq K_n$; since ${\cal V}_n$ is an $n$--cover of $K_n$, some member of ${\cal V}_n$ contains $F$. This shows that $X$ has property $\sfin(\om,\om)$.

Now suppose that $({\cal U}_n:n\in\omega)$ is a sequence of $\gamma$-covers of $X$. Since any infinite subset of a $\gamma$-cover is a $\gamma$-cover we may assume that our covers are disjoint. Since each $K_n$ is compact we may choose ${\cal V}_n\in [{\cal U}_n]^{<\omega}$ so that $K_n\subseteq \cup{\cal V}_n$. Either there exists $n$ such that $\cup{\cal V}_n=X$ or $\{\cup{\cal V}_n:n\in\omega\}$ is infinite and hence a $\gamma$-cover of $X$. It follows that $X$ has property $\ufin(\ga,\ga)$.
\nopagebreak\par\noindent\nopagebreak$\blacksquare$\par
\begin{theorem} The Cantor set, $C=2^\omega$, is not in the class $\sone(\ga,\lm)$. \end{theorem}
\par\noindent{\bf Proof:}\par
There exists an $\omega\times\omega$--matrix $(A^m_n:m,n<\omega)$ of closed subsets of the Cantor set such that
\begin{enumerate}
\item for each fixed $m\in\omega$ the sets $A^m_n$ for $n<\omega$ are pairwise disjoint and \label{disc1}
\item whenever $m_1<m_2<\dots<m_k$ and $n_1,n_2,\dots,n_k$ are \label{disc2} given, then $A^{m_1}_{n_1}\cap\dots\cap A^{m_k}_{n_k}\neq\emptyset$.
\end{enumerate}
To see that such a matrix exists think of the Cantor set as the homeomorphic space $2^{\omega\times\omega}$ instead. Let $\la x_n,\ n<\omega\ra$ be a sequence of pairwise distinct elements of $2^{\omega}$.
Also, for each $m$, let \[\pi_m:2^{\omega\times\omega}\rightarrow \mbox{$2^{\omega}$} \] be defined so that for each $y$ in $2^{\omega\times\omega}$ and for each $m$, $\pi_m(y)(n) = y(m,n)$. Then $\pi_m$ is continuous. We now define our matrix. For each $m$ and $n$ we define \[A^m_n = \{y\in 2^{\omega\times\omega}:\pi_m(y) = x_n\} \] Each row of the matrix is pairwise disjoint since the $x_n$'s are pairwise distinct. Each entry of the matrix is a closed set since each $\pi_m$ is continuous. We must still verify property 2. Thus, let $(m_1,n_1),\dots,(m_k,n_k)$ be given such that $m_1<\dots<m_k$. Then the element $y$ of $2^{\omega\times\omega}$ which is defined so that for each~$i$ \[\mbox{for each $j$, }y(m_i,j)=x_{n_i}(j) \] is a member of the set $A^{m_1}_{n_1}\cap\dots\cap A^{m_k}_{n_k}$, whence this intersection is nonempty. For each $m$ put ${\cal U}_m = \{2^\omega\setminus A^m_n:n<\omega\}$. Then by property \ref{disc1} we see that each ${\cal U}_m$ is a $\gamma$--cover of $2^\omega$. For each $m$ choose a $U^m_{n_m}$ from ${\cal U}_m$. Then $U^m_{n_m} = 2^\omega\setminus A^m_{n_m}$. By the property \ref{disc2} and the fact that all the $A^m_n$'s are compact, we see that the intersection $\cap_{m<\omega}A^m_{n_m}$ is nonempty. But then not only is $\{U^m_{n_m}:m<\omega\}$ not a large cover of $2^{\omega\times\omega}$, it is not even a cover of $2^{\omega\times\omega}$. \nopagebreak\par\noindent\nopagebreak$\blacksquare$\par It follows from these two theorems that the Cantor set C must lie exactly in those classes indicated in figure \ref{cshl} in our introduction. \begin{theorem} No uncountable $F_{\sigma}$ set of reals is in $\sone(\ga,\ga)$. \label{thm17} \end{theorem} \par\noindent{\bf Proof:}\par Such a set contains an uncountable compact perfect set. The Cantor set is a continuous image of such perfect sets. \nopagebreak\par\noindent\nopagebreak$\blacksquare$\par \subsec{The special Lusin set L} Recall that a set $L$ of real numbers is said to be a {\em Lusin} set iff it is uncountable but its intersection with every first category set of real numbers is countable. Sierpi\'nski \cite{sier} showed that assuming CH there exists a Lusin set $L$ such that $L+L$ is the irrationals (see also Miller \cite{survey} Thm 8.5). We will construct similarly a Lusin set $L\subseteq \zz$ with the property that $L+L=\zz$. Here $\zz$ is the infinite product of the ring of integers and addition is the usual pointwise addition, i.e, $(x+y)(n)=x(n)+y(n)$. Our construction is based on the following simple fact: \begin{lemma}\label{comeagersums} If $X$ is a comeager subset of $\zz$, then for every $x\in\zz$ there are elements $a$ and $b$ of $X$ such that $a+b=x$. \end{lemma} \par\noindent{\bf Proof:}\par Since multiplication by $-1$ and translation by $x$ are homeomorphisms, the set $$x-X = \{x-y:y\in X\}$$ is also comeager. But then $X\cap (x-X)$ is non--empty. Let $z$ be an element of this intersection. Then $z=a$ for some $a$ in $X$, and $z=x-b$ for some $b$ in $X$. The Lemma follows. \nopagebreak\par\noindent\nopagebreak$\blacksquare$\par \begin{lemma} [CH]\label{lusinset} There is a Lusin set $L\subseteq\zz$ such that $L+L=\zz$. \end{lemma} \par\noindent{\bf Proof:}\par Let $(M_{\alpha}:\alpha<\omega_1)$ bijectively list all first category $F_{\sigma}$--subsets of $\zz$. Let $(r_{\alpha}:\alpha<\omega_1)$ bijectively list $\zz$. 
Using Lemma \ref{comeagersums}, choose elements $x_{\alpha}, y_{\alpha}$ from $\zz$ subject to the following rules: \begin{enumerate} \item For each $\alpha$, $r_{\alpha} = x_{\alpha} + y_{\alpha}$, and \item $x_{\alpha}$ and $y_{\alpha}$ are not elements of $\cup_{\beta\leq \alpha}M_{\beta}\cup\{x_{\beta},y_\beta:\beta<\alpha\}$ \end{enumerate} Letting $L$ be the set $\{x_{\alpha}:\alpha<\omega_1\}\cup\{y_{\alpha}:\alpha<\omega_1\}$ completes the proof. \nopagebreak\par\noindent\nopagebreak$\blacksquare$\par For a proof of the following result see Rothberger \cite{Ro}. \begin{theorem} (Rothberger) Every Lusin set has property $\sone(\op,\op)=C^{\prime\prime}$. \end{theorem} \begin{theorem} \label{specluz} If $L$ is our special Lusin set (i.e., $L+L=\zz$), then $L$ does not satisfy $\ufin(\ga,\om)$. \end{theorem} \par\noindent{\bf Proof:}\par Let $\{{\cal U}_n: n\in\omega\}$ be the sequence of open covers defined by $${\cal U}_n=\{U_{n,k}:{k\in \omega}\}$$ where $$U_{n,k}=\{f\in \zz : |f(n)|\leq k\}.$$ Then each ${\cal U}_n $ is a $\gamma$-cover of ${L}$. Let $\{{\cal V}_n: n\in\omega\}$ be a sequence such that ${\cal V}_n\in [{\cal U}_n]^{<\omega}$, and let $h\in \oo$ be such that $$h(n)>2\cdot\max\{ k:U_{n,k}\in {\cal V}_n\}$$ for all $n\in \omega$. Let $f,g\in { L}$ be such that $h=f+g$. Then $$\max\{|f(n)|,|g(n)|\}\geq {1\over 2} h(n)$$ for all $n\in \omega$, and hence $\{f,g\}\not\subseteq \cup{\cal V}_n$ for any $n\in \omega$. \nopagebreak\par\noindent\nopagebreak$\blacksquare$\par \subsec{The special Sierpi\'nski set S} A Sierpi\'nski set is an uncountable subset of the real line which has countable intersection with every set of Lebesgue measure zero. In Theorem 7 of Fremlin and Miller \cite{F-M} it was shown that every Sierpi\'nski set belongs to the class $\ufin(\ga,\ga)$. It is well known that every Borel image of a Sierpi\'nski set into the Baire space $^{\omega}\omega$ is bounded. Sets with the property that every Borel image in the Baire space is bounded were called $A_2$--sets in Bartoszynski and Scheepers \cite{B-S}. \begin{theorem} Every $A_2$--set (hence every Sierpi\'nski set) belongs to $\sone(\ga,\ga)$. \end{theorem} \par\noindent{\bf Proof:}\par Let $X$ be an $A_2$--set, and let $({\cal U}_n:n\in\omega)$ be a sequence of $\gamma$--covers of it. Enumerate each ${\cal U}_n$ bijectively as $(U^n_m:m\in\omega)$. Define a function $\Psi$ from $X$ to $\oo$ so that for each $x\in X$ and for each $n$, $$\Psi(x)(n) = \min\{m:(\forall k\geq m)(x\in U^n_k)\}.$$ Then $\Psi$ is a Borel function. Choose a strictly increasing function $g$ from $\oo$ which eventually dominates each element of $\Psi[X]$. Then the sequence $(U^n_{g(n)}:n\in\omega)$ is a $\gamma$--cover of $X$. \nopagebreak\par\noindent\nopagebreak$\blacksquare$\par Clearly no Sierpi\'nski set is of measure zero and since every $\sone(\op,\op)$ set is of measure zero, $X$ fails to be $\sone(\op,\op)$. Therefore we have established the following theorem: \begin{theorem}\label{sierp} If $X$ is a Sierpi\'nski set of reals, then $X$ is $\sone(\ga,\ga)$ but not $\sone(\op,\op)$. \end{theorem} We call a Sierpi\'nski set $S$ special iff $S+S$ is the set of irrationals. (Here we are using ordinary addition in the reals.) Using an argument similar to Lemma \ref{lusinset} one can show that assuming CH there exists a special Sierpi\'nski set. \begin{theorem} A special Sierpi\'nski set is not in the class $\sfin(\om,\om)$. 
\end{theorem} \par\noindent{\bf Proof:}\par By Theorem \ref{contimage} all our classes are closed under continuous images. Note that $S+S$ is the continuous image of $S\times S$. Also $\oo$ is not in $\ufin(\ga,\op)$ (see proof of Theorem \ref{specluz}). Hence $\oo$ is not in $\sfin(\om,\om)$ and therefore $S\times S$ is not in $\sfin(\om,\om)$. But by Theorem \ref{sfinomegprod} the class $\sfin(\om,\om)$ is closed under finite products and therefore $S$ is not in the class $\sfin(\om,\om)$. \nopagebreak\par\noindent\nopagebreak$\blacksquare$\par These results show that the special Sierpi\'nski set (denoted S) is in exactly the classes indicated in figure \ref{cshl} of the introduction. \subsec{The generic Lusin set H} The fact that no Lusin set satisfies $\ufin(\ga,\ga)$ follows from Theorem \ref{gam6}. \begin{theorem}[CH]\label{lusin} There exists a Lusin set $H$ which is $\sone(\om,\om)$. \end{theorem} \par\noindent{\bf Proof:}\par To construct a $\sone(\om,\om)$ Lusin set in the reals enumerate all countable sequences of countable open families as $\{ ({\cal U}^{\beta }_{n})_{n<\omega }:\beta <\omega_{1}\} $. Also enumerate all dense open subsets of the reals as $(D_{\alpha })_{\alpha <\omega_{1}}$. We construct $X$ recursively as $\{ x_{\beta }:\beta <\omega _{1}\} $ as follows. At stage $\alpha$ of the construction we have $$\{ x_{\beta }:\beta <\alpha \} \mbox{ and }\{ (U_{n}^{\beta })_{n<\omega }:\beta <\alpha \}$$ satisfying for each $\beta <\alpha $ \begin{itemize} \item[(i)] $x_\beta\in\cap \{D_\delta:\delta<\beta\}$, \item[(ii)] $\{U_{n}^{\beta}: n<\omega \}$ is an $\omega$-cover of $\{ x_\delta:\delta <\alpha \}$, \item[(iii)] if $({\cal U}^{\beta}_{n})_{n<\omega }$ was an $\omega$-cover of $\{x_\delta:\delta<\beta\}$, then $U_{n}^{\beta}\in {\cal U}^{\beta}_{n}$ for every $n$. \end{itemize} To see how to choose $x_{\alpha }$ and $(U_{n}^{\alpha })_{n<\omega}$ consider the $\alpha $'th sequence of open families: if $({\cal U}^{\alpha }_{n})_{n<\omega }$ is a sequence of $\omega$-covers of $\{x_{\beta} :\beta <\alpha \}$ first extract an $\omega$-cover $(U_{n}^{\alpha })_{n<\omega}$ so that $U_{n}^{\alpha }\in {\cal U}^{\alpha }_{n}$ for each $n<\omega $ (countable sets are $\sone(\om,\om)$). If $({\cal U}^{\alpha }_{n})_{n<\omega }$ is not a sequence of $\omega$-covers of $\{ x_{\beta }:\beta <\alpha \}$ let $U_{n}^{\alpha }={\Bbb R}$ for each $n<\omega$. (${\Bbb R}$ is the set of real numbers.) Enumerate the finite subsets of $\{ x_{\beta }:\beta <\alpha \}$ as $\{ A_{k}:k<\omega \} $. For each $k$ and each $\beta \leq \alpha $ let $$O_{k,\beta }=\bigcup\{ U_{n}^{\beta }:A_{k}\subseteq U_{n}^{\beta }\} .$$ Then $O_{k,\beta }$ is dense and open. We choose $$ x_{\alpha }\in \bigcap_{\beta \leq \alpha }D_{\beta }\cap \bigcap_{k<\omega ,\beta \leq \alpha } O_{k,\beta } $$ different from all $x_\beta$ with $\beta<\alpha$. To see that $(U_{n}^{\beta })_{n<\omega }$ is an $\omega$-cover of $\{ x_{\beta }:\beta \leq \alpha \}$ for each $\beta \leq \alpha$ it suffices to show that each $A_{k}\cup\{ x_{\alpha }\} $ is covered by some $U_{n}^{\beta }$ for some $n<\omega$. But $x_{\alpha }\in O_{k,\beta }$ implies that there is an $n$ such that $x_{\alpha}\in U_{n}^{\beta }$ and $A_{k}\subseteq U_{n}^{\beta }$. We let $H=\{x_{\beta}: \beta<\omega_1 \}$. To see that $H$ is $\sone(\om,\om)$, fix a sequence of $\omega$-covers $({\cal U}_n)_{n<\omega}$. There is an $\alpha$ such that $({\cal U}_n)_{n<\omega}=({\cal U}_n^{\alpha})_{n<\omega}$. 
Then at stage $\alpha$ of the construction we extracted an appropriate $\omega$-cover of $\{x_{\beta}:\beta\leq\alpha\}$ and inductive hypothesis (ii) assures that it is also an $\omega$-cover of $H$.
\nopagebreak\par\noindent\nopagebreak$\blacksquare$\par
The proof of Theorem \ref{lusin} only requires that the covering number of the meager ideal is equal to the continuum ($\covmeag={\goth c}$). This requirement is equivalent to $MA$ for countable posets. Adding Cohen reals over any model yields an $\sone(\om,\om)$ Lusin set, and hence the name ``generic Lusin set''.

\section{Preservation of the properties}\label{pres}

Each of the properties in the diagram is inherited by closed subsets and continuous images. The preservation theory is more complicated for other topological constructions.
\begin{theorem}\label{contimage} Let ${\sf G}$ be one of $\sone$, $\sfin$, or $\ufin$ and let ${\cal A}$ and ${\cal B}$ range over the set $\{\op,\om,\lm,\ga\}$. If $X$ has property ${\sf G}({\cal A},{\cal B})$ and $C$ is a closed subset of $X$, then $C$ has property ${\sf G}({\cal A},{\cal B})$. If $f:X\to Y$ is continuous and onto and $X$ has the property ${\sf G}({\cal A},{\cal B})$, then so does $Y$. \end{theorem}
\par\noindent{\bf Proof:}\par
The closure under taking closed subspaces is clear since if $\cal U$ is a cover of $C$ in one of the classes $\{\op,\om,\lm,\ga\}$ for $C$, then
$${\cal V}=\{U\cup (X\setminus C): U\in {\cal U}\}$$
is in the same class for $X$. To prove the closure under continuous images, use that if $\cal U$ is a cover of $Y$ in one of the classes $\{\op,\om,\lm,\ga\}$ for $Y$, then
$${\cal V}=\{f^{-1}(U): U\in {\cal U}\}$$
is in the same class for $X$.
\nopagebreak\par\noindent\nopagebreak$\blacksquare$\par

\subsec{Finite powers}
We show that the classes $\sone(\om,\om)$, $\sfin(\om,\om)$, and $\sone(\om,\ga)$ are the only ones closed under finite powers.
\begin{lemma} \label{pow1} Let $X$ be a space and let $n$ be a positive integer. If ${\cal U}$ is an $\omega$--cover of $X$, then $\{U^n:U\in{\cal U}\}$ is an $\omega$--cover of $X^n$. \end{lemma}
\par\noindent{\bf Proof:}\par
Observe that if $F$ is a finite subset of $X^n$, then there is a finite subset $G$ of $X$ such that $F\subset G^n$.
\nopagebreak\par\noindent\nopagebreak$\blacksquare$\par
\begin{lemma}\label{omcovpowers} Let $X$ be a topological space and let $n$ be a positive integer. If ${\cal U}$ is an $\omega$--cover for $X^n$, then there is an $\omega$--cover ${\cal V}$ of $X$ such that the open cover $\{V^n:V\in{\cal V}\}$ of $X^n$ refines ${\cal U}$. \end{lemma}
\par\noindent{\bf Proof:}\par
Let ${\cal U}$ be an $\omega$--cover of $X^n$. Let $F$ be a finite subset of $X$. Then $F^n$ is a finite subset of $X^n$. Since ${\cal U}$ is an $\omega$--cover of $X^n$, choose an open set $U\in {\cal U}$ such that $F^n\subset U$. For any $n$--tuple $(x_1,\dots,x_n)$ in $F^n$, find for each $i\in\{1,\dots,n\}$ an open set $U_i(x_1,\dots,x_n)\subset X$ such that $x_i\in U_i(x_1,\dots,x_n)$, and $\prod_{i=1}^nU_i(x_1,\dots,x_n)\subset U$. Then, for each $x$ in $F$, let $U_x$ be the intersection of all the $U_i(x_1,\dots,x_n)$ which have $x$ as an element. Finally, choose $V_F$ to be the set $\cup_{x\in F}U_x$, an open subset of $X$ which contains $F$, and which has the property that $F^n\subset V^n_F\subset U$. Put
$${\cal V}= \{V_F:F \in[X]^{<\omega}\}.$$
Then ${\cal V}$ is as required.
\nopagebreak\par\noindent\nopagebreak$\blacksquare$\par
While Lemma \ref{pow1} is true of $\gamma$--covers, Lemma~\ref{omcovpowers} is not.
\begin{theorem}\label{s1omegprod} Let $n$ be a positive integer. If a space $X$ has property $\sone(\om,\om)$, so does $X^n$. \end{theorem}
\par\noindent{\bf Proof:}\par
Let $n$ be a positive integer and let $({\cal U}_m:m=1,2,3,\dots)$ be a sequence of $\omega$--covers of $X^n$. By Lemma \ref{omcovpowers}, for each $m$ we can choose an $\omega$--cover ${\cal V}_m$ of $X$ such that
$$\{V^n:V\in {\cal V}_m\}$$
is an $\omega$--cover of $X^n$ which refines ${\cal U}_m$. Now apply the fact that $X$ is in $\sone(\om,\om)$ to select from each ${\cal V}_m$ a set $V_m$ such that $\{V_m:m=1,2,3,\dots\}$ is an $\omega$--cover of $X$. Then, since for each $m$ the set $\{V^n:V\in{\cal V}_m\}$ refines ${\cal U}_m$, we see that we can select from each ${\cal U}_m$ a set $U_m$ such that $V^n_m\subseteq U_m$. But then the set $\{U_m:m=1,2,3,\dots\}$ is an $\omega$--cover of $X^n$.
\nopagebreak\par\noindent\nopagebreak$\blacksquare$\par
\begin{theorem}\label{sfinomegprod} Let $n$ be a positive integer and let $X$ be a space. If $X$ has property $\sfin(\om,\om)$, then $X^n$ also has this property. \end{theorem}
\par\noindent{\bf Proof:}\par
Let $({\cal U}_m:m=1,2,3,\dots)$ be a sequence of $\omega$--covers of $X^n$. For each $m$, choose an $\omega$--cover ${\cal V}_m$ of $X$ such that $\{V^n:V\in{\cal V}_m\}$ refines ${\cal U}_m$. Now apply the fact that $X$ satisfies $\sfin(\om,\om)$: For each $m$ we find a finite subset ${\cal W}_m$ of ${\cal V}_m$ such that the collection $\cup_{m=1}^{\infty}{\cal W}_m$ is an $\omega$--cover of $X$. For each $m$, choose a finite subset ${\cal Z}_m$ of ${\cal U}_m$ such that there is for each $W$ in ${\cal W}_m$ a $Z$ in ${\cal Z}_m$ such that $W^n\subseteq Z$. Then $\cup_{m=1}^{\infty}{\cal Z}_m$ is an $\omega$--cover of $X^n$.
\nopagebreak\par\noindent\nopagebreak$\blacksquare$\par
\begin{theorem} Let $n$ be a positive integer and let $X$ be a space. If $X$ has property $\sfin(\om,\ga)$, then $X^n$ also has this property. \end{theorem}
\par\noindent{\bf Proof:}\par
This is similar to the last two proofs.
\nopagebreak\par\noindent\nopagebreak$\blacksquare$\par
\begin{theorem} [CH] None of the other classes (see figure \ref{cshl}) are closed under finite powers. \end{theorem}
\par\noindent{\bf Proof:}\par
Note that the examples L and S are such that their sums $L+L$ and $S+S$ are homeomorphic to the irrationals. The function $\phi$ from $L\times L$ which assigns to $(x,y)$ the point $\phi(x,y) = x+y$ is continuous. But the space of irrationals does not have property $\ufin(\ga,\op)$. Since $\ufin(\ga,\op)$ is closed under continuous images (see Theorem \ref{contimage}), $L\times L$ does not have property $\ufin(\ga,\op)$. Similarly, $S \times S$ does not have property $\ufin(\ga,\op)$. So none of the classes containing either one of them is closed under finite powers.
\nopagebreak\par\noindent\nopagebreak$\blacksquare$\par
We have seen that the inclusion $\sone(\om,\om)\subseteq\sone(\op,\op)$ may be proper, e.g. the special Lusin set L is in $\sone(\op,\op)$ but not in $\sone(\om,\om)$. We now give an important fact about these two classes, which characterizes $\sone(\om,\om)$ as a subset of $\sone(\op,\op)$.
\begin{theorem} Let $X$ be a space. Then the following are equivalent: \label{sonepow}
\begin{enumerate}
\item $X$ satisfies $\sone(\om,\om)$.
\item Every finite power of $X$ satisfies $\sone(\op,\op)$ (Rothberger property $C^{\prime\prime}$).
\end{enumerate} \end{theorem} \par\noindent{\bf Proof:}\par The implication $1\Rightarrow 2$ follows immediately from Theorem \ref{s1omegprod} and the fact that $\sone(\om,\om)$ is a subclass of $\sone(\op,\op)$. The implication $2\Rightarrow 1$ is proven as follows: Let $({\cal U}_n:n\in\omega)$ be a sequence of $\omega$--covers of $X$. Write the set of positive integers as a union of countably many disjoint infinite sets, say $Y_1, Y_2,\dots, Y_n,\dots$. For each $m$ and for each $k$ in $Y_m$ put ${\cal V}_k = \{U^m:U\in{\cal U}_k\}$. Then by Lemma \ref{pow1}, for each $m$ the sequence $({\cal V}_k:k\in Y_m)$ is a sequence of $\omega$--covers of $X^m$. By Hypothesis $2$ we find for each $m$ a sequence $$(U^m_k:k\in Y_m)$$ such that for each $k$, $U_k\in{\cal U}_k$, and such that $\{U^m_k:k\in Y_m\}$ is an open cover of $X^m$. The sequence $(U_k:k\in\omega)$ is an $\omega$--cover of $X$. For let $F$ be a finite subset of $X$, say $F=\{x_1,\dots,x_m\}$, enumerated bijectively. Then $(x_1,\dots,x_m)$ is an element of $X^m$. Thus, choose a $k$ in $Y_m$ such that $(x_1,\dots,x_m)$ is in $U^m_k$. Then $F$ is a subset of $U_k$. \nopagebreak\par\noindent\nopagebreak$\blacksquare$\par The Borel Conjecture, that every strong measure zero set is countable, implies that the two classes $\sone(\om,\om)$ and $\sone(\op,\op)$ coincide. The Borel Conjecture was proved consistent by Laver. \begin{problem} Is it true that if there is an uncountable set of real numbers which has property $\sone(\om,\om)$, then there is a set of real numbers which has property $\sone(\op,\op)$ but does not have property $\sone(\om,\om)$? \end{problem} We shall now prove the analogue of Theorem \ref{sonepow} for $\sfin(\op,\op)$ and $\sfin(\om,\om)$. \begin{theorem} \label{finitepower} For a space $X$ the following are equivalent: \begin{enumerate} \item Every finite power of $X$ has property $\sfin(\op,\op)$. \item $X$ has property $\sfin(\om,\om)$. \end{enumerate} \end{theorem} \par\noindent{\bf Proof:}\par The implication $2\Rightarrow 1$: This follows from Theorem \ref{sfinomegprod}. We now work on the implication $1\Rightarrow 2$: Let $({\cal U}_n:n\in\omega)$ be a sequence of $\omega$--covers of $X$. Let $(Y_k:k\in\omega)$ be a pairwise disjoint sequence of infinite sets of positive integers whose union is the set of positive integers. For each $m$, for each $k$ in $Y_m$, put ${\cal V}_k = \{U^m:U\in{\cal U}_k\}$. Then for each $m$ by Lemma \ref{pow1}, the sequence $({\cal V}_k:k\in Y_m)$ is a sequence of $\omega$--covers of $X^m$. Applying $1$ for each $m$, we find for each $m$ a sequence $({\cal W}_k:k\in Y_m)$ such that \begin{itemize} \item{for each $k\in Y_m$, ${\cal W}_k$ is a finite subset of ${\cal U}_k$, and} \item{$\cup_{k\in Y_m}\{U^m:U\in{\cal W}_k\}$ is an open cover of $X^m$.} \end{itemize} But then $\cup_{k=1}^{\infty}{\cal W}_k$ is an $\omega$--cover of $X$. \nopagebreak\par\noindent\nopagebreak$\blacksquare$\par None of our classes are closed under finite products. Todorcevic \cite{To} showed that there exist two (nonmetrizable) topological spaces $X$ and $Y$ that satisfy $\sone(\om,\ga)$ ($\gamma$-set), but whose product does not satisfy $\ufin(\ga,\op)$ (Menger). Thus none of our properties are closed under finite products. If we restrict our attention to separable metric spaces, it is also the case, assuming CH, that none of our classes are closed under finite products.
For the class $\sone(\om,\ga)$ note that Galvin-Miller \cite{G-M}, using a result of Todorcevic, showed that there are $\gamma$-sets whose product is not a $\gamma$-set. For the classes $\sone(\om,\om)$ and $\sfin(\om,\om)$, construct a pair of generic Lusin sets $H_0$ and $H_1$ such that $H_0+H_1=\zz$, and argue as for $L\times L$ above. \noindent {\bf Remark} The special Lusin set L gives a partial answer to a problem of Lelek (see \cite{Le}). It shows that it is relatively consistent with ZFC that there exists a separable metrizable space $L$ that has property $\ufin(\ga,\op)$ but has a finite power which does not have property $\ufin(\ga,\op)$. In \cite{Le}, $\ufin(\ga,\op)$ is referred to as the ``Hurewicz property,'' whereas in our notation it is the Menger property. \noindent {\bf Remark} It is relatively consistent with ZFC that for every $n\geq 1$ there exists a separable metric space $X$ such that $X^n$ has property $\ufin(\ga,\op)$ but $X^{n+1}$ does not have property $\ufin(\ga,\op)$ (see Just \cite{J1} and Stamp \cite{WS}). \noindent {\bf Remark} It was shown in Just \cite{WJ} that preservation of $\ufin(\ga,\om)$ under direct sums is independent of ZFC. \subsec{Finite or countable unions} It is well-known and easy to prove that each of the classes \begin{itemize} \item $\sone(\op,\op)$ (Rothberger property $C^{\prime\prime}$), \item $\ufin(\ga,\ga)$ (Hurewicz property), and \item $\ufin(\ga,\op)$ (Menger property) \end{itemize} is closed under taking countable unions. It is also easy to prove that $\sone(\ga,\lm)$ is closed under taking countable unions. The class $\sone(\om,\ga)$ (the Gerlits-Nagy property of $\gamma$-sets) is not closed under taking finite unions (see Galvin-Miller \cite{G-M}). \begin{problem} Which of the remaining classes are closed under taking finite or countable unions? \end{problem} \section{Cardinal equivalents}\label{card} \begin{figure} \caption{Cardinals $\non(P)$} \label{cardfig} \end{figure} We now consider the connection between the properties and some well-known cardinal invariants of $P(\omega)/Fin$. See Vaughan \cite{vaughan} for the definitions, but briefly: \noindent ${\goth p}$ is the least cardinality of a family of sets in $[\omega]^\omega$ with the finite intersection property but no pseudo-intersection, \noindent ${\goth d}$ is the minimal cardinality of a dominating family in $\oo$, \noindent ${\goth b}$ is the minimal cardinality of an unbounded family, and \noindent $\covmeag$ is the minimal cardinality of a covering of the real line by meager sets. In particular, if $P$ is one of the eleven properties in the diagram (figure \ref{cshl}) or is one of the splitting properties $\split(\om,\om)$ or $\split(\lm,\lm)$, we will determine $\non(P)$, the minimum cardinality of a set of reals that fails to have property $P$. \noindent Note that, obviously, if $P\rightarrow Q$, then $\non(P)\leq\non(Q)$. Some of these cardinals are well known and we simply state the results and refer the reader to the appropriate references. \begin{theorem}\label{gam0} (Galvin-Miller \cite{G-M}) $\non(\sone(\om,\ga))={\goth p}$. \end{theorem} \begin{theorem}\label{gam2} (Fremlin-Miller \cite{F-M}) $\non(\sone(\op,\op))=\covmeag$. \end{theorem} The next two theorems are due to Hurewicz and they imply that the minimum cardinality of a set of reals that is not $\ufin(\ga,\ga)$ is ${\goth b}$, and the minimum cardinality of a set of reals that is not $\ufin(\ga,\op)$ is ${\goth d}$.
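Before stating them, we recall for orientation the standard ${\sf ZFC}$--provable inequalities relating the cardinals defined above (see Vaughan \cite{vaughan}):
$${\goth p}\leq{\goth b}\leq{\goth d} \qquad\mbox{and}\qquad {\goth p}\leq\covmeag\leq{\goth d},$$
while no inequality between ${\goth b}$ and $\covmeag$ is provable in ${\sf ZFC}$. These relations are used below when comparing the various values of $\non(P)$.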
\begin{theorem}\label{gam6} (Hurewicz \cite{Hu2}) A set $X$ is $\ufin(\ga,\ga)$ if and only if every continuous image of $X$ in $\oo$ is bounded. Hence $\non(\ufin(\ga,\ga))={\goth b}$. \end{theorem} \begin{theorem}\label{gam8} (Hurewicz \cite{Hu2}) A set $X$ is $\ufin(\ga,\op)$ if and only if every continuous image of $X$ in $\oo$ is not dominating. Hence $\non(\ufin(\ga,\op))={\goth d}$. \end{theorem} Next we determine $\non(P)$ for all the other properties in figure \ref{cshl} in the introduction. \begin{theorem}\label{gam4} $\non(\sone(\ga,\om))={\goth d}$. \end{theorem} \par\noindent{\bf Proof:}\par Since $\sone(\ga,\om)\subseteq \ufin(\ga,\op)$ we have that $$\non(\sone(\ga,\om))\leq \non(\ufin(\ga,\op)).$$ Also by Theorem \ref{gam8} we have $\non(\ufin(\ga,\op))={\goth d}$, so $\non(\sone(\ga,\om))\leq {\goth d}$. Conversely, suppose that $X$ is a set of reals that fails to be $\sone(\ga,\om)$. Fix a sequence of $\gamma$-covers $({\cal U}_{n})_{n\in \omega}$ witnessing the failure of $\sone(\ga,\om)$. Fix an enumeration of each cover ${\cal U}_{n}=\{U_{n}^{i}:i\in \omega\}$. For each finite set $F\subseteq X$ define $f_{F}\in \oo$ by $$ f_{F}(n)=\min\{i:\forall j > i,\; F\subseteq U_{n}^{j}\}. $$ Since each ${\cal U}_{n}$ is a $\gamma$-cover, $f_F(n)$ is well defined, and if $i>f_{F}(n)$, then $F\subseteq U_{n}^{i}$. Therefore, $$\{f_{F}:F\in[X]^{<\omega}\}$$ must be a dominating family. Otherwise there is a $g$ not dominated by any such $f_{F}$. That is, for each finite $F\subseteq X$, there is an integer $n$ such that $g(n)>f_{F}(n)$. This implies that $\{U_{n}^{g(n)}:n\in \omega\}$ is an $\omega$-cover, contradicting the failure of $\sone(\ga,\om)$. So $\non(\sone(\ga,\om))\geq {\goth d}$. \nopagebreak\par\noindent\nopagebreak$\blacksquare$\par \begin{theorem}\label{sfinomega} $\non(\sfin(\om,\om))={\goth d}$. \end{theorem} \par\noindent{\bf Proof:}\par Identical to the proof of Theorem \ref{gam4}. One only needs to modify the definition of $f_F$ to $$f_{F}(n)=\min\{i:F\subseteq U_{n}^{i}\}$$ and take ${\cal V}_n=\{U^i_n:i\leq g(n)\}$. \nopagebreak\par\noindent\nopagebreak$\blacksquare$\par \begin{theorem}\label{gam3} $\non(\sone(\ga,\ga))={\goth b}$. \end{theorem} \par\noindent{\bf Proof:}\par Using $\sone(\ga,\ga)\subseteq \ufin(\ga,\ga)$ and Theorem~\ref{gam6} it follows that $$\non(\sone(\ga,\ga))\leq {\goth b}.$$ Conversely, suppose that $X$ is a set of reals and that $({\cal U}_{n})_{n\in \omega}$ is a sequence of $\gamma$-covers witnessing the failure of $\sone(\ga,\ga)$. For each $x\in X$ define $f_{x}\in \oo$ by $$f_{x}(n)=\min\{i:\forall j\geq i, x\in U_{n}^{j}\}.$$ If $g$ were to dominate each $f_{x}$, then $(U_{n}^{g(n)})_{n\in \omega}$ would be a $\gamma$-cover, a contradiction. Therefore $\{f_{x}:x\in X\}$ is an unbounded family. Hence $\non(\sone(\ga,\ga))\geq {\goth b}$. \nopagebreak\par\noindent\nopagebreak$\blacksquare$\par \begin{theorem}\label{gam1} $\non(\sone(\om,\om))=\covmeag$. \end{theorem} \par\noindent{\bf Proof:}\par The inclusion $\sone(\om,\om)\subseteq \sone(\op,\op)$ and Theorem~\ref{gam2} give us the inequality $\non(\sone(\om,\om))\leq\covmeag$. Conversely, fix a set of reals $X$ and a sequence $({\cal U}_{n})_{n\in \omega}$ of $\omega$-covers witnessing the failure of $\sone(\om,\om)$. For each finite $F\subseteq X$ let $$ K_{F}=\{f\in \oo :(\forall n\in \omega)(F\not\subset U_{n}^{f(n)})\}. $$ Since for each $f\in \oo$ there is a finite $F\subseteq X$ such that $F\not\subset U_{n}^{f(n)}$ for every $n$, we have that $\oo=\bigcup\{K_{F}:F\in [X]^{<\omega}\}$.
Furthermore each $K_{F}$ is closed and nowhere dense. Hence $\non(\sone(\om,\om))\geq\covmeag$. \nopagebreak\par\noindent\nopagebreak$\blacksquare$\par Our results are summarized in figure \ref{cardfig}. Classical results about the relationships between the cardinals ${\goth p}$, ${\goth b}$, ${\goth d}$ and $cov({\cal M})$ give alternative proofs that many of the implications in our diagram cannot be reversed. \subsec{$\split(\lm,\lm)$ and $\split(\om,\om)$} These properties were defined in Scheepers \cite{S}: for classes of covers ${\cal A}$ and ${\cal B}$, a space has property $\split({\cal A},{\cal B})$ iff every open cover ${\cal U\in A}$ can be partitioned into two subcovers ${\cal U}_{0}$ and ${\cal U}_{1}$ both in ${\cal B}$. Recall that a family ${\cal R}\subseteq [\omega]^{\omega}$ is said to be a {\em reaping family} if for each $x\in [\omega]^{\omega}$ there is a $y\in {\cal R}$ such that either $y\subseteq^{*}x$ or $y\subseteq^{*} \omega\setminus x$. The minimal cardinality of a reaping family is denoted by ${\goth r}$, and the minimal cardinality of a base for a nonprincipal ultrafilter is denoted by ${\goth u}$. \begin{theorem}\label{large1} $\non(\split(\lm,\lm))={\goth r}$. \end{theorem} \par\noindent{\bf Proof:}\par Suppose that $X\subseteq[\omega]^{\omega}$ is a reaping family. Consider the open family $${\cal U}=\{B^{1}_{n}:n\in \omega\}$$ where $$B^{1}_{n}=\{x\in [\omega]^{\omega}:n\in x\} \mbox{ and } B^{0}_{n}=\{x\in [\omega]^{\omega}:n\not\in x\}.$$ Clearly ${\cal U}$ is a large cover of any subset of $[\omega]^{\omega}$. We will often refer to it as the canonical large cover. Since $X$ is a reaping family, this cover cannot be partitioned into two large subcovers. Conversely, suppose that $X$ is a set of reals and ${\cal U}=\{U_{n}:n\in \omega\}$ is a large cover of $X$. For each $x\in X$ let $$ A_{x}=\{n\in \omega:x\in U_{n}\}. $$ If ${\cal F}$ is the collection of all such $A_{x}$'s, then ${\cal F}$ is a reaping family. For if $A\subseteq \omega$ is such that for all $x\in X$ both $A_{x}\cap A$ and $A_{x}\setminus A$ are infinite, then $$\{U_{n}:n\in A\}\cup\{U_{n}:n\not\in A\}$$ is a splitting of ${\cal U}$ into disjoint large subcovers. \nopagebreak\par\noindent\nopagebreak$\blacksquare$\par The proof yields a bit more. \begin{theorem}\label{large2} A set of reals $X$ is $\split (\lm ,\lm )$ with respect to clopen covers if and only if every continuous image of $X$ in $[\omega]^{\omega}$ is not a reaping family. \end{theorem} \par\noindent{\bf Proof:}\par Suppose that $X$ is a set of reals, $f:X\rightarrow [\omega]^{\omega}$ is continuous and that $f(X)$ is a reaping family. The canonical large cover is in fact a clopen family. Therefore the collection $f^{-1}({\cal U})=\{f^{-1}(B^{1}_{n}):n\in \omega\}$ is a large clopen cover of $X$. Suppose $f^{-1}({\cal U})={\cal V}_{0}\cup{\cal V}_{1}$ is a partition. Then we have the corresponding partition of $\omega=A_{0}\cup A_{1}$ where ${\cal V}_{i}=\{f^{-1}(U_{n}):n\in A_{i}\}$. As $f(X)$ is a reaping family, there is an $x\in X$ such that for either $i=0$ or $1$, $f(x)\subseteq^{*} A_{i}$. Then ${\cal V}_{i}$ is not large at $x$. Therefore $X$ is not $\split (\lm ,\lm )$ with respect to the clopen cover $f^{-1}({\cal U})$. \noindent Conversely, suppose that $X$ is not $\split(\lm,\lm ) $ with respect to some large clopen cover ${\cal U}=\{U_{n}:n\in \omega\}$. For each $x\in X$ define $f_{x}\in [\omega]^{\omega}$ by $n\in f_{x}$ iff $x\in U_{n}$. Since $U$ is large, each $f_{x}$ is infinite. 
As above, since ${\cal U}$ cannot be split, $\{f_{x}:x\in X\}$ is a reaping family. Therefore it suffices to check that the mapping $f:x\rightarrow f_{x}$ is continuous. But the collection of $\{B_n^i:n\in \omega,i=0,1\}$ forms a subbase for $[\omega]^{\omega}$, and clearly $f^{-1}(B_{n}^{1})=U_{n}$ and $f^{-1}(B_{n}^{0})=X\setminus U_{n}$ therefore $f$ is continuous (this is the only place where we need the restriction to clopen covers). \nopagebreak\par\noindent\nopagebreak$\blacksquare$\par \begin{theorem}\label{omega} $\non(\split(\om,\om))={\goth u}$. \end{theorem} \par\noindent{\bf Proof:}\par Suppose that $X\subseteq[\omega]^{\omega}$ is a filter-base. Then the canonical large cover in $[\omega]^{\omega}$ is in fact an $\omega$-cover of $X$. If $X$ is a base for an ultrafilter, then the canonical cover cannot be partitioned into two $\omega$-subcovers. \noindent Conversely, suppose that $X$ is a set of reals and ${\cal U}$ is an $\omega$-cover of $X$. For each $x\in X$ let $$ {\cal U}_{x}=\{U\in {\cal U}:x\in U\}. $$ If ${\cal F}$ is the collection of all such ${\cal U}_{x}$'s, then ${\cal F}$ forms a filterbase on ${\cal U}$ and if ${\cal U}$ cannot be split into two $\omega$-covers, then ${\cal F}$ generates a nonprincipal ultrafilter. \nopagebreak\par\noindent\nopagebreak$\blacksquare$\par Analogously to Theorem \ref{large2} we can prove: \begin{theorem}\label{omega2} A set of reals $X$ is $\split(\om,\om)$ with respect to clopen covers if and only if every continuous image of $X$ in $[\omega]^{\omega}$ does not generate an ultrafilter. \end{theorem} Note that a base for an ultrafilter is a reaping family, and therefore ${\goth r}\leq{\goth u}$. In Bell-Kunen \cite {belku} it is proven consistent that this inequality may be strict. Therefore $\split(\lm,\lm )\not\Rightarrow \split (\om ,\om )$. Similarly neither ${\goth r}$ nor ${\goth u}$ are comparable to ${\goth d}$, therefore there are no implications between either $\split(\lm,\lm )$ or $\split(\om,\om)$ and any of the six classes in figure \ref{cardfig} whose `$\non$' is equivalent to ${\goth d}$. In Scheepers \cite{S} it is shown that \begin{itemize} \item $\ufin(\ga,\ga)\Rightarrow \split(\lm,\lm)$ (Cor 29), and \item $\sone(\op,\op)\Rightarrow \split(\lm,\lm)$ (Thm 15). \end{itemize} Note that while both ${\goth b}\leq {\goth r}$ and $\covmeag\leq {\goth r}$, it is consistent that these inequalities are strict (see Vaughan \cite{vaughan}). So neither of these implications can be reversed. \begin{problem} Does $\split(\om,\om)\Rightarrow \split(\lm,\lm)$? \end{problem} \section{The Hurewicz Conjecture and the Borel Conjecture.}\label{hur} Every $\sigma$--compact space belongs to $\ufin(\ga,\ga)$. It is also well-known that not every space belonging to $\ufin(\ga,\ga)$ need be $\sigma$--compact. We now look at the traditional examples of sets of reals belonging to $\ufin(\ga,\ga)$, and show that some of these belong to $\sone(\ga,\ga)$, while others do not. Since $\sone(\ga,\ga)$ is contained in $\sone(\ga,\lm)$, and the unit interval is not an element of $\sone(\ga,\lm)$, we see that the $\sigma$--compact spaces do not in general belong to the class $\sone(\ga,\ga)$. On page 200 of \cite{Hu}, W. 
Hurewicz conjectures: \noindent {\em [Hurewicz] A set of real numbers has property $\ufin(\ga,\ga)$ if, and only if, it is $\sigma$--compact.\footnote{``Es entsteht nun die Vermutung dass durch die (wahrscheinlich sch\"arfere) Eigenschaft $E^{**}$ die halbkompakten Mengen $F_{\sigma}$ allgemein charakterisiert sind.''} } The existence of a Sierpi\'nski set violates this conjecture. As we have seen earlier, Sierpi\'nski sets are elements of $\sone(\ga,\ga)$. The following result shows that Hurewicz's conjecture fails in ZFC. \begin{theorem}\label{nothc} There exists a separable metric space $X$ such that $|X|=\omega_1$, $X$ is not $\sigma$-compact and $X$ has property $\ufin(\ga,\ga)$. This $X$ also has property $\sone(\ga,\om)$. \end{theorem} \par\noindent{\bf Proof:}\par \noindent Case 1. ${\goth b}>\omega_1$. In this case every $X$ of size $\omega_1$ is in $\sone(\ga,\ga)$, hence in both $\ufin(\ga,\ga)$ and $\sone(\ga,\lm)$. (In this case it is also in $\sfin(\om,\om)$.) \noindent Case 2. ${\goth b}=\omega_1$. In this case we will use a construction similar to one in \cite{G-M}. Build an $\omega_1$-sequence $\la x_\alpha:\alpha<\omega_1 \ra$ of elements of $[\omega]^\omega$ such that $\alpha < \beta$ implies $x_\beta\subseteq^* x_\alpha$ and if $f_\alpha:\omega\to x_\alpha$ is the increasing enumeration of $x_\alpha$, then for every $g\in\oo$ there exists $\alpha$ such that for infinitely many $n$ we have $g(n)<f_\alpha(n)$. \begin{claim}\label{claim1} For any $S\in[\omega]^\omega$ there exists $\alpha<\omega_1$ such that there exist infinitely many $n$ such that $|[f_\alpha(n),f_\alpha(n+1))\cap S|\geq 2$. \end{claim} To prove Claim \ref{claim1} suppose not, and let $g$ eventually dominate the increasing enumerations of all the sets $S^*$ such that $S^*=^*S$ (there are only countably many such $S^*$). Then $g$ eventually dominates the $f_\alpha$'s, a contradiction. This completes the proof of Claim \ref{claim1}. \begin{claim}\label{claim2} Let $X=[\omega]^{<\omega}\cup\{x_\alpha:\alpha<\omega_1\}$. Then for every sequence $\la{\cal U}_n:n\in\omega\ra$ of $\omega$--covers of $X$ (or even just of $[\omega]^{<\omega}$) there exists an $A\in [\omega]^\omega$, $\la V_n\in {\cal U}_n: n\in A\ra$ and $\alpha<\omega_1$ such that for all $\beta\geq\alpha$ we have $x_\beta\in V_n$ for all but finitely many $n\in A$. \end{claim} To prove Claim \ref{claim2} construct $k_n$, an increasing sequence in $\omega$, such that there exists $V_n\in{\cal U}_n$ with the property that $$\{\;x\subseteq \omega\; :\; x\cap (k_n,k_{n+1})= \emptyset\;\}\subseteq V_n.$$ (Use that ${\cal U}_n$ is an $\omega$--cover to pick $V_n\supseteq [k_n+1]^{<\omega}$ and then choose $k_{n+1}$.) It follows from Claim \ref{claim1} that there exists an $\alpha<\omega_1$, $A\in[\omega]^\omega$, and an increasing sequence $\la m_n: n \in A\ra$ such that for every $n\in A$ $$\{\;x\subseteq \omega\;:\; x\cap (f_{\alpha}(m_n),f_{\alpha}(m_n+1)) =\emptyset\;\}\subseteq V_n.$$ It follows that $x_\beta\in V_n$ for all $\beta\geq\alpha$ for all but finitely many $n\in A$. This completes the proof of Claim \ref{claim2}. Now we show that our set $X$ in this case is in both $\ufin(\ga,\ga)$ and $\sone(\om,\om)$ (and hence $\sone(\ga,\lm)$). First we show that it satisfies a property we might call $\sone(\ga,\ga)^*$. \begin{quote} Given any $\la{\cal U}_n:n \in \omega\ra$ a sequence of $\gamma$--covers of $X$, there exists $\la V_n \in {\cal U}_n : n \in \omega\ra$ and a countable $Y\subseteq X$ such that $\la V_n: n \in \omega\ra$ is a $\gamma$--cover of $X\setminus Y$.
\end{quote} If $\sfin(\ga,\ga)^*$ is defined analogously, then it is easy to see using the same proof as Theorem \ref{sone_eq_sfin} that $\sfin(\ga,\ga)^*$ is equivalent to $\sone(\ga,\ga)^*$. Clearly Claim \ref{claim2} implies $\sfin(\ga,\ga)^*$. $\sone(\ga,\ga)^*$ implies $\ufin(\ga,\ga)$ because we may first pick $\la V_n\in {\cal U}_n: n \in \omega\ra$ a $\gamma$--cover of $X\setminus Y$ and then pick $\la W_n\in {\cal U}_n: n \in \omega\ra$ a $\gamma$--cover of $Y$. Then $\la V_n \cup W_n: n \in \omega\ra$ is a $\gamma$--cover of $X$. \nopagebreak\par\noindent\nopagebreak$\blacksquare$\par To see that $X$ is in $\sone(\om,\om)$ we need the following claim: \begin{claim}\label{claim3} For every $B\in [\omega]^\omega$, sequence $\la{\cal U}_n:n\in B\ra$ of $\omega$--covers of $X$, and countable $Y\subseteq X$ there exist $A\in [B]^\omega$, $\la V_n\in {\cal U}_n: n\in A\ra$ and $Z\subseteq X$ countable such that $Y$ and $Z$ are disjoint and $\la V_n\in {\cal U}_n: n\in A\ra$ is a $\gamma$ cover of $X\setminus Z$. \end{claim} \par\noindent{\bf Proof:}\par Let $Y=\{y_n:n\in\omega\}$ and apply Claim \ref{claim2} to the $\omega$--covers defined by $${\cal U}^\prime_n =\{U\in {\cal U}_n: \{y_i :i< n\}\subseteq U\}$$ for $n\in B$. This completes the proof of Claim \ref{claim3}. Using Claim \ref{claim3} for every sequence $\la{\cal U}_n:n\in\omega\ra$ of $\omega$--covers of $X$ inductively construct $A_i\in [\omega]^\omega$, $\la V_n\in {\cal U}_n: n\in A_i\ra$ and $Y_i\subseteq X$ countable such that \begin{itemize} \item $A_i\cap A_j=\emptyset$ for $i\not = j$. \item $Y_i\cap Y_j=\emptyset$ for $i\not = j$. \item $\la V_n\in {\cal U}_n: n\in A_i\ra$ is a $\gamma$--cover of $X\setminus Y_i$. \end{itemize} (At stage $n$ take $Y=\cup\{Y_i:i<n\}$ and $B=\omega \setminus (\cup\{A_i:i<n\})$. Apply Claim 3 and let $Y_n=Z$ and cut down $A_n$, if necessary, to ensure that $\cup\{A_i:i \leq n\}$ is coinfinite.) Since the $(Y_i:i<\omega)$ and the $(A_i:i<\omega)$ are pairwise disjoint families, letting $A=\bigcup _{i\in\omega}A_i$, $(V_n:n\in A)$ is an $\omega$-cover of $X$. Hence $X$ has property $\sone(\om,\om)$. This completes the proof of Theorem \ref{nothc}. \nopagebreak\par\noindent\nopagebreak$\blacksquare$\par \begin{problem} Is the set $X$ constructed in Case 2 of Theorem \ref{nothc} a $\gamma$-set, i.e., $\sone(\om,\ga)$? \end{problem} The Borel conjecture implies that every set in $\sone(\op,\op)$ is countable (hence every set in $\sone(\om,\om)$ or $\sone(\om,\ga)$ is countable). Theorem \ref{nothc} and the Cantor set along with the last example rules out an analogous conjecture for all except $\sone(\ga,\ga)$. So we ask: \begin{problem} Is it consistent, relative to the consistency of ${\sf ZF}$, that every set in $\sone(\ga,\ga)$ is countable? \end{problem} One may also ask if all the pathological examples of sets having property $\ufin(\ga,\ga)$ occur because of the presence of such sets in $\sone(\ga,\ga)$; here is one formalization of this question. \begin{problem} Let $X$ be a set of real numbers which does not contain a perfect set of real numbers but which does have the Hurewicz property. Does $X$ then belong to $\sone(\ga,\ga)$? \end{problem} \subsec{$\ufin(\ga,\ga)$ and perfectly meager sets.} We now prove a theorem which implies that the $\sone(\ga,\ga)$--sets are contained in another class of sets that were introduced in the early parts of this century. 
Recall that a set $X$ of real numbers is {\em perfectly meager} (also called ``always of first category") if, for every perfect set $P$ of real numbers, $X\cap P$ is meager in the relative topology of $P$. \begin{theorem} If a set of reals $X$ is in $\ufin(\ga,\ga)$ and contains no perfect subset, then $X$ is perfectly meager. \end{theorem} \par\noindent{\bf Proof:}\par Let $P$ be a perfect set of real numbers. Since $X$ contains no perfect set, $P\setminus X$ is a dense subset of $P$. Let $D$ be a countable dense subset of $P$ which is contained in $P\setminus X$, and enumerate $D$ bijectively as $(d_n:n=1,2,3,...)$. Fix $k$. For each $x$ in $X$ choose open intervals $I^k_x$ and $J^k_x$ such that \begin{enumerate} \item{$I^k_x$ is centered at $x$,} \item{$J^k_x$ is centered at $d_k$, and} \item{the closures of these intervals are disjoint.} \end{enumerate} Let $\{I^k_{x^k_n}:n=1,2,3,...\}$ be a countable subset of $\{I^k_x:x \in X\}$ which covers $X$. Then for each $n$ define $I^k_n = \cup_{j\leq n}I^k_{x^k_j}$, and $J^k_n = \cap_{j\leq n}J^k_{x^k_j}$. Then ${\cal U}_k=\{I^k_n:n=1,2,3,...\}$ is a $\gamma$--cover of $X$. Apply $\ufin(\ga,\ga)$ to the sequence $({\cal U}_k:k=1,2,3,..)$. For each $k$ we find an $n_k$ such that $(I^k_{n_k}:k=1,2,3,..)$ is a $\gamma$ cover for $X$. For each $j$ put $G_j = \cup_{k\geq j}J^k_{n_k}$. Then each $G_j\cap P$ is a dense open subset of $P$ (as it contains all but a finite piece of $D$). The intersection of these sets is a dense $G_{\delta}$ subset of $P$, and is disjoint from $X\cap P$. Thus, $X\cap P$ is a meager subset of $P$. \nopagebreak\par\noindent\nopagebreak$\blacksquare$\par \begin{corollary} Every element of $\sone(\ga,\ga)$ is perfectly meager. \end{corollary} \par\noindent{\bf Proof:}\par We have seen (Theorem \ref{thm17}) that sets in $\sone(\ga,\ga)$ do not contain perfect sets of real numbers. But $\sone(\ga,\ga)\subseteq\ufin(\ga,\ga)$. \nopagebreak\par\noindent\nopagebreak$\blacksquare$\par In Theorem 2 of Galvin-Miller \cite{G-M} it was shown that if a subset $X$ of the real line is in $\sone(\om,\ga)$, then for every $G_{\delta}$ set $G$ which contains $X$, there is an $F_{\sigma}$ set $F$ such that $X\subseteq F\subseteq G$. In fact, this property characterizes $\ufin(\ga,\ga)$. \begin{theorem} For a set $X$ of real numbers, the following are equivalent: \begin{enumerate} \item $X$ has property $\ufin(\ga,\ga)$. \item For every $G_{\delta}$--set $G$ which contains $X$, there is a $F_{\sigma}$--set $F$ such that $X\subseteq F\subseteq G$. \end{enumerate} \end{theorem} \par\noindent{\bf Proof:}\par $1\Rightarrow 2$: Write $G = \cap_{n=1}^{\infty} G_n$, where each $G_n$ is open. Fix $n$, and choose for each $x$ in $X$ an open interval $I^n_x$ which contains $x$, and whose closure is contained in $G_n$. Choose a countable subcover $\{I^n_{x^n_j}:j=1,2,3,...\}$ of $X$ from the cover $\{I^n_x:x \in X\}$. For each $n$ and $k$ define $I^n_k = \cup_{j\leq k}I^n_{x^n_j}$. Then ${\cal U}_n=\{I^n_k:k=1,2,3,...\}$ is a $\gamma$--cover of $X$ such that for each $k$ the closure of $I^n_k$ is contained in $G_n$. Apply the fact that $X$ is a $\ufin(\ga,\ga)$-set to the sequence $$({\cal U}_n:n=1,2,3,...).$$ For each $n$ choose a $k_n$ such that $(I^n_{k_n}:n=1,2,3,...)$ is a $\gamma$--cover of $X$. For each $n$ let $F_n$ be the intersection of the closures of the sets $I^m_{k_m},\ m\geq n$. For each $n$ we have the closed set $F_n$ contained in $G$. But then the union of the $F_n$'s is an $F_{\sigma}$ set which contains $X$ and is contained in $G$. 
$2\Rightarrow 1$: Let $({\cal U}_n:n<\omega)$ be a sequence such that each ${\cal U}_n$ is a cover of $X$ by open subsets of the real line. By assumption there exist closed sets $F_n$ such that $$X\subseteq \bigcup_{n<\omega} F_n\subseteq \bigcap _{n<\omega} (\cup {\cal U}_n).$$ Since the real line is $\sigma$-compact we may assume that the $F_n$ are compact. For each $n$ choose ${\cal V}_n\in [{\cal U}_n]^{<\omega}$ such that $(\cup_{m<n}F_m)\subseteq \cup {\cal V}_n$. Either there exists $n$ such that $X\subseteq\cup{\cal V}_n$ or $\{\cup{\cal V}_n:n\in\omega\}$ is infinite and hence a $\gamma$-cover of $X$. \nopagebreak\par\noindent\nopagebreak$\blacksquare$\par \section{Ramseyan theorems and other properties} \label{ramsey} Other classes of spaces motivated by diagonalization of open covers are related to $Q$-point ultrafilters, $P$-point ultrafilters and Ramsey-like partition relations. If ${\cal A}$ and ${\cal B}$ are classes of open covers, then a space has the property \begin{enumerate} \item ${\sf Q}({\cal A},{\cal B})$ iff for every open cover ${\cal U}\in {\cal A}$ and for every partition of this cover into countably many disjoint nonempty finite sets ${\cal F}_0,\ {\cal F}_1,\ {\cal F}_2,\dots$, there is a subset ${\cal V}\subseteq{\cal U}$ which belongs to ${\cal B}$ such that $|{\cal V}\cap{\cal F}_n|\leq 1$ for each $n$, and \item ${\sf P}({\cal A},{\cal B})$ iff for every sequence $\{{\cal U}_n: n\in\omega\}$ of open covers of $X$ from ${\cal A}$ such that ${\cal U}_{n+1}\subseteq {\cal U}_n$ for each $n$, there is an open cover ${\cal V}$ which belongs to ${\cal B}$ such that ${\cal V}\subseteq^*{\cal U}_n$ for each $n$. \end{enumerate} In Scheepers \cite{S} the partition relation $\om\rightarrow(\om)^2_2$ was defined: a space $X$ is said to satisfy $\om\rightarrow(\om)^2_2$ iff for every $\omega$--cover ${\cal U}$ of $X$, if \[f:[{\cal U}]^2\rightarrow\{0,1\}\] is any coloring, then there are an $i\in\{0,1\}$ and an $\omega$--cover ${\cal V}\subseteq {\cal U}$ such that $f(\{A,B\})=i$ for all distinct $A$ and $B$ from ${\cal V}$. It is customary to say that ${\cal V}$ is homogeneous for $f$. Also in \cite{S} it was shown that for a set $X$ of real numbers, the following statements are equivalent: \begin{enumerate} \item{$X$ is both $\sone(\om,\om)$ and ${\sf Q}(\om,\om)$.} \item{$\om$, the collection of $\omega$--covers of $X$, satisfies the following partition relation: $\om\rightarrow(\om)^2_2$.} \end{enumerate} The next theorem shows that, indeed, the partition relation characterizes the property of being a $\sone(\om,\om)$--set. This also implies that $$\sone(\om,\om)={\sf P}(\om,\om)+{\sf Q}(\om,\om).$$ \begin{theorem} $\sone(\om,\om)\subseteq {\sf Q}(\om,\om)$. \end{theorem} \par\noindent{\bf Proof:}\par Let $X$ be a $\sone(\om,\om)$--set and let ${\cal U}$ be an $\omega$--cover of it. Let $({\cal P}_n:n<\omega)$ be a partition of this cover into pairwise disjoint nonempty finite sets. Enumerate the cover bijectively as $(U_n:n<\omega)$ and, for each $n$, let $I_n$ be the set of those $j$ for which $U_j\in {\cal P}_n$. We may choose the enumeration so that $(I_n:n<\omega)$ is a partition of $\omega$ into disjoint intervals such that if $m$ is less than $n$, then each element of $I_m$ is less than each element of $I_n$. For each $\ell$, let $m_{\ell}=\sum_{j\leq\ell}|I_j|$.
Now define an $\omega$--cover ${\cal V}$ of $X$ such that $V$ is in ${\cal V}$ iff \[V= U_{k_0}\cap\dots\cap U_{k_r} \] where \begin{enumerate} \item{$r=m_{\ell_0}$ and} \item{$\ell_0<\dots<\ell_r$ are such that for each $j$, $k_j$ is in $I_{\ell_j}$, and} \item{$V$ is nonempty.} \end{enumerate} Next, choose a partition $({\cal V}_n:n<\omega)$ such that each ${\cal V}_n$ is an $\omega$--cover of $X$, and ${\cal V}$ is the union of these sets. Then, discard from each ${\cal V}_n$ all sets of the form \[U_{k_0}\cap\dots\cap U_{k_r} \] where $k_0$ is an element of $I_0\cup\dots\cup I_n$; let ${\cal W}_n$ denote the resulting family. Observe that each ${\cal W}_n$ is still an $\omega$--cover. Since $X$ is an $\sone(\om,\om)$--set, we find for each $n$ a $W_n$ in ${\cal W}_n$ such that the set $\{W_n:n\in\omega\}$ is an $\omega$--cover of $X$. For each $n$ we fix a representation \[W_n = U_{k^n_0}\cap\dots\cap U_{k^n_{r(n)}} \] where $k^n_0<\dots<k^n_{r(n)}$. On account of the way ${\cal W}_n$ was obtained from ${\cal V}_n$, we see that $n<k^n_0$ and $n<r(n)$. Now choose recursively sets $$U_{k(0)}, U_{k(1)},\dots,U_{k(n)},\dots$$ so that $U_{k(0)} = U_{k^0_0}\supseteq W_0$. Suppose that $U_{k(0)}, \dots, U_{k(n)}$ have been chosen such that for $i\leq n$ we have \begin{itemize} \item{$k(i)\in\{k^i_0,\dots,k^i_{r(i)}\}$ and} \item{$W_i\subseteq U_{k(i)}$, and} \item{the $k(i)$'s belong to distinct $I_j$'s.} \end{itemize} To define $U_{k(n+1)}$ we consult $W_{n+1}=U_{k^{n+1}_0}\cap \dots\cap U_{k^{n+1}_{r(n+1)}}$. Since we have so far selected only $n+1$ numbers, since $r(n+1)$ is larger than $n+1$, and since the $k^{n+1}_j$ come from $r(n+1)+1$ disjoint intervals $I_j$, we can find one of these intervals which is disjoint from $\{k(0),\dots,k(n)\}$, and select $k(n+1)$ to be the $k^{n+1}_j$ from that interval. This then specifies $U_{k(n+1)}$. Because the sequence of $W_n$'s refines $\{U_{k(n)}:n<\omega\}$, the latter is an $\omega$--cover of $X$, and by construction it contains no more than one element per ${\cal P}_n$. \nopagebreak\par\noindent\nopagebreak$\blacksquare$\par In Scheepers \cite{S} it was shown that if $X$ satisfies $\sfin(\om,\om)$, then its family of $\omega$--covers, $\om$, satisfies the partition relation \[\om\rightarrow\lceil\om\rceil^2_2. \] Satisfying this partition relation means that for every $\omega$--cover ${\cal U}$ of $X$, if \[f:[{\cal U}]^2\rightarrow\{0,1\} \] is any coloring, then there are an $i\in\{0,1\}$, an $\omega$--cover ${\cal V}\subseteq {\cal U}$ and a finite--to--one function $q:{\cal V}\rightarrow\omega$ such that for all $A$ and $B$ from ${\cal V}$, if $q(A)\neq q(B)$, then $f(\{A,B\})=i$. It is customary to say that ${\cal V}$ is eventually homogeneous for $f$. We now show that these two properties are equivalent. \begin{theorem} For any space $X$, $\om\rightarrow\lceil\om\rceil^2_2$ is equivalent to $\sfin(\om,\om)$. \end{theorem} \par\noindent{\bf Proof:}\par That $\sfin(\om,\om)$ implies $\om\rightarrow\lceil\om\rceil^2_2$ is proved in \cite{S}. To prove the other direction suppose that ${\cal U}_{n}=\{ U^n_m:m\in\omega\}$ is an $\omega$--cover for each $n\in \omega$. Let $$ {\cal U}=\{ U^0_k\cap U^k_l:k,l\in\omega\}. $$ ${\cal U}$ is an $\omega$--cover, since given a finite $F\subseteq X$ we can first pick $k$ with $F\subseteq U^0_k$ and then pick $l$ with $F\subseteq U^k_l$.
For each element of ${\cal U}$ we pick a pair as above and define $f:[{\cal U}]^{2}\rightarrow 2$ by $$f(\{U^0_{k_{0}}\cap U^{k_{0}}_{l_{0}}, U^0_{k_{1}}\cap U^{k_{1}}_{l_{1}}\})= \left\{ \begin{array}{ll} 0 & \mbox{if $k_0=k_1$}\\ 1 & \mbox{if $k_0\not=k_1$} \end{array} \right.$$ By applying $\om\rightarrow\lceil\om\rceil^2_2$ there exists a sequence $(k_{i},l_{i})$ and a finite--to--one function $q:\omega\rightarrow\omega$ such that $${\cal V}=\{U^0_{k_{i}}\cap U^{k_{i}}_{l_{i}}: i \in\omega\}$$ is an $\omega$--cover of $X$ and either \begin{itemize} \item[(a)]{$q(i)\not=q(j)$ implies $k_i=k_j$ or } \item[(b)]{$q(i)\not=q(j)$ implies $k_i\not=k_j$. } \end{itemize} In case (a) since $q$ is finite--to--one, we get that $k_i=k_j$ for every $i,j\in \omega$. This would mean that every element of ${\cal V}$ refines $U^0_{k_{0}}$, but this contradicts the fact that ${\cal V}$ is an $\omega$--cover. Thus this case cannot occur. In case (b) let $${\cal W}=\{U^{k_i}_{l_i}: i<\omega\}.$$ Since ${\cal V}$ refines ${\cal W}$ and $X\notin {\cal W}$, ${\cal W}$ is an $\omega$--cover of $X$. Define $${\cal W}_n=\{U^{k_i}_{l_i}: k_i=n\}\subseteq {\cal U}_n.$$ To finish the proof it is enough to see that each ${\cal W}_n$ is finite. If not, there would be an infinite $A\subseteq\omega$ such that $k_i=n$ for each $i\in A$. Since $q$ is finite--to--one, there would be $i\not=j\in A$ with $q(i)\not=q(j)$. But $k_i=k_j=n$ contradicts the assumption of case (b). \nopagebreak\par\noindent\nopagebreak$\blacksquare$\par \begin{center} Addresses \end{center} \begin{flushleft} Winfried Just \\ Ohio University \\ Department of Mathematics \\ Athens, OH 45701-2979 USA \\ e-mail: [email protected] \\ \end{flushleft} \begin{flushleft} Arnold W. Miller \\ University of Wisconsin-Madison \\ Department of Mathematics Van Vleck Hall \\ 480 Lincoln Drive \\ Madison, Wisconsin 53706-1388, USA \\ e-mail: [email protected] \\ home page: http://math.wisc.edu/$^\sim$miller/ \\ \end{flushleft} \begin{flushleft} Marion Scheepers \\ Department of Mathematics \\ Boise State University \\ Boise, Idaho 83725 USA \\ e-mail: [email protected] \\ \end{flushleft} \begin{flushleft} Paul J. Szeptycki \\ Ohio University \\ Department of Mathematics \\ Athens, OH 45701-2979 USA \\ e-mail: [email protected] \\ \end{flushleft} \begin{center} August 1995 \end{center} \end{document}
\begin{document} \title{Photon-Photon Entanglement with a Single Trapped Atom} \author{B.~Weber} \author{H.~P.~Specht} \author{T.~M{\"{u}}ller} \author{J.~Bochmann} \author{M.~M{\"{u}}cke} \author{D.~L.~Moehring} \email{[email protected]} \author{G.~Rempe} \affiliation{Max-Planck-Institut f{\"{u}}r Quantenoptik, Hans-Kopfermann-Strasse~1, 85748 Garching, Germany} \date{\today} \begin{abstract} An experiment is performed where a single rubidium atom trapped within a high-finesse optical cavity emits two independently triggered entangled photons. The entanglement is mediated by the atom and is characterized both by a Bell inequality violation of $S=2.5$, as well as full quantum-state tomography, resulting in a fidelity exceeding $F=90\%$. The combination of cavity-QED and trapped atom techniques makes our protocol inherently deterministic --- an essential step for the generation of scalable entanglement between the nodes of a distributed quantum network. \end{abstract} \pacs{03.65.Ud, 03.67.Bg, 42.50.Pq, 42.50.Xa} \maketitle Of all the technologies currently being pursued for quantum information science, individually trapped atoms are among the most proven candidates for quantum information storage \cite{monroe:2002}. Photons, on the other hand, are the obvious choice for carriers of quantum information over large distances. Together, this naturally leads to an atom-photon interface as an ideal node for distributed quantum computing networks \cite{cirac:1997, monroe:2002, kimble:2008}. Progress towards the construction of such quantum networks has been recently achieved in experiments entangling single atoms trapped in a free-space radiation environment with their spontaneously emitted photons \cite{blinov:2004, volz:2006, matsukevich:2008, rosenfeld:2008}, however, high photon loss rates in the emission process severely limit their usefulness for quantum information processing protocols \cite{campbell:2007}. For \textit{scalable} atom-photon based quantum information processing, it is necessary to increase this entanglement efficiency. The most promising method to accomplish this is to combine the advantages of trapped atom entanglement techniques with cavity quantum electrodynamics where both atomic and photonic qubits are under complete control \cite{kimble:2008, wilk:2007b, hijlkema:2007, fortier:2007, khudaverdyan:2008}. In this Letter, we demonstrate a deterministic entanglement protocol with a single atom trapped in an optical cavity and two subsequently emitted single photons. Compared to previous entanglement experiments with a probabilistic transit of atoms through a cavity \cite{wilk:2007b}, our results increase the atom-cavity interaction time, and therefore also the number of successful atom-photon entanglement events from a single atom, by a factor of $10^5$. The long trapping times shown here also allow us to ensure that exactly one atom is within the cavity at a given time. This is critical for the generation of high-fidelity entangled states, and is not possible with atoms randomly loaded into a cavity \cite{wilk:2007b}. Furthermore, the highly efficient photon collection in the cavity output mode allows for photon detection efficiencies that are more than an order of magnitude greater than in free-space atom-photon entanglement experiments \cite{matsukevich:2008, rosenfeld:2008}. This also allows for the coherent mapping of the atomic quantum state onto the state of a second photon. 
The resulting entanglement is verified by a Bell inequality measurement between the two emitted photons \cite{bell:1964}, and is in convincing violation of classical physics. \begin{figure} \caption{\label{fig:setup} \label{fig:setup} \end{figure} \begin{figure*} \caption{\label{fig:pulses} \label{fig:pulses} \end{figure*} The main element of our experimental apparatus is a coupled atom-cavity system, as shown in Figure~\ref{fig:setup}. Cold $^{87}$Rb atoms are trapped at the intersection of two orthogonally aligned standing-wave beams --- a 1030~nm beam focused in the cavity mode with a trap depth of $\approx2.3$~mK and an intracavity standing-wave trap at 785~nm with a trap depth of $\approx30$~$\mu$K \cite{nussmann:2005b, hijlkema:2007}. Together, these traps create a measured ac-Stark shift of the atomic 5S$_{1/2}\leftrightarrow$~5P$_{3/2}$ transition frequency of approximately +95~MHz. In addition to providing a second trapping axis, the 785~nm laser is used to stabilize the cavity length to the Stark-shifted D2 $F$$=$$1\leftrightarrow F'$$=$$1$ transition. The atom-cavity system operates in the intermediate coupling regime with $(g,\kappa,\gamma)/2\pi=(5,6,3)$~MHz, where $g$ denotes the maximum (spatially dependent) atom-cavity coupling constant of the relevant transitions, $\kappa$ is the cavity field decay rate, and $\gamma$ is the atomic polarization decay rate. Once atoms are loaded into the cavity mode, they are cooled via lin$\bot$lin-polarized laser beams orthogonal to the cavity axis and near resonant with the $F$$=$$2\leftrightarrow F'$$=$$3$ and $F$$=$$1 \leftrightarrow F'$$=$$2$ transitions using a Sisyphus-like cooling mechanism [Fig.~\ref{fig:pulses}(a)] \cite{nussmann:2005b, murr:2006}. A laser addressing the $F$$=$$1\leftrightarrow F'$$=$$1$ transition is also applied for cavity enhanced cooling and to create photons in the cavity mode. Photons emitted from the cavity output are coupled into an optical fiber and directed to the photon detection setup. For high-fidelity entanglement generation, it is important to ensure that exactly one atom is in the cavity. This is accomplished via two independent techniques. First, we count the number of trapped atoms by directly imaging the cavity region (Fig.~\ref{fig:setup} and~\ref{fig:pulses}). A portion of the light scattered by the atoms into free space (perpendicular to the cavity and trapping axes) is collected using an objective lens with a numerical aperture of 0.43, focal length of 25~mm, and a measured resolution of $1.3~\mu$m. The collected light is focused onto a CCD camera with a total magnification of about 28. While this technique alone can determine the number of atoms with over 90\% certainty, we further confirm that we have trapped only one atom by analyzing the statistics of the emitted photon stream. In particular, only if there is exactly one atom in the trap will the cavity output show a perfect photon antibunching signal \cite{hijlkema:2007}. The combination of these two techniques allows us to discern that a single atom is trapped within the cavity with greater than 99\% fidelity. The experimental procedure follows a similar protocol to that used in \cite{wilk:2007b}, but with several substantial differences (Fig.~\ref{fig:pulses}). First, the trap-induced Stark shift of the atomic energy levels must be taken into account by detuning the laser and cavity frequencies. 
Second, the Stark shift has to be stabilized in order to keep the experimental conditions constant, otherwise the fluctuations can lead to unwanted transitions to nearby hyperfine levels of the P$_{3/2}$ manifold. Moreover, a variable detuning of laser and cavity from the atom decreases the photon generation efficiency. An additional concern is the random motion of the atom in the dipole trap. Such motion results from the unidirectional laser pulses employed in the entanglement sequence (discussed below) and shortens the coherence time of atomic superposition states \cite{kuhr:2003}. In fact, these laser pulses lead to significant heating, expelling the atom from the trap within a few milliseconds. We find that by embedding each entanglement sequence with an additional cooling interval [Fig.~\ref{fig:pulses}(b)], the atoms remain sufficiently cold to allow for long trapping times and high-fidelity entanglement generation. Following this cooling interval, the entanglement protocol starts by optically pumping the atom into the $|F,m_{F}\rangle=|2,0\rangle$ Zeeman sublevel with a measured efficiency greater than $80\%$ [Fig.~\ref{fig:pulses}(b)] \cite{footnote:opticalpump}. Next, entanglement between the atomic Zeeman state and the polarization of the emitted photon is created by driving a vacuum-stimulated Raman adiabatic passage (vSTIRAP) via a $\pi$-polarized laser pulse addressing the Stark-shifted $F$$=$$2\leftrightarrow$~$F'$$=$$1$ transition and the cavity frequency locked to the $F$$=$$1\leftrightarrow$~$F'$$=$$1$ transition [Fig.~\ref{fig:pulses}(d)] \cite{hennrich:2000}. With the atom trapped and coupled to the high-finesse optical cavity, the resulting entanglement is inherently deterministic \cite{monroe:2002, kimble:2008}: \begin{eqnarray} |\Psi_{\text{AP}}\rangle=\frac{1}{\sqrt{2}}(|1,-1\rangle|\sigma^+\rangle-|1,+1\rangle|\sigma^-\rangle). \label{eq:ap} \end{eqnarray} After a user-selected time interval, the atom-photon entanglement is converted into a photon-photon entanglement via a second vSTIRAP step with a $\pi$-polarized $F$$=$$1\leftrightarrow$~$F'$$=$$1$ laser pulse [Fig.~\ref{fig:pulses}(e)]. This maps the atomic state onto the polarization of a second emitted photon, resulting in an entangled photon pair: \begin{align} |\Psi_{\text{APP}}\rangle &=|1,0\rangle\otimes|\Psi^-_{\text{PP}}\rangle\nonumber\\ &=\frac{1}{\sqrt{2}}|1,0\rangle\otimes(|\sigma^+\rangle|\sigma^-\rangle-|\sigma^-\rangle|\sigma^+\rangle). \label{eq:pp} \end{align} We characterize our entanglement by measuring a Bell inequality violation of the two emitted photons \cite{bell:1964}. The form of Bell inequality violated here was first proposed by Clauser, Horne, Shimony, and Holt (CHSH) \cite{clauser:1969}, and is based on the expectation value $E(\alpha,\beta)$ of correlation measurements in different bases: \begin{align} E(\alpha,\beta) =&~p_{\downarrow\downarrow}(\alpha,\beta)+p_{\uparrow\uparrow}(\alpha,\beta)\nonumber\\ &-p_{\uparrow\downarrow}(\alpha,\beta)-p_{\downarrow\uparrow}(\alpha,\beta).\label{eq:corr} \end{align} Here, $p_{ij}(\alpha,\beta)$ is the probability to contiguously find photon 1 in state $|i\rangle$ and photon 2 in state $|j\rangle$ following polarization rotations by an amount $\alpha$ and $\beta$, respectively, and $\{\uparrow,\downarrow\}$ represent the two output ports of the polarizing beam splitter. 
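For reference, an ideal singlet state would give the polarization correlation
\begin{align}
E(\alpha,\beta)=-\cos\left[2(\alpha-\beta)\right]\nonumber
\end{align}
for analyzer angles $\alpha$ and $\beta$; this is a textbook property of $|\Psi^-_{\text{PP}}\rangle$ rather than a statement about our measured data. For the four analyzer settings used below, each relative angle is $\pm22.5^{\circ}$ or $67.5^{\circ}$, so each ideal correlation has magnitude $1/\sqrt{2}$.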
CHSH show that all local hidden-variable theories must obey the inequality \begin{align} S(\alpha,\alpha';\beta,\beta')\equiv &~|E(\alpha',\beta')-E(\alpha,\beta')|\nonumber\\ &+|E(\alpha',\beta)+E(\alpha,\beta)| \leq 2.\label{eq:ineq} \end{align} This inequality can only be violated via quantum physics. In particular, our entangled state $|\Psi^-_{\text{PP}}\rangle$ allows for a Bell signal as large as $2\sqrt{2}$. \begin{figure} \caption{\label{fig:dm} \label{fig:dm} \end{figure} In our experiment, the two photons are emitted into the same spatial output mode and are probabilistically directed to the two different measurement bases by a 50/50 non-polarizing beam splitter (Fig.~\ref{fig:setup}). This allows for two simultaneous Bell inequality measurements. To eliminate possible systematic effects during the course of the experiment, the polarization measurement bases are changed before every atom trapping event via motorized rotation stages. With the photon pairs measured in a combination of four different polarization bases, we obtain Bell signals of \begin{align} &S(0^{\circ},45^{\circ};22.5^{\circ},-22.5^{\circ})=2.46\pm0.05~\text{and} \nonumber\\ &S(22.5^{\circ},-22.5^{\circ};0^{\circ},45^{\circ})=2.53\pm0.05, \nonumber \end{align} both in clear violation of the classical limit of 2 by more than 9 standard deviations. In this experiment, the photons are temporally separated by $0.8~\mu$s and the optical path length between the cavity and the photon detectors is 13 meters. Therefore, the first photon is detected before the generation of the second. Nevertheless, the fact that the measured correlations violate a Bell inequality can only be explained by quantum entanglement, where the non-classical information between the two photons is temporarily stored in the single trapped atom. This is similar to experiments with atomic ensembles where the atomic qubit state must be converted to a photon for measurement \cite{jenkins:2007, kimble:2008, yuan:2008}. The entangled state is additionally characterized via quantum state tomography of the emitted photons. For this, we follow the procedure outlined in \cite{altepeter:2005} and measure the entangled photons in nine different polarization bases. The resulting density matrix for the two photons separated by $0.8~\mu$s is shown in Figure~\ref{fig:dm} with an entanglement fidelity of $F=0.902\pm0.009$ with respect to the $|\Psi^-_{\text{PP}}\rangle$ Bell state of the photons (equation~\ref{eq:pp}), clearly above the classical limit of $F=0.5$. Other calculated measures of entanglement for this state include the concurrence $C=0.81\pm0.03$, entanglement of formation $E_F=0.73\pm0.04$, and logarithmic negativity $E_N=0.867\pm0.014$. They are all significantly above their classical limit of zero and close to their maximum of 1 for a two-qubit state \cite{plenio:2007}. From the measured density matrix, we can also infer a Bell signal of $S=2.47\pm0.04$, consistent with the results given above. \begin{figure} \caption{\label{fig:coherence} \label{fig:coherence} \end{figure} With the atom trapping lifetimes in this experiment of $\approx4.1$ seconds, the separation between the entangling and mapping pulses is currently limited only by the coherence time of the atomic qubit. This coherence is determined by measuring density matrices as a function of time between the two pulses, $\Delta t$. We obtain an entanglement lifetime of $\tau_e=5.7\pm0.2~\mu$s (Fig.~\ref{fig:coherence}), limited by phase noise between the two atomic Zeeman states. 
This phase sensitivity is evident by the decreasing off-diagonal coherence terms in the density matrix while the diagonal components remain nearly constant (Figures~\ref{fig:dm} and~\ref{fig:coherence}). This can also be seen from the decay of the fidelity to 50\%, and not 25\% as would be the case for a completely mixed state. Our measured entanglement lifetime is comparable to lifetimes observed in atomic ensemble experiments \cite{jenkins:2007, kimble:2008, yuan:2008, simon:2007}. In our experiment, the limiting mechanisms are magnetic field instabilities ($\sim20$~mG) and a variable differential ac-Stark shift of the atomic superposition states. The differential Stark shifts are due to motion of the atom together with intensity fluctuations of the cavity stabilization laser ($\sim10\%$) and an uncompensated circular polarization component of the trapping lasers ($\sim2\%$). With an active stabilization of the magnetic field and optimized laser parameters, this lifetime may be increased to over $100~\mu$s \cite{rosenfeld:2008}. Additionally, by converting the atomic qubit to clock states, the coherence time of a single atom trapped in a standing-wave can be increased to hundreds of ms \cite{kuhr:2003}. In addition to the effects mentioned above, the fidelity of the entanglement is further limited by imperfect polarization control in the optical path to the detection setup, dark counts of the photon detectors, and multiple scattered photons during the second pulse. Indeed, by limiting our photon detection window to include only the first 40\% of the second photon pulse, we observe an increased fidelity of $F=0.932\pm0.014$, albeit with a reduced coincidence rate. However, with the incorporation of a fast excitation scheme \cite{bochmann:2008} and improved cooling and cavity parameters, many of these effects can be dramatically reduced. Finally, the most important aspect for scalable atom-photon networking is the overall success probability. Here, with a single atom in the cavity, the probability of detecting a two-photon event is about $\approx2.4\times10^{-4}$, as the probability of emitting a single photon into the cavity mode during the entangling pulse and the probability of further emitting a photon during the mapping pulse are each $\approx8.6\%$, and the detection efficiency for a single photon present inside the cavity is $\sim0.2$. This results in $\approx370$ produced entangled two-photon pairs per second, of which $\approx12$ are detected. These values are largely limited by the non-optimal atom-cavity coupling due to atomic motion, optical pumping inefficiencies, and photon loss mechanisms, including a 50\% cavity absorption loss due to a mirror defect. While an atom trapped within an optical cavity can in principle generate photons with unit efficiency \cite{cirac:1997, kimble:2008}, these results still compare well to free-space single atom entanglement experiments with detection probabilities for two subsequent single photons $<5\times10^{-7}$ \cite{matsukevich:2008, rosenfeld:2008}. Our entanglement scheme may also be extended to many-photon \cite{schon:2005} and many-atom entanglement protocols \cite{feng:2003, duan:2003, browne:2003}, as well as schemes for quantum teleportation, quantum repeaters \cite{bose:1999}, and a loophole-free Bell inequality violation. 
Finally, with the recent completion of a second, independent trapped-atom-cavity system in our group \cite{bochmann:2008}, the demonstration of highly efficient remote-atom entanglement should be possible in the near future. \begin{acknowledgments} The authors thank N. Kiesel and A. Ourjoumtsev for useful discussions. This work was partially supported by the Deutsche Forschungsgemeinschaft (Research Unit 635, Cluster of Excellence MAP) and the European Union (IST project SCALA). D. L. M. acknowledges support from the Alexander von Humboldt Foundation. \end{acknowledgments} \begin{thebibliography}{29} \expandafter\ifx\csname natexlab\endcsname\relax\def\natexlab#1{#1}\fi \expandafter\ifx\csname bibnamefont\endcsname\relax \def\bibnamefont#1{#1}\fi \expandafter\ifx\csname bibfnamefont\endcsname\relax \def\bibfnamefont#1{#1}\fi \expandafter\ifx\csname citenamefont\endcsname\relax \def\citenamefont#1{#1}\fi \expandafter\ifx\csname url\endcsname\relax \def\url#1{\texttt{#1}}\fi \expandafter\ifx\csname urlprefix\endcsname\relax\defURL {URL }\fi \providecommand{\bibinfo}[2]{#2} \providecommand{\eprint}[2][]{\url{#2}} \bibitem[{\citenamefont{Monroe}(2002)}]{monroe:2002} \bibinfo{author}{\bibfnamefont{C.}~\bibnamefont{Monroe}}, \bibinfo{journal}{Nature} \textbf{\bibinfo{volume}{416}}, \bibinfo{pages}{238} (\bibinfo{year}{2002}). \bibitem[{\citenamefont{Cirac et~al.}(1997)\citenamefont{Cirac, Zoller, Kimble, and Mabuchi}}]{cirac:1997} \bibinfo{author}{\bibfnamefont{J.~I.} \bibnamefont{Cirac}}, \bibinfo{author}{\bibfnamefont{P.}~\bibnamefont{Zoller}}, \bibinfo{author}{\bibfnamefont{H.~J.} \bibnamefont{Kimble}}, \bibnamefont{and} \bibinfo{author}{\bibfnamefont{H.}~\bibnamefont{Mabuchi}}, \bibinfo{journal}{Phys. Rev. Lett.} \textbf{\bibinfo{volume}{78}}, \bibinfo{pages}{3221} (\bibinfo{year}{1997}). \bibitem[{\citenamefont{Kimble}(2008)}]{kimble:2008} \bibinfo{author}{\bibfnamefont{H.~J.} \bibnamefont{Kimble}}, \bibinfo{journal}{Nature} \textbf{\bibinfo{volume}{453}}, \bibinfo{pages}{1023} (\bibinfo{year}{2008}). \bibitem[{\citenamefont{Blinov et~al.}(2004)\citenamefont{Blinov, Moehring, Duan, and Monroe}}]{blinov:2004} \bibinfo{author}{\bibfnamefont{B.~B.} \bibnamefont{Blinov}}, \bibinfo{author}{\bibfnamefont{D.~L.} \bibnamefont{Moehring}}, \bibinfo{author}{\bibfnamefont{L.-M.} \bibnamefont{Duan}}, \bibnamefont{and} \bibinfo{author}{\bibfnamefont{C.}~\bibnamefont{Monroe}}, \bibinfo{journal}{Nature} \textbf{\bibinfo{volume}{428}}, \bibinfo{pages}{153} (\bibinfo{year}{2004}). \bibitem[{\citenamefont{Volz et~al.}(2006)\citenamefont{Volz, Weber, Schlenk, Rosenfeld, Vrana, Saucke, Kurtsiefer, and Weinfurter}}]{volz:2006} \bibinfo{author}{\bibfnamefont{J.}~\bibnamefont{Volz}}, \bibinfo{author}{\bibfnamefont{M.}~\bibnamefont{Weber}}, \bibinfo{author}{\bibfnamefont{D.}~\bibnamefont{Schlenk}}, \bibinfo{author}{\bibfnamefont{W.}~\bibnamefont{Rosenfeld}}, \bibinfo{author}{\bibfnamefont{J.}~\bibnamefont{Vrana}}, \bibinfo{author}{\bibfnamefont{K.}~\bibnamefont{Saucke}}, \bibinfo{author}{\bibfnamefont{C.}~\bibnamefont{Kurtsiefer}}, \bibnamefont{and} \bibinfo{author}{\bibfnamefont{H.}~\bibnamefont{Weinfurter}}, \bibinfo{journal}{Phys. Rev. Lett.} \textbf{\bibinfo{volume}{96}}, \bibinfo{pages}{030404} (\bibinfo{year}{2006}). 
\bibitem[{\citenamefont{Matsukevich et~al.}(2008)\citenamefont{Matsukevich, Maunz, Moehring, Olmschenk, and Monroe}}]{matsukevich:2008} \bibinfo{author}{\bibfnamefont{D.~N.} \bibnamefont{Matsukevich}}, \bibinfo{author}{\bibfnamefont{P.}~\bibnamefont{Maunz}}, \bibinfo{author}{\bibfnamefont{D.~L.} \bibnamefont{Moehring}}, \bibinfo{author}{\bibfnamefont{S.}~\bibnamefont{Olmschenk}}, \bibnamefont{and} \bibinfo{author}{\bibfnamefont{C.}~\bibnamefont{Monroe}}, \bibinfo{journal}{Phys. Rev. Lett.} \textbf{\bibinfo{volume}{100}}, \bibinfo{pages}{150404} (\bibinfo{year}{2008}). \bibitem[{\citenamefont{Rosenfeld et~al.}(2008)\citenamefont{Rosenfeld, Hocke, Henkel, Krug, Volz, Weber, and Weinfurter}}]{rosenfeld:2008} \bibinfo{author}{\bibfnamefont{W.}~\bibnamefont{Rosenfeld}}, \bibinfo{author}{\bibfnamefont{F.}~\bibnamefont{Hocke}}, \bibinfo{author}{\bibfnamefont{F.}~\bibnamefont{Henkel}}, \bibinfo{author}{\bibfnamefont{M.}~\bibnamefont{Krug}}, \bibinfo{author}{\bibfnamefont{J.}~\bibnamefont{Volz}}, \bibinfo{author}{\bibfnamefont{M.}~\bibnamefont{Weber}}, \bibnamefont{and} \bibinfo{author}{\bibfnamefont{H.}~\bibnamefont{Weinfurter}}, \bibinfo{journal}{arXiv:0808.3538v1 [quant-ph]} (\bibinfo{year}{2008}). \bibitem[{\citenamefont{Campbell and Benjamin}(2008)}]{campbell:2007} \bibinfo{author}{\bibfnamefont{E.~T.} \bibnamefont{Campbell}} \bibnamefont{and} \bibinfo{author}{\bibfnamefont{S.~C.} \bibnamefont{Benjamin}}, \bibinfo{journal}{Phys. Rev. Lett.} \textbf{\bibinfo{volume}{101}}, \bibinfo{eid}{130502} (\bibinfo{year}{2008}). \bibitem[{\citenamefont{Wilk et~al.}(2007)\citenamefont{Wilk, Webster, Kuhn, and Rempe}}]{wilk:2007b} \bibinfo{author}{\bibfnamefont{T.}~\bibnamefont{Wilk}}, \bibinfo{author}{\bibfnamefont{S.~C.} \bibnamefont{Webster}}, \bibinfo{author}{\bibfnamefont{A.}~\bibnamefont{Kuhn}}, \bibnamefont{and} \bibinfo{author}{\bibfnamefont{G.}~\bibnamefont{Rempe}}, \bibinfo{journal}{Science} \textbf{\bibinfo{volume}{317}}, \bibinfo{pages}{488} (\bibinfo{year}{2007}). \bibitem[{\citenamefont{Hijlkema et~al.}(2007)\citenamefont{Hijlkema, Weber, Specht, Webster, Kuhn, and Rempe}}]{hijlkema:2007} \bibinfo{author}{\bibfnamefont{M.}~\bibnamefont{Hijlkema}}, \bibinfo{author}{\bibfnamefont{B.}~\bibnamefont{Weber}}, \bibinfo{author}{\bibfnamefont{H.~P.} \bibnamefont{Specht}}, \bibinfo{author}{\bibfnamefont{S.~C.} \bibnamefont{Webster}}, \bibinfo{author}{\bibfnamefont{A.}~\bibnamefont{Kuhn}}, \bibnamefont{and} \bibinfo{author}{\bibfnamefont{G.}~\bibnamefont{Rempe}}, \bibinfo{journal}{Nature Physics} \textbf{\bibinfo{volume}{3}}, \bibinfo{pages}{253} (\bibinfo{year}{2007}). \bibitem[{\citenamefont{Fortier et~al.}(2007)\citenamefont{Fortier, Kim, Gibbons, Ahmadi, and Chapman}}]{fortier:2007} \bibinfo{author}{\bibfnamefont{K.~M.} \bibnamefont{Fortier}}, \bibinfo{author}{\bibfnamefont{S.~Y.} \bibnamefont{Kim}}, \bibinfo{author}{\bibfnamefont{M.~J.} \bibnamefont{Gibbons}}, \bibinfo{author}{\bibfnamefont{P.}~\bibnamefont{Ahmadi}}, \bibnamefont{and} \bibinfo{author}{\bibfnamefont{M.~S.} \bibnamefont{Chapman}}, \bibinfo{journal}{Phys. Rev. Lett.} \textbf{\bibinfo{volume}{98}}, \bibinfo{eid}{233601} (\bibinfo{year}{2007}). 
\bibitem[{\citenamefont{Khudaverdyan et~al.}(2008)\citenamefont{Khudaverdyan, Alt, Dotsenko, Kampschulte, Lenhard, Rauschenbeutel, Reick, Sch\"{o}rner, Widera, and Meschede}}]{khudaverdyan:2008} \bibinfo{author}{\bibfnamefont{M.}~\bibnamefont{Khudaverdyan}}, \bibinfo{author}{\bibfnamefont{W.}~\bibnamefont{Alt}}, \bibinfo{author}{\bibfnamefont{I.}~\bibnamefont{Dotsenko}}, \bibinfo{author}{\bibfnamefont{T.}~\bibnamefont{Kampschulte}}, \bibinfo{author}{\bibfnamefont{K.}~\bibnamefont{Lenhard}}, \bibinfo{author}{\bibfnamefont{A.}~\bibnamefont{Rauschenbeutel}}, \bibinfo{author}{\bibfnamefont{S.}~\bibnamefont{Reick}}, \bibinfo{author}{\bibfnamefont{K.}~\bibnamefont{Sch\"{o}rner}}, \bibinfo{author}{\bibfnamefont{A.}~\bibnamefont{Widera}}, \bibnamefont{and} \bibinfo{author}{\bibfnamefont{D.}~\bibnamefont{Meschede}}, \bibinfo{journal}{New Journal of Physics} \textbf{\bibinfo{volume}{10}}, \bibinfo{pages}{073023} (\bibinfo{year}{2008}). \bibitem[{\citenamefont{Bell}(1964)}]{bell:1964} \bibinfo{author}{\bibfnamefont{J.~S.} \bibnamefont{Bell}}, \bibinfo{journal}{Physics (Long Island City, N.Y.)} \textbf{\bibinfo{volume}{1}}, \bibinfo{pages}{195} (\bibinfo{year}{1964}). \bibitem[{\citenamefont{Nu{\ss}mann et~al.}(2005)\citenamefont{Nu{\ss}mann, Murr, Hijlkema, Weber, Kuhn, and Rempe}}]{nussmann:2005b} \bibinfo{author}{\bibfnamefont{S.}~\bibnamefont{Nu{\ss}mann}}, \bibinfo{author}{\bibfnamefont{K.}~\bibnamefont{Murr}}, \bibinfo{author}{\bibfnamefont{M.}~\bibnamefont{Hijlkema}}, \bibinfo{author}{\bibfnamefont{B.}~\bibnamefont{Weber}}, \bibinfo{author}{\bibfnamefont{A.}~\bibnamefont{Kuhn}}, \bibnamefont{and} \bibinfo{author}{\bibfnamefont{G.}~\bibnamefont{Rempe}}, \bibinfo{journal}{Nature Physics} \textbf{\bibinfo{volume}{1}}, \bibinfo{pages}{122} (\bibinfo{year}{2005}). \bibitem[{\citenamefont{Murr et~al.}(2006)\citenamefont{Murr, Nu{\ss}mann, Puppe, Hijlkema, Weber, Webster, Kuhn, and Rempe}}]{murr:2006} \bibinfo{author}{\bibfnamefont{K.}~\bibnamefont{Murr}}, \bibinfo{author}{\bibfnamefont{S.}~\bibnamefont{Nu{\ss}mann}}, \bibinfo{author}{\bibfnamefont{T.}~\bibnamefont{Puppe}}, \bibinfo{author}{\bibfnamefont{M.}~\bibnamefont{Hijlkema}}, \bibinfo{author}{\bibfnamefont{B.}~\bibnamefont{Weber}}, \bibinfo{author}{\bibfnamefont{S.~C.} \bibnamefont{Webster}}, \bibinfo{author}{\bibfnamefont{A.}~\bibnamefont{Kuhn}}, \bibnamefont{and} \bibinfo{author}{\bibfnamefont{G.}~\bibnamefont{Rempe}}, \bibinfo{journal}{Phys. Rev. A} \textbf{\bibinfo{volume}{73}}, \bibinfo{eid}{063415} (\bibinfo{year}{2006}). \bibitem[{\citenamefont{Kuhr et~al.}(2003)\citenamefont{Kuhr, Alt, Schrader, Dotsenko, Miroshnychenko, Rosenfeld, Khudaverdyan, Gomer, Rauschenbeutel, and Meschede}}]{kuhr:2003} \bibinfo{author}{\bibfnamefont{S.}~\bibnamefont{Kuhr}}, \bibinfo{author}{\bibfnamefont{W.}~\bibnamefont{Alt}}, \bibinfo{author}{\bibfnamefont{D.}~\bibnamefont{Schrader}}, \bibinfo{author}{\bibfnamefont{I.}~\bibnamefont{Dotsenko}}, \bibinfo{author}{\bibfnamefont{Y.}~\bibnamefont{Miroshnychenko}}, \bibinfo{author}{\bibfnamefont{W.}~\bibnamefont{Rosenfeld}}, \bibinfo{author}{\bibfnamefont{M.}~\bibnamefont{Khudaverdyan}}, \bibinfo{author}{\bibfnamefont{V.}~\bibnamefont{Gomer}}, \bibinfo{author}{\bibfnamefont{A.}~\bibnamefont{Rauschenbeutel}}, \bibnamefont{and} \bibinfo{author}{\bibfnamefont{D.}~\bibnamefont{Meschede}}, \bibinfo{journal}{Phys. Rev. Lett.} \textbf{\bibinfo{volume}{91}}, \bibinfo{pages}{213002} (\bibinfo{year}{2003}). 
\bibitem{footnote:opticalpump} Note that optical pumping to the wrong state will not reduce the measured entanglement fidelity as it cannot result in a two-photon entanglement event \cite{wilk:2007b}. \bibitem[{\citenamefont{Hennrich et~al.}(2000)\citenamefont{Hennrich, Legero, Kuhn, and Rempe}}]{hennrich:2000} \bibinfo{author}{\bibfnamefont{M.}~\bibnamefont{Hennrich}}, \bibinfo{author}{\bibfnamefont{T.}~\bibnamefont{Legero}}, \bibinfo{author}{\bibfnamefont{A.}~\bibnamefont{Kuhn}}, \bibnamefont{and} \bibinfo{author}{\bibfnamefont{G.}~\bibnamefont{Rempe}}, \bibinfo{journal}{Phys. Rev. Lett.} \textbf{\bibinfo{volume}{85}}, \bibinfo{pages}{4872} (\bibinfo{year}{2000}). \bibitem[{\citenamefont{Clauser et~al.}(1969)\citenamefont{Clauser, Horne, Shimony, and Holt}}]{clauser:1969} \bibinfo{author}{\bibfnamefont{J.~F.} \bibnamefont{Clauser}}, \bibinfo{author}{\bibfnamefont{M.~A.} \bibnamefont{Horne}}, \bibinfo{author}{\bibfnamefont{A.}~\bibnamefont{Shimony}}, \bibnamefont{and} \bibinfo{author}{\bibfnamefont{R.~A.} \bibnamefont{Holt}}, \bibinfo{journal}{Phys. Rev. Lett.} \textbf{\bibinfo{volume}{23}}, \bibinfo{pages}{880} (\bibinfo{year}{1969}). \bibitem[{\citenamefont{Jenkins et~al.}(2007)\citenamefont{Jenkins, Matsukevich, Chanelière, Lan, Kennedy, and Kuzmich}}]{jenkins:2007} \bibinfo{author}{\bibfnamefont{S.~D.} \bibnamefont{Jenkins}}, \bibinfo{author}{\bibfnamefont{D.~N.} \bibnamefont{Matsukevich}}, \bibinfo{author}{\bibfnamefont{T.}~\bibnamefont{Chanelière}}, \bibinfo{author}{\bibfnamefont{S.-Y.} \bibnamefont{Lan}}, \bibinfo{author}{\bibfnamefont{T.~A.~B.} \bibnamefont{Kennedy}}, \bibnamefont{and} \bibinfo{author}{\bibfnamefont{A.}~\bibnamefont{Kuzmich}}, \bibinfo{journal}{J. Opt. Soc. Am. B} \textbf{\bibinfo{volume}{24}}, \bibinfo{pages}{316} (\bibinfo{year}{2007}). \bibitem[{\citenamefont{Yuan et~al.}(2008)\citenamefont{Yuan, Chen, Zhao, Chen, Schmiedmayer, and Pan}}]{yuan:2008} \bibinfo{author}{\bibfnamefont{Z.-S.} \bibnamefont{Yuan}}, \bibinfo{author}{\bibfnamefont{Y.-A.} \bibnamefont{Chen}}, \bibinfo{author}{\bibfnamefont{B.}~\bibnamefont{Zhao}}, \bibinfo{author}{\bibfnamefont{S.}~\bibnamefont{Chen}}, \bibinfo{author}{\bibfnamefont{J.}~\bibnamefont{Schmiedmayer}}, \bibnamefont{and} \bibinfo{author}{\bibfnamefont{J.-W.} \bibnamefont{Pan}}, \bibinfo{journal}{Nature} \textbf{\bibinfo{volume}{454}}, \bibinfo{pages}{1098} (\bibinfo{year}{2008}). \bibitem[{\citenamefont{Altepeter et~al.}(2005)\citenamefont{Altepeter, Jeffrey, and Kwiat}}]{altepeter:2005} \bibinfo{author}{\bibfnamefont{J.~B.} \bibnamefont{Altepeter}}, \bibinfo{author}{\bibfnamefont{E.~R.} \bibnamefont{Jeffrey}}, \bibnamefont{and} \bibinfo{author}{\bibfnamefont{P.~G.} \bibnamefont{Kwiat}}, \bibinfo{journal}{Adv. At. Mol. Opt. Phys.} \textbf{\bibinfo{volume}{52}}, \bibinfo{pages}{105} (\bibinfo{year}{2005}). \bibitem[{\citenamefont{Plenio and Virmani}(2007)}]{plenio:2007} \bibinfo{author}{\bibfnamefont{M.~B.} \bibnamefont{Plenio}} \bibnamefont{and} \bibinfo{author}{\bibfnamefont{S.}~\bibnamefont{Virmani}}, \bibinfo{journal}{Quant. Inf. Comp.} \textbf{\bibinfo{volume}{7}}, \bibinfo{pages}{1} (\bibinfo{year}{2007}). 
\bibitem[{\citenamefont{Simon et~al.}(2007)\citenamefont{Simon, Tanji, Ghosh, and Vuleti\'{c}}}]{simon:2007} \bibinfo{author}{\bibfnamefont{J.}~\bibnamefont{Simon}}, \bibinfo{author}{\bibfnamefont{H.}~\bibnamefont{Tanji}}, \bibinfo{author}{\bibfnamefont{S.}~\bibnamefont{Ghosh}}, \bibnamefont{and} \bibinfo{author}{\bibfnamefont{V.}~\bibnamefont{Vuleti\'{c}}}, \bibinfo{journal}{Nature Physics} \textbf{\bibinfo{volume}{3}}, \bibinfo{pages}{1745} (\bibinfo{year}{2007}). \bibitem[{\citenamefont{Bochmann et~al.}(2008)\citenamefont{Bochmann, M{\"{u}}cke, Langfahl-Klabes, Erbel, Weber, Specht, Moehring, and Rempe}}]{bochmann:2008} \bibinfo{author}{\bibfnamefont{J.}~\bibnamefont{Bochmann}}, \bibinfo{author}{\bibfnamefont{M.}~\bibnamefont{M{\"{u}}cke}}, \bibinfo{author}{\bibfnamefont{G.}~\bibnamefont{Langfahl-Klabes}}, \bibinfo{author}{\bibfnamefont{C.}~\bibnamefont{Erbel}}, \bibinfo{author}{\bibfnamefont{B.}~\bibnamefont{Weber}}, \bibinfo{author}{\bibfnamefont{H.~P.} \bibnamefont{Specht}}, \bibinfo{author}{\bibfnamefont{D.~L.} \bibnamefont{Moehring}}, \bibnamefont{and} \bibinfo{author}{\bibfnamefont{G.}~\bibnamefont{Rempe}}, \bibinfo{journal}{(Phys. Rev. Lett. in press) arXiv:0806.2600v1 [quant-ph]} (\bibinfo{year}{2008}). \bibitem[{\citenamefont{Sch{\"{o}}n et~al.}(2005)\citenamefont{Sch{\"{o}}n, Solano, Verstraete, Cirac, and Wolf}}]{schon:2005} \bibinfo{author}{\bibfnamefont{C.}~\bibnamefont{Sch{\"{o}}n}}, \bibinfo{author}{\bibfnamefont{E.}~\bibnamefont{Solano}}, \bibinfo{author}{\bibfnamefont{F.}~\bibnamefont{Verstraete}}, \bibinfo{author}{\bibfnamefont{J.~I.} \bibnamefont{Cirac}}, \bibnamefont{and} \bibinfo{author}{\bibfnamefont{M.~M.} \bibnamefont{Wolf}}, \bibinfo{journal}{Phys. Rev. Lett.} \textbf{\bibinfo{volume}{95}}, \bibinfo{eid}{110503} (\bibinfo{year}{2005}). \bibitem[{\citenamefont{Feng et~al.}(2003)\citenamefont{Feng, Zhang, Li, Gong, and Xu}}]{feng:2003} \bibinfo{author}{\bibfnamefont{X.-L.} \bibnamefont{Feng}}, \bibinfo{author}{\bibfnamefont{Z.-M.} \bibnamefont{Zhang}}, \bibinfo{author}{\bibfnamefont{X.-D.} \bibnamefont{Li}}, \bibinfo{author}{\bibfnamefont{S.-Q.} \bibnamefont{Gong}}, \bibnamefont{and} \bibinfo{author}{\bibfnamefont{Z.-Z.} \bibnamefont{Xu}}, \bibinfo{journal}{Phys. Rev. Lett.} \textbf{\bibinfo{volume}{90}}, \bibinfo{pages}{217902} (\bibinfo{year}{2003}). \bibitem[{\citenamefont{Duan and Kimble}(2003)}]{duan:2003} \bibinfo{author}{\bibfnamefont{L.-M.} \bibnamefont{Duan}} \bibnamefont{and} \bibinfo{author}{\bibfnamefont{H.~J.}~\bibnamefont{Kimble}}, \bibinfo{journal}{Phys. Rev. Lett.} \textbf{\bibinfo{volume}{90}}, \bibinfo{pages}{253601} (\bibinfo{year}{2003}). \bibitem[{\citenamefont{Browne et~al.}(2003)\citenamefont{Browne, Plenio, and Huelga}}]{browne:2003} \bibinfo{author}{\bibfnamefont{D.~E.} \bibnamefont{Browne}}, \bibinfo{author}{\bibfnamefont{M.~B.} \bibnamefont{Plenio}}, \bibnamefont{and} \bibinfo{author}{\bibfnamefont{S.~F.} \bibnamefont{Huelga}}, \bibinfo{journal}{Phys. Rev. Lett.} \textbf{\bibinfo{volume}{91}}, \bibinfo{pages}{067901} (\bibinfo{year}{2003}). \bibitem[{\citenamefont{Bose et~al.}(1999)\citenamefont{Bose, Knight, Plenio, and Vedral}}]{bose:1999} \bibinfo{author}{\bibfnamefont{S.}~\bibnamefont{Bose}}, \bibinfo{author}{\bibfnamefont{P.~L.} \bibnamefont{Knight}}, \bibinfo{author}{\bibfnamefont{M.~B.} \bibnamefont{Plenio}}, \bibnamefont{and} \bibinfo{author}{\bibfnamefont{V.}~\bibnamefont{Vedral}}, \bibinfo{journal}{Phys. Rev. Lett.} \textbf{\bibinfo{volume}{83}}, \bibinfo{pages}{5158} (\bibinfo{year}{1999}). \end{thebibliography} \end{document}
\begin{document} \title[The uniqueness of the canonical connection]{A note on the uniqueness of the canonical connection of a naturally reductive space} \author{Carlos Olmos} \author{Silvio Reggiani} \address{Facultad de Matem\'atica, Astronom\'ia y F\'\i sica, Universidad Nacional de C\'ordoba, Ciudad Universitaria, 5000 C\'ordoba, Argentina} \email{[email protected] \qquad [email protected]} \date{\today} \thanks {2010 {\it Mathematics Subject Classification}. Primary 53C30; Secondary 53C35} \thanks {{\it Key words and phrases}. Naturally reductive, canonical connection, skew-symmetric torsion, isometry group} \thanks{Supported by Universidad Nacional de C\'ordoba and CONICET. Partially supported by ANCyT, Secyt-UNC and CIEM} \begin{abstract} We extend the result in J.\ Reine Angew.\ Math.\ \textbf{664}, 29--53, to the non-compact case. Namely, we prove that the canonical connection on a simply connected and irreducible naturally reductive space is unique, provided the space is not a sphere, a compact Lie group with a bi-invariant metric or its symmetric dual. In particular, the canonical connection is unique for the hyperbolic space when the dimension is different from three. We also prove that the canonical connection on the sphere is unique for the symmetric presentation. Finally, we compute the full isometry group (connected component) of a compact and locally irreducible naturally reductive space. \end{abstract} \maketitle \section{Introduction and preliminaries} \'Elie Cartan, in the 1920s, asked for linear connections, on a given Riemannian space, that adapt to the geometry in a more suitable way than the Levi-Civita connection \cite{cartan-1924}. He proposed to study the so-called connections with skew-torsion. Such connections are characterized by the property of having parallel metric tensor and the same geodesics as the Levi-Civita connection. Spaces with skew-torsion have attracted increasing interest in recent years because of their applications to theoretical physics (see \cite{agricola-2006}). A distinguished family of Riemannian spaces with skew-torsion is that of the naturally reductive spaces. In fact, the canonical connection $\nabla^c$ of a naturally reductive space $M=G/H$ provides a metric connection and has skew-torsion $T = -2(\nabla - \nabla^c)$, where $\nabla$ is the Levi-Civita connection. If $M$ is a symmetric space, then the Levi-Civita connection is a canonical connection. In a naturally reductive space one has that $\nabla^c R = 0$ and $\nabla^c T = 0$, where $R$ is the Riemannian curvature tensor. More generally, any $G$-invariant tensor on $M$ must be parallel with respect to the canonical connection. In \cite{olmos-reggiani-2012} it was proved that the canonical connection of a (locally irreducible) compact naturally reductive space is unique, provided the space is different from the following symmetric spaces: spheres, real projective spaces and compact Lie groups with a bi-invariant metric. The proof given in \cite{olmos-reggiani-2012} makes strong use of the compactness assumption (besides the so-called skew-torsion holonomy theorem). Namely, it makes use of a decomposition theorem for compact homogeneous spaces which is false in the non-compact case (however, such a decomposition theorem was crucial in the proof of the skew-torsion holonomy theorem).
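To illustrate why compact Lie groups with a bi-invariant metric (and their symmetric duals) are genuine exceptions in the statement above, we recall the standard example; it is included here only for orientation (see e.g. \cite{agricola-2006}). Let $K$ be a compact non-abelian Lie group with a bi-invariant metric. For left-invariant vector fields $X, Y$ the Levi-Civita connection is $\nabla_X Y = \tfrac12 [X,Y]$, and the one-parameter family $$\nabla^t_X Y = t\,[X,Y], \qquad t \in \bbr,$$ consists of metric connections with totally skew-symmetric torsion $T^t(X,Y) = (2t-1)[X,Y]$ and with the same geodesics (the one-parameter subgroups) as $\nabla$. Here $\nabla^0$ and $\nabla^1$ are the canonical connections associated with the presentations of $K$ by left and right translations, respectively, while $\nabla^{1/2} = \nabla$ is the canonical connection of the symmetric presentation $K \simeq (K \times K)/\Delta K$. In particular, such a space carries more than one canonical connection, which is exactly the behaviour excluded in the uniqueness results below.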
The purpose of this note is to prove that the canonical connection is unique also for simply connected (irreducible) non-compact naturally reductive spaces, with the only exceptions of dual symmetric spaces of compact Lie groups. In particular, the canonical connection is unique for any real hyperbolic space $M = H^n$ with $n \neq 3$ (in contrast with the compact case, where many spheres are excluded). Observe that the main result of this article, stated precisely in Theorem \ref{unica}, also has a local version, since a canonical connection on a naturally reductive space lifts to the universal cover. In order to prove Theorem~\ref{unica}, we use some auxiliary facts which we now mention. Namely, the real hyperbolic space $H^n$ admits a unique naturally reductive presentation (the symmetric pair presentation). This allows us to prove that the canonical connection on $H^n$ is unique for all $n \neq 3$. To do this, we use that the canonical connection on the sphere $S^n$, with $n \neq 3$, is unique if we fix the symmetric presentation $S^n = \SO(n + 1)/\SO(n)$, since from Hodge theory there are no non-trivial parallel $3$-forms (see Remark~\ref{hodge}). Finally, in Section~\ref{sec:3}, we explicitly compute the isometry group of a compact and locally irreducible naturally reductive space. This extends the result in \cite{reggiani-2010} for normal homogeneous spaces (and known results by Onishchik \cite{onishchik-1992} and Shankar \cite{shankar-2001} on isometry groups of homogeneous spaces). \subsection{Skew-torsion holonomy systems} In a previous work \cite{olmos-reggiani-2012} we dealt with the concept of skew-torsion holonomy systems, which are a variation of the so-called holonomy systems introduced by J.\ Simons in \cite{simons-1962}. Skew-torsion holonomy systems arise in a natural way and in a geometric context, by considering the difference tensor between two metric connections which have the same geodesics as the Levi-Civita connection. We say that a triple $[\bbv, \Theta, G]$ is a \emph{skew-torsion holonomy system} provided $\bbv$ is a Euclidean space, $G$ is a connected Lie subgroup of $\SO(\bbv)$, and $\Theta$ is a totally skew-symmetric $1$-form on $\bbv$ which takes values in the Lie algebra $\gg$ of $G$ (i.e., $(x, y, z) \mapsto \langle\Theta_xy, z\rangle$ is an algebraic $3$-form on $\bbv$). We say that $[\bbv, \Theta, G]$ is \emph{irreducible} if $G$ acts irreducibly on $\bbv$, \emph{transitive} if $G$ is transitive on the sphere of $\bbv$, and \emph{symmetric} if $g_*(\Theta) = \Theta$ for all $g \in G$, where $g_*(\Theta)_x = g \circ \Theta_{g^{-1}(x)} \circ g^{-1}$. The main result on skew-torsion holonomy systems is analogous to Simons' holonomy theorem for classical holonomy systems. It is actually stronger, because no transitive case other than the full orthogonal group can occur. \begin{teo}[Skew-torsion Holonomy Theorem \cite{nagy-2007,olmos-reggiani-2012}] Let $[\bbv, \Theta, G]$, $\Theta \neq 0$, be an irreducible skew-torsion holonomy system with $G \neq \SO(\bbv)$. Then $[\bbv, \Theta, G]$ is symmetric and non-transitive. Moreover, \begin{enumerate} \item $(\bbv, [\cdot,\cdot])$ is an orthogonal simple Lie algebra, of rank at least $2$, with respect to the bracket $[x,y] = \Theta_xy$; \item $G = \Ad(H)$, where $H$ is the connected Lie group associated to the Lie algebra $(\bbv, [\cdot,\cdot])$; \item $\Theta$ is unique, up to a scalar multiple.
\end{enumerate} \end{teo} \section{The uniqueness of the canonical connection} In this section we prove a uniqueness result for canonical connections on naturally reductive spaces, compact or not. \begin{teo}\label{unica} Let $M$ be a simply connected and irreducible naturally reductive space. Assume that $M$ is not (globally) isometric to a sphere, nor to a Lie group with a bi-invariant metric or its symmetric dual. Then the canonical connection on $M$ is unique. \end{teo} Observe that, in particular, the above theorem says that the canonical connection is unique for the real hyperbolic space $H^n$ for all $n \neq 3$. When $n = 3$, $H^3$ is the symmetric dual of $S^3 \simeq \SU(2)$, and in this case $H^3$ admits a line of canonical connections (see Remark~\ref{hodge}). Before giving the proof of Theorem \ref{unica} we fix some notation and state some basic results that we will need. Let $M = G/G_p$, $p \in M$, be a naturally reductive space. That is, assume that $M$ carries a $G$-invariant metric and that the Lie algebra of $G$ admits a decomposition $\gg = \gg_p \oplus \gm$, where $\gg_p = \Lie(G_p)$ and $\gm$ is an $\Ad(G_p)$-invariant subspace such that the geodesics through $p$ are given by $$\Exp(tX) \cdot p, \qquad X \in \gm.$$ That is to say, Riemannian geodesics coincide with $\nabla^c$-geodesics, where $\nabla^c$ is the canonical connection associated with the above-mentioned reductive decomposition. Suppose that $\nabla^{c'}$ is another canonical connection on $M$ (associated with another naturally reductive presentation or another reductive decomposition). It follows from \cite[Section 6]{olmos-reggiani-2012} that $$\Theta = (\nabla^{c'} - \nabla^c)_p,$$ that is, the difference between $\nabla^{c'}$ and $\nabla^c$ evaluated at $p$, is a totally skew-symmetric $1$-form on $T_pM$ which takes values in the full isotropy subalgebra $\gh = \Lie(\Iso(M)_p)$. Hence, $[T_pM, \Theta, H]$ is a skew-torsion holonomy system, where $H = (\Iso(M)_p)^o$ is the connected component of the full isotropy subgroup at $p$. Let $\tilde\gh$ be the linear span of $\{h_*(\Theta)_v: h \in H, v \in T_pM\}$. We have that $\tilde\gh$ is an ideal of $\gh$. Let $\tilde H$ be the connected Lie subgroup of $H$ with Lie algebra $\tilde \gh$. From \cite[Section 2]{olmos-reggiani-2012} there exist decompositions \begin{equation}\label{eq:space} T_pM = \bbv_0 \oplus \bbv_1 \oplus \cdots \oplus \bbv_k \qquad \text{(orthogonal sum)} \end{equation} and \begin{equation}\label{eq:group} \tilde H = H_1 \times \cdots \times H_k \qquad \text{(almost direct product)} \end{equation} such that $H_i$ acts trivially on $\bbv_j$ if $i \neq j$ (in particular, $\bbv_0$ is the set of fixed vectors of $\tilde H$) and $H_i$ acts irreducibly on $\bbv_i$ with $\cc_i(\gh_i) = \{0\}$, where $$\cc_i(\gh_i) := \{B \in \so(\bbv_i): [B, \gh_i] = 0\}.$$ Moreover, we have that $H$ splits as $$H = H_0 \times \tilde H = H_0 \times H_1 \times \cdots \times H_k,$$ where $H_0$ acts only on $\bbv_0$ (and it could be arbitrary). In fact, any skew-torsion holonomy system can be decomposed in this way (see \cite{olmos-reggiani-2012} and also \cite{agricola-friedrich-2004, nagy-2007}). In order to prove Theorem \ref{unica}, we will make use of the following basic facts. \begin{lema}[see \cite{olmos-reggiani-2012}]\label{5.1-OR} Let $M = G/G_p$ be a Riemannian homogeneous manifold, let $H$ be a normal subgroup of $G_p$ and let $\bbw$ be the subspace of $T_pM$ defined by $$\bbw = \{v \in T_pM: dh(v) = v \text{ for all $h \in H$}\}.$$ Then $\bbw$ is $G_p$-invariant.
Moreover, if $\cd$ is the $G$-invariant distribution on $M$ defined by $\cd(p) = \bbw$, then $\cd$ is integrable with totally geodesic leaves (or, equivalently, $\cd$ is autoparallel). \end{lema} \begin{lema}\label{killing} Let $M = G/G_p$ be a naturally reductive space. If $X$ is a $G$-invariant field on $M$, then $X$ is a Killing field. \end{lema} \begin{proof} Let $X$ be a $G$-invariant field on $M$ and let $D = \nabla - \nabla^c$ be the difference tensor between the Levi-Civita connection and a canonical connection on $M$ associated with a reductive decomposition $\gg = \gg_p \oplus \gm$. Since $X$ is $G$-invariant, it is $\nabla^c$-parallel (since $\nabla^c$ is $G$-invariant and the $\nabla^c$-parallel transport along the geodesic $\Exp(tZ) \cdot p$, $Z \in \gm$, is given by $\Exp(tZ)_*$). So, $\nabla X = DX$ is skew-symmetric, and this implies that $X$ is a Killing field. \end{proof} \begin{proof}[Proof of Theorem \ref{unica}] We keep the notation from the previous paragraphs. We have decompositions $T_pM = \bbv_0 \oplus \bbv_1 \oplus \cdots \oplus \bbv_k$ (orthogonal) as in \eqref{eq:space} and $H = H_0 \times \tilde H = H_0 \times H_1 \times \cdots \times H_k$ as in \eqref{eq:group}. Let $\bbw_0$ be the set of fixed vectors of $H$ in $T_pM$, via the isotropy representation. So, $\bbw_0 \oplus \bbv_1$ is the set of fixed vectors of $H^1 = H_0 \times H_2 \times \cdots \times H_k$ and hence, by Lemma \ref{5.1-OR}, it induces the $G$-invariant autoparallel distribution $\cd^1$ defined by $\cd^1(p) = \bbw_0 \oplus \bbv_1$. Let $\cd_0$ be the $G$-invariant autoparallel distribution defined by $\cd_0(p) = \bbw_0$. The key step in the proof is to show that $\cd_0$ is parallel along $\cd^1$, and then to make use of the skew-torsion holonomy theorem. Since $\cd_0$ is $G$-invariant, we only have to prove that $\cd_0$ is parallel at $p$ (along $\cd^1$). Let $S^1(p)$ be the maximal connected integral manifold of $\cd^1$ which contains $p$. That is, $S^1(p)$ is the set of fixed points of $H^1$ on $M$ (connected component). It is not difficult to see that $S^1(p)$ is an extrinsic homogeneous submanifold under the action of the group $$G^1(p) = \{g \in G: g(S^1(p)) = S^1(p)\} = \{g \in G: g(p) \in S^1(p)\}$$ with effective isotropy $H_1$. Recall that the metric on $S^1(p)$ is naturally reductive, since $S^1(p)$ is a totally geodesic submanifold of $M$. Let $X \in \bbw_0$ and let $\tilde X$ be the $G^1(p)$-invariant field on $S^1(p)$ such that $\tilde X(p) = X$, or equivalently, the restriction to $S^1(p)$ of the $G$-invariant field on $M$ with initial condition $X$. It follows from Lemma \ref{killing} that $\tilde X$ is a Killing field and hence its derivative $\nabla \tilde X$ is skew-symmetric. Observe that if $h \in H_1$ and $v \in \bbw_0 \oplus \bbv_1 \simeq T_pS^1(p)$, then $$dh(\nabla_v\tilde X) = \nabla_{dh(v)}h_*(\tilde X) = \nabla_{dh(v)}\tilde X.$$ Then $(\nabla \tilde X)_p$ commutes with $H_1$ (via the isotropy representation) and so it leaves $\bbw_0$ and $\bbv_1$ invariant. Since $\cc_1(\gh_1) = \{0\}$, we have that $(\nabla\tilde X)_p|_{\bbv_1} \equiv 0$, and therefore $\nabla_v\tilde X \in \bbw_0$ for all $v \in \bbw_0 \oplus \bbv_1$. This implies that $\cd_0$ is parallel along $\cd^1$. Then $\cd_1$, the $G$-invariant distribution defined by $\cd_1(p) = \bbv_1$ (the orthogonal complement of $\cd_0$ inside $\cd^1$), is also parallel along $\cd^1$ and hence autoparallel on $M$, since $\cd^1$ is autoparallel.
On the other hand, we have that $\cd_1^{\bot}$ is also an autoparallel distribution on $M$, since $\cd_1^\bot(p) = \bbv_0 \oplus \bbv_2 \oplus \cdots \oplus \bbv_k$ is the set of fixed vectors of $H_1$, and therefore $M$ splits, unless one of these distributions is trivial. Finally, we reach two possibilities: \begin{enumerate} \item $T_pM = \bbv_0$ and $H = (\Iso(M)_p)^o = H_0$, or \item $T_pM = \bbv_1$ and $H = (\Iso(M)_p)^o = H_1$. \end{enumerate} In the first case, the group $\tilde H$ spanned by $\nabla^{c'} - \nabla^c$ is trivial, and we conclude that $\nabla^c = \nabla^{c'}$. In the second case, we have two possibilities again. Firstly, if $H_1$ is transitive on the unit sphere of $T_pM$, then, by using the skew-torsion holonomy theorem, we have that $H_1 = (\Iso(M)_p)^o = \SO(T_pM)$. So, it is standard to see that $M = S^n$ or $M = H^n$. See Proposition \ref{hiperbolico} and Remark \ref{hodge} below to exclude the hyperbolic case when $n \neq 3$. On the other hand, if $H_1$ is not transitive on the sphere, the skew-torsion holonomy theorem says that $H_1$ acts on $T_pM$ as the adjoint representation of a simple and compact Lie group. If $M$ is compact, it follows from the classification of strongly isotropy irreducible spaces, given by J.\ Wolf in \cite{wolf-1968} (see \cite[Appendix]{olmos-reggiani-2012} for a conceptual proof), that $M$ is a Lie group with a bi-invariant metric. If $M$ is non-compact, then $M$ turns out to be a symmetric space (since non-compact isotropy irreducible spaces must be symmetric \cite{besse-1987, wang-ziller-1991}). If $\nabla^c \neq \nabla^{c'}$, we have, by taking the symmetric dual, that $M^*$ is isometric to a Lie group with a bi-invariant metric. In fact, it is not difficult to see that there is a one-to-one correspondence between canonical connections on $M$ and canonical connections on $M^*$ (see Remark \ref{dual}). This completes the proof of Theorem \ref{unica}. \end{proof} \begin{prop}\label{hiperbolico} The real hyperbolic space $H^n$ admits a unique naturally reductive presentation, namely the symmetric pair decomposition $H^n = \SO(n, 1)^o/\SO(n)$. \end{prop} \begin{proof} Let $G$ be a connected Lie subgroup of $\Iso(H^n)$ which acts transitively on $H^n$ and such that $H^n = G/H$ is a naturally reductive space. If $G$ is semisimple, it is standard to show that $G = \Iso(H^n)^o = \SO(n, 1)^o$. In fact, let $K$ be a maximal compact subgroup of $G$. So, $K$ has a fixed point, say $p$. We may assume that $H$ is the isotropy group at $p$. So $H = K$, since $K$ is maximal. Hence, $(G,H)$ is a presentation of $H^n$ as an effective Riemannian symmetric pair, and therefore $G = \SO(n, 1)^o$ (otherwise, $H^n$ would have two different presentations as an effective Riemannian symmetric pair). If $G$ is not semisimple, then $G$ has a nontrivial normal abelian Lie subgroup $A$. It is a well-known fact that either $A$ fixes a unique point at infinity or $A$ translates a unique geodesic. If $A$ translates a unique geodesic $\gamma(t)$, then $G$ leaves $\gamma$ invariant, since $A$ is a normal subgroup of $G$, and so $G$ cannot be transitive, which is a contradiction. So, let $q_\infty$ be the unique point at infinity which is fixed by $A$, and let $\cf$ be the foliation of $H^n$ by parallel horospheres centered at $q_\infty$. So, we have that $A$ leaves $\cf$ invariant, and hence so does $G$. Let $p \in H^n$ and let $\cf_p$ be the horosphere through $p$.
Denote by $\tilde G$ the connected component of the subgroup of $G$ which leaves $\cf_p$ invariant. Then $\tilde G$ is transitive on $\cf_p$. Hence, since $H^n$ is naturally reductive with respect to the decomposition $G/H$, each horosphere must be totally geodesic, a contradiction. \end{proof} \begin{nota}\label{hodge} Let us consider the sphere $S^n = \SO(n+1)/\SO(n)$. Then, for all $n \neq 3$, the Levi-Civita connection is the unique canonical connection on $S^n$ associated with this naturally reductive decomposition. In fact, if $\nabla^c$ is another canonical connection on $S^n$, then the difference tensor $D = \nabla - \nabla^c$ induces a $\SO(n+1)$-invariant $3$-form $\omega(x,y,z) = \langle D_xy, z \rangle$. Since $\omega$ is invariant, $\omega$ is a harmonic $3$-form on $S^n$. From Hodge theory, $\omega$ represents a nontrivial cohomology class of degree $3$ of the sphere $S^n$. This yields a contradiction, unless $n = 3$. As a consequence, it follows from Proposition \ref{hiperbolico} and the next remark that the real hyperbolic space $H^n$ admits a unique canonical connection for all $n \neq 3$. If $n=3$, $H^3$ is the dual symmetric space of the compact Lie group $S^3 \simeq \SU(2)$, and therefore it admits exactly a line of canonical connections (see \cite[Remark~6.1]{olmos-reggiani-2012}). \end{nota} \begin{nota}\label{dual} Let $M = G/K$ be a symmetric space with associated Cartan decomposition $\gg = \gk \oplus \gp$. Then there is a one-to-one correspondence between canonical connections on $M$ and canonical connections on the dual $M^* = G^*/K$. In fact, assume that $M$ admits a canonical connection $\nabla^c$ associated with a reductive decomposition $\gg = \gk \oplus \gm$. Let $\gg^* = \gk \oplus i\,\gp$ be the Lie algebra of $G^*$, regarded as a subspace of the complexification $\gg^\bbc$ of $\gg$. It is clear that $\gm^*$ (the subspace of $\gg^*$ induced by $\gm$ via the vector space isomorphism $\gg^* \simeq \gg$) is an $\Ad^*(K)$-invariant subspace such that the geodesics through $p = eK$ are given by $1$-parameter subgroups with initial velocities in $\gm^*$. So, $\nabla^c$ corresponds to a unique canonical connection on $M^*$. \end{nota} \section{The isometry group of compact naturally reductive spaces} \label{sec:3} Let $M = G/H$ be a compact and locally irreducible naturally reductive space and let $\nabla^c$ be the canonical connection associated with the reductive decomposition $\gg = \gh \oplus \gm$. Assume that $M \neq S^n$, $M \neq \bbr P^n$. Then, from \cite[Theorem~1.1]{olmos-reggiani-2012} we have that $\Iso(M)^o = \Aff(M, \nabla^c)^o$, where $\Aff(M, \nabla^c)^o$ is the connected component of the affine group of $\nabla^c$ (i.e., the subgroup of diffeomorphisms of $M$ which preserve~$\nabla^c$). By making use of Lemma \ref{killing} and some arguments in \cite{reggiani-2010} one can obtain the connected component of the isometry group of $M$. Actually, it is possible to simplify such arguments. In fact, let $\Tr(M, \nabla^c)$ be the group of transvections of $\nabla^c$, that is, the connected Lie subgroup of $\Aff(M, \nabla^c)^o$ with Lie algebra $\tr(M, \nabla^c) = [\gm, \gm] + \gm$ (not a direct sum, in general). Recall that $\Tr(M, \nabla^c)$ is a normal subgroup of $\Aff(M, \nabla^c)^o$. As in \cite{reggiani-2010} for normal homogeneous spaces, we have that $G = \Tr(M, \nabla^c)$ and thus $G$ is a normal subgroup of $\Aff(M, \nabla^c)^o$.
(In fact, $\tr(M, \nabla^c)$ is an ideal of $\gg$; if $\tr(M, \nabla^c) \neq \gg$ then, since $M$ is compact, one can take a complementary ideal of the transvection algebra in $\gg$, which must be contained in the isotropy algebra. This is a contradiction, since we assume that $G$ acts effectively on $M$.) Now, since $G$ is a normal subgroup of $\Iso(M)^o = \Aff(M, \nabla^c)^o$, we can write $$\iso(M) = \gg \oplus \gb,$$ where $\gb$ is a complementary ideal of $\gg$ in $\iso(M)$ (recall that $M$ is compact, and hence $\Iso(M)^o$ is also compact). Note that elements of $\gb$ correspond to $G$-invariant fields on $M$, which are Killing fields by Lemma \ref{killing} (though not every $G$-invariant field need belong to $\gb$, in principle). We can summarize this fact as follows. \begin{teo}\label{isometrias} Let $M = G/H$ be a compact naturally reductive space. Assume that $M$ is locally irreducible and that $M$ is not (globally) isometric to the sphere $S^n$ nor to the real projective space $\bbr P^n$. Then the connected component of the isometry group of $M$ is given by $$\Iso(M)^o = G_{\mathrm{ss}} \times K \qquad \text{(almost direct product),}$$ where $G_{\mathrm{ss}}$ is the semisimple part of $G$ and $K$ is the connected subgroup of $\Iso(M)$ whose Lie algebra consists of the $G$-invariant fields. In particular, $\Iso(M)$ is semisimple if and only if $K$ is semisimple. \end{teo} \begin{nota} In the notation of Theorem \ref{isometrias}, $K$ can be identified with (the connected component of) the set of fixed points of the isotropy group $H$, on which it acts simply transitively by right multiplication. Moreover, just by copying the argument in \cite[Theorem 1.4]{reggiani-2010} we get that the set of fixed points of the full isotropy group $(\Iso(M)^o)_p$ is a torus. \end{nota} \end{document}
\begin{document} \title[The Projective Class Rings of a family of pointed Hopf algebras] {The Projective Class Rings of a family of pointed Hopf algebras of Rank two} \author{Hui-Xiang Chen} \address{School of Mathematical Science, Yangzhou University, Yangzhou 225002, China} \email{[email protected]} \author{Hassan Suleman Esmael Mohammed} \address{School of Mathematical Science, Yangzhou University, Yangzhou 225002, China} \email{[email protected]} \author{Weijun Lin} \address{School of Mathematical Science, Yangzhou University, Yangzhou 225002, China} \email{[email protected]} \author{Hua Sun} \address{School of Mathematical Science, Yangzhou University, Yangzhou 225002, China} \email{[email protected]} \thanks{2010 {\it Mathematics Subject Classification}. 16G60, 16T05} \keywords{Green ring, indecomposable module, Taft algebra} \begin{abstract} In this paper, we compute the projective class rings of the tensor product $\mathcal{H}_n(q)=A_n(q)\otimes A_n(q^{-1})$ of the Taft algebras $A_n(q)$ and $A_n(q^{-1})$, and of its cocycle deformations $H_n(0,q)$ and $H_n(1,q)$, where $n>2$ is a positive integer and $q$ is a primitive $n$-th root of unity. It is shown that the projective class rings $r_p(\mathcal{H}_n(q))$, $r_p(H_n(0,q))$ and $r_p(H_n(1,q))$ are commutative rings generated by three elements, three elements and two elements subject to some relations, respectively. It turns out that even though $\mathcal{H}_n(q)$, $H_n(0,q)$ and $H_n(1,q)$ are cocycle twist-equivalent to each other, they are of different representation types: wild, wild and tame, respectively. \end{abstract} \maketitle \section{\bf Introduction}\selabel{1} Let $H$ be a Hopf algebra over a field $\mathbb K$. Doi \cite{Doi} introduced a cocycle twisted Hopf algebra $H^{\sigma}$ for a convolution invertible 2-cocycle $\sigma$ on $H$. It is shown in \cite{DoiTak, Maj92} that the Drinfeld double $D(H)$ is a cocycle twisting of the tensor product Hopf algebra $H^{*cop}\otimes H$. The 2-cocycle twisting has been employed extensively in various lines of research. For instance, Andruskiewitsch et al. \cite{AndrFanGarVen} considered the twists of Nichols algebras associated to racks and cocycles. Guillot, Kassel and Masuoka \cite{GuiKasMas} obtained some examples by twisting comodule algebras by 2-cocycles. It is well known that the monoidal category $\mathcal{M}^H$ of right $H$-comodules is equivalent to the monoidal category $\mathcal{M}^{H^{\sigma}}$ of right $H^{\sigma}$-comodules. On the other hand, we know that the braided monoidal category $_H{\mathcal{YD}}^H$ of Yetter-Drinfeld $H$-modules is the center of the monoidal category $\mathcal{M}^H$ for any Hopf algebra $H$ (e.g., see \cite{Ka}). Hence the monoidal equivalence from $\mathcal{M}^H$ to $\mathcal{M}^{H^{\sigma}}$ gives rise to a braided monoidal equivalence from $_H{\mathcal{YD}}^H$ to $_{H^{\sigma}}{\mathcal{YD}}^{H^{\sigma}}$. Chen and Zhang \cite{ChenZhang} described an explicit braided monoidal equivalence from $_H{\mathcal{YD}}^H$ to $_{H^{\sigma}}{\mathcal{YD}}^{H^{\sigma}}$. Benkart et al. \cite{BenkPerWith} used a result of Majid and Oeckl \cite{MajOec} to give a category equivalence between Yetter-Drinfeld modules for a finite-dimensional pointed Hopf algebra $H$ and those for its cocycle twisting $H^{\sigma}$.
However, the Yetter-Drinfeld module category $_H{\mathcal{YD}}^H$ is also the center of the monoidal category $_H\mathcal M$ of left $H$-modules. This gives rise to a natural question: are there any relations between the two monoidal categories $_H\mathcal M$ and $_{H^{\sigma}}\mathcal M$ of left modules over two cocycle twist-equivalent Hopf algebras $H$ and $H^{\sigma}$, and how can one compare these two monoidal categories? This article seeks to address this question by investigating the representation types and projective class rings of a family of pointed Hopf algebras of rank 2, namely the tensor products of two Taft algebras and two of their cocycle deformations. In the investigation of the monoidal category of modules over a Hopf algebra $H$, the decomposition problem for tensor products of indecomposables is of central importance and has received enormous attention. Our approach is to explore the representation type of $H$ and the projective class ring of $H$, which is a subring of the representation ring (or Green ring) of $H$. Originally, the concept of the Green ring $r(H)$ stems from the modular representation theory of finite groups (see \cite{Green}, etc.). Since then, there has been a great deal of work on Green rings. For finite-dimensional group algebras, one can refer to \cite{Archer, BenCar, BenPar, BrJoh, HTW}. For Hopf algebras and quantum groups, one can see \cite{ChVOZh, Chin, Cib, LiZhang, Wakui, With}. The $n^4$-dimensional Hopf algebra $H_n(p, q)$ was introduced in \cite{Ch1}, where $n\geqslant2$ is an integer, $q\in\mathbb K$ is a primitive $n$-th root of unity and $p\in\mathbb K$. If $p\neq 0$, then $H_n(p, q)$ is isomorphic to the Drinfeld double $D(A_n(q^{-1}))$ of the Taft algebra $A_n(q^{-1})$. In particular, we have $H_n(p, q)\cong H_n(1, q)\cong D(A_n(q^{-1}))$ for any $p\neq 0$. Moreover, $H_n(p,q)$ is a cocycle deformation of $A_n(q)\otimes A_n(q^{-1})$. For the details, the reader is referred to \cite{Ch1, Ch2}. When $n=2$ ($q=-1$), $A_2(-1)$ is exactly the Sweedler 4-dimensional Hopf algebra $H_4$. The first author studied the finite dimensional representations of $H_n(1,q)$ in \cite{Ch2, Ch4}, and the Green ring $r(D(H_4))$ in \cite{Ch5}. Using a different method, Li and Hu \cite{LiHu} also studied the finite dimensional representations of the Drinfeld double $D(H_4)$, the Green ring $r(D(H_4))$ and the projective class ring $r_p(D(H_4))$. They also studied two Hopf algebras which are cocycle deformations of $D(H_4)$. By \cite{Ch4}, one knows that $D(H_4)$ is of tame representation type. By \cite{LiHu}, the two cocycle deformations of $D(H_4)$ are also of tame representation type. In this paper, we study the three cocycle twist-equivalent Hopf algebras $\mathcal{H}_n(q)=A_n(q)\otimes A_n(q^{-1})$, $H_n(0,q)$ and $H_n(1,q)$ by investigating their representation types and projective class rings, where $n\geqslant3$. In \seref{2}, we introduce the Taft algebras $A_n(q)$, the tensor product $\mathcal{H}_n(q)=A_n(q)\otimes A_n(q^{-1})$ and the Hopf algebras $H_n(p,q)$. In \seref{3}, we first show that $\mathcal{H}_n(q)$ is of wild representation type. With a complete set of orthogonal primitive idempotents, we classify the simple modules and indecomposable projective modules over $\mathcal{H}_n(q)$, and decompose the tensor products of these modules.
This leads to the description of the projective class ring $r_p(\mathcal{H}_n(q))$, the Jacobson radical $J(R_p(\mathcal{H}_n(q)))$ of the projective class algebra $R_p(\mathcal{H}_n(q))$ and the quotient algebra $R_p(\mathcal{H}_n(q))/J(R_p(\mathcal{H}_n(q)))$. In \seref{4}, we first show that $H_n(0,q)$ is a symmetric algebra of wild representation type. Then we give a complete set of orthogonal primitive idempotents together with the Gabriel quiver, and classify the simple modules and indecomposable projective modules over $H_n(0,q)$. We also describe the projective class ring $r_p(H_n(0,q))$, the Jacobson radical $J(R_p(H_n(0,q)))$ of the projective class algebra $R_p(H_n(0,q))$ and the quotient algebra $R_p(H_n(0,q))/J(R_p(H_n(0,q)))$. In \seref{5}, using the decompositions of tensor products of indecomposables over $H_n(1,q)$ given in \cite{ChenHassenSun}, we describe the structure of the projective class ring $r_p(H_n(1,q))$. It is interesting to notice that even though the Hopf algebras $\mathcal{H}_n(q)$, $H_n(0,q)$ and $H_n(1,q)$ are cocycle twist-equivalent to each other, they have different numbers of blocks, namely $1$, $n$ and $\frac{n(n+1)}{2}$, respectively (see \cite[Corollary 2.7]{Ch4} for $H_n(1,q)$). $\mathcal{H}_n(q)$ and $H_n(0,q)$ are basic algebras of wild representation type, but $H_n(1,q)$ is not basic and is of tame representation type. $H_n(0,q)$ and $H_n(1,q)$ are symmetric algebras, but $\mathcal{H}_n(q)$ is not. \section{\bf Preliminaries}\selabel{2} Throughout, we work over an algebraically closed field $\mathbb K$. Unless otherwise stated, all algebras, Hopf algebras and modules are defined over $\mathbb K$; all modules are left modules and finite dimensional; all maps are $\mathbb K$-linear; dim and $\otimes$ stand for ${\rm dim}_{\mathbb K}$ and $\otimes_{\mathbb K}$, respectively. Given an algebra $A$, $A$-mod denotes the category of finite-dimensional $A$-modules. For any $A$-module $M$ and nonnegative integer $l$, let $lM$ denote the direct sum of $l$ copies of $M$. For the theory of Hopf algebras and quantum groups, we refer to \cite{Ka, Maj, Mon, Sw}. Let $\mathbb Z$ denote the ring of integers, and ${\mathbb Z}_n={\mathbb Z}/n{\mathbb Z}$. Let $H$ be a Hopf algebra. The Green ring $r(H)$ of $H$ can be defined as follows. $r(H)$ is the abelian group generated by the isomorphism classes $[M]$ of $M$ in $H$-mod modulo the relations $[M\oplus V]=[M]+[V]$. The multiplication of $r(H)$ is given by the tensor product of $H$-modules, that is, $[M][V]=[M\otimes V]$. Then $r(H)$ is an associative ring. The projective class ring $r_p(H)$ of $H$ is the subring of $r(H)$ generated by projective modules and simple modules (see \cite{Cib99}). Then the Green algebra $R(H)$ and the projective class algebra $R_p(H)$ are the associative $\mathbb K$-algebras defined by $R(H):=\mathbb{K}\otimes_{\mathbb Z}r(H)$ and $R_p(H):=\mathbb{K}\otimes_{\mathbb Z}r_p(H)$, respectively. Note that $r(H)$ is a free abelian group with a $\mathbb Z$-basis $\{[V]\mid V\in{\rm ind}(H)\}$, where ${\rm ind}(H)$ denotes the category of finite dimensional indecomposable $H$-modules. The Grothendieck ring $G_0(H)$ of $H$ is defined similarly.
$G_0(H)$ is the abelian group generated by the isomorphism classes $[M]$ of $M$ in $H$-mod modulo the relations $[M]=[N]+[V]$ for any short exact sequence $0\rightarrow N\rightarrow M\rightarrow V\rightarrow 0$ in $H$-mod. The multiplication of $G_0(H)$ is given by the tensor product of $H$-modules, that is, $[M][V]=[M\otimes V]$. Then $G_0(H)$ is also an associative ring. Moreover, there is a canonical ring epimorphism from $r(H)$ onto $G_0(H)$. Let $n\geqslant2$ be an integer and $q\in\mathbb K$ a primitive $n$-th root of unity. Then the $n^2$-dimensional Taft Hopf algebra $A_n(q)$ is defined as follows (see \cite{Ta}): as an algebra, $A_n(q)$ is generated by $g$ and $x$ with relations $$g^n=1,\ x^n=0, \ xg=qgx.$$ The coalgebra structure and antipode are given by $$\begin{array}{c} \triangle (g)=g\otimes g,\ \triangle (x)=x\otimes g+1\otimes x,\ \varepsilon (g)=1,\ \varepsilon (x)=0,\\ S(g)=g^{-1}=g^{n-1},\ S(x)=-xg^{-1}=-q^{-1}g^{n-1}x.\\ \end{array}$$ Since $q^{-1}$ is also a primitive $n$-th root of unity, one can define another Taft Hopf algebra $A_n(q^{-1})$, which is generated, as an algebra, by $g_1$ and $x_1$ with relations $g_1^n=1$, $x_1^n=0$ and $x_1g_1=q^{-1}g_1x_1$. The coalgebra structure and antipode are given similarly to those of $A_n(q)$. Then $A_n(q^{-1})\cong A_n(q)^{\rm op}$ as Hopf algebras. The first author introduced a Hopf algebra $H_n(p,q)$ in \cite{Ch1}, where $p, q\in\mathbb K$ and $q$ is a primitive $n$-th root of unity. It was shown there that $H_n(p,q)$ is isomorphic to a cocycle deformation of the tensor product $A_n(q)\otimes A_n(q^{-1})$. The tensor product $A_n(q)\otimes A_n(q^{-1})$ can be described as follows. Let $\mathcal{H}_n(q)$ be the algebra generated by $a, b, c$ and $d$ subject to the relations: $$\begin{array}{lllll} ba=qab,& db=bd, & ca=ac,& dc=qcd,& cb=bc,\\ a^n=0, & b^n=1, &c^n=1,& d^n=0, & da=ad. \end{array}$$ Then $\mathcal{H}_n(q)$ is a Hopf algebra with the coalgebra structure and antipode given by $$\begin{array}{lll} \triangle(a)=a\otimes b+1\otimes a, & \varepsilon(a)=0, & S(a)=-ab^{-1}=-ab^{n-1},\\ \triangle(b)=b\otimes b, & \varepsilon(b)=1, & S(b)=b^{-1}=b^{n-1},\\ \triangle(c)=c\otimes c,& \varepsilon(c)=1, & S(c)=c^{-1}=c^{n-1},\\ \triangle(d)=d\otimes c+1\otimes d,& \varepsilon(d)=0, & S(d)=-dc^{-1}=-dc^{n-1}. \end{array}$$ It is straightforward to verify that there is a Hopf algebra isomorphism from $\mathcal{H}_n(q)$ to $A_n(q)\otimes A_n(q^{-1})$ via $a\mapsto 1\otimes x_1$, $b\mapsto 1\otimes g_1$, $c\mapsto g\otimes 1$ and $d\mapsto x\otimes 1$. Obviously, $\mathcal{H}_n(q)$ is $n^4$-dimensional with a $\mathbb K$-basis $\{a^ib^jc^ld^k\mid 0\leqslant i, j, l, k\leqslant n-1\}$. Let $p\in\mathbb K$. Then one can define another $n^4$-dimensional Hopf algebra $H_n(p, q)$, which is generated as an algebra by $a, b, c$ and $d$ subject to the relations: $$\begin{array}{lllll} ba=qab,& db=qbd, & ca=qac,& dc=qcd,& bc=cb,\\ a^n=0, & b^n=1, &c^n=1,& d^n=0, & da-qad=p(1-bc). \end{array}$$ The coalgebra structure and antipode are defined in the same way as for $\mathcal{H}_n(q)$ above. $H_n(p, q)$ has a $\mathbb K$-basis $\{a^ib^jc^ld^k\mid 0\leqslant i, j, l, k\leqslant n-1\}$. When $p\neq 0$, $H_n(p, q)\cong H_n(1, q)\cong D(A_n(q^{-1}))$ (see \cite{Ch1, Ch2}).
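The relation $x^n=0$ in $A_n(q)$ interacts with the coproduct in a way that is used implicitly several times below; the following standard computation (a sketch, stated with the Gaussian binomial coefficients $\binom{m}{k}_q$; see e.g. \cite{Ka}) records it explicitly. Since $(1\otimes x)(x\otimes g)=q\,(x\otimes g)(1\otimes x)$ in $A_n(q)\otimes A_n(q)$, the $q$-binomial formula applied to $\triangle(x)=x\otimes g+1\otimes x$ gives $$\triangle(x^m)=\sum_{k=0}^{m}\binom{m}{k}_{q}\,x^{k}\otimes g^{k}x^{m-k},\qquad m\geqslant0.$$ Since $q$ is a primitive $n$-th root of unity, $\binom{n}{k}_{q}=0$ for $0<k<n$, so $\triangle(x^n)=x^n\otimes g^n+1\otimes x^n$; hence imposing $x^n=0$ together with $g^n=1$ is compatible with the coalgebra structure. Analogous formulas hold for the skew-primitive generators of $A_n(q^{-1})$, $\mathcal{H}_n(q)$ and $H_n(p,q)$, with $q$ replaced by $q^{\pm1}$ as appropriate.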
If $n=2$ ($q=-1$), then $H_2(1, -1)\cong D(H_4)$, and $H_2(0,-1)$ is exactly the Hopf algebra $\overline{\mathcal A}$ in \cite{LiHu}. By \cite[Lemma 3.2]{Ch1}, there is an invertible skew-pairing $\tau_p: A_n(q)\otimes A_n(q^{-1})\rightarrow \mathbb K$ given by $\tau_p(g^ix^j, x_1^kg_1^l)=\delta_{jk}p^jq^{il}(j)!_q$, $0\leqslant i,j,k,l<n$. Hence one can form a double crossproduct $A_n(q)\bowtie_{\tau_p}A_n(q^{-1})$. Moreover, $A_n(q)\bowtie_{\tau_p}A_n(q^{-1})$ is isomorphic to $H_n(p,q)$ as a Hopf algebra (see \cite[Theorem 3.3]{Ch1}). By \cite{DoiTak}, $\tau_p$ induces an invertible 2-cocycle $[\tau_p]$ on $A_n(q)\otimes A_n(q^{-1})$ such that $A_n(q)\bowtie_{\tau_p}A_n(q^{-1})=(A_n(q)\otimes A_n(q^{-1}))^{[\tau_p]}$. Thus, there is a corresponding invertible 2-cocycle $\sigma_p$ on $\mathcal{H}_n(q)$ such that $\mathcal{H}_n(q)^{\sigma_p}\cong H_n(p,q)$ as Hopf algebras. In particular, we have $\mathcal{H}_n(q)^{\sigma_0}\cong H_n(0,q)$ and $\mathcal{H}_n(q)^{\sigma_1}\cong H_n(1,q)$. In general, if $\sigma$ is a convolution invertible 2-cocycle on a Hopf algebra $H$, then $\sigma^{-1}$ is an invertible 2-cocycle on $H^{\sigma}$ and $(H^{\sigma})^{\sigma^{-1}}=H$ (see \cite[Lemma 1.2]{chen99}). More generally, if $\sigma$ is an invertible 2-cocycle on $H$ and $\tau$ is an invertible 2-cocycle on $H^{\sigma}$, then $\tau*\sigma$ is an invertible 2-cocycle on $H$ and $H^{\tau*\sigma}=(H^{\sigma})^{\tau}$ (see \cite[Lemma 1.4]{chen99}). Thus, the Hopf algebras $\mathcal{H}_n(q)$, $H_n(0,q)$ and $H_n(1,q)$ are cocycle twist-equivalent to each other. Throughout the following, fix an integer $n>2$ and let $q\in\mathbb K$ be a primitive $n$-th root of unity. For any $m\in\mathbb Z$, we still denote by $m$ the image of $m$ under the canonical projection $\mathbb{Z}\rightarrow\mathbb{Z}_n=\mathbb{Z}/n\mathbb{Z}$. \section{\bf The Projective Class Ring of $\mathcal{H}_n(q)$}\selabel{3} In this section, we investigate the representations and the projective class ring of $\mathcal{H}_n(q)$, or equivalently, of $A_n(q)\otimes A_n(q^{-1})$. Let $A$ be the subalgebra of $\mathcal{H}_n(q)$ generated by $a$ and $d$. Then $A$ is isomorphic to the quotient algebra $\mathbb{K}[x, y]/(x^n, y^n)$ of the polynomial algebra $\mathbb{K}[x, y]$ modulo the ideal $(x^n,y^n)$ generated by $x^n$ and $y^n$. Let $G=G(\mathcal{H}_n(q))$ be the group of group-like elements of $\mathcal{H}_n(q)$. Then $G=\{b^ic^j\mid i,j\in\mathbb{Z}_n\}\cong \mathbb{Z}_n\times\mathbb{Z}_n$, and $\mathbb{K}G=\mathcal{H}_n(q)_0$, the coradical of $\mathcal{H}_n(q)$. Clearly, $A$ is a left $\mathbb{K}G$-module algebra with the action given by $b\cdot a=qa$, $b\cdot d=d$, $c\cdot a=a$ and $c\cdot d=q^{-1}d$. Hence one can form a smash product algebra $A\#\mathbb{K}G$. It is easy to see that $\mathcal{H}_n(q)$ is isomorphic to $A\#\mathbb{K}G$ as an algebra. Since $n\geqslant3$, it follows from \cite[p.295(3.4)]{Ringel} that $A$ is of wild representation type. Since ${\rm char}(\mathbb{K})\nmid |G|$ (note that ${\rm char}(\mathbb K)\nmid n$ because $\mathbb K$ contains the primitive $n$-th root of unity $q$), $\mathbb{K}G$ is a semisimple and cosemisimple Hopf algebra. It follows from \cite[Theorem 4.5]{Liu} that $A\#\mathbb{K}G$ is of wild representation type. As a consequence, we obtain the following result.
\begin{proposition}\label{3.1} $\mathcal{H}_n(q)$ is of wild representation type. \end{proposition} $\mathcal{H}_n(q)$ has $n^2$ orthogonal primitive idempotents $$\begin{array}{c} e_{i,j}=\frac{1}{n^2}\sum_{k,l\in\mathbb{Z}_n}q^{-ik-jl}b^kc^l =\frac{1}{n^2}\sum_{k,l=0}^{n-1}q^{-ik-jl}b^kc^l,\ \ i,j\in\mathbb{Z}_n.\\ \end{array}$$ \begin{lemma}\label{3.2} Let $i,j\in\mathbb{Z}_n$. Then $$be_{i,j}=q^ie_{i,j}, \ ce_{i,j}=q^je_{i,j},\ ae_{i,j}=e_{i+1,j}a, \ de_{i,j}=e_{i,j-1}d.$$ \end{lemma} \begin{proof} It follows from a straightforward verification. \end{proof} For $i,j\in\mathbb{Z}_n$, let $S_{i,j}$ be the one dimensional $\mathcal{H}_n(q)$-module defined by $bv=q^iv$, $cv=q^jv$ and $av=dv=0$, $v\in S_{i,j}$. Let $P_{i,j}=P(S_{i,j})$ be the projective cover of $S_{i,j}$. Let $J={\rm rad}(\mathcal{H}_n(q))$ be the Jacobson radical of $\mathcal{H}_n(q)$. \begin{lemma}\label{3.3} The simple modules $S_{i,j}$, $i,j\in\mathbb{Z}_n$, exhaust all simple modules of $\mathcal{H}_n(q)$, and consequently, the projective modules $P_{i,j}$, $i,j\in\mathbb{Z}_n$, exhaust all indecomposable projective modules of $\mathcal{H}_n(q)$. Moreover, $P_{i,j}\cong\mathcal{H}_n(q)e_{i,j}$ for all $i,j\in\mathbb{Z}_n$. \end{lemma} \begin{proof} Obviously, $a\mathcal{H}_n(q)=\mathcal{H}_n(q)a$ and $d\mathcal{H}_n(q)=\mathcal{H}_n(q)d$. Since $a^n=0$ and $d^n=0$, $\mathcal{H}_n(q)a+\mathcal{H}_n(q)d$ is a nilpotent ideal of $\mathcal{H}_n(q)$. Hence $\mathcal{H}_n(q)a+\mathcal{H}_n(q)d\subseteq J$. On the other hand, it is easy to see that the quotient algebra $\mathcal{H}_n(q)/(\mathcal{H}_n(q)a+\mathcal{H}_n(q)d)$ is isomorphic to the group algebra $\mathbb{K}G$, where $G=G(\mathcal{H}_n(q))=\{b^ic^j\mid 0\leqslant i, j\leqslant n-1\}$, the group of all group-like elements of $\mathcal{H}_n(q)$. Since $\mathbb{K}G$ is semisimple, $J\subseteq \mathcal{H}_n(q)a+\mathcal{H}_n(q)d$. Thus, $J=\mathcal{H}_n(q)a+\mathcal{H}_n(q)d$. Therefore, the simple modules $S_{i,j}$ exhaust all simple modules of $\mathcal{H}_n(q)$, and the projective modules $P_{i,j}$ exhaust all indecomposable projective modules of $\mathcal{H}_n(q)$, $i,j\in\mathbb{Z}_n$. The last statement of the lemma follows from Lemma \ref{3.2}. \end{proof} \begin{corollary}\label{3.4} $\mathcal{H}_n(q)$ is a basic algebra. Moreover, $J$ is a Hopf ideal of $\mathcal{H}_n(q)$, and the Loewy length of $\mathcal{H}_n(q)$ is $2n-1$. \end{corollary} \begin{proof} It follows from Lemma \ref{3.3} that $\mathcal{H}_n(q)$ is a basic algebra. By $J=\mathcal{H}_n(q)a+\mathcal{H}_n(q)d$, one can easily check that $J$ is a coideal and $S(J)\subseteq J$. Hence $J$ is a Hopf ideal. By $a^{n-1}\neq 0$ and $d^{n-1}\neq 0$, one gets $(\mathcal{H}_n(q)a+\mathcal{H}_n(q)d)^{2n-2}\neq0$. By $a^n=d^n=0$, one gets $(\mathcal{H}_n(q)a+\mathcal{H}_n(q)d)^{2n-1}=0$. It follows that the Loewy length of $\mathcal{H}_n(q)$ is $2n-1$. \end{proof} In the rest of this section, we identify $P_{i,j}$ with $\mathcal{H}_n(q)e_{i,j}$ for all $i,j\in\mathbb{Z}_n$.
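For the reader's convenience, we spell out the verification behind Lemma \ref{3.2}, which was left as a straightforward check above. Using $ba=qab$, $ca=ac$ and reindexing the sum (which is legitimate modulo $n$ since $b^n=1$ and $q^n=1$), one finds $$be_{i,j}=\frac{1}{n^2}\sum_{k,l=0}^{n-1}q^{-ik-jl}b^{k+1}c^l =\frac{1}{n^2}\sum_{k,l=0}^{n-1}q^{-i(k-1)-jl}b^{k}c^l=q^ie_{i,j},$$ and similarly $ce_{i,j}=q^je_{i,j}$. Moreover, since $b^ka=q^kab^k$ and $c^la=ac^l$, $$e_{i+1,j}a=\frac{1}{n^2}\sum_{k,l=0}^{n-1}q^{-(i+1)k-jl}b^kc^la =\frac{1}{n^2}\sum_{k,l=0}^{n-1}q^{-ik-jl}ab^kc^l=ae_{i,j},$$ and the identity $de_{i,j}=e_{i,j-1}d$ follows in the same way from $dc=qcd$ and $db=bd$.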
\begin{corollary}\label{3.5} $P_{i,j}$ is $n^2$-dimensional with a $\mathbb K$-basis $\{a^kd^le_{i,j}\mid 0\leqslant k, l\leqslant n-1\}$, $i,j\in\mathbb{Z}_n$. Consequently, $\mathcal{H}_n(q)$ is an indecomposable algebra. \end{corollary} \begin{proof} By Lemma \ref{3.2}, $P_{i,j}={\rm span}\{a^kd^le_{i,j}\mid 0\leqslant k, l\leqslant n-1\}$, and hence dim$P_{i,j}\leqslant n^2$. Now it follows from $\mathcal{H}_n(q)=\oplus_{i,j\in\mathbb{Z}_n}\mathcal{H}_n(q)e_{i,j}$ and dim$\mathcal{H}_n(q)=n^4$ that $P_{i,j}$ is $n^2$-dimensional over $\mathbb{K}$ with a basis $\{a^kd^le_{i,j}\mid 0\leqslant k, l\leqslant n-1\}$. Then by Lemmas \ref{3.2}-\ref{3.3}, one knows that every simple module is a simple factor of $P_{i,j}$ with multiplicity one. Consequently, $\mathcal{H}_n(q)$ is an indecomposable algebra. \end{proof} Given $M\in\mathcal{H}_n(q)$-mod, for any $\alpha\in\mathbb{K}$ and $u,v\in M$, we use $u\xrightarrow{\alpha}v$ (resp. $u\stackrel{\alpha}{\dashrightarrow}v$) to represent $a\cdot u=\alpha v$ (resp. $d\cdot u=\alpha v$). Moreover, we omit the decoration of the arrow if $\alpha=1$. For $i, j\in\mathbb{Z}_n$, let $e_{i,j}^{k,l}=a^kd^le_{i,j}$ in $P_{i,j}$, $0\leqslant k,l\leqslant n-1$. Then the structure of $P_{i,j}$ can be described as follows: $$\begin{tikzpicture}[scale=1] \path (0,0) node(e) {$e_{i,j}^{0,0}$}; \path (-1,-1) node(a) {$e_{i,j}^{1,0}$} (1,-1) node(d) {$e_{i,j}^{0,1}$}; \path (-2,-1.95) node(a2) {$\vdots$} (0,-2) node(ad) {$e_{i,j}^{1,1}$} (2,-2) node(d2) {$\ddots$}; \path (-1.5, -2.35) node(1) {$\ddots$} (1.6, -2.4) node(1c) {$\adots$}; \path (-3,-3) node(an-2) {$e_{i,j}^{n-2,0}$} (-1.14,-3.23) node(2l) {$\cdot$} (-1,-3) node(2) {$\ddots$} (-0.87,-3.01) node(2r) {$\cdot$} (0.85,-3.235) node(3l) {$\cdot$} (1,-3) node(3) {$\ddots$} (1.15,-3) node(3r) {$\cdot$} (3,-3) node (dn-2) {$e_{i,j}^{0,n-2}$}; \path (-0.5,-3.5) node(3.5l) {$\ddots$} (0.5,-3.5) node(3.5c) {$\adots$}; \path (-4,-4) node(an-1) {$e_{i,j}^{n-1,0}$} (-2, -4) node(an-2d) {$e_{i,j}^{n-2,1}$} (-0.15,-4.22) node(4l) {$\cdot$} (0,-4) node(4) {$\ddots$} (0.15,-4) node(4r) {$\cdot$} (2,-4) node(adn-2) {$e_{i,j}^{1,n-2}$} (4, -4) node(dn-1) {$e_{i,j}^{0,n-1}$}; \path (-0.5,-4.45) node(4.5l) {$\adots$} (0.5,-4.5) node(4.5r) {$\ddots$}; \path (-3,-5) node(an-1d) {$e_{i,j}^{n-1,1}$} (-1.15,-5.235) node(5l) {$\cdot$} (-1,-5) node(5) {$\ddots$} (-0.85,-5) node(5r) {$\cdot$} (0.85,-5.235) node(6l) {$\cdot$} (1,-5) node(6) {$\ddots$} (1.15, -5) node(6r) {$\cdot$} (3,-5) node (adn-1) {$e_{i,j}^{1,n-1}$}; \path (-1.65,-5.5) node(5.5l) {$\adots$} (1.5, -5.5) node(5.5r) {$\ddots$}; \path (-2,-6) node(7) {$\ddots$} (0,-6) node(an-2dn-2) {$e_{i,j}^{n-2,n-2}$} (2,-6) node (9) {$\adots$}; \path (-1,-7) node(an-1dn-2) {$e_{i,j}^{n-1,n-2}$\ \ } (1,-7) node(an-2dn-1) {\ \ $e_{i,j}^{n-2,n-1}$}; \path (0,-8) node(an-1dn-1) {$e_{i,j}^{n-1,n-1}$}; \draw[->] (e) --(a); \draw[->,dashed] (e) --(d); \draw[->] (a) --(a2); \draw[->,dashed] (a) --(ad); \draw[->] (d) --(ad); \draw[->,dashed] (d) --(d2); \draw[->] (a2) --(an-2); \draw[->] (ad) --(2); \draw[->,dashed] (ad) --(3); \draw[->,dashed] (d2) --(dn-2); \draw[->] (an-2) --(an-1); \draw[->,dashed] (an-2) --(an-2d); \draw[->] (2) --(an-2d); \draw[->,dashed] (3) --(adn-2); \draw[->] (dn-2) --(adn-2);
\draw[->,dashed] (dn-2) --(dn-1);
\draw[->,dashed] (an-1) --(an-1d); \draw[->] (an-2d) --(an-1d);
\draw[->,dashed] (an-2d) --(5); \draw[->] (adn-2) --(6);
\draw[->,dashed] (adn-2) --(adn-1); \draw[->] (dn-1) --(adn-1);
\draw[->,dashed] (an-1d) --(7); \draw[->,dashed] (5) --(an-2dn-2);
\draw[->] (6) --(an-2dn-2); \draw[->] (adn-1) --(9);
\draw[->,dashed] (7) --(an-1dn-2); \draw[->] (an-2dn-2) --(an-1dn-2);
\draw[->,dashed] (an-2dn-2) --(an-2dn-1); \draw[->] (9) --(an-2dn-1);
\draw[->,dashed] (an-1dn-2) --(an-1dn-1); \draw[->] (an-2dn-1) --(an-1dn-1);
\end{tikzpicture}$$

\begin{proposition}\label{3.6} $S_{i,j}\otimes S_{k,l}\cong S_{i+k,j+l}$ and $S_{i,j}\otimes P_{k,l}\cong P_{k,l}\otimes S_{i,j}\cong P_{i+k,j+l}$ for all $i,j,k,l\in\mathbb{Z}_n$.
\end{proposition}

\begin{proof} The first isomorphism is obvious. Note that $S_{0,0}$ is the trivial $\mathcal{H}_n(q)$-module. Since $J$ is a Hopf ideal, it follows from \cite[Corollary 3.3]{Lo} and the first isomorphism that $P_{k,l}\otimes S_{i,j}\cong P_{0,0}\otimes S_{k,l}\otimes S_{i,j}\cong P_{0,0}\otimes S_{i+k,j+l}\cong P_{i+k,j+l}$. Similarly, one can show that $S_{i,j}\otimes P_{k,l}\cong P_{i+k, j+l}$, which also follows from the proof of \cite[Lemma 3.3]{Cib99}.
\end{proof}

\begin{proposition}\label{3.7} Let $i,j,k,l\in\mathbb{Z}_n$. Then $P_{i,j}\otimes P_{k,l}\cong \oplus_{r,t\in\mathbb{Z}_n}P_{r,t}$.
\end{proposition}

\begin{proof} By Proposition \ref{3.6}, we only need to consider the case $i=j=k=l=0$. For any short exact sequence $0\rightarrow N\rightarrow M\rightarrow L\rightarrow 0$ of modules, the exact sequence $0\rightarrow P_{0,0}\otimes N\rightarrow P_{0,0}\otimes M\rightarrow P_{0,0}\otimes L\rightarrow 0$ is always split, since $P_{0,0}\otimes L$ is projective for any module $L$; hence the isomorphism class of $P_{0,0}\otimes M$ only depends on the class $[M]$ in $G_0(\mathcal{H}_n(q))$. By Corollary \ref{3.4} and the proof of Corollary \ref{3.5}, $[P_{0,0}]=\sum_{r,t\in\mathbb{Z}_n}[S_{r,t}]$ in $G_0(\mathcal{H}_n(q))$. Then it follows from Proposition \ref{3.6} that $P_{0,0}\otimes P_{0,0}\cong\oplus_{r,t\in\mathbb{Z}_n}P_{0,0}\otimes S_{r,t}\cong \oplus_{r,t\in\mathbb{Z}_n}P_{r,t}$, which is isomorphic to the regular module $\mathcal{H}_n(q)$.
\end{proof}

By Propositions \ref{3.6} and \ref{3.7}, the projective class ring $r_p(\mathcal{H}_n(q))$ is a commutative ring generated by $[S_{1,0}]$, $[S_{0,1}]$ and $[P_{0,0}]$ subject to the relations $[S_{1,0}]^n=1$, $[S_{0,1}]^n=1$ and $[P_{0,0}]^2=\sum_{i,j=0}^{n-1}[S_{1,0}]^i[S_{0,1}]^j[P_{0,0}]$. Hence we have the following theorem.

\begin{theorem}\label{3.8} $r_p(\mathcal{H}_n(q))\cong \mathbb{Z}[x,y,z]/(x^n-1,y^n-1,z^2-\sum_{i,j=0}^{n-1}x^iy^jz)$.
\end{theorem}

\begin{proof} By Propositions \ref{3.6} and \ref{3.7}, $r_p(\mathcal{H}_n(q))$ is a commutative ring. Moreover, $r_p(\mathcal{H}_n(q))$ is generated, as a $\mathbb Z$-algebra, by $[S_{1,0}]$, $[S_{0,1}]$ and $[P_{0,0}]$. Therefore, there exists a ring epimorphism $\phi: \mathbb{Z}[x,y,z]\rightarrow r_p(\mathcal{H}_n(q))$ such that $\phi(x)=[S_{1,0}]$, $\phi(y)=[S_{0,1}]$ and $\phi(z)=[P_{0,0}]$. Let $I=(x^n-1,y^n-1,z^2-\sum_{i,j=0}^{n-1}x^iy^jz)$ be the ideal of $\mathbb{Z}[x,y,z]$ generated by $x^n-1$, $y^n-1$ and $z^2-\sum_{i,j=0}^{n-1}x^iy^jz$.
Then it follows from Propositions \ref{3.6} and \ref{3.7} that $I\subseteq{\rm Ker}(\phi)$. Hence $\phi$ induces a ring epimorphism $\overline{\phi}: \mathbb{Z}[x,y,z]/I\rightarrow r_p(\mathcal{H}_n(q))$ such that $\overline{\phi}\circ\pi=\phi$, where $\pi: \mathbb{Z}[x,y,z]\rightarrow \mathbb{Z}[x,y,z]/I$ is the canonical projection. Let $\overline{u}=\pi(u)$ for any $u\in\mathbb{Z}[x,y,z]$. Then $\overline{x}^n=1$, $\overline{y}^n=1$ and $\overline{z}^2=\sum_{i,j=0}^{n-1}\overline{x}^i\overline{y}^j\overline{z}$ in $\mathbb{Z}[x,y,z]/I$. Hence $\mathbb{Z}[x,y,z]/I$ is generated, as a $\mathbb Z$-module, by $\{\overline{x}^i\overline{y}^j, \overline{x}^i\overline{y}^j\overline{z}\mid i,j\in\mathbb{Z}_n\}$. Since $r_p(\mathcal{H}_n(q))$ is a free $\mathbb Z$-module with a $\mathbb Z$-basis $\{[S_{i,j}], [P_{i,j}]\mid i,j\in\mathbb{Z}_n\}$, one can define a $\mathbb Z$-module map $\psi: r_p(\mathcal{H}_n(q))\rightarrow\mathbb{Z}[x,y,z]/I$ by $\psi([S_{i,j}])=\overline{x}^i\overline{y}^{j}$ and $\psi([P_{i,j}])=\overline{x}^i\overline{y}^{j}\overline{z}$ for any $i,j\in\mathbb{Z}_n$. Now for any $i,j\in\mathbb{Z}_n$, we have $\psi(\overline{\phi}(\overline{x}^i\overline{y}^j))=\psi(\overline{\phi}(\overline{x})^i\overline{\phi}(\overline{y})^j) =\psi([S_{1,0}]^i[S_{0,1}]^j)=\psi([S_{i,j}])=\overline{x}^i\overline{y}^j$ and $\psi(\overline{\phi}(\overline{x}^i\overline{y}^j\overline{z}))=\psi(\overline{\phi}(\overline{x})^i\overline{\phi}(\overline{y})^j\overline{\phi}(\overline{z})) =\psi([S_{1,0}]^i[S_{0,1}]^j[P_{0,0}])=\psi([P_{i,j}])=\overline{x}^i\overline{y}^j\overline{z}$. This shows that $\overline{\phi}$ is injective, and so $\overline{\phi}$ is a ring isomorphism.
\end{proof}

Now we consider the projective class algebra $R_p(\mathcal{H}_n(q))$. By Theorem \ref{3.8}, we have
$$R_p(\mathcal{H}_n(q))\cong \mathbb{K}[x,y,z]/(x^n-1,y^n-1,z^2-\sum_{i,j=0}^{n-1}x^iy^jz).$$
Put $I=(x^n-1,y^n-1,z^2-\sum_{i,j=0}^{n-1}x^iy^jz)$ and let $J(\mathbb{K}[x,y,z]/I)$ be the Jacobson radical of $\mathbb{K}[x,y,z]/I$. For any $u\in\mathbb{K}[x,y,z]$, let $\overline{u}$ denote the image of $u$ under the canonical projection $\mathbb{K}[x,y,z]\rightarrow\mathbb{K}[x,y,z]/I$. Then by the proof of Theorem \ref{3.8}, $\mathbb{K}[x,y,z]/I$ is of dimension $2n^2$ with a $\mathbb K$-basis $\{\overline{x}^i\overline{y}^j, \overline{x}^i\overline{y}^j\overline{z}\mid 0\leqslant i,j\leqslant n-1\}$. From $\overline{x}^n=1$, $\overline{y}^n=1$ and $\overline{z}^2=\sum_{i,j=0}^{n-1}\overline{x}^i\overline{y}^j\overline{z}$, one gets $(1-\overline{x})\overline{z}^2=(1-\overline{y})\overline{z}^2=0$, and so $((1-\overline{x})\overline{z})^2=((1-\overline{y})\overline{z})^2=0$. Consequently, the ideal $((1-\overline{x})\overline{z}, (1-\overline{y})\overline{z})$ of $\mathbb{K}[x,y,z]/I$ generated by $(1-\overline{x})\overline{z}$ and $(1-\overline{y})\overline{z}$ is contained in $J(\mathbb{K}[x,y,z]/I)$. Moreover, $\dim\big((\mathbb{K}[x,y,z]/I)/((1-\overline{x})\overline{z}, (1-\overline{y})\overline{z})\big)=n^2+1$ and
$$(\mathbb{K}[x,y,z]/I)/((1-\overline{x})\overline{z}, (1-\overline{y})\overline{z})\cong\mathbb{K}[x,y,z]/(x^n-1,y^n-1,z^2-n^2z, (1-x)z, (1-y)z).$$
Let $\pi: \mathbb{K}[x,y,z]\rightarrow \mathbb{K}[x,y,z]/(x^n-1,y^n-1,z^2-n^2z, (1-x)z, (1-y)z)$ be the canonical projection.
For any integers $k,l\geqslant0$, let $f_{k,l}=\frac{1}{n^2}\sum_{i,j=0}^{n-1}q^{ki+lj}x^iy^j$ in $\mathbb{K}[x,y,z]$. Then a straightforward verification shows that
$$\{\pi(f_{k,l}), \pi(f_{0,k}), \pi(f_{0,0}-\tfrac{1}{n^2}z), \pi(\tfrac{1}{n^2}z)\mid 1\leqslant k\leqslant n-1,\ 0\leqslant l\leqslant n-1\}$$
is a set of orthogonal idempotents; since it consists of $n^2+1$ elements and $\dim(\mathbb{K}[x,y,z]/(x^n-1,y^n-1,z^2-n^2z, (1-x)z, (1-y)z))=n^2+1$, it is a full set of orthogonal primitive idempotents in $\mathbb{K}[x,y,z]/(x^n-1,y^n-1,z^2-n^2z, (1-x)z, (1-y)z)$. Therefore,
$$\mathbb{K}[x,y,z]/(x^n-1,y^n-1,z^2-n^2z, (1-x)z, (1-y)z)\cong\mathbb{K}^{n^2+1}.$$
Thus, $J(\mathbb{K}[x,y,z]/I)\subseteq((1-\overline{x})\overline{z}, (1-\overline{y})\overline{z})$, and so $J(\mathbb{K}[x,y,z]/I)=((1-\overline{x})\overline{z}, (1-\overline{y})\overline{z})$. This shows the following proposition.

\begin{proposition}\label{3.9} Let $J(R_p(\mathcal{H}_n(q)))$ be the Jacobson radical of $R_p(\mathcal{H}_n(q))$. Then $J(R_p(\mathcal{H}_n(q)))=((1-[S_{1,0}])[P_{0,0}], (1-[S_{0,1}])[P_{0,0}])$ and
$$\begin{array}{rl}
&R_p(\mathcal{H}_n(q))/J(R_p(\mathcal{H}_n(q)))\\
\cong&\mathbb{K}[x,y,z]/(x^n-1,y^n-1,z^2-n^2z, (1-x)z, (1-y)z) \cong{\mathbb K}^{n^2+1}.\\
\end{array}$$
\end{proposition}

\section{\bf The Projective Class Ring of $H_n(0,q)$}\selabel{4}

In this section, we investigate the projective class ring of $H_n(0,q)$.

\begin{proposition}\label{4.1} $H_n(0,q)$ is a symmetric algebra.
\end{proposition}

\begin{proof} By \cite[Proposition 3.4]{Ch1} and its proof, $H_n(0,q)$ is unimodular. Moreover, $S^2(a)=qa$, $S^2(b)=b$, $S^2(c)=c$ and $S^2(d)=q^{-1}d$, where $S$ is the antipode of $H_n(0, q)$. Hence $S^2(x)=bxb^{-1}=cxc^{-1}$ for all $x\in H_n(0,q)$. That is, $S^2$ is an inner automorphism of $H_n(0,q)$. It follows from \cite{Lo, ObSch} that $H_n(0,q)$ is a symmetric algebra.
\end{proof}

Note that $\mathcal{H}_n(q)$ is not symmetric since it is not unimodular.

\begin{proposition}\label{4.1+1} $H_n(0, q)$ is of wild representation type.
\end{proposition}

\begin{proof} The proof is similar to that of Proposition \ref{3.1}. Let $A$ be the subalgebra of $H_n(0, q)$ generated by $a$ and $d$. Then $A$ is a $\mathbb{K}G$-module algebra with the action given by $b\cdot a=qa$, $b\cdot d=q^{-1}d$, $c\cdot a=qa$ and $c\cdot d=q^{-1}d$, where $G=G(H_n(0,q))=\{b^ic^j\mid i,j\in\mathbb{Z}_n\}\cong \mathbb{Z}_n\times\mathbb{Z}_n$. Moreover, $A\cong\mathbb{K}\langle x, y\rangle/(x^n, y^n, yx-qxy)$ and $H_n(0,q)\cong A\#\mathbb{K}G$ as $\mathbb{K}$-algebras. Since $n\geqslant3$, it follows from \cite[p.295(3.4)]{Ringel} that $A$ is of wild representation type. Since $\mathbb{K}G$ is a semisimple and cosemisimple Hopf algebra (as ${\rm char}(\mathbb{K})\nmid |G|$), it follows from \cite[Theorem 4.5]{Liu} that $A\#\mathbb{K}G$ is of wild representation type.
\end{proof}

$H_n(0,q)$ has $n^2$ orthogonal primitive idempotents
$$e_{i,j}=\frac{1}{n^2}\sum_{k,l\in\mathbb{Z}_n}q^{-ik-jl}b^kc^l =\frac{1}{n^2}\sum_{k,l=0}^{n-1}q^{-ik-jl}b^kc^l,\ \ i,j\in\mathbb{Z}_n.$$

\begin{lemma}\label{4.2} Let $i,j\in\mathbb{Z}_n$.
Then
$$be_{i,j}=q^ie_{i,j}, \ ce_{i,j}=q^je_{i,j},\ ae_{i,j}=e_{i+1,j+1}a, \ de_{i,j}=e_{i-1,j-1}d.$$
\end{lemma}

\begin{proof} This follows by a straightforward verification.
\end{proof}

For $i,j\in\mathbb{Z}_n$, let $S_{i,j}$ be the one-dimensional $H_n(0,q)$-module defined by $bv=q^iv$, $cv=q^jv$ and $av=dv=0$ for $v\in S_{i,j}$. Let $P_{i,j}=P(S_{i,j})$ be the projective cover of $S_{i,j}$, and let $J={\rm rad}(H_n(0,q))$ be the Jacobson radical of $H_n(0,q)$.

\begin{lemma}\label{4.3} The simple modules $S_{i,j}$, $i,j\in\mathbb{Z}_n$, exhaust all simple modules of $H_n(0,q)$, and consequently the projective modules $P_{i,j}$, $i,j\in\mathbb{Z}_n$, exhaust all indecomposable projective modules of $H_n(0,q)$. Moreover, $P_{i,j}\cong H_n(0,q)e_{i,j}$ for all $i,j\in\mathbb{Z}_n$.
\end{lemma}

\begin{proof} The proof is similar to that of Lemma \ref{3.3}.
\end{proof}

\begin{corollary}\label{4.4} $H_n(0, q)$ is a basic algebra. Moreover, $J$ is a Hopf ideal of $H_n(0,q)$, and the Loewy length of $H_n(0,q)$ is $2n-1$.
\end{corollary}

\begin{proof} The proof is similar to that of Corollary \ref{3.4}.
\end{proof}

Let $e_i=\sum_{j=0}^{n-1}e_{i+j,j}=\frac{1}{n}\sum_{j=0}^{n-1}q^{-ij}b^jc^{-j}$, $i\in\mathbb{Z}_n$. Then by Lemmas \ref{4.2} and \ref{4.3}, $\{e_i\mid i\in\mathbb{Z}_n\}$ is a full set of central primitive idempotents of $H_n(0,q)$. Hence $H_n(0,q)$ decomposes into $n$ blocks $H_n(0, q)e_i$, $i\in\mathbb{Z}_n$. In the rest of this section, we identify $P_{i,j}$ with $H_n(0,q)e_{i,j}$ for all $i,j\in\mathbb{Z}_n$.

\begin{corollary}\label{4.5} $P_{i,j}$ is $n^2$-dimensional with a $\mathbb K$-basis $\{a^kd^le_{i,j}\mid 0\leqslant k, l\leqslant n-1\}$, $i,j\in\mathbb{Z}_n$.
\end{corollary}

\begin{proof} The proof is similar to that of Corollary \ref{3.5}.
\end{proof}

For $i, j\in\mathbb{Z}_n$, let $e_{i,j}^{k,l}=a^kd^le_{i,j}$ in $P_{i,j}$.
Using the same symbols as in the last section, the structure of $P_{i,j}$ can be described as follows:
$$\begin{tikzpicture}[scale=1]
\path (0,0) node(e) {$e_{i,j}^{0,0}$};
\path (-1,-1) node(a) {$e_{i,j}^{1,0}$} (1,-1) node(d) {$e_{i,j}^{0,1}$};
\path (-0.45,-1.35) node(q1) {$q$};
\path (-2,-1.95) node(a2) {$\adots$} (0,-2) node(ad) {$e_{i,j}^{1,1}$} (2,-2) node(d2) {$\ddots$};
\path (0.6,-2.4) node(q2) {$q$};
\path (-1.5, -2.35) node(1) {$\ddots$} (1.6, -2.4) node(1c) {$\adots$};
\path (-3,-3) node(an-2) {$e_{i,j}^{n-2,0}$} (-1.14,-3.23) node(2l) {$\cdot$} (-1,-3) node(2) {$\ddots$} (-0.87,-3.01) node(2r) {$\cdot$} (0.85,-3.235) node(3l) {$\cdot$} (1,-3) node(3) {$\ddots$} (1.15,-3) node(3r) {$\cdot$} (3,-3) node (dn-2) {$e_{i,j}^{0,n-2}$};
\path (-2.2,-3.3) node(q3) {$q^{n-2}$} (-0.5,-3.5) node(3.5l) {$\ddots$} (0.5,-3.5) node(3.5c) {$\adots$} (1.5,-3.3) node(q4) {$q$};
\path (-4,-4) node(an-1) {$e_{i,j}^{n-1,0}$} (-2, -4) node(an-2d) {$e_{i,j}^{n-2,1}$} (-0.15,-4.22) node(4l) {$\cdot$} (0,-4) node(4) {$\ddots$} (0.15,-4) node(4r) {$\cdot$} (2,-4) node(adn-2) {$e_{i,j}^{1,n-2}$} (4, -4) node(dn-1) {$e_{i,j}^{0,n-1}$};
\path (-3.2,-4.3) node(q5) {$q^{n-1}$} (-1.2,-4.3) node(q6) {$q^{n-2}$} (-0.5,-4.45) node(4.5l) {$\adots$} (0.5,-4.5) node(4.5r) {$\ddots$} (2.55,-4.4) node(q7) {$q$};
\path (-3,-5) node(an-1d) {$e_{i,j}^{n-1,1}$} (-1.15,-5.235) node(5l) {$\cdot$} (-1,-5) node(5) {$\ddots$} (-0.85,-5) node(5r) {$\cdot$} (0.85,-5.235) node(6l) {$\cdot$} (1,-5) node(6) {$\ddots$} (1.15, -5) node(6r) {$\cdot$} (3,-5) node (adn-1) {$e_{i,j}^{1,n-1}$};
\path (-2.2,-5.3) node(q8) {$q^{n-1}$} (-1.65,-5.5) node(5.5l) {$\adots$} (-0.2,-5.3) node(q9) {$q^{n-2}$} (1.5, -5.5) node(5.5r) {$\ddots$};
\path (-2,-6) node(7) {$\ddots$} (0,-6) node(an-2dn-2) {$e_{i,j}^{n-2,n-2}$} (2,-6) node (9) {$\adots$};
\path (-1.2,-6.3) node(q10) {$q^{n-1}$} (0.8,-6.3) node(q11) {$q^{n-2}$};
\path (-1,-7) node(an-1dn-2) {$e_{i,j}^{n-1,n-2}$\ \ } (1,-7) node(an-2dn-1) {\ \ $e_{i,j}^{n-2,n-1}$};
\path (-0.2,-7.3) node(q12) {$q^{n-1}$};
\path (0,-8) node(an-1dn-1) {$e_{i,j}^{n-1,n-1}$};
\draw[->] (e) --(a); \draw[->,dashed] (e) --(d);
\draw[->] (a) --(a2); \draw[->,dashed] (a) --(ad);
\draw[->] (d) --(ad); \draw[->,dashed] (d) --(d2);
\draw[->] (a2) --(an-2); \draw[->] (ad) --(2);
\draw[->,dashed] (ad) --(3); \draw[->,dashed] (d2) --(dn-2);
\draw[->] (an-2) --(an-1); \draw[->,dashed] (an-2) --(an-2d);
\draw[->] (2) --(an-2d); \draw[->,dashed] (3) --(adn-2);
\draw[->] (dn-2) --(adn-2); \draw[->,dashed] (dn-2) --(dn-1);
\draw[->,dashed] (an-1) --(an-1d); \draw[->] (an-2d) --(an-1d);
\draw[->,dashed] (an-2d) --(5); \draw[->] (adn-2) --(6);
\draw[->,dashed] (adn-2) --(adn-1); \draw[->] (dn-1) --(adn-1);
\draw[->,dashed] (an-1d) --(7); \draw[->,dashed] (5) --(an-2dn-2);
\draw[->] (6) --(an-2dn-2); \draw[->] (adn-1) --(9);
\draw[->,dashed] (7) --(an-1dn-2); \draw[->] (an-2dn-2) --(an-1dn-2);
\draw[->,dashed] (an-2dn-2) --(an-2dn-1); \draw[->] (9) --(an-2dn-1);
\draw[->,dashed] (an-1dn-2) --(an-1dn-1); \draw[->] (an-2dn-1) --(an-1dn-1);
\end{tikzpicture}$$

\begin{proposition}\label{4.6} The $n$ blocks $H_n(0,q)e_i$, $i\in\mathbb{Z}_n$, are isomorphic to each other.
\end{proposition}

\begin{proof} Let $i\in\mathbb{Z}_n$.
Since $e_i=\sum_{j=0}^{n-1}e_{i+j,j}$, we have $H_n(0,q)e_i=\oplus_{j=0}^{n-1}H_n(0,q)e_{i+j,j}$ as $H_n(0,q)$-modules. Then by Corollary \ref{4.5}, $\dim(H_n(0,q)e_i)=n^3$. By Lemma \ref{4.2}, one gets $be_i=q^ice_i$. It follows that $H_n(0,q)e_i={\rm span}\{a^jd^kb^le_i\mid 0\leqslant j,k,l\leqslant n-1\}$, and so $\{a^jd^kb^le_i\mid 0\leqslant j,k,l\leqslant n-1\}$ is a $\mathbb K$-basis of $H_n(0, q)e_i$. Let $B$ be the subalgebra of $H_n(0,q)$ generated by $a$, $b$ and $d$. Then one can easily check that the block $H_n(0,q)e_i$ is isomorphic, as an algebra, to the subalgebra $B$ of $H_n(0,q)$. Thus, the proposition follows.
\end{proof}

Let $i\in\mathbb{Z}_n$ be fixed. For any $j\in\mathbb{Z}_n$, let $\overline{e}_j=e_{i+j,j}$. Then the Gabriel quiver $Q=(Q_0, Q_1)$ of the block $H_n(0,q)e_i$ is given by
$$\begin{tikzpicture}[auto,bend right,scale=0.6]
\node (e0) at (90:4) {$\overline{e}_0$};
\node (e1) at (40:4) {$\overline{e}_1$};
\node (e2) at (-10:4) {$\overline{e}_2$};
\node (en_1) at (140:4) {$\overline{e}_{n-1}$};
\node (en_2) at (190:4) {$\overline{e}_{n-2}$};
\node (ejplus1) at (220:4) {$\overline{e}_{j+1}$};
\node (ej) at (270:4) {$\overline{e}_j$};
\node (ej_1) at (320:4) {$\overline{e}_{j-1}$};
\draw [<-](e0) to node [swap] {$\beta_0$} (e1);
\draw [<-](e1) to node [swap] {$\alpha_0$} (e0);
\draw [<-](e1) to node [swap] {$\beta_1$} (e2);
\draw [<-](e2) to node [swap] {$\alpha_1$} (e1);
\draw [<-](en_1) to node [swap] {$\beta_{n-1}$} (e0);
\draw [<-](e0) to node [swap] {$\alpha_{n-1}$} (en_1);
\draw [<-](en_2) to node [swap] {$\beta_{n-2}$} (en_1);
\draw [<-](en_1) to node [swap] {$\alpha_{n-2}$} (en_2);
\draw [<-](ej) to node [swap] {$\alpha_{j-1}$} (ej_1);
\draw [<-](ej_1) to node [swap] {$\beta_{j-1}$} (ej);
\draw [<-](ejplus1) to node [swap] {$\alpha_j$} (ej);
\draw [<-](ej) to node [swap] {$\beta_j$} (ejplus1);
\draw[gray,dashed] (e2) -- (ej_1);
\draw[gray,dashed] (en_2) -- (ejplus1);
\end{tikzpicture}$$
where for $j\in\mathbb{Z}_n$ the arrows $\alpha_j$, $\beta_j$ correspond to $a\overline{e}_j$, $d\overline{e}_{j+1}$, respectively. The admissible ideal $I$ is generated by the relations
$$\beta_{j}\alpha_{j}-q\alpha_{j-1}\beta_{j-1}=0,\ \alpha_{j+(n-1)}\cdots\alpha_{j+1}\alpha_{j}=0,\ \beta_{j-(n-1)}\cdots\beta_{j-1}\beta_{j}=0,\ j\in\mathbb{Z}_n.$$

\begin{proposition}\label{4.7} $S_{i,j}\otimes S_{k,l}\cong S_{i+k,j+l}$ and $S_{i,j}\otimes P_{k,l}\cong P_{k,l}\otimes S_{i,j}\cong P_{i+k,j+l}$ for all $i,j,k,l\in\mathbb{Z}_n$.
\end{proposition}

\begin{proof} The proof is similar to that of Proposition \ref{3.6}.
\end{proof}

\begin{proposition}\label{4.8} Let $i,j,k,l\in\mathbb{Z}_n$. Then $P_{i,j}\otimes P_{k,l}\cong \oplus_{t\in\mathbb{Z}_n}nP_{i+k+t,j+l+t}$.
\end{proposition}

\begin{proof} The proof is similar to that of Proposition \ref{3.7}. Note that $[P_{0,0}]=\sum_{t=0}^{n-1}n[S_{t,t}]$ in $G_0(H_n(0,q))$ by Corollaries \ref{4.4} and \ref{4.5}.
\end{proof}

\begin{theorem}\label{4.9} $r_p(H_n(0,q))\cong \mathbb{Z}[x,y,z]/(x^n-1,y^n-1,z^2-n\sum_{i=0}^{n-1}x^iz)$.
\end{theorem}

\begin{proof} The proof is similar to that of Theorem \ref{3.8}. Note that $r_p(H_n(0,q))$ is a commutative ring generated by $[S_{1,1}]$, $[S_{0,1}]$ and $[P_{0,0}]$.
\end{proof}

Now we consider the projective class algebra $R_p(H_n(0,q))$. By Theorem \ref{4.9}, we have
$$R_p(H_n(0,q))\cong \mathbb{K}[x,y,z]/(x^n-1,y^n-1,z^2-n\sum_{i=0}^{n-1}x^iz).$$
Put $I=(x^n-1,y^n-1,z^2-n\sum_{i=0}^{n-1}x^iz)$ and let $J(\mathbb{K}[x,y,z]/I)$ be the Jacobson radical of $\mathbb{K}[x,y,z]/I$. For any $u\in\mathbb{K}[x,y,z]$, let $\overline{u}$ denote the image of $u$ under the canonical projection $\mathbb{K}[x,y,z]\rightarrow\mathbb{K}[x,y,z]/I$. Then by Theorem \ref{4.9}, $\mathbb{K}[x,y,z]/I$ is of dimension $2n^2$ with a $\mathbb K$-basis $\{\overline{x}^i\overline{y}^j, \overline{x}^i\overline{y}^j\overline{z}\mid i,j\in\mathbb{Z}_n\}$. Since $\overline{x}^n=1$ and $\overline{z}^2=n\sum_{i=0}^{n-1}\overline{x}^i\overline{z}$, one gets $(1-\overline{x})\overline{z}^2=0$, and so $((1-\overline{x})\overline{z})^2=0$. Consequently, the ideal $((1-\overline{x})\overline{z})$ of $\mathbb{K}[x,y,z]/I$ generated by $(1-\overline{x})\overline{z}$ is contained in $J(\mathbb{K}[x,y,z]/I)$. Moreover, $\dim\big((\mathbb{K}[x,y,z]/I)/((1-\overline{x})\overline{z})\big)=n(n+1)$ and
$$(\mathbb{K}[x,y,z]/I)/((1-\overline{x})\overline{z})\cong\mathbb{K}[x,y,z]/(x^n-1,y^n-1,z^2-n^2z, (1-x)z).$$
Let $\pi: \mathbb{K}[x,y,z]\rightarrow \mathbb{K}[x,y,z]/(x^n-1,y^n-1,z^2-n^2z, (1-x)z)$ be the canonical projection. For any integer $k\geqslant0$, let $f_k=\frac{1}{n}\sum_{i=0}^{n-1}q^{ki}x^i$ and $g_k=\frac{1}{n}\sum_{i=0}^{n-1}q^{ki}y^i$ in $\mathbb{K}[x,y,z]$. Then a straightforward verification shows that
$$\{\pi(f_kg_l), \pi((f_0-\tfrac{1}{n^2}z)g_l), \pi(\tfrac{1}{n^2}zg_l)\mid 1\leqslant k\leqslant n-1,\ 0\leqslant l\leqslant n-1\}$$
is a set of orthogonal idempotents; since it consists of $n(n+1)$ elements and $\dim(\mathbb{K}[x,y,z]/(x^n-1,y^n-1,z^2-n^2z, (1-x)z))=n(n+1)$, it is a full set of orthogonal primitive idempotents in $\mathbb{K}[x,y,z]/(x^n-1,y^n-1,z^2-n^2z, (1-x)z)$. Therefore,
$$\mathbb{K}[x,y,z]/(x^n-1,y^n-1,z^2-n^2z, (1-x)z)\cong\mathbb{K}^{n(n+1)}.$$
It follows that $J(\mathbb{K}[x,y,z]/I)\subseteq((1-\overline{x})\overline{z})$, and so $J(\mathbb{K}[x,y,z]/I)=((1-\overline{x})\overline{z})$. This shows the following proposition.

\begin{proposition}\label{4.10} Let $J(R_p(H_n(0,q)))$ be the Jacobson radical of $R_p(H_n(0,q))$. Then $J(R_p(H_n(0,q)))=((1-[S_{1,1}])[P_{0,0}])$ and
$$\begin{array}{rl}
&R_p(H_n(0,q))/J(R_p(H_n(0,q)))\\
\cong&\mathbb{K}[x,y,z]/(x^n-1,y^n-1,z^2-n^2z, (1-x)z) \cong{\mathbb K}^{n(n+1)}.\\
\end{array}$$
\end{proposition}

\section{\bf The Projective Class Ring of $H_n(1,q)$}\selabel{5}

In this section, we study the projective class ring of $H_n(1,q)$. The finite-dimensional indecomposable $H_n(1,q)$-modules are classified in \cite{Ch2, Ch4}. There are $n^2$ simple modules $V(l,r)$ over $H_n(1,q)$, where $1\leqslant l\leqslant n$ and $r\in\mathbb{Z}_n$. The simple modules $V(n,r)$ are both projective and injective. Let $P(l,r)$ be the projective cover of $V(l,r)$. Then $P(l,r)$ is the injective envelope of $V(l,r)$ as well. Moreover, $P(n,r)\cong V(n,r)$. Note that $M\otimes N\cong N\otimes M$ for any modules $M$ and $N$ since $H_n(1, q)$ is a quasitriangular Hopf algebra. For any $t\in{\mathbb Z}$, let $c(t):=[\frac{t+1}{2}]$ be the integer part of $\frac{t+1}{2}$.
That is, $c(t)$ is the largest integer with $c(t)\leqslant\frac{t+1}{2}$. Then $c(t)+c(t-1)=t$.

{\bf Convention}: If $\oplus_{l\leqslant i\leqslant m}M_i$ is a term in a decomposition of a module, then this term is understood to vanish when $l>m$.

\begin{lemma}\label{5.1} Let $1\leqslant l, l'\leqslant n$ and $r, r'\in{\mathbb Z}_n$.\\
$(1)$ $V(1, r)\otimes V(l, r')\cong V(l, r+r')$.\\
$(2)$ $V(1, r)\otimes P(l, r')\cong P(l, r+r')$.\\
$(3)$ If $l\leqslant l'$ and $l+l'\leqslant n+1$, then $V(l, r)\otimes V(l', r')\cong \oplus_{i=0}^{l-1}V(l+l'-1-2i, r+r'+i)$.\\
$(4)$ If $l\leqslant l'$ and $t=l+l'-(n+1)>0$, then
$$\begin{array}{rcl}
V(l, r)\otimes V(l', r')&\cong&(\oplus_{i=c(t)}^tP(l+l'-1-2i, r+r'+i))\\
&&\oplus(\oplus_{t+1\leqslant i\leqslant l-1}V(l+l'-1-2i, r+r'+i)).\\
\end{array}$$
$(5)$ If $l\leqslant l'<n$ and $l+l'\leqslant n$, then $V(l, r)\otimes P(l', r')\cong\oplus_{i=0}^{l-1}P(l+l'-1-2i, r+r'+i)$.\\
$(6)$ If $l\leqslant l'<n$ and $t=l+l'-(n+1)\geqslant 0$, then
$$\begin{array}{rl}
V(l, r)\otimes P(l', r') \cong&(\oplus_{i=c(t)}^t2P(l+l'-1-2i, r+r'+i))\\
&\oplus(\oplus_{i=t+1}^{l-1}P(l+l'-1-2i, r+r'+i)).\\
\end{array}$$
$(7)$ If $l'<l<n$ and $l+l'\leqslant n$, then
$$\begin{array}{rcl}
V(l,r)\otimes P(l',r') &\cong&(\oplus_{i=0}^{l'-1}P(l+l'-1-2i, r+r'+i))\\
&&\oplus(\oplus_{i=c(l+l'-1)}^{l-1}2P(n+l+l'-1-2i, r+r'+i)).\\
\end{array}$$
$(8)$ If $l'<l<n$ and $t=l+l'-(n+1)\geqslant 0$, then
$$\begin{array}{rl}
V(l, r)\otimes P(l', r') \cong&(\oplus_{i=c(t)}^t2P(l+l'-1-2i, r+r'+i))\\
&\oplus(\oplus_{i=t+1}^{l'-1}P(l+l'-1-2i, r+r'+i))\\
&\oplus(\oplus_{i=c(l+l'-1)}^{l-1}2P(n+l+l'-1-2i, r+r'+i)).\\
\end{array}$$
$(9)$ If $l<n$, then
$$\begin{array}{rl}
V(n, r)\otimes P(l, r')\cong&(\oplus_{i=c(l-1)}^{l-1}2P(n+l-1-2i, r+r'+i))\\
&\oplus(\oplus_{i=1}^{c(n-l)}2P(l-1+2i, r+r'-i)).\\
\end{array}$$
$(10)$ If $l\leqslant l'<n$ and $l+l'\leqslant n$, then
$$\begin{array}{rl}
P(l, r)\otimes P(l', r') \cong &(\oplus_{i=0}^{l-1}2P(l+l'-1-2i, r+r'+i))\\
&\oplus(\oplus_{i=l'}^{l'+l-1}2P(n+l+l'-1-2i, r+r'+i))\\
&\oplus(\oplus_{c(l'+l-1)\leqslant i\leqslant l'-1}4P(n+l+l'-1-2i, r+r'+i))\\
&\oplus(\oplus_{1\leqslant i\leqslant c(n-l-l')}4P(l+l'-1+2i, r+r'-i)).\\
\end{array}$$
$(11)$ If $l\leqslant l'<n$ and $t=l+l'-(n+1)\geqslant 0$, then
$$\begin{array}{rl}
P(l, r)\otimes P(l', r') \cong&(\oplus_{i=c(t)}^{t}4P(l+l'-1-2i, r+r'+i))\\
&\oplus(\oplus_{i=t+1}^{l-1}2P(l+l'-1-2i, r+r'+i))\\
&\oplus(\oplus_{i=l'}^{n-1}2P(n+l+l'-1-2i, r+r'+i))\\
&\oplus(\oplus_{c(l'+l-1)\leqslant i\leqslant l'-1}4P(n+l+l'-1-2i, r+r'+i)).\\
\end{array}$$
\end{lemma}

\begin{proof} This follows from \cite{Ch2, ChenHassenSun}.
\end{proof}

By Lemma \ref{5.1} or \cite[Corollary 3.2]{ChenHassenSun}, the category consisting of semisimple modules and projective modules in $H_n(1,q)$-mod is a monoidal subcategory of $H_n(1,q)$-mod. Therefore, we have the following corollary.

\begin{corollary}\label{5.2} $r_p(H_n(1,q))$ is a free $\mathbb Z$-module with a $\mathbb Z$-basis $\{[V(k,r)], [P(l,r)]\mid 1\leqslant k\leqslant n, 1\leqslant l\leqslant n-1, r\in\mathbb{Z}_n\}$.
\end{corollary}

\begin{lemma}\label{5.3} Let $2\leqslant m\leqslant n-1$. Then
$$V(2,0)^{\otimes m}\cong\oplus_{i=0}^{[\frac{m}{2}]} \frac{m-2i+1}{m-i+1}\binom{m}{i}V(m+1-2i, i).$$
\end{lemma}

\begin{proof} By Lemma \ref{5.1}(3), one can easily check that the isomorphism in the lemma holds for $m=2$ and $m=3$. Now let $3<m\leqslant n-1$ and assume
$$V(2,0)^{\otimes(m-1)}\cong \oplus_{i=0}^{[\frac{m-1}{2}]} \frac{m-2i}{m-i}\binom{m-1}{i}V(m-2i, i).$$
If $m=2l$ is even, then by the induction hypothesis and Lemma \ref{5.1}(3), we have
$$\begin{array}{rcl}
V(2,0)^{\otimes m}&=&V(2, 0)\otimes V(2,0)^{\otimes(m-1)}\\
&\cong&\oplus_{i=0}^{l-1} \frac{2l-2i}{2l-i}\binom{2l-1}{i}V(2, 0)\otimes V(2l-2i, i)\\
&\cong&\oplus_{i=0}^{l-1}\frac{2l-2i}{2l-i}\binom{2l-1}{i}(V(2l+1-2i, i)\oplus V(2l-1-2i, i+1))\\
&\cong& V(2l+1,0)\oplus\frac{2}{l+1}\binom{2l-1}{l-1}V(1, l)\\
&&\oplus(\oplus_{i=1}^{l-1}(\frac{2l-2i}{2l-i} \binom{2l-1}{i}+\frac{2l-2i+2}{2l-i+1}\binom{2l-1}{i-1})V(2l+1-2i, i))\\
&\cong& V(2l+1,0)\oplus\frac{2}{l+1}\binom{2l-1}{l-1}V(1, l)\\
&&\oplus(\oplus_{i=1}^{l-1}\frac{2l+1-2i}{2l+1-i} \binom{2l}{i}V(2l+1-2i, i))\\
&\cong&\oplus_{i=0}^l\frac{2l+1-2i}{2l+1-i} \binom{2l}{i}V(2l+1-2i, i)\\
&\cong&\oplus_{i=0}^{[\frac{m}{2}]}\frac{m+1-2i}{m+1-i} \binom{m}{i}V(m+1-2i, i).\\
\end{array}$$
If $m=2l+1$ is odd, then by the same argument as above, we have
$$\begin{array}{rl}
&V(2,0)^{\otimes m}\\
=&V(2, 0)\otimes V(2,0)^{\otimes(m-1)}\\
\cong&\oplus_{i=0}^l\frac{2l+1-2i}{2l+1-i} \binom{2l}{i}V(2, 0)\otimes V(2l+1-2i, i)\\
\cong&(\oplus_{i=0}^{l-1}\frac{2l+1-2i}{2l+1-i} \binom{2l}{i}(V(2l+2-2i, i)\oplus V(2l-2i, i+1))) \oplus\frac{1}{l+1}\binom{2l}{l}V(2, l)\\
\cong&(\oplus_{i=0}^{l}\frac{2l+1-2i}{2l+1-i} \binom{2l}{i}V(2l+2-2i, i)) \oplus(\oplus_{i=0}^{l-1}\frac{2l+1-2i}{2l+1-i} \binom{2l}{i}V(2l-2i, i+1))\\
\cong&(\oplus_{i=0}^{l}\frac{2l+1-2i}{2l+1-i} \binom{2l}{i}V(2l+2-2i, i)) \oplus(\oplus_{i=1}^{l}\frac{2l+3-2i}{2l+2-i} \binom{2l}{i-1}V(2l+2-2i, i))\\
\cong& V(2l+2,0) \oplus(\oplus_{i=1}^l(\frac{2l+1-2i}{2l+1-i}\binom{2l}{i} +\frac{2l+3-2i}{2l+2-i}\binom{2l}{i-1})V(2l+2-2i, i))\\
\cong& V(2l+2,0) \oplus(\oplus_{i=1}^l\frac{2l+2-2i}{2l+2-i}\binom{2l+1}{i}V(2l+2-2i, i))\\
\cong&\oplus_{i=0}^l\frac{2l+2-2i}{2l+2-i}\binom{2l+1}{i}V(2l+2-2i, i)\\
\cong& \oplus_{i=0}^{[\frac{m}{2}]}\frac{m+1-2i}{m+1-i}\binom{m}{i}V(m+1-2i, i).\\
\end{array}$$
\end{proof}

Throughout the following, let $x=[V(1,1)]$ and $y=[V(2,0)]$ in $r_p(H_n(1,q))$.

\begin{corollary}\label{5.4} The following equations hold in $r_p(H_n(1,q))$ (or $r(H_n(1,q))$):\\
$(1)$ $x^n=1$ and $[V(m,i)]=x^i[V(m,0)]$ for all $1\leqslant m\leqslant n$ and $i\in\mathbb Z$;\\
$(2)$ $[P(m,i)]=x^i[P(m,0)]$ for all $1\leqslant m<n$ and $i\in\mathbb Z$;\\
$(3)$ $y[V(n,0)]=x[P(n-1,0)]$;\\
$(4)$ $y[P(1,0)]=[P(2,0)]+2x[V(n,0)]$;\\
$(5)$ $y[P(n-1,0)]=2[V(n,0)]+x[P(n-2,0)]$;\\
$(6)$ $y[P(m,0)]=[P(m+1,0)]+x[P(m-1,0)]$ for all $2\leqslant m\leqslant n-2$;\\
$(7)$ $[V(m+1,0)]=y^{m}-\sum_{i=1}^{[\frac{m}{2}]}\frac{m+1-2i}{m+1-i}\binom{m}{i} x^i[V(m+1-2i,0)]$ for all $2\leqslant m<n$.
\end{corollary}

\begin{proof} This follows from Lemmas \ref{5.1} and \ref{5.3}.
\end{proof}
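As a quick consistency check on the multiplicities in Lemma \ref{5.3}, one can compare dimensions on both sides of the isomorphism: assuming, as usual for these simple modules, that $\dim V(l,r)=l$, the multiplicities must be integers and must satisfy $\sum_{i=0}^{[\frac{m}{2}]}\frac{m-2i+1}{m-i+1}\binom{m}{i}(m+1-2i)=2^m$. The following short script is only an illustration of this count, not part of the proofs above.
\begin{verbatim}
from math import comb
from fractions import Fraction

# Dimension count for Lemma 5.3, assuming dim V(l,r) = l and dim V(2,0) = 2:
# sum_i ((m-2i+1)/(m-i+1)) C(m,i) * (m+1-2i)  should equal  2^m.
for m in range(2, 20):
    coeffs = [Fraction(m - 2 * i + 1, m - i + 1) * comb(m, i)
              for i in range(m // 2 + 1)]
    assert all(c.denominator == 1 for c in coeffs)   # multiplicities are integers
    assert sum(c * (m + 1 - 2 * i) for i, c in enumerate(coeffs)) == 2 ** m
print("multiplicities in Lemma 5.3 are integral and dimensions match for m = 2,...,19")
\end{verbatim}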
\begin{proposition}\label{5.5} The commutative ring $r_p(H_n(1,q))$ is generated by $x$ and $y$.
\end{proposition}

\begin{proof} Let $R$ be the subring of $r(H_n(1,q))$ generated by $x$ and $y$. Then $R\subseteq r_p(H_n(1,q))$. By Corollary \ref{5.4}(1), one gets that $[V(1,i)]=x^i\in R$ and $[V(2,i)]=x^iy\in R$ for all $i\in\mathbb{Z}_n$. Now let $2\leqslant m<n$ and assume $[V(l,i)]\in R$ for all $1\leqslant l\leqslant m$ and $i\in\mathbb{Z}_n$. Then by Corollary \ref{5.4}(1) and (7), one gets that $[V(m+1,i)]=x^i[V(m+1,0)]=x^iy^{m}-\sum_{j=1}^{[\frac{m}{2}]}\frac{m+1-2j}{m+1-j}\binom{m}{j} x^{i+j}[V(m+1-2j,0)]\in R$ for all $i\in\mathbb{Z}_n$. Thus, we have proven that $[V(m,i)]\in R$ for all $1\leqslant m\leqslant n$ and $i\in\mathbb{Z}_n$. In particular, $[V(n,i)]\in R$ for all $i\in\mathbb{Z}_n$. By Corollary \ref{5.4}(2) and (3), $[P(n-1, i)]=x^i[P(n-1,0)]=x^{i-1}y[V(n,0)]\in R$ for all $i\in\mathbb{Z}_n$. Then by Corollary \ref{5.4}(2) and (5), $[P(n-2, i)]=x^i[P(n-2,0)]=x^{i-1}(y[P(n-1, 0)]-2[V(n, 0)])\in R$ for any $i\in\mathbb{Z}_n$. Now let $1<m\leqslant n-2$ and assume that $[P(l,i)]\in R$ for all $m\leqslant l<n$ and $i\in\mathbb{Z}_n$. Then by Corollary \ref{5.4}(2) and (6), we have $[P(m-1, i)]=x^i[P(m-1,0)]=x^{i-1}(y[P(m, 0)]-[P(m+1, 0)])\in R$. Thus, we have shown that $[P(m,i)]\in R$ for all $1\leqslant m<n$ and $i\in\mathbb{Z}_n$. Then it follows from Corollary \ref{5.2} that $R=r_p(H_n(1,q))$. This completes the proof.
\end{proof}

\begin{lemma}\label{5.6} $(1)$ $[V(m,0)]=\sum_{i=0}^{[\frac{m-1}{2}]}(-1)^i\binom{m-1-i}{i}x^iy^{m-1-2i}$ for all $1\leqslant m\leqslant n$.\\
$(2)$ Let $1\leqslant m\leqslant n-1$. Then
$$[P(m,0)]=\Big(\sum_{i=0}^{[\frac{n-m}{2}]}(-1)^i\frac{n-m}{n-m-i}\binom{n-m-i}{i}x^{m+i}y^{n-m-2i}\Big)[V(n,0)].$$
\end{lemma}

\begin{proof} (1) It is similar to \cite[Lemma 3.2]{ZWLC}.\\
(2) Note that $\frac{n-m}{n-m-i}\binom{n-m-i}{i}$ is a positive integer for any $1\leqslant m\leqslant n-1$ and $0\leqslant i\leqslant[\frac{n-m}{2}]$. We prove the equality by induction on $n-m$. If $m=n-1$, then by Corollary \ref{5.4}(1) and (3), $[P(n-1,0)]=x^{-1}y[V(n,0)]=x^{n-1}y[V(n,0)]$, as desired. If $m=n-2$, then by Corollary \ref{5.4}(1) and (5), we have $[P(n-2,0)]=x^{-1}y[P(n-1,0)]-2x^{-1}[V(n,0)]=(x^{n-2}y^2-2x^{n-1})[V(n,0)]$, as desired. Now let $1\leqslant m<n-2$.
Then by Corollary \ref{5.4}(1) and (6) and the induction hypothesis, we have
$$\begin{array}{rl}
[P(m,0)]=&x^{-1}y[P(m+1,0)]-x^{-1}[P(m+2,0)]\\
=&x^{-1}y(\sum_{i=0}^{[\frac{n-m-1}{2}]}(-1)^i\frac{n-m-1}{n-m-1-i}\binom{n-m-1-i}{i}x^{m+1+i}y^{n-m-1-2i})[V(n,0)]\\
&-x^{-1}(\sum_{i=0}^{[\frac{n-m-2}{2}]}(-1)^i\frac{n-m-2}{n-m-2-i}\binom{n-m-2-i}{i}x^{m+2+i}y^{n-m-2-2i})[V(n,0)]\\
=&(\sum_{i=0}^{[\frac{n-m-1}{2}]}(-1)^i\frac{n-m-1}{n-m-1-i}\binom{n-m-1-i}{i}x^{m+i}y^{n-m-2i})[V(n,0)]\\
&+(\sum_{i=1}^{[\frac{n-m}{2}]}(-1)^{i}\frac{n-m-2}{n-m-1-i}\binom{n-m-1-i}{i-1}x^{m+i}y^{n-m-2i})[V(n,0)].\\
\end{array}$$
If $n-m$ is odd, then $[\frac{n-m-1}{2}]=\frac{n-m-1}{2}=[\frac{n-m}{2}]$, and hence
$$\begin{array}{rl}
&\sum_{i=0}^{[\frac{n-m-1}{2}]}(-1)^i\frac{n-m-1}{n-m-1-i}\binom{n-m-1-i}{i}x^{m+i}y^{n-m-2i}\\
&+\sum_{i=1}^{[\frac{n-m}{2}]}(-1)^{i}\frac{n-m-2}{n-m-1-i}\binom{n-m-1-i}{i-1}x^{m+i}y^{n-m-2i}\\
=&x^my^{n-m}+\sum_{i=1}^{[\frac{n-m}{2}]}(-1)^i(\frac{n-m-1}{n-m-1-i}\binom{n-m-1-i}{i}\\
&+\frac{n-m-2}{n-m-1-i}\binom{n-m-1-i}{i-1})x^{m+i}y^{n-m-2i}\\
=&\sum_{i=0}^{[\frac{n-m}{2}]}(-1)^i\frac{n-m}{n-m-i}\binom{n-m-i}{i}x^{m+i}y^{n-m-2i}.\\
\end{array}$$
If $n-m$ is even, then $[\frac{n-m-1}{2}]=\frac{n-m-2}{2}=[\frac{n-m}{2}]-1$, and hence
$$\begin{array}{rl}
&\sum_{i=0}^{[\frac{n-m-1}{2}]}(-1)^i\frac{n-m-1}{n-m-1-i}\binom{n-m-1-i}{i}x^{m+i}y^{n-m-2i}\\
&+\sum_{i=1}^{[\frac{n-m}{2}]}(-1)^{i}\frac{n-m-2}{n-m-1-i}\binom{n-m-1-i}{i-1}x^{m+i}y^{n-m-2i}\\
=&x^my^{n-m}+\sum_{i=1}^{[\frac{n-m}{2}]-1}(-1)^i(\frac{n-m-1}{n-m-1-i}\binom{n-m-1-i}{i}\\
&+\frac{n-m-2}{n-m-1-i}\binom{n-m-1-i}{i-1})x^{m+i}y^{n-m-2i}+(-1)^{\frac{n-m}{2}}2x^{\frac{n+m}{2}}\\
=&\sum_{i=0}^{[\frac{n-m}{2}]}(-1)^i\frac{n-m}{n-m-i}\binom{n-m-i}{i}x^{m+i}y^{n-m-2i}.\\
\end{array}$$
Therefore, $[P(m,0)]=(\sum_{i=0}^{[\frac{n-m}{2}]}(-1)^i\frac{n-m}{n-m-i}\binom{n-m-i}{i}x^{m+i}y^{n-m-2i})[V(n,0)]$.
\end{proof}
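The key step in the induction above (and again in the proof of Proposition \ref{5.7} below) is the Pascal-type identity $\frac{N-1}{N-1-i}\binom{N-1-i}{i}+\frac{N-2}{N-1-i}\binom{N-1-i}{i-1}=\frac{N}{N-i}\binom{N-i}{i}$, where $N$ plays the role of $n-m$. The following sketch is only an illustrative numerical check of this identity over a range of parameters.
\begin{verbatim}
from math import comb
from fractions import Fraction

# Pascal-type identity used in the induction step (N plays the role of n-m):
#   (N-1)/(N-1-i) C(N-1-i, i) + (N-2)/(N-1-i) C(N-1-i, i-1) = N/(N-i) C(N-i, i)
for N in range(3, 60):
    for i in range(1, (N - 1) // 2 + 1):
        lhs = (Fraction(N - 1, N - 1 - i) * comb(N - 1 - i, i)
               + Fraction(N - 2, N - 1 - i) * comb(N - 1 - i, i - 1))
        rhs = Fraction(N, N - i) * comb(N - i, i)
        assert lhs == rhs
print("Pascal-type identity verified for 3 <= N < 60")
\end{verbatim}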
\begin{proposition}\label{5.7} In $r_p(H_n(1,q))$ (or $r(H_n(1,q))$), we have
$$\Big(\sum_{i=0}^{[\frac{n}{2}]}(-1)^i\frac{n}{n-i}\binom{n-i}{i}x^iy^{n-2i}-2\Big) \Big(\sum_{i=0}^{[\frac{n-1}{2}]}(-1)^i\binom{n-1-i}{i}x^iy^{n-1-2i}\Big)=0.$$
\end{proposition}

\begin{proof} By Lemma \ref{5.6}(2), we have
$$x^{-1}y[P(1,0)]=\Big(\sum_{i=0}^{[\frac{n-1}{2}]}(-1)^i\frac{n-1}{n-1-i}\binom{n-1-i}{i}x^{i}y^{n-2i}\Big)[V(n,0)].$$
On the other hand, by Corollary \ref{5.4}(4) and Lemma \ref{5.6}(2), we have
$$\begin{array}{rl}
x^{-1}y[P(1,0)]=&x^{-1}[P(2,0)]+2[V(n,0)]\\
=&(\sum_{i=0}^{[\frac{n-2}{2}]}(-1)^i\frac{n-2}{n-2-i}\binom{n-2-i}{i}x^{i+1}y^{n-2-2i}+2)[V(n,0)]\\
=&(\sum_{i=1}^{[\frac{n}{2}]}(-1)^{i-1}\frac{n-2}{n-1-i}\binom{n-1-i}{i-1}x^{i}y^{n-2i}+2)[V(n,0)].\\
\end{array}$$
Therefore, one gets
$$\begin{array}{rl}
&\Big(\sum_{i=0}^{[\frac{n-1}{2}]}(-1)^i\frac{n-1}{n-1-i}\binom{n-1-i}{i}x^{i}y^{n-2i}\Big)[V(n,0)]\\
=&\Big(\sum_{i=1}^{[\frac{n}{2}]}(-1)^{i-1}\frac{n-2}{n-1-i}\binom{n-1-i}{i-1}x^{i}y^{n-2i}+2\Big)[V(n,0)],\\
\end{array}$$
which is equivalent to
$$\begin{array}{rl}
&\Big(\sum_{i=0}^{[\frac{n-1}{2}]}(-1)^i\frac{n-1}{n-1-i}\binom{n-1-i}{i}x^{i}y^{n-2i}\\
&-\sum_{i=1}^{[\frac{n}{2}]}(-1)^{i-1}\frac{n-2}{n-1-i}\binom{n-1-i}{i-1}x^{i}y^{n-2i}-2\Big)[V(n,0)]=0.\\
\end{array}$$
Then a computation similar to the proof of Lemma \ref{5.6} shows that
$$\begin{array}{rl}
&\sum_{i=0}^{[\frac{n-1}{2}]}(-1)^i\frac{n-1}{n-1-i}\binom{n-1-i}{i}x^{i}y^{n-2i} -\sum_{i=1}^{[\frac{n}{2}]}(-1)^{i-1}\frac{n-2}{n-1-i}\binom{n-1-i}{i-1}x^{i}y^{n-2i}-2\\
=&\sum_{i=0}^{[\frac{n}{2}]}(-1)^i\frac{n}{n-i}\binom{n-i}{i}x^{i}y^{n-2i}-2.\\
\end{array}$$
Thus, the proposition follows from Lemma \ref{5.6}(1).
\end{proof}

\begin{corollary}\label{5.8} $\{x^ly^m\mid 0\leqslant l\leqslant n-1,\ 0\leqslant m\leqslant 2n-2\}$ is a $\mathbb Z$-basis of $r_p(H_n(1,q))$.
\end{corollary}

\begin{proof} By Corollary \ref{5.4}(1), $x^n=1$. By Proposition \ref{5.7}, we have
$$\begin{array}{rl}
y^{2n-1}=&-\sum_{i=1}^{[\frac{n-1}{2}]}(-1)^i\binom{n-1-i}{i}x^iy^{2n-1-2i}\\
&-\sum_{i=1}^{[\frac{n}{2}]}(-1)^i\frac{n}{n-i}\binom{n-i}{i}x^iy^{2n-1-2i}+2y^{n-1}\\
&-\Big(\sum_{i=1}^{[\frac{n}{2}]}(-1)^i\frac{n}{n-i}\binom{n-i}{i}x^iy^{n-2i}-2\Big) \Big(\sum_{i=1}^{[\frac{n-1}{2}]}(-1)^i\binom{n-1-i}{i}x^iy^{n-1-2i}\Big).\\
\end{array}$$
Then it follows from Proposition \ref{5.5} that $r_p(H_n(1,q))$ is generated, as a $\mathbb Z$-module, by $\{x^ly^m\mid 0\leqslant l\leqslant n-1,\ 0\leqslant m\leqslant 2n-2\}$. By Corollary \ref{5.2}, $r_p(H_n(1,q))$ is a free $\mathbb Z$-module of rank $n(2n-1)$, and hence $\{x^ly^m\mid 0\leqslant l\leqslant n-1,\ 0\leqslant m\leqslant 2n-2\}$ is a $\mathbb Z$-basis of $r_p(H_n(1,q))$.
\end{proof}

\begin{theorem}\label{5.9} Let $\mathbb{Z}[x,y]$ be the polynomial ring in two variables $x$ and $y$, and $I$ the ideal of $\mathbb{Z}[x,y]$ generated by $x^n-1$ and
$$\Big(\sum_{i=0}^{[\frac{n}{2}]}(-1)^i\frac{n}{n-i}\binom{n-i}{i}x^iy^{n-2i}-2\Big) \Big(\sum_{i=0}^{[\frac{n-1}{2}]}(-1)^i\binom{n-1-i}{i}x^iy^{n-1-2i}\Big).$$
Then $r_p(H_n(1,q))$ is isomorphic to the quotient ring $\mathbb{Z}[x,y]/I$.
\end{theorem}

\begin{proof} By Proposition \ref{5.5}, there is a ring epimorphism $\phi: \mathbb{Z}[x,y]\rightarrow r_p(H_n(1,q))$ given by $\phi(x)=[V(1,1)]$ and $\phi(y)=[V(2,0)]$. By Corollary \ref{5.4}(1) and Proposition \ref{5.7}, $\phi(I)=0$. Hence $\phi$ induces a ring epimorphism $\overline{\phi}: \mathbb{Z}[x,y]/I\rightarrow r_p(H_n(1,q))$ such that $\phi=\overline{\phi}\circ\pi$, where $\pi: \mathbb{Z}[x,y]\rightarrow\mathbb{Z}[x,y]/I$ is the canonical projection. Let $\overline{u}=\pi(u)$ for any $u\in\mathbb{Z}[x,y]$. Then by the definition of $I$ and the proof of Corollary \ref{5.8}, one knows that $\mathbb{Z}[x,y]/I$ is generated, as a $\mathbb Z$-module, by $\{\overline{x}^l\overline{y}^m\mid 0\leqslant l\leqslant n-1,\ 0\leqslant m\leqslant 2n-2\}$. For any $0\leqslant l\leqslant n-1$ and $0\leqslant m\leqslant 2n-2$, we have $\overline{\phi}(\overline{x}^l\overline{y}^m)=\overline{\phi}(\overline{x})^l\overline{\phi}(\overline{y})^m=\phi(x)^l\phi(y)^m =[V(1,1)]^l[V(2,0)]^m$. By Corollary \ref{5.8}, $\{[V(1,1)]^l[V(2,0)]^m\mid 0\leqslant l\leqslant n-1,\ 0\leqslant m\leqslant 2n-2\}$ is a linearly independent set over $\mathbb Z$, which implies that $\{\overline{x}^l\overline{y}^m\mid 0\leqslant l\leqslant n-1,\ 0\leqslant m\leqslant 2n-2\}$ is also a linearly independent set over $\mathbb Z$. It follows that $\{\overline{x}^l\overline{y}^m\mid 0\leqslant l\leqslant n-1,\ 0\leqslant m\leqslant 2n-2\}$ is a $\mathbb Z$-basis of $\mathbb{Z}[x,y]/I$. Consequently, $\overline{\phi}$ is a $\mathbb Z$-module isomorphism, and so it is a ring isomorphism.
\end{proof}

\begin{center}
\Large {\bf Acknowledgment}
\end{center}
This work is supported by NSF of China (No. 11571298) and TAPP of Jiangsu Higher Education Institutions (No. PPZY2015B109).

\begin{thebibliography}{99}
\bibitem{AndrFanGarVen} N. Andruskiewitsch, F. Fantino, G. A. Garcia and L. Vendramin, On Nichols algebras associated to simple racks, in: Groups, Algebras and Applications, Contemp. Math., vol. 537, Amer. Math. Soc., 2011, pp. 31-56.
\bibitem{Archer} L. Archer, On certain quotients of the Green rings of dihedral 2-groups, J. Pure Appl. Algebra 212 (2008), 1888-1897.
\bibitem{BenkPerWith} G. Benkart, M. Pereira and S. Witherspoon, Yetter-Drinfeld modules under cocycle twists, J. Algebra 324 (2010), 2990-3006.
\bibitem{BenCar} D. J. Benson and J. F. Carlson, Nilpotent elements in the Green ring, J. Algebra 104 (1986), 329-350.
\bibitem{BenPar} D. J. Benson and R. A. Parker, The Green ring of a finite group, J. Algebra 87 (1984), 290-331.
\bibitem{BrJoh} R. M. Bryant and M. Johnson, Periodicity of Adams operations on the Green ring of a finite group, J. Pure Appl. Algebra 215 (2011), 989-1002.
\bibitem{chen99} H. X. Chen, Skew pairing, cocycle deformations and double crossproducts, Acta Math. Sinica, English Ser. 15 (1999), 225-234.
\bibitem{Ch1} H. X. Chen, A class of noncommutative and noncocommutative Hopf algebras-the quantum version, Comm. Algebra 27 (1999), 5011-5023.
\bibitem{Ch2} H. X. Chen, Irreducible representations of a class of quantum doubles, J. Algebra 225 (2000), 391-409.
\bibitem{Ch4} H. X. Chen, Representations of a class of Drinfeld's doubles, Comm. Algebra 33 (2005), 2809-2825.
\bibitem{Ch5} H. X. Chen, The Green ring of Drinfeld double $D(H_4)$, Algebr. Represent. Theor. 17 (2014), 1457-1483.
\bibitem{ChenHassenSun} H. X. Chen, H. E. S. Mohammed and H. Sun, Indecomposable decomposition of tensor products of modules over Drinfeld doubles of Taft algebras, arXiv:1503.04393v3 [math.RT].
\bibitem{ChVOZh} H. X. Chen, F. Van Oystaeyen and Y. H. Zhang, The Green rings of Taft algebras, Proc. Amer. Math. Soc. 142 (2014), 765-775.
\bibitem{ChenZhang} H. X. Chen and Y. H. Zhang, Cocycle deformations and Brauer groups, Comm. Algebra 35 (2007), 399-433.
\bibitem{Chin} W. Chin, Special biserial coalgebras and representations of quantum SL(2), J. Algebra 353 (2012), 1-21.
\bibitem{Cib} C. Cibils, A quiver quantum group, Comm. Math. Phys. 157 (1993), 459-477.
\bibitem{Cib99} C. Cibils, The projective class ring of basic and split Hopf algebras, K-Theory 17 (1999), 385-393.
\bibitem{Doi} Y. Doi, Braided bialgebras and quadratic bialgebras, Comm. Algebra 21 (1993), 1731-1749.
\bibitem{DoiTak} Y. Doi and M. Takeuchi, Multiplication alteration by two-cocycles-the quantum version, Comm. Algebra 22 (1994), 5715-5732.
\bibitem{Green} J. A. Green, The modular representation algebra of a finite group, Ill. J. Math. 6(4) (1962), 607-619.
\bibitem{GuiKasMas} P. Guillot, C. Kassel and A. Masuoka, Twisting algebras using non-commutative torsors: explicit computations, Math. Z. 271 (2012), 789-818.
\bibitem{HTW} I. Hambleton, L. R. Taylor and E. B. Williams, Dress induction and Burnside quotient Green ring, Algebra Number Theory 3 (2009), 511-541.
\bibitem{Ka} C. Kassel, Quantum Groups, Springer-Verlag, New York, 1995.
\bibitem{LiHu} Y. Li and N. Hu, The Green rings of the 2-rank Taft algebra and its two relatives twisted, J. Algebra 410 (2014), 1-35.
\bibitem{LiZhang} L. B. Li and Y. H. Zhang, The Green rings of the generalized Taft Hopf algebras, Contemp. Math. 585 (2013), 275-288.
\bibitem{Liu} G. Liu, On the structure of tame graded basic Hopf algebras, J. Algebra 299 (2006), 841-853.
\bibitem{Lo} M. Lorenz, Representations of finite-dimensional Hopf algebras, J. Algebra 188 (1997), 476-505.
\bibitem{Maj92} S. Majid, Algebras and Hopf algebras in braided categories, in: Advances in Hopf Algebras, Chicago, IL, 1992, Lect. Notes Pure Appl. Math., vol. 158, Dekker, New York, 1994, pp. 55-105.
\bibitem{Maj} S. Majid, Foundations of Quantum Group Theory, Cambridge Univ. Press, Cambridge, 1995.
\bibitem{MajOec} S. Majid and R. Oeckl, Twisting of quantum differentials and the Planck scale Hopf algebra, Comm. Math. Phys. 205 (1999), 617-655.
\bibitem{Mon} S. Montgomery, Hopf Algebras and Their Actions on Rings, CBMS Series in Math., Vol. 82, Amer. Math. Soc., Providence, 1993.
\bibitem{ObSch} U. Oberst and H.-J. Schneider, Über Untergruppen endlicher algebraischer Gruppen, Manuscripta Math. 8 (1973), 217-241.
\bibitem{Ringel} C. M. Ringel, The representation type of local algebras, in: Representations of Algebras, Lecture Notes in Math., Vol. 488, Springer, 1975, pp. 282-305.
\bibitem{Sw} M. E. Sweedler, Hopf Algebras, Benjamin, New York, 1969.
\bibitem{Ta} E. J. Taft, The order of the antipode of a finite-dimensional Hopf algebra, Proc. Nat. Acad. Sci. USA 68 (1971), 2631-2633.
\bibitem{Wakui} M. Wakui, Various structures associated to the representation categories of eight dimensional non-semisimple Hopf algebras, Algebr. Represent. Theory 7 (2004), 491-515.
\bibitem{With} S. J. Witherspoon, The representation ring of the quantum double of a finite group, J. Algebra 179 (1996), 305-329.
\bibitem{ZWLC} Y.
Zhang, F. Wu, L. Liu and H. X. Chen, Grothendieck groups of a class of quantum doubles, Algebra Colloq. 15 (2008), 431-448.
\end{thebibliography}

\end{document}
\begin{document} \title{Open system dynamics with non-Markovian quantum jumps} \author{J. Piilo} \email{[email protected]} \affiliation{ Department of Physics and Astronomy, University of Turku, FI-20014 Turun yliopisto, Finland } \author{K. H\"ark\"onen} \affiliation{ Department of Physics and Astronomy, University of Turku, FI-20014 Turun yliopisto, Finland } \author{S. Maniscalco} \affiliation{ Department of Physics and Astronomy, University of Turku, FI-20014 Turun yliopisto, Finland } \author{K.-A. Suominen} \affiliation{ Department of Physics and Astronomy, University of Turku, FI-20014 Turun yliopisto, Finland } \date{\today}
\begin{abstract}
We discuss in detail how non-Markovian open system dynamics can be described in terms of quantum jumps [J. Piilo {\it et al.}, Phys.~Rev.~Lett.~{\bf 100}, 180402 (2008)]. Our results demonstrate that it is possible to have a jump description contained in the physical Hilbert space of the reduced system. The developed non-Markovian quantum jump (NMQJ) approach is a generalization of the Markovian Monte Carlo Wave Function (MCWF) method to the non-Markovian regime. The method conserves both the probabilities in the density matrix and the norms of the state vectors exactly, and sheds new light on non-Markovian dynamics. The dynamics of the pure state ensemble illustrates how a local-in-time master equation can describe memory effects and how the current state of the system carries information on its earlier state. Our approach solves the problem of negative jump probabilities of the Markovian MCWF method in the non-Markovian regime by defining the corresponding jump process with positive probability. The results demonstrate that in the theoretical description of non-Markovian open systems, there occur quantum jumps which recreate seemingly lost superpositions due to the memory.
\end{abstract}
\pacs{03.65.Yz, 42.50.Lc} \maketitle

\section{Introduction}
The theory of open quantum systems describes the dynamics of a system of interest interacting with its environment~\cite{Breuer2002}. The system-environment interaction leads to non-unitary reduced system dynamics, and the system state is described by a density matrix instead of the single state vector used for closed systems. Generally, the density matrix evolution is governed by a master equation whose unitary part contains the dynamics as given by the system Hamiltonian, while the non-unitary dissipator describes the effects that the environment has on the system. The presence of the environment leads to decoherence, which is harmful for practical applications like quantum information processing~\cite{Stenholm2005}. On the other hand, decoherence has a role in open fundamental problems of quantum physics such as the quantum-to-classical transition~\cite{Zurek}.

Often, the environment is seen to have unavoidable effects on the system dynamics. However, the recently developed ability to control quantum systems and the implementation of reservoir engineering techniques are revising the role of the environment~\cite{engineerNIST,control,Zoller}. This may lead to new ways to control the system of interest indirectly via the control of the system--reservoir interaction and the properties of the environment.

In memoryless Markovian open systems, the environment acts as a sink for the system information. Due to the system-reservoir interaction, the system of interest loses information on its state into the environment, and this lost information does not play any further role in the system dynamics.
However, if the environment has a non-trivial structure, then the seemingly lost information can return to the system at a later time, leading to non-Markovian dynamics with memory. This memory effect is the essence of non-Markovian dynamics.

Non-Markovian systems appear in many branches of physics, such as quantum optics~\cite{Breuer2002,Gardiner96a, Lambro}, solid state physics~\cite{SS}, quantum chemistry~\cite{QC}, and quantum information processing~\cite{QIP}. Recently, non-Markovian features have also been exploited in the context of biomolecules where the environment consists of protein solvents~\cite{Thorwart08}. However, the elusive nature of non-Markovian dynamics often makes it difficult to obtain insight into the microscopic physical processes governing the time evolution. At the same time, the complex mathematical structure of non-Markovian models generally prevents one from solving the dynamics of the system of interest. Hence, new ways to describe non-Markovianity and new methods to solve non-Markovian dynamics are highly desired.

The density matrix can also be seen as a collection, or ensemble, of state vectors. From this point of view, the interaction between the system and the reservoir removes the precise information about which specific state vector describes the system state. Instead, the state of the open system is associated with an ensemble of state vectors, where each state vector has a certain (classical) probability of appearance. This view has led to the development of Monte Carlo simulation methods for Markovian~\cite{DCM1992,Dum92a,Carmichael, Plenio98,Gisin,Percival} and non-Markovian~\cite{Imamoglu,Garraway1997,Breuer99,Gambetta2004,BreuerGen,Piilo08,Strunz1999} open systems. In these methods, the time evolution of each state vector in the ensemble contains a stochastic element which can be discontinuous (quantum jump)~\cite{DCM1992,Dum92a,Carmichael,Imamoglu,Garraway1997,Breuer99,Gambetta2004,BreuerGen,Piilo08} or continuous (quantum state diffusion)~\cite{Gisin,Percival,Strunz1999}.

One of the most common methods to treat Markovian dynamics is the Monte Carlo wave function (MCWF) method, which exploits quantum jumps~\cite{DCM1992}. However, a generalization of this Markovian method to the non-Markovian regime has turned out to be a challenging problem. The central obstacle has been the appearance of negative quantum jump probabilities due to the temporarily negative decay rates of non-Markovian dynamics. Earlier approaches to this problem either exploit auxiliary extensions of the Hilbert space of the system~\cite{Imamoglu,Garraway1997,Breuer99,BreuerGen} or use the state of the total system~\cite{Gambetta2004}.

We have recently shown that the jump-like unravelling of non-Markovian master equations is possible within the Hilbert space of the system, and hence the auxiliary extension of the system Hilbert space is not necessarily needed~\cite{Piilo08}. The key feature of the developed non-Markovian quantum jump (NMQJ) method is the notion that, when the decay rates appearing in the master equation become negative, the direction of the information flow between the system and the reservoir gets reversed. During the initial positive decay region, the information flows from the system to the environment, while during the negative decay the system may regain some of the information it lost earlier. In terms of quantum jumps this means that the seemingly lost superpositions in the ensemble can be restored.
This leads to new insight into the concept of memory, which is the central ingredient of non-Markovian dynamics. We also describe in detail the positive and negative factors affecting the numerical performance of the method. The ultimate limit for the numerical performance is given by the effective ensemble size $N_{\rm eff}$ (Sec.~\ref{Sec:Num}), since the method needs to evolve $N_{\rm eff}$ state vectors simultaneously.

Our results help to explain why local-in-time master equations \cite{Breuer2002,Andersson} can indeed describe systems with memory, and they also reveal some counterintuitive features of non-Markovian dynamics. In the region of negative decay rates, the rate of a process is proportional to the occupation of its target state, instead of its source state, which challenges the classical view. We show here two different proofs of the equivalence between the algorithm and the master equation, discuss in detail how the method works, and apply it to multi-level atom schemes. Recently, the existence of a measurement scheme interpretation of non-Markovian dynamics has been actively discussed~\cite{Diosi08,Gambetta08}. Our results are in line with those of Ref.~\cite{Gambetta08}. We discuss this and other insights provided by the NMQJ method in detail.

We have organized the paper in the following way. Sec.~\ref{MCWF} describes briefly the Markovian MCWF method and sets the scene for its non-Markovian generalization, which is presented in Sec.~\ref{NMQJ}. We then present several examples of the use of the NMQJ method in Sec.~\ref{Exs} and discuss the insight provided by the method in Sec.~\ref{Discu}. Finally, Sec.~\ref{Conclu} concludes the paper.

\section{Markovian Monte Carlo wave function method}\label{MCWF}
Our non-Markovian quantum jump method generalizes the MCWF method~\cite{DCM1992} to the non-Markovian regime. The algorithms and the proof of correspondence with the master equation for the two methods are very similar. The essential difference is the form of the jump operators and jump probabilities. We first present the central ingredients of the Markovian MCWF method and illustrate the problems that prevent its use for non-Markovian systems.

\subsection{The algorithm and equivalence with master equation}
The MCWF method is probably the most commonly used Monte Carlo method to treat Markovian open systems whose dynamics is governed by the master equation in the Lindblad form~\cite{DCM1992,Gorini}
\begin{eqnarray}
\dot{\rho} (t) &=& \frac{1}{\imath\hbar} \left[ H_S, \rho (t) \right] + \sum_j\Gamma_j C_j \rho(t)C_j^{\dag} \nonumber \\
&-& \frac{1}{2}\sum_j\Gamma_j \left\{C_j^{\dag} C_j, \rho(t)\right\}.
\label{Eq:Mark}
\end{eqnarray}
Here, $\rho$ is the density matrix of the reduced system, $H_S$ the Hermitian system Hamiltonian, $\Gamma_j$ is the positive and constant decay rate of decay channel $j$, and $C_j$ are the Lindblad (jump) operators describing the effects of the environment on the reduced system. To unravel the master equation (\ref{Eq:Mark}), the MCWF method generates an ensemble of stochastic state vector realizations whose deterministic and continuous time evolution is interrupted by randomly occurring discontinuous quantum jumps. The average over the ensemble of stochastic realizations gives the properties of the reduced system at any given moment of time.
A generic way to write the density matrix in terms of the ensemble is \begin{equation} \label{Eq:Rho} \rho(t) = \sum_\alpha \frac{N_\alpha(t)}{N} |\psi_\alpha(t)\rangle \langle \psi_\alpha(t)|, \end{equation} where $N_\alpha(t)$ is the number of ensemble members in the state $|\psi_\alpha(t)\rangle$ at time $t$ and $N$ is the total number of state vectors in the ensemble (ensemble size). The method proceeds in discrete time steps $\delta t$, and we consider one step that takes us from time $t$ to $t+\delta t$. During this time step, a given state vector $|\psi_{\alpha}(t)\rangle$ evolves either in a deterministic way or performs a randomly occurring quantum jump. The deterministic evolution is given by the non-Hermitian Hamiltonian \begin{equation} H= H_S -\frac{i\hbar}{2}\sum_j\Gamma_jC_j^{\dagger}C_j. \label{Eq:H} \end{equation} The essential feature here is the second term on the r.h.s., which is constructed from the jump operators that appear in the master equation (\ref{Eq:Mark}). This term reduces, in the Markovian case, the occupation probability of the states which decay. The deterministic time-evolution by the Hamiltonian (\ref{Eq:H}) leads, for small enough time step $\delta t$, to the state \begin{equation} \label{Eq:Phi} | \phi_\alpha(t+\delta t)\rangle = \left(1-\frac{iH\delta t}{\hbar}\right) |\psi_\alpha(t)\rangle. \end{equation} Before the next time step, this state is renormalized and the time evolution of $|\psi_{\alpha}\rangle$ is \begin{equation} \label{Eq:Det} |\psi_{\alpha}(t)\rangle \rightarrow |\psi_\alpha(t+\delta t)\rangle = \frac{ |\phi_\alpha(t+\delta t)\rangle} {|||\phi_\alpha(t+\delta t)\rangle ||}. \end{equation} If, instead of the deterministic evolution, a quantum jump to channel $j$ occurs, the state vector changes in a discontinuous way \begin{equation} \label{Eq:Jump} |\psi_\alpha(t)\rangle \rightarrow |\psi_\alpha(t+\delta t)\rangle =\frac{C_{j} |\psi_\alpha(t)\rangle} {||C_j| \psi_\alpha(t)\rangle||}. \end{equation} The probability $p_\alpha^{j}$ for a state vector $|\psi_{\alpha}\rangle$ to have a quantum jump to channel $j$ is directly proportional to the corresponding decay rate $\Gamma_j$, the time step size $\delta t$ and the occupation probability of the decaying state \begin{equation} \label{Eq:pj} p_\alpha^{j}(t)=\Gamma_{j}\delta t \langle \psi_\alpha(t) | C_j^{\dagger}C_{j}|\psi_\alpha(t)\rangle. \end{equation} The choice between the deterministic and jump evolutions, Eqs.~(\ref{Eq:Det}) and (\ref{Eq:Jump}) respectively, is done by comparing a generated random number $\xi$ to the total jump probability $p_{\alpha}$. This is the sum over channel specific probabilities $p_{\alpha}^j$ \begin{equation} p_{\alpha}=\sum_jp_{\alpha}^j, \end{equation} and has a direct relation to the norm of $| \phi_\alpha(t+\delta t)\rangle$: $1-p_{\alpha}=|||\phi_\alpha(t+\delta t)\rangle ||^2$. By calculating the average evolution $\overline{\sigma_{\alpha}} $ of $|\psi_{\alpha}(t)\rangle$ over the deterministic and jump paths one obtains \begin{eqnarray} \label{Eq:Ave} \overline{\sigma_{\alpha}(t+\delta t)} &=& (1-p_{\alpha}) \frac{| \phi_{\alpha}(t+\delta t)\rangle\langle \phi_{\alpha}(t+\delta t) |}{1-p_{\alpha}} \nonumber \\ &+& \sum_j p_{\alpha}^j \frac{C_j| \psi_{\alpha}(t)\rangle\ \langle \psi_{\alpha}(t) |C_j^{\dagger}}{\langle \psi_{\alpha}(t) | C_j^{\dagger}C_j|\psi_{\alpha}(t)\rangle}. 
\label{Eq:SimuMark}
\end{eqnarray}
Here, $(1-p_{\alpha})$ is the no-jump probability which weights the deterministic evolution, and the jump probabilities $p_{\alpha}^j$ weight the corresponding jump paths. By inserting Eqs.~(\ref{Eq:Phi}) and (\ref{Eq:pj}) into Eq.~(\ref{Eq:Ave}) and rearranging the terms, one obtains after a straightforward calculation the master equation (\ref{Eq:Mark}) for the state vector $|\psi_{\alpha}(t)\rangle$. Taking a further step by considering the average over the whole ensemble,
\begin{equation}
\label{Eq:MCAve}
\overline{\sigma(t+\delta t)}=\sum_\alpha \frac{N_{\alpha}}{N}\overline{\sigma_{\alpha}(t+\delta t)},
\end{equation}
it is straightforward to see that the master equation (\ref{Eq:Mark}) and the MCWF method result given by Eq.~(\ref{Eq:MCAve}) match, and the two approaches are indeed equivalent descriptions of the Markovian open system dynamics.
\subsection{Why does the MCWF method not work for non-Markovian systems?}
In Markovian systems, the decay and decoherence processes occur at constant positive rates [c.f.~Eq.~(\ref{Eq:Mark})]. This indicates a constant flow of information from the system to the environment before the steady state is reached. For non-Markovian systems, the decay rates are time-dependent and may acquire temporarily negative values (to be described in detail in the next Section). During the initial period of positive time-dependent decay, the rate of the information flow changes but the direction of the flow remains constant, i.e., from the system to the environment. When the decay rate becomes negative, the direction of the information flow is reversed and the reduced system, due to the non-Markovian memory, begins to recall the information that was lost earlier.
In the MCWF method, the quantum jump probability is directly proportional to the decay rate [c.f.~Eq.~(\ref{Eq:pj})] which acquires negative values in the non-Markovian case. As a consequence of these two facts, a quantum jump has a negative probability of occurring while the deterministic evolution has a probability larger than $1$. Therefore, it is impossible to make a decision between these two alternatives and, as a consequence, the MCWF method cannot be used to describe non-Markovian dynamics.
Earlier attempts to solve this problem usually exploit the idea that non-Markovian dynamics can be converted to Markovian dynamics by extending the Hilbert space of the system~\cite{Imamoglu,Garraway1997,Breuer99,BreuerGen}. This may come at a cost in computational efficiency and may also prevent obtaining insight into non-Markovian dynamics. This also leaves open a fundamental question: Is there a corresponding jump process in the Hilbert space of the system which has a positive probability?
\section{Non-Markovian quantum jumps}\label{NMQJ}
Our starting point is the general local-in-time non-Markovian master equation~\cite{Breuer2002,BreuerGen}
\begin{eqnarray}
\dot{\rho}(t) &=& \frac{1}{i\hbar} \left[ H_S, \rho(t)\right] + \sum_j\Delta_j(t) C_j(t) \rho(t)C_j^{\dag}(t) \nonumber \\
& -&\frac{1}{2}\sum_j\Delta_j(t)\left\{\rho(t),C_j^{\dag}(t) C_j(t) \right\}. \label{Eq:MNM}
\end{eqnarray}
The difference, compared to the Markovian master equation~(\ref{Eq:Mark}), is that the decay rates $\Delta_j(t)$ depend on time and may acquire negative values. In the most general case the Lindblad operators $C_j(t)$ may also depend on time.
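To make the breakdown of the Markovian algorithm concrete, the following minimal sketch implements one time step of the MCWF algorithm of Eqs.~(\ref{Eq:Phi})--(\ref{Eq:pj}) for a single decay channel, with $\hbar=1$ and hypothetical function and variable names. For a time-dependent rate that has turned negative, the jump probability computed below is negative, so the random-number comparison no longer defines a valid branching between the jump and the deterministic evolution; this is precisely the step that the NMQJ method replaces.

\begin{verbatim}
import numpy as np

def mcwf_step(psi, H_S, C, rate, dt, rng):
    # One MCWF time step for a single channel C with decay rate `rate`
    # (minimal sketch, hbar = 1, hypothetical interface).
    p = rate * dt * np.vdot(C @ psi, C @ psi).real   # jump probability, Eq. (Eq:pj)
    # For rate < 0 (non-Markovian case) p is negative and the comparison
    # below is meaningless: this is where the Markovian algorithm fails.
    if rng.random() < p:                             # quantum jump, Eq. (Eq:Jump)
        psi = C @ psi
    else:                                            # deterministic step, Eqs. (Eq:H), (Eq:Phi)
        H_eff = H_S - 0.5j * rate * (C.conj().T @ C)
        psi = psi - 1j * dt * (H_eff @ psi)
    return psi / np.linalg.norm(psi)                 # renormalization, Eq. (Eq:Det)
\end{verbatim}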
\subsection{Special case: Non-Markovian time scale is the shortest one\label{sec:simpleCase}}
Before going to the general solution in the next subsection, we first describe the method for the simple case in which the non-Markovian time-scale is the fastest one, which is most often the case. This allows us to introduce the NMQJ method in a way that is conceptually rather straightforward. With this approximation, the state vectors do not have time to evolve due to the system Hamiltonian $H_S$ on the time scale of non-Markovian dynamics.
Consider now a non-Markovian system where the decay rates oscillate between positive and negative values before reaching a constant Markovian value. For the sake of simplicity, we assume here first that all the decay rates take negative values simultaneously. At the end of the first positive decay period, the initial pure state has evolved to a mixed state which can be described in terms of the jump paths, unravelled by the MCWF method in the positive region, as
\begin{eqnarray}
\label{Eq:Rho1}
\rho(t) &=& \frac{N_0}{N} |\psi_0(t)\rangle \langle \psi_0(t) | + \sum_j \frac{N_j}{N} |\psi_j\rangle \langle \psi_j | \nonumber \\
&+& \sum_{j,k} \frac{N_{j,k}}{N} |\psi_{j,k}\rangle \langle \psi_{j,k} | + ...
\end{eqnarray}
Here, $|\psi_0(t)\rangle$ is the deterministic evolution from the initial state $|\psi_0(0)\rangle$ without jumps and $|\psi_j\rangle$ describe the ensemble members that have performed one jump to channel $j$, such that $|\psi_j\rangle= C_j |\psi_0\rangle / || C_j |\psi_0\rangle||$. In the next term, $|\psi_{j,k}\rangle$ correspond to members who have first performed a jump to channel $j$ and then a second jump to channel $k$, so that $|\psi_{j,k}\rangle= C_kC_j |\psi_0\rangle / || C_kC_j |\psi_0\rangle||$. The rest of the terms follow correspondingly. $N_0$, $N_j$ and $N_{j,k}$ are the corresponding numbers of the ensemble members.
\begin{figure}
\caption{\label{Fig:probFlow}}
\end{figure}
The central question is now how the ensemble (\ref{Eq:Rho1}) is evolved so that the result matches the master equation (\ref{Eq:MNM}). The sign change of the decay rate indicates the reversal of the information flow between the system and the environment, so that for negative decay the system partially recovers the information that it lost earlier. This restoration of lost information is the essence of the non-Markovian memory. In other words, the decoherence that occurred in the preceding positive decay region turns to re-coherence in the negative decay region, i.e., the earlier effects of decoherence get partially cancelled.
This leads to the idea that non-Markovian quantum jumps, taking place in the negative decay region, cancel the effect of the jumps that occurred earlier in the positive decay region and destroyed quantum superpositions. Reverse quantum jumps during negative decay are thus expected to counteract prior positive decay jumps. This means that in the expansion (\ref{Eq:Rho1}), state $|\psi_j\rangle$ jumps back to the state $|\psi_0\rangle$, state $|\psi_{j,k}\rangle$ jumps to the state $|\psi_j\rangle$, and so on. The direction of the probability flow gets reversed in the negative decay region, as illustrated in Fig.~\ref{Fig:probFlow}. The corresponding non-Markovian quantum jump operators are
\begin{eqnarray}
\label{Eq:JOps1}
D_{j\rightarrow 0} &=& |\psi_0(t)\rangle \langle \psi_{j}|, \nonumber \\
D_{j,k\rightarrow j} &=& |\psi_j \rangle \langle \psi_{j,k}|,
\end{eqnarray}
and so on.
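As a concrete illustration, anticipating the two-level example of Sec.~\ref{Exs}: for a single channel with $C_1 = |b\rangle\langle a|$, the only jumped state is the ground state $|\psi_1\rangle=|b\rangle$, and the only reverse jump operator is $D_{1\rightarrow 0}=|\psi_0(t)\rangle\langle b|$, which restores a member that decayed earlier to the deterministically evolved superposition $|\psi_0(t)\rangle$.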
The probabilities for the jumps to occur are
\begin{eqnarray}
\label{Eq:JProbs1}
P_{j\rightarrow 0} &=& \frac{N_0 \delta t |\Delta_j| \langle \psi_0(t) | C_j^{\dagger} C_j |\psi_0(t)\rangle} {N_{j}}, \nonumber \\
P_{j,k\rightarrow j} &=& \frac{N_j \delta t |\Delta_k| \langle \psi_j | C_k^{\dagger} C_k |\psi_j \rangle}{N_{j,k}}.
\end{eqnarray}
Equations (\ref{Eq:JOps1}) and (\ref{Eq:JProbs1}) demonstrate that the probability for reversing a jump in one particular channel is proportional to the number of ensemble members that have not yet jumped in that channel. The numerator gives the total jump probability in the ensemble, which is distributed equally among those ensemble members which can perform the jumps. By doing the reversed jump according to Eq.~(\ref{Eq:JOps1}), the discontinuous history of the ensemble member is preserved. This means that when we are reversing a jump, we are not erasing the past.
To prove that the algorithm matches the master equation, we follow very closely the proof of the MCWF method~\cite{DCM1992}. The basic idea is to average over the deterministic and jump paths in order to obtain an equation of motion for the reduced density matrix. Evolving the ensemble (\ref{Eq:Rho1}) over the time step $\delta t$ gives
\begin{eqnarray}
\overline{\sigma(t+\delta t)} &=& \frac{N_0}{N}\Theta_0(t) + \sum_j \frac{N_j}{N} \left[ \Theta_j(t)+ \Theta_{j\rightarrow 0}(t) \right] \nonumber \\
&+& \sum_{j,k} \frac{N_{j,k}}{N} \left[\Theta_{j,k}(t) + \Theta_{j,k\rightarrow j}(t) \right]+ ...
\label{Eq:Ave1}
\end{eqnarray}
Here, $\Theta_0(t)$ is the contribution of the ensemble members that have evolved deterministically, without jumps, between times $0$ and $t$. $\Theta_j(t)$ is the contribution of the ensemble members who jumped earlier once to channel $j$ and whose jump is not cancelled at the current point of time. In $\Theta_{j\rightarrow 0}(t)$, there has been one jump to channel $j$ which gets cancelled at the current point of time. The rest of the terms arise correspondingly. It is worth noting that it is not possible to cancel something which never happened. Hence there are no jumps which can be cancelled in the $\Theta_0(t)$ part. Taking into account the appropriate weights and keeping in mind the jump operators and probabilities from Eqs.~(\ref{Eq:JOps1}) and (\ref{Eq:JProbs1}), these terms can be written explicitly as
\begin{eqnarray}
\label{Eq:Thetas}
\Theta_0(t) &=& \frac{| \phi_0(t+\delta t)\rangle \langle \phi_0(t+\delta t) |}{1+n_0}, \nonumber \\
\Theta_j(t) &=& (1-P_{j\rightarrow 0}) \frac{| \phi_{j}(t+\delta t)\rangle \langle \phi_{j}(t+\delta t) |}{1+n_{j}}, \nonumber \\
\Theta_{j\rightarrow 0}(t) &=& P_{j\rightarrow 0} D_{j\rightarrow 0} |\psi_{j}(t)\rangle \langle \psi_{j}(t) | D_{j\rightarrow 0}^{\dagger}.
\end{eqnarray}
Here, the time-evolved deterministic states are
\begin{eqnarray}
\label{Eq:Det1}
| \phi_0(t+\delta t)\rangle &=& (1-\frac{iH_S\delta t}{\hbar} +\sum_m\frac{|\Delta_m|\delta t}{2} C_m^{\dagger}C_m) |\psi_0(t)\rangle, \nonumber \\
| \phi_{j}(t+\delta t)\rangle &=& (1-\frac{iH_S\delta t}{\hbar} +\sum_m\frac{|\Delta_m|\delta t}{2} C_m^{\dagger}C_m) |\psi_j(t)\rangle,
\end{eqnarray}
and their normalization factors are
\begin{eqnarray}
\label{Eq:Norm1}
n_0&=&\sum_m\delta t |\Delta_m(t)| \langle \psi_0(t) | C_m^{\dagger} C_m |\psi_0(t)\rangle, \nonumber \\
n_j&=&\sum_m\delta t |\Delta_m(t)| \langle \psi_j(t) | C_m^{\dagger} C_m |\psi_j(t)\rangle.
\end{eqnarray}
All the rest of the terms follow correspondingly.
Using Eqs.~(\ref{Eq:Det1}) and (\ref{Eq:Norm1}) in Eq.~(\ref{Eq:Thetas}) and inserting the results into Eq.~(\ref{Eq:Ave1}) gives the master equation (\ref{Eq:MNM}).
In a multi-channel system, positive and negative channels may appear simultaneously. The description above contains all the negative channels while the positive channels evolve according to the MCWF method. Hence, the match between the positive channel dynamics and the master equation can be proven along the lines of the MCWF proof. For the sake of simplicity, we leave the detailed description of simultaneous positive and negative channels to the general treatment presented in the next subsection.
\subsection{General case}
The simplified case presented in the previous subsection~\ref{sec:simpleCase} is now generalized. The simple treatment fails in the general case because it assumes that the jump history can be unambiguously reconstructed for each state in the decomposition \eqref{Eq:Rho1}. In general, starting from $|\psi_0\rangle\langle \psi_0 |$, many different combinations of jumps may lead to an identical contribution $|\psi_\alpha\rangle\langle \psi_\alpha |$, and all these states should be counted together to form $N_\alpha$. As in the Markovian case, we write the density matrix in the most generic way
\begin{equation}
\label{Eq:Rho2}
\rho(t) = \sum_\alpha \frac{N_\alpha(t)}{N} |\psi_\alpha(t)\rangle \langle \psi_\alpha(t)|.
\end{equation}
The positive and negative decay channels are denoted by $j_+$ and $j_-$, respectively, while the corresponding decay rates are $\Delta_{j_+}(t)>0$ and $\Delta_{j_-}(t)<0$. With this notation the master equation (\ref{Eq:MNM}) can be written as
\begin{widetext}
\begin{eqnarray}
\dot{\rho}(t) &=& \frac{1}{i\hbar} \left[ H_S, \rho(t)\right] +\sum_{j_+}\Delta_{j_+}(t) \left[ C_{j_+}(t) \rho(t)C_{j_+}^{\dag}(t) -\frac{1}{2}\left\{\rho(t),C_{j_+}^{\dag}(t) C_{j_+}(t) \right\}\right] \nonumber \\
&-& \sum_{j_-}|\Delta_{j_-}(t)| \left[ C_{j_-}(t) \rho(t)C_{j_-}^{\dag}(t) -\frac{1}{2}\left\{\rho(t),C_{j_-}^{\dag}(t) C_{j_-}(t) \right\}\right]. \nonumber \\
\label{Eq:MNM2}
\end{eqnarray}
\end{widetext}
The deterministic time evolution of the state vectors $|\psi_{\alpha}(t)\rangle$ occurs as before
\begin{equation}
\label{Eq:Det2}
|\psi_{\alpha}(t)\rangle \rightarrow |\psi_\alpha(t+\delta t)\rangle = \frac{ |\phi_\alpha(t+\delta t)\rangle} {|||\phi_\alpha(t+\delta t)\rangle ||},
\end{equation}
where the non-normalized state $|\phi_\alpha(t+\delta t)\rangle$ has been obtained with the usual non-Hermitian Monte Carlo Hamiltonian. For the sake of convenience, we write this Hamiltonian by separating the positive and negative channels
\begin{eqnarray}
H &=& H_S-\frac{i\hbar}{2}\sum_{j_+}\Delta_{j_+}(t)C_{j_+}^{\dagger} (t) C_{j_+}(t) \nonumber \\
&-& \frac{i\hbar}{2}\sum_{j_-}\Delta_{j_-}(t)C_{j_-}^{\dagger} (t) C_{j_-}(t).
\label{eq:deterministicEvolution}
\end{eqnarray}
The jump probabilities and the jumps for the positive channels $j_+$ follow the MCWF prescription, i.e.,
\begin{equation}
P_\alpha^{j_+}(t)=\Delta_{j_+} (t)\delta t \langle \psi_\alpha (t) | C_{j_+}^{\dagger}(t)C_{j_+}(t)|\psi_\alpha (t)\rangle,
\label{eq:positiveJumpProbability}
\end{equation}
and
\begin{equation}
\label{Eq:Jump2}
|\psi_\alpha(t)\rangle \rightarrow |\psi_{\alpha'} (t+\delta t)\rangle =\frac{C_{j_+} |\psi_\alpha(t)\rangle} {||C_{j_+}| \psi_\alpha(t)\rangle||},
\end{equation}
respectively.
For negative channels $j_-$ the direction of the jump process gets reversed
\begin{equation}
\label{Eq:Jump3}
|\psi_{\alpha'} (t+\delta t)\rangle \leftarrow |\psi_\alpha(t)\rangle =\frac{C_{j_-} |\psi_{\alpha'} (t)\rangle} {||C_{j_-}| \psi_{\alpha'} (t)\rangle||}.
\end{equation}
In other words, the jump operator for negative channels takes the form
\begin{equation}
D_{\alpha\rightarrow \alpha'}^{j_-}(t)= |\psi_{\alpha'}(t)\rangle \langle \psi_{\alpha}(t)|,
\label{Eq:JOp}
\end{equation}
where the {\it source state} of the jump is $|\psi_{\alpha}(t)\rangle = C_{j_-}(t) |\psi_{\alpha'}(t)\rangle / ||C_{j_-}(t) |\psi_{\alpha'}(t)\rangle||$. The source and target states of the jump swap their roles when the decay rate becomes negative. This transition for a given state vector $|\psi_{\alpha}\rangle$ in the ensemble (\ref{Eq:Rho2}) occurs with probability
\begin{eqnarray}
P_{\alpha\rightarrow \alpha'}^{j_-}(t) = \frac{N_{\alpha'}(t)} {N_{\alpha}(t)} |\Delta_{j_-}(t)| \delta t \langle \psi_{\alpha'}(t) | C_{j_-}^{\dagger}(t) C_{j_-}(t) |\psi_{\alpha'}(t)\rangle.\nonumber \\
\label{Eq:JProb2M}
\end{eqnarray}
Note that the probability of the non-Markovian jump is governed by the target state $|\psi_{\alpha'}\rangle$ of the jump through the term $\langle \psi_{\alpha'}(t) | C_{j_-}^{\dagger}(t) C_{j_-}(t) |\psi_{\alpha'}(t)\rangle$. Moreover, if there are no ensemble members in the target state, $N_{\alpha'}=0$, then the jump probability is equal to zero.
The sign of the decay rate $\Delta_j(t)$ can be understood in the following way. First, when for a given channel $j$, $\Delta_j(t)>0$, the process goes as $|\psi\rangle\rightarrow |\psi'\rangle =C_{j}|\psi\rangle / ||C_{j} |\psi\rangle||$. Later on, when the decay rate becomes negative, $\Delta_j(t)<0$, the direction of this process is reversed and the jump occurs in the opposite direction, $|\psi\rangle\leftarrow |\psi'\rangle$.
Generally, Eq.~(\ref{Eq:Jump3}) indicates that the explicit target state $|\psi_{\alpha'}(t)\rangle$ of the reverse jump for the source state $|\psi_{\alpha}(t)\rangle$ is not necessarily unique. This means that the ensemble members in the state $|\psi_{\alpha}(t)\rangle$ can jump to different target states according to Eq.~(\ref{Eq:Jump3}) whenever the corresponding jump probability is larger than zero. The major factor for the computational cost is how many different types of state vectors are created during the positive decay region and the need to evolve them simultaneously due to their mutual dependence in the negative decay region. This point is discussed further in Sec.~\ref{Sec:Num}.
The proof of our NMQJ method again follows the same lines as the proof for the Markovian MCWF method~\cite{DCM1992} given in the previous Section. By weighting the deterministic and jump paths over the time step $\delta t$ with the appropriate probabilities we obtain the master equation (\ref{Eq:MNM}). Calculating the average $\overline{\sigma}$ of the evolution of the ensemble (\ref{Eq:Rho2}) over $\delta t$ gives
\begin{widetext}
\begin{eqnarray}
\label{Eq:AlgoM}
\overline{\sigma(t+\delta t)} &=& \sum_{\alpha}\frac{N_{\alpha}(t)}{N} \left[ \left( 1-\sum_{j_+}P_{\alpha}^{j_+}(t) -\sum_{j_-,\alpha'} P_{\alpha\rightarrow \alpha'}^{j_-}(t)\right) \right.
\frac{| \phi_{\alpha}(t+\delta t)\rangle \langle \phi_{\alpha}(t+\delta t) |}{||| \phi_{\alpha}(t+\delta t)\rangle ||^2} \nonumber \\
&+& \sum_{j_+} P_\alpha^{j_+}(t) \frac{C_{j_+}(t) |\psi_{\alpha}(t)\rangle \langle \psi_{\alpha}(t)| C_{j_+}^{\dagger}(t) } {||C_{j_+}(t) |\psi_{\alpha}(t)\rangle||^2} + \left. \sum_{j_-,\alpha'} P_{\alpha\rightarrow \alpha'}^{j_-}(t) D_{\alpha\rightarrow \alpha'}^{j_-}(t) |\psi_{\alpha}(t)\rangle \langle \psi_{\alpha}(t)| D_{\alpha\rightarrow {\alpha}'}^{j_- \dagger} (t) \right].
\end{eqnarray}
\end{widetext}
Here, the summations over $\alpha$ and $\alpha'$ run over the ensemble [c.f.~Eq.~(\ref{Eq:Rho2})], and the summations over $j_+$ and $j_-$ cover the positive and negative channels, respectively. The first term on the r.h.s., in the summation over $\alpha$, is the product of the no-jump probability and the deterministic evolution of the state vector, while the second and third terms describe the positive and negative channel jumps, respectively, with the corresponding probabilities.
The details of the proof are presented in Appendix A and here we briefly describe the main features. As in the Markovian MCWF case, the deterministic evolution gives the commutator and the anticommutator parts of the master equation. Moreover, the jump part of the positive channels follows the MCWF case, giving the remaining ``sandwich'' term for the positive channels $j_+$. After making the series expansion of the denominator of the deterministic part and keeping the terms to the first order in $\delta t$, we are left with the norm-change term due to the negative channels times the deterministic evolution, the jump probability for the negative channels times the deterministic evolution, and the jump term for the negative channels. As shown in Appendix A, the first and last of these three cancel each other and the second one gives the ``sandwich'' term of the master equation (\ref{Eq:MNM2}) for the negative channels. This completes the proof.
\section{Examples}\label{Exs}
In order to demonstrate the applicability of the NMQJ method we now give concrete examples. These examples also show how the method works at the level of single realizations in the ensemble. Our physical system of choice is an atom interacting with a Lorentzian-structured reservoir, e.g., an atom interacting with a single mode of a leaky cavity.
The first example is a two-level atom interacting off-resonantly with the cavity field, also known as the detuned Jaynes-Cummings model [c.f. Fig.~\ref{fig:examplesSchematic}~(a)]. We use this simple system to give a detailed walk-through description of how the NMQJ method is implemented in practice. The other examples deal with a three-level atom, another archetype of atomic systems, which has two independent decay channels and three different level geometries: the $\Lambda$-, V-, and ladder-systems [c.f. Fig.~\ref{fig:examplesSchematic}~(b)--(d)]. For these cases we see how having simultaneously both a negative and a positive channel results in rich dynamics. The structure of the effective ensemble, i.e., the states $|\psi_\alpha \rangle$ and the way in which they are connected by the different jump channels, is shown in Fig.~\ref{fig:examplesEnsemble} for each example case. This illustrates how physically identical states can be reached by different combinations of jumps in the V- and ladder-systems.
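At the level of the simulation, this effective-ensemble structure is exactly what the algorithm stores and updates: one copy of each distinct state vector $|\psi_\alpha(t)\rangle$ together with the integer occupation $N_\alpha(t)$ (see also Sec.~\ref{Sec:Num}). As a minimal sketch with a hypothetical interface, the following Python-style code organizes the negative-channel part of one time step, Eqs.~(\ref{Eq:Jump3})--(\ref{Eq:JProb2M}), in these terms; the deterministic evolution and the positive (MCWF) channels are handled as in Sec.~\ref{MCWF} and are not shown, and different possible target states are treated independently, which is valid to first order in $\delta t$.

\begin{verbatim}
import numpy as np

def reverse_jump_update(states, counts, C, rate, dt, rng):
    # Negative-channel bookkeeping for one NMQJ time step (sketch).
    # states[a] is the state vector |psi_a>, counts[a] the integer N_a,
    # rate = Delta_j(t) < 0. The global phases of the stored states are
    # assumed to be fixed by |psi_a> = C|psi_a'> / ||C|psi_a'>||.
    moves = []
    for a, psi_a in enumerate(states):              # candidate source states
        if counts[a] == 0:
            continue
        for ap, psi_ap in enumerate(states):        # candidate target states
            src = C @ psi_ap
            norm2 = np.vdot(src, src).real
            # target must satisfy |psi_a> = C|psi_a'>/||C|psi_a'>||, Eq. (Eq:Jump3)
            if norm2 == 0 or not np.allclose(src / np.sqrt(norm2), psi_a):
                continue
            p = (counts[ap] / counts[a]) * abs(rate) * dt * norm2   # Eq. (Eq:JProb2M)
            n_jump = int(np.sum(rng.random(counts[a]) < p))         # decide N_a times
            moves.append((a, ap, n_jump))
    for a, ap, n_jump in moves:                     # update the occupations N_a
        counts[a] -= n_jump
        counts[ap] += n_jump
    return counts
\end{verbatim}

For the two-level example below, this bookkeeping reduces to moving members between the two counters $N_0$ and $N_1$.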
From the NMQJ method's point of view, the details of the actual physical system and the various approximations made during the derivation are irrelevant as long as the master equation is of the desired general form, given by Eq.~\eqref{Eq:MNM}. Moreover, just to highlight this feature, we illustrate explicitly how the NMQJ method follows the formal mathematical solution of the given master equation as long as the solution is physically consistent, i.e., as long as the density matrix remains positive. If the solution fails to be positive at some point, it obviously means that some of the approximations made while deriving the master equation of the reduced system are not valid.
\subsection{Derivation of the non-Markovian local-in-time master equation}
To give an idea of how non-Markovian local-in-time master equations can be derived microscopically and of the explicit form of the time-dependent decay rates, we give a brief sketch of the derivation for the example system at hand.
\begin{figure}
\caption{\label{fig:examplesSchematic}}
\end{figure}
The system Hamiltonian of a multi-level atom is
\begin{equation}
H_S = \sum_i \hbar \omega_i |i\rangle \langle i|.
\end{equation}
Similarly, the self-Hamiltonian for the electromagnetic field constituting the environment is
\begin{equation}
H_\textrm{env} = \sum_k \hbar \nu_k a_k^\dagger a_k.
\end{equation}
The dipole interaction between the system and its environment is described by an interaction Hamiltonian
\begin{equation}
H_\textrm{int} = - \mathbf{D} \cdot \mathbf{E},
\end{equation}
where $\mathbf{D}=q \mathbf{r}$ is the dipole moment operator and $\mathbf{E}$ the quantized electromagnetic field.
Within the second-order time-convolutionless (TCL) approach~\cite{Breuer2002} and after performing the secular approximation, the jump channels are categorized by atomic transition frequencies, or Bohr frequencies, $\omega$, such that the Lindblad operators are
\begin{equation}
C_{\omega} = \sum_{\substack{i,j:\\\omega_{j}-\omega_{i} = \omega}} d_{ij} | i \rangle \langle j|,
\end{equation}
where $d_{ij} = \langle i | (-\mathbf{D}) | j \rangle / \hat{D}$ is the dimensionless value of the matrix element of the dipole moment operator $\mathbf{D}$ (dimensional unit $\hat{D}$).
It is convenient to pass to the continuum limit of environmental modes $\nu_k$ such that $\sum_k |\alpha_k|^2 \to \int \textrm{d} \nu\, J(\nu)$. Here, $\alpha_k$ describes the coupling strength between the system and the reservoir mode $\nu_k$, and $J(\nu)$ is the spectral density of electromagnetic modes~\cite{Breuer2002}. Considering only a zero-temperature environment, where all the modes are initially empty, each decay channel is associated with a time-dependent decay rate
\begin{equation}
\Delta_{\omega} (t) = 2 \int_{0}^{t} \textrm{d}s \int_{0}^{\infty} \textrm{d}\nu\, J(\nu) \cos [(\nu - \omega ) s ].
\end{equation}
\begin{figure}
\caption{\label{fig:examplesEnsemble}\label{Fig:Routes}}
\end{figure}
The interaction with the reservoir introduces a renormalization of the system Hamiltonian $H_S$ by a Hermitian term, i.e., the Lamb shift Hamiltonian
\begin{equation}
H_{LS}(t) = \hbar \sum_{\omega} \lambda_{\omega} (t) C_{\omega}^{\dagger} C_{\omega},
\end{equation}
where the time-dependent rate factor is
\begin{equation}
\lambda_{\omega} (t) = \int_{0}^{t} \textrm{d}s \int_{0}^{\infty} \textrm{d}\nu\, J(\nu) \sin [(\nu - \omega ) s ].
\end{equation}
We label the different Bohr frequencies by $\{ \omega^j \}$, where $j=1,2,\ldots$.
Correspondingly, the jump operators are $C_j \equiv C_{\omega^j}$, the decay rates are $\Delta_j (t) \equiv \Delta_{\omega^j} (t)$, and the Lamb shift rates are $\lambda_j (t) \equiv \lambda_{\omega^j} (t)$. Then, the time-local master equation in the interaction picture is of the form of Eq.~\eqref{Eq:MNM}, where the system Hamiltonian $H_S$ has been replaced by $H_{LS}(t)$.
The spectral density of the electromagnetic field inside an imperfect cavity is well approximated by a Lorentzian distribution
\begin{equation}
J_{\textrm{Lorentz}} (\nu ) = \frac{\alpha^2}{2\pi} \frac{\Gamma}{(\nu - \omega_{\textrm{cav}} )^2 + (\Gamma / 2)^2},
\end{equation}
where $\alpha^2$ is a coupling constant, $\omega_{\textrm{cav}}$ is the resonance frequency of the cavity, and $\Gamma$ characterizes the width of the distribution. The essential parameter in this case is the detuning $\delta_j \equiv \omega_{\textrm{cav}} - \omega^j$ of the Bohr frequency with respect to the cavity resonance frequency.
Since the cavity supports only modes residing close to its resonance frequency $\omega_{\textrm{cav}}$, only transitions whose Bohr frequencies are close to this value contribute to the dynamics. This justifies describing the atom's Hilbert space as consisting effectively of only two or three levels, which we now study.
\subsection{Units and parameters}
In the examples, the time scale is set by the inverse of the spectral distribution width $\Gamma^{-1}$. The resonance frequency is assumed to be large, $\omega_{\textrm{cav}} \gg \Gamma$. The Markovian time scale is then $\tau_{M} \sim 10\, \Gamma^{-1}$ [c.f. convergence of the decay rates to steady Markovian values in, e.g., Fig.~\ref{fig:JaynesCummingsResults}(a)]. In the Jaynes-Cummings model the coupling constant is set to $\alpha^2 = 5$ and in the three-level systems it is $\alpha^2 = 2$. The dipole moment matrix elements are always assumed to be $d_{ij} = 1$ for all pairs of states $i\neq j$. In the numerical simulations the time step size is $\delta t = 0.01\, \Gamma^{-1}$ and the size of the ensemble is $N=10^5$. The notation of atomic levels is the same as in Fig.~\ref{fig:examplesSchematic}.
\subsection{\label{sec:exampleResults}Results}
For the sake of comparison, we solve the master equation in two different ways. First, we solve for the density matrix using the NMQJ method. Second, we calculate the formal analytical solutions of the equations of motion of the individual density matrix components (expressions are given in Appendix B). The results are then compared in order to verify the functionality of our method.
\subsubsection{Two-level atom: detuned Jaynes-Cummings model}\label{subsubsec:JC}
The two-level case involves only one Lindblad operator $C_1 = \sigma_- = | b \rangle \langle a |$, which is the usual lowering operator from the excited to the ground state. We choose the detuning $\delta_1 = 5 \, \Gamma$, and Fig.~\ref{fig:JaynesCummingsResults}(a) shows the oscillatory behavior of the corresponding decay rate $\Delta_1 (t)$.
The initial state is a pure state $\rho (0) = |\psi_0(0) \rangle \langle \psi_0 (0)|$, meaning that all the $N$ ensemble members are initially in the same state $|\psi_0 (0) \rangle$. In our example $|\psi_0 (0) \rangle = ( 3 |a\rangle + 2 |b\rangle )/\sqrt{13}$. For the given single jump operator and an initial state including a finite excited-state component, there will be only two kinds of states contributing to the master equation solution.
This is because, according to the unraveling in Eq.~\eqref{Eq:Rho}, the global phase factors of the single ensemble members do not affect the density matrix representation. The two non-equivalent states are now the evolved initial state vector $|\psi_0 (t) \rangle$ and the ground state $|\psi_1 \rangle \equiv |b\rangle$, which can be reached from $|\psi_0 (t)\rangle$ by operating with the Lindblad operator. Correspondingly, there are two discrete variables $N_0 (t)$ and $N_1 (t)$ counting the number of ensemble members in each of these two states. Initially $N_0 (0) = N$ and $N_1 (0) = 0$.
For a certain initial time interval, the decay rate $\Delta_1$ is positive (see Fig.~\ref{fig:JaynesCummingsResults}). During this period the ensemble evolves according to the standard MCWF description. The deterministic evolution $|\psi_\alpha (t) \rangle \to |\psi_\alpha( t + \delta t) \rangle$ is given by Eq.~\eqref{Eq:Det2} with the Hamiltonian of Eq.~\eqref{eq:deterministicEvolution}, $H = H_{LS} - \frac{i\hbar}{2} \Delta_1 (t) C_1^\dagger C_1 = \hbar [ \lambda_1 (t) - \frac{i}{2} \Delta_1 (t)] |a\rangle \langle a|$. The deterministic evolution is interrupted by quantum jumps $|\psi_0 (t) \rangle \to |\psi_1 \rangle$ occurring with a probability $P_0^1 (t) = \Delta_1 (t) \delta t \langle \psi_0 (t)| C_1^\dagger C_1 |\psi_0 (t) \rangle = \Delta_1 (t) \delta t |\langle a |\psi_0 (t) \rangle |^2$ given by Eq.~\eqref{eq:positiveJumpProbability}. In our notation this means that when a quantum jump occurs, the occupation numbers are updated as $\{ N_0(t), N_1 (t) \} \to \{ N_0 (t) - 1, N_1 (t) + 1\}$. Once an ensemble member has jumped to the state $|\psi_1\rangle$, it cannot experience any other quantum jumps during this period, since the corresponding jump probability is $P_1^1 \propto |\langle a| \psi_1 \rangle |^2 = 0$.
\begin{figure}
\caption{\label{fig:JaynesCummingsResults}}
\end{figure}
After the first positive period the decay rate becomes negative. The deterministic evolution is still driven by the same Hamiltonian as previously. However, now those ensemble members which had previously jumped to the ground state $|\psi_1\rangle$ are able to make a reverse non-Markovian quantum jump $|\psi_0 (t) \rangle \leftarrow |\psi_1 \rangle$, going back to the deterministically evolved initial state. The probability of this jump is given by Eq.~\eqref{Eq:JProb2M} and is
\begin{eqnarray}
P_{1\to 0}^1 (t) &=& \frac{N_0 (t) }{N_1 (t)} | \Delta_1 (t) | \delta t \langle \psi_0 (t) | C_1^\dagger C_1 | \psi_0 (t) \rangle \nonumber \\
&=& \frac{N_0 (t) }{N_1 (t)} | \Delta_1 (t) | \delta t |\langle a | \psi_0 (t) \rangle |^2.
\end{eqnarray}
Accordingly, the occupation numbers are updated after each reverse jump such that $\{ N_0(t), N_1 (t) \} \to \{ N_0 (t) + 1, N_1 (t) - 1\}$. The ensemble members in the state $|\psi_0 \rangle$ are not able to perform quantum jumps during this period, since in the ensemble there are no states $|\psi_\alpha\rangle$ for which $|\psi_0 (t) \rangle = C_1 |\psi_\alpha\rangle / \| C_1 |\psi_\alpha \rangle \|$.
\begin{figure}
\caption{\label{fig:JaynesCummingsExample}}
\end{figure}
\begin{figure}
\caption{\label{fig:lambdaResults}}
\end{figure}
\begin{figure}
\caption{\label{fig:veeResults}}
\end{figure}
The successive periods of positive and negative decay rate are treated in a similar way.
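As an illustration of how little bookkeeping this walk-through requires, the following minimal Python sketch evolves the two counters $N_0$ and $N_1$ and the single deterministically evolving state vector $|\psi_0(t)\rangle$, using the parameter values quoted above. The closed form used for $\Delta_1(t)$ is the standard approximation obtained from the Lorentzian spectral density by extending the lower limit of the frequency integral to $-\infty$ (justified for $\omega_{\textrm{cav}} \gg \Gamma$); the Lamb shift is omitted, since it only rotates the phase of the excited-state amplitude; the total simulation time and the random seed are illustrative choices; and the per-member jump decisions are drawn as a single binomial sample, which is statistically equivalent.

\begin{verbatim}
import numpy as np

Gamma, alpha2, delta = 1.0, 5.0, 5.0   # width, coupling alpha^2, detuning (units of Gamma)
dt, N, T = 0.01, 10**5, 4.0            # time step, ensemble size, total time
rng = np.random.default_rng(1)

def Delta(t):
    # Decay rate Delta_1(t) for the Lorentzian J(nu); closed form obtained by
    # extending the frequency integral to -infinity (an approximation).
    a, b = Gamma / 2.0, delta
    return 2 * alpha2 * (a - np.exp(-a*t) * (a*np.cos(b*t) - b*np.sin(b*t))) / (a**2 + b**2)

c = np.array([3.0, 2.0], dtype=complex) / np.sqrt(13.0)  # |psi_0(t)> = c[0]|a> + c[1]|b>
N0, N1 = N, 0                                            # occupations of |psi_0(t)> and |b>
rho_aa = []

for step in range(int(T / dt)):
    D = Delta(step * dt)
    if D >= 0 and N0 > 0:      # MCWF jumps |psi_0(t)> -> |psi_1>, Eq. (positiveJumpProbability)
        p = D * dt * abs(c[0])**2
        n = rng.binomial(N0, min(p, 1.0))
        N0, N1 = N0 - n, N1 + n
    elif D < 0 and N1 > 0:     # reverse NMQJ jumps |psi_0(t)> <- |psi_1>, Eq. (Eq:JProb2M)
        p = (N0 / N1) * abs(D) * dt * abs(c[0])**2
        n = rng.binomial(N1, min(p, 1.0))
        N0, N1 = N0 + n, N1 - n
    c[0] *= 1.0 - 0.5 * D * dt   # deterministic step, Eq. (deterministicEvolution), no Lamb shift
    c /= np.linalg.norm(c)
    rho_aa.append((N0 / N) * abs(c[0])**2)   # excited-state population from Eq. (Eq:Rho2)
\end{verbatim}

The recorded populations can then be compared with the analytical solution of the master equation discussed in Sec.~\ref{sec:exampleResults}.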
In Fig.~\ref{fig:JaynesCummingsResults} we show how the ensemble average of single realizations generated by the NMQJ method gives the exact solution of the master equation. In the corresponding Markovian case with a constant decay rate $\Delta_\textrm{Markov} = \lim_{t\to\infty} \Delta (t)$, the solution would be a simple exponential decay towards the ground state accompanied by exponential decoherence. The non-Markovian time-dependent decay rate leads to a slower or faster decay compared to the Markovian exponential one. Furthermore, since the decay rate takes negative values, the decay process can be partially reversed. This leads to a regain of excited-state probability and re-coherence.
In Fig.~\ref{fig:JaynesCummingsExample} we give an example of a single realization experiencing both a quantum jump to the ground state $|\psi_0\rangle \to |\psi_1\rangle$ during the positive decay period and a reverse non-Markovian quantum jump back to the initial state $|\psi_0\rangle \leftarrow |\psi_1\rangle$ during the negative decay period. The essence of this illustration is that after these two jumps the state is (up to an irrelevant global phase factor) precisely the same as if the evolution had been purely deterministic. However, when evaluating the time evolution with the ensemble average, the total contribution of this realization is different from the contribution given by a realization with no jumps.
\subsubsection{Three-level atom: $\Lambda$-system}
In a $\Lambda$-system there are two jump channels with Lindblad operators $C_1 = |b\rangle \langle a |$ and $C_2 = |c\rangle \langle a |$. In our example we choose the corresponding detunings to be $\delta_1 = -3 \, \Gamma$ and $\delta_2 = 5 \, \Gamma$. With these values the two decay rates have opposite signs during certain time intervals [c.f.~Fig.~\ref{fig:lambdaResults}(a)]. We now look at the initial state $|\psi_0 (0) \rangle = (4|a\rangle + 2 |b\rangle + |c\rangle)/\sqrt{21}$. Starting with such an initial state, the ensemble consists of effectively three different states: $|\psi_0 (t) \rangle$, $|\psi_1 \rangle \equiv |b\rangle$, and $|\psi_2 \rangle \equiv |c\rangle$.
There are now two competing processes affecting the time evolution of the initial state. Initially both decay rates are positive, but at $t\approx 0.5 \, \Gamma^{-1}$, channel 2 becomes negative. This means that after this moment, on the one hand, there are still quantum jumps through channel 1 away from the initial state $|\psi_0\rangle \to |\psi_1\rangle$, but on the other hand, channel 2 repumps the ensemble members back to the initial state by non-Markovian quantum jumps $|\psi_0\rangle \leftarrow |\psi_2\rangle$. At $t\approx 1.2 \, \Gamma^{-1}$ both decay rates change their signs, and the pattern continues until $t\approx 2.5 \, \Gamma^{-1}$. Fig.~\ref{fig:lambdaResults} illustrates that when the decay rates are counteracting each other, plateaus in the evolution of the density matrix elements can be observed.
\subsubsection{Three-level atom: V-system}
In the case of a V-system, the two jump channels are $C_1 = |c\rangle \langle a |$ and $C_2 = |c\rangle \langle b |$. We choose the detunings as earlier: $\delta_1 = -3 \, \Gamma$ and $\delta_2 = 5 \, \Gamma$. We consider the initial state $|\psi_0 (0) \rangle = (|a\rangle + |b\rangle + |c\rangle)/\sqrt{3}$. In this case, the ensemble consists of effectively only two different states, since both Lindblad operators act as $|\psi_0 (t) \rangle \to |\psi_1\rangle \equiv |c\rangle$.
The dynamics in Fig.~\ref{fig:veeResults} shows how the upper-state probabilities decay according to the individual decay channels. Since only $|\psi_0\rangle$ carries coherences, there is a plateau in the upper-state coherences, as they are affected simultaneously by decoherence and recoherence.
\subsubsection{Three-level atom: Ladder-system}
The ladder-system induces the most complicated dynamics of the three three-level atomic schemes considered here. The Lindblad operators form a short cascade, $C_1 = |b\rangle \langle a|$ and $C_2 = |c\rangle \langle b|$, so that the target state of the upper channel can still decay further by another quantum jump. There are three different possible quantum jump processes: $|\psi_0 (t) \rangle \to |\psi_1 \rangle \equiv |b\rangle$ through channel 1, $|\psi_0 (t) \rangle \to |\psi_2 \rangle \equiv |c\rangle$ through channel 2, and $|\psi_1 \rangle \to |\psi_2 \rangle$ through channel 2. Therefore, the effective ensemble consists of three state vectors. During the negative period of channel 2, there are now, interestingly, two possible target states for a non-Markovian quantum jump from the state $|\psi_2\rangle$, corresponding to the processes $|\psi_0 (t) \rangle \leftarrow |\psi_2 \rangle$ and $|\psi_1 \rangle \leftarrow |\psi_2 \rangle$.
The example dynamics in Fig.~\ref{fig:ladderResults} shows how the initial state $|\psi_0 (0) \rangle = (4 |a\rangle + 2 |b\rangle + |c\rangle ) /\sqrt{21}$ evolves. It is evident that eventually the state decays towards $|\psi_2\rangle$, but due to the complicated connections between the states and the changing signs of the decay rates, the dynamics is richer than in the other cases.
Starting from an initial state $|\psi_0 (0) \rangle = |a\rangle$, our other example of ladder-system dynamics shows that the density matrix loses its positivity at $t \approx 1.0 \, \Gamma^{-1}$ (c.f. Fig.~\ref{fig:ladderFailure}), which indicates that the approximations in the derivation of the master equation do not hold for this level geometry. The NMQJ solution follows the formal mathematical solution as long as it remains positive, and the method is able to identify the point where the time evolution becomes unphysical. The failure of positivity occurs when channel 2 is still negative while all the ensemble members in the state $|\psi_2\rangle$ have already had a non-Markovian quantum jump to the states $|\psi_0 (t) \rangle$ and $|\psi_1 \rangle$. This happens because the probability for such a non-Markovian quantum jump is $P_{2\to \alpha}^2 \propto N_\alpha(t) / N_2 (t)$, where $N_2 (t) \to 0$. This property has some interesting implications for the search for positivity conditions for non-Markovian systems~\cite{Breuer08a}.
\begin{figure}
\caption{\label{fig:ladderResults}}
\end{figure}
\section{Discussion}\label{Discu}
\subsection{On non-Markovian quantum jump operators and probabilities}\label{DisJOp}
To circumvent the problem of the negative probabilities of the Markovian MCWF method, one is tempted to consider negative probabilities as positive ones for inverted jumps, i.e., to switch the roles of the initial and final states of a given Lindblad operator by setting $C_j \rightarrow C_j^{\dag}$. However, this does not lead to the correct ensemble for the non-Markovian dynamics. The essence of the negativity of the decay rate is the reversal of the decoherence process, i.e., re-coherence, and partial cancellation of the decoherence which occurred in the past.
If one uses the substitution $C_j \rightarrow C_j^{\dag}$ in the non-Markovian region with negative decay rates, this only replaces one decoherent process with another.
\begin{figure}
\caption{\label{fig:ladderFailure}}
\end{figure}
Let us illustrate this with the simple example we considered in Sec.~\ref{subsubsec:JC}. Writing the equations of motion explicitly for the density matrix elements of a two-level system gives, for the positive decay rate region,
\begin{eqnarray}
\dot{\rho}_{aa}&=& -|\Delta| \rho_{aa}, \nonumber \\
\dot{\rho}_{bb} &=& |\Delta| \rho_{aa}, \nonumber \\
\dot{\rho}_{ab} &=& -\frac{1}{2}|\Delta| \rho_{ab},
\label{Eq:RhoElPos}
\end{eqnarray}
and, for the negative decay region,
\begin{eqnarray}
\dot{\rho}_{aa}&=& |\Delta| \rho_{aa}, \nonumber \\
\dot{\rho}_{bb} &=& -|\Delta| \rho_{aa}, \nonumber \\
\dot{\rho}_{ab} &=& \frac{1}{2}|\Delta| \rho_{ab}.
\label{Eq:RhoEl}
\end{eqnarray}
Here, $a$ denotes the excited and $b$ the ground state of the two-level atom. The first line of Eq.~(\ref{Eq:RhoEl}) shows that during the negative decay the excited-state probability increases and that this increase is directly proportional to the probability that the excited state already has. This is a counterintuitive feature since it means that the total rate of the process is proportional to the target state, and not to the source state as in the positive decay region [c.f.~Eq.~(\ref{Eq:RhoElPos})]. The last line of Eq.~(\ref{Eq:RhoEl}) shows that the coherences increase during the negative decay.
If one attempts to remedy the negative probability of the jump given by the Markovian method by changing the sign of the decay rate and substituting $C_j \rightarrow C_j^{\dag}$, or $\sigma_-\rightarrow \sigma_+$, this gives the equations of motion
\begin{eqnarray}
\dot{\rho}_{aa}&=& |\Delta| \rho_{bb}, \nonumber \\
\dot{\rho}_{bb} &=& -|\Delta| \rho_{bb}, \nonumber \\
\dot{\rho}_{ab} &=& -\frac{1}{2}|\Delta| \rho_{ab}.
\end{eqnarray}
It is easy to see that these equations are not the correct equations of motion (\ref{Eq:RhoEl}). In particular, the rates of change of the populations $\rho_{aa}$ and $\rho_{bb}$ become proportional to the wrong state, and the coherences decrease while the correct equations (\ref{Eq:RhoEl}) show that they must increase.
Generally speaking, a simple sign change of the decay rate from positive to negative in the non-Markovian master equation (\ref{Eq:MNM2}) may seem {\it a priori} a rather trivial problem to solve. However, as the simple example above illustrates, the sign change actually leads to a very complicated problem. The main source of the complication is that the non-Markovian jump operators, given by Eq.~(\ref{Eq:JOp}), do not appear explicitly in the master equation to be solved, whereas in the Markovian case one can pick the jump operators directly from the dissipator of the master equation.
It is also interesting to note that we can interpret the jump probability (\ref{Eq:JProb2M}) in the following way. The numerator $N_{\alpha'}|\Delta_{j_-}(t)| \delta t \langle \psi_{\alpha'}(t) | C_{j_-}^{\dagger}(t) C_{j_-}(t) |\psi_{\alpha'}(t)\rangle$ gives the cumulative non-Markovian quantum jump probability in the whole ensemble. This is then divided among those $N_{\alpha}$ ensemble members $|\psi_{\alpha}\rangle$ which can perform the jumps.
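As a hypothetical numerical illustration of this interpretation, suppose that at some time step $N_{\alpha'}=8000$ members occupy the target state, $N_{\alpha}=2000$ members occupy the source state, and $|\Delta_{j_-}(t)|\,\delta t\, \langle \psi_{\alpha'}(t) | C_{j_-}^{\dagger}(t) C_{j_-}(t) |\psi_{\alpha'}(t)\rangle = 10^{-3}$. The cumulative jump probability in the ensemble, i.e., the expected number of reverse jumps during this time step, is then $8000\times 10^{-3}=8$, and this total is shared among the $2000$ candidate members so that each of them jumps with probability $P_{\alpha\rightarrow\alpha'}^{j_-}=8/2000=4\times 10^{-3}$, in accordance with Eq.~(\ref{Eq:JProb2M}).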
\subsection{Why can a local-in-time master equation describe non-Markovian dynamics with memory?}
Two common ways to describe non-Markovian open system dynamics are the memory kernel master equations and the local-in-time master equations with time-dependent decay rates~\cite{Breuer2002}. The former consists of an integro-differential master equation where the change of the system state at a given moment of time is given by the integral over the past evolution according to a given memory kernel. The local-in-time master equations, in turn, are based on microscopic modeling of the system-reservoir interaction, leading to a differential equation of motion for the density matrix of the system which is local in time. The description of non-Markovian dynamics without the use of a memory kernel, as done with the local-in-time master equations, may seem at first sight counterintuitive. Our NMQJ method sheds new light on this issue and shows explicitly how and where the memory appears in local-in-time master equations.
Suppose now that we have a density matrix of the system $\rho(t)$ and the corresponding ensemble of state vectors during the initial positive decay region. At each time step a certain small fraction of the state vectors may jump to decay channel $m$ according to the Markovian MCWF scheme: $|\psi'\rangle \rightarrow |\psi\rangle = C_m |\psi'\rangle / || C_m |\psi'\rangle||$. It is important to note that $|\psi'\rangle$ contains the information on what the state $|\psi\rangle$ was before the jump $|\psi'\rangle \rightarrow |\psi\rangle$ took place, and that the whole ensemble still includes both types of state vectors $|\psi'\rangle$ and $|\psi\rangle$. Then the system enters the negative decay rate region. Here, as described in the previous two subsections, the jumps go in the opposite direction, from $|\psi\rangle$ to $|\psi'\rangle$, and the probability of this jump is governed by the target state $|\psi'\rangle$. In other words, the very state vector that contains information on the past state of $|\psi\rangle$ defines both the target state of the non-Markovian jump and the probability for this jump to occur. In this way the past affects the current evolution of the system~\cite{Note:ManyPsi}.
It is difficult to see from the density matrix description where the memory of the earlier state of the system is. However, according to the description above, when we look at the density matrix as an ensemble of state vectors and study the dynamics of the state vectors in terms of the jumps, we see explicitly how the ensemble members carry memory of other ensemble members. This memory comes into play when the decay rate becomes negative. It is also important to note that if the number of ensemble members in the target state of the reverse jump becomes equal to zero, $N_{\alpha'}=0$, then the system has lost its memory, and consequently the reverse jump probability vanishes since it is directly proportional to $N_{\alpha'}$ [c.f.~Eq.~(\ref{Eq:JProb2M})].
\subsection{Is continuous measurement of the environment allowed for non-Markovian systems?}
For Markovian open quantum systems, single Monte Carlo realizations have a measurement interpretation~\cite{Plenio98}. The environment is thought to be monitored in a continuous way, and the corresponding reduced system evolution, conditioned on the measurement outcome, constitutes a single pure state trajectory of the ensemble. The existence of a measurement scheme interpretation for non-Markovian trajectories has recently been under active debate.
Di\'osi claims that, at least in principle, certain types of quantum state diffusion (QSD) trajectories can be interpreted as true pure state single system trajectories~\cite{Diosi08}. His idea is based on the assumed availability of an infinite set of entangled von Neumann detectors. Wiseman and Gambetta question Di\'osi's claims and the existence of true pure state trajectories with the measurement scheme interpretation. Their argument is based on the notion that in Di\'osi's scheme one should actually measure also those von Neumann apparatuses which are yet to interact with the system~\cite{Gambetta08}. Due to the entanglement between the von Neumann apparatuses, the measurement induces noise, turning the true pure state trajectories into mixed ones.
Though both works mentioned above deal with diffusion descriptions, it is interesting to note how our jump scheme fits into the discussion. In the NMQJ method, the memory of one ensemble member is carried by other ensemble members. When a reverse non-Markovian jump for a given ensemble member occurs, this member returns to the state which it would have at this point of time if the prior positive decay jump had not occurred. In the simple two-level atom example, the superposition which was lost earlier gets restored by the non-Markovian jump, and the information on the earlier state of the system returns from the environment to the system.
The crucial point is that the information lost by the system to the environment in the initial positive decay region has to still be available to the system when the decay rate later turns negative. If we measure the environment in a continuous way, we are extracting information from the environment, and indirectly on the system state. If this measurement is destructive, then the information is not available to the system anymore and the non-Markovian dynamics gets distorted. In the case of a two-level atom, the measurement of the photon in the environment destroys the photon, and the two-level atom cannot get re-excited during the negative decay region.
In addition, in the two-level atom example, the oscillations in the excited-state probability arise due to virtual exchanges of excitations between the system and the reservoir~\cite{Breuer2002,Breuer99,BreuerGen}. Virtual processes cannot be directly measured even though they still affect the system dynamics. This fits the insight that the NMQJ method gives, though in terms of virtual processes there is a subtle difference: instead of a virtual exchange of photons between the two-level atom and the reservoir, we rather describe the oscillations in the excited-state amplitude of the atom as destruction and restoration of the quantum superposition. This difference between the two descriptions arises because an absorption of the photon by the atom means a jump from the ground state to the excited state. This process, by definition, cannot increase the coherences, whereas increasing coherences are a key feature of non-Markovian systems in the negative decay region, as discussed in detail in Sec.~\ref{DisJOp}.
If single realizations cannot be measured, is there some other physical meaning that they have? In our formalism, the probability to be in a given state at a given moment of time is given by the sum over all the paths leading to this state, see Fig.~\ref{Fig:Paths}. In this sense the state vector evolutions can have an interpretation as possible paths that the system may take from its initial to its final state.
However, combined with the lack of a measurement scheme, this means that we are not allowed to measure which path the system has taken, while all possible paths contribute to the system state. If we try to extract information on the path followed by means of measurements, we disturb the non-Markovian memory. The rigorous connection to the Hilbert space path integral formalism will be studied in the future.
\begin{figure}
\caption{\label{Fig:Paths}}
\end{figure}
\subsection{Basic comparison to other jump descriptions}\label{Sec:Comp}
Earlier approaches to treat non-Markovian dynamics with quantum jumps use auxiliary states and exploit the idea of Markovian embedding of non-Markovian dynamics in the extended Hilbert space~\cite{Imamoglu,Garraway1997,Breuer99,BreuerGen}. Other jumplike unravelings use as an aid the state of the total system and hidden variables~\cite{Gambetta2004} or take the measurement theory perspective~\cite{Collett}. Our results show that it is possible to have a jumplike unraveling of non-Markovian dynamics of the reduced system without extending the system Hilbert space or considering in detail the total system dynamics and hidden variables. It is worthwhile to see if the differences between our method and those developed earlier reveal interesting aspects of non-Markovian dynamics. For this purpose, we compare our method to the pseudomode (PM) method~\cite{Garraway1997}, the doubled~\cite{Breuer99} and triple~\cite{BreuerGen} Hilbert space methods (DHS and THS, respectively), and to the quantum trajectory method based on hidden variables~\cite{Gambetta2004}.
The PM method describes the properties of the environment in terms of the auxiliary pseudomode(s) with which the system of interest interacts~\cite{Garraway1997}. The pseudomode is then coupled to the Markovian reservoir while the system of interest interacts only, in a coherent way, with the pseudomode. The Markovian pseudomode master equation can be unravelled with the MCWF or some other Markovian method. Once this is done, the dynamics of the system of interest is obtained by tracing out the pseudomode. This necessarily leads to mixed state trajectories for the system of interest, while in our NMQJ method the time evolution of the ensemble members consists of pure states living in the Hilbert space of the system. In addition, the PM method relies on some assumptions on the form of the environment spectral density so that the pseudomode structure can be calculated, and it also exploits the solution of the total system dynamics. Our NMQJ method differs from the PM method in both of these issues and has been used to simulate a two-level atom in a photonic band gap in the absence of driving between the two states~\cite{Piilo08} (the driven case is more challenging, see the next subsection). On the other hand, the pseudomodes are by construction directly related to the properties of the environment. As a matter of fact, it is possible to show, by exploiting the insight given by the NMQJ method, that the pseudomodes can be interpreted as an effective description of the memory of the environment of the open system~\cite{Mazzola08}. This is based on the notion that periods of negativity of the decay rate of the local-in-time master equation coincide with those periods of time during which the pseudomode coherently feeds the system.
The doubled Hilbert space (DHS) method uses two copies of the state vector to create a single realization in the ensemble~\cite{Breuer99}.
The time evolution of the two copies is identical in the positive decay region. When the jumps with the Lindblad operators occur during the negative decay, one of the two copies gets multiplied by $-1$. This produces a negative contribution to the ensemble average. The probability in the ensemble is conserved because the norm of the deterministically evolving state vectors increases to values larger than one. From the statistics point of view, this means that the number of jumps during negative decay has to match the increase of norm in the deterministic evolution, and the probability is conserved on average. The consequence is an additional source of statistical noise. In the NMQJ method each state vector is normalized to one at each time step and the probability is conserved exactly. This gives the NMQJ method a better statistical performance than the DHS method. In addition, the NMQJ method avoids the numerical burden which is present in the DHS method due to the doubling of the Hilbert space size~\cite{Erika}.
An interesting improvement to the DHS method is provided by the triple Hilbert space (THS) method~\cite{BreuerGen}. This method shows that the Markovian embedding of non-Markovian dynamics can be done with only three auxiliary discrete states. The original system dynamics is then contained in the coherences of the extended space state vectors. The method avoids the additional statistical noise term of the DHS method. However, the THS method uses a four times larger number of decay channels and a three times larger Hilbert space than the NMQJ method. Moreover, since the dynamics of the original system is contained in the coherences of the extended space, unphysical situations such as violations of positivity of the density matrix during the time evolution may occur and pass unnoticed. In contrast, the NMQJ method, by construction, always keeps the dynamics positive since it is not possible to have a negative integer number of state vectors in the ensemble. It is also worth mentioning that in the THS method the auxiliary quantum jump channels open when the decay rate becomes negative. This means that during the negative decay interval the probability flows out of the Hilbert space of the original system, whereas in the NMQJ method the direction of the probability flow within the Hilbert space of the system gets reversed at this point.
From the fundamental quantum physics point of view, it is also interesting to discuss the jumplike unraveling of non-Markovian dynamics which is based on hidden variables~\cite{Gambetta2004}. The basic idea of the method is to obtain the system trajectories from the guiding state describing the state of the total system. This is then used to obtain the stochastic evolution of the so-called property state, which includes information on the value of the environmental hidden variable and the corresponding properties of the reduced system. Our results seem to indicate that it is possible to describe non-Markovian dynamics with quantum jumps without the use of hidden variables. However, since the hidden variable approach allows jumps towards both the ground and the excited state in the two-level atom case, it would be very interesting to compare in detail the time evolution of the ensemble members in both methods, and to see if there exist any connections between the two.
\subsection{Numerical and technical aspects}\label{Sec:Num}
Since in the NMQJ method the realizations depend on each other due to memory effects [c.f.~Eq.~(\ref{Eq:JProb2M})], it seems at first sight that all the $N$ ensemble members have to be evolved simultaneously. However, according to Eq.~(\ref{Eq:Rho2}), the ensemble consists of several copies of each $|\psi_{\alpha}(t)\rangle$. Obviously, there is no need to store several copies of the same state vector on a computer. It is sufficient to have one copy and the corresponding integer number $N_{\alpha}$. Any number $N$ of realizations of the process can be generated by making $N_{\rm eff}\ll N$ state vector evolutions, where $N_{\rm eff}$ is equal to the number of terms in the summation $N=\sum_{\alpha}N_{\alpha}$ [c.f.~Eq.~(\ref{Eq:Rho2})].
When the realizations of the process are generated on a computer, a jump means changing the integer numbers $N_{\alpha}(t)$ accordingly in Eq.~(\ref{Eq:Rho2}). A saving in CPU time is achieved since it is not necessary to evolve $N$ state vectors at each point of time; instead, it is enough to decide $N$ times whether a jump occurred or not. This means that the NMQJ method has a built-in optimization which can be exploited to improve the efficiency of the method. For the two-level atom example described above, the effective ensemble size $N_{\rm eff}=2$ while $N=10^5$. However, these $N_{\rm eff}$ state vectors need to be evolved simultaneously since there is a dependence between the state vectors [c.f.~Eqs.~(\ref{Eq:Jump3})--(\ref{Eq:JProb2M})].
We can summarize the key factors for the numerical performance of the NMQJ method as follows: (i) no Hilbert space extensions are needed; (ii) the identification of the negative rate process as reverse jumps keeps $N_{\rm eff}$ constant during the negative decay region and allows technical optimization of the simulations; and (iii) the computational cost increases when the number of terms in the summation (\ref{Eq:Rho2}) increases. The first two points allow the efficiency to be improved, while the third point is expected to set the ultimate limit for the required computational resources.
In addition to this resource limit, there also exist non-Markovian systems for which it is very challenging to derive local-in-time master equations of the form (\ref{Eq:MNM}). An example of this type of system is a driven two-level atom in a photonic band gap material. To the best of our knowledge, there do not yet exist local-in-time master equations of the form (\ref{Eq:MNM}) for this system. On the other hand, it is possible to simulate this system already, e.g., with the method developed by Jack and Hope~\cite{JackPBG}, which exploits memory functions and virtual density matrices.
In the QSD method~\cite{Strunz1999}, to obtain the operator giving the stochastic evolution of state vectors, one needs to perform a memory kernel integration combined with a functional derivative of the state vector with respect to the noise. In the NMQJ method the corresponding step proceeds in a fundamentally different way, since the simulation produces its own non-Markovian quantum jump operator. This acts by transferring the ensemble members between the existing states in a stochastic way [c.f.~Eq.~(\ref{Eq:JOp})]. It is also worth mentioning that the QSD method by definition has a continuous stochastic evolution of the state vectors. This means that in a QSD simulation $N_{\rm eff} \sim N$.
For NMQJ method, when the complexity of the system to be treated increases, also $N_{\rm eff}$ increases. In the ultimate limit when the number of different state vectors is very large, or even approaches infinity, then there does not exist the optimization scheme for NMQJ method based on $N_{\rm eff}$. In this case, the simulations also become more tedious due to the increasing number of state vectors which need to evolved simultaneously. In general the derivation of local-in-time master equation for driven systems is a very challenging problem in the theory of non-Markovian open quantum systems. We believe that the main difficulties here are the condition of very strong driving affecting the system dynamics in the short non-Markovian time-scale, and the case of a very strong coupling between the system and the reservoir. In the latter case, the existence of a time-local generator of the reduced system dynamics is not in general guaranteed (see section 9.2.1 of Ref.~\cite{Breuer2002}). Hence, it is worth keeping in mind that the applicability of our method depends on this issue, since our starting point is the local-in-time master equation~(\ref{Eq:MNM}). \section{Conclusions}\label{Conclu} We have shown that, starting from a general local-in-time master equation, it is possible to describe the dynamics of a non-Markovian open system with an ensemble of stochastic pure state evolutions with quantum jumps. The developed non-Markovian quantum jump method (NMQJ) demonstrates that it is indeed possible to unravel non-Markovian master equations with quantum jumps without making any auxiliary extensions to the Hilbert space of the system as done in the jump descriptions developed earlier~\cite{Imamoglu,Garraway1997,Breuer99,Gambetta2004,BreuerGen}. Our approach allows a rather simple and insightful description of non-Markovian dynamics. Even though the method allows to optimize the simulations in terms of using the effective ensemble size $N_{\rm {eff}}$, this number increases with complexity of the system under study. This sets the limit for the performance of the method since $N_{\rm eff}$ state vectors need to be evolved simultaneously. The NMQJ method developed here generalizes a widely used Markovian MCWF method~\cite{DCM1992} into the non-Markovian regime. Due to the existence of the negative decay rates for non-Markovian systems, the MCWF method leads to negative quantum jump probabilities. We have discovered the corresponding jump process which has positive probability. Due to the memory of the system, this non-Markovian quantum jump essentially acts as a reverse jump, and allows the system to recover the information lost earlier. The consequence is that in the ensemble of pure states forming the density matrix, the seemingly lost superpositions can be restored. During the time evolution, jump -- reverse-jump cycles can occur in the ensemble members: the first jump during the positive decay destroys quantum superposition while the second jump in the negative decay region restores them. Our results shed new light on the non-Markovian dynamics in several ways. Breaking the density matrix evolution into an ensemble of state vectors with quantum jumps allows to understand how the density matrix carries the information on the earlier state of the system, and how the memory affects the system dynamics. This helps to clarify how local-in-time master equations are able to describe non-Markovian dynamics. Quantum mechanics reveals often counterintuitive features. 
Here, the rate of the process appearing in the non-Markovian region is directly proportional to the target state of the process. This is opposite to the classical view where typically the rate of a given process is given by the source state. Our analysis reveals in detail this counterintuitive feature of non-Markovian dynamics which is also present in the unravelled master equation. It has been shown earlier that Markovian open system dynamics with MCWF trajectories can be formally described as a piecewise deterministic stochastic process of general probability theory~\cite{Breuer1995}. Consequently, we can ask what is the corresponding formal stochastic process for the NMQJ state evolutions~\cite{Breuer08a}. This holds a promise to exploit new stochastic process which may allow the ingredients and insight by our NMQJ method to be taken outside the field of open systems to a more general level. \acknowledgments This work has been supported by the Academy of Finland (Projects No.~108699, No.~115682, and No.~115982), the Magnus Ehrnrooth Foundation, the V\"ais\"al\"a Foundation, and the Turku Collegium of Science and Medicine. We thank H.-P. Breuer, B. Garraway, and J. Gambetta for stimulating discussions. \appendix \section{} In this Appendix A we show the details of the proof of the match between the master equation and the NMQJ method. Averaging the evolution of the ensemble \begin{equation} \label{Eq:RhoA} \rho(t) = \sum_\alpha \frac{N_\alpha(t)}{N} |\psi_\alpha(t)\rangle \langle \psi_\alpha(t)|, \end{equation} over time step $\delta t$ gives \begin{widetext} \begin{eqnarray} \label{Eq:Algo} \overline{\sigma(t+\delta t)} &=& \sum_{\alpha}\frac{N_{\alpha}(t)}{N} \left[ \left( 1-\sum_{j_+}P_{\alpha}^{j_+}(t) -\sum_{j_-,\alpha'} P_{\alpha\rightarrow \alpha'}^{j_-}(t)\right) \right. \frac{| \phi_{\alpha}(t+\delta t)\rangle \langle \phi_{\alpha}(t+\delta t) |}{||| \phi_{\alpha}(t+\delta t)\rangle ||^2} \nonumber \\ &+& \sum_{j_+} P_\alpha^{j_+}(t) \frac{C_{j_+}(t) |\psi_{\alpha}(t)\rangle \langle \psi_{\alpha}(t)| C_{j_+}^{\dagger}(t) } {||C_{j_+}(t) |\psi_{\alpha}(t)\rangle||^2} + \left. \sum_{j_-,\alpha'} P_{\alpha\rightarrow \alpha'}^{j_-}(t) D_{\alpha\rightarrow \alpha'}^{j_-}(t) |\psi_{\alpha}(t)\rangle \langle \psi_{\alpha}(t)| D_{\alpha\rightarrow {\alpha}'}^{j_-\dagger}(t) \right],\nonumber \\ \end{eqnarray} \end{widetext} where we have weighted, as usual, the deterministic evolution with the no-jump probability and the jump paths with the corresponding jump probabilities. Above, we have the following quantities: $P_{\alpha}^{j_+}(t)$ is the jump probability of the state $|\psi_{\alpha}(t)\rangle$ for positive channel $j_+$ \begin{equation} \label{Eq:P+} P_{\alpha}^{j_+}(t)=\Delta_{j_+} (t)\delta t \langle \psi_{\alpha}(t) | C_{j_+}^{\dagger}(t)C_{j_+}(t)|\psi_{\alpha}(t)\rangle, \end{equation} $P_{\alpha\rightarrow \alpha'}^{j_-}(t)$ is the reverse jump probability of state $|\psi_{\alpha}(t)\rangle$ via the negative channel $j_-$ to the state $|\psi_{\alpha'}(t)\rangle$ \begin{eqnarray} P_{\alpha\rightarrow \alpha'}^{j_-}(t) &=& \frac{N_{\alpha'}(t)} {N_{\alpha}(t)} |\Delta_{j_-}(t)| \delta t \nonumber \\ &\times& \langle \psi_{\alpha'}(t) | C_{j_-}^{\dagger}(t) C_{j_-}(t) |\psi_{\alpha'}(t)\rangle. 
\label{Eq:JProb2} \end{eqnarray} The reverse jump operator from the state $|\psi_{\alpha}\rangle = C_{j-}|\psi_{\alpha'}\rangle / ||C_{j-}|\psi_{\alpha'}||$ via channel $j_-$ to the state $|\psi_{\alpha'}\rangle$ is \begin{equation} D_{\alpha\rightarrow \alpha'}^{j_-}(t)= |\psi_{\alpha'}(t)\rangle \langle \psi_{\alpha}(t)|. \label{Eq:JOpA} \end{equation} The deterministic evolution in Eq.~(\ref{Eq:Algo}) is given by \begin{eqnarray} \label{Eq:DetA} | \phi_{\alpha}(t+\delta t)\rangle &=& \left(1-\frac{i H_S\delta t}{\hbar} -\sum_j\frac{\Delta_j(t)\delta t}{2} C_j^{\dagger} (t) C_j(t)\right) \nonumber \\ &\times&|\psi_{\alpha}(t)\rangle, \end{eqnarray} which gives for $| \phi_{\alpha}(t+\delta t)\rangle \langle \phi_{\alpha}(t+\delta t) |$, in first order in $\delta t$, \begin{widetext} \begin{eqnarray} \label{Eq:DetAA} | \phi_{\alpha}(t+\delta t)\rangle \langle \phi_{\alpha}(t+\delta t) | &=& |\psi_{\alpha}(t)\rangle \langle \psi_{\alpha}(t) | -i \delta t[H_S, |\psi_{\alpha}(t)\rangle \langle \psi_{\alpha}(t) | ] - \frac{\delta t}{2} \sum_j \Delta_j (t) \left\{C_j^{\dagger}(t) C_j(t), |\psi_{\alpha}(t)\rangle \langle \psi_{\alpha}(t) | \right\}. \end{eqnarray} \end{widetext} It is easy to see from here, that this term gives the commutator and the anticommutator parts of the master equation. In Eq.~(\ref{Eq:Algo}), the jump probabilities for the positive channel, appearing in the numerator and the denominator in the no-jump path, cancel each other when doing the series expansion in $\delta t$ and keeping the terms to first order. The jump part to positive channels gives the positive channel "sandwich term" of the master equation in the usual way. We are left with the "sandwich" term for the negative channels. Inserting Eqs.~(\ref{Eq:P+})-(\ref{Eq:DetAA}) into Eq.~(\ref{Eq:Algo}) and comparing to Eq.~(\ref{Eq:MNM2}) we have to show that \begin{widetext} \begin{eqnarray} \label{Eq:LastStep} &-& \sum_{\alpha, j_-} \frac{N_{\alpha}}{N}|\Delta_{j_-}(t)| \delta t C_{j_-}(t) |\psi_{\alpha}(t)\rangle \langle \psi_{\alpha}(t)|C_{j_-}^{\dagger}(t) = \nonumber \\ &-& \sum_{\alpha}\frac{N_{\alpha}}{N} \sum_{\alpha',j_-} P_{\alpha \rightarrow \alpha'}^{j_-}(t) |\psi_{\alpha}(t)\rangle \langle \psi_{\alpha}(t)| \delta\left( |\psi_{\alpha}(t) \rangle - \frac{C_{j_-}(t) |\psi_{\alpha'}(t)\rangle }{||C_{j_-}(t) |\psi_{\alpha'}(t)\rangle ||}\right) \nonumber \\ &-& \sum_{\alpha}\frac{N_{\alpha}}{N}\sum_{j_-} |\Delta_{j_-}(t)| \langle \psi_{\alpha}(t)| C_{j_-}^{\dagger}(t) C_{j_-}(t) |\psi_{\alpha}(t)\rangle \delta t |\psi_{\alpha}(t)\rangle \langle \psi_\alpha (t) | \nonumber \\ &+& \sum_{\alpha}\frac{N_{\alpha}}{N}\sum_{j_-,\alpha'} P_{\alpha \rightarrow \alpha'}^{j_-}(t) |\psi_{\alpha'}(t)\rangle \langle \psi_{\alpha'}(t)| \delta\left( |\psi_{\alpha}(t) \rangle - \frac{C_{j_-}(t) |\psi_{\alpha'}(t)\rangle }{||C_{j_-}(t) |\psi_{\alpha'}(t)\rangle ||}\right). \end{eqnarray} \end{widetext} We have written here explicitly the $\delta$-functional which gives the condition for the reverse jump: one can go via channel $j_-$ from $|\psi_{\alpha}\rangle$ to $|\psi_{\alpha'}\rangle$ on the condition that $|\psi_{\alpha}(t)\rangle = C_{j_-}(t) |\psi_{\alpha'}(t)\rangle / ||C_{j_-}(t) |\psi_{\alpha'}(t)\rangle|| $. In Eq.~(\ref{Eq:LastStep}), the last two lines cancel each other. This happens because the $\delta$-functional takes care of the $\alpha$ summation in the last line and the summation over $\alpha$ and $\alpha'$ are equivalent procedures making the two terms equal with opposite signs. 
The first and second line in Eq.~(\ref{Eq:LastStep}) are equal. In the second line the $\delta$-functional with summation over $\alpha$ means replacing $|\psi_{\alpha} \rangle$ with $C_{j_-} |\psi_{\alpha'}\rangle / \| C_{j_-} |\psi_{\alpha'}\rangle \|$ giving the sandwich term of the master equation in the first line. Thus we have proven the equivalence between the master equation and the algorithm. The proof can be summarized in the following way: the deterministic part gives the commutator and anticommutator parts of the master equation, the positive channels go in the usual way: the jump part giving the corresponding sandwich term of the master equation. For negative channels the change in the norm and jumps cancel and the jump probability of negative channels times the deterministic evolution gives the sandwich terms. \section{} This appendix B gives the formal analytical solutions for the three-level systems considered in Sec. \ref{sec:exampleResults}. For simplicity, we neglect the Lamb-shift term. First, let us define short-hand notation \begin{align} D_i (t) = \int_0^t \textrm{d} s \, \Delta_i (s), \\ L_i (t) = \int_0^t \textrm{d} s \, \lambda_i (s). \end{align} The direct formal solutions can be expressed by using these parameters, decay rates $\Delta_i (t)$, and initial conditions $\rho_{ij} (0)$ only. \subsection*{Two-level atom: detuned Jaynes-Cummings model} Master equation: \begin{align} \dot \rho(t) = & \frac{1}{i} \lambda (t) [\sigma_+ \sigma_-, \rho(t) ] + \Delta (t) \sigma_- \rho(t) \sigma_+ \nonumber \\ & - \frac{1}{2} \Delta (t) \left\{ \rho(t), \sigma_+ \sigma_- \right \}. \end{align} Jump operator: \begin{align} C_1 &= \sigma_- = |b \rangle\langle a |. \end{align} Populations: \begin{align} \rho_{aa} (t) &= e^{ -D_1(t)}\rho_{aa} (0), \\ \rho_{bb} (t) &= \Big\{ 1 - e^{ -D_1(t)} \Big\} \rho_{aa} (0) + \rho_{bb} (0). \end{align} Coherences: \begin{align} \rho_{ab} (t) &= e^{-D_1(t) / 2} \rho_{ab} (0). \end{align} \subsection*{Three-level atom: $\Lambda$-system} Master equation: \begin{align} \dot \rho(t) = & \frac{1}{i} \lambda_1 (t)[ |a\rangle\langle a|, \rho(t) ] + \frac{1}{i} \lambda_2 (t)[ |a\rangle\langle a|, \rho(t) ] \nonumber \\ & + \Delta_1 (t) \left[ | b \rangle\langle a | \rho(t) | a \rangle\langle b | - \frac{1}{2} \left\{ \rho(t), | a \rangle\langle a| \right \} \right] \nonumber \\ & + \Delta_2 (t) \left[ | c \rangle\langle a | \rho(t) | a \rangle\langle c | - \frac{1}{2} \left\{ \rho(t), | a \rangle\langle a| \right \} \right]. \end{align} Jump operators: \begin{align} C_1 &= |b \rangle\langle a |,\\ C_2 &= |c \rangle\langle a |. \end{align} Populations: \begin{align} \rho_{aa} (t) &= e^{-[D_1 (t)+D_2(t)]} \rho_{aa} (0), \\ \rho_{bb} (t) &= \int_0^t \textrm{d} s \, \Delta_1 (s) e^{-[D_1 (s)+D_2(s)]} \rho_{aa} (0) \nonumber \\ & \quad + \rho_{bb} (0), \\ \rho_{cc} (t) &= \int_0^t \textrm{d} s \, \Delta_2 (s) e^{-[D_1 (s)+D_2(s)]} \rho_{aa} (0) \nonumber \\ & \quad + \rho_{cc} (0). \end{align} Coherences: \begin{align} \rho_{ab} (t) &= e^{-[i L_1 (t) + i L_2 (t) + D_1 (t)/2 + D_2 (t)/2] } \rho_{ab} (0), \\ \rho_{ac} (t) &= e^{-[i L_1 (t) + i L_2 (t) + D_1 (t)/2 + D_2 (t)/2] } \rho_{ac} (0), \\ \rho_{bc} (t) &= \rho_{bc} (0). 
\end{align} \subsection*{Three-level atom: $V$-system} Master equation: \begin{align} \dot \rho(t) = & \frac{1}{i} \lambda_1 (t)[ |a\rangle\langle a|, \rho(t) ] + \frac{1}{i} \lambda_2 (t)[ |b\rangle\langle b|, \rho(t) ] \nonumber \\ & + \Delta_1 (t) \left[ | c \rangle\langle a | \rho(t) | a \rangle\langle c | - \frac{1}{2} \left\{ \rho(t), | a \rangle\langle a| \right \} \right] \nonumber \\ & + \Delta_2 (t) \left[ | c \rangle\langle b | \rho(t) | b \rangle\langle c | - \frac{1}{2} \left\{ \rho(t), | b \rangle\langle b| \right \} \right]. \end{align} Jump operators: \begin{align} C_1 &= |c \rangle\langle a |,\\ C_2 &= |c \rangle\langle b |. \end{align} Populations: \begin{align} \rho_{aa} (t) &= e^{-D_1 (t)} \rho_{aa} (0), \\ \rho_{bb} (t) &= e^{-D_2 (t)} \rho_{bb} (0), \\ \rho_{cc} (t) &= \Big[ 1 - e^{-D_1 (t)} \Big] \rho_{aa} (0) + \Big[ 1 - e^{-D_2 (t)} \Big] \rho_{bb} (0) \nonumber \\ & \quad + \rho_{cc} (0). \end{align} Coherences: \begin{align} \rho_{ab} (t) &= e^{-[i L_1 (t) + i L_2 (t) + D_1 (t)/2 + D_2 (t)/2] } \rho_{ab} (0), \\ \rho_{ac} (t) &= e^{-[i L_1 (t) + D_1 (t)/2] } \rho_{ac} (0), \\ \rho_{bc} (t) &= e^{-[i L_2 (t) + D_2 (t)/2] } \rho_{bc} (0). \end{align} \subsection*{Three-level atom: Ladder-system} Master equation: \begin{align} \dot \rho(t) = & \frac{1}{i} \lambda_1 (t)[ |a\rangle\langle a|, \rho(t) ] + \frac{1}{i} \lambda_2 (t)[ |b\rangle\langle b|, \rho(t) ] \nonumber \\ & + \Delta_1 (t) \left[ | b \rangle\langle a | \rho(t) | a \rangle\langle b | - \frac{1}{2} \left\{ \rho(t), | a \rangle\langle a| \right \} \right] \nonumber \\ & + \Delta_2 (t) \left[ | c \rangle\langle b | \rho(t) | b \rangle\langle c | - \frac{1}{2} \left\{ \rho(t), | b \rangle\langle b| \right \} \right]. \end{align} Jump operators: \begin{align} C_1 &= |b \rangle\langle a |,\\ C_2 &= |c \rangle\langle b |. \end{align} Populations: \begin{align} \rho_{aa} (t) &= e^{-D_1 (t)} \rho_{aa} (0), \\ \rho_{bb} (t) &= e^{-D_2 (t)} \int_0^t \textrm{d} s \, \Delta_1 (s) e^{-D_1 (s)+D_2(s)} \rho_{aa} (0) \nonumber \\ &\quad + e^{-D_2 (t)} \rho_{bb} (0), \\ \rho_{cc} (t) &= \Big[ 1 - e^{-D_1 (t)} - e^{-D_2 (t)} \nonumber \\ &\quad \times \int_0^t \textrm{d} s \, \Delta_1 (s) e^{-D_1 (s)+D_2(s)} \Big] \rho_{aa} (0) \nonumber \\ &\quad + \Big[ 1 - e^{-D_2 (t) } \Big] \rho_{bb} (0) + \rho_{cc} (0). \end{align} Coherences: \begin{align} \rho_{ab} (t) &= e^{-[i L_1 (t) - i L_2 (t) - D_1 (t)/2 - D_2 (t)/2] } \rho_{ab} (0), \\ \rho_{ac} (t) &= e^{-[i L_1 (t) + D_1 (t)/2 ] } \rho_{ac} (0), \\ \rho_{bc} (t) &= e^{-[i L_2 (t) + D_2 (t)/2 ] } \rho_{bc} (0). \end{align} \end{document}
\begin{document} \begin{abstract} We consider a $4$-dimensional Riemannian manifold $M$ equip\-ped with a circulant structure $q$, which is an isometry with respect to the metric $g$ and $q^{4}=\textrm{id}$, $q^{2}\neq \pm \textrm{id}$. For such a manifold $(M, g, q)$ we obtain some assertions for the sectional curvatures of $2$-planes. We construct an example of such a manifold on a Lie group and we find some of its geometric characteristics. \end{abstract} \title{Curvature properties of $4$-dimensional\ Riemannian manifolds with a circulant\ structure} \textbf{Mathematics Subject Classification (2010)}: 53C15, 53B20, 15B05, 22E60 \textbf{Keywords}: Riemannian manifold, Riemannian metric, sectional curvature, circulant matrix, Lie group, Killing metric \section*{Introduction} The circulant matrices are well-studied (for example \cite{8}, \cite{152}). They have application to Vibration analysis, Graph theory, Linear codes, Geometry (for example \cite{13}, \cite{72}, \cite{2}). The study of manifolds with additional structures plays an important role in differential geometry. In such manifolds substantial results are associated with the sectional curvatures of some characteristic $2$-planes of the tangent space of the manifolds (for example \cite{22}, \cite{132}, \cite{52}). In the present paper we consider some curvature properties of $4$-dimen\-sional Riemannian manifolds with a circulant structure $q$ with $q^{4}=\textrm{id}$, which is an isometry with respect to the metric $g$. We continue research made in \cite{1} for such manifolds and construct an example of these manifolds. The paper is organized as follows. In Sect. \ref{sec:1} we give some necessary facts from \cite{1} about a $4$-dimensional differentiable manifold $M$ with a Riemannian metric $g$, equipped with a circulant structure $q$, which is an isometry with respect to the metric $g$ and $q^{4}= \textrm{id}$, $q^{2}\neq \pm \textrm{id}$. In Sect.~\ref{sec:2} we establish that the sectional curvatures of the $2$-planes $\{u, qu\}$ and $\{u, q^{2}u\}$ are expressed by the angles $\angle(u, qu)$ and $\angle(u, q^{2}u)$, respectively. The main results here are Theorem~\ref{th6} and Theorem~\ref{th5}. We obtain relations between the sectional curvatures of some characteristic $2$-planes in the tangent space on the manifold $(M, g, q)$. In Sect.~\ref{sec:3} we construct an example of such a manifold on a Lie group and we find some of its geometric characteristics. \section{Preliminaries}\label{sec:1} In this section we recall facts from \cite{1}, which are necessary for our future consideration. Let $M$ be a $4$-dimensional Riemannian manifold with a metric $g$. Let $q$ be an endomorphism in the tangent space $T_{p}M$, $p\in M$ on the manifold $M$ with local coordinates given by the circulant matrix \begin{equation}\label{f4} (q_{i}^{j})=\begin{pmatrix} 0 & 1 & 0 & 0\\ 0 & 0 & 1 & 0 \\ 0 & 0 & 0 & 1\\ 1 & 0 & 0 & 0\\ \end{pmatrix}. \end{equation} Then \begin{equation}\label{q4} q^{4}=\textrm{id},\qquad q^{2}\neq\pm \textrm{id}. \end{equation} We suppose that $g$ is positive definite metric and the structure $q$ of the manifold $M$ is an isometry with respect to the metric $g$, i.e. \begin{equation}\label{2.1} g(qx, qy)=g(x, y). \end{equation} Anywhere in this work $x, y, z, u$ will stand for arbitrary elements of the algebra of the smooth vector fields on $M$ or vectors in the tangent space $T_{p}M$. The Einstein summation convention is used, the range of the summation indices being always $\{1, 2, 3, 4\}$. 
We denote by $(M, g, q)$ the manifold $M$ equipped with the metric $g$ and the structure $q$. Easily finding that \eqref{f4} and \eqref{2.1} imply a circulant matrix of components of $g$. A basis of type $\{x, qx, q^{2}x, q^{3}x\}$ of $T_{p}M$ is called a $q$-\textit{basis}. In this case we say that \textit{the vector $x$ induces a $q$-basis of} $T_{p}M$. If a vector $x$ induces a $q$-basis, then for the angles $\angle(x,qx),\ \angle(x,q^{2}x)$, $\angle(qx,q^{2}x)$, $\angle(qx,q^{3}x)$, $\angle(x,q^{3}x)$ and $\angle(q^{2}x,q^{3}x)$ we have \begin{equation*} \angle(x,qx)=\angle(qx,q^{2}x)=\angle(x,q^{3}x)=\angle(q^{2}x,q^{3}x),\quad \angle(x,q^{2}x)=\angle(qx,q^{3}x). \end{equation*} In our further research we will use an orthogonal $q$-basis. The existence of such bases is proved in \cite{1}. \section{Some curvature properties}\label{sec:2} Let $\nabla$ be the Riemannian connection of the metric $g$ on $(M, g, q)$. The curvature tensor $R$ of $\nabla$ is determined by $R(x, y)z=\nabla_{x}\nabla_{y}z-\nabla_{y}\nabla_{x}z-\nabla_{[x,y]}z$. The tensor of type $(0, 4)$ associated with $R$ is defined as follows $$R(x, y, z, u)=g(R(x, y)z,u).$$ If we denote $P=q^{2}$, then the conditions \eqref{q4} and \eqref{2.1} imply $P^{2}=\textrm{id},$ $\ P\neq \pm \textrm{id}$, $g(Px, Py)=g(x,y)$. Thus, $(M, g, P)$ is a Riemannian manifold with an almost product structure $P$. It follows from \eqref{f4} that $\textrm{tr} P=0$. For such manifolds is valid Staikova-Gribachev classification (\cite{51}). The class $W_{0}$ defined by $\nabla P=0$ in this classification is common to all classes. Every manifold in this class satisfies the identity $R(x, y, Pz, Pu)=R(x, y, z, u)$. In \cite{1} it is proved analogous identity \begin{equation}\label{R1} R(x, y, qz, qu)=R(x, y, z, u), \end{equation} for a manifold $(M, g, q)$ with the condition $\nabla q=0$. By using \eqref{R1} and the symmetries of $R$, it is easy to find that \begin{equation}\label{R} R(qx, qy, qz, qu)=R(x, y, z, u). \end{equation} Since the latter equality follows from \eqref{R1}, the class of manifolds $(M, g, q)$ with the condition \eqref{R} is more general than the class $(M, g, q)$ with the condition \eqref{R1}. If $\{x, y\}$ is a non-degenerate $2$-plane spanned by vectors $x, y \in T_{p}M$, then its sectional curvature is (\cite{11}) \begin{equation}\label{3.3} \mu(x,y)=\frac{R(x, y, x, y)}{g(x, x)g(y, y)-g^{2}(x, y)}\ . \end{equation} \begin{theorem}\label{predl} Let $(M, g, q)$ be a manifold with property \eqref{R}. If a vector $x$ induces a $q$-basis, then for the sectional curvatures of the basic $2$-planes we have \begin{equation}\label{3.21} \mu(x,qx)=\mu(qx,q^{2}x)=\mu(q^{2}x,q^{3}x)=\mu(q^{3}x,x), \end{equation} \begin{equation}\label{3.31} \mu(x,q^{2}x)=\mu(qx,q^{3}x). \end{equation} \end{theorem} \begin{proof} From (\ref{R}) we have \begin{equation}\label{Rv1} R(x,y,z,u)=R(qx,qy,qz,qu)=R(q^{2}x,q^{2}y,q^{2}z,q^{2}u). \end{equation} In \eqref{Rv1} we substitute 1) $qx$ for $y$, $x$ for $z$, $qx$ for $u$, and we get \begin{equation}\label{slRv1} R(x,qx,x,qx)=R(qx,q^{2}x,qx,q^{2}x )=R(q^{2}x,q^{3}x,q^{2}x,q^{3}x), \end{equation} 2) $q^{3}x$ for $y$, $x$ for $z$, $q^{3}x$ for $u$, and we obtain \begin{equation}\label{dop2} R(x, q^{3}x, x, q^{3}x)=R(x, qx, x, qx), \end{equation} 3) $q^{2}x$ for $y$, $x$ for $z$, $q^{2}x$ for $u$, then \begin{equation}\label{dop4} R(x,q^{2}x,x,q^{2}x)=R(qx,q^{3}x,qx,q^{3}x). \end{equation} The equality \eqref{3.21} follows from \eqref{2.1}, \eqref{3.3}, \eqref{slRv1} and \eqref{dop2}. 
In a similar way, from \eqref{2.1}, \eqref{3.3} and \eqref{dop4} we get \eqref{3.31}. \end{proof} Let $x$ induce a $q$-basis $\{x, qx, q^{2}x, q^{3}x\}$. Due to Theorem~\ref{predl} there are only two different basic sectional curvatures. Therefore, we consider only the sectional curvatures $\mu(x, qx)$ and $\mu(x, q^{2}x)$. Let us note that if $y \in\{x, qx\}$ and $y\neq x$, then $qy \notin\{x, qx\}$. Consequently, we can say that the sectional curvature $\mu(x, qx)$ depends on $\varphi=\angle(x, qx)$. Analogously, $\mu(x, q^{2}x)$ depends on $\theta=\angle(x, q^{2}x)$. We denote $\mu(x, qx)=\mu_{1}(\varphi)$ and $\mu(x, q^{2}x)=\mu_{2}(\theta)$. \begin{theorem}\label{th4} Let $(M, g, q)$ be a manifold with property \eqref{R}. If vectors $x$ and $u$ induce $q$-bases and $\{x, qx, q^{2}x, q^{3}x\}$ is orthonormal, then \begin{equation}\label{mu-r2} \begin{split} \mu_{1}(\varphi)-\mu_{1}(\frac{\pi}{2})&=\frac{\cos\varphi}{1-\cos^{2}\varphi}\Big(-2R(x, qx, q^{2}x, x)\\&+2(\cos\varphi) R(x, qx, qx, q^{2}x)\\&-(\cos\varphi) R(qx, q^{2}x, q^{3}x, x)\\&-2R(qx, q^{2}x, q^{2}x, x)\Big), \end{split} \end{equation} \begin{equation}\label{mu-r} \begin{split} \mu_{2}(\theta)-\mu_{2}(\frac{\pi}{2})&=\frac{2\cos\theta}{1-\cos^{2}\theta}\Big(-2R(x, qx, qx, q^{2}x)\\&+(\cos\theta) R(qx, q^{2}x, q^{3}x, x)\Big), \end{split} \end{equation} where $\varphi=\angle(u, qu)$, $\theta=\angle(u, q^{2}u)$. \end{theorem} \begin{proof} In \eqref{Rv1} we substitute 1)\ $qx$ for $y$, $q^{2}x$ for $z$ and $x$ for $u$, then \begin{equation}\label{dop3} R(x, qx, q^{2}x, x)=R(q^{2}x, q^{3}x, x, q^{2}x)=R(q^{3}x, x, qx, q^{3}x), \end{equation} 2)\ $qx$ for $y$, $qx$ for $z$ and $q^{2}x$ for $u$, and we have \begin{equation}\label{dop5} R(x, qx, qx, q^{2}x)=R(q^{2}x, q^{3}x, q^{3}x, x)=R(q^{3}x, x, x, qx), \end{equation} 3)\ $qx$ for $y$, $q^{2}x$ for $z$ and $q^{3}x$ for $u$, then \begin{equation}\label{dop6} R(qx, q^{2}x, q^{3}x, x)=R(x, qx, q^{2}x, q^{3}x), \end{equation} 4)\ $qx$ for $y$, $qx$ for $z$ and $q^{3}x$ for $u$, and we get \begin{equation}\label{dop7} R(qx, q^{2}x, q^{2}x, x)=R(x, qx, qx, q^{3}x)=R(q^{3}x, x, x, q^{2}x), \end{equation} 5)\ $q^{2}x$ for $y$, $qx$ for $z$ and $q^{3}x$ for $u$, and we find \begin{equation}\label{dop8} R(x, q^{2}x, qx, q^{3}x)=0. \end{equation} Let $u=\alpha x+\beta qx +\gamma q^{2}x+\delta q^{3}x$, where $\alpha,\beta,\gamma, \delta \in \mathbb{R}$. From \eqref{f4} we get $qu=\delta x+\alpha qx +\beta q^{2}x + \gamma q^{3}x$, $q^{2}u=\gamma x+\delta qx +\alpha q^{2}x +\beta q^{3}x$ and \\$q^{3}u=\beta x+\gamma qx +\delta q^{2}x + \alpha q^{3}x$. 
Then, by using the linear properties of the curvature tensor $R$ and having in mind \eqref{Rv1}, \eqref{dop2}, \eqref{dop4}, \eqref{dop3} -- \eqref{dop8}, we obtain \begin{align*} R(u,qu,u,qu)&=\Big((\alpha^{2}-\beta\delta)^{2}+(\delta^{2}-\alpha\gamma)^{2}+(\beta^{2}-\alpha\gamma)^{2}+(\gamma^{2}-\beta\delta)^{2}\Big)R_{1}\\&+2\Big((\alpha\beta-\gamma\delta)(\gamma^{2}-\alpha^{2})+(\beta\gamma-\delta\alpha)(\delta^{2}-\beta^{2})\Big)R_{2}\\&+\Big((\alpha\beta-\gamma\delta)^{2}+(\beta\gamma-\delta\alpha)^{2}\Big)R_{3}\\&+2(\alpha^{2}+\gamma^{2}-2\beta\delta)(\delta^{2}+\beta^{2}-2\alpha\gamma)R_{4}\\&+2\Big((\alpha^{2}-\beta\delta)(\gamma^{2}-\beta\delta)+(\beta^{2}-\alpha\gamma)(\delta^{2}-\alpha\gamma)\Big)R_{5}\\&+2\Big((\alpha\beta-\gamma\delta)(\delta^{2}-\beta^{2})+(\beta\gamma-\delta\alpha)(\alpha^{2}-\gamma^{2})\Big)R_{6},\\ R(u,q^{2}u,u,q^{2}u)&=2\Big((\alpha\delta-\beta\gamma)^{2}+(\alpha\beta-\gamma\delta)^{2}\Big)R_{1}\\&+4\Big((\alpha\delta-\beta\gamma)(\gamma^{2}-\alpha^{2})+(\alpha\beta-\delta\gamma)(\delta^{2}-\beta^{2})\Big)R_{2}\\&+\Big((\alpha^{2}-\gamma^{2})^{2}+(\beta^{2}-\delta^{2})^{2}\Big)R_{3}\\&-2\Big((\alpha\beta-\gamma\delta)^{2}+(\beta\gamma-\alpha\delta)^{2}\Big)R_{5}\\&+4\Big((\beta^{2}-\delta^{2})(\alpha\delta-\beta\gamma)+(\alpha\beta-\gamma\delta)(\gamma^{2}-\alpha^{2})\Big)R_{6} , \end{align*} where \begin{equation}\label{r1-6} \begin{split} R_{1}&=R(x, qx, x, qx), \qquad R_{2}=R(x, qx, q^{2}x, x),\\ R_{3}&=R(x, q^{2}x, x, q^{2}x),\quad R_{4}=R(x, qx, qx, q^{2}x),\\ R_{5}&=R(qx, q^{2}x, q^{3}x, x),\ R_{6}=R(qx, q^{2}x, q^{2}x, x). \end{split} \end{equation} Then \begin{equation}\label{r+r} \begin{split} R(u,qu,u,qu)+\frac{1}{2}R(u,q^{2}u,u,q^{2}u)&=K_{1} R_{1}+ K_{2} R_{2}+K_{3} R_{3}\\&+K_{4} R_{4}+K_{5} R_{5}+K_{2} R_{6}, \end{split} \end{equation} where \begin{align} \label{K}\nonumber K_{1}&=(\alpha^{2}-\beta\delta)^{2}+(\delta^{2}-\alpha\gamma)^{2}+(\beta^{2}-\alpha\gamma)^{2}+(\gamma^{2}-\beta\delta)^{2}\\\nonumber &+(\alpha\delta-\beta\gamma)^{2}+(\alpha\beta-\gamma\delta)^{2},\\ \nonumber K_{2}&=2\Big((\alpha\beta-\gamma\delta)(\gamma^{2}-\alpha^{2})+(\beta\gamma-\delta\alpha)(\delta^{2}-\beta^{2})\\\nonumber &+(\alpha\delta-\beta\gamma)(\gamma^{2}-\alpha^{2})+(\alpha\beta-\delta\gamma)(\delta^{2}-\beta^{2})\Big),\\ K_{3}&=(\alpha\beta-\gamma\delta)^{2}+(\beta\gamma-\delta\alpha)^{2}\\\nonumber &+\frac{1}{2}\Big((\alpha^{2}-\gamma^{2})^{2}+(\beta^{2}-\delta^{2})^{2}\Big), \end{align}\begin{align*} K_{4}&=2(\alpha^{2}+\gamma^{2}-2\beta\delta)(\delta^{2}+\beta^{2}-2\alpha\gamma),\\ K_{5}&=2\Big((\alpha^{2}-\beta\delta)(\gamma^{2}-\beta\delta)+(\beta^{2}-\alpha\gamma)(\delta^{2}-\alpha\gamma)\Big)\\ &-(\alpha\beta-\gamma\delta)^{2}-(\beta\gamma-\alpha\delta)^{2}.\end{align*} Since the $q$-basis $\{x, qx, q^{2}x, q^{3}x\}$ is orthonormal, we have \begin{equation}\nonumber g(u, u)= g(qu, qu)=\alpha^{2}+\beta^{2}+\gamma^{2}+\delta^{2}, \end{equation} \begin{equation}\nonumber g(u,qu)= \alpha\delta+\alpha\beta+\beta\gamma+\delta\gamma,\quad g(u, q^{2}u)=2(\alpha\gamma+\delta\beta). \end{equation} Due to \eqref{2.1} and \eqref{3.3} we get \begin{equation}\nonumber \mu(u,qu)=\frac{R(u, qu, u, qu)}{g^{2}(u, u)-g^{2}(u, qu)}\ ,\quad\mu(u,q^{2}u)=\frac{R(u, q^{2}u, u, q^{2}u)}{g^{2}(u, u)-g^{2}(u, q^{2}u)}\ . 
\end{equation} We suppose that $g(u, u)=1$ and we obtain \begin{equation}\label{mu3} \mu(u,qu)=\frac{R(u, qu, u, qu)}{1-\cos^{2}\varphi}\ ,\quad \mu(u,q^{2}u)=\frac{R(u, q^{2}u, u, q^{2}u)}{1-\cos^{2}\theta}\ , \end{equation} \begin{equation}\label{alfa-delta} \alpha^{2}+\beta^{2}+\gamma^{2}+\delta^{2}=1,\ \alpha\delta+\alpha\beta+\beta\gamma+\delta\gamma=\cos\varphi, \ 2(\alpha\gamma+\delta\beta)=\cos\theta. \end{equation} From \eqref{alfa-delta} we express $\alpha, \beta, \gamma, \delta$ by $\cos\varphi$ and $\cos\theta$. Then, taking into account \eqref{K}, we get \begin{align*} &K_{1}=1-\cos^{2}\varphi,\quad K_{2}=-2\cos\varphi(1-\cos\theta),\quad K_{3}=\frac{1}{2}(1-\cos^{2}\theta), \\& K_{4}=2(-\cos\theta+\cos^{2}\varphi), \quad K_{5}=\cos^{2}\theta -\cos^{2}\varphi. \end{align*} Thus, \eqref{r+r} and \eqref{mu3} imply \begin{equation}\label{mu+mu} \begin{split} (1-\cos^{2}\varphi)\mu(u,qu)&+\frac{1}{2}(1-\cos^{2}\theta)\mu(u,q^{2}u)=\\&(1-\cos^{2}\varphi)R_{1} -2\cos\varphi(1-\cos\theta)R_{2}\\&+\frac{1}{2}(1-\cos^{2}\theta)R_{3}+2(-\cos\theta+\cos^{2}\varphi)R_{4}\\&+(\cos^{2}\theta -\cos^{2}\varphi)R_{5}-2\cos\varphi(1-\cos\theta)R_{6}. \end{split} \end{equation} In \eqref{mu+mu} first we substitute $\varphi=\frac{\pi}{2}$ and then $\theta=\frac{\pi}{2}$. Thus we obtain \eqref{mu-r} and \eqref{mu-r2}. \end{proof} \begin{theorem}\label{th6} Let $(M,g, q)$ be a manifold with property \eqref{R}. If a vector $u$ induces a $q$-basis, then the following equality is valid \begin{equation}\label{mu-r4} \begin{split} \mu_{1}(\varphi)&=\frac{1}{1-\cos^{2}\varphi}\Big((1-4\cos^{2}\varphi)\mu_{1}(\frac{\pi}{2})\\&+\frac{3}{4}(\cos\varphi+2\cos^{2}\varphi)\mu_{1}(\frac{\pi}{3})\\&+\frac{3}{4}(2\cos^{2}\varphi-\cos\varphi)\mu_{1}(\frac{2\pi}{3})\Big), \end{split} \end{equation} where $\varphi=\angle(u, qu)$. \end{theorem} \begin{proof} In \eqref{mu-r2} first we substitute $\varphi=\frac{\pi}{3}$ and then $\varphi=\frac{2\pi}{3}$. Due to \eqref{r1-6} and having in mind that $\{x, qx, q^{2}x, q^{3}x\}$ is an orthonormal $q$-basis, we get \begin{align*} &3\Big(\mu_{1}(\frac{\pi}{3})-\mu_{1}(\frac{\pi}{2})\Big)=4\Big(-R_{2}+\frac{1}{2}R_{4}-\frac{1}{4}R_{5}-R_{6}\Big),\\ &3\Big(\mu_{1}(\frac{2\pi}{3})-\mu_{1}(\frac{\pi}{2})\Big)=4\Big(R_{2}+\frac{1}{2}R_{4}-\frac{1}{4}R_{5}+R_{6}\Big). \end{align*} From the latter equalities we find the tensors $R_{4}-\frac{1}{2}R_{5}$ and $R_{2} + R_{6}$. Then \eqref{mu-r2} implies \eqref{mu-r4}. \end{proof} \begin{theorem}\label{th5} Let $(M,g, q)$ be a manifold with property \eqref{R}. If a vector $u$ induces a $q$-basis, then the following equality is valid \begin{equation}\label{mu-r3} \begin{split} \mu_{2}(\theta)&=\frac{1}{1-\cos^{2}\theta}\Big((1-4\cos^{2}\theta)\mu_{2}(\frac{\pi}{2})\\&+\frac{3}{4}(\cos\theta+2\cos^{2}\theta)\mu_{2}(\frac{\pi}{3})\\&+\frac{3}{4}(2\cos^{2}\theta-\cos\theta)\mu_{2}(\dfrac{2\pi}{3})\Big), \end{split} \end{equation} where $\theta=\angle(u, q^{2}u)$. \end{theorem} \begin{proof} In \eqref{mu-r} first we substitute $\theta =\frac{\pi}{3}$ and then $\theta =\frac{2\pi}{3}$. Thus we get \begin{align*} &3\Big(\mu_{2}(\frac{\pi}{3})-\mu_{2}(\frac{\pi}{2})\Big)=2R_{5}-8R_{4} ,\\ &3\Big(\mu_{2}(\frac{2\pi}{3})-\mu_{2}(\frac{\pi}{2})\Big)=2R_{5}+8R_{4} . \end{align*} Taking into account the last system and \eqref{mu-r}, we obtain \eqref{mu-r3}. \end{proof} \begin{theorem}\label{th7} Let $(M,g, q)$ be a manifold with property \eqref{R1}. 
If a vector $u$ induces a $q$-basis, then the following equalities are valid \begin{equation}\label{mu-r5} \mu_{2}(\theta)=0,\quad \mu_{1}(\varphi)=\frac{(1-\cos\theta)^{2}}{1-\cos^{2}\varphi}\mu_{1}(\frac{\pi}{2}), \end{equation} where $\theta=\angle(u, q^{2}u)$ and $\varphi=\angle(u, qu)$. \end{theorem} \begin{proof} From \eqref{R1} we have \begin{equation}\label{q-q2-q3} R(x, y, qz, qu)=R(x, y, q^{2}z, q^{2}u). \end{equation} In \eqref{R1}, \eqref{q-q2-q3} we substitute 1) $qx$ for $y$, $x$ for $z$, $qx$ for $u$, and we get \begin{equation}\label{r11} R(x,qx,x,qx)=R(x,qx,qx,q^{2}x )=R(x,qx,q^{2}x,q^{3}x), \end{equation} 2) $q^{2}x$ for $y$, $x$ for $z$, $q^{2}x$ for $u$, and we obtain \begin{equation}\label{r12} R(x, q^{2}x, x, q^{2}x)=R(x, q^{2}x, qx, q^{3}x)=R(x, q^{2}x, q^{2}x, x), \end{equation} 3) $qx$ for $y$, $q^{2}x$ for $z$, $x$ for $u$, then \begin{equation}\label{r13} R(x, qx,q^{2}x,x)=R(x,qx,q^{3}x,qx)=R(x,qx,x,q^{2}x). \end{equation} Comparing the identities \eqref{dop6}, \eqref{dop7}, \eqref{r11}, \eqref{r12}, \eqref{r13} and having in mind \eqref{r1-6}, we obtain $R_{1}=R_{4}=R_{5}$ and $R_{2}=R_{3}=R_{6}=0$. Then \eqref{mu+mu} implies \eqref{mu-r5}. \end{proof} We note that Proposition~4.2 from \cite{1} is a particular case of Theorem~\ref{th7}. \section{A Lie group as a $4$-dimensional Riemannian manifold with a circulant structure}\label{sec:3} Let $G$ be a $4$-dimensional real connected Lie group and $\mathfrak{g}$ be its Lie algebra with a basis $\{x_{1}, x_{2},x_{3},x_{4}\}$. We introduce a structure $q$ and left invariant metric $g$ as follows \begin{equation}\label{lie} qx_{1}=x_{2} ,\ qx_{2}=x_{3},\ qx_{3}=x_{4},\ qx_{4}=x_{1}, \end{equation} \begin{equation}\label{g} g(x_{i}, x_{j})= \left\{ \begin{array}{ll} 0, & i\neq j \hbox{;} \\ 1, & i=j \hbox{.} \end{array} \right. \end{equation} Obviously, \eqref{f4} and \eqref{2.1} are valid. Therefore $(G, g, q)$ is a Riemannian manifold with \eqref{f4} and \eqref{2.1}. For the manifold $(G, g, q)$ we suppose that $g$ is a Killing metric, i.e. \begin{equation}\label{killing} g([x_{i}, x_{j}],x_{k})+ g([x_{i}, x_{k}],x_{j})=0. \end{equation} According to \eqref{killing} and the Jacobi identity for the commutators $[x_{i}, x_{j}]$ we obtain \begin{align}\label{skobki2}\nonumber [x_{1}, x_{2}]&=\lambda_{1}x_{3}+\lambda_{2}x_{4},\qquad [x_{1}, x_{3}]=-\lambda_{1}x_{2}+\lambda_{4}x_{4},\\ [x_{2}, x_{3}]&=\lambda_{1}x_{1}+\lambda_{3}x_{4} ,\qquad [x_{1}, x_{4}]=-\lambda_{2}x_{2}-\lambda_{4}x_{3},\\\nonumber [x_{2}, x_{4}]&=\lambda_{2}x_{1}-\lambda_{3}x_{3},\qquad [x_{3}, x_{4}]=\lambda_{4}x_{1}+\lambda_{3}x_{2}, \end{align} where $\lambda_{i}\in \mathbb{R}$. Vice versa, if \eqref{skobki2} are valid for a Riemannian manifold $(G, g, q)$, where the structure $q$ and the metric $g$ on the Lie group $G$ are determined by \eqref{f4} and \eqref{2.1}, then the Jacobi identity for commutators $[x_{i}, x_{j}]$ is satisfied and the metric $g$ is Killing. Therefore, we establish the truthfulness of the following \begin{theorem}\label{kt} Let $(G, g, q)$ be a $4$-dimensional Riemannian manifold, where $G$ is the connected Lie group with an associated Lie algebra $\mathfrak{g}$, determined by a global basis $\{x_{i}\}$ of left invariant vector fields, and $q$ and $g$ are the structure and the metric, determined by \eqref{lie} and \eqref{g}. 
Then $(G, g, q)$ is a Riemannian manifold with a circulant structure $q$ and a Killing metric $g$, which satisfy \eqref{f4} and \eqref{2.1} if and only if $G$ belongs to a Lie group, determined by \eqref{skobki2}. \end{theorem} Further, $(G, g, q)$ will stand for the Riemannian manifold determined by the conditions of Theorem~\ref{kt}. Since $g$ is a Killing metric, then the components of $R$ are (\cite{32}) \begin{equation}\label{r} R_{ijkh}=-\frac{1}{4}g\Big([x_{i},x_{j}],[x_{k},x_{h}]\Big). \end{equation} According to \eqref{g}, \eqref{skobki2} and \eqref{r} we calculate the following components of the curvature tensor $R$: \begin{align}\label{r1}\nonumber R_{1212}&=-\frac{1}{4}(\lambda_{1}^{2}+\lambda_{2}^{2}),\quad R_{1414}=-\frac{1}{4}(\lambda_{2}^{2}+\lambda_{4}^{2}),\\\nonumber R_{2323}&=-\frac{1}{4}(\lambda_{1}^{2}+\lambda_{3}^{2}), \quad R_{3434}=-\frac{1}{4}(\lambda_{3}^{2}+\lambda_{4}^{2}),\\ R_{1313}&=-\frac{1}{4}(\lambda_{1}^{2}+\lambda_{4}^{2}),\quad R_{2424}=-\frac{1}{4}(\lambda_{2}^{2}+\lambda_{3}^{2}),\\\nonumber R_{1213}&=R_{2434}=-\frac{1}{4}\lambda_{2}\lambda_{4}, \quad R_{2324}=R_{1314}=-\frac{1}{4}\lambda_{1}\lambda_{2},\\\nonumber R_{1424}&=R_{1323}=-\frac{1}{4}\lambda_{3}\lambda_{4}, \quad R_{3134}=R_{2124}=-\frac{1}{4}\lambda_{1}\lambda_{3},\\\nonumber R_{1214}&=R_{3234}=\frac{1}{4}\lambda_{1}\lambda_{4}, \qquad R_{1434}=R_{2123}=\frac{1}{4}\lambda_{2}\lambda_{3}. \end{align} The rest of nonzero components are obtained from the properties $$R_{ijks}=R_{ksij},\ R_{ijks}=-R_{jiks}=-R_{ijsk}.$$ \begin{prop}\label{kt2} Let $(G, g, q)$ be a manifold determined by the conditions of Theorem~\ref{kt}. Then $(G, g, q)$ satisfies the identity \eqref{R} if and only if \begin{equation}\label{landa} \lambda_{1}=\varepsilon\lambda_{2}=\varepsilon\lambda_{3}=\lambda_{4},\ \varepsilon=\pm 1. \end{equation} \end{prop} \begin{proof} According to \eqref{lie} we obtain that \eqref{R} is equivalent to \begin{align*}\nonumber R_{1212}=R_{3434}=R_{2323}=R_{1414},\quad R_{1313}=R_{2424},\\ R_{1213}=R_{2324}=R_{1424}=R_{3134},\ R_{1214}=R_{1434}=R_{2123}=R_{3234}, \\\nonumber R_{1224}=R_{3123}=R_{3114}=R_{4234}, \ R_{1324}=0. \end{align*} Then \eqref{r1} implies \begin{align*} \lambda_{2}=\lambda_{3},\ \lambda_{1}=\lambda_{4},\ \lambda_{1}^{2}+\lambda_{4}^{2}=\lambda_{2}^{2}+\lambda_{3}^{2},\ \lambda_{1}\lambda_{4} = \lambda_{2}\lambda_{3}. \end{align*} So we obtain \eqref{landa}. \end{proof} From \eqref{r1} and \eqref{landa} we calculate \begin{align}\label{rlamda}\nonumber R_{1212}=R_{1414}=R_{2323}&=R_{3434}=R_{1313}=R_{2424}=-\frac{1}{2}\lambda_{1}^{2},\\ R_{1213}=R_{2434}=R_{2324}&=R_{1314}=R_{1424}=\\\nonumber &R_{3431}=R_{1323}=R_{2124}=-\frac{1}{4}\varepsilon\lambda_{1}^{2}, \\\nonumber R_{1214}=R_{3234}=R_{1434}&=R_{2123}=\frac{1}{4}\lambda_{1}^{2}. \end{align} Having in mind \eqref{rlamda} and the formulas \begin{equation*} \rho(y,z)=g^{ij}R(e_{i}, y, z, e_{j}),\qquad \tau=g^{ij}\rho(e_{i}, e_{j}), \end{equation*} we get the components of the Ricci tensor $\rho$ and the value of the scalar curvature $\tau$ as follows: \begin{align}\label{rho}\nonumber \rho_{11}=\rho_{22}=\rho_{33}=\rho_{44}=\frac{3}{2}\lambda_{1}^{2},\\ \rho_{12}=\rho_{14}=\rho_{23}=\rho_{34}=\frac{1}{2}\varepsilon\lambda_{1}^{2}, \\\nonumber \rho_{13}=\rho_{24}=-\frac{1}{2}\lambda_{1}^{2}, \end{align} \begin{equation}\label{tau2} \tau=6\lambda_{1}^{2}. 
\end{equation} By using \eqref{3.3} for the sectional curvatures of the basic $2$-planes we find \begin{equation}\label{mu4} \mu(x_{1},x_{2})=\mu(x_{1},x_{4})=\mu(x_{2},x_{3})= \mu(x_{2},x_{4})=\mu(x_{1},x_{3})=-\frac{1}{2}\lambda_{1}^{2}. \end{equation} Therefore, we arrive at the following \begin{prop}\label{kt3} Let $(G, g, q)$ be a manifold determined by the conditions of Theorem~\ref{kt}. If $(G, g, q)$ satisfies the identity \eqref{R}, then \begin{itemize} \item[(i)] The nonzero components of the curvature tensor $R$ and the Ricci tensor $\rho$ are \eqref{rlamda} and \eqref{rho}; \item[(ii)] The scalar curvature $\tau$ is \eqref{tau2}; \item[(iii)] $(G, g, q)$ is of constant sectional curvatures \eqref{mu4}, i.e. $(G, g, q)$ is conformally flat manifold. \end{itemize} \end{prop} According to \eqref{lie} we obtain that \eqref{R1} is equivalent to the equalities \begin{align*} R_{1212}=R_{1414}=R_{2323}=R_{3434}&=R_{1223}=R_{1241}=\\R_{4134}&=R_{1234}=R_{2334}= R_{2341},\\ R_{1313}=R_{2424}=R_{1324}=R_{1213}&=R_{1224}=R_{1431}=\\ R_{2441}&=R_{2423}=R_{2331}=R_{1334}=R_{2434}=0. \end{align*} Then, by using \eqref{r1} and \eqref{tau2} we have the following \begin{prop} Let $(G, g, q)$ be a manifold determined by the conditions of Theorem~\ref{kt}. Then the following propositions are equivalent: \begin{itemize} \item[(i)] $\lambda_{1}=\lambda_{2}=\lambda_{3}=\lambda_{4}=0$, i.e. $G$ is abelian; \item[(ii)] $(G, g, q)$ satisfies the identity \eqref{R1}; \item[(iii)] $\tau=0$, i.e. $(G, g, q)$ is a scalar flat manifold with respect to $\nabla$. \end{itemize} \end{prop} \section*{Acknowledgments} This work was partially supported by project NI15-FMI-004 of the Scientific Research Fund, Paisii Hilendarski University of Plovdiv, Bulgaria. \end{document}
\begin{document} \begin{abstract}In this paper we prove that Amdeberhan's conjecture on the largest size of $(t,t+1, t+2)$-core partitions is true. We also show that the number of $(t, t + 1, t + 2)$-core partitions with the largest size is $1$ or $2$ based on the parity of $t$. More generally, the largest size of $(t,t+1,\ldots, t+p)$-core partitions and the number of such partitions with the largest size are determined. \end{abstract} \title{On the largest size of $\boldsymbol{(t,t+1,\ldots, t+p)} \section{Introduction} In number theory and combinatorics, a \emph{partition} is a finite weakly decreasing sequence of positive integers $\lambda = (\lambda_1, \lambda_2, \ldots, \lambda_r)$. Let $\mid \lambda \mid=\sum_{1\leq i\leq r}\lambda_i$. The positive integer $\mid \lambda \mid$ is called the \emph{size} of the partition $\lambda$. A partition $\lambda$ could be represented by its Young diagram, which is a collection of boxes arranged in left-justified rows with $\lambda_i$ boxes in the $i$-th row. For the $(i, j)$-box, we can associate its \emph{hook length}, denoted by $h(i, j)$, which is the number of boxes exactly to the right, or exactly below, or the box itself. For example, the following are the Young diagram and hook lengths of the partition $(6, 4, 2)$. \begin{figure} \caption{The Young diagram of the partition $(6,4,2)$ and the hook lengths of corresponding boxes. } \end{figure} Let $t$ be a positive integer. A partition is called a \emph{$t$-core partition} if none of its hook lengths is divisible by $t$. For example, we can see that $\lambda=(6,4,2)$ is a $3$-core partition from Figure \textbf{$1$}. Furthermore, a partition is called a $(t_1,t_2,\ldots, t_m)$-core partition if it is simultaneously a $t_1$-core, a $t_2$-core, $\ldots$, a $t_m$-core partition. A number of methods, from several areas of mathematics, have been used in the study of $t$-core and $(t_1,t_2,\ldots, t_m)$-core partitions. Granville and Ono \cite{gran} proved that for given positive integers $n$ and $t\geq 4$, there always exists a $t$-core partition with size $n$. It was showed by Anderson \cite{and} that the number of $(t_1,t_2)$-core partitions is $\frac{1}{t_1+t_2} \binom{t_1+t_2}{t_1}$ when $t_1$ and $t_2$ are coprime to each other. Recently, a result obtained by Olsson and Stanton \cite{ols} was that the largest size of $(t_1,t_2)$-core partitions is $ \frac{({t_1}^2-1)({t_2}^2-1)}{24}$ when $t_1$ and $t_2$ are relatively prime. But for general $(t_1,t_2,\ldots, t_m)$-core partitions, what we know is quite few. We prove the following result, which verifies and generalizes the conjecture of Amdeberhan \cite{tamd} on the largest size of $(t,t+1, t+2)$-core partitions: \begin{theorem} \label{main} Let $t$ and $p$ be positive integers. Suppose that $t=pn+d$, where $1\leq d\leq p$ and $n\geq 0.$ Then the largest size of $(t, t + 1, \ldots, t + p)$-core partitions is \begin{eqnarray*} &\max\{\binom{n+2}{2}[\frac{d}{2}](d-[\frac{d}{2}])+\binom{n+2}{3}(p^2n+pd-p^2)-3\binom{n+2}{4}p^2,&\\ &\binom{n+1}{2}(p-[\frac{p-d}{2}])(d+[\frac{p-d}{2}])+\binom{n+1}{3}(p^2n+pd-p^2)-3\binom{n+1}{4}p^2 \},&\end{eqnarray*} where $\max\{x,\ y\}$ denotes the maximal element in $\{x,\ y\}$. The number of $(t, t + 1, \ldots, t + p)$-core partitions with the largest size is at most $4$. \end{theorem} \begin{cor} (Cf. Conjecture 11.2 of \cite{tamd}.) 
The largest size $g(t)$ of $(t, t + 1, t + 2)$-core partitions equals to: \[ g(t) = \begin{cases} \ \ \ \ \ \ \ n \binom{n+1}{3}, \ \ \ \ \ \ \ \ \ \ \ \text{if} \ t = 2n-1;\\ ( n+1) \binom{n+1}{3}+\binom{n+2}{3},\ \text{if} \ t = 2n. \end{cases} \] \end{cor} \section{The $\boldsymbol{\beta}$-sets of $\boldsymbol{(t,t+1,\ldots, t+p)}$-core partitions} Let $\lambda = (\lambda_1, \lambda_2, \ldots, \lambda_r)$ be a partition whose corresponding Young diagram has $r$ rows. The \emph{$\beta$-set} of the partition $\lambda$ is denoted by $$\beta(\lambda)=\{h(i,1) : 1 \leq i \leq r\},$$ which is the set of hook lengths of boxes in the first column of the corresponding Young diagram. It is easy to see that $h(1,1)> h(2,1)>\cdots>h(r,1)>0$ and thus $\beta(\lambda)\subseteq \{0,1, 2, \ldots , h(1,1)\}$. Let $\beta(\lambda)'$ be the complement of $\beta(\lambda)$ in $\{0,1, 2, \ldots , h(1,1)\}$ and $H(\lambda)$ be the multiset of hook lengths of $\lambda$. Then $\beta(\lambda)\subseteq H(\lambda).$ We know $0\in \beta(\lambda)'$ since $0\notin \beta(\lambda)$. It is easy to see that $\lambda$ is a $t$-core partition if and only if $H(\lambda)$ doesn't contain any multiple of $t$. The following results are well-known and easy to prove: \begin{lemma} \label{thm1} (\cite{berge}) The partition $\lambda$ is uniquely determined by its $\beta$-set. (1) Suppose $\lambda = (\lambda_1, \lambda_2, \ldots, \lambda_r)$. Then $\lambda_i=h(i,1)-r+i$ for $ 1 \leq i \leq r$. Thus the size of $\lambda$ equals to $\mid \lambda \mid=\sum_{x\in \beta(\lambda)}{x}-\binom{ \#\beta(\lambda) }{2}$, where $\# \beta(\lambda) $ denotes the number of elements in $\beta(\lambda)$; (2) $H(\lambda) = \{x - x' : x\in \beta(\lambda), \ x' \in \beta(\lambda)' ,\ x > x'\}$. \end{lemma} \noindent\textbf{Remark.} Any finite set of some positive integers could be a $\beta$-set of some partition. Actually, by Lemma \ref{thm1}, it is easy to see that, given any finite set $S$ of some positive integers, we can recover a partition by considering $S$ as a $\beta$-set. Then we know there is a bijection between partitions and finite sets of some positive integers. Any finite positive integer set could be a $\beta$-set of some partition. But to be a $\beta$-set of some $t$-core partition, a finite positive integer set must satisfy the following condition. \begin{lemma} \label{a-mt} A partition $\lambda$ is a $t$-core partition if and only if for any $x\in \beta(\lambda)$ and any positive integer $m$ with $x\geq mt$, we have $x-mt \in \beta(\lambda)$. \end{lemma} \textbf{Proof.} $\Rightarrow$: Suppose that $\lambda$ is a $t$-core partition, $x\in \beta(\lambda)$,\ $m$ is a positive integer, and $x\geq mt$. By the definition of t-core partitions, we have $mt \notin H(\lambda) $ and thus $x>mt$. But we know $x-(x-mt)=mt \notin H(\lambda)$, $x\in \beta(\lambda)$, and $x>x-mt$. Then by Lemma \ref{thm1}(2), $x-mt$ couldn't be an element in $\beta(\lambda)'$. Thus we know $x-mt \in \beta(\lambda)$. $\Leftarrow$: Suppose that for any $x\in \beta(\lambda)$ and any positive integer $m$ with $x\geq mt$, we have $x-mt \in \beta(\lambda)$. This means that for any such $x$ and $m$ we have $x-mt\notin \beta(\lambda)'$. Thus for any $x\in \beta(\lambda), \ x' \in \beta(\lambda)', \ x > x'$, we know $x-x'$ couldn't be a multiple of $t$. Then by Lemma \ref{thm1}(2) we know $\lambda$ must be a $t$-core partition. $\square$ Throughout this paper, let $t$ and $p$ be positive integers. We have the following lemmas. 
\begin{lemma} \label{rep} Let $k$ be a positive integer. Then \begin{eqnarray*} && \{\sum \limits_{0\leq i \leq p}{c_i(t+i)}: c_i\in \textbf{Z},\ c_i\geq 0\ (0\leq i \leq p),\ \sum \limits_{0\leq i \leq p}{c_i}=k\}\\ &=& \{ \ x\in \textbf{Z}: kt\leq x \leq k(t+p) \}.\end{eqnarray*} \end{lemma} \textbf{Proof.} Suppose that $c_i\in \textbf{Z},\ c_i\geq 0\ (0\leq i \leq p)$ and $\sum_{0\leq i \leq p}{c_i}=k.$ Let $x=\sum_{0\leq i \leq p}{c_i(t+i)}$. It is easy to see that $$kt=\sum \limits_{0\leq i \leq p}{c_it}\leq x \leq \sum \limits_{0\leq i \leq p}{c_i(t+p)}= k(t+p).$$ On the other hand, suppose that $x\in \textbf{Z}$ and $kt\leq x \leq k(t+p)$. We will show by induction that $$x\in \{\sum \limits_{0\leq i \leq p}c_i(t+i): c_i\in \textbf{Z},\ c_i\geq 0\ (0\leq i \leq p),\ \sum \limits_{0\leq i \leq p}c_i=k\}.$$ First it is obvious that $$kt \in \{\sum \limits_{0\leq i \leq p}c_i(t+i): c_i\in \textbf{Z},\ c_i\geq 0\ (0\leq i \leq p),\ \sum \limits_{0\leq i \leq p}c_i=k\}.$$ Suppose that for $kt< x\leq k(t+p) $ we already have $x-1=\sum_{0\leq i \leq p}c_i(t+i)$ for some $c_i\in \textbf{Z},\ c_i\geq 0\ (0\leq i \leq p)$ and $ \sum_{0\leq i \leq p}c_i=k$. Now we have $c_p< k$ since $x-1< k(t+p)$. Then there must exist some $0\leq i_0 \leq p-1$ such that $c_{i_0}\geq 1$. Thus we have \begin{eqnarray*} x&=& 1+ \sum \limits_{0\leq i \leq p}c_i(t+i)\\ &=&\sum \limits_{0\leq i \leq p,\ i\neq i_0,\ i_0+1}c_i(t+i)+(c_{i_0}-1)(t+i_0)+(c_{i_0+1}+1)(t+i_0+1).\end{eqnarray*} It follows that $$x\in \{\sum \limits_{0\leq i \leq p}c_i(t+i): c_i\in \textbf{Z},\ c_i\geq 0\ (0\leq i \leq p),\ \sum \limits_{0\leq i \leq p}c_i=k\}.$$ Now we finish the induction and prove the lemma. $\square$ \begin{lemma} \label{linear} Let $\lambda$ be a $(t,t+1,\ldots, t+p)$-core partition. Suppose that $c_i\in \textbf{Z}$ and $ c_i\geq 0$ for $ 0\leq i \leq p$. Then $\sum_{0\leq i \leq p}c_i(t+i) \notin \beta(\lambda)$. \end{lemma} \textbf{Proof.} Let $k=\sum_{0\leq i \leq p}c_i$. We will prove this lemma by induction on $k$. If $k=0$, we have $\sum_{0\leq i \leq p}c_i(t+i)=0 \notin \beta(\lambda)$. Now assume that $k\geq 1$ and the result is true for $k-1$. Assume that the result is not true for $k$, i.e., there exist $c_i\in \textbf{Z},\ c_i\geq 0\ (0\leq i \leq p)$ such that $\sum_{0\leq i \leq p}c_i=k$ and $\sum_{0\leq i \leq p}c_i(t+i) \in \beta(\lambda)$. Then there must exist some $0\leq i_0 \leq p$ such that $c_{i_0}\geq 1$ since $\sum_{0\leq i \leq p}c_i=k\geq 1$. By Lemma \ref{a-mt}, $$\sum \limits_{0\leq i \leq p}c_i(t+i)-(t+i_0)= \sum \limits_{0\leq i \leq p,\ i\neq i_0}c_i(t+i)+(c_{i_0}-1)(t+i_0)\in \beta(\lambda)$$ since $\lambda$ is a $(t,t+1,\ldots, t+p)$-core partition. But by assumption we know $$\sum_{0\leq i \leq p,\ i\neq i_0}c_i(t+i)+(c_{i_0}-1)(t+i_0)\notin \beta(\lambda)$$ since $\sum_{0\leq i \leq p,\ i\neq i_0}c_i+(c_{i_0}-1)=k-1$, a contradiction! This means that we must have $$\sum_{0\leq i \leq p}c_i(t+i) \notin \beta(\lambda)$$ for $c_i\in \textbf{Z},\ c_i\geq 0\ (0\leq i \leq p)$ and $\sum_{0\leq i \leq p}c_i=k$. We finish the induction. $\square$ Let $[ x ]$ be the largest integer not greater than $x$. For $1\leq k \leq [\frac{t+p-2}{p}]$, let $$S_{k}=\{ x\in \textbf{Z} : (k-1)(t+p)+1\leq x \leq kt-1 \}.$$ Notice that for $1\leq k \leq [\frac{t+p-2}{p}]$, $S_k\neq\emptyset$ since $(k-1)(t+p)+1\leq kt-1$. We have the following characterization for $\beta$-sets of $(t,t+1,\ldots, t+p)$-core partitions. \begin{lemma} \label{set} Let $t$ and $p$ be positive integers. 
Suppose that $\lambda$ is a $(t,t+1,\ldots, t+p)$-core partition. Then $\beta(\lambda)$ must be a subset of $ \ \bigcup_{1\leq k \leq [\frac{t+p-2}{p}]} S_{k}$. \end{lemma} \textbf{Proof.} First we claim that for every $x\geq [\frac{t+p-2}{p}]t$, we have $x\notin \beta(\lambda)$: Suppose $x\geq [\frac{t+p-2}{p}]t$. Then there must exist some $k\geq [\frac{t+p-2}{p}]$ such that $kt\leq x < (k+1)t$. Thus we know $$ kt\leq x \leq (k+1)t-1\leq kt+ [\frac{t+p-2}{p}]p\leq k(t+p)$$ since $k\geq [\frac{t+p-2}{p}]$. By Lemma \ref{rep} we have $$x\in \{\sum \limits_{0\leq i \leq p}c_i(t+i): c_i\in \textbf{Z},\ c_i\geq 0\ (0\leq i \leq p),\ \sum \limits_{0\leq i \leq p}c_i=k\}.$$ Then by Lemma \ref{linear} we know $x\notin \beta(\lambda)$. The claim is proved. Now we know $\beta(\lambda)$ must be a subset of $\{x\in \textbf{Z} : 1\leq x\leq [\frac{t+p-2}{p}]t-1 \}$. By Lemma \ref{rep} and Lemma \ref{linear} we have $$\{ \ x\in \textbf{Z}: kt\leq x \leq k(t+p) \} \bigcap \beta(\lambda)=\emptyset$$ for every positive integer $k$. Hence $\beta(\lambda)$ must be a subset of $$\{ x\in \textbf{Z} :1\leq x\leq [\frac{t+p-2}{p}]t-1 \} \setminus (\mathop{\bigcup}_{1\leq k \leq [\frac{t+p-2}{p}]-1} {\{x\in \textbf{Z}: kt\leq x \leq k(t+p) \}}),$$ which equals to $\bigcup_{1\leq k \leq [\frac{t+p-2}{p}]} S_{k}$. $ \square$ By Lemma \ref{set} and Lemma \ref{thm1}, the next result is obvious. We mention that, the following result is also a corollary of Theorem \textbf{$1$} in \cite{and}. \begin{cor} \label{} Let $t$ and $p$ be positive integers. Then the number of $(t,t+1,\ldots, t+p)$-core partitions must be finite. \end{cor} \section{The largest size of $\boldsymbol{(t, t+1, \ldots, t+p)}$-core partitions} Let $\lambda$ be a $(t,t+1,\ldots, t+p)$-core partition. By Lemma \ref{set}, we know $\beta(\lambda)\subseteq \bigcup_{1\leq k \leq [\frac{t+p-2}{p}]} S_{k}$. Let $a_{k}=\# S_{k} $ be the number of elements in $S_k$ and $b_{\lambda,k}=\# (\beta(\lambda)\bigcap S_{k}) $ be the number of elements in $\beta(\lambda)\bigcap S_{k} $ for $1\leq k \leq [\frac{t+p-2}{p}]$. It is obvious that $b_{\lambda,k}\leq a_k$ for $1\leq k \leq [\frac{t+p-2}{p}]$. \begin{lemma} \label{a_k} Let $1\leq k \leq [\frac{t+p-2}{p}] $. Then $a_{k}=t-(k-1)p-1$. Thus $1\leq a_{[\frac{t+p-2}{p}]}\leq p$ and for $1\leq k \leq [\frac{t+p-2}{p}]-1 $, we have $a_{k}-a_{k+1}=p$. Additionally, for every $x\in S_{k+1}$ and $0\leq i\leq p$, we have $x-(t+i)\in S_{k}$. Furthermore, $\bigcup_{1\leq k \leq [\frac{t+p-2}{p}]} S_{k}$ is a $\beta$-set of some $(t,t+1,\ldots, t+p)$-core partition. \end{lemma} \textbf{Proof.} First we have $$a_{k}=kt-1-((k-1)(t+p)+1)+1=t-(k-1)p-1$$ for $1\leq k \leq [\frac{t+p-2}{p}] .$ Thus $$1\leq t-([\frac{t+p-2}{p}]-1)p-1=a_{[\frac{t+p-2}{p}]}\leq t-(\frac{t+p-2-(p-1)}{p}-1)p-1= p$$ and $$a_{k}-a_{k+1}=t-(k-1)p-1-(t-kp-1)=p.$$ Suppose that $x\in S_{k+1}$ for $1\leq k \leq [\frac{t+p-2}{p}]-1 $. This means that $$k(t+p)+1\leq x\leq (k+1)t-1.$$ Thus for $0\leq i\leq p$, we have $$(k-1)(t+p)+1\leq k(t+p)+1-(t+i)\leq x-(t+i)\leq (k+1)t-1-(t+i)\leq kt-1,$$ which means that $x-(t+i)\in S_{k}$. Then by Lemma \ref{a-mt} we know $\bigcup_{1\leq k \leq [\frac{t+p-2}{p}]} S_{k}$ must be a $\beta$-set of some $(t,t+1,\ldots, t+p)$-core partition. $\square$ \begin{lemma} \label{bk} Let $\lambda$ be a $(t,t+1,\ldots, t+p)$-core partition and $1\leq k \leq [\frac{t+p-2}{p}]-1 $. If $b_{\lambda,k+1}\neq 0$, then $b_{\lambda,k}-b_{\lambda,k+1}\geq p$. 
\end{lemma} \textbf{Proof.} Suppose that $1\leq k \leq [\frac{t+p-2}{p}]-1 $ and $b_{\lambda,k+1}\neq 0$. Let $$\beta(\lambda)\bigcap S_{k+1}=\{x_i: 1\leq i \leq b_{\lambda,k+1}\}$$ where $x_1 < x_2< \cdots <x_{b_{\lambda,k+1}}$. Then by Lemma \ref{a-mt} and Lemma \ref{a_k} we know $\{ x_1-(t+p), x_1-(t+p-1),\ldots, x_{1}-(t+1), x_1-t, x_2-t, \ldots, x_{b_{\lambda,k+1}}-t \}\subseteq \beta(\lambda)\bigcap S_{k}$ where $x_1-(t+p)< x_1-(t+p-1)<\cdots< x_{1}-(t+1)< x_1-t< x_2-t< \cdots< x_{b_{\lambda,k+1}}-t$. It follows that $\beta(\lambda)\bigcap S_{k}$ has at least $b_{\lambda,k+1}+ p$ different elements and thus $b_{\lambda,k}\geq b_{\lambda,k+1}+ p$. $\square$ Let $1\leq r \leq [\frac{t+p-2}{p}].$ Suppose that $c_1, c_2, \ldots, c_r$ are positive integers and $c_k\leq a_{k}$ for $1\leq k \leq r$. Let $\mu_{c_1, c_2, \ldots, c_r}$ be the partition whose $\beta$-set satisfies $$\beta(\mu_{c_1, c_2, \ldots, c_r})\subseteq \bigcup_{1\leq k \leq r} S_{k}$$ and $$\beta(\mu_{c_1, c_2, \ldots, c_r})\bigcap S_{k}= \{ x\in \textbf{Z} : kt-c_k\leq x \leq kt-1 \}$$ for $1\leq k \leq r.$ \begin{lemma} \label{mostnumbers} Suppose that $c_k\leq a_{k}$ for $1\leq k \leq r$. The partition $\mu_{c_1, c_2, \ldots, c_r}$ is a $(t,t+1,\ldots, t+p)$-core partition if and only if $c_k-c_{k+1}\geq p$ for $1\leq k \leq r-1$. \end{lemma} \textbf{Proof.} Suppose that $1\leq k \leq r-1$ and $x\in \beta(\mu_{c_1, c_2, \ldots, c_r})\bigcap S_{k+1}.$ This means that $(k+1)t-c_{k+1}\leq x \leq (k+1)t-1$. Thus for $0\leq i\leq p$, we have $$ (k+1)t-c_{k+1}-(t+p)\leq x-(t+i) \leq (k+1)t-1-t=kt-1.$$ Then by Lemma \ref{a-mt} and Lemma \ref{a_k} it is easy to see that $\mu_{c_1, c_2, \ldots, c_r}$ is a $(t,t+1,\ldots, t+p)$-core partition if and only if $kt-c_k\leq (k+1)t-c_{k+1}-(t+p)$ for $1\leq k \leq r-1$, which is equivalent to $c_k-c_{k+1}\geq p$ for $1\leq k \leq r-1$. $\square$ Let ${\gamma}_i= \mu_{i, i-p, i-2p, \ldots, i- [\frac{i-1}{p}] p}$ and $f(i)=\mid {\gamma}_i \mid$ be the size of ${\gamma}_i$ for $1\leq i \leq t-1$. By Lemma \ref{mostnumbers} ${\gamma}_i$ is a $(t,t+1,\ldots, t+p)$-core partition for $1\leq i \leq t-1$. For convenience, let ${\gamma}_0$ be the empty partition and $f(0)=0$. \begin{lemma} \label{f(pm+i+1)-f(pm+i)} Suppose that $1\leq i \leq p$, $m\geq 0$ and $pm+i\leq t-1.$ Then we have (1) $f(pm+i)-f(pm+i-1)=\binom{m+2}{2}(t-pm-2i+1)$; (2) $ f(pm+i)-f(pm)=\binom{m+2}{2}(it-ipm-i^2). $ \end{lemma} \textbf{Proof.} \textbf{(1)} First we know ${\gamma}_{pm+i}=\mu_{pm+i, p(m-1)+i, p(m-2)+i, \ldots, i }$ and $$ \# \beta({\gamma}_{pm+i})= \sum \limits_{0\leq j \leq m}(pj+i)=p\binom{m+1}{2}+(m+1)i.$$ Then by Lemma \ref{thm1}(1) we have \begin{eqnarray*} & &f(pm+i)-f(pm+i-1)\\ &=& \mid {\gamma}_{pm+i} \mid- \mid {\gamma}_{pm+i-1} \mid \\ &=& \sum \limits_{x\in \beta({\gamma}_{pm+i})}{x}-\binom{\# \beta({\gamma}_{pm+i}) }{2}-(\sum \limits_{y\in \beta({\gamma}_{pm+i-1})}{y}-\binom{\# \beta({\gamma}_{pm+i-1}) }{2}) \\ &=& \sum \limits_{1\leq k \leq m+1} (k t-p(m+1-k)-i)- \sum \limits_{1\leq k \leq m+1}(\# \beta({\gamma}_{pm+i})-k)\\ &=& \frac{m+1}{2}((m+2)t-pm-2i)- \sum \limits_{1\leq k \leq m+1}(p\binom{m+1}{2}+(m+1)i-k) \\ &=& \frac{m+1}{2}((m+2)t-pm-2i)- \frac{m+1}{2} (2p\binom{m+1}{2}+2(m+1)i-m-2)\\ &=& \frac{m+1}{2} (m+2)(t-pm-2i+1) \\ &=& \binom{m+2}{2}(t-pm-2i+1). \end{eqnarray*} \textbf{(2)} By (1) we know \begin{eqnarray*} f(pm+i)-f(pm) &=& \sum \limits_{1\leq l \leq i}(f(pm+l)-f(pm+l-1)) \\ &=& \sum \limits_{1\leq l \leq i} \binom{m+2}{2}(t-pm-2l+1) \\ &=& \binom{m+2}{2}(it-ipm-i^2). 
\end{eqnarray*} $\square$ \noindent\textbf{Remark.} Notice that Lemma \ref{f(pm+i+1)-f(pm+i)}(2) is also true for $i=0$. \begin{lemma} \label{pm+i} Let $0\leq i \leq p$ and $m\geq 0.$ Suppose that $pm+i\leq t-1.$ Then $$f(pm+i)=\binom{m+2}{2}(it-i p m-i^2) +\binom{m+2}{3}(pt-p^2)-3\binom{m+2}{4}p^2.$$ \end{lemma} \textbf{Proof.} By Lemma \ref{f(pm+i+1)-f(pm+i)} we have \begin{eqnarray*} f(pm+i)&=& f(pm+i)-f(pm)+\sum \limits_{0\leq k\leq m-1}(f(pk+p)-f(p k))\\ &=& \binom{m+2}{2}(it-i p m-i^2) +\sum \limits_{0\leq k\leq m-1}\binom{k+2}{2}(pt-p^2k-p^2) \\ &=& \binom{m+2}{2}(it-i p m-i^2) +\binom{m+2}{3}(pt-p^2)-3\binom{m+2}{4}p^2. \end{eqnarray*} In the above proof, we use the identities $$k\binom{k+2}{2}=3\binom{k+2}{3}$$ and $$\sum_{0\leq k\leq m-1} \binom{k+n}{n}=\binom{n+m}{n+1}.$$ $\square$ Now we prove the main result of this paper. \noindent\textbf{Proof of Theorem \ref{main}.} Suppose that $\lambda$ is a $(t,t+1,\ldots, t+p)$-core partition with the largest size. By Lemma \ref{set}, we know $\beta(\lambda)\subseteq \bigcup_{1\leq k \leq [\frac{t+p-2}{p}]} S_{k}$. We will establish some properties of such a $\lambda$. \textbf{Step 1.} Let $r$ be the largest positive integer $k$ such that $b_{\lambda,k}> 0$, i.e., $b_{\lambda,r}> 0$ and $b_{\lambda,k}=0 $ for $ k> r$. We claim that $\lambda=\mu_{c_1, c_2, \ldots, c_r}$ for some positive integers $c_1, c_2, \ldots, c_r$ such that $c_k\leq a_k$ for $1\leq k \leq r $ and $ c_k-c_{k+1}\geq p$ for $1\leq k \leq r-1 $: First we know $b_{\lambda,k}-b_{\lambda,k+1}\geq p$ for $1\leq k \leq r-1$ by applying Lemma \ref{bk} repeatedly from $k=r-1$ downwards, since $b_{\lambda,r}>0$ then forces $b_{\lambda,k}>0$ for every $1\leq k\leq r$. It follows that $$kt-b_{\lambda,k}\leq (k+1)t-b_{\lambda,k+1}-(t+p).$$ This means that for every $$(k+1)t-b_{\lambda,k+1}\leq x \leq (k+1)t-1 ,$$ we have $$kt-b_{\lambda,k}\leq x-(t+i) \leq kt-1 $$ for $0\leq i \leq p.$ Thus by Lemma \ref{a-mt}, $ \bigcup_{1\leq k \leq r} \{ x\in \textbf{Z} : kt-b_{\lambda,k}\leq x \leq kt-1 \}$ must be a $\beta$-set of some $(t,t+1,\ldots, t+p)$-core partition ${\lambda}'$. We can write $$\beta({\lambda}')= \bigcup_{1\leq k \leq r} \{ x\in \textbf{Z} : kt-b_{\lambda,k}\leq x \leq kt-1 \}.$$ Since $\{ x\in \textbf{Z} : kt-b_{\lambda,k}\leq x \leq kt-1 \}$ is just the set of the largest $b_{\lambda,k}$ elements in $S_{k}$, we have $$\sum \limits_{x\in \beta(\lambda)\bigcap S_{k}}x\ \leq \sum \limits_{kt-b_{\lambda,k}\leq x \leq kt-1}x\ =\sum \limits_{x\in \beta({\lambda}')\bigcap S_{k}}x$$ for $1\leq k\leq r.$ Then we know $$ \mid \lambda \mid= \sum \limits_{x\in \beta(\lambda)}x-\binom{\sum \limits_{1\leq k \leq r} b_{\lambda,k}}{2} \leq \sum \limits_{x\in \beta({\lambda}')}x-\binom{\sum \limits_{1\leq k \leq r} b_{\lambda,k}}{2} = \mid \lambda' \mid.$$ Equality holds in the above inequality if and only if $\lambda= \lambda'$. Since we assumed that $\lambda$ is a $(t,t+1,\ldots, t+p)$-core partition with the largest size, we must have $\lambda= \lambda'$ and thus $$\beta(\lambda)\bigcap S_{k}=\{ x\in \textbf{Z} : kt-b_{\lambda,k}\leq x \leq kt-1 \}$$ for $1\leq k \leq r$. Let $c_k=b_{\lambda,k}$ for $1\leq k\leq r$. Then we have $\lambda=\mu_{c_1, c_2, \ldots, c_r}$, $c_k\leq a_k$ for $1\leq k \leq r $ and $ c_k-c_{k+1}\geq p$ for $1\leq k \leq r-1$. This proves the claim. \textbf{Step 2.} We claim that $1\leq c_r\leq p$: Otherwise, suppose that $c_r\geq p+1$.
Then $$r\leq [\frac{t+p-2}{p}]-1$$ since $$c_{[\frac{t+p-2}{p}]}\leq a_{[\frac{t+p-2}{p}]}\leq p.$$ Then we know $1 \leq a_{r+1}.$ Thus we can define $$\lambda'=\mu_{c_1, c_2, \ldots, c_r, 1}.$$ By Lemma \ref{mostnumbers}, $\lambda'$ is a $(t,t+1,\ldots, t+p)$-core partition since $c_r-1\geq p$. It is easy to see that $$\beta({\lambda}')= \beta({\lambda})\bigcup \{ (r+1)t-1\}.$$ But by Lemma \ref{thm1} we have \begin{eqnarray*} \mid \lambda' \mid-\mid \lambda \mid &=& \sum \limits_{x\in \beta(\lambda')}{x}-\binom{\# \beta(\lambda') }{2}-(\sum \limits_{y\in \beta(\lambda)}{y}-\binom{\# \beta(\lambda) }{2})\\&=& (r+1)t-1-\# \beta(\lambda) >0\end{eqnarray*} since $(r+1)t-1$ is larger than any element in $\beta({\lambda})$. This contradicts the assumption that $\lambda$ is a $(t, t + 1, \ldots, t + p)$-core partition with the largest size. Then we must have $1\leq c_r\leq p$. \textbf{Step 3.} We claim that there is at most one integer $i$ satisfying $1\leq i \leq r-1$ and $ c_i-c_{i+1}\neq p$: Otherwise, suppose that $1\leq i<j \leq r-1$ such that $ c_i-c_{i+1}\neq p$ and $ c_j-c_{j+1}\neq p$. It is easy to see that $$c_{j+1}+1\leq c_j-p \leq a_j-p= a_{j+1}.$$ Then we can define $$\lambda'=\mu_{c_1, c_2, \ldots, c_{i-1}, c_i-1, c_{i+1}, \ldots, c_{j}, c_{j+1}+1, c_{j+2}, \ldots, c_r}.$$ By Lemma \ref{mostnumbers}, $\lambda'$ is a $(t,t+1,\ldots, t+p)$-core partition since $ c_i-c_{i+1}\geq p+1$ and $ c_j-c_{j+1}\geq p+1$. It is easy to see that $$\beta({\lambda}')= \beta({\lambda})\bigcup \{ (j+1)t-c_{ j+1}-1\}\setminus \{ it-c_i \}.$$ But by Lemma \ref{thm1} we have $$\mid \lambda' \mid-\mid \lambda \mid= (j+1)t-c_{ j+1}-1-(it-c_i)\geq 2t+c_i-c_{ j+1}-1>0.$$ This contradicts the assumption that $\lambda$ is a $(t, t + 1, \ldots, t + p)$-core partition with the largest size. This proves the claim. \textbf{Step 4.} We claim that if such an $i$ as in Step 3 exists, then $ c_i-c_{ i+1}= p+1$: Otherwise, suppose that $ c_i-c_{ i+1}\geq p+2$. It is easy to see that $$c_{i+1}+1< c_i-p \leq a_i-p= a_{i+1}.$$ Then we can define $$\lambda'=\mu_{c_1, c_2, \ldots, c_{i-1}, c_i-1, c_{i+1}+1, c_{i+2}, \ldots, c_r}.$$ By Lemma \ref{mostnumbers}, $\lambda'$ is a $(t,t+1,\ldots, t+p)$-core partition since $$ (c_i-1)-(c_{i+1}+1)=c_i-c_{i+1}-2\geq p.$$ Notice that $$\beta({\lambda}')= \beta({\lambda})\bigcup \{ (i+1)t-c_{i+1}-1\}\setminus \{ it-c_{i} \}.$$ By Lemma \ref{thm1} we have $$\mid \lambda' \mid-\mid \lambda \mid= (i+1)t-c_{i+1}-1-(it-c_{i})=t+c_i-c_{ i+1}-1>0.$$ This contradicts the assumption that $\lambda$ is a $(t, t + 1, \ldots, t + p)$-core partition with the largest size. This proves the claim. \textbf{Step 5.} We claim that such an $i$ as in Step 4 cannot exist, i.e., there is no $i$ satisfying $ c_i-c_{i+1}=p+1$: Otherwise, suppose that $ c_i-c_{i+1}=p+1$. It is easy to see that $$c_{i+1}+1= c_i-p \leq a_i-p= a_{i+1}.$$ Then we can define $$\lambda'=\mu_{c_1, c_2, \ldots, c_{i}, c_{i+1}+1, c_{i+2}, \ldots, c_r}$$ and $$\lambda''=\mu_{c_1, c_2, \ldots, c_{i-1}, c_{i}-1, c_{i+1}, \ldots, c_r}.$$ Then $\lambda'$ and $\lambda''$ are also $(t, t + 1, \ldots, t + p)$-core partitions by $ c_i-c_{i+1}=p+1$ and Lemma \ref{mostnumbers}.
Notice that $$\beta({\lambda}')= \beta({\lambda})\bigcup \{ (i+1)t-c_{i+1}-1 \}$$ and $$\beta({\lambda}'')= \beta({\lambda})\setminus \{ it-c_{i} \}.$$ By Lemma \ref{thm1} we have $$\mid \lambda' \mid-\mid \lambda \mid=(i+1)t-c_{i+1}-1-\# \beta(\lambda)=(i+1)t-c_{i+1}-1-\sum \limits_{1\leq k \leq r}c_k$$ and $$\mid \lambda \mid-\mid \lambda'' \mid =it-c_{i}-\# \beta(\lambda'') =it-c_{i}-(\sum \limits_{1\leq k \leq r}c_k-1).$$ Putting these two equalities together, we have \begin{eqnarray*} 2\mid \lambda \mid & = & \mid \lambda'\mid+\mid \lambda''\mid-(t+c_i-c_{i+1}-2) \\ &=& \mid \lambda'\mid+\mid \lambda''\mid-(t+p-1) \\ &<& \mid \lambda'\mid+\mid \lambda''\mid. \end{eqnarray*} This contradicts the assumption that $\lambda$ is a $(t, t + 1, \ldots, t + p)$-core partition with the largest size. This proves the claim. \textbf{Step 6.} We claim that $\lambda\in \{ {\gamma}_{t-j}:\ 1\leq j \leq p \}$: By Steps 2, 3, 4 and 5 we know $1\leq c_r \leq p$ and $ c_k-c_{k+1}= p$ for $1\leq k \leq r-1,$ which means that $$\lambda= \mu_{c_1, c_1-p, c_1-2p, \ldots, c_1- [\frac{c_1-1}{p}] p}={\gamma}_{c_1}.$$ Suppose that $c_1=pm+i$, where $0\leq i\leq p-1$ and $m\geq 0.$ If $c_1<t-p$, then $\gamma_{c_1+1}$ is well defined and by Lemma \ref{f(pm+i+1)-f(pm+i)} we have \begin{eqnarray*} \mid \gamma_{c_1+1} \mid -\mid \lambda \mid & = & \mid \gamma_{c_1+1} \mid -\mid \gamma_{c_1}\mid = f(c_1+1)-f(c_1)\\ &=& f(pm+i+1)-f(pm+i) = \binom{m+2}{2}(t-pm-2(i+1)+1)\\&=& \binom{m+2}{2}(t-c_1-i-1) > \binom{m+2}{2}(p-i-1)\geq 0. \end{eqnarray*} This means that $c_1<t-p$ implies $\mid \gamma_{c_1+1} \mid > \mid \lambda \mid$, which contradicts the assumption that $\lambda$ is a $(t, t + 1, \ldots, t + p)$-core partition with the largest size. Then we must have $c_1\geq t-p$. But $c_1\leq a_1=t-1$, thus $\lambda\in \{ {\gamma}_{t-j}:\ 1\leq j \leq p \}$. \textbf{Step 7.} By assumption we know $t=pn+d$, where $1\leq d\leq p$ and $n\geq 0.$ We claim that $\mid \lambda \mid = \text{max}\{f(pn+[\frac{d}{2}]),\ f(pn-[\frac{p-d}{2}]) \}$ and $\lambda\in \{ {\gamma}_{j}:\ j=pn+[\frac{d}{2}],\ pn+[\frac{d}{2}]+1,\ pn-[\frac{p-d}{2}],\ \text{or} \ pn-[\frac{p-d}{2}]-1\}$: By Step $6$ we know $$\lambda\in \{ {\gamma}_{pn+k}:\ 0\leq k \leq d-1\}\bigcup \{ {\gamma}_{pn-k}:\ 0\leq k \leq p-d\}.$$ For ${\gamma}_{pn+k}$ where $0\leq k \leq d-1$, by Lemma \ref{f(pm+i+1)-f(pm+i)} we have \begin{eqnarray*}f(pn+k)-f(pn) &=&\binom{n+2}{2}(kt-kpn-k^2)\\ &=&\binom{n+2}{2}(kd-k^2)\\ &=&\binom{n+2}{2}(\frac{d^2}{4}-(k-\frac{d}{2})^2).\end{eqnarray*} Then it is easy to see that when $d$ is even, $f(pn+k)$ is maximal for $0\leq k \leq d-1$ if and only if $k=[\frac{d}{2}]$; when $d$ is odd, $f(pn+k)$ is maximal for $0\leq k \leq d-1$ if and only if $k=[\frac{d}{2}]$ or $ [\frac{d}{2}]+1.$ For ${\gamma}_{pn-k}$ where $ 0\leq k \leq p-d$, by Lemma \ref{f(pm+i+1)-f(pm+i)} we have \begin{eqnarray*}f(pn)-f(pn-k)&=&f(p(n-1)+p)-f(p(n-1)+p-k)\\ &=&\sum \limits_{p-k\leq l \leq p-1 }(f(p(n-1)+l+1)-f(p(n-1)+l) )\\ &=&\sum \limits_{p-k\leq l \leq p-1 }\binom{n+1}{2}(t-p(n-1)-2(l+1)+1)\\ &=& \sum \limits_{p-k\leq l \leq p-1 }\binom{n+1}{2}(p+d-2l-1)\\ &=&\binom{n+1}{2}(k^2-k(p-d))\\ &=&\binom{n+1}{2}((k-\frac{p-d}{2})^2-\frac{(p-d)^2}{4}).\end{eqnarray*} Then it is easy to see that when $p-d$ is even, $f(pn-k)$ is maximal for $ 0\leq k \leq p-d$ if and only if $k=[\frac{p-d}{2}]$; when $p-d$ is odd, $f(pn-k)$ is maximal for $ 0\leq k \leq p-d$ if and only if $k=[\frac{p-d}{2}]$ or $ [\frac{p-d}{2}]+1.$ This proves the claim.
\textbf{Step 8.} By Step $7$ and Lemma \ref{pm+i}, the largest size of $(t, t + 1, \ldots, t + p)$-core partitions is $$\text{max}\{f(pn+[\frac{d}{2}]),\ f(pn-[\frac{p-d}{2}]) \},$$ which equals \begin{eqnarray*} &\max\{\binom{n+2}{2}[\frac{d}{2}](d-[\frac{d}{2}])+\binom{n+2}{3}(p^2n+pd-p^2)-3\binom{n+2}{4}p^2,&\\ &\binom{n+1}{2}(p-[\frac{p-d}{2}])(d+[\frac{p-d}{2}])+\binom{n+1}{3}(p^2n+pd-p^2)-3\binom{n+1}{4}p^2 \}.&\end{eqnarray*} By Step $7$ we also know that the number of $(t, t + 1, \ldots, t + p)$-core partitions with the largest size is at most $4$. This finishes the proof. $\square$ For $p=1$, we have the following corollary. We mention that this corollary could also be obtained from results on the largest size of $(t_1, t_2)$-core partitions in \cite{ols}. \begin{cor} \label{p=1} The largest size of $(t, t + 1)$-core partitions is $ \binom{t+2}{4}$. The number of $(t, t + 1)$-core partitions with the largest size is $1$. \end{cor} \textbf{Proof.} By Step $6$ of the proof of Theorem \ref{main}, we know that ${\gamma}_{t-1}$ is the only $(t, t + 1)$-core partition with the largest size since $p=1$ in this case. By Lemma \ref{pm+i} the largest size is $$ \mid {\gamma}_{t-1} \mid = f(t-1) = \binom{t+1}{3}(t-1) -3\binom{t+1}{4}= \binom{t+2}{4}.$$ $\square$ For $p=2$, we have the following corollary, which shows that Amdeberhan's conjecture on the largest size of $(t,t+1, t+2)$-core partitions proposed in \cite{tamd} is true. \begin{cor} \label{p=2} \textbf{(1)} If $t=2n-1$, the largest size of $(2n-1, 2n, 2n+1)$-core partitions is $n \binom{n+1}{3}.$ The number of $(2n-1, 2n, 2n+1)$-core partitions with the largest size is $2$. \textbf{(2)} If $t=2n$, the largest size of $(2n, 2n + 1, 2n + 2)$-core partitions is $( n+1) \binom{n+1}{3}+\binom{n+2}{3}$. The number of $(2n, 2n + 1, 2n + 2)$-core partitions with the largest size is $1$. \end{cor} \textbf{Proof.} By Step $6$ of the proof of Theorem \ref{main}, we know that a $(t, t + 1, t + 2)$-core partition with the largest size must be ${\gamma}_{t-1}$ or ${\gamma}_{t-2}$ since $p=2$ in this case. \textbf{(1)} When $t=2n-1$, by Lemma \ref{f(pm+i+1)-f(pm+i)} we have $$\mid {\gamma}_{t-1} \mid- \mid {\gamma}_{t-2} \mid= f(2n-2)-f(2n-3)=\binom{n}{2}( t-2(n-2)-4+1 )= 0.$$ Then we get $f(2n-2)=f(2n-3)$. This means that ${\gamma}_{2n-2}$ and ${\gamma}_{2n-3}$ are the only two $(2n-1, 2n, 2n+1)$-core partitions with the largest size. By Lemma \ref{pm+i} the largest size is \begin{eqnarray*} f(2n-2) &=& f(2(n-1))\\ &=&\binom{n+1}{3}(2(2n-1)-4)-12\binom{n+1}{4}\\ &=& \binom{n+1}{3}(4n-6-3(n-2)) \\ &=& n\binom{n+1}{3}. \end{eqnarray*} \textbf{(2)} When $t=2n$, by Lemma \ref{f(pm+i+1)-f(pm+i)} we have \begin{eqnarray*}\mid {\gamma}_{t-1} \mid- \mid {\gamma}_{t-2} \mid &=& f(2n-1)-f(2n-2)\\ &=& \binom{n+1}{2}( t-2(n-1)-2+1 )\\&=&\binom{n+1}{2}> 0.\end{eqnarray*} This means that ${\gamma}_{2n-1}$ is the only $(2n, 2n + 1, 2n + 2)$-core partition with the largest size. By Lemma \ref{pm+i} the largest size is \begin{eqnarray*} f(2n-1) &=& f(2(n-1)+1)\\ &=& \binom{n+1}{2}(2n- 2 (n-1)-1) +\binom{n+1}{3}(4n-4)-12\binom{n+1}{4}\\ &=& \binom{n+1}{2} +\binom{n+1}{3}(4n-4)-3\binom{n+1}{3}(n-2)\\ &=& \binom{n+1}{2} +(n+2)\binom{n+1}{3}\\ &=& \binom{n+1}{2} +\binom{n+1}{3}+(n+1)\binom{n+1}{3}\\ &=& (n+1)\binom{n+1}{3}+\binom{n+2}{3}. \end{eqnarray*} $\square$ \end{document}
\begin{document} \title{Measuring Small Longitudinal Phase Shifts via Weak Measurement Amplification} \author{Kai Xu}\email{These authors contributed equally to this work} \affiliation{CAS Key Laboratory of Quantum Information, University of Science and Technology of China, Hefei 230026, China} \affiliation{CAS Center For Excellence in Quantum Information and Quantum Physics, University of Science and Technology of China, Hefei 230026, China} \author{Xiao-Min Hu}\email{These authors contributed equally to this work} \affiliation{CAS Key Laboratory of Quantum Information, University of Science and Technology of China, Hefei 230026, China} \affiliation{CAS Center For Excellence in Quantum Information and Quantum Physics, University of Science and Technology of China, Hefei 230026, China} \author{Chao Zhang} \author{Yun-Feng Huang} \author{Bi-Heng Liu}\email{[email protected]} \author{Chuan-Feng Li} \author{Guang-Can Guo} \affiliation{CAS Key Laboratory of Quantum Information, University of Science and Technology of China, Hefei 230026, China} \affiliation{CAS Center For Excellence in Quantum Information and Quantum Physics, University of Science and Technology of China, Hefei 230026, China} \author{Meng-Jun Hu}\email{[email protected]} \affiliation{Beijing Academy of Quantum Information Sciences, Beijing, 100193, China} \author{Yong-Sheng Zhang}\email{[email protected]} \affiliation{CAS Key Laboratory of Quantum Information, University of Science and Technology of China, Hefei 230026, China} \affiliation{CAS Center For Excellence in Quantum Information and Quantum Physics, University of Science and Technology of China, Hefei 230026, China} \date{\today} \begin{abstract} Weak measurement amplification, which is considered a very promising scheme in precision measurement, has been applied to the estimation of various small physical quantities. Since many quantities can be converted into phase signals, it is interesting and important to consider measuring ultra-small longitudinal phase shifts by using weak measurement. Here, we propose and experimentally demonstrate a novel weak-measurement-amplification-based scheme for ultra-small longitudinal phase estimation, which is suitable for polarization interferometry. We realize a one-order-of-magnitude amplification measurement of a small phase signal directly introduced by a Liquid Crystal Variable Retarder and show that it is robust to the finite visibility of interference. Our results may find important applications in high-precision measurements, such as gravitational wave detection. \end{abstract} \maketitle \section{Introduction} Weak measurement, which was first proposed by Aharonov, Albert and Vaidman \cite{AAV}, has attracted a lot of attention in recent decades \cite{rmp}. In the theoretical framework of weak measurements, the system, prepared in a pre-selected state, first interacts weakly with the pointer, and this is then followed by a post-selection on the system state. When the interaction is weak enough that only the first-order approximation needs to be considered, the so-called weak value of an observable $\hat{A}$, defined as $\langle\hat{A}\rangle_{w}=\langle\psi_{f}|\hat{A}|\psi_{i}\rangle/\langle\psi_{f}|\psi_{i}\rangle$, where $|\psi_{i}\rangle$ and $|\psi_{f}\rangle$ are the pre-selected and post-selected states of the system, respectively, emerges naturally in the framework of weak measurements \cite{AAV}.
The weak value is generally complex, with its real and imaginary parts obtained separately by performing measurements of non-commuting observables on the pointer \cite{Rotza}, and it can be arbitrarily large when $|\psi_{i}\rangle$ and $|\psi_{f}\rangle$ are almost orthogonal. Although the weak value has been intensively investigated since its birth \cite{wv1,wv2,wv3,wv4,wv5,wv6,wv7}, the debate on its physical meaning continues \cite{de1,de11,de2,de21,de3,de31}. Regardless of these arguments, the method of weak measurement has been shown to be powerful in resolving quantum paradoxes \cite{pa1,pa2,pa3,pa4,pa5}, reconstructing quantum states \cite{tom1, tom2, tom3, tom4,tom5,tom6,tom7,tom8,tom9,tom10}, amplifying small effects \cite{am1,am5,am8,am11,am2,am3,am4,am6,am7,am9,am10,am12,am13} and investigating the foundations of the quantum world \cite{f1,f2,f3,f4,f2018,f5,f6,f7,f8,f9}. Among the above applications, weak value amplification (WVA) is particularly intriguing and has been rapidly developed in high precision measurements. In order to realize WVA, the tiny quantity to be measured needs to be converted into the coupling coefficient of a von Neumann-type interaction Hamiltonian, which is small enough that the condition of weak measurement is satisfied. The magnification of WVA is directly determined by the weak value of the system observable appearing in the interaction Hamiltonian. When the pre-selected state $|\psi_{i}\rangle$ and the post-selected state $|\psi_{f}\rangle$ of the system are properly chosen, the weak value can be arbitrarily large. However, the magnification is limited when all orders of evolution are taken into consideration \cite{lim1,lim2,lim3,lim4}. While the potential application of the weak value in signal amplification was pointed out as early as 1990 \cite{sa}, it drew no particular attention until the first report on the observation of the spin Hall effect of light via WVA \cite{am1}. Since then, many kinds of signal measurements via WVA have been reported, such as geometric phase \cite{am13}, angular rotation \cite{an,an1}, spin Hall effect \cite{am1,am5,am8,am11}, single-photon nonlinearity \cite{non1,non2}, frequency \cite{fre,am10}, etc. Meanwhile, WVA obtains a large weak value at the price of a low detection probability due to post-selection, which may cause a greater statistical error \cite{post1}. Therefore, a controversy arises on whether or not WVA outperforms conventional measurements \cite{co1,co2,co21,co22,co3,co4,co5}. Although some theoretical studies did not arrive at positive conclusions, the situation becomes different when practical experiments are taken into consideration. Some studies have shown that WVA has meaningful robustness against technical noise \cite{technoise1,technoise3,temnoise1,temnoise2}. Meanwhile, some technical advantages of WVA have been experimentally demonstrated \cite{tec2,tec3,detsat2,tec4}. Moreover, WVA-based proposals to achieve the Heisenberg limit by using quantum resources such as squeezing \cite{co5} and entanglement \cite{enta1,enta2} have also been explored; e.g., entanglement-assisted WVA has been realized in an optical system \cite{entae1,entae2}. Recently, Kim {\it et al.} demonstrated a novel weak measurement amplification scheme based on iterative interactions to achieve the Heisenberg limit \cite{hei1}. Since many physical quantities can be converted into phase measurements, the use of WVA to realize ultra-small phase measurements, especially of longitudinal phases, has developed rapidly in recent years \cite{lon7,lon6,lon4,lon5}.
However, the existing schemes are still not suitable for practical applications because of their severe requirements on preparing the initial probe state as a perfect Gaussian distribution and on detection in the time or frequency domain \cite{lon7,lon1, lon3}. The direct amplification of the phase shift in optical interferometry with weak measurement has been experimentally studied by Li {\it et al.} \cite{lon4}. In that experiment, the system state with the initial phase shift is prepared before entering the interferometer, the weak measurement part is composed of a HWP and a Sagnac-like interferometer, and the phase shift can be directly measured by scanning two oscillation patterns via a general polarization projection measurement device. Here we propose a different scheme of ultra-small longitudinal phase amplification measurement within the framework of weak measurements. Compared with the Sagnac interferometer scheme in Ref. \cite{lon4}, it can be extended to other interferometers such as the Michelson interferometer suggested in Refs. \cite{hu,hu2}. Surprisingly, no definite weak value occurs in our case and the magnification is nonlinear, which makes it different from WVA. In this Letter, we experimentally demonstrate this new scheme by measuring the ultra-small longitudinal phase caused by a liquid crystal phase plate and realize one order of magnitude of amplification. \section{Weak measurement amplification based phase measurement} The key idea of weak measurements based ultra-small phase amplification (WMPA) is to transform the ultra-small longitudinal phase to be measured into a larger rotation along the latitude of the Bloch sphere of the meter qubit, e.g., a larger rotation of a photon's polarization \cite{hu}. To explicitly see how it works, consider a two-level system initially prepared in the superposition state $|\psi_{i}\rangle_{S}=\alpha|0\rangle+\beta|1\rangle$ with $|\alpha|^{2}+|\beta|^{2}=1$. Contrary to most discussions of WVA, in which a continuous pointer is used \cite{AAV,rmp,am1,am2}, we adopt a discrete pointer, i.e., a qubit \cite{tom1,f2,knee}, prepared in the superposition state $|\phi\rangle_{P}=\mu|\uparrow\rangle+\nu|\downarrow\rangle$ with $|\mu|^{2}+|\nu|^{2}=1$. The pointer can be another two-level system or a different degree of freedom of the same system. We consider the unitary controlled-rotation evolution of the system-pointer interaction \begin{equation} \hat{U}=|0\rangle\langle 0|\otimes\hat{I}+|1\rangle\langle 1|\otimes(|\uparrow\rangle\langle\uparrow|+e^{i\theta}|\downarrow\rangle\langle\downarrow|), \end{equation} where $\theta$ is the ultra-small phase signal to be measured. After the evolution of the composite system, a post-selection is performed on the system that collapses it into the state $|\psi_{f}\rangle_{S}=\gamma|0\rangle+\eta|1\rangle$ with $|\gamma|^{2}+|\eta|^{2}=1$. The state of the pointer, after the post-selection of the system, becomes (unnormalized) \begin{equation} \begin{split} |\tilde{\varphi}\rangle_{P}&=_{S}\langle\psi_{f}|\hat{U}|\psi_{i}\rangle_{S}\otimes|\phi\rangle_{P} \\ &=\mu(\alpha\gamma+\beta\eta)|\uparrow\rangle+\nu(\alpha\gamma+\beta\eta e^{i\theta})|\downarrow\rangle \end{split} \end{equation} with success probability $P_{s}=\mathrm{Tr}[|\tilde{\varphi}\rangle_{P}\langle\tilde{\varphi}|]$, where $\alpha,\beta,\gamma,\eta$ are all taken to be real without loss of generality.
Since $\theta\ll 1$, $\alpha\gamma+\beta\eta e^{i\theta}=(\alpha\gamma+\beta\eta)e^{i\kappa}$ in the first-order approximation, with \begin{equation} \mathrm{tan}(\kappa)=\dfrac{\mathrm{sin}(\theta)}{\mathrm{cos}(\theta)+(\alpha\gamma)/(\beta\eta)}. \end{equation} Phase signal amplification is realized when the post-selected state is properly chosen such that $_{S}\langle\psi_{f}|\psi_{i}\rangle_{S}=\alpha\gamma+\beta\eta\rightarrow 0$. The normalized pointer state thus becomes \begin{equation} |\varphi\rangle_{P}=\mu|\uparrow\rangle+\nu e^{i\kappa}|\downarrow\rangle \end{equation} in the first-order approximation. Analogous to a micrometer, which transforms a small displacement into a larger rotation along a circle, our protocol transforms an ultra-small phase into a larger rotation of the pointer along the latitude of the Bloch sphere. The amplified phase information $\kappa$ can be easily extracted by performing a proper basis measurement on the pointer. It is intriguing to note that the WMPA seems to work even when $_{S}\langle\psi_{f}|\psi_{i}\rangle_{S}=0$ according to Eq. (3), in which case an infinitely large amplification can be realized. This is, however, not true, because the relative phase signal reduces to a global phase that cannot be extracted in that case, according to Eq. (2). \begin{figure*} \caption{{\bf Experiment Setup}} \end{figure*} \section{Experiment realization} In our experimental demonstration as shown in Fig. 1, we take the path state of photons as the system and their polarization degree of freedom as the pointer, and perform a measurement of the ultra-small longitudinal phase introduced by a Liquid Crystal Variable Retarder (LCVR). We choose $\alpha,\beta,\mu,\nu=1/\sqrt{2}$ in our experiment such that the polarization of the post-selected photons is $(|H\rangle+e^{i\kappa}|V\rangle)/\sqrt{2}$, where $|H\rangle$ and $|V\rangle$ represent horizontal and vertical polarization, respectively. The amplified phase $\kappa$ is extracted by performing a measurement in the basis $\lbrace |+\rangle,|-\rangle\rbrace$ with $|\pm\rangle=(|H\rangle\pm|V\rangle)/\sqrt{2}$ on the post-selected photons, which gives the expectation value of the observable $\hat{\sigma}_{x}\equiv |+\rangle\langle +|-|-\rangle\langle -|$ as $\langle\hat{\sigma}_{x}\rangle=\mathrm{cos}(\kappa)$. The whole experimental setup consists of four parts, i.e., initial state preparation, collection of the ultra-small phase signal $\theta$, phase signal amplification via the post-selection, and extraction of the amplified phase signal $\kappa$. The ultra-small phase $\theta$ is derived by substituting the measured $\kappa$ into the application formula Eq. (3), where $\alpha\gamma/\beta\eta$ is a predetermined experimental parameter. As shown in Fig. 1, a single-mode fiber (SMF) coupled $808$ nm laser beam is emitted from a Coherent laser (Mira Model 900-P). The laser beam is attenuated before being coupled into the fiber, which results in a final counting rate of approximately $8\times 10^{5}/\mathrm{s}$. The light beam, which exits the SMF, passes through a polarizing beamsplitter (PBS) and a half wave plate (HWP1) rotated at $22.5^{\circ}$, such that the polarization of the photons is prepared in the $|+\rangle$ state. Preparation of the initial state of the photons is completed by passing through a calcite beam displacer (BD) and two HWPs (HWP2 and HWP3) placed in the two paths separately.
The BD is approximately $39.70$ mm long; photons with horizontal polarization $|H\rangle$ are transmitted without change of path, while photons with vertical polarization $|V\rangle$ suffer a $4.21$ mm shift away from their original path. HWP2 and HWP3 are rotated at $22.5^{\circ}$ and $-22.5^{\circ}$, respectively, which gives the initial state of the photons as $(|0\rangle+|1\rangle)/\sqrt{2}\otimes|+\rangle$, where $|0\rangle$ represents the down path and $|1\rangle$ represents the up path. In fact, the initial states of the system, i.e., the path degree of freedom, and of the pointer, i.e., the polarization degree of freedom, can be arbitrarily prepared by rotating HWP1, HWP2 and HWP3. The ultra-small longitudinal phase signal $\theta$ to be measured is produced by LCVR1 (Thorlabs LCC1411-B) placed in the up path. The LCVR causes a phase shift between the horizontal and vertical polarization states of the photons when a voltage is applied by a Liquid Crystal Controller (Thorlabs LCC25). Another LCVR, placed in the down path with no voltage applied, is used for phase compensation. The LCVRs implement the unitary controlled-rotation operation, and the state of the photons, after passing through the LCVRs, becomes \begin{equation} |\Psi\rangle_{SP}=\dfrac{1}{\sqrt{2}}[|0\rangle\otimes|+\rangle+|1\rangle\otimes(|H\rangle+e^{i\theta}|V\rangle)/\sqrt{2}]. \end{equation} The amplification of the ultra-small phase signal $\theta$ is completed via HWP4, HWP5 and a BD, where the post-selected photons come out from the middle path of the BD toward HWP6. To see explicitly how the post-selection works, we can recast Eq. (5) as \begin{equation} |\Psi\rangle_{SP}=\dfrac{1}{\sqrt{2}}[|H\rangle\otimes(|0\rangle+|1\rangle)/\sqrt{2}+|V\rangle\otimes(|0\rangle+e^{i\theta}|1\rangle)/\sqrt{2}], \end{equation} which implies the exchange of the path degree of freedom and the polarization degree of freedom of the photons. Since the polarization degree of freedom of the photons now represents the system, post-selection of the system can be readily realized by a HWP combined with a PBS. Suppose that the HWP is rotated at $22.5^{\circ}-\delta$, where $\delta$ is a small angle; then the post-selected state of photons coming out from the reflection port of the PBS is $|\psi_{f}\rangle=\mathrm{sin}(45^{\circ}-2\delta)|H\rangle-\mathrm{cos}(45^{\circ}-2\delta)|V\rangle$. After the post-selection, we exchange the system and the pointer back to their original degrees of freedom by using a HWP rotated at $45^{\circ}$ in one of the outgoing paths and a BD to recombine the light beam. The above process of post-selection can be equivalently realized via HWP4 rotated at $67.5^{\circ}-\delta$, HWP5 rotated at $22.5^{\circ}-\delta$ and a BD, as shown in Fig. 1. \begin{figure} \caption{{\bf Experimental results of phase amplification and calibration}} \label{fig:data2} \end{figure} The post-selected photons, which come from the middle port of the second BD, are in the polarization state $|\varphi\rangle_{P}=(|H\rangle+e^{i\kappa}|V\rangle)/\sqrt{2}$, where $\kappa$ is the amplified phase signal determined by Eq. (3) with $\alpha=\beta=1/\sqrt{2}$ and $\gamma=\mathrm{sin}(45^{\circ}-2\delta),\eta=-\mathrm{cos}(45^{\circ}-2\delta)$. The amplified phase $\kappa$ can be extracted by performing a measurement in the basis $\lbrace|+\rangle,|-\rangle\rbrace$ and calculating the expectation value of the Pauli observable $\hat{\sigma}_{x}$, which is realized by HWP6 rotated at $22.5^{\circ}$, a PBS and two avalanche photodiode single-photon detectors (SPDs).
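As an illustration of how Eq. (3) is used in practice, the following minimal Python sketch (our illustration only, not the analysis code of the experiment; the values chosen for the post-selection parameter $r=\alpha\gamma/\beta\eta$ and for $\theta$ are arbitrary) computes the amplified phase $\kappa$ from $\theta$ and then inverts a measured $\langle\hat{\sigma}_{x}\rangle=\mathrm{cos}(\kappa)$ back to $\theta$, using the rearrangement $\mathrm{sin}(\theta-\kappa)=r\,\mathrm{sin}(\kappa)$, which follows directly from Eq. (3).
\begin{verbatim}
import numpy as np

def amplified_phase(theta, r):
    # kappa from Eq. (3): tan(kappa) = sin(theta) / (cos(theta) + r)
    return np.arctan2(np.sin(theta), np.cos(theta) + r)

def estimate_theta(sigma_x_mean, r):
    # invert Eq. (3): kappa = arccos(<sigma_x>), then
    # sin(theta - kappa) = r*sin(kappa)  =>  theta = kappa + arcsin(r*sin(kappa));
    # kappa is taken positive here, the sign being fixed by the experiment
    kappa = np.arccos(sigma_x_mean)
    return kappa + np.arcsin(r * np.sin(kappa))

r = -0.9            # illustrative post-selection parameter, ~10x gain as theta -> 0
theta_true = 0.05   # illustrative small phase (rad)
kappa = amplified_phase(theta_true, r)
print(kappa / theta_true)                 # amplification factor, about 9.4 at this theta
print(estimate_theta(np.cos(kappa), r))   # recovers about 0.05 rad
\end{verbatim}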
Once $\kappa$ is obtained, the ultra-small phase $\theta$ can be easily derived from Eq. (3). \section{Results} Our experimental results are shown in Fig. 2. Figure 2(a) shows the relationship between the amplified phase $\kappa$ and the ultra-small phase $\theta$, in which solid lines are theoretical predictions and dots are measured data. When the phase signal to be measured is small enough, according to Eq. (3), the amplification factor is mainly determined by the parameter $(\alpha\gamma)/(\beta\eta)$. Three different values of $(\alpha\gamma)/(\beta\eta)$ are considered in our experiment, corresponding to about $3$, $5$ and $10$ times magnification in the linear amplification region. For each case, four ultra-small phases chosen in the range of $0.03\mathrm{rad}-0.1\mathrm{rad}$, which are produced by LCVR1, are measured in advance. From Fig. 2(b), we can see that the LCVR's phase is linearly related to the voltage. As an important experimental parameter, $(\alpha\gamma)/(\beta\eta)$ needs to be determined before the amplification measurement. This is done by measuring the success probability of post-selection, i.e., $p=|\langle\psi_{f}|\psi_{i}\rangle|^{2}=(\alpha\gamma+\beta\eta)^{2}$, without introducing any ultra-small phase. In the case of our experiment, $(\alpha\gamma)/(\beta\eta)=-\mathrm{tan}(45^{\circ}-2\delta)$ and $\mathrm{sin}(2\delta)=\sqrt{p}$, which gives \begin{equation} \dfrac{\alpha\gamma}{\beta\eta}=\dfrac{\sqrt{p}-\sqrt{1-p}}{\sqrt{p}+\sqrt{1-p}}. \end{equation} Once the parameter $(\alpha\gamma)/(\beta\eta)$ is settled, a voltage is applied to the LCVR to produce an ultra-small phase and the amplification measurement begins. The ultra-small phase $\theta$ is immediately estimated by the conventional measurement method after the amplification measurement, which is done by blocking the down path between the BDs, replacing HWP4 with a HWP rotated at $67.5^{\circ}$ and rotating HWP6 to $45^{\circ}$. The visibility is about $0.999598$, so that the precision of the phase estimation is about $0.04\mathrm{rad}$. Three different values of $(\alpha\gamma)/(\beta\eta)$ are obtained by adjusting the small angle $\delta$, and four ultra-small phases are measured with $10\mathrm{s}$ of counting for each value. Considering the relevant statistical errors, systematic errors and imperfections of the optical elements, our results agree well with the theoretical predictions. As the key part of the experimental setup, the performance of the BD-type Mach-Zehnder interferometer directly determines the precision of the phase estimation. The visibility of the interferometer in our experiment is about $0.9976$, which gives a phase estimation precision of about $0.098\mathrm{rad}$. In the amplification case, the ultimate precision of phase estimation should be divided by the corresponding amplification factor $h$, which implies that a higher precision can be obtained compared to conventional Mach-Zehnder interferometry. The phase sensitivity can also be significantly improved by weak measurement amplification if the quantum noise limitation is not considered. The sensitivity in our amplification case is $\Delta\theta=\dfrac{\Delta\langle\hat{\sigma}_{x}\rangle}{h\mathrm{sin}(h\theta)}$, where $\Delta\langle\hat{\sigma}_{x}\rangle$ represents the uncertainty of $\langle\hat{\sigma}_{x}\rangle$, which implies an $h$-fold improvement even at the optimal point. When the quantum noise limitation is considered, the ultimate sensitivity of weak measurement amplification cannot outperform that of conventional measurements because of the large loss of photons.
Fortunately, we need not worry too much about quantum noise in most practical experiments, except in super-sensitive experiments such as gravitational wave detection, and even in this kind of experiment weak measurement amplification is able to approach the quantum noise limit \cite{hu3}. \section{Discussion and Conclusion} Although we only experimentally demonstrate the amplification of a polarization-dependent longitudinal phase, general phase amplification can be readily realized by using the Michelson interferometer suggested in Refs. \cite{hu,hu2}. This indicates that WMPA is capable of measuring any ultra-small phase signal with higher precision and sensitivity than conventional interferometers in practice. In conclusion, we have described and demonstrated a weak measurement amplification protocol, i.e., WMPA, that is capable of measuring any ultra-small longitudinal phase signal. The ultra-small phase introduced by an LCVR is measured and one order of magnitude of amplification is realized. Larger amplification is possible if the post-selected state is properly chosen. The WMPA would have higher precision and sensitivity than conventional interferometry if the quantum noise limitation is negligible, which is usually the case in practice. In addition, the precision of our scheme has the potential to achieve Heisenberg-limited precision scaling by using quantum resources such as squeezing \cite{co5}. Our results significantly broaden the area of applications of weak measurements and may play a crucial role in high precision measurements. \section{Acknowledgment} This work is supported by the National Natural Science Foundation of China (No. 92065113, 11904357, 62075208 and 12174367) and the National Key Research and Development Program of China (No. 2021YFE0113100). Meng-Jun Hu is supported by the Beijing Academy of Quantum Information Sciences.
\begin{thebibliography}{95} \makeatletter \providecommand \@ifxundefined [1]{ \@ifx{#1\undefined} } \providecommand \@ifnum [1]{ \ifnum #1\expandafter \@firstoftwo \else \expandafter \@secondoftwo \fi } \providecommand \@ifx [1]{ \ifx #1\expandafter \@firstoftwo \else \expandafter \@secondoftwo \fi } \providecommand \natexlab [1]{#1} \providecommand \enquote [1]{``#1''} \providecommand \bibnamefont [1]{#1} \providecommand \bibfnamefont [1]{#1} \providecommand \citenamefont [1]{#1} \providecommand \href@noop [0]{\@secondoftwo} \providecommand \href [0]{\begingroup \@sanitize@url \@href} \providecommand \@href[1]{\@@startlink{#1}\@@href} \providecommand \@@href[1]{\endgroup#1\@@endlink} \providecommand \@sanitize@url [0]{\catcode `\\12\catcode `\$12\catcode `\&12\catcode `\#12\catcode `\^12\catcode `\_12\catcode `\%12\relax} \providecommand \@@startlink[1]{} \providecommand \@@endlink[0]{} \providecommand \url [0]{\begingroup\@sanitize@url \@url } \providecommand \@url [1]{\endgroup\@href {#1}{\urlprefix }} \providecommand \urlprefix [0]{URL } \providecommand \Eprint [0]{\href } \providecommand \doibase [0]{http://dx.doi.org/} \providecommand \selectlanguage [0]{\@gobble} \providecommand \bibinfo [0]{\@secondoftwo} \providecommand \bibfield [0]{\@secondoftwo} \providecommand \translation [1]{[#1]} \providecommand \BibitemOpen [0]{} \providecommand \bibitemStop [0]{} \providecommand \bibitemNoStop [0]{.\EOS\space} \providecommand \EOS [0]{\spacefactor3000\relax} \providecommand \BibitemShut [1]{\csname bibitem#1\endcsname} \let\auto@bib@innerbib\@empty \bibitem [{\citenamefont {Aharonov}\ \emph {et~al.}(1988)\citenamefont {Aharonov}, \citenamefont {Albert},\ and\ \citenamefont {Vaidman}}]{AAV} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {Y.}~\bibnamefont {Aharonov}}, \bibinfo {author} {\bibfnamefont {D.~Z.}\ \bibnamefont {Albert}}, \ and\ \bibinfo {author} {\bibfnamefont {L.}~\bibnamefont {Vaidman}},\ }\href {\doibase 10.1103/PhysRevLett.60.1351} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. Lett.}\ }\textbf {\bibinfo {volume} {60}},\ \bibinfo {pages} {1351} (\bibinfo {year} {1988})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Dressel}\ \emph {et~al.}(2014)\citenamefont {Dressel}, \citenamefont {Malik}, \citenamefont {Miatto}, \citenamefont {Jordan},\ and\ \citenamefont {Boyd}}]{rmp} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Dressel}}, \bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Malik}}, \bibinfo {author} {\bibfnamefont {F.~M.}\ \bibnamefont {Miatto}}, \bibinfo {author} {\bibfnamefont {A.~N.}\ \bibnamefont {Jordan}}, \ and\ \bibinfo {author} {\bibfnamefont {R.~W.}\ \bibnamefont {Boyd}},\ }\href {\doibase 10.1103/RevModPhys.86.307} {\bibfield {journal} {\bibinfo {journal} {Rev. Mod. Phys.}\ }\textbf {\bibinfo {volume} {86}},\ \bibinfo {pages} {307} (\bibinfo {year} {2014})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Jozsa}(2007)}]{Rotza} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {R.}~\bibnamefont {Jozsa}},\ }\href {\doibase 10.1103/PhysRevA.76.044103} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. 
A}\ }\textbf {\bibinfo {volume} {76}},\ \bibinfo {pages} {044103} (\bibinfo {year} {2007})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Ritchie}\ \emph {et~al.}(1991)\citenamefont {Ritchie}, \citenamefont {Story},\ and\ \citenamefont {Hulet}}]{wv1} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {N.~W.~M.}\ \bibnamefont {Ritchie}}, \bibinfo {author} {\bibfnamefont {J.~G.}\ \bibnamefont {Story}}, \ and\ \bibinfo {author} {\bibfnamefont {R.~G.}\ \bibnamefont {Hulet}},\ }\href {\doibase 10.1103/PhysRevLett.66.1107} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. Lett.}\ }\textbf {\bibinfo {volume} {66}},\ \bibinfo {pages} {1107} (\bibinfo {year} {1991})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Johansen}\ and\ \citenamefont {Luis}(2004)}]{wv2} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {L.~M.}\ \bibnamefont {Johansen}}\ and\ \bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Luis}},\ }\href {\doibase 10.1103/PhysRevA.70.052115} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. A}\ }\textbf {\bibinfo {volume} {70}},\ \bibinfo {pages} {052115} (\bibinfo {year} {2004})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Aharonov}\ and\ \citenamefont {Botero}(2005)}]{wv3} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {Y.}~\bibnamefont {Aharonov}}\ and\ \bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Botero}},\ }\href {\doibase 10.1103/PhysRevA.72.052111} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. A}\ }\textbf {\bibinfo {volume} {72}},\ \bibinfo {pages} {052111} (\bibinfo {year} {2005})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Pryde}\ \emph {et~al.}(2005)\citenamefont {Pryde}, \citenamefont {O'Brien}, \citenamefont {White}, \citenamefont {Ralph},\ and\ \citenamefont {Wiseman}}]{wv4} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {G.~J.}\ \bibnamefont {Pryde}}, \bibinfo {author} {\bibfnamefont {J.~L.}\ \bibnamefont {O'Brien}}, \bibinfo {author} {\bibfnamefont {A.~G.}\ \bibnamefont {White}}, \bibinfo {author} {\bibfnamefont {T.~C.}\ \bibnamefont {Ralph}}, \ and\ \bibinfo {author} {\bibfnamefont {H.~M.}\ \bibnamefont {Wiseman}},\ }\href {\doibase 10.1103/PhysRevLett.94.220405} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. Lett.}\ }\textbf {\bibinfo {volume} {94}},\ \bibinfo {pages} {220405} (\bibinfo {year} {2005})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Kedem}\ and\ \citenamefont {Vaidman}(2010)}]{wv5} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {Y.}~\bibnamefont {Kedem}}\ and\ \bibinfo {author} {\bibfnamefont {L.}~\bibnamefont {Vaidman}},\ }\href {\doibase 10.1103/PhysRevLett.105.230401} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. Lett.}\ }\textbf {\bibinfo {volume} {105}},\ \bibinfo {pages} {230401} (\bibinfo {year} {2010})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Pusey}(2014)}]{wv6} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {M.~F.}\ \bibnamefont {Pusey}},\ }\href {\doibase 10.1103/PhysRevLett.113.200401} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. Lett.}\ }\textbf {\bibinfo {volume} {113}},\ \bibinfo {pages} {200401} (\bibinfo {year} {2014})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Dressel}(2015)}]{wv7} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Dressel}},\ }\href {\doibase 10.1103/PhysRevA.91.032116} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. 
A}\ }\textbf {\bibinfo {volume} {91}},\ \bibinfo {pages} {032116} (\bibinfo {year} {2015})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Leggett}(1989)}]{de1} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {A.~J.}\ \bibnamefont {Leggett}},\ }\href {\doibase 10.1103/PhysRevLett.62.2325} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. Lett.}\ }\textbf {\bibinfo {volume} {62}},\ \bibinfo {pages} {2325} (\bibinfo {year} {1989})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Aharonov}\ and\ \citenamefont {Vaidman}(1989)}]{de11} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {Y.}~\bibnamefont {Aharonov}}\ and\ \bibinfo {author} {\bibfnamefont {L.}~\bibnamefont {Vaidman}},\ }\href {\doibase 10.1103/PhysRevLett.62.2327} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. Lett.}\ }\textbf {\bibinfo {volume} {62}},\ \bibinfo {pages} {2327} (\bibinfo {year} {1989})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Ferrie}\ and\ \citenamefont {Combes}(2014{\natexlab{a}})}]{de2} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {C.}~\bibnamefont {Ferrie}}\ and\ \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Combes}},\ }\href {\doibase 10.1103/PhysRevLett.113.120404} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. Lett.}\ }\textbf {\bibinfo {volume} {113}},\ \bibinfo {pages} {120404} (\bibinfo {year} {2014}{\natexlab{a}})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Brodutch}(2015)}]{de21} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Brodutch}},\ }\href {\doibase 10.1103/PhysRevLett.114.118901} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. Lett.}\ }\textbf {\bibinfo {volume} {114}},\ \bibinfo {pages} {118901} (\bibinfo {year} {2015})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Cohen}(2017)}]{de3} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {E.}~\bibnamefont {Cohen}},\ }\href {\doibase 10.1007/s10701-017-0107-2} {\bibfield {journal} {\bibinfo {journal} {Found. Phys.}\ }\textbf {\bibinfo {volume} {47}},\ \bibinfo {pages} {1261} (\bibinfo {year} {2017})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Kastner}(2017)}]{de31} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {R.}~\bibnamefont {Kastner}},\ }\href {\doibase 10.1007/s10701-017-0085-4} {\bibfield {journal} {\bibinfo {journal} {Found. Phys.}\ }\textbf {\bibinfo {volume} {47}},\ \bibinfo {pages} {697} (\bibinfo {year} {2017})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Aharonov}\ \emph {et~al.}(2002)\citenamefont {Aharonov}, \citenamefont {Botero}, \citenamefont {Popescu}, \citenamefont {Reznik},\ and\ \citenamefont {Tollaksen}}]{pa1} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {Y.}~\bibnamefont {Aharonov}}, \bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Botero}}, \bibinfo {author} {\bibfnamefont {S.}~\bibnamefont {Popescu}}, \bibinfo {author} {\bibfnamefont {B.}~\bibnamefont {Reznik}}, \ and\ \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Tollaksen}},\ }\href {\doibase 10.1016/S0375-9601(02)00986-6} {\bibfield {journal} {\bibinfo {journal} {Phys. Lett. 
A}\ }\textbf {\bibinfo {volume} {301}},\ \bibinfo {pages} {130} (\bibinfo {year} {2002})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Resch}\ \emph {et~al.}(2003)\citenamefont {Resch}, \citenamefont {Lundeen},\ and\ \citenamefont {Steinberg}}]{pa2} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {K.}~\bibnamefont {Resch}}, \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Lundeen}}, \ and\ \bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Steinberg}},\ }\href {\doibase 10.1016/j.physleta.2004.02.042} {\bibfield {journal} {\bibinfo {journal} {Phys. Lett. A}\ }\textbf {\bibinfo {volume} {324}},\ \bibinfo {pages} {125} (\bibinfo {year} {2003})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Lundeen}\ and\ \citenamefont {Steinberg}(2009)}]{pa3} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {J.~S.}\ \bibnamefont {Lundeen}}\ and\ \bibinfo {author} {\bibfnamefont {A.~M.}\ \bibnamefont {Steinberg}},\ }\href {\doibase 10.1103/PhysRevLett.102.020404} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. Lett.}\ }\textbf {\bibinfo {volume} {102}},\ \bibinfo {pages} {020404} (\bibinfo {year} {2009})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Yokota}\ \emph {et~al.}(2009)\citenamefont {Yokota}, \citenamefont {Yamamoto}, \citenamefont {Koashi},\ and\ \citenamefont {Imoto}}]{pa4} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {K.}~\bibnamefont {Yokota}}, \bibinfo {author} {\bibfnamefont {T.}~\bibnamefont {Yamamoto}}, \bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Koashi}}, \ and\ \bibinfo {author} {\bibfnamefont {N.}~\bibnamefont {Imoto}},\ }\href {\doibase 10.1088/1367-2630/11/3/033011} {\bibfield {journal} {\bibinfo {journal} {New J. Phys.}\ }\textbf {\bibinfo {volume} {11}},\ \bibinfo {pages} {033011} (\bibinfo {year} {2009})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Pan}(2020)}]{pa5} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {A.~K.}\ \bibnamefont {Pan}},\ }\href {\doibase 10.1103/PhysRevA.102.032206} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. A}\ }\textbf {\bibinfo {volume} {102}},\ \bibinfo {pages} {032206} (\bibinfo {year} {2020})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Lundeen}\ \emph {et~al.}(2011)\citenamefont {Lundeen}, \citenamefont {Sutherland}, \citenamefont {Patel}, \citenamefont {Stewart},\ and\ \citenamefont {Bamber}}]{tom1} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {J.~S.}\ \bibnamefont {Lundeen}}, \bibinfo {author} {\bibfnamefont {B.}~\bibnamefont {Sutherland}}, \bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Patel}}, \bibinfo {author} {\bibfnamefont {C.}~\bibnamefont {Stewart}}, \ and\ \bibinfo {author} {\bibfnamefont {C.}~\bibnamefont {Bamber}},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {Nature}\ }\textbf {\bibinfo {volume} {474}},\ \bibinfo {pages} {188} (\bibinfo {year} {2011})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Lundeen}\ and\ \citenamefont {Bamber}(2012)}]{tom2} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {J.~S.}\ \bibnamefont {Lundeen}}\ and\ \bibinfo {author} {\bibfnamefont {C.}~\bibnamefont {Bamber}},\ }\href {\doibase 10.1103/PhysRevLett.108.070402} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. 
Lett.}\ }\textbf {\bibinfo {volume} {108}},\ \bibinfo {pages} {070402} (\bibinfo {year} {2012})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Salvail}\ \emph {et~al.}(2013)\citenamefont {Salvail}, \citenamefont {Agnew}, \citenamefont {Johnson}, \citenamefont {Bolduc}, \citenamefont {Leach},\ and\ \citenamefont {Boyd}}]{tom3} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {J.~Z.}\ \bibnamefont {Salvail}}, \bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Agnew}}, \bibinfo {author} {\bibfnamefont {A.~S.}\ \bibnamefont {Johnson}}, \bibinfo {author} {\bibfnamefont {E.}~\bibnamefont {Bolduc}}, \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Leach}}, \ and\ \bibinfo {author} {\bibfnamefont {R.~W.}\ \bibnamefont {Boyd}},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {Nat. Photonics}\ }\textbf {\bibinfo {volume} {7}},\ \bibinfo {pages} {316} (\bibinfo {year} {2013})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Malik}\ \emph {et~al.}(2014)\citenamefont {Malik}, \citenamefont {Mirhosseini}, \citenamefont {Lavery}, \citenamefont {Leach}, \citenamefont {Padgett},\ and\ \citenamefont {Boyd}}]{tom4} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Malik}}, \bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Mirhosseini}}, \bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Lavery}}, \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Leach}}, \bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Padgett}}, \ and\ \bibinfo {author} {\bibfnamefont {R.}~\bibnamefont {Boyd}},\ }\href {\doibase 10.1038/ncomms4115} {\bibfield {journal} {\bibinfo {journal} {Nat. Commun.}\ }\textbf {\bibinfo {volume} {5}},\ \bibinfo {pages} {3115} (\bibinfo {year} {2014})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Wu}(2013)}]{tom5} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {S.}~\bibnamefont {Wu}},\ }\href {\doibase 10.1038/srep01193} {\bibfield {journal} {\bibinfo {journal} {Sci. Rep.}\ }\textbf {\bibinfo {volume} {3}},\ \bibinfo {pages} {1193} (\bibinfo {year} {2013})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Thekkadath}\ \emph {et~al.}(2016)\citenamefont {Thekkadath}, \citenamefont {Giner}, \citenamefont {Chalich}, \citenamefont {Horton}, \citenamefont {Banker},\ and\ \citenamefont {Lundeen}}]{tom6} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {G.~S.}\ \bibnamefont {Thekkadath}}, \bibinfo {author} {\bibfnamefont {L.}~\bibnamefont {Giner}}, \bibinfo {author} {\bibfnamefont {Y.}~\bibnamefont {Chalich}}, \bibinfo {author} {\bibfnamefont {M.~J.}\ \bibnamefont {Horton}}, \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Banker}}, \ and\ \bibinfo {author} {\bibfnamefont {J.~S.}\ \bibnamefont {Lundeen}},\ }\href {\doibase 10.1103/PhysRevLett.117.120401} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. 
Lett.}\ }\textbf {\bibinfo {volume} {117}},\ \bibinfo {pages} {120401} (\bibinfo {year} {2016})}\BibitemShut {NoStop}
\end{thebibliography}
\end{document}
\begin{document} \begin{abstract} A descent of a labeled digraph is a directed edge $(s, t)$ with $s > t$. We count strong tournaments, strong digraphs, acyclic digraphs, and forests by descents and edges. To count strong tournaments we use Eulerian generating functions and to count strong and acyclic digraphs we use a new type of generating function that we call a graphic Eulerian generating function. \end{abstract} \keywords{acyclic digraph, strong digraph, strong tournament, descent, Eulerian generating function, graphic generating function} \title{Counting acyclic and strong digraphs by descents} \section{Introduction} A \emph{digraph} $D$ consists of a finite vertex set $V$ together with a subset $E$ of $V\times V - \{\,(v,v): v\in V\,\}$, the set of edges of $D$. (We do not allow loops in our digraphs.) We call a digraph with vertex set $V$ a digraph \emph{on} $V$. We assume that the vertices of our digraph are totally ordered, and for simplicity we take them to be integers. A \emph{descent} of a digraph is an edge $(s,t)$ with $s>t$ and an \emph{ascent} is an edge $(s,t)$ with $s<t$. In this paper we count two important classes of digraphs, acyclic and strong, by edges and descents, generalizing the results of Robinson \cite{racyclic}. We also count strong tournaments and forests by descents. A digraph is \emph{weakly connected} (or simply \emph{weak}) if its underlying graph is connected, and is \emph{strongly connected} (or simply \emph{strong}) if for every two vertices $u$ and $v$ there is a directed path from $u$ to $v$ (allowing the empty path if $u=v$). The \emph{weak} and \emph{strong components} of a digraph are the maximal weakly or strongly connected subgraphs. Note that every edge is contained in a weak component but there may be edges not contained in any strong component. A \emph{source strong component} of a digraph is a strong component with no edges entering it from outside the component. (Robinson \cite{racyclic,rstrong} calls these \emph{out-components}.) We use the notation $[n]$ to denote the set $\{1,2,\dots,n\}$. Given a digraph $D$ on $[n]$, we denote by $\e(D)$ the total number of edges of $D$, and by $\des(D)$ the number of descents of $D$. We define the \emph{descent polynomial} for a family of digraphs $\mathscr{B}_n$ on $[n]$ to be $$b_n(u) = \sum_{D \in \mathscr{B}_n} u^{\des(D)}.$$ The coefficient of $u^k$ in $b_n(u)$ is the number of digraphs in $\mathscr{B}_n$ with exactly $k$ descents. Similarly, the \emph{descent-edge polynomial} $b_n(u,y)$ of $\mathscr{B}_n$ is defined as $$b_n(u,y) = \sum_{D \in \mathscr{B}_n} u^{\des(D)}y^{\e(D)}.$$ The coefficient of $u^k y^m$ in $b_n(u,y)$ is the number of digraphs in $\mathscr{B}_n$ with exactly $m$ edges, $k$ of which are descents. Note that $b_n(u) = b_n(u,1)$. This paper is organized as follows. In Section~\ref{sec:digraphs}, we introduce several families of graphs and give known formulas for enumerating these families. In Section~\ref{sec:strong}, we enumerate strong tournaments by the number of descents and enumerate strong digraphs by both the number of descents and edges. In Section~\ref{sec:acyclic}, we enumerate acyclic digraphs by the number of edges and descents; we also derive a formula for rooted trees and forests with a given number of descents and leaves. \section{Families of digraphs}\label{sec:digraphs} We are concerned primarily with four types of digraphs: strong tournaments, strong digraphs, acyclic digraphs, and trees. 
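As a small illustration of the statistics defined in the introduction, consider the family of all digraphs on $[2]=\{1,2\}$. There are four such digraphs: the empty digraph, the digraph with the single ascent $(1,2)$, the digraph with the single descent $(2,1)$, and the digraph with both edges, which has two edges, one of which is a descent. Their descent-edge weights are $1$, $y$, $uy$, and $uy^2$, so the descent-edge polynomial of this family is
\[ 1 + y + uy + uy^{2} = (1+y)(1+uy), \]
and setting $y=1$ gives the descent polynomial $2+2u$.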
\subsection{Strong tournaments} A digraph is a \emph{tournament} if there is exactly one directed edge between each pair of vertices. There are $2^{\binom{n}{2}}$ tournaments on $[n]$ since for any two vertices $u$ and $v$, a tournament contains the edge $(u,v)$ or $(v,u)$ but not both. In \cite{mm}, Moon and Moser found a formula for the probability that a randomly chosen tournament is strongly connected. Equivalently, they showed that the number $t_n$ of strong tournaments on $n \ge 1$ labeled vertices is given by the recurrence \begin{equation} \label{e-mm1} t_n= 2^{\binom{n}{2}} - \sum_{k=1}^{n-1} \binom{n}{k}2^{\binom{n-k}{2}} t_k. \end{equation} This recurrence is equivalent to the generating function relation \begin{equation*} \sum_{n=1}^\infty t_n \frac{x^n}{n!} = 1-\biggl(\sum_{n=0}^\infty 2^{\binom n2}\frac{x^n}{n!}\biggr)^{-1}. \end{equation*} The first few values of $t_n$ are $t_1=1, t_2=0, t_3=2, t_4=24, t_5=544,$ and $t_6=22320$. In Section \ref{sec:strong tournaments} we generalize these formulas to count strong tournaments by descents, replacing the exponential generating functions with Eulerian generating functions. \subsection{Strong digraphs} Strong digraphs were first counted by Liskovets \cite{L69}, using a system of recurrences. Liskovets's recurrences were simplified by Wright \cite{wright}, who showed that the number $s_n$ of strong digraphs on $[n]$ is given by \begin{equation} \label{e-wright1} s_n = \eta_n + \sum_{k=1}^{n-1} \binom{n-1}{k-1} s_k\eta_{n-k}, \end{equation} where \begin{equation} \label{e-wright2} \eta_n = 2^{n(n-1)} - \sum_{k=1}^{n-1} \binom{n}{k}2^{(n-1)(n-k)} \eta_k. \end{equation} The first few values of $s_n$ are $s_1=1, s_2=1, s_3=18, s_4=1606$, and $s_5=565080$. A more direct approach to counting strong digraphs was given by Robinson \cite{racyclic} (see also \cite{rstrong}). Robinson's method will be discussed in more detail in Section \ref{SectionStrongDi}, and it is the basis for our approach to counting strong digraphs by edges and descents. We use a new kind of generating function that we call an \emph{Eulerian graphic generating function} whose properties are introduced in Section \ref{SectionEulerianGraphic}. De Panafieu and Dovgal \cite{pd} have also counted acyclic and strong digraphs using an approach similar to Robinson's. A different approach to counting strong digraphs has been given by Ostroff \cite{ostroff}. \subsection{Acyclic digraphs} An \emph{acyclic} digraph is a digraph with no directed cycles. That is, there is no nonempty directed path from any vertex to itself. Robinson \cite{racyclic} showed that the number $a_n$ of acyclic digraphs on $[n]$ is given by the generating function \[ \sum_{n=0}^\infty a_n\dpowg xn = \left( \sum_{n=0}^\infty (-1)^n \dpowg xn\right)^{-1}.\] The first few values of $a_n$ are $a_0=1, a_1=1, a_2=3, a_3=25,a_4=543,$ and $a_5=29281$. (Robinson had earlier \cite{racyclic1} found a different recurrence for counting acyclic digraphs that we will not discuss here.) Using an approach similar to Robinson's, in Section~\ref{sec:acyclic digraphs} we generalize his formula to derive an Eulerian graphic generating function for counting acyclic digraphs by edges and descents. Another proof of Robinson's formula was given by Stanley \cite{sacyclic}, using properties of chromatic polynomials of graphs. 
In Section \ref{sec:chromatic} we generalize Stanley's proof to count acyclic digraphs by edges and descents, using a generalization of the chromatic polynomial related to the chromatic quasisymmetric function of Shareshian and Wachs \cite{sw}. \subsection{Trees} We consider a rooted tree to be an acyclic digraph in which one vertex (the root) has outdegree 0 and every other vertex has outdegree 1. It is well known that there are $n^{n-1}$ rooted trees on $n$ vertices. More relevant to our results is that the exponential generating function $\sum_{n=1}^\infty n^{n-1}x^n/n!$ for rooted trees is the compositional inverse of $xe^{-x}$. In Section \ref{sec:trees} we use a variation of our first approach to counting acyclic digraphs to give a simple formula for the compositional inverse of the exponential generating function for counting rooted trees by descents. \section{Strong Tournaments and Strong Digraphs}\label{sec:strong} \subsection{Strong Tournaments}\label{sec:strong tournaments} We first study strong tournaments, which are easier to enumerate than acyclic and strong digraphs. Since every tournament on $n$ vertices has exactly $\binom{n}{2}$ edges, we count strong tournaments by descents only. First, let us determine the descent polynomial for all tournaments on $[n]$. For each pair of vertices $\{s,t\}$, exactly one of $(s,t)$ and $(t,s)$ is an edge. One of these edges is a descent and the other is an ascent, and thus the descent polynomial for all tournaments on $[n]$ is $(1+u)^{\binom n2}$. To count strong tournaments by descents, we will need some properties of the \emph{$q$-binomial coefficients} (also called \emph{Gaussian binomial coefficients}). We first define the $q$-factorial $n!_q$ by \[ n!_q = 1\cdot(1+q)\cdots(1+q+ \cdots +q^{n-2})\cdot (1+q+\cdots +q^{n-1}), \] with $0!_q=1$. The $q$-binomial coefficients, denoted $\tqbinom ni$, are defined by \[ \qbinom ni = \frac{n!_q}{i!_q(n-i)!_q}.\] For $q=1$ they reduce to ordinary binomial coefficients. The $q$-binomial coefficients have several combinatorial interpretations, but the one that we need is given in Lemma \ref{l-qbin} below. For disjoint sets of integers $S$ and $T$, we call an element $(s,t)$ of $S\times T$ a \emph{descent} if $s>t$ and an \emph{ascent} if $s<t$. Let $\des(S,T)$ denote the number of descents in $S\times T$. \begin{lem} \label{l-qbin} For nonnegative integers $n$ and $i$, with $i\le n$, we have \begin{equation*} \qbinom ni=\sum_{(S,T)} q^{\des(S,T)}, \end{equation*} where the sum is over all ordered partitions $(S,T)$ of $[n]$ for which $|S|=i$. \end{lem} This lemma is proved in \cite[Lemma 5.1]{qexp} by showing that the right side satisfies the same recurrence as the left side, $\tqbinom{n}{i} = q^i \tqbinom{n-1}i +\tqbinom{n-1}{i-1}$. It can also be derived easily from other well-known combinatorial interpretations for the $q$-binomial coefficients such as \cite[p.~56, Proposition 1.7.1]{ec1}. We can now give a recurrence for the descent polynomial for strong tournaments on $[n]$. (Although the variable $q$ is traditionally used in $\tqbinom ni$, we will replace $q$ with $u$, as we are using the variable $u$ to weight descents.) Notice that setting $u=1$ gives the recurrence \eqref{e-mm1} of Moon and Moser. \begin{thm}\label{StrongTournRec} Let $t_n(u)$ be the descent polynomial for the set of strong tournaments on $[n]$. 
Then for $n\ge1$ we have \begin{equation} \label{e-str} t_n(u) = (1+u)^{n \choose 2} - \sum_{k=1}^{n-1} \ubinom{n}{k} (1+u)^{n-k \choose 2}t_k(u).\end{equation} \end{thm} \begin{proof} Every nonempty tournament has a unique source strong component. Thus every tournament on $[n]$, for $n\ge 1$, can be constructed uniquely by choosing an ordered partition $(S,T)$ of $[n]$, with $S$ nonempty, then constructing a strong tournament on $S$ and an arbitrary tournament on $T$, and adding all edges in $S\times T$. By Lemma \ref{l-qbin}, the contribution to the descent polynomial for all tournaments on $[n]$ with $|S|=k$ is $\tubinom{n}{k} t_k(u)\, (1+u)^{n-k \choose 2}$. Thus \begin{equation} \label{e-str1} (1+u)^{n \choose 2} = \sum_{k=1}^{n} \ubinom{n}{k} (1+u)^{n-k \choose 2}t_k(u). \end{equation} Solving for $t_n(u)$ gives \eqref{e-str}. \end{proof} The first few values of the polynomials $t_n(u)$ are \[t_1(u) =1,\: t_2(u)= 0, \: t_3(u) = u + u^2, \: \text{ and } \: t_4(u) = u+6{u}^{2}+10{u}^{3}+6{u}^{4}+{u}^{5}. \] Coefficients of $t_n(u)$ for larger $n$ can be easily computed from \eqref{e-str} and are given in Table~\ref{chartStrong}. It is not difficult to show that $t_n(u)$ is a polynomial of degree $\binom n2 -1$ for $n\ge3$. Also, since reversing all the edges of a strong tournament gives another strong tournament, $t_n(u)$ is symmetric; i.e., $t_n(u) = u^{\binom n2}t_n(1/u)$. \begin{table} \begin{tabular}{c||r|r|r|r} \diagbox{$d$}{$n$} & \multicolumn{1}{c|}{4} & \multicolumn{1}{c|}{5} & \multicolumn{1}{c|}{6} & \multicolumn{1}{c}{7} \\ \hline \hline 1 & 1 & 1 & 1 & 1\\ 2 & 6 & 13 & 22 & 33\\ 3 & 10 & 56 & 172 & 402\\ 4 & 6 & 123 & 717 & 2,674\\ 5 & 1 & 158 & 1,910 & 11,614\\ 6 & 0 & 123 & 3,547 & 36,293\\ 7 & 0 & 56 & 4,791 & 86,305\\ 8 & 0 & 13 & 4,791& 161,529 \\ 9 & 0 & 1 & 3,547 & 242,890\\ 10 & 0 & 0 & 1,910 & 297,003\\ 11 & 0 & 0 & 717 & 297,003\\ 12 & 0 & 0 &172 & 242,890\\ 13 & 0 & 0 & 22 &161,529\\ 14 & 0 & 0 & 1 & 86,305\\ 15 & 0&0&0& 36,293\\ 16 & 0&0&0& 11,614\\ 17 & 0&0&0& 2,674\\ 18 & 0&0&0& 402\\ 19 & 0&0&0& 33\\ 20 & 0&0&0& 1\\ \hline TOTAL & 24 & 544 & 22,320 & 1,677,488 \end{tabular} \caption{The number of strong tournaments on $n$ vertices with $d$ descents for $4 \leq n \leq 7$; equivalently, the coefficients of $u^d$ in $t_n(u)$.} \label{chartStrong} \end{table} The next result gives a divisibility property for $t_n(u)$. \begin{prop} The polynomial $t_n(u)$ is divisible by $(1+u)^{\floor{n/2}}$. \end{prop} \begin{proof} Let $v_n(u)=t_n(u)/(1+u)^{\floor{n/2}}$. Then from \eqref{e-str} we obtain the recurrence \begin{equation*} v_n(u) = (1+u)^{\binom n2 -\floor{n/2}}-\sum_{k=1}^{n-1}\ubinom{n}{k} (1+u)^{\binom{n-k}{2}-\floor{n/2}+\floor{k/2}}v_k(u) \end{equation*} for $n\ge1$. It is easy to check that $\binom n2 -\floor{n/2}\ge0$, so it suffices to show that the expression multiplied by $v_k(u)$ in the sum on the right is a polynomial in $u$. Let \begin{equation*} E(n,k) =\binom{n-k}{2}-\floor{\frac n2}+\floor{\frac k2}. \end{equation*} We first show that $E(n,k)\ge 0$ for $k<n-1$. Note that \begin{align*} E(n,k) \geq \binom{n-k}{2}-\frac n2+\frac {k-1}{2} \\ = \frac{(n-k)(n-k-2) -1}{2}. \end{align*} For $k \leq n-2$, this gives $E(n,k) \geq -1/2$, so since $E(n,k)$ is an integer, we have $E(n,k)\ge0$. In the case where $k=n-1$, we must consider the parity of $n$. If $n$ is odd, then $E(n,n-1) = 0$. However, if $n$ is even, $E(n,n-1) = -1$. To complete the proof it suffices to show that if $n$ is even then $\tubinom {n}{n-1}$ is divisible by $1+u$. 
But if $n$ is even then \begin{equation*} \ubinom{n}{n-1}= 1+u+u^2+\cdots +u^{n-1}=(1+u)(1+u^2+u^4+\cdots+u^{n-2}), \end{equation*} thus completing the proof. \end{proof} The coefficients of $t_n(u)/(1+u)^{\lfloor n/2\rfloor}$ seem to be nonnegative, but we are not able to prove this. The recurrence of Theorem \ref{StrongTournRec} can also be expressed with generating functions. An \emph{Eulerian generating function} \cite[p.~321, Example 3.18.1c]{ec1} is a generating function of the form \begin{equation*} \sumz n a_n\dpowq xn. \end{equation*} Note that for $q=1$, this reduces to an exponential generating function. The multiplication of Eulerian generating functions is similar to that of exponential generating functions. If \begin{equation*} a(x) = \sumz n a_n \dpowq xn \quad \text{ and } \quad b(x) = \sumz n b_n \dpowq xn, \end{equation*} then \begin{equation*} a(x)b(x) = \sumz n c_n \dpowq xn, \end{equation*} where the coefficient $c_n$ is given by \begin{equation*} c_n = \sum_{i=0}^n \qbinom ni a_i b_{n-i}. \end{equation*} The generating function for $t_n(u)$ can be derived directly from the formula given in Theorem \ref{StrongTournRec} and the multiplication property of Eulerian generating functions: \begin{cor} \label{StrongTournGen} Let $T(x) = \sum_{n=1}^\infty t_n(u) x^n/n!_u$ be the Eulerian generating function for strong tournaments by descents and let $U(x) = \sum_{n=0}^\infty (1+u)^{\binom n2}x^n/n!_u$ be the Eulerian generating function for all tournaments by descents. Then \begin{equation*} \label{e-stourn} T(x) =1-U(x)^{-1} \end{equation*} and \begin{equation} \label{e-stourn2} U(x) = \frac{1}{1-T(x)}. \end{equation} \end{cor} \begin{proof} Equation \eqref{e-str1} is equivalent to $U(x) =1+T(x)U(x)$ from which the two formulas follow easily. \end{proof} Equation \eqref{e-stourn2} has a simple combinatorial interpretation obtained by iterating the decomposition described in the proof of Theorem \ref{StrongTournRec}: every tournament may be decomposed into a sequence of strong tournaments with all edges between the strong tournaments oriented from left to right. \subsection{Eulerian Graphic Generating Functions} \label{SectionEulerianGraphic} In this section, we introduce a new type of generating function which will be useful in enumerating both acyclic and strong digraphs by descents and edges. This new generating function is a generalization of a \emph{graphic generating function} (also called a \emph{special generating function} \cite{racyclic} or \emph{chromatic generating function} \cite[p.~321, Example 3.18.1c]{ec1}), which is a generating function of the form \begin{equation*} \sumz n a_n \dpowgy xn, \end{equation*} often with $y=1$. Graphic generating functions were first used by Robinson \cite{racyclic} and by Read \cite{read} (in the case $y=1$, with a slightly different normalization); further applications of graphic generating functions have been given by Gessel and Sagan \cite{GS96}, Gessel \cite{decomp}, and de Panafieu and Dovgal \cite{pd}. We define an \emph{Eulerian graphic generating function} to be a generating function of the form \begin{equation} \label{e-Eggf} \sumz n a_n \dpowbq{x}{n}. \end{equation} Given two Eulerian graphic generating functions $a(x)$ and $b(x)$ defined by \[ a(x) = \sumz n a_n \dpowbq{x}{n}\quad \text{and} \quad b(x) = \sumz n b_n \dpowbq{x}{n},\] we multiply them to obtain \[ a(x)b(x) = \sumz n c_n \dpowbq{x}{n} \] where\[ c_n = \sum_{i=0}^n \qbinom ni (1+y)^{i(n-i)} a_i b_{n-i}. 
\] In all of our formulas from here on we will modify the Eulerian graphic generating functions by taking $q=(1+uy)/(1+y)$. The combinatorial interpretation of these modified Eulerian graphic generating functions is explained by the following lemma. \begin{lem} \label{l-qbinom} Let $q=(1+uy)/(1+y)$. Then $\tqbinom ni (1+y)^{i(n-i)}$ is a polynomial in $u$ and $y$, and the coefficient of $u^j y^m$ in $\tqbinom ni (1+y)^{i(n-i)}$ is the number of ordered pairs $(S,A)$ where $S$ is an $i$-subset of $[n]$ and $A$ is an $m$-subset of $S\times ([n]-S)$ containing exactly $j$ descents. \end{lem} \begin{proof} Let $S$ be an $i$-subset of $[n]$ such that $S\times([n]-S)$ has $k$ descents, and thus $i(n-i)-k$ ordered pairs that are not descents. Define the weight of a subset $A\subseteq S\times ([n]-S)$ to be $u^{\des(A)}y^{|A|}$. To count such weighted subsets of $S\times([n]-S)$ we specify $A$ by deciding which descents and ascents of $S\times([n]-S)$ are included in $A$. Each descent in $S\times ([n]-S)$ can either be included in $A$, contributing a factor of $uy$ to the weight of $A$, or excluded, contributing a factor of 1. Similarly, each ascent in $S\times ([n]-S)$ can either be included in $A$, contributing a factor of $y$, or excluded, contributing a factor of 1. Thus the sum of the weights of all $A\subseteq S \times ([n]-S)$ is $(1+uy)^k (1+y)^{i(n-i) -k}$. Now define $Q_{n,i,k}$ by \begin{equation*} \qbinom ni = \sum_{k=0}^{i(n-i)} Q_{n,i,k}q^k. \end{equation*} Then by Lemma \ref{l-qbin}, $Q_{n,i,k}$ is the number of $i$-subsets $S$ of $[n]$ such that $S\times([n]-S)$ has $k$ descents. Thus the sum over all $i$-subsets $S\subseteq[n]$ of the weights of all $A\subseteq S \times ([n]-S)$ is \begin{align*}\sum_{k=0}^{i(n-i)} Q_{n,i,k} (1+uy)^k (1+y)^{i(n-i)-k} &=(1+y)^{i(n-i)}\sum_{k=0}^{i(n-i)} Q_{n,i,k} \left(\frac{1+uy}{1+y}\right)^k\\ &=(1+y)^{i(n-i)}\qbinom ni, \end{align*} where $q=(1+uy)/(1+y)$. \end{proof} The modified Eulerian graphic generating functions may be viewed another way. Note that \begin{equation*} n!_q(1+y)^{\binom n2}=\prod_{i=1}^{n} (1+q+\cdots + q^{i-1}) (1+y)^{i-1}. \end{equation*} Setting $q=(1+uy)/(1+y)$ and letting $P(i)$ denote the $i$th factor in this product gives \begin{align*} P(i)&:= (1+y)^{i-1}+(1+uy)(1+y)^{i-2}+(1+uy)^2(1+y)^{i-3}+\cdots+(1+uy)^{i-1}\\ &\phantom{:}=\frac{(1+y)^i-(1+uy)^i}{y(1-u)}. \end{align*} So if we let $F(n) = P(1)P(2)\cdots P(n)$, then the modified Eulerian graphic generating functions are of the form $\sumz n a_n x^n/F(n)$. Note that if we set $u=1$, then $q=(1+uy)/(1+y)$ becomes 1. So in this case the Eulerian graphic generating function \eqref{e-Eggf} reduces to the ``ordinary" graphic generating function \begin{equation*} \sumz n a_n \dpowgy xn. \end{equation*} \subsection{Strong Digraphs} \label{SectionStrongDi} We now find a generating function for the descent-edge polynomial for the set of strong digraphs. Before beginning the proof, we need several preliminary definitions. Let $\mathscr{D}elta$ be the linear transformation that converts an exponential generating function to an Eulerian graphic generating function. That is, \[ \mathscr{D}elta\left(\sum_{n=0}^\infty a_n \frac{x^n}{n!}\right) = \sum_{n=0}^{\infty} a_n \dpowbq xn. \] Let $G(x)$ be the Eulerian graphic generating function for the descent-edge polynomials of all digraphs. To specify a digraph $D$ on $[n]$, for each possible edge $(s,t)$ we either include it in $D$ or exclude it. 
If $s>t$ then including $(s,t)$ as an edge contributes a factor $uy$ to the descent-edge weight of $D$ and excluding it contributes a factor of 1. Similarly, if $s<t$ then including $(s,t)$ as an edge contributes a factor $y$ to the descent-edge weight of $D$ and excluding it contributes a factor of 1. Since there are $\binom n2$ possible edges $(s,t)$ with $s>t$ and $\binom n2$ with $s<t$, the descent-edge polynomial of the set of all digraphs on $[n]$ is $(1+uy)^{n \choose 2}(1+y)^{n \choose 2}$ and therefore \begin{equation} \label{e-G(x)} G(x) =\sum_{n=0}^{\infty} (1+uy)^{n \choose 2} (1+y)^{n \choose 2}\dpowbq xn =\sum_{n=0}^{\infty} (1+uy)^{n \choose 2} \frac{x^n}{n!_q}. \end{equation} Now let \[d_n(u,y;\beta)= \sum_D u^{\des(D)}y^{\e(D)}\beta^{\ssc(D)},\] where the sum is over all digraphs $D$ on the vertex set $[n]$, $\e(D)$ is the number of edges of $D$, and $\ssc(D)$ is the number of source strong components of $D$. As we have just seen, \begin{equation} \label{e-dn1} d_n(u,y;1) = (1+uy)^{\binom n2}(1+y)^{\binom n2}. \end{equation} Let $s_n(u,y)$ be the descent-edge polynomial for the set of strong digraphs on $n$ vertices, and let \[S(x)=\sum_{n=1}^\infty s_n(u,y)\dpow xn.\] Define polynomials $v_n(u,y;\beta)$ by \begin{equation} \label{e-vn} e^{\beta S(x)}=\sumz n v_n(u,y;\beta) \dpow xn. \end{equation} Then by the ``exponential formula'' \cite[p.~5, Corollary 5.1.6]{ec2}, $v_n(u,y;\beta)$ is the descent-edge polynomial for digraphs on $n$ in which every weak component is strong, where each weak component is weighted $\beta$. The Eulerian graphic generating function for the polynomials $v_n(u,y;\beta)$ is thus $\mathscr{D}elta(e^{\beta S(x)})$. We can now count strong digraphs by edges and descents, generalizing the result of Robinson who proved the case $u=1$ (and thus $q=1$) of the next result. \begin{thm} \label{t-strong} Let $S(x)$ be the exponential generating function for the descent-edge polynomial for strong digraphs and let $G(x)$ be the Eulerian graphic generating function for all digraphs, given in \eqref{e-G(x)}. Then \begin{equation} \label{e-strong} S(x) = -\log \bigl(\mathscr{D}elta^{-1}(G(x)^{-1})\bigr). \end{equation} \end{thm} \begin{proof} We will count in two ways ordered pairs $(D, C)$ where $D$ is a digraph on $[n]$ and $C$ is a subset of the set of source strong components of $D$. (We may identify $C$ with the digraph whose weakly connected components are the elements of the set $C$.) To such a pair we assign the weight $\beta^{|C|} u^{\des(D)} y^{\e(D)}$. We compute the sum of the weights of these pairs in two ways. First, we may choose $D$ as an arbitrary digraph on $[n]$ and then choose $C$ as an arbitrary subset of the source strong components of $D$. Thus the sum of the weights is $d_n(u,y;\beta+1)$. Alternatively, we may count pairs $(D,C)$ by first choosing a subset $T$ of $[n]$, constructing a set of strong digraphs $C$ on $T$, choosing a digraph $D'$ on $[n]-T$ and choosing a subset $E$ of $T\times( [n]-T)$. We then construct $D$ by adding to $D'$ the digraphs in $C$ together with the elements of $E$ as edges. Then $\des(D) = \des(E)+\des(C)+\des(D')$ and $\e(D) = |E| +\e(C)+ \e(D')$. It follows from Lemma \ref{l-qbinom} that the sum of the weights of the pairs $(D,C)$ in which $C$ has a total of $i$ vertices is $(1+y)^{i(n-i)}\tqbinom{n}{i}v_i(u,y; \beta) d_{n-i}(u,y;1)$. 
Summing over $i$ and using \eqref{e-dn1} gives \begin{equation} \label{e-vrec} \sum_{i=0}^n (1+y)^{i(n-i)}\qbinom{n}{i}v_i(u,y;\beta) d_{n-i}(u,y;1)=d_n(u,y;\beta+1), \end{equation} which is equivalent by \eqref{e-vn} to \begin{equation*} \label{e-Dalpha} \mathscr{D}elta(e^{\beta S(x)}) G(x)= \sumz n d_n(u,y;\beta+1) \dpowbq xn, \end{equation*} where $G(x)$ is given by \eqref{e-G(x)}. Now we set $\beta=-1$. Since $d_n(u,y;0)=0$ for $n>0$ we obtain \begin{equation*} \mathscr{D}elta(e^{- S(x)}) G(x)= 1. \end{equation*} Solving for $S(x)$ yields \eqref{e-strong}. \end{proof} We can now give extensions of Wright's recurrences \eqref{e-wright1} and \eqref{e-wright2} for the polynomials $s_n(u,y)$. \begin{cor}\label{StrongDiRec} The descent-edge polynomial for strong digraphs on $n$ vertices $s_n(u,y)$ satisfies the recurrence \begin{equation} \label{e-s-eta} s_n(u,y) = \eta_n(u,y) + \sum_{k=1}^{n-1} \binom{n-1}{k-1} s_k(u,y)\eta_{n-k}(u,y), \ n\ge 1, \end{equation} where the polynomials $\eta_n(u,y)$ are determined by \begin{multline} \label{e-etarec} \qquad \eta_n(u,y) = (1+y)^{\binom n2}(1+uy)^{\binom n2}\\ -\sum_{k=1}^{n-1} \qbinom{n}{k} (1+uy)^{\binom{n-k}{2}} (1+y)^{(n-k)(n+k-1)/2} \eta_k(u,y), \qquad \end{multline} with $q = (1+uy)/(1+y)$. \end{cor} \begin{proof} Let $E(x) = 1-e^{-S(x)}$, so \begin{equation} \label{e-SE} S(x) = \log\frac{1}{1-E(x)}, \end{equation} and define polynomials $\eta_n(u,y)$ by $E(x) = \sum_{n=1}^\infty \eta_n(u,y) x^n/n!$. Thus for $n\ge1$, $\eta_n(u,y) = -v_n(u,y;-1)$, where $v_n(u,y;\beta)$ is defined in \eqref{e-vn}. Then \eqref{e-etarec} is obtained by rearranging the case $\beta=-1$ of \eqref{e-vrec}, using \eqref{e-dn1} and $d_n(u,y;0)=0$ for $n>0$. Differentiating \eqref{e-SE} with respect to $x$ and simplifying gives \begin{equation*} S'(x) = E'(x)+S'(x)E(x). \end{equation*} Equating coefficients of $x^{n-1}/(n-1)!$ gives \eqref{e-s-eta}. \end{proof} The first few values of the polynomials $s_n(u, y)$ are $s_1(u,y) =1, s_2(u,y)= uy^2$, and \[ s_3(u,y) = uy^3 + u^2y^3 + uy^4 + 7u^2y^4 + u^3y^4 + 3u^2y^5+3u^3y^5 + u^3y^6.\] The values of $s_n(u,y)$ for larger $n$ can be easily computed from the recurrences of Corollary~\ref{StrongDiRec}. We provide the values of $s_n(u,1)$ and $s_n(1,y)$ for small values in Tables~\ref{NumberStrong}A and \ref{NumberStrong}B. 
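The recurrences \eqref{e-etarec} and \eqref{e-s-eta} are straightforward to implement. The sketch below is ours, not part of the paper; it assumes the \texttt{sympy} library and computes $\eta_n(u,y)$ and $s_n(u,y)$ symbolically for $n\le 5$. The specialization $s_n(1,1)$ can be compared with the totals in Table~\ref{NumberStrong}, and $s_3(u,y)$ with the expansion displayed above.
\begin{verbatim}
import sympy as sp

u, y = sp.symbols('u y')
q = (1 + u*y) / (1 + y)

def qbinom(n, k):
    """Gaussian binomial coefficient [n choose k]_q with q = (1+uy)/(1+y)."""
    r = sp.Integer(1)
    for i in range(1, k + 1):
        r *= (1 - q**(n - k + i)) / (1 - q**i)
    return sp.cancel(r)

eta, s = {}, {}
for n in range(1, 6):
    # recurrence (e-etarec) for eta_n(u,y)
    expr = (1 + y)**sp.binomial(n, 2) * (1 + u*y)**sp.binomial(n, 2)
    expr -= sum(qbinom(n, k) * (1 + u*y)**sp.binomial(n - k, 2)
                * (1 + y)**((n - k)*(n + k - 1)//2) * eta[k]
                for k in range(1, n))
    eta[n] = sp.expand(sp.cancel(expr))
    # recurrence (e-s-eta) for s_n(u,y)
    s[n] = sp.expand(eta[n] + sum(sp.binomial(n - 1, k - 1) * s[k] * eta[n - k]
                                  for k in range(1, n)))

print(s[3])                                            # compare with s_3(u,y) above
print([s[n].subs({u: 1, y: 1}) for n in range(1, 6)])  # 1, 1, 18, 1606, 565080
\end{verbatim}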
\begin{table} \begin{tabular}{ccc} \subcaptionbox{Number of strong digraphs on $n$ vertices with $d$ descents}{ \begin{tabular}{c||r|r|r|r} \diagbox{$d$}{$n$} & \multicolumn{1}{c|}{3} & \multicolumn{1}{c|}{4} & \multicolumn{1}{c|}{5} & \multicolumn{1}{c}{6} \\ \hline \hline 1 & 2&10 & 122 & 3,346\\ 2 & 11&154 & 3,418 & 142,760\\ 3 & 5&540 & 27,304 & 1,938,178\\ 4 & 0&581 & 90,277 & 12,186,976\\ 5 & 0&272 & 150,948 & 42,696,630\\ 6 & 0&49 & 150,519 & 94,605,036 \\ 7 & 0&0 & 95,088 & 145,009,210\\ 8 & 0&0 & 37,797& 161,845,163 \\ 9 & 0 &0 & 8,714 & 134,933,733\\ 10 & 0&0 & 893 & 84,656,743\\ 11 & 0 &0& 0 & 39,632,149\\ 12 & 0 & 0&0 & 13,481,441\\ 13 & 0 & 0&0 &3,156,845\\ 14 & 0 & 0 &0& 455,917\\ 15 & 0&0& 0&30,649\\ \hline TOTAL & 18 &1,606 & 565,080 & 734,774,776\\ \multicolumn{5}{c}{} \\ \multicolumn{5}{c}{} \\ \multicolumn{5}{c}{} \\ \end{tabular} } & \quad & \subcaptionbox{Number of strong digraphs on $n$ vertices with $e$ edges}{ \begin{tabular}{c||r|r|r} \diagbox{$e$}{$n$} & \multicolumn{1}{c|}{3} & \multicolumn{1}{c|}{4} & \multicolumn{1}{c}{5} \\ \hline \hline 3 & 2 & 0 & 0\\ 4 & 9 & 6 & 0\\ 5 & 6 & 84 & 24\\ 6 & 1 & 316 & 720 \\ 7 & 0 & 492 & 6,440\\ 8 & 0 & 417& 26,875 \\ 9 & 0 & 212 & 65,280\\ 10 & 0 & 66 & 105,566\\ 11 & 0 & 12 & 122,580\\ 12 & 0 & 1 & 106,825\\ 13 & 0 & 0 & 71,700\\ 14 & 0 & 0 & 37,540\\ 15 & 0&0& 15,344\\ 16 & 0 & 0 & 4,835\\ 17 & 0 & 0 & 1,140\\ 18 & 0 & 0 & 190\\ 19 & 0 & 0 & 20\\ 20 & 0 & 0 & 1\\ \hline TOTAL & 18 & 1,606 & 565,080 \end{tabular} } \end{tabular} \caption{Number of strong digraphs by descents and edges} \label{NumberStrong} \end{table} Note that setting $u=y=1$ in the recurrences of Corollary \ref{StrongDiRec} gives Wright's recurrences \eqref{e-wright1} and \eqref{e-wright2}, so Wright's $\eta_n$ is our $\eta_n(1,1)$. In fact, Wright also knew the corresponding recurrences for $s_n(1,y)$ and $\eta_n(1,y)$, counting strong digraphs by edges. He also stated that \eqref{e-wright1} and \eqref{e-wright2} look as if they should possess combinatorial interpretations, but that he was not able to find one. He wrote, ``We can show that $\eta_n$ is non-negative, though $\eta_2 = 0$. But some of the coefficients in the polynomials $\eta_n(y)$ are negative and this makes it seem somewhat unlikely that $\eta_n$ has a simple combinatorial meaning." (Wright's $\eta_n(y)$ is our $\eta_n(1,y)$.) Despite Wright's pessimism, $\eta_n$ does have a simple combinatorial interpretation, which suggests a connection between the enumeration of strong tournaments and the enumeration of strong digraphs. If we multiply Moon and Moser's recurrence \eqref{e-mm1} for strong tournaments by $2^{\binom n2}$, we get \begin{equation*} 2^{\binom n2}t_n= 2^{n(n-1)} - \sum_{k=1}^{n-1} \binom{n}{k}2^{(n-1)(n-k)}\cdot 2^{\binom k2}t_k. \end{equation*} Comparing with \eqref{e-wright2}, we see that Wright's $\eta_n$ is equal to our $2^{\binom n2}t_n$. Thus \eqref{e-SE} for $u=y=1$ may be written \begin{equation} \label{e-wright3} \sum_{n=1}^\infty s_n \dpow xn =-\log\biggl(1-\sum_{n=1}^\infty 2^{\binom n2}t_n\dpow xn \biggr). \end{equation} Although the coefficients of $\eta_n(u,y)$ are not in general nonnegative, we can derive a one-parameter refinement of the formula $\eta_n = 2^{\binom n2}t_n$ with nonnegative coefficients from Theorems \ref{StrongTournRec} and \ref{t-strong}. Note that $\eta_2=\eta_2(1,1)=0$ and $\eta_2(u,y) = -1+uy^2$. This suggests that if we want a specialization of $ \eta_n(u,y)$ with nonnegative coefficients, we might try setting $u=y^{-2}$. 
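Before stating this refinement, we remark that the identity $\eta_n=2^{\binom n2}t_n$ is easy to confirm numerically. The sketch below is ours, not part of the paper; it uses only integer arithmetic, computing $t_n(1)$ from the $u=1$ case of \eqref{e-str} and $\eta_n(1,1)$ from the $u=y=1$ case of \eqref{e-etarec}.
\begin{verbatim}
from math import comb

N = 8
t = {1: 1}        # t_n(1): strong tournaments, recurrence (e-str) at u = 1
for n in range(2, N + 1):
    t[n] = 2**comb(n, 2) - sum(comb(n, k) * 2**comb(n - k, 2) * t[k]
                               for k in range(1, n))

eta = {}          # eta_n(1,1) from (e-etarec) at u = y = 1 (Wright's recurrence)
for n in range(1, N + 1):
    eta[n] = 2**(n*(n - 1)) - sum(comb(n, k) * 2**((n - 1)*(n - k)) * eta[k]
                                  for k in range(1, n))

print(all(eta[n] == 2**comb(n, 2) * t[n] for n in range(1, N + 1)))  # True
print([t[n] for n in range(1, 8)])   # 1, 0, 2, 24, 544, 22320, 1677488
\end{verbatim}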
\begin{prop} The polynomials $\eta_n(u,y)$ defined by \eqref{e-etarec} and the descent polynomials for strong tournaments by descents $t_n(u)$, determined by \eqref{e-str}, are related by \begin{equation} \label{e-eta-t} \eta_n(y^{-2},y) = (1+y)^{\binom n2}t_n(y^{-1}). \end{equation} \end{prop} \begin{proof} We show that both sides of \eqref{e-eta-t} satisfy the same recurrence. (This recurrence does not require any initial values.) If $u=y^{-2}$ then $1+uy=1+y^{-1}$ and $q=(1+uy)/(1+y)=y^{-1}$, so setting $u=y^{-2}$ in \eqref{e-etarec} gives \begin{multline*} \qquad \eta_n(y^{-2},y) = (1+y)^{\binom n2}(1+y^{-1})^{\binom n2}\\ -\sum_{k=1}^{n-1} \binom{n}{k}_{\!\!y^{-1}} (1+y^{-1})^{\binom{n-k}{2}} (1+y)^{(n-k)(n+k-1)/2} \eta_k(y^{-2},y). \qquad \end{multline*} Setting $u=y^{-1}$ in \eqref{e-str}, multiplying by $(1+y)^{\binom n2}$, and simplifying gives \begin{multline*} \quad (1+y)^{\binom n2}t_n(y^{-1})=(1+y)^{\binom n2}(1+y^{-1})^{\binom n2}\\ -\sum_{k=1}^{n-1} \binom {n}{k}_{\!\!y^{-1}} (1+y)^{\binom n2}(1+y^{-1})^{\binom{n-k}2} (1+y)^{-\binom k2}\cdot (1+y)^{\binom k2}t_k(y^{-1}). \quad \end{multline*} Then \eqref{e-eta-t} follows by comparing these two recurrences and using $(1+y)^{(n-k)(n+k-1)/2}= (1+y)^{\binom n2}(1+y)^{-\binom k2}$. \end{proof} Applying \eqref{e-eta-t} to \eqref{e-SE} gives \begin{equation} \label{e-sdt} \sumz n s_n(y^{-2},y) \dpow xn = -\log\biggl(1-\sum_{n=1}^\infty (1+y)^{\binom n2}t_n(y^{-1})\dpow xn \biggr). \end{equation} If two exponential generating functions $f$ and $g$ are related by $f=-\log(1-g)=\sum_{n=1}^\infty (n-1)!\,g^n\!/n!$ then $f$ may be interpreted as counting cycles of the objects counted by $g$, so we might hope that \eqref{e-wright3} and \eqref{e-sdt} could be explained combinatorially by a bijection from strong digraphs to cycles of strong tournaments with some additional structure. But we have not been able to find such a bijection. \section{Acyclic Digraphs and Trees}\label{sec:acyclic} \subsection{Acyclic Digraphs}\label{sec:acyclic digraphs} We begin this section by enumerating acyclic digraphs by their number of edges, descents, and sources. We again make use of Eulerian graphic generating functions, and also follow closely the proof in \cite{G96} for enumeration of acyclic digraphs by sources and edges (which is based on Robinson's proof \cite{racyclic}). Let \[a_n(u,y;\beta)= \sum_D u^{\des(D)}y^{\e(D)}\beta^{\source(D)},\] where the sum is over all acyclic digraphs $D$ on the vertex set $[n]$, $\e(D)$ is the number of edges of $D$, and $\source(D)$ is the number of sources of $D$; that is, the number of vertices of $D$ of in-degree 0. Let $a_n(u,y)=a_n(u,y;1)$. To count acyclic digraphs by sources we take an acyclic digraph and add some new vertices as sources. The new vertices will be a subset of the set of sources of the expanded digraph. This gives a formula expressing $a_n(u,y; \beta+1)$ in terms of $a_j(u,y)$ for $j\le n$. Since every nonempty acyclic digraph has at least one source, the formula for $a_n(u,y;0)$ gives a recurrence for $a_n(u,y)$. \begin{lem} \label{l-acyclic} For every nonnegative integer $n$, we have \begin{equation} \label{e-ac-rec} \sum_{i=0}^n \qbinom{n}{i} (1+y)^{i(n-i)}\beta^i a_{n-i}(u,y)=a_n(u,y;\beta+1). \end{equation} \end{lem} \begin{proof} We count ordered pairs $(D,C)$, where $D$ is an acyclic digraph on $[n]$ and $C$ is a subset of the set of sources of $D$. To such a pair we assign the weight $u^{\des(D)} y^{\e(D)}\beta^{|C|}$. We compute the sum of the weights of these pairs in two ways. 
First, we may choose $D$ as an acyclic digraph on $[n]$ and then choose $C$ as an arbitrary subset of the sources of $D$. Thus the sum of the weights is $a_n(u,y;\beta+1)$. We may also count pairs $(D,C)$ by first choosing a subset $C$ of $[n]$, choosing a digraph $D'$ on $[n]-C$ and choosing a subset $E$ of $C\times ([n]-C)$. We then construct $D$ by adding to $D'$ the elements of $C$ as vertices and the elements of $E$ as edges. Then $\des(D) = \des(E)+\des(D')$ and $\e(D) = |E| + \e(D')$. Then it follows from Lemma \ref{l-qbinom} that the sum of the weights of the pairs $(D,C)$ in which $|C|=i$ is $\tqbinom{n}{i}(1+y)^{i(n-i)}\beta^i a_{n-i}(u,y)$, and summing on $i$ gives the left side of \eqref{e-ac-rec}. \end{proof} \begin{thm} \label{t-acyclic} Let $a_n(u,y)$ be the descent-edge polynomial for the set of acyclic digraphs on $n$ vertices and let $A(x)$ be the Eulerian graphic generating function for $a_n(u,y)$ where $q = (1+uy)/(1+y)$. Then \begin{equation} \label{e-A(x)} A(x) = \left(\sumz n (-1)^n \dpowbq xn \right)^{-1}. \end{equation} More generally, the Eulerian graphic generating function for $a_n(u,y;\beta)$ is \begin{equation} \label{e-A(x,a)} \left(\sumz n (\beta -1)^n\dpowbq xn \right)\biggm/ \left(\sumz n (-1)^n \dpowbq xn \right)\qedhere. \end{equation} \end{thm} \begin{proof} Equation \eqref{e-ac-rec} is equivalent to \begin{equation} \label{e-A2} \sumz n a_n(u, y; \beta+1) \dpowbq xn = \left(\sumz n \beta ^n \dpowbq xn \right)A(x). \end{equation} Setting $\beta=-1$ in \eqref{e-A2}, and using the fact that $a_n(u,y;0) = 0$ for $n>0$ gives \eqref{e-A(x)}. Then replacing $\beta$ by $\beta-1$ in \eqref{e-A2} and applying \eqref{e-A(x)} gives \eqref{e-A(x,a)}. \end{proof} An interesting special case of \eqref{e-A(x,a)} is obtained by setting $u=0$, so that we are counting (acyclic) digraphs with no descents by the number of sources. We find that \begin{equation*} a_n(0, y; \beta) = \prod_{i=0}^{n-1}\bigl( \beta + (1+y)^i-1\bigr). \end{equation*} This is not difficult to prove directly: since $a_0(0,y;\beta)=1$, it is enough to show that for $n>0$ we have \begin{equation} \label{e-a0} a_n(0, y; \beta) = a_{n-1}(0, y; \beta)\bigl( \beta + (1+y)^n-1\bigr). \end{equation} To prove \eqref{e-a0}, we note that every acyclic digraph on $[n]$ with no descents is obtained from an acyclic digraph on $[n-1]$ with no descents by adding $n$ as a vertex, together with some of the edges $(i,n)$ for $i\in [n-1]$. If none of these edges are added then $n$ is a source; otherwise, $n$ is not a source. Equation \eqref{e-a0} follows immediately from this construction. From either \eqref{e-ac-rec} or \eqref{e-A(x)} we obtain a recurrence for $a_n(u,y)$: \begin{cor}\label{AcyclicRec} Let $a_n(u,y)$ be the descent-edge polynomial for the set of acyclic digraphs on $n$ vertices. Then \[ a_n(u,y) = \sum_{i=0}^{n-1} (-1)^{n-i-1}\qbinom{n}{i} (1+y)^{i(n-i)}a_{i}(u,y) \] where $q = (1+uy)/(1+y)$.\qed \end{cor} The polynomials $a_n(u, y)$ for the first few values of $n$ are given by $a_1(u,y) =1,$ $a_2(u,y)= 1 + y + uy$, and \[ a_3(u,y) = 1 + (3 + 3u)y + (3 + 6u + 3u^2)y^2 + (1 + 2u + 2u^2 + u^3)y^3.\] Coefficients of $a_n(u,y)$ for larger $n$ can be computed from the formula in Corollary~{\ref{AcyclicRec}}; we provide the values of $a_n(u,1)$ for small values of $n$ in Table~\ref{NumberAcyclicDescent}. 
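The recurrence of Corollary~\ref{AcyclicRec} is also easy to run. The sketch below is ours, not part of the paper; it assumes \texttt{sympy} and computes $a_n(u,y)$ for small $n$, which can be compared with the expansion of $a_3(u,y)$ above and, at $u=y=1$, with the totals in Table~\ref{NumberAcyclicDescent}.
\begin{verbatim}
import sympy as sp

u, y = sp.symbols('u y')
q = (1 + u*y) / (1 + y)

def qbinom(n, k):
    """Gaussian binomial coefficient [n choose k]_q with q = (1+uy)/(1+y)."""
    r = sp.Integer(1)
    for i in range(1, k + 1):
        r *= (1 - q**(n - k + i)) / (1 - q**i)
    return sp.cancel(r)

a = {0: sp.Integer(1)}
for n in range(1, 6):
    a[n] = sp.expand(sp.cancel(sum((-1)**(n - i - 1) * qbinom(n, i)
                                   * (1 + y)**(i*(n - i)) * a[i]
                                   for i in range(n))))

print(sp.expand(a[3]))                              # compare with a_3(u,y) in the text
print([a[n].subs({u: 1, y: 1}) for n in range(6)])  # 1, 1, 3, 25, 543, 29281
\end{verbatim}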
\begin{table} \begin{tabular}{c||r|r|r|r|r|r|r} \diagbox{$u$}{$n$} & 1 & 2 & \multicolumn{1}{c|}{3} & \multicolumn{1}{c|}{4} & \multicolumn{1}{c|}{5} & \multicolumn{1}{c|}{6} & \multicolumn{1}{c|}{7} \\ \hline \hline 0 & 1 & 2 & 8 & 64 & 1,024 & 32,768 & 2,097,152\\ 1 & 0 & 1 & 11 & 161 & 3,927 & 172,665 & 14,208,231\\ 2 & 0 & 0 & 5 & 167 & 6,698 & 419,364& 45,263,175\\ 3 & 0 & 0 & 1 & 102 & 7,185 & 656,733& 94,040,848\\ 4 & 0 & 0 & 0 & 39 & 5,477 & 757,939&145,990,526\\ 5 & 0 & 0 & 0 & 9 & 3,107 & 686,425& 181,444,276\\ 6 & 0 & 0 & 0 & 1 & 1,329 & 504,084& 187,742,937\\ 7 & 0 & 0 & 0 & 0 & 423 & 305,207& 165,596,535\\ 8 & 0 & 0 & 0 & 0 & 96 & 153,333& 126,344,492\\ 9 & 0 & 0 & 0 & 0 & 14 & 63,789& 84,115,442\\ 10 & 0 & 0 & 0 & 0 & 1 & 21,752& 49,085,984\\ 11 & 0 & 0 & 0 & 0 & 0 & 5,959& 25,134,230\\ 12 & 0 & 0 & 0 & 0 & 0 &1,267& 11,270,307\\ 13 & 0 & 0 & 0 & 0 & 0 & 197 & 4,403,313\\ 14 & 0 & 0 & 0 & 0 & 0 & 20& 1,486,423\\ 15 & 0 & 0 & 0 & 0 & 0 & 1& 428,139\\ 16 & 0 & 0 & 0 & 0 & 0 & 0 & 103,345\\ 17 & 0 & 0 & 0 & 0 & 0 & 0 & 20,369 \\ 18 & 0 & 0 & 0 & 0 & 0 & 0 & 3,153 \\ 19 & 0 & 0 & 0 & 0 & 0 & 0 & 360 \\ 20 & 0 & 0 & 0 & 0 & 0 & 0 & 27 \\ 21 & 0 & 0 & 0 & 0 & 0 & 0 & 1 \\ \hline TOTAL & 1 & 3 & 25 & 543 & 29,281 & 3,781,503 & 1,138,779,265 \end{tabular} \caption{Values of $a_{n}(u,1)$, the number of acyclic digraphs on $n$ vertices with $u$ descents, for $n \leq 7$. The total is the number of labeled acyclic digraphs on $n$ vertices.} \label{NumberAcyclicDescent} \end{table} Robinson \cite{racyclic, rstrong} gave a common generalization of the case $u=1$ of Theorems \ref{t-acyclic} and \ref{t-strong}. Given a class $\mathscr{S}$ of strong digraphs he found a relation between the generating function for digraphs in $\mathscr{S}$ and the generating function for digraphs all of whose strong components are in $\mathscr{S}$. If $\mathscr{S}$ is the class of all strong digraphs we get the case $u=1$ of Theorem \ref{t-strong} and if $\mathscr{S}$ is the class of 1-vertex graphs we get the case $u=1$ of Theorem \ref{t-acyclic}. Our approach could be applied to extend Robinson's generalization to include descents. \subsection{Chromatic polynomials}\label{sec:chromatic} Stanley \cite{sacyclic} derived the generating function for acyclic digraphs from his theorem \cite[Corollary 1.3]{sacyclic} that if $\chi_G(\lambda)$ is the chromatic polynomial of a graph $G$ with $n$ vertices, then the number of acyclic orientations of $G$ is $(-1)^n\chi_G(-1)$. We can use his approach to derive in another way the Eulerian graphic generating function \eqref{e-A(x)} for acyclic digraphs by edges and descents, using an interesting generalization of the chromatic polynomial. Let us first sketch Stanley's approach. By applying the combinatorial interpretation of multiplication of graphic generating functions, we can show that for $\lambda$ a nonnegative integer, the coefficient of $x^n/n!\,(1+y)^{\binom n2}$ in \begin{equation} \label{e-color} \mathscr{B}iggl(\sumz n \dpowgy xn\mathscr{B}iggr)^\lambda \end{equation} counts ordered partitions of $[n]$ into $\lambda$ blocks (possibly empty), together with an arbitrary set of (undirected) edges between elements of different blocks, where each edge has weight $y$. 
If we think of a vertex in the $i$th block as colored in color $i$, then we may describe these objects as $\lambda$-colored graphs, i.e., graphs in which the vertices are colored using colors chosen from $\{1,2,\dots, \lambda\}$ so that adjacent vertices have different colors, as shown (for $y=1$) by Read \cite{read}. Thus for $\lambda$ a nonnegative integer, the coefficient of $x^n/n!\,(1+y)^{\binom n2}$ in \eqref{e-color} is the sum over all graphs $G$ on $[n]$ of $y^{\e(G)}\chi_G(\lambda)$, where $\e(G)$ is the number of edges of $G$. But if two polynomials in $\lambda$ are equal whenever $\lambda$ is a nonnegative integer then they are equal as polynomials in $\lambda$, and in particular, they are equal for $\lambda=-1$. Applying Stanley's theorem on acyclic orientations, we find that setting $\lambda=-1$ in \eqref{e-color}, and replacing $x$ with $-x$, gives the graphic generating function for acyclic digraphs. (Stanley only considered the case $y=1$ but the extension to counting by edges is straightforward.) To include descents, we consider a generalization of the chromatic polynomial. Let $G$ be a graph with totally ordered vertices. We define a \emph{descent} of a proper coloring $c$ of $G$ to be an edge $\{i,j\}$ of $G$ with $i<j$ and $c(i)>c(j)$. We may define the \emph{refined chromatic polynomial} $X_G(\lambda)$ to be $\sum_{c}u^{\des(c)}$ where the sum is over all proper colorings $c$ of $G$ with colors chosen from $\{1, 2, \dots, \lambda\}$, and $\des(c)$ is the number of descents of the coloring $c$. (It is not hard to show that $X_G(\lambda)$ is indeed a polynomial in $\lambda$; this follows from the proof of Theorem \ref{t-rcp} below.) We note that $X_G(\lambda)$ is a specialization of the chromatic quasisymmetric function introduced by Shareshian and Wachs \cite{sw}. Then we have the following analogue of Stanley's theorem on acyclic orientations. \begin{thm} \label{t-rcp} Let $G$ be a graph on a totally ordered $n$-element vertex set and let $X_G(\lambda)$ be the refined chromatic polynomial of $G$. Then \begin{equation*} X_G(-1)=(-1)^n \sum_O u^{\des(O)} \end{equation*} where the sum is over all acyclic orientations $O$ of $G$. \end{thm} \begin{proof}[Proof sketch] We follow closely Stanley's second proof in \cite{sacyclic} of the case $u=1$, to which we refer for definitions not given here. To each proper coloring $c$ of $G$, we associate an acyclic orientation $O_c$ of $G$ in which each edge is directed from the lower-colored endpoint to the higher-colored endpoint. Then the coloring $c$ and the acyclic orientation $O_c$ have the same number of descents. As in Stanley's proof, the number of proper $\lambda$-colorings associated with a given acyclic orientation $O$ is the strict order polynomial $\bar\Omega(\bar O, \lambda)$, where $\bar O$ is the transitive and reflexive closure of $O$, regarded as a binary relation on the vertex set. Thus \begin{equation*} X_G(\lambda) = \sum_O u^{\des(O)}\bar\Omega(\bar O, \lambda), \end{equation*} where the sum is over all acyclic orientations of $G$. It is known, as a special case of Stanley's reciprocity theorem for order polynomials, that for every acyclic orientation $O$ we have $\bar\Omega(\bar O, -1)=(-1)^n$, and the result follows. \end{proof} We can now give another proof of \eqref{e-A(x)}, counting acyclic digraphs by descents and edges. 
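Before giving that proof, we note that Theorem \ref{t-rcp} can be checked by brute force on small examples. The following sketch is ours, not part of the paper; it assumes \texttt{sympy}, takes $G$ to be the four-cycle on $\{1,2,3,4\}$ with its natural order, recovers $X_G(\lambda)$ by Lagrange interpolation from its values at $\lambda=0,\dots,4$, and compares $X_G(-1)$ with $(-1)^4\sum_O u^{\des(O)}$ over the acyclic orientations $O$.
\begin{verbatim}
from itertools import product
import sympy as sp

u, lam = sp.symbols('u lambda')
V = [1, 2, 3, 4]
E = [(1, 2), (2, 3), (3, 4), (1, 4)]   # the 4-cycle; edges stored as (i, j) with i < j

def X(num_colors):
    """Sum of u^des(c) over proper colorings c with colors 1..num_colors."""
    tot = sp.Integer(0)
    for c in product(range(1, num_colors + 1), repeat=len(V)):
        col = dict(zip(V, c))
        if all(col[i] != col[j] for i, j in E):
            tot += u**sum(1 for i, j in E if col[i] > col[j])
    return tot

def acyclic(arcs):
    """Kahn's algorithm: True if the orientation has no directed cycle."""
    indeg = {v: sum(1 for a, b in arcs if b == v) for v in V}
    ready, seen = [v for v in V if indeg[v] == 0], 0
    while ready:
        v = ready.pop()
        seen += 1
        for a, b in arcs:
            if a == v:
                indeg[b] -= 1
                if indeg[b] == 0:
                    ready.append(b)
    return seen == len(V)

# right-hand side of the theorem: sum of u^des(O) over acyclic orientations O
rhs = sp.Integer(0)
for flip in product([0, 1], repeat=len(E)):
    arcs = [(i, j) if f == 0 else (j, i) for (i, j), f in zip(E, flip)]
    if acyclic(arcs):
        rhs += u**sum(1 for a, b in arcs if a > b)

# left-hand side: Lagrange-interpolate X_G(lambda) at lambda = 0..|V|, evaluate at -1
pts = list(range(len(V) + 1))
vals = [X(p) for p in pts]
XG = sp.Integer(0)
for j, pj in enumerate(pts):
    basis = sp.Integer(1)
    for k, pk in enumerate(pts):
        if k != j:
            basis *= sp.Rational(1, pj - pk) * (lam - pk)
    XG += vals[j] * basis

print(sp.expand(XG.subs(lam, -1) - (-1)**len(V) * rhs))   # 0
\end{verbatim}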
Using Lemma \ref{l-qbinom}, we can show by induction on $\lambda$ that for $\lambda$ a nonnegative integer, the coefficient of $x^n/n!_q (1+y)^{\binom n2}$ in \begin{equation*} \mathscr{B}iggl(\sumz n \dpowbq xn\mathscr{B}iggr)^\lambda \end{equation*} counts $\lambda$-colored graphs on $[n]$, with edges weighted by $y$ and descents weighted by~$u$. Thus this coefficient is the sum \begin{equation*} \sum_G y^{\e(G)}X_G(\lambda) \end{equation*} over all graphs $G$ on $[n]$. Setting $\lambda=-1$ and using Theorem \ref{t-rcp} gives \eqref{e-A(x)}. \subsection{Trees}\label{sec:trees} We can use the basic idea of Lemma \ref{l-acyclic} to count rooted trees and forests by descents. Recall that we define a rooted tree (\emph{tree} for short) to be an acyclic digraph in which one vertex (the root) has outdegree 0 and every other vertex has outdegree 1. The vertices of indegree 0 are called \emph{leaves} but if the tree contains only one vertex, we do not consider this vertex to be a leaf. A (rooted) \emph{forest} is a digraph in which every weak component is a tree. Let $t_n(u; \beta)$ be the sum of the weights of all trees with vertex set $[n]$, where the weight of a tree with $i$ descents and $j$ leaves is $u^i\beta^j$, and let $T(x,u; \beta) = \sumz n t_n(u; \beta) x^n/n!$. We first illustrate the approach with $u=1$. The same approach to counting trees was taken in \cite{G96}. The result was stated there as a recurrence, but here we use exponential generating functions directly since the analogue for general $u$, discussed in Proposition \ref{p-tdl} below, would be more complicated as a recurrence. Writing $T(x; \beta)$ for $T(x,1; \beta)$ (counting trees by leaves) and $T(x)$ for $T(x,1;1)$ (just counting trees), we will show that \begin{equation} \label{e-tree1} T(x; \beta+1) = T(xe^{\beta x}). \end{equation} The left side of \eqref{e-tree1} counts trees in which some subset of the leaves are marked, where each marked leaf is weighted $\beta$. To interpret the right side of \eqref{e-tree1}, we assume that the reader is familiar with the combinatorics of exponential generating functions, as described, for example, in \cite[Chapter 5]{ec2}. The exponential generating function $xe^{\beta x}$ counts ``short trees": trees in which every vertex other than the root is a leaf, where the leaves are weighted by $\beta$. Then $T(xe^{\beta x}; 1)$ is the exponential generating function for structures obtained from rooted trees by replacing each vertex with a short tree. It is clear that these structures are essentially the same as the trees counted by the left side; the marked leaves corresponding to the leaves of the short trees. Setting $\beta=-1$ in \eqref{e-tree1} gives $T(xe^{-x})=x$. In other words $T(x)$ is the compositional inverse of $xe^{-x}$, so $T(x)e^{-T(x)}=x$, or $T(x) = xe^{T(x)}$, the more common form of the functional equation for $T(x)$. These equations can be solved by Lagrange inversion or other methods to obtain the well-known formula \begin{equation*} T(x) = \sum_{n=1}^\infty n^{n-1}\dpow xn, \end{equation*} and more generally, \begin{equation*} e^{zT(x)}=\sumz n z(z+n)^{n-1}\dpow xn, \end{equation*} which counts forests of rooted trees by the number of trees. There is also a simple functional equation for $T(x;\beta)=T(xe^{(\beta -1)x})$, which counts trees by leaves. 
From the functional equation $T(x) = xe^{T(x)}$ we can easily obtain the functional equation for $T(x;\beta)=T(xe^{(\beta -1)x})$: \begin{equation} \label{e-T(x;alpha)} T(x;\beta) = xe^{T(x;\beta) +(\beta-1)x} \end{equation} Equation \eqref{e-T(x;alpha)} is easy to see combinatorially, interpreting $T(x;\beta)+(\beta-1)x$ as counting trees by leaves, but now considering the root of a one-vertex tree to be a leaf. Next, we can generalize \eqref{e-tree1} to keep track of descents. The argument is essentially the same as for \eqref{e-tree1} but we need to replace $xe^{\beta x}$ with something a little more complicated. \begin{prop} \label{p-tdl} The exponential generating function $T(x,u;\beta)$ for trees by descents and leaves satisfies \begin{equation} \label{e-tree2} T(x, u;\beta+1) = T\left(\frac{e^{\beta x} - e^{\beta u x}}{\beta(1-u)}, u\right), \end{equation} where $T(x,u) = T(x,u;1)$ is the exponential generating function for trees by descents. Moreover, $T(x,u)$ is the compositional inverse \textup{(}as a power series in $x$\textup{)} of \begin{equation*} \frac{e^{-x} - e^{-u x}}{u-1} = \sum_{n=1}^\infty (-1)^{n-1}(1+u+\cdots +u^{n-1})\dpow xn. \end{equation*} \end{prop} \begin{proof} A short tree on $[n]$ with root $i+1$ has $i$ descents. Thus the exponential generating function for short trees, with descents weighted by $u$ and leaves weighted by $\beta$, is \begin{equation*} \sum_{n=1}^\infty (1+u+\cdots +u^{n-1})\beta^{n-1}\dpow xn = \sum_{n=1}^\infty\frac{1-u^n}{1-u}\beta^{n-1}\dpow xn =\frac{e^{\beta x} - e^{\beta u x}}{\beta(1-u)}. \end{equation*} Then we obtain \eqref{e-tree2} in the same way that we obtained \eqref{e-tree1}. As before, $T(x,u; 0) = x$, so setting $\beta=-1$ in \eqref{e-tree2} gives \begin{equation*} T\left(\frac{e^{-x} - e^{-u x}}{u-1}, u\right)=x.\qedhere \end{equation*} \end{proof} Another combinatorial proof that $T(x,u)$ is the compositional inverse of $(e^{-x}-e^{-ux})/(u-1)$ was given by Drake \cite[Example 1.7.2]{drake}. There is a simple formula for the coefficients of $T(x,u)$ that can be derived from our results and known formulas. \begin{prop} \label{p-trees} For the exponential generating function $T(x,u)$ for trees by descents, we have the formulas \begin{equation*} T(x,u) = \sum_{n=1}^\infty \prod_{i=1}^{n-1} (iu+n-i)\dpow xn \end{equation*} and \begin{equation*} e^{zT(x,u)}=1+\sum_{n=1}^\infty z\prod_{i=1}^{n-1} (iu+n-i+z)\dpow xn. \end{equation*} \end{prop} \begin{proof} Since $T(x,u)$ is the compositional inverse of $(e^{-x} - e^{-u x})/(u-1)$, we have \begin{equation*} \frac{e^{-T(x,u)} - e^{-u T(x,u)}}{u-1} = x. \end{equation*} Multiplying both sides by $(1-u)e^{T(x,u)}$ gives \begin{equation} \label{e-Txu} e^{(1-u)T(x,u)}-1 = (1-u)xe^{T(x,u)}. \end{equation} Now set $G=e^{(1-u)T(x,u)}$. Then \eqref{e-Txu} may be written \begin{equation*} G = 1+ (1-u)xG^{1/(1-u)}, \end{equation*} and the desired formulas follow from the results of \cite[Section 5]{GS} or by Lagrange inversion (see, e.g., \cite[Section 3.3]{GLagrange}). \end{proof} We note that the formulas of Proposition \ref{p-trees} are proved by a different method in \cite[Section 9]{GS}, and more general enumerative results for trees have been proved bijectively by E\u gecio\u glu and Remmel \cite{ER}. Forests have been counted by leaves and descents of a different kind in \cite{desc-leaves} but there does not seem to be any connection between the results described here and the results of \cite{desc-leaves}. 
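Proposition \ref{p-trees} and the compositional-inverse characterization of $T(x,u)$ are easy to check to any fixed order. The sketch below is ours, not part of the paper; it assumes the \texttt{sympy} library, truncates all series at order six, and verifies that the series built from the product formula is inverted by $(e^{-x}-e^{-ux})/(u-1)$ up to that order.
\begin{verbatim}
import sympy as sp

x, u = sp.symbols('x u')
N = 6

# T(x,u) from the product formula of Proposition p-trees, truncated at order N
T = sum(sp.Mul(*[i*u + n - i for i in range(1, n)]) * x**n / sp.factorial(n)
        for n in range(1, N + 1))

# the series that T(x,u) should invert: (e^{-x} - e^{-ux})/(u - 1)
g = sp.series((sp.exp(-x) - sp.exp(-u*x)) / (u - 1), x, 0, N + 1).removeO()

# compositional inverse check: g(T(x,u)) = x + O(x^{N+1})
comp = sp.expand(g.subs(x, T))
comp = sum(comp.coeff(x, n) * x**n for n in range(N + 1))   # truncate at order N
print(sp.simplify(comp - x))                                # 0

# at u = 1 the numbers n! [x^n] T(x,u) reduce to n^(n-1) (rooted trees)
print([sp.Mul(*[i*u + n - i for i in range(1, n)]).subs(u, 1)
       for n in range(1, 7)])        # 1, 2, 9, 64, 625, 7776
\end{verbatim}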
\textbf{Acknowledgment.} We would like to thank two anonymous referees for helpful comments. \end{document}
\begin{document} \title{$L_1$ spline fits via a sliding window process: continuous and discrete cases} \begin{abstract} Best $L_1$ approximation of the Heaviside function and best $\ell_1$ approximation of multiscale univariate datasets by cubic splines exhibit a Gibbs phenomenon. Numerical experiments show that it can be reduced by using $L_1$ spline fits, which are best $L_1$ approximations in an appropriate spline set obtained as the union of $L_1$ interpolation splines. We prove here the existence of $L_1$ spline fits, which, to the best of our knowledge, has never been done before. Their major disadvantage is that obtaining them can be time-consuming. We therefore propose a sliding window method on seven nodes which is as efficient as the global method, both for functions and for datasets with abrupt changes of magnitude, while having linear complexity in the number of spline nodes.\\ \textbf{Keywords:} Best approximation, $L_1$ norm, shape preservation, polynomial spline, Heaviside function, sliding window \end{abstract} \section{Introduction} Over the past fifteen years, $L_1$ minimization-based methods have shown very interesting features for the interpolation and approximation of continuous or discontinuous functions and of irregular geometric data. In \cite{Moskona1995}, Moskona \emph{et al.} have shown that the Gibbs phenomenon arising in best $L_1$ trigonometric approximation of the Heaviside function is smaller than the one observed with the $L_2$ norm. Saff and Tashev carried out a similar study with polygonal lines and reached the same conclusion \cite{SaffTashev1999}.\\ Similarly to classical cubic interpolation splines, which minimize the $L_2$ norm of the second derivative, Lavery has defined cubic Hermite interpolation splines which minimize the $L_1$ norm of the second derivative \cite{Lavery2000}. He observed that this strategy completely eliminates the Gibbs phenomenon that classical $L_2$ cubic interpolation splines exhibit on the Heaviside function. This was later proved formally by Auquiert \emph{et al.} \cite{Auquiert2007}.\\ Further work then focused on an appropriate combination of the best $L_1$ approximation functional and the variational $L_1$ functional used for the interpolation problem. Lavery first proposed a linear combination of the two functionals and called the resulting splines $L_1$ smoothing splines \cite{Lavery2000b}. Contrary to $L_2$ smoothing splines, they do not introduce oscillations on multiscale univariate datasets. However, the regularization parameter used in the linear combination of the $L_1$ functionals cannot be chosen easily.\\ Lavery then proposed another kind of $L_1$ splines, named $L_1$ spline fits \cite{Lavery2004}. They are best $L_1$ approximations in an appropriate spline set obtained as the union of $L_1$ interpolation splines. Like $L_1$ smoothing splines, they do not introduce oscillations, and they have the advantage of not requiring any additional parameter. The existence of such splines was not shown in \cite{Lavery2004}. We prove in this paper that $L_1$ spline fits at a given set of nodes exist for every function in $L_1[a,b]$.\\ The intrinsic non-linearity of $L_1$ norm problems implies that a closed-form solution is not available in general. A global numerical solution is therefore usually computed \cite{Lavery2000,Lavery2000b,Lavery2004,Auquiert2007b,Dobrev2010}. Another strategy was introduced in 2011 by Nyiri, Auquiert and Gibaru for the interpolation problem \cite{Nyiri2011}.
The algorithm they designed is based on a sliding-window process. It consists in computing local solutions on a limited number of successive points, five for the interpolation problem. By keeping appropriate information, namely the derivative at the middle point of the window, one can easily construct a global interpolating function with shape-preserving properties similar to those of the global solution. Moreover, this process yields an algorithm of linear complexity which can be parallelized. It has also been applied in recent articles to a problem of approximation of data with prescribed error in the $L_1$ norm \cite{Gajny2013,Gajny2014}. \\ Recently, Wang \emph{et al.} proposed a method to compute $L_1$ spline fits with a global algorithm, but based on a five-point interpolation rule to fix the derivatives at the spline nodes \cite{Wang2014}. Indeed, the first derivative at a given node is determined using only its four neighbours, while the value of the spline is determined by a minimization process on the whole dataset. We propose in this article another approach, following the work in \cite{Nyiri2011,Gajny2013,Gajny2014}: we investigate how to define an appropriate sliding window process that computes local $L_1$ spline fits close to the global one.\\ In the first section, we recall some generalities about $L_1$ cubic Hermite interpolation splines. We show that the union of such splines over all possible Lagrange interpolation data is a closed set. This is used in the second section to show the existence of the $L_1$ spline fits previously introduced in the literature. We introduce in Sections 3 and 4 sliding-window algorithms to determine locally-computed $L_1$ spline fits and we compare them to each other. Conclusions are drawn in the last section. \section{The set of $L_1$ cubic Hermite interpolation splines} Let $(x_i,y_i)$, $i=1,\dots,n$, where $x_1<x_2<\dots<x_n$, be $n$ data points belonging to the graph of a function $f$. Let $Her(\mathbf{x})$ be the space of cubic Hermite splines with nodes $\mathbf{x}=\{x_1,x_2,\dots,x_n\}$. An $L_1$ cubic Hermite interpolation spline of these data is a cubic Hermite spline $\gamma^*\in Her(\mathbf{x})$ that solves: \begin{equation} \min_{\gamma \in Her(\mathbf{x})} \int_{x_1}^{x_n} \vert \gamma''(x) \vert \mathrm{d}x, \end{equation} under the Lagrange interpolation constraints: \begin{equation} \gamma(x_i)=y_i, \ i=1,2,\dots,n. \end{equation} Lavery has shown that a solution of this problem always exists. By means of numerical experiments, he noted that the resulting splines preserve the shape of the Heaviside function very well (see Figure \ref{L1L2interp}). Auquiert later showed that an $L_1$ cubic Hermite interpolation spline with six knots or more, with at least three knots on each side of the discontinuity of the Heaviside function, preserves both linear parts of the Heaviside function and thus does not exhibit a Gibbs phenomenon \cite{Auquiert2007}. This is the major advantage of $L_1$ cubic Hermite interpolation splines.\\ \begin{figure} \caption{$L_1$ (left) and $L_2$ (right) interpolation splines (solid lines) of the Heaviside function (dotted line) with 10 and 30 equally spaced knots.} \label{L1L2interp} \end{figure} We now consider the union of all $L_1$ cubic Hermite interpolation splines, defined as follows: \begin{equation} \mathcal{F}_\mathbf{x} = \bigcup_{\mathbf{y}\in \mathbf{R}^n} \mathrm{argmin}\left\{ \int_{x_1}^{x_n} \vert \gamma''(x) \vert \ \mathrm{d}x, \ \gamma \in Her(\mathbf{x}), \ \gamma(x_i)=y_i, \ i=1,\dots,n\right\}.
\end{equation} This set will be fundamental in the definition of $L_1$ spline fits. We now give an important property of this set which, to the best of our knowledge, has never been proved before and which will be essential in the next section. \begin{proposition} Given $\mathbf{x}=\{x_1<x_2<\dots<x_n\} \in \mathbf{R}^n $, the set $\mathcal{F}_\mathbf{x}$ is closed. \end{proposition} \begin{proof} Write $a=x_1$ and $b=x_n$. Since $Her(\mathbf{x})$ is finite-dimensional, all norms on it are equivalent; we identify a spline in $Her(\mathbf{x})$ with its $2n$ Hermite coefficients (its values and first derivative values at the nodes) and work with the convergence of these coefficients. Let $s \in \overline{\mathcal{F}_{\mathbf{x}}}$. Then by definition there exists a sequence \[\left(s_p\in \mathrm{argmin} \left\{\int_{a}^{b} |\gamma''(x)| \mathrm{d}x, \ \gamma\in Her(\mathbf{x}),\ \gamma(x_k)=q_k^{(p)},\ k=1,\dots,n\right\}\right)_{p\in \mathbf{N}}\] which converges to $s\in Her(\mathbf{x})$. For all $p \in \mathbf{N}$, $s_p$ is a cubic Hermite spline and is thus defined by $2n$ coefficients $q_k^{(p)}$, $b_k^{(p)}$, $k=1,\dots,n$, respectively the values and the first derivative values of $s_p$ at the abscissae $x_k$. By the convergence hypothesis in $Her(\mathbf{x})$, there exist real values $q_k^*$, $b_k^*$, $k=1,\dots,n$, such that: \begin{equation} \begin{split} q_k^{(p)} & \underset{p\rightarrow +\infty}{\longrightarrow} q_k^*,\\ b_k^{(p)} & \underset{p\rightarrow +\infty}{\longrightarrow} b_k^*. \end{split} \label{eq_conv} \end{equation} By uniqueness of the limit, $s$ is defined by these $2n$ coefficients. We then show that the minimization property of the splines $s_p$ is stable under passing to the limit.\\ We deduce from \eqref{eq_conv} that $(s_p'')_{p\in \mathbf{N}}$ converges pointwise almost everywhere to $s''$. Moreover, for all $p\in\mathbf{N}$, $s_p''$ is piecewise linear, and since its coefficients converge, the sequence $(s_p'')_{p\in \mathbf{N}}$ is uniformly bounded on $[a,b]$; it can therefore be dominated by an integrable function. By the dominated convergence theorem, it follows that: \begin{equation} \int_a^b |s_p''(x)| \ \mathrm{d}x \underset{p\rightarrow +\infty}{\longrightarrow} \int_a^b |s''(x)| \ \mathrm{d}x. \end{equation} Let $\gamma \in Her({\mathbf{x}})$ be such that $\gamma(x_k)=q_k^*, \ k=1,\dots,n$. By the first assertion in \eqref{eq_conv}, there exists a sequence $(\gamma_p \in Her({\mathbf{x}}))_{p\in\mathbf{N}}$ with $\gamma_p(x_k)=q_k^{(p)}$ for all $p\in \mathbf{N}$ and $k=1,\dots,n$, which converges to $\gamma$. We easily show that: \begin{equation} \int_a^b |\gamma_p''(x)| \ \mathrm{d}x \underset{p\rightarrow + \infty}{\longrightarrow} \int_a^b |\gamma''(x)| \ \mathrm{d}x. \end{equation} For all $p \in \mathbf{N}$, since $s_p$ minimizes $\int_a^b |\gamma''(x)| \ \mathrm{d}x$ over all $\gamma\in Her(\mathbf{x})$ satisfying $\gamma(x_k)=q_k^{(p)}$, $k=1,\dots,n$, it follows that: \begin{equation} \int_a^b |s_p''(x)| \ \mathrm{d}x \le \int_a^b |\gamma_p''(x)| \ \mathrm{d}x. \end{equation} Passing to the limit, we obtain that for all $\gamma \in Her({\mathbf{x}})$ such that $\gamma(x_k)=q_k^*$, $k=1,\dots,n$: \begin{equation} \int_a^b |s''(x)| \ \mathrm{d}x \le \int_a^b |\gamma''(x)| \ \mathrm{d}x. \end{equation} We conclude that $\mathcal{F}_{\mathbf{x}}$ is closed in $Her(\mathbf{x})$. \end{proof} \section{Best approximation using $L_1$ spline fits} Let us first define these splines, introduced in \cite{Lavery2004}. \begin{definition} Given a function $f\in L_1[a,b]$, $a,b \in \mathbf{R}$ and a set of knots $\mathbf{x}=\{a=x_1<x_2<\dots<x_n=b\}$, an $L_1$ spline fit of the function $f$ at the knots $\mathbf{x}$ is a best $L_1$ approximation of $f$ in $\mathcal{F}_\mathbf{x}$. In other words, it is a solution of: \begin{equation} \min_{s \in \mathcal{F}_\mathbf{x}} \int_{a}^{b} \vert s(x) - f(x)\vert \ \mathrm{d}x.
\end{equation} \end{definition} We prove with the next theorem that $L_1$ spline fits are well defined. \begin{theorem} $L_1$ spline fits exist for every function $f\in L_1[a,b]$ and every set of knots $\mathbf{x}=\{a=x_1<x_2<\dots<x_n=b\}$. \end{theorem} \begin{proof} Let $f\in L_1[a,b]$ and a set of knots $\mathbf{x}=\{a=x_1<x_2<\dots<x_n=b\}$ be given. Since $\mathcal{F}_{\mathbf{x}}$ is closed in the finite-dimensional subspace $Her(\mathbf{x})$ of $L_1[a,b]$, there exists a best $L_1$ approximation of $f$ in $\mathcal{F}_{\mathbf{x}}$. $\square$ \end{proof} One can easily define an analogous tool using exclusively the $L_2$ norm, called $L_2$ spline fits. We compare both methods in Figure \ref{fig_heaviside_glob}. \begin{figure} \caption{Global $L_1$ spline fits (left) and global $L_2$ spline fits (right) of the Heaviside function with ten equally spaced knots.} \label{fig_heaviside_glob} \end{figure} $L_1$ spline fits have also been defined for discrete data \cite{Lavery2004}. Let $(\hat{x}_i,\hat{y}_i), i=1,\dots,m$ be $m$ data points, where $m\ge n$. An $L_1$ spline fit of this dataset is a best $\ell_1$ approximation of it in $\mathcal{F}_\mathbf{x}$. In other words, it is a solution of: \begin{equation} \min_{s \in \mathcal{F}_\mathbf{x}} \sum_{i=1}^m \vert s(\hat{x}_i) - \hat{y}_i \vert. \end{equation} As in the continuous case, these splines exist, since they are solutions of a best approximation problem in a closed subset of a finite-dimensional subspace of a normed linear space. The results presented in Figure \ref{fig_L1SFG} indicate that $L_1$ spline fits preserve the shape of multiscale data well, contrary to $L_2$ spline fits. Moreover, $L_1$ spline fits do not require human intervention to choose a parameter that balances the weights of the approximation functional and the variational functional. However, the computational cost of $L_1$ spline fits is generally higher than that of $L_1$ smoothing splines, and obviously higher than that of least squares methods. \begin{figure} \caption{Global $L_1$ spline fits (top) and global $L_2$ spline fits (bottom).} \label{fig_L1SFG} \end{figure} \section{Sliding window algorithms for $L_1$ spline fits} \subsection{Best approximation of functions} We define sliding window methods with window size $m=3,\ 5,\ 7$ that we call respectively $L_1$SFL3, $L_1$SFL5 and $L_1$SFL7. For each set of $m$ consecutive knots $\mathbf{x}_{i,m}=\{x_{i-\lfloor \frac{m}{2}\rfloor},\dots,x_i,\dots,x_{i+\lfloor \frac{m}{2}\rfloor}\}$, we determine numerically a cubic Hermite spline $s_{i,m}^*$ solution of: \begin{equation} \min_{\gamma\in \mathcal{F}_{\mathbf{x}_{i,m}}} \int_{x_{i-\lfloor \frac{m}{2}\rfloor}}^{x_{i+\lfloor \frac{m}{2}\rfloor}} \vert \gamma(x) - f(x) \vert \ \mathrm{d}x. \end{equation} Then we only keep the information at the middle point of the window, $z_{i}=s_{i,m}^*(x_i)$ and $b_{i}=s_{i,m}^{*'}(x_i)$. \begin{figure} \caption{$L_1$ spline fits computed by the three proposed sliding window methods on the Heaviside function with ten equally spaced knots.} \label{fig_Heaviside_cont} \end{figure} These methods have been tested on the Heaviside function with ten equally spaced knots and the results are summarized in Figure \ref{fig_Heaviside_cont}. The three-point and five-point methods fail to reproduce the linear shape on both sides of the discontinuity. We are facing here a typical case of non-invariance of the numerical solution under rotation of the data. On both sides of the discontinuity, the two windows considered are geometrically similar and should lead to the same solution.
Since the three-point and five-point methods preserve linearity on one side, we should be able to preserve it on the other side as well. Further work will be done to make these methods invariant under rotation. The seven-point method seems more robust to rotation of the data and should therefore be preferred. In this case, the seven-point solution and the global solution are identical.\\ We have also compared the computing times of the different methods. The results are summarized in the graph in Figure \ref{fig_CPU_cont}. We notice a great improvement in computing time when using local methods; the fastest is of course the three-point method. We also notice a parity effect in these results: the numerical solution differs depending on whether the number of knots is even or odd, which is linked to whether or not a knot lies at the discontinuity.\\ \begin{figure} \caption{Comparison of computational times between global and local methods} \label{fig_CPU_cont} \end{figure} In view of both the graphical results and the computing times, the seven-point method is a good compromise. We confirm this tendency with the study of the discrete case. \subsection{Best approximation of discrete data} In this section, we apply the three-point, five-point and seven-point methods to discrete multiscale data. In other words, for each set of $m$ consecutive knots $\mathbf{x}_{i,m}=\{x_{i-\lfloor \frac{m}{2}\rfloor},\dots,x_i,\dots,x_{i+\lfloor \frac{m}{2}\rfloor}\}$, we determine numerically a cubic Hermite spline $s_{i,m}^*$ solution of: \begin{equation} \min_{\gamma\in \mathcal{F}_{\mathbf{x}_{i,m}}} \sum_{j=i-\lfloor \frac{m}{2}\rfloor}^{i+\lfloor \frac{m}{2}\rfloor} \vert \gamma(\hat{x}_j) - \hat{y}_j \vert. \end{equation} Then we only keep the information at the middle point of the window, $z_{i}=s_{i,m}^*(x_i)$ and $b_{i}=s_{i,m}^{*'}(x_i)$. The results are illustrated in Fig. \ref{fig_dataset1_L1SFL}, \ref{fig_dataset2_L1SFL} and \ref{fig_dataset3_L1SFL}. While the three-point and seven-point methods give smooth curves, the five-point method clearly fails. In Fig. \ref{fig_dataset1_L1SFL}, we notice an undershoot phenomenon, and in Fig. \ref{fig_dataset2_L1SFL}, oscillations are created.\\ \begin{figure} \caption{Local (solid lines) and global (dashed line) $L_1$ spline fits on a multi-scale data set.} \label{fig_dataset1_L1SFL} \end{figure} \begin{figure} \caption{Local (solid lines) and global (dashed line) $L_1$ spline fits on a multi-scale data set.} \label{fig_dataset2_L1SFL} \end{figure} \begin{figure} \caption{Local (solid lines) and global (dashed line) $L_1$ spline fits on a multi-scale data set.} \label{fig_dataset3_L1SFL} \end{figure} As in the continuous case, the seven-point method is graphically the closest to the global method. In some cases, such as Fig. \ref{fig_dataset1_L1SFL}, linear shapes are better preserved. \section{Modification of $L_1$SFL5 and $L_1$SFL7} The methods presented above may exhibit some undesirable features on multiscale configurations. We have observed them with the discrete $L_1$SFL5 in Fig.\ref{fig_dataset1_L1SFL} and \ref{fig_dataset2_L1SFL} and with the discrete $L_1$SFL3 in Fig.\ref{fig_dataset3_L1SFL}. This is typically due to a lack of consistency between the different windows. To reduce this phenomenon, we propose two other sliding window methods, $L_1$SFL5-3 and $L_1$SFL7-3, which are respectively a five-point and a seven-point method; a schematic implementation of the sliding-window loop is sketched below.
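The sliding-window loop shared by the methods of this section and the next can be sketched as follows. This sketch is ours, not the paper's: the local subproblem, which should be the best $\ell_1$ approximation over the window in the corresponding spline set $\mathcal{F}_{\mathbf{x}_{i,m}}$, is replaced by a hypothetical stand-in \texttt{local\_l1\_cubic\_fit} (a least-absolute-deviations cubic fit) so that the skeleton runs end to end; the knots are taken equal to the data abscissae and the first and last $\lfloor m/2\rfloor$ knots are not treated. The parameter \texttt{keep} selects how many central knots are retained per window (\texttt{keep=1} for $L_1$SFL$m$, \texttt{keep=3} for $L_1$SFL$m$-3).
\begin{verbatim}
import numpy as np
from scipy.optimize import minimize

def local_l1_cubic_fit(xw, yw):
    """Stand-in local solve: least-absolute-deviations cubic fit on one window."""
    def obj(c):
        return np.abs(np.polyval(c, xw) - yw).sum()
    c0 = np.polyfit(xw, yw, 3)                       # least-squares starting point
    c = minimize(obj, c0, method="Nelder-Mead").x
    return c, np.polyder(c)

def sliding_window_fit(x, y, m=7, keep=1):
    """Keep values/derivatives at the `keep` central knots of each window."""
    n, h = len(x), m // 2
    z, b = np.full(n, np.nan), np.full(n, np.nan)
    for i in range(h, n - h, keep):                  # window centred at knot i
        sl = slice(i - h, i + h + 1)
        c, dc = local_l1_cubic_fit(x[sl], y[sl])
        for j in range(i - (keep - 1) // 2, i + keep // 2 + 1):
            z[j] = np.polyval(c, x[j])
            b[j] = np.polyval(dc, x[j])
    return z, b      # Hermite data; endpoints must still be handled separately

# toy usage on a noisy Heaviside-like sample
x = np.linspace(0.0, 1.0, 15)
y = (x > 0.5).astype(float) + 0.03 * np.random.default_rng(0).standard_normal(x.size)
z, b = sliding_window_fit(x, y, m=7, keep=1)
\end{verbatim}
Stepping the window by \texttt{keep} knots is what makes the "-3" variants cheaper: fewer local subproblems are solved, and the windows remain independent, so they can be processed in parallel.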
The difference with the previous $L_1$SFL5 and $L_1$SFL7 is that we now keep the three middle pieces of information (approximation values and derivative values) instead of a single one. In other words, for the sets of $m$ consecutive knots $\mathbf{x}_{i,m}=\{x_{i-\lfloor \frac{m}{2}\rfloor},\dots,x_i,\dots,x_{i+\lfloor \frac{m}{2}\rfloor}\}$ with $i$ going from $\lfloor \frac{m}{2}\rfloor+1$ to $n-\lfloor \frac{m}{2}\rfloor$ by steps of 3, we determine numerically a cubic Hermite spline $s_{i,m}^*$ solution of: \begin{equation} \min_{\gamma\in \mathcal{F}_{\mathbf{x}_{i,m}}} \sum_{j=i-\lfloor \frac{m}{2}\rfloor}^{i+\lfloor \frac{m}{2}\rfloor} \vert \gamma(\hat{x}_j) - \hat{y}_j \vert. \end{equation} Then we keep the information at the three central knots: \begin{itemize} \item $z_{i-1}=s_{i,m}^*(x_{i-1})$, $z_{i}=s_{i,m}^*(x_i)$ and $z_{i+1}=s_{i,m}^*(x_{i+1})$. \item $b_{i-1}=s_{i,m}^{*'}(x_{i-1})$, $b_{i}=s_{i,m}^{*'}(x_i)$ and $b_{i+1}=s_{i,m}^{*'}(x_{i+1})$. \end{itemize} \begin{figure} \caption{Application of discrete $L_1$SFL5-3 and $L_1$SFL7-3 (solid line) on a multiscale dataset. Comparison with previous discrete $L_1$SFL5 and $L_1$SFL7 (dotted line) and global $L_1$SF (dashed line).} \label{fig_dataset2_L1SFL-3} \end{figure} These methods also have the advantage of requiring less computation than the previous $L_1$SFL5 and $L_1$SFL7. Indeed, with $L_1$SFL5-3 and $L_1$SFL7-3 the window slides faster, since we no longer treat every sequence of five, resp. seven, consecutive knots.\\ In this way, we were able to improve the consistency of the five-point solution. However, the seven-point method remains the closest to the initial global method. Since the global method is for now our reference, we select the seven-point method for further tests on noisy datasets.\\ We first applied, in Fig.\ref{fig_test_heav}, our $L_1$SFL7-3 method to a 100-point configuration initially evenly distributed on the Heaviside function and then corrupted by Gaussian noise with zero mean and standard deviation 0.03.\\ \begin{figure} \caption{Application of discrete $L_1$SFL7-3 (solid line) on a noisy Heaviside-like dataset. Comparison with previous discrete $L_1$SFL7 (dotted line) and global $L_1$SF (dashed line).} \label{fig_test_heav} \end{figure} The results are compared with the global method and the $L_1$SFL7 method. The solutions are not identical but are similar, as the error plot in Fig.\ref{fig_test_heav} suggests. We then applied the method to a 300-point configuration initially lying on the sine function and then corrupted by Gaussian noise with zero mean and standard deviation 0.05. The observations are the same, and graphical results are given in Fig.\ref{fig_test_sine}. \begin{figure} \caption{Application of discrete $L_1$SFL7-3 (solid line) on a noisy sine-like dataset. Comparison with previous discrete $L_1$SFL7 (dotted line) and global $L_1$SF (dashed line).} \label{fig_test_sine} \end{figure} \section{Conclusion} In this article, we have shown the existence of $L_1$ spline fits, which are very efficient for approximating data with abrupt changes but are time-consuming to compute. In order to obtain methods of lower algorithmic complexity, we have tested different ways of computing $L_1$ spline fits by a sliding window process, in both the continuous and the discrete case. At the end of this study, the seven-point method named $L_1$SFL7-3 should be chosen. It is currently a good compromise between keeping the geometrical properties of global $L_1$ spline fits and reducing the computations.
The method has linear computational complexity and can be parallelized. It has shown good results on both multiscale and noisy datasets. \section{Acknowledgments} The authors deeply thank Shu-Cherng Fang and Ziteng Wang from the Industrial and Systems Engineering Department of North Carolina State University and John E. Lavery, retired from the Army Research Office, for their comments and suggestions that improved the contents of this paper. \end{document}
\begin{document} \title{Best rank one approximation of real symmetric tensors\\ can be chosen symmetric\footnotemark[1]} \author{ S. Friedland\footnotemark[2]\\ Department of Mathematics, Statistics and Computer Science\\ University of Illinois at Chicago\\ Chicago, Illinois 60607-7045, USA\\ e-mail:\texttt{[email protected]}} \renewcommand{\thefootnote}{\arabic{footnote}} \footnotetext[1]{ To appear in Special Issue ``Tensor Theory'', \textit{Frontiers of Mathematics in China}, Springer.} \footnotetext[2]{This work was supported by NSF grant DMS-1216393.} \date{November 25, 2012 } \maketitle \begin{abstract} We show that a best rank one approximation to a real symmetric tensor, which in principle can be nonsymmetric, can be chosen symmetric. Furthermore, a symmetric best rank one approximation to a symmetric tensor is unique if the tensor does not lie on a certain real algebraic variety. \end{abstract} \noindent {\bf 2010 Mathematics Subject Classification.} 15A18, 15A69, 46B28, 65D15, 65H10, 65K10 \noindent {\bf Key words.} Symmetric tensor, rank one approximation of tensors, uniqueness of rank one approximation. \renewcommand{\thefootnote}{\arabic{footnote}} \section{Introduction}\label{intro} Denote by $\mathbb{R}^{n_1\times\ldots\times n_d}:=\otimes_{j=1}^d \mathbb{R}^{n_j}$ the tensor product of $\mathbb{R}^{n_1},\ldots,\mathbb{R}^{n_d}$ for an integer $d\ge 2$. $\mathcal{T}=[t_{i_1,\ldots, i_d}]\in \mathbb{R}^{n_1\times\ldots\times n_d}$ is called a \emph{$d$-tensor}. Let $[d]:=\{1,\ldots,d\}$. For $\mathbf{x}_j=(x_{1,j},\ldots,x_{n_j,j})^{T}\in\mathbb{R}^{n_j}, j\in [d]$, the decomposable tensor $\otimes_{j=1}^d \mathbf{x}_j=\mathbf{x}_1\otimes\ldots\otimes\mathbf{x}_d$ is given as $[\prod_{j=1}^d x_{i_j,j}]\in\mathbb{R}^{n_1\times\ldots\times n_d}$. A decomposable tensor is a rank one tensor if and only if $\mathbf{x}_j\ne \mathbf{0}$ for each $j\in[d]$. On $\mathbb{R}^{n_1\times\ldots\times n_d}$ define the inner product $\langle\mathcal{S},\mathcal{T}\rangle:=\sum_{i_j\in[n_j],j\in[d]} s_{i_1,\ldots,i_d}t_{i_1,\ldots,i_d}$ and the Hilbert-Schmidt norm $\|\mathcal{T}\|:=\sqrt{\langle\mathcal{T},\mathcal{T}\rangle}$. For $\mathbf{x}\in\mathbb{R}^n$ let $\|\mathbf{x}\|:=\sqrt{\mathbf{x}^{T} \mathbf{x}}$ be the $\ell_2$ norm. Denote by $\mathrm{S}^{n-1}:=\{\mathbf{x}\in\mathbb{R}^n,\;\|\mathbf{x}\|=1\}$ the $(n-1)$-dimensional unit sphere. Then a best rank one approximation of $\mathcal{T}$ is a decomposable tensor solving the minimal problem \begin{equation}\label{brank1appr} \min_{s\in\mathbb{R},\mathbf{x}_j\in\mathrm{S}^{n_j-1},j\in[d]} \|\mathcal{T}-s\otimes_{j=1}^d\mathbf{x}_j\|=\|\mathcal{T}-a\otimes_{j=1}^d\mathbf{u}_j\|. \end{equation} It is well known that \begin{equation}\label{maxmultform} a=\max_{\mathbf{x}_j\in\mathrm{S}^{n_j-1},j\in[d]}\sum_{i_j\in[n_j],j\in[d]}t_{i_1,\ldots,i_d} x_{i_1,1}\ldots x_{i_d,d}=\langle\mathcal{T},\otimes_{j=1}^d \mathbf{u}_j\rangle. \end{equation} For matrices, i.e.
$d=2$, a best rank one approximation of $A\in\mathbb{R}^{m\times n}$ is given by $\sigma_1(A)\mathbf{x}\mathbf{y}^\top$, $\mathbf{x}^\top\mathbf{x}=\mathbf{y}^\top\mathbf{y}=1$, where $\sigma_1(A)$ is the maximal singular value of $A$, and $A\mathbf{y}=\sigma_1(A)\mathbf{x}$, $A^\top \mathbf{x}=\sigma_1(A)\mathbf{y}$. A best rank one approximation is unique if and only if $\sigma_1(A)>\sigma_2(A)$. Assume that $A\in\mathbb{R}^{n\times n}$ is symmetric. Let $\lambda_1(A)\ge\ldots\ge\lambda_n(A)$ be the $n$ eigenvalues of $A$, counted with their multiplicities. Then $\sigma_1(A)=\max(|\lambda_1(A)|,|\lambda_n(A)|)$. Furthermore, there always exists a symmetric best rank one approximation. Moreover, there exists a nonsymmetric best rank one approximation if and only if $\lambda_n(A)=-\lambda_1(A)<0$.
Assume now that $\mathcal{T}$ is a $d$-mode tensor with $d\ge 3$. Let $\alpha$ be a subset of $[d]$ of cardinality at least $2$. We say that $\mathcal{T}=[t_{i_1,\ldots,i_d}]\in \mathbb{R}^{n_1\times\ldots\times n_d}$ is symmetric with respect to $\alpha$ if $n_p=n_q$ for each pair $\{p,q\}\subset \alpha$ and the value of $t_{i_1,\ldots,i_d}$ does not change if we interchange any two indices $i_p,i_q$ with $p,q\in\alpha$, for any choice of $i_j\in [n_j]$, $j\in [d]$. We agree that any tensor $\mathcal{T}$ is symmetric with respect to each singleton $\{i\}$, $i\in [d]$. $\mathcal{T}$ is called symmetric if it is symmetric with respect to $\alpha=[d]$. Denote by $\mathrm{Sym}(n,d)$ the set of all $d$-mode symmetric tensors in $\mathbb{R}^{n\times\ldots\times n}$. These tensors are also called supersymmetric.
Let $\mathcal{T}\in \mathbb{R}^{n_1\times \ldots\times n_d}$ be given. Clearly, there exists a unique decomposition of $[d]$ into a disjoint union of nonempty sets $\cup_{j=1}^m \alpha_j$ such that the following conditions hold.
\begin{itemize}
\item For each $j\in [m]$ the tensor $\mathcal{T}$ is symmetric with respect to $\alpha_j$.
\item For $1\le j <k\le m$ and any two indices $p\in\alpha_j$, $q\in\alpha_k$ the tensor $\mathcal{T}$ is not symmetric with respect to $\{p,q\}$.
\end{itemize}
We call $[d]=\cup_{j=1}^m\alpha_j$ the \emph{symmetric decomposition} for $\mathcal{T}$. The main result of this paper is the following.
\begin{theo}\label{maintheo} Let $\mathcal{T}\in \mathbb{R}^{n_1\times\ldots\times n_d}\setminus\{0\}$ be given. Assume that $[d]=\cup_{j=1}^m \alpha_j$ is the symmetric decomposition for $\mathcal{T}$. Then there exists a best rank one approximation $\mathcal{A}$ to $\mathcal{T}$ such that $\mathcal{A}$ is symmetric with respect to each $\alpha_j$.
\end{theo}
In the special case of symmetric tensors, i.e. $m=1$, the above theorem is also proved in \cite[Theorem 4.1]{CHLZ12}. It is not difficult to show that it is enough to prove the above theorem for symmetric tensors. Furthermore, it is enough to prove it for $\mathcal{T}\in\mathrm{Sym}(2,d)$.
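The matrix facts quoted above are easy to check numerically. The following short NumPy sketch (a numerical illustration only, not part of the arguments below; the helper name \texttt{best\_rank\_one\_sym} is ours) computes $\sigma_1(A)=\max(|\lambda_1(A)|,|\lambda_n(A)|)$ for a random symmetric matrix and builds a symmetric best rank one approximation from a corresponding eigenvector.
\begin{verbatim}
import numpy as np

def best_rank_one_sym(A):
    """Symmetric best rank one approximation of a symmetric matrix A.

    Uses sigma_1(A) = max(|lambda_1(A)|, |lambda_n(A)|): if |lambda_j(A)|
    attains this maximum, then lambda_j(A) * v v^T is a best rank one
    approximation, where v is a unit eigenvector for lambda_j(A).
    """
    lam, V = np.linalg.eigh(A)           # eigenvalues in increasing order
    j = np.argmax(np.abs(lam))           # eigenvalue of largest modulus
    v = V[:, j]
    return lam[j] * np.outer(v, v)

rng = np.random.default_rng(0)
B = rng.standard_normal((5, 5))
A = (B + B.T) / 2                        # random symmetric matrix

sigma1 = np.linalg.norm(A, 2)            # largest singular value
lam = np.linalg.eigvalsh(A)
assert np.isclose(sigma1, max(abs(lam[0]), abs(lam[-1])))

# The symmetric approximation is as good as the SVD-based (possibly
# nonsymmetric) best rank one approximation.
R = best_rank_one_sym(A)
U, s, Vt = np.linalg.svd(A)
R_svd = s[0] * np.outer(U[:, 0], Vt[0])
assert np.isclose(np.linalg.norm(A - R), np.linalg.norm(A - R_svd))
\end{verbatim}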
Finally, we show that there exists a real algebraic variety $\Sigma_1(n,d)\subset \mathrm{Sym}(n,d)$ such that for $\mathcal{T}\in \mathrm{Sym}(n,d)\setminus\Sigma_1(n,d)$ a best rank one symmetric approximation is unique.
We now describe briefly the contents of our paper. In \S2 we summarize the well known results on best rank one approximation of real matrices that are used in this paper. In \S3 we discuss certain basic results on best rank one approximation of real tensors. In \S4 we give a complete characterization of the tensors $\mathcal{T}\in\mathrm{Sym}(2,3)$ which have a nonsymmetric best rank one approximation. In \S5 we prove Theorem \ref{maintheo} by first showing the case where $\mathcal{T}$ is a symmetric tensor (Theorem \ref{brank1symtheo}). In \S6 we show that a ``generic'' symmetric tensor has a unique best rank one approximation. The last section uses some facts from algebraic geometry and is probably the most difficult section of this paper.
\section{Best rank one approximation of matrices}
We recall briefly the needed results on best rank one approximation of matrices $A=[a_{i,j}]\in\mathbb{R}^{m\times n}$ \cite{GolV96}. Assume that $r=\mathrm{rank}\, A$. Then $A$ has exactly $r$ positive singular values $\sigma_1(A)\ge\ldots\ge \sigma_r(A)>0$, and $\sigma_1(A)^2\ge\ldots\ge\sigma_r(A)^2$ are the positive eigenvalues of either $AA^\top$ or $A^\top A$. Moreover,
\begin{equation}\label{charsingval1} \sigma_1(A)=\max_{\mathbf{x}\in\mathrm{S}^{m-1},\mathbf{y}\in\mathrm{S}^{n-1}}\mathbf{x}^\top A\mathbf{y}.
\end{equation}
A pair of left and right singular vectors $\mathbf{u}\in\mathrm{S}^{m-1}$, $\mathbf{v}\in\mathrm{S}^{n-1}$ corresponding to $\sigma_1(A)$ is characterized by the equalities
\begin{equation}\label{uvsingvectA} A^\top\mathbf{u}=\sigma_1(A)\mathbf{v},\; A\mathbf{v}=\sigma_1(A)\mathbf{u}, \quad \mathbf{u}\in\mathrm{S}^{m-1},\mathbf{v}\in\mathrm{S}^{n-1}.
\end{equation}
Hence $\sigma_1(A)=\mathbf{u}^\top A\mathbf{v}$. Furthermore, any best rank one approximation of $A$ in the Frobenius norm $\|A\|=\sqrt{\mathrm{tr} (A^\top A)}$ is of the form $\sigma_1(A)\mathbf{u}\mathbf{v}^\top$ for some pair of singular vectors $\mathbf{u},\mathbf{v}$ corresponding to $\sigma_1(A)$:
\begin{equation}\label{brank1apmat} \min_{s\in \mathbb{R},\mathbf{x}\in\mathrm{S}^{m-1},\mathbf{y}\in\mathrm{S}^{n-1}}\|A-s\mathbf{x}\mathbf{y}^\top\|=\|A-\sigma_1(A)\mathbf{u}\mathbf{v}^\top\|.
\end{equation}
Recall that $\mathrm{Sym}(n,2)\subset \mathbb{R}^{n\times n}$ is the space of symmetric matrices. The following result is well known and we bring its proof for completeness.
\begin{prop}\label{branapproxsymmat} Let $A\in\mathrm{Sym}(n,2)$. Then $\sigma_1(A)=\max(|\lambda_1(A)|,|\lambda_n(A)|)$ and $A$ has a symmetric best rank one approximation.
Suppose furthermore that $A\ne 0$. Then
\begin{enumerate}
\item\label{symrank1motappr} Any best rank one approximation to $A$ is symmetric if and only if $\lambda_1(A)\ne -\lambda_n(A)$.
\item\label{nsmrank1motappr} Assume that $\lambda_1(A)=-\lambda_n(A)$. Then $\sigma_1(A)=\lambda_1(A)$. Furthermore, $\sigma_1(A)\mathbf{u}\mathbf{v}^\top$, where \eqref{uvsingvectA} holds, is a nonsymmetric best rank one approximation of $A$ if neither $\mathbf{u}$ nor $\mathbf{v}$ is an eigenvector of $A$, while $\mathbf{u},\mathbf{v}$ are eigenvectors of $A^2$ corresponding to $\lambda_1(A)^2$.
\end{enumerate}
\end{prop}
\proof Since $A^\top A=A^2$, the singular values of $A$ are $|\lambda_i(A)|$, $i\in [n]$. As all eigenvalues of $A$ lie in the interval $[\lambda_n(A),\lambda_1(A)]$, it follows that $\sigma_1(A)=\max_{i\in[n]}|\lambda_i(A)|=\max(|\lambda_1(A)|,|\lambda_n(A)|)$. We now show that there exists a symmetric best rank one approximation of $A$. Clearly, it is enough to consider $A\ne 0$. Assume that $\sigma_1(A)=|\lambda_j(A)|$, $j\in \{1,n\}$. Let $\mathbf{v}\in \mathrm{S}^{n-1}$ be an eigenvector of $A$ corresponding to $\lambda_j(A)$. Then $\mathbf{u}=\frac{\lambda_j(A)}{|\lambda_j(A)|}\mathbf{v}$ and $\sigma_1(A)\mathbf{u}\mathbf{v}^\top=\lambda_j(A)\mathbf{v}\mathbf{v}^\top$, which is a symmetric best rank one approximation of $A$.
Assume now that $A\ne 0$. Note that the assumption $|\lambda_1(A)|\ne |\lambda_n(A)|$ is equivalent to $\lambda_1(A)\ne -\lambda_n(A)$. Assume first that $\lambda_1(A)\ne -\lambda_n(A)$. Then there exists a unique $j\in \{1,n\}$ such that $\sigma_1(A)=|\lambda_j(A)|$. As a right singular vector $\mathbf{v}\in\mathrm{S}^{n-1}$ of $A$ is an eigenvector of $A^2$ corresponding to $\lambda_j(A)^2$, it follows that $\mathbf{v}$ is an eigenvector of $A$. Hence the above argument shows that $\sigma_1(A)\mathbf{u}\mathbf{v}^\top$ is symmetric, and part \ref{symrank1motappr} holds. Assume now that $\lambda_1(A)=-\lambda_n(A)$. If $\mathbf{v}$ is an eigenvector of $A$ corresponding to $\lambda_j(A)$, $j\in\{1,n\}$, then $\mathbf{u}$ is also an eigenvector of $A$ corresponding to $\lambda_j(A)$, and vice versa. In this case $\sigma_1(A)\mathbf{u}\mathbf{v}^\top$ is a symmetric best rank one approximation.
Assume that $\mathbf{v}$ is not an eigenvector of $A$. Since $\mathbf{v}$ is a right singular vector corresponding to $\sigma_1(A)$, it follows that $\mathbf{v}$ is an eigenvector of $A^2$ corresponding to the eigenvalue $\lambda_j(A)^2$. A similar claim holds for $\mathbf{u}$. Hence $(A\mathbf{v})\mathbf{v}^\top$ is a nonsymmetric best rank one approximation, and part \ref{nsmrank1motappr} holds. \qed
\begin{lemma}\label{uniqaprmat}
\noindent
\begin{enumerate}
\item \label{uniqaprmatnsym} $A\in\mathbb{R}^{m\times n}$ has a unique best rank one approximation for $m,n\ge 2$, unless $A$ lies on a subvariety of codimension two.
\item \label{uniqaprmatsym} $A\in\mathrm{Sym}(n,2)$ has a unique best rank one approximation for $n\ge 2$, which is symmetric, unless $A$ lies on a subvariety of codimension one in $\mathrm{Sym}(n,2)$.
\end{enumerate}
\end{lemma}
\proof To prove the first part of the lemma we use the singular value decomposition. Assume without loss of generality that $2\le m \le n$. Then each matrix $A\in\mathbb{R}^{m\times n}$ is of the form $UDV^\top$, where $U=[\mathbf{u}_1,\ldots,\mathbf{u}_m]\in \mathbb{R}^{m\times m}$ is orthogonal, $V=[\mathbf{v}_1,\ldots,\mathbf{v}_m]\in \mathbb{R}^{n\times m}$ has $m$ orthonormal columns and $D=\mathrm{diag}(d_1,\ldots,d_m)$, where $d_1\ge\ldots\ge d_m\ge 0$. So $\sigma_i(A)=d_i$, $i\in[m]$, and the $i$-th columns of $V$ and $U$ are right and left singular vectors, respectively, corresponding to the singular value $d_i$. Note that if $d_1>\ldots>d_m>0$ then each column of $V$ is determined up to a sign, and after $V$ is fixed, $U$ is determined uniquely. In this case $\sigma_1(A)=d_1>\sigma_2(A)=d_2$ and $A$ has a unique best rank one approximation. $A$ has no unique best rank one approximation if and only if $d_1=d_2$. The generic case for this situation is
\begin{equation}\label{genmatdegcase} d_1=d_2>d_3>\ldots>d_m>0.
\end{equation}
The equality $d_1=d_2$ means that we lose one parameter. The columns $3,\ldots,m$ of $V$ are determined uniquely up to a sign. The first two columns of $V$ are not determined uniquely. What is determined uniquely is the two dimensional subspace $\mathbf{V}\subset\mathbb{R}^n$ which is orthogonal to the columns $3,\ldots,m$ of $V$ and to the null space of $A$. We can choose as the first column $\mathbf{v}_1$ of $V$ any unit vector in $\mathbf{V}$. The second column $\mathbf{v}_2$ of $V$ is then a unit vector in $\mathbf{V}$ orthogonal to $\mathbf{v}_1$, so $\mathbf{v}_2$ is determined uniquely up to a sign. Recall that $\mathbf{u}_i=d_i^{-1}A\mathbf{v}_i$, $i\in[m]$. Hence the set of all $A=UDV^\top$ of the above form, where the entries of $D$ satisfy \eqref{genmatdegcase}, is a manifold $\Phi(m,n)\subset \mathbb{R}^{m\times n}$ of codimension two in $\mathbb{R}^{m\times n}$. It is left to show that there is a nonzero polynomial $Q$ in the entries of $A$ which satisfies the following conditions. First, $Q$ vanishes on $\Phi(m,n)$.
Second, for each $A\in \Phi(m,n)$ there exists a neighborhood $\mathrm{O}\subset \mathbb{R}^{m\times n}$ of $A$ such that the zero set of $Q$ in $\mathrm{O}$ is equal to $\mathrm{O}\cap\Phi(m,n)$. Consider the symmetric matrix $B=AA^\top$ and let $\mathrm{Dis}(B)$ be the discriminant of the characteristic polynomial of $B$. Then $\mathrm{Dis}(B)$ vanishes if and only if $B$ has a multiple eigenvalue. In particular, the polynomial $Q(A):=\mathrm{Dis}(AA^\top)$ vanishes on $\Phi(m,n)$. Fix $A\in \Phi(m,n)$ and assume that $C\in \mathbb{R}^{m\times n}$ is very close to $A$. Then $\sigma_2(C)>\ldots>\sigma_m(C)>0$. So $Q(C)=0$ if and only if $\sigma_1(C)=\sigma_2(C)$. This establishes the first part of the lemma.
Recall that the set of all $A\in\mathrm{Sym}(n,2)$ having at least one multiple eigenvalue is a variety $\Delta_n$ of codimension two, see e.g. \cite{FRS}. (This follows from the arguments of the first part of the lemma; $\Delta_n$ is the zero set of the discriminant of the characteristic polynomial of $A$.) Assume that $A\in \mathrm{Sym}(n,2)\setminus\Delta_n$. (So $A\ne 0$.) Proposition \ref{branapproxsymmat} yields that $A$ has a unique symmetric rank one approximation if and only if $\lambda_1(A)\ne -\lambda_n(A)$. It is left to show that all matrices $A\in\mathrm{Sym}(n,2)\setminus\Delta_n$ satisfying $\lambda_1(A)+\lambda_n(A)=0$ lie on a variety of codimension one. This follows from the spectral decomposition $A=UDU^\top$, where $U$ is an orthogonal matrix and $D=\mathrm{diag}(d_1,\ldots,d_n)$, $d_1>\ldots>d_n$. Note that the columns of $U$ are determined uniquely up to a sign. Hence the set of all $A\in\mathrm{Sym}(n,2)\setminus\Delta_n$ satisfying $\lambda_1(A)+\lambda_n(A)=0$ is a manifold of codimension one. It is left to show that this manifold is the zero set of some polynomial in $A$. Recall that for $A\in\mathrm{Sym}(n,2)$ the matrix $A\otimes I_n+I_n\otimes A\in\mathrm{Sym}(n^2,2)$, where $I_n\in\mathrm{Sym}(n,2)$ is the identity matrix and $A\otimes B$ is the Kronecker tensor product, has eigenvalues $\lambda_i(A)+\lambda_j(A)$ for $i,j\in[n]$. Hence the zero set of $\det(A\otimes I_n+I_n\otimes A)$ includes the above manifold. This concludes the proof of the second part of the lemma. \qed
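The polynomial used at the end of the proof is easy to evaluate numerically. The following NumPy sketch (an illustration only; the construction of a matrix with $\lambda_1+\lambda_n=0$ by shifting is our choice) checks that $\det(A\otimes I_n+I_n\otimes A)$ equals the product of all sums $\lambda_i(A)+\lambda_j(A)$ and hence vanishes when $\lambda_1(A)+\lambda_n(A)=0$.
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(1)
n = 4

# Generic symmetric matrix: the Kronecker-sum determinant is nonzero.
B = rng.standard_normal((n, n))
A = (B + B.T) / 2
K = np.kron(A, np.eye(n)) + np.kron(np.eye(n), A)
lam = np.linalg.eigvalsh(A)              # increasing order
sums = lam[:, None] + lam[None, :]       # all lambda_i + lambda_j
assert np.isclose(np.linalg.det(K), np.prod(sums))

# Force lambda_1 + lambda_n = 0 by shifting A; the determinant then vanishes.
A0 = A - (lam[0] + lam[-1]) / 2 * np.eye(n)
K0 = np.kron(A0, np.eye(n)) + np.kron(np.eye(n), A0)
print(np.linalg.det(K0))                 # numerically zero (up to round-off)
\end{verbatim}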
\section{Preliminary results on best rank one approximation of tensors}
Recall that the $\infty$-Schatten norm of $\mathcal{T}=[t_{i_1,\ldots,i_d}]\in \mathbb{R}^{n_1\times \ldots\times n_d}$, with respect to the $\ell_2$ norm on each factor $\mathbb{R}^{n_i}$, is given by
\[\|\mathcal{T}\|_{\infty,2}:=\max_{\mathbf{x}_i\in\mathrm{S}^{n_i-1}, i\in [d]}|\langle\mathcal{T}, \otimes_{i=1}^d \mathbf{x}_i\rangle|.\]
Since $-\mathrm{S}^{n-1}=\mathrm{S}^{n-1}$ it follows that
\begin{equation}\label{definfsnorm} \|\mathcal{T}\|_{\infty,2}=\max_{\mathbf{x}_i=(x_{1,i},\ldots,x_{n_i,i})^\top\in\mathrm{S}^{n_i-1}, i\in [d]} \sum_{i_j\in[n_j],j\in[d]}t_{i_1,\ldots,i_d}x_{i_1,1}\ldots x_{i_d,d}.
\end{equation}
See for example \cite{DF93} for a modern exposition on tensor norms and \cite{Fr82} for simple geometrical properties of cross norms. Note that for matrices, i.e. $d=2$, $\|A\|_{\infty,2}$ is the operator norm $\|A\|_2=\sigma_1(A)$, where $A\in\mathbb{R}^{m\times n}$ is viewed as the linear operator $\mathbf{y}\mapsto A\mathbf{y}$ from $\mathbb{R}^n$ to $\mathbb{R}^m$. Let $\beta\subset[d]$ be a nonempty set and assume that $\mathbf{x}_j=(x_{1,j},\ldots,x_{n_j,j})^\top\in \mathbb{R}^{n_j}$ for $j\in\beta$. Denote by $\mathcal{T}\times \otimes_{j\in\beta}\mathbf{x}_j$ the contracted $(d-|\beta|)$-tensor
\begin{equation}\label{defTcontr} \mathcal{T}\times\otimes_{j\in\beta}\mathbf{x}_j:=\sum_{i_j\in[n_j],j\in\beta}t_{i_1,\ldots,i_d}\prod_{j\in\beta} x_{i_j,j} \in \otimes_{k\in[d]\setminus\beta}\mathbb{R}^{n_k}.
\end{equation}
Note that if $\beta=[d]$ then $\mathcal{T}\times\otimes_{j\in[d]}\mathbf{x}_j=\langle\mathcal{T},\otimes_{j\in[d]}\mathbf{x}_j\rangle$. Let $\beta$ be a nonempty strict subset of $[d]$. By computing the maximum in \eqref{definfsnorm} first over the $\mathbf{x}_j$ with $j\in[d]\setminus\beta$ and then over the $\mathbf{x}_j$ with $j\in\beta$, we deduce
\begin{equation}\label{infshatnrmchar} \|\mathcal{T}\|_{\infty,2}=\max_{ \mathbf{x}_j\in \mathrm{S}^{n_j-1},j\in\beta}\|\mathcal{T}\times\otimes_{j\in\beta}\mathbf{x}_j\|_{\infty,2}.
\end{equation}
Suppose that $\beta=[d]\setminus\{p,q\}$, where $1\le p <q\le d$. We view $\mathcal{T}\times \otimes_{j\in\beta}\mathbf{x}_j$ as a matrix in $\mathbb{R}^{n_p\times n_q}$. Hence
\begin{equation}\label{infshatnrmchar1} \|\mathcal{T}\|_{\infty,2}=\max_{ \mathbf{x}_j\in \mathrm{S}^{n_j-1},j\in[d]\setminus\{p,q\}}\sigma_1(\mathcal{T}\times\otimes_{j\in[d]\setminus\{p,q\}}\mathbf{x}_j).
\end{equation}
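In coordinates, the contraction \eqref{defTcontr} and the identity \eqref{infshatnrmchar1} are straightforward to evaluate. The following NumPy sketch (a brute-force illustration on a random $3$-tensor; the sampling scheme is an arbitrary choice of ours) contracts the last mode and uses \eqref{infshatnrmchar1} to obtain a lower bound on $\|\mathcal{T}\|_{\infty,2}$, checking along the way that $\langle\mathcal{T},\mathbf{u}\otimes\mathbf{v}\otimes\mathbf{x}\rangle=\sigma_1(\mathcal{T}\times\mathbf{x})$ for the top singular pair $(\mathbf{u},\mathbf{v})$ of the contracted matrix.
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(2)
n1, n2, n3 = 3, 4, 5
T = rng.standard_normal((n1, n2, n3))

def contract_last(T, x):
    # The contraction of (defTcontr) with beta = {3}: a matrix in R^{n1 x n2}.
    return np.einsum('ijk,k->ij', T, x)

# Crude lower bound for ||T||_{infty,2} via (infshatnrmchar1): maximize
# sigma_1 of the contracted matrix over randomly sampled unit vectors x.
best, x_best = -np.inf, None
for _ in range(5000):
    x = rng.standard_normal(n3)
    x /= np.linalg.norm(x)
    s1 = np.linalg.norm(contract_last(T, x), 2)
    if s1 > best:
        best, x_best = s1, x

# For the best sampled x, the top singular pair (u, v) of T x x satisfies
# <T, u (x) v (x) x> = sigma_1(T x x), the inner identity behind (infshatnrmchar1).
U, s, Vt = np.linalg.svd(contract_last(T, x_best))
val = np.einsum('ijk,i,j,k->', T, U[:, 0], Vt[0], x_best)
assert np.isclose(abs(val), s[0])
print(best)   # lower bound on ||T||_{infty,2}
\end{verbatim}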
The following result is well known and we bring its proof for completeness.
\begin{lemma}\label{brank1char} Let $\mathcal{T}\ne 0$ be a given tensor in $\mathbb{R}^{n_1\times \ldots\times n_d}$. Then $a\otimes_{i=1}^d \mathbf{u}_i$, where $\mathbf{u}_i\in\mathrm{S}^{n_i-1}$, $i\in[d]$, is a best rank one approximation of $\mathcal{T}$ if and only if the following conditions hold. First, $a=\pm\|\mathcal{T}\|_{\infty,2}$. Second, the function $\langle\mathcal{T},\otimes_{j\in[d]}\mathbf{x}_j\rangle$ attains its maximum or minimum on $\mathrm{S}^{n_1-1}\times\ldots\times\mathrm{S}^{n_d-1}$ at $(\mathbf{u}_1,\ldots,\mathbf{u}_d)$. In particular,
\begin{equation}\label{brank1char1} \mathcal{T} \times \otimes_{j\in[d]\setminus\{i\}} \mathbf{u}_j=\lambda \mathbf{u}_i, \quad i\in [d],
\end{equation}
where $\lambda=\pm\|\mathcal{T}\|_{\infty,2}$ and $\mathbf{u}_i\in\mathrm{S}^{n_i-1}$ for $i\in[d]$. Suppose furthermore that $\mathcal{T}$ is symmetric with respect to $\{p,q\}$, $1\le p <q\le d$. Then there exists a best rank one approximation which is symmetric with respect to $\{p,q\}$.
\end{lemma}
\proof Let $\mathbf{x}_i\in\mathrm{S}^{n_i-1}$ for $i\in[d]$. Then $\otimes_{i\in[d]}\mathbf{x}_i$ is a unit vector in $\otimes_{i=1}^d \mathbb{R}^{n_i}$. Let $\mathbf{U}:=\mathrm{span}(\otimes_{i\in[d]}\mathbf{x}_i)$ and let $\mathbf{U}^{\perp}$ be the orthogonal complement of $\mathbf{U}$ in $\otimes_{i=1}^d \mathbb{R}^{n_i}$. The orthogonal projection of $\mathcal{T}$ on $\mathbf{U}$ is given by $P_{\mathbf{U}}(\mathcal{T})=\langle\mathcal{T},\otimes_{i\in[d]}\mathbf{x}_i\rangle\otimes_{i\in[d]}\mathbf{x}_i$, and $\|P_{\mathbf{U}}(\mathcal{T})\|=|\langle\mathcal{T},\otimes_{i\in[d]}\mathbf{x}_i\rangle|$. It is well known that $\min_{s\in \mathbb{R}} \|\mathcal{T}-s\otimes_{i=1}^d \mathbf{x}_i\|= \|P_{\mathbf{U}^{\perp}}(\mathcal{T})\|$. The Pythagoras theorem yields that
\begin{equation}\label{pythid} \|\mathcal{T}\|^2 = \|P_{\mathbf{U}}(\mathcal{T})\|^2 + \|P_{\mathbf{U}^\perp}(\mathcal{T})\|^2=\langle\mathcal{T},\otimes_{i\in[d]}\mathbf{x}_i\rangle^2+\|P_{\mathbf{U}^\perp}(\mathcal{T})\|^2.
\end{equation}
Hence a minimizer of the left-hand side of \eqref{brank1appr} gives rise to a maximum or minimum of $\langle\mathcal{T},\otimes_{j\in[d]}\mathbf{x}_j\rangle$ on $\mathrm{S}^{n_1-1}\times\ldots\times\mathrm{S}^{n_d-1}$. We now give a short proof of a result by Lim \cite{Lim05}. Consider the maximum problem \eqref{maxmultform}. Using Lagrange multipliers for the function $\langle\mathcal{T},\otimes_{j\in[d]}\mathbf{x}_j\rangle-\sum_{j\in[d]}\lambda_j\mathbf{x}_j^\top\mathbf{x}_j$, we deduce that a maximum solution satisfies
\[\mathcal{T}\times\otimes_{j\in[d]\setminus\{i\}}\mathbf{u}_j=\lambda_i\mathbf{u}_i, \quad i\in[d].\]
Hence $\langle\mathcal{T},\otimes_{j\in[d]}\mathbf{u}_j\rangle=\lambda_i\mathbf{u}_i^\top\mathbf{u}_i=\lambda_i$ for each $i\in[d]$. Therefore a best rank one approximation $a\otimes_{j\in[d]}\mathbf{u}_j$ satisfies $a=\|\mathcal{T}\|_{\infty,2}$ and \eqref{brank1char1}, where $\lambda=a$ and $\mathbf{u}_j\in\mathrm{S}^{n_j-1}$ for $j\in[d]$. Similar results hold for the minimum of $\langle\mathcal{T},\otimes_{j\in[d]}\mathbf{x}_j\rangle$ on $\mathrm{S}^{n_1-1}\times\ldots\times\mathrm{S}^{n_d-1}$. Assume now that $\mathcal{T}$ is symmetric with respect to two indices $p<q$, so that $n_p=n_q$. Assume that $a\otimes_{i=1}^d \mathbf{u}_i$ is a best rank one approximation.
Let $\mathbf{U}_i=\mathrm{span}(\mathbf{u}_i)$, $i\in [d]$. Note that $A:=\mathcal{T}\times \otimes_{i\in [d]\setminus\{p,q\} }\mathbf{u}_i$ is a symmetric matrix. As a best rank one approximation of $A$ can be chosen symmetric, we deduce that we can choose $\mathbf{u}_p,\mathbf{u}_q\in \mathrm{S}^{n_p-1}$ such that $\mathbf{u}_q\in \{\mathbf{u}_p,-\mathbf{u}_p\}$. Hence there exists a best rank one approximation of $\mathcal{T}$ which is symmetric with respect to $\{p,q\}$. \qed
\begin{corol}\label{charb1bap} Let $\mathcal{T}\ne 0$ be a given tensor in $\mathbb{R}^{n_1\times \ldots\times n_d}$. Then a rank one tensor $\mathcal{A}\in\mathbb{R}^{n_1\times \ldots\times n_d}$ is a best rank one approximation of $\mathcal{T}$ if and only if
\begin{equation}\label{charb1bap1} \langle\mathcal{T},\mathcal{A}\rangle=\|\mathcal{T}\|_{\infty,2}^2=\|\mathcal{A}\|^2.
\end{equation}
\end{corol}
The following lemma is straightforward.
\begin{lemma}\label{uniqbrankap} Let $\mathcal{T}\in\mathbb{R}^{n_1\times\ldots\times n_d}$ and assume that $a\otimes_{j\in[d]}\mathbf{u}_j$ is a best rank one approximation of $\mathcal{T}$. Suppose that $\mathcal{T}$ is symmetric with respect to $\alpha\subset[d]$. Let $\sigma:[d]\to[d]$ be a permutation which is the identity on $[d]\setminus\alpha$. Then $a\otimes_{j\in[d]} \mathbf{u}_{\sigma(j)}$ is a best rank one approximation of $\mathcal{T}$. In particular, if $a\otimes_{j\in[d]}\mathbf{u}_j$ is the unique best rank one approximation of $\mathcal{T}$, then $a\otimes_{j\in[d]}\mathbf{u}_j$ is symmetric with respect to $\alpha\subset[d]$.
\end{lemma}
Lemma \ref{uniqaprmat} suggests the following conjecture.
\begin{con}\label{brank1con} Let $d\ge 3$, $n_j\ge 2$, $j\in [d]$, be integers. Then
\noindent
\begin{enumerate}
\item \label{uniqaprtengen} $\mathcal{T}\in\mathbb{R}^{n_1\times\ldots\times n_d}$ has a unique best rank one approximation, unless $\mathcal{T}$ lies on a proper subvariety.
\item \label{uniqaptensym} $\mathcal{T}\in\mathrm{Sym}(n,d)$ has a unique best rank one approximation, which is symmetric, unless $\mathcal{T}$ lies on a proper subvariety.
\end{enumerate}
\end{con}
The above conjecture was recently settled in \cite{FO12}. We remark that T. Kolda in her lecture \cite{Kol} stated a stronger version of the second part of Conjecture \ref{brank1con}, namely: ``a rank-$r$ symmetric factorization of a symmetric tensor is unique even without the symmetry condition, under very mild conditions'', although she did not specify the nature of the mild conditions.
\begin{prop}\label{conjexistsymbr1ap} Assume that part \ref{uniqaptensym} of Conjecture \ref{brank1con} holds for some integers $n\ge 2$, $d\ge 3$. Then each $\mathcal{T}\in\mathrm{Sym}(n,d)$ has a best rank one symmetric approximation.
\end{prop}
\proof Assume that $\Psi_n\subset \mathrm{Sym}(n,d)$ is the variety of all symmetric tensors which do not have a unique best rank one approximation. So each $\mathcal{T}\in\mathrm{Sym}(n,d)\setminus\Psi_n$ has a unique best rank one approximation $\mathcal{A}(\mathcal{T})$, which is symmetric. Assume now that $\mathcal{T}\in\Psi_n$.
As $\Psi_n$ is a proper subvariety, there exists a sequence $\mathcal{T}_k$, $k\in\mathbb{N}$, which converges to $\mathcal{T}$ and such that $\mathcal{T}_k\not\in\Psi_n$ for all $k\in\mathbb{N}$. Use Corollary \ref{charb1bap} to deduce
\[ \|\mathcal{T}\|_{\infty,2}^2=\lim_{k\to\infty}\|\mathcal{T}_k\|_{\infty,2}^2=\lim_{k\to\infty}\langle\mathcal{T}_k,\mathcal{A}(\mathcal{T}_k)\rangle=\lim_{k\to\infty} \|\mathcal{A}(\mathcal{T}_k)\|^2.\]
So $\mathcal{A}(\mathcal{T}_k)$ is a bounded sequence in $\mathrm{Sym}(n,d)$. Hence there exists a subsequence $\mathcal{A}(\mathcal{T}_{k_l})$ which converges to a rank one symmetric tensor $\mathcal{A}$ satisfying $\|\mathcal{T}\|_{\infty,2}^2=\langle\mathcal{T},\mathcal{A}\rangle=\|\mathcal{A}\|^2$. Hence, by Corollary \ref{charb1bap}, $\mathcal{A}$ is a symmetric best rank one approximation of $\mathcal{T}$. \qed
A weaker version of the second part of Conjecture \ref{brank1con} is:
\begin{theo}\label{brank1symtheo} Every symmetric tensor $\mathcal{T}\in\mathrm{Sym}(n,d)$ has a symmetric best rank one approximation, for all integers $n\ge 2$, $d\ge 3$.
\end{theo}
Note that the above theorem is a special case of Theorem \ref{maintheo}. We will first prove Theorem \ref{brank1symtheo}, and using it we will prove Theorem \ref{maintheo}.
\begin{lemma}\label{nto2} Let $k\ge 2$. Assume that Theorem \ref{brank1symtheo} holds for $n=2$ and for all positive integers $d$ in the interval $[2,k]$. Then Theorem \ref{brank1symtheo} holds for all integers $n\ge 3$ and $d\in [2,k]$.
\end{lemma}
\proof We prove the lemma by induction on $k$. In view of Proposition \ref{branapproxsymmat}, Theorem \ref{brank1symtheo} trivially holds for $k=2$. Assume that $N\ge 3$ and suppose that we have proved the lemma for $k=N-1$. Assume that Theorem \ref{brank1symtheo} holds for $\mathrm{Sym}(2,N)$. Let $\mathcal{T}=[t_{i_1,\ldots,i_N}]\in \mathrm{Sym}(n,N)$ with $n\ge 3$. Suppose that $\|\mathcal{T}\|_{\infty,2}=|\langle\mathcal{T},\otimes_{j\in[N]}\mathbf{v}_j\rangle|$, where $\mathbf{v}_j\in \mathrm{S}^{n-1}$ for $j\in [N]$. Let $\mathcal{S}:=\mathcal{T}\times \mathbf{v}_N\in \mathrm{Sym}(n,N-1)$. So $\|\mathcal{S}\|_{\infty,2}=\|\mathcal{T}\|_{\infty,2}$. Our induction assumption implies that there exists $\mathbf{u}\in\mathrm{S}^{n-1}$ such that $\|\mathcal{S}\|_{\infty,2}=|\langle\mathcal{S},\otimes_{j\in[N-1]}\mathbf{u}_j\rangle|$, where $\mathbf{u}_j=\mathbf{u}$ for $j\in[N-1]$. Let $\mathbf{u}_N=\mathbf{v}_N$. Then $\|\mathcal{T}\|_{\infty,2}=|\langle\mathcal{T},\otimes_{j\in[N]}\mathbf{u}_j\rangle|$. If $\mathbf{u}_N=\pm \mathbf{u}$, it follows that $\langle\mathcal{T},\otimes_{j\in[N]}\mathbf{u}_j\rangle\otimes_{j\in[N]}\mathbf{u}_j$ is a best rank one symmetric approximation of $\mathcal{T}$, and we are done. Suppose that $\mathbf{u}_N\ne \pm \mathbf{u}$. So $\mathrm{span}(\mathbf{u},\mathbf{u}_N)$ is two dimensional. By changing to a suitable orthonormal basis of $\mathbb{R}^n$ we may assume without loss of generality that $\mathrm{span}(\mathbf{u},\mathbf{u}_N)=\mathrm{span}(\mathbf{e}_1,\mathbf{e}_2)$, where $\mathbf{e}_j$ is the $j$-th column of the identity matrix $I_n$. Let $\mathcal{T}'=[t_{i_1,\ldots,i_N}]_{i_1,\ldots,i_N\in [2]}\in \mathrm{Sym}(2,N)$.
So $\|\mathcal{T}\|_{\infty,2}=\|\mathcal{T}'\|_{\infty,2}$. Our assumption implies that there exists $\mathbf{w}'\in\mathrm{S}^1$ such that $\|\mathcal{T}'\|_{\infty,2}= |\langle\mathcal{T}',\otimes_{j\in[N]}\mathbf{w}'_j\rangle|$, where $\mathbf{w}_j'=\mathbf{w}'$ for $j\in [N]$. Let $\mathbf{w}_j=\mathbf{w}'\oplus \mathbf{0}_{n-2}\in\mathrm{S}^{n-1}$. Then $\langle\mathcal{T},\otimes_{j\in[N]}\mathbf{w}_j\rangle\otimes_{j\in[N]}\mathbf{w}_j$ is a symmetric best rank one approximation of $\mathcal{T}$. \qed
\section{Best rank one approximations of $\mathcal{T}\in\mathrm{Sym}(2,3)$}
\begin{theo}\label{sym23case} Let $\mathcal{T}=[t_{i,j,k}]\in\mathrm{Sym}(2,3)$. Then each best rank one approximation of $\mathcal{T}$ is symmetric, unless $\mathcal{T}$ is a nonzero tensor proportional to the following one:
\begin{equation}\label{sym23sten} t_{1,1,1}=\cos \theta,\ t_{1,1,2}=\sin \theta,\ t_{1,2,2}=-\cos\theta,\ t_{2,2,2}=-\sin\theta, \quad \theta\in [0,2\pi).
\end{equation}
For the above tensor there is a best rank one approximation of the form $\mathbf{u}\otimes\mathbf{v}\otimes\mathbf{w}(\mathbf{u},\mathbf{v})$, where $\mathbf{u},\mathbf{v}\in\mathrm{S}^1$ are arbitrary and $\mathbf{w}(\mathbf{u},\mathbf{v})\in\mathrm{S}^1$ is uniquely determined by $\mathbf{u},\mathbf{v}$. Furthermore, the above tensor has exactly three symmetric best rank one approximations.
\end{theo}
\proof Since $\mathcal{T}=0$ has the unique best rank one approximation $\mathcal{T}$ itself, we assume that $\mathcal{T}\ne 0$. Suppose that $\mathcal{T}$ has a best rank one approximation $c\,\mathbf{x}\otimes\mathbf{y}\otimes \mathbf{z}$, $\mathbf{x},\mathbf{y},\mathbf{z}\in \mathrm{S}^1$, which is not symmetric. (Note that $c\ne 0$.) By permuting $\mathbf{x},\mathbf{y},\mathbf{z}$ we can assume that $\mathbf{y}\ne \pm \mathbf{z}$. Let $A(\mathbf{x}):=\mathcal{T}\times \mathbf{x}\in\mathrm{Sym}(2,2)$. (Because $\mathcal{T}$ is symmetric, it does not matter which index we contract.) Hence $c\,\mathbf{y}\mathbf{z}^\top$ is a best rank one approximation of the symmetric matrix $A(\mathbf{x})$. (Note that $A(\mathbf{x})\ne 0$.) Proposition \ref{branapproxsymmat} yields that $\lambda_1(A(\mathbf{x}))+\lambda_2(A(\mathbf{x}))=0$, which is equivalent to $\mathrm{tr} (A(\mathbf{x}))=0$. Observe next that $A(\mathbf{x})^2$ is a scalar matrix, i.e. $A(\mathbf{x})^2=\lambda_1(A(\mathbf{x}))^2 I_2$. Let $\mathbf{u}\in\mathrm{S}^1$ be arbitrary and let $\mathbf{v}:=\frac{1}{\|A(\mathbf{x})\mathbf{u}\|}A(\mathbf{x})\mathbf{u}\in\mathrm{S}^1$. Proposition \ref{branapproxsymmat} yields that $\|A(\mathbf{x})\mathbf{u}\|\,\mathbf{u}\mathbf{v}^\top$ is a best rank one approximation of $A(\mathbf{x})$. Choose $\mathbf{u}$ so that $\mathbf{u},\mathbf{v}\not\in\{\mathbf{x},-\mathbf{x}\}$. Then $c'\mathbf{x}\mathbf{v}^\top$ is a nonsymmetric best rank one approximation of $A(\mathbf{u})$. The previous arguments show that $\mathrm{tr} (A(\mathbf{u}))=0$. Since $\mathbf{x}$ and $\mathbf{u}$ are linearly independent, it follows that the two frontal sections $A_k:=[t_{i,j,k}]_{i,j=1}^2\in \mathrm{Sym}(2,2)$, $k\in[2]$, have trace zero.
Taking into account that $\mathcal{T}\in\mathrm{Sym}(2,3)$ and that both frontal sections have zero trace, we deduce that $\mathcal{T}$ is proportional to the tensor given by \eqref{sym23sten}.
Assume now that $\mathcal{T}$ is of the form \eqref{sym23sten}. Let
\[A_1(\theta)=\left[\begin{array}{cc}\cos\theta&\sin\theta\\ \sin\theta&-\cos\theta\end{array}\right], \quad A_2(\theta)=\left[\begin{array}{cc}\sin\theta&-\cos\theta\\ -\cos\theta&-\sin\theta\end{array}\right]\]
be the two frontal sections of $\mathcal{T}$. Then
\begin{equation}\label{Axform} A((\cos\phi,\sin\phi)^\top)=\cos\phi\, A_1(\theta)+\sin\phi\, A_2(\theta)=A_1(\theta-\phi).
\end{equation}
So $\lambda_1(A(\mathbf{u}))=-\lambda_2(A(\mathbf{u}))=1$ for every $\mathbf{u}\in \mathrm{S}^1$. Hence any best rank one approximation of $A(\mathbf{u})$ is of the form $\mathbf{v}\mathbf{w}(\mathbf{u},\mathbf{v})^\top$, where $\mathbf{v}\in\mathrm{S}^1$ and $\mathbf{w}(\mathbf{u},\mathbf{v}):=A(\mathbf{u})\mathbf{v}$. This shows that any best rank one approximation of $\mathcal{T}$ is of the form $\mathbf{u}\otimes\mathbf{v}\otimes\mathbf{w}(\mathbf{u},\mathbf{v})$, as claimed. It is left to show that $\mathcal{T}$ has exactly $3$ different best rank one symmetric approximations. In view of \eqref{Axform}, by changing to a suitable orthonormal basis of $\mathbb{R}^2$ we may assume that $\theta=0$. The condition that $\mathcal{T}$ has a symmetric best rank one approximation means that we need to choose $\mathbf{x}\in\mathrm{S}^1$ such that $A(\mathbf{x})\mathbf{x}=\pm \mathbf{x}$. Note that if $A(\mathbf{x})\mathbf{x}=\mathbf{x}$ then $A(-\mathbf{x})(-\mathbf{x})=-(-\mathbf{x})$. Hence it suffices to find all $\mathbf{x}=(\cos\phi,\sin\phi)^\top\in\mathrm{S}^1$ such that $A(\mathbf{x})\mathbf{x}=\mathbf{x}$. This condition gives rise to the following three solutions:
\[(1,0)^\top, \quad (-\tfrac{1}{2},\tfrac{\sqrt{3}}{2})^\top, \quad (-\tfrac{1}{2},-\tfrac{\sqrt{3}}{2})^\top.\]
\qed
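The last computation is easy to reproduce numerically. The following NumPy sketch (an independent check, not part of the proof) builds the tensor \eqref{sym23sten} with $\theta=0$, verifies that $\langle\mathcal{T},\mathbf{u}\otimes\mathbf{v}\otimes\mathbf{w}(\mathbf{u},\mathbf{v})\rangle=1$ for random $\mathbf{u},\mathbf{v}\in\mathrm{S}^1$, and confirms that the three listed vectors solve $A(\mathbf{x})\mathbf{x}=\mathbf{x}$.
\begin{verbatim}
import numpy as np

# The tensor (sym23sten) with theta = 0: t_{111} = 1 and
# t_{122} = t_{212} = t_{221} = -1; all other entries are zero.
T = np.zeros((2, 2, 2))
T[0, 0, 0] = 1.0
T[0, 1, 1] = T[1, 0, 1] = T[1, 1, 0] = -1.0

def A(x):
    # The contraction A(x) = T x x, a symmetric 2x2 matrix.
    return np.einsum('ijk,k->ij', T, x)

rng = np.random.default_rng(3)
for _ in range(5):
    u = rng.standard_normal(2); u /= np.linalg.norm(u)
    v = rng.standard_normal(2); v /= np.linalg.norm(v)
    w = A(u) @ v                      # w(u, v); automatically a unit vector here
    assert np.isclose(np.linalg.norm(w), 1.0)
    assert np.isclose(np.einsum('ijk,i,j,k->', T, u, v, w), 1.0)

# The three symmetric critical points of Theorem (sym23case).
for x in [np.array([1.0, 0.0]),
          np.array([-0.5,  np.sqrt(3) / 2]),
          np.array([-0.5, -np.sqrt(3) / 2])]:
    assert np.allclose(A(x) @ x, x)
\end{verbatim}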
\section{Proofs of Theorems \ref{brank1symtheo} and \ref{maintheo}}
\begin{lemma}\label{lem2dcase} Let $d\ge 2$ be an integer and let $\mathcal{T}=[t_{i_1,\ldots,i_d}]\in \mathrm{Sym}(2,d)\setminus\{0\}$. Assume that $\mathcal{T}$ has a nonsymmetric best rank one approximation. Then for each $i_3,\ldots,i_d\in [2]$ the symmetric matrix $[t_{i,j,i_3,\ldots,i_d}]_{i,j=1}^2\in\mathrm{Sym}(2,2)$ has zero trace.
\end{lemma}
\proof For $d=2$ the lemma follows from Proposition \ref{branapproxsymmat}. For $d=3$ the lemma follows from Theorem \ref{sym23case}. We prove the lemma by induction on $d\ge 3$. Suppose that the lemma holds for $d=N\ge 3$ and assume that $d=N+1$. Suppose that $\mathcal{A}=a\otimes_{j=1}^d\mathbf{x}_j$, $\mathbf{x}_j\in\mathrm{S}^1$, $j\in[d]$, $a\ne 0$, is a nonsymmetric best rank one approximation of $\mathcal{T}$.
Since $\mathcal{T}$ is symmetric, it follows that for each permutation $\sigma: [d]\to [d]$ the decomposable tensor $a\otimes_{j=1}^d \mathbf{x}_{\sigma(j)}$ is a best rank one approximation of $\mathcal{T}$ which is nonsymmetric. Hence, without loss of generality, we may assume that $\mathbf{x}_d\ne \pm\mathbf{x}_{d-1}$. Fix the vectors $\mathbf{x}_1,\ldots,\mathbf{x}_{d-3}$ and consider $\mathcal{T}(\mathbf{x}_1,\ldots,\mathbf{x}_{d-3}):=\mathcal{T}\times \otimes_{j=1}^{d-3}\mathbf{x}_j\in\mathrm{Sym}(2,3)$. Clearly, $a\,\mathbf{x}_{d-2}\otimes\mathbf{x}_{d-1}\otimes\mathbf{x}_d$ is a nonsymmetric best rank one approximation of $\mathcal{T}(\mathbf{x}_1,\ldots,\mathbf{x}_{d-3})$. Theorem \ref{sym23case} yields that a best rank one approximation of $\mathcal{T}(\mathbf{x}_1,\ldots,\mathbf{x}_{d-3})$ can be chosen of the form $a\, \mathbf{w}(\mathbf{u},\mathbf{v})\otimes \mathbf{u}\otimes \mathbf{v}$, where $\mathbf{u},\mathbf{v}$ are arbitrary vectors in $\mathrm{S}^1$ and $\mathbf{w}(\mathbf{u},\mathbf{v})\in\mathrm{S}^1$. Fix the vector $\mathbf{v}=(v_1,v_2)^\top\in\mathrm{S}^1$. Then $\mathcal{A}:=a\bigl((\otimes_{j=1}^{d-3}\mathbf{x}_j)\otimes\mathbf{w}(\mathbf{u},\mathbf{v})\otimes\mathbf{u}\bigr)$ is a best rank one approximation of $\mathcal{T}(\mathbf{v}):=\mathcal{T}\times \mathbf{v}$. Observe that $\mathcal{A}$ is not symmetric for $\mathbf{u}\ne \pm \mathbf{x}_1$. Hence the induction hypothesis yields that the tensor $\mathcal{T}(\mathbf{v})=[t_{i_1,\ldots,i_{d-1},1}v_1+t_{i_1,\ldots,i_{d-1},2}v_2] \in\mathrm{Sym}(2,d-1)$ satisfies the conclusion of the lemma. That is, for any $i_3,\ldots,i_{d-1}\in [2]$ the matrix $[t_{i,j,i_3,\ldots,i_{d-1},1}v_1+t_{i,j,i_3,\ldots,i_{d-1},2}v_2]_{i,j=1}^2$ has zero trace. Taking $\mathbf{v}=(1,0)^\top$ and $\mathbf{v}=(0,1)^\top$, we deduce that the lemma holds for $\mathcal{T}$. \qed
\textbf{Proof of Theorem \ref{brank1symtheo}}. We first prove the theorem for $\mathcal{T}=[t_{i_1,\ldots,i_d}]\in \mathrm{Sym}(2,d)\setminus\{0\}$. Suppose first that for some $i_3,\ldots,i_d\in[2]$ the $2\times 2$ symmetric matrix $[t_{i,j,i_3,\ldots,i_d}]_{i,j\in[2]}$ does not have trace $0$. Then Lemma \ref{lem2dcase} yields that each best rank one approximation of $\mathcal{T}$ is symmetric. Assume now that for each $i_3,\ldots,i_d\in[2]$ the $2\times 2$ symmetric matrix $[t_{i,j,i_3,\ldots,i_d}]_{i,j\in[2]}$ has trace $0$. Let $\mathcal{S}=[s_{i_1,\ldots,i_d}]\in\mathrm{Sym}(2,d)$ be the following tensor: $s_{1,\ldots,1}=1$ and all other entries of $\mathcal{S}$ are zero. Then for any $\varepsilon\ne 0$ the tensor $\mathcal{T}+\varepsilon\mathcal{S}$ is symmetric, and the trace of the matrix $[t_{i,j,1,\ldots,1}+\varepsilon s_{i,j,1,\ldots,1}]_{i,j\in[2]}$ is $\varepsilon$. Lemma \ref{lem2dcase} yields that $\mathcal{T}+\varepsilon \mathcal{S}$ has a best rank one approximation $\mathcal{A}(\varepsilon)\in \mathrm{Sym}(2,d)$. Recall that
\begin{equation}\label{Aepsprop} \|\mathcal{A}(\varepsilon)\|^2=\|\mathcal{T}+\varepsilon\mathcal{S}\|_{\infty,2}^2=\langle\mathcal{T}+\varepsilon \mathcal{S},\mathcal{A}(\varepsilon)\rangle.
\end{equation}
Relation \eqref{pythid} yields that
\[\|\mathcal{A}(\varepsilon)\|\le \|\mathcal{T}+\varepsilon \mathcal{S}\|\le \|\mathcal{T}\|+|\varepsilon|\,\|\mathcal{S}\|=\|\mathcal{T}\|+|\varepsilon|.\]
Consider the bounded sequence $\mathcal{A}_m:=\mathcal{A}(\frac{1}{m})\in\mathrm{Sym}(2,d)$, $m\in\mathbb{N}$. There exists a subsequence $\mathcal{A}_{m_i}$, $i\in\mathbb{N}$, such that $\lim_{i\to\infty}\mathcal{A}_{m_i}=\mathcal{A}\in\mathrm{Sym}(2,d)$, where $\mathcal{A}$ is a decomposable tensor. Then \eqref{Aepsprop} and the continuity of $\|\cdot\|_{\infty,2}$ yield that $\mathcal{A}$ is a best rank one approximation of $\mathcal{T}$. This completes the proof of the theorem for $\mathcal{T}\in \mathrm{Sym}(2,d)$. Assume that $n>2$. Use Lemma \ref{nto2} to conclude that each $\mathcal{T}\in \mathrm{Sym}(n,d)\setminus\{0\}$ has a symmetric best rank one approximation. \qed
\textbf{Proof of Theorem \ref{maintheo}}. Let $\mathcal{T}\in \mathbb{R}^{n_1\times\ldots\times n_d}\setminus \{0\}$ and assume that $[d]=\cup_{j=1}^k \alpha_j$ is the symmetric decomposition for $\mathcal{T}$. We prove the theorem by induction on $k$. Assume that $k=1$, i.e. $\mathcal{T}\in\mathrm{Sym}(n,d)$. Then the theorem follows from Theorem \ref{brank1symtheo}. Suppose that the theorem holds for $k\in [m]$, where $m \ge 1$, and assume that $k=m+1$. Permute the factors in $\otimes_{j=1}^d\mathbb{R}^{n_j}$ to assume that $\alpha_1=\{1,\ldots,l\}$, $l<d$. Suppose that $a\otimes_{j=1}^d \mathbf{x}_j$, $\mathbf{x}_j\in \mathrm{S}^{n_j-1}$, $j\in[d]$, is a best rank one approximation of $\mathcal{T}$. Recall that $a\otimes_{j=l+1}^d \mathbf{x}_j$ is a best rank one approximation of $\mathcal{T}(\mathbf{x}_1,\ldots,\mathbf{x}_l):= \mathcal{T}\times \otimes_{j=1}^l \mathbf{x}_j$. Furthermore, \eqref{infshatnrmchar} yields that $\|\mathcal{T}\|_{\infty,2}=\|\mathcal{T}(\mathbf{x}_1,\ldots,\mathbf{x}_l)\|_{\infty,2}$. Observe that $\mathcal{T}(\mathbf{x}_1,\ldots,\mathbf{x}_l)$ is symmetric with respect to $\alpha_2,\ldots,\alpha_k$. Hence the induction hypothesis yields that there exists a best rank one approximation $b\otimes_{j=l+1}^d \mathbf{y}_j$ of $\mathcal{T}(\mathbf{x}_1,\ldots,\mathbf{x}_l)$, where $\mathbf{y}_j\in\mathrm{S}^{n_j-1}$ for $j>l$, with the property that $\mathbf{y}_p=\mathbf{y}_q$ for all $p,q\in\alpha_i$, $i>1$. Let $\mathcal{T}(\mathbf{y}_{l+1},\ldots,\mathbf{y}_d):=\mathcal{T}\times\otimes_{j=l+1}^d\mathbf{y}_j$. Then $\mathcal{T}(\mathbf{y}_{l+1},\ldots,\mathbf{y}_d)\in\mathrm{Sym}(n_1,l)$. Theorem \ref{brank1symtheo} yields that $\mathcal{T}(\mathbf{y}_{l+1},\ldots,\mathbf{y}_d)$ has a best rank one approximation of the form $c\otimes_{j=1}^l \mathbf{y}_j$, where $\mathbf{y}_1=\ldots=\mathbf{y}_l$. Hence the rank one approximation $c\otimes_{j=1}^d\mathbf{y}_j$ of $\mathcal{T}$ is symmetric with respect to $\alpha_1,\ldots,\alpha_k$. \qed
\section{Uniqueness of symmetric rank one approximation for generic symmetric tensors}
For $\mathbf{x}=(x_1,\ldots,x_n)^\top \in \mathbb{C}^n$ denote $\|\mathbf{x}\|=\sqrt{\sum_{i=1}^n |x_i|^2}$. By $\otimes^d\mathbb{C}^n$ we denote the tensor product of $d$ copies of $\mathbb{C}^n$.
Then $\otimes^d\mathbf{x}\in\otimes^d \mathbb{C}^n$ is the decomposable tensor $\mathbf{x}\otimes\ldots\otimes\mathbf{x}$. Denote by $\mathrm{Sym}(n,d,\mathbb{C})\subset \otimes^d\mathbb{C}^n$ the space of all symmetric tensors $\mathcal{T}=[t_{i_1,\ldots,i_d}]_{i_1,\ldots,i_d=1}^n$ with complex entries. It is well known that $\mathrm{Sym}(n,d,\mathbb{C})$ is isomorphic to $\mathbb{C}^{\binom{n+d-1}{d}}$.
\begin{theo}\label{numbcritsymten} Let $d\ge 3$, $n\ge 2$ be integers. Then there exists a strict algebraic variety $\Sigma(n,d)\subset\mathrm{Sym}(n,d,\mathbb{C})$ such that for each $\mathcal{T}\in \mathrm{Sym}(n,d,\mathbb{C})\setminus \Sigma(n,d)$ the symmetric eigensystem
\begin{equation}\label{symeigensys} \mathcal{T}\times \otimes^{d-1}\mathbf{x}=\mathbf{x}, \quad \mathbf{x}\ne \mathbf{0},
\end{equation}
has at most $(d-1)^n-1$ distinct solutions, and the set of solutions is invariant under multiplication by $(d-2)$-th roots of unity. Assume furthermore that $\mathcal{T}\in\mathrm{Sym}(n,d)\setminus\Sigma(n,d)$. If $d\ge 3$ is odd, the number of distinct real solutions of the above system is at most $\frac{(d-1)^n-1}{d-2}$. Furthermore, any real solution of
\begin{equation}\label{symeigensys1} \mathcal{T}\times \otimes^{d-1}\mathbf{x}=-\mathbf{x}, \quad \mathbf{x}\ne \mathbf{0},
\end{equation}
is the negative of a real solution of \eqref{symeigensys}. If $d$ is even, then the systems \eqref{symeigensys}--\eqref{symeigensys1} together have at most $\frac{2((d-1)^n-1)}{d-2}$ real solutions, and the set of these solutions is invariant under multiplication by $-1$.
\end{theo}
\proof Let $\mathcal{T}\in \mathrm{Sym}(n,d,\mathbb{C})$. Consider the system
\begin{equation}\label{homeigsys} \mathcal{T}\times \otimes^{d-1}\mathbf{x}=\mathbf{0},\quad \mathbf{x}\in\mathbb{C}^n.
\end{equation}
Let $\mathbf{u}_1,\ldots,\mathbf{u}_n\in \mathbb{C}^n$ be linearly independent and let $\mathcal{T}_0=\sum_{i=1}^n \otimes^d \mathbf{u}_i$. Then the above system is equivalent to $\sum_{i=1}^n (\mathbf{u}_i^\top \mathbf{x})^{d-1}\mathbf{u}_i=\mathbf{0}$. Since $\mathbf{u}_1,\ldots,\mathbf{u}_n$ are linearly independent, it follows that $(\mathbf{u}_i^\top \mathbf{x})^{d-1}=0$ for $i\in[n]$. Hence $\mathbf{u}_i^\top \mathbf{x}=0$ for $i\in[n]$. As $\mathbf{u}_1,\ldots,\mathbf{u}_n$ are linearly independent, it follows that $\mathbf{x}=\mathbf{0}$. That is,
\[\min_{\|\mathbf{x}\|=1,\mathbf{x}\in\mathbb{C}^n} \|\mathcal{T}_0\times \otimes^{d-1}\mathbf{x}\|>0.\]
The above inequality holds for all $\mathcal{T}\in\mathrm{Sym}(n,d,\mathbb{C})$ in some neighborhood of $\mathcal{T}_0$. Hence there exists a strict algebraic variety $\Sigma(n,d)\subset\mathrm{Sym}(n,d,\mathbb{C})$ such that for each $\mathcal{T}\in \mathrm{Sym}(n,d,\mathbb{C})\setminus \Sigma(n,d)$ the system \eqref{homeigsys} has the unique solution $\mathbf{x}=\mathbf{0}$.
Assume that $\mathcal{T}\in \mathrm{Sym}(n,d,\mathbb{C})\setminus \Sigma(n,d)$. Consider the polynomial system
\begin{equation}\label{symeigensys0} \mathcal{T}\times \otimes^{d-1}\mathbf{x}-\mathbf{x}=\mathbf{0}, \quad \mathbf{x}\in\mathbb{C}^n.
\end{equation}
Its principal part is the homogeneous system \eqref{homeigsys}, where each homogeneous polynomial has degree $d-1$. Hence \eqref{symeigensys0} has at most $(d-1)^n$ distinct solutions. (This is the precise version of Bezout's theorem; see for example \cite{Fr77}.) Note that $\mathbf{x}=\mathbf{0}$ is a solution. Hence \eqref{symeigensys} has at most $(d-1)^n-1$ distinct solutions. Clearly, if $\mathbf{x}\ne \mathbf{0}$ is a solution of \eqref{symeigensys}, then $\zeta\mathbf{x}$ is also a solution of \eqref{symeigensys} whenever $\zeta^{d-2}=1$. As $\mathbf{x}\ne \mathbf{0}$, note that $\zeta\mathbf{x}\ne \eta\mathbf{x}$ if $\zeta,\eta$ are two distinct $(d-2)$-th roots of $1$. This establishes the first part of the theorem.
Assume now that $\mathcal{T}\in \mathrm{Sym}(n,d)\setminus\Sigma(n,d)$. We know that \eqref{symeigensys} has at most $(d-1)^n-1$ distinct complex solutions. Assume that $\mathbf{x}\ne \mathbf{0}$ is a real solution. Then $\zeta\mathbf{x}$ is also a solution for every $\zeta$ with $\zeta^{d-2}=1$. Suppose that $d\ge 3$ is odd. Then for $\zeta^{d-2}=1$ the vector $\zeta\mathbf{x}$ is real if and only if $\zeta=1$. Hence each real solution of \eqref{symeigensys} gives rise to another $d-3$ distinct nonreal solutions of \eqref{symeigensys}. Hence the number of real solutions of \eqref{symeigensys} is at most $\frac{(d-1)^n-1}{d-2}$. Note that for any real solution $\mathbf{x}$ of \eqref{symeigensys}, $-\mathbf{x}$ is a real solution of \eqref{symeigensys1}, and vice versa. This proves the theorem for $d\ge 3$ odd. Assume now that $d\ge 3$ is even. Then for any real solution $\mathbf{x}$ of \eqref{symeigensys} or \eqref{symeigensys1} we get another real solution $-\mathbf{x}$, and $d-4$ nonreal solutions of \eqref{symeigensys} or \eqref{symeigensys1}, respectively, of the form $\zeta\mathbf{x}$, where $\zeta^{d-2}=1$, $\zeta\ne \pm 1$. Assume now that $\mathbf{x}$ is a real solution of \eqref{symeigensys1}. Then the vectors $\eta\mathbf{x}$ with $\eta^{d-2}=-1$ give rise to $d-2$ distinct nonreal solutions of \eqref{symeigensys}. Hence the total number of real solutions of the systems \eqref{symeigensys}--\eqref{symeigensys1} is at most $\frac{2((d-1)^n-1)}{d-2}$. \qed
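For $n=2$ the counts in Theorem \ref{numbcritsymten} are easy to observe numerically, since on $\mathrm{S}^1$ the function $\langle\mathcal{T},\otimes^d\mathbf{y}\rangle$ is a trigonometric polynomial whose critical points can be located by brute force. The following NumPy sketch (an illustration only; the random symmetrized tensor and the grid resolution are our choices) counts the critical points on the circle; by the correspondence between critical points with nonzero critical value and real solutions of \eqref{symeigensys}--\eqref{symeigensys1} used below, this count is at most $2((d-1)^n-1)/(d-2)=2d$ for generic $\mathcal{T}$.
\begin{verbatim}
import math
from itertools import permutations
import numpy as np

rng = np.random.default_rng(4)
d, n = 4, 2

# A generic tensor in Sym(2, d): symmetrize a random d-tensor.
G = rng.standard_normal((n,) * d)
T = sum(np.transpose(G, p) for p in permutations(range(d))) / math.factorial(d)

def grad_direction(y):
    # T x (tensor^{d-1} y): contract all modes but the first with y.
    S = T
    for _ in range(d - 1):
        S = S @ y
    return S

# On S^1 write y(t) = (cos t, sin t); then f(t) = <T, tensor^d y(t)> and
# f'(t) = d * <T x tensor^{d-1} y(t), y'(t)> by the symmetry of T.
ts = np.linspace(0.0, 2 * np.pi, 20000, endpoint=False)
dfdt = np.array([d * grad_direction(np.array([np.cos(t), np.sin(t)]))
                   @ np.array([-np.sin(t), np.cos(t)]) for t in ts])
num_crit = int(np.sum(np.sign(dfdt) != np.sign(np.roll(dfdt, -1))))

# Consistent with the bound 2((d-1)^n - 1)/(d-2) = 2d for n = 2.
assert num_crit <= 2 * d
print(num_crit, "critical points on the circle, bound", 2 * d)
\end{verbatim}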
\begin{theo}\label{simcritsymtens} There exists a subvariety $\Sigma_1(n,d)\subset\mathrm{Sym}(n,d,\mathbb{C})$, with $\Sigma_1(n,d)\supseteq \Sigma(n,d)$, with the following properties. Let $\mathcal{T}\in\mathrm{Sym}(n,d)\setminus\Sigma_1(n,d)$ and consider the nonzero critical values of $\langle\mathcal{T},\otimes^d\mathbf{x}\rangle$ on $\mathrm{S}^{n-1}$ and the corresponding critical points. Then
\begin{enumerate}
\item\label{simcritsymtens1} For odd $d\ge 3$ there are $N(\mathcal{T})\le \frac{2((d-1)^n-1)}{d-2}$ distinct nonzero critical values, and each critical value $\lambda$ has a unique corresponding critical point $\mathbf{x}\in\mathrm{S}^{n-1}$. Furthermore, $-\lambda$ is also a critical value, with the corresponding unique critical point $-\mathbf{x}$.
\item\label{simcritsymtens2} For even $d\ge 3$ there are $N(\mathcal{T})\le \frac{2((d-1)^n-1)}{d-2}$ distinct nonzero critical values, and each critical value $\lambda$ has exactly two critical points $\pm\mathbf{x}\in\mathrm{S}^{n-1}$. Furthermore, the absolute values of two distinct critical values are distinct.
\end{enumerate}
\end{theo}
\proof Let $\mathcal{T}\in \mathrm{Sym}(n,d,\mathbb{C})\setminus\Sigma(n,d)$. We claim that there exists a subvariety $\Sigma_0(n,d)\subset \mathrm{Sym}(n,d,\mathbb{C})$ such that for each $\mathcal{T}\not\in\Sigma(n,d)\cup\Sigma_0(n,d)$ the following conditions hold.
\begin{enumerate}
\item\label{doddsig1con} Assume that $d\ge 3$ is odd. Then the system \eqref{symeigensys} has $(d-1)^n-1$ solutions, and for two different solutions $\mathbf{x},\mathbf{y}$ the inequality $\mathbf{x}^\top \mathbf{x}\ne \mathbf{y}^\top \mathbf{y}$ holds.
\item\label{devensig1con} Assume that $d\ge 3$ is even. Then each of the systems \eqref{symeigensys} and \eqref{symeigensys1} has $(d-1)^n-1$ solutions, and for two different solutions $\mathbf{x},\mathbf{y}$ of either \eqref{symeigensys} or \eqref{symeigensys1} with $\mathbf{y}\ne -\mathbf{x}$, the inequality $\mathbf{x}^\top \mathbf{x}\ne \mathbf{y}^\top\mathbf{y}$ holds.
\end{enumerate}
We consider the special tensor $\mathcal{T}=\sum_{i=1}^n t_i^{-(d-2)}\otimes^d \mathbf{e}_i$, where $\mathbf{e}_i=(\delta_{1i},\ldots,\delta_{ni})^\top$ and $t_i\in\mathbb{C}\setminus\{0\}$ for $i\in[n]$. For this tensor we can explicitly calculate all solutions of \eqref{symeigensys}. Namely, a solution is of the form $(x_1,\ldots,x_n)^\top$, where each $\frac{x_i}{t_i}$ satisfies the equation $x(x^{d-2}-1)=0$. (We need to exclude from this list the trivial solution $\mathbf{x}=\mathbf{0}$.) This means that \eqref{symeigensys} has $(d-1)^n-1$ solutions. Moreover, the solutions of \eqref{symeigensys1} are of the form $\theta\mathbf{x}$, where $\mathbf{x}$ is a solution of \eqref{symeigensys} and $\theta$ is a fixed solution of $\theta^{d-2}=-1$. To be explicit, let $m\ge 0$ be the integer such that $\frac{d-2}{2^m}$ is an odd integer. Then we may take $\theta:=e^{\frac{\pi\sqrt{-1}}{2^{m}}}$.
Let $\mathbb{F}=\mathbb{Q}[\xi]$ be the finite extension field of the rational numbers $\mathbb{Q}$, where $\xi$ is a primitive root of $\xi^{d-2}=1$. (Each element of $\mathbb{F}$ is a polynomial in $\xi$ of degree at most $d-3$ with rational coefficients.) Assume that $t_1^2,\ldots,t_n^2$ are linearly independent over $\mathbb{F}=\mathbb{Q}[\xi]$. We first consider the simple case where $d\ge 3$ is odd. We claim that if $\mathbf{x}=(x_1,\ldots,x_n)^\top$ and $\mathbf{y}=(y_1,\ldots,y_n)^\top$ are different nonzero solutions of \eqref{symeigensys}, then $\mathbf{x}^\top\mathbf{x}\ne \mathbf{y}^\top\mathbf{y}$. Indeed, $\mathbf{x}=(\zeta_1 t_1,\ldots,\zeta_n t_n)^\top$ and $\mathbf{y}=(\eta_1 t_1,\ldots,\eta_n t_n)^\top$. The assumption that $\mathbf{x}$ and $\mathbf{y}$ satisfy \eqref{symeigensys} implies that each $\zeta_i$ and each $\eta_i$ satisfies the equation $s(s^{d-2}-1)=0$. Observe next that
\begin{equation}\label{xyeq} \mathbf{x}^\top\mathbf{x}-\mathbf{y}^\top\mathbf{y}=\sum_{i=1}^n (\zeta_i^2-\eta_i^2)t_i^2.
\end{equation}
Suppose that $\mathbf{x}^\top\mathbf{x}-\mathbf{y}^\top\mathbf{y}=0$. As $t_1^2,\ldots,t_n^2$ are linearly independent over $\mathbb{F}$, it follows that $\zeta_i^2=\eta_i^2$ for $i\in[n]$. So $\zeta_i=\pm \eta_i$ for $i\in [n]$. Clearly $\zeta_i=0\iff \eta_i=0$. Assume that $\zeta_i\ne 0$. Then $\zeta_i^{d-2}=1$ and $\eta_i^{d-2}=1$, and since $d-2$ is odd this forces $\zeta_i=\eta_i$. Hence $\zeta_i=\eta_i$ for all $i\in [n]$, so $\mathbf{x}=\mathbf{y}$, contrary to our assumption. Hence, there exists a subvariety $\Sigma_0(n,d)\subset \mathrm{Sym}(n,d,\mathbb{C})$ such that for $\mathcal{T}\in \mathrm{Sym}(n,d,\mathbb{C})\setminus(\Sigma(n,d)\cup \Sigma_0(n,d))$ condition \ref{doddsig1con} holds.
Assume now that $d\ge 3$ is even. Let $\mathbf{x}=(\zeta_1 t_1,\ldots,\zeta_n t_n)^\top$ and $\mathbf{y}=(\eta_1 t_1,\ldots,\eta_n t_n)^\top$ satisfy either \eqref{symeigensys} or \eqref{symeigensys1}. So $\zeta_i((\phi\zeta_i)^{d-2}-1)=\eta_i((\psi\eta_i)^{d-2}-1)=0$ for $i\in [n]$, where $\phi,\psi\in\{1,\theta\}$. Assume that $\mathbf{x}^\top\mathbf{x}-\mathbf{y}^\top\mathbf{y}=0$. So $\zeta_i=\pm \eta_i$ for $i\in [n]$. Hence $\phi=\psi$, since $(\pm 1)^{d-2}=1$ while $\theta^{d-2}=-1$. That is, either $\mathbf{x}$ and $\mathbf{y}$ both satisfy \eqref{symeigensys} or $\mathbf{x}$ and $\mathbf{y}$ both satisfy \eqref{symeigensys1}.
However, it is possible that $\mathbf{x}^\top\mathbf{x}-\mathbf{y}^\top\mathbf{y}=0$ while $\mathbf{x}\ne \pm \mathbf{y}$. We now find a tensor $\mathcal{T}'\in \mathrm{Sym}(n,d,\mathbb{C})$ in a small neighborhood of $\mathcal{T}$ such that for all $\mathbf{x}',\mathbf{y}'$ with $\mathbf{x}'\ne \pm\mathbf{y}'$ satisfying either the system \eqref{symeigensys} or \eqref{symeigensys1} for $\mathcal{T}'$, one has $(\mathbf{x}')^\top\mathbf{x}'-(\mathbf{y}')^\top\mathbf{y}'\ne 0$. Since any solution $\mathbf{x}$ of \eqref{symeigensys} or \eqref{symeigensys1} is a simple solution (as we have the maximal number of distinct solutions), we can use the implicit function theorem to find the unique solution $\mathbf{x}'$ of \eqref{symeigensys} or \eqref{symeigensys1}, respectively, in a neighborhood of the solution $\mathbf{x}$. Since $\mathbf{x}^\top\mathbf{x}-\mathbf{y}^\top\mathbf{y}=0$ can hold only if $\mathbf{x},\mathbf{y}$ both satisfy either \eqref{symeigensys} or \eqref{symeigensys1}, it is enough to show that $(\mathbf{x}')^\top\mathbf{x}'-(\mathbf{y}')^\top\mathbf{y}'\ne 0$ when $\mathbf{x}',\mathbf{y}'$ satisfy \eqref{symeigensys}.

Let $\mathcal{T}(\varepsilon):=\mathcal{T}+\varepsilon\mathcal{S}$ for a fixed $\mathcal{S}\in \mathrm{Sym}(n,d,\mathbb{C})$, and consider the system
\begin{equation}\label{epseigsys}
\mathcal{T}(\varepsilon)\times\otimes^{d-1}\mathbf{x}(\varepsilon)=\mathbf{x}(\varepsilon).
\end{equation}
The implicit function theorem yields that $\mathbf{x}(\varepsilon)$ can be expanded in a power series in $\varepsilon$:
\begin{equation}\label{powexpan}
\mathbf{x}(\varepsilon)=\mathbf{x}+\varepsilon \mathbf{x}_1+O(\varepsilon^2).
\end{equation}
Denote $\mathcal{T}(\mathbf{x}):=\mathcal{T}\times\otimes^{d-2}\mathbf{x}\in \mathrm{Sym}(n,2,\mathbb{C})$. The system \eqref{symeigensys} is equivalent to $\mathcal{T}(\mathbf{x})\mathbf{x}=\mathbf{x}$ and $\mathbf{x}^\top \mathcal{T}(\mathbf{x})=\mathbf{x}^\top$. The $\varepsilon$ term in \eqref{epseigsys} is
\begin{equation}\label{ftermepeig}
(d-1)\mathcal{T}(\mathbf{x})\mathbf{x}_1+\mathcal{S}\times \otimes^{d-1}\mathbf{x}=\mathbf{x}_1.
\end{equation}
Multiplying this equality by $\mathbf{x}^\top$, we deduce that
\begin{equation}\label{xx1teq}
\mathbf{x}^\top\mathbf{x}_1=\frac{\langle\mathcal{S},\otimes^d\mathbf{x}\rangle}{2-d}.
\end{equation}
Observe finally that
\begin{equation}\label{xepyepdif}
\mathbf{x}(\varepsilon)^\top\mathbf{x}(\varepsilon)-\mathbf{y}(\varepsilon)^\top\mathbf{y}(\varepsilon)=\mathbf{x}^\top\mathbf{x}-\mathbf{y}^\top\mathbf{y} +\frac{2\varepsilon}{2-d}\langle\mathcal{S},\otimes^d\mathbf{x}- \otimes^d\mathbf{y}\rangle+O(\varepsilon^2).
\end{equation}
Note that $\otimes^d\mathbf{x}-\otimes^d\mathbf{y}$ is the zero tensor if and only if $\mathbf{x}=\gamma\mathbf{y}$ with $\gamma^d=1$. Hence we can choose $\mathcal{S}\in \mathrm{Sym}(n,d,\mathbb{C})$, lying outside a finite number of subspaces of codimension one, such that the coefficient of $\varepsilon$ is different from zero for each pair of solutions $\mathbf{x},\mathbf{y}$ of \eqref{symeigensys} with $\mathbf{x}\ne \pm\mathbf{y}$ and $\mathbf{x}^\top\mathbf{x}=\mathbf{y}^\top\mathbf{y}$. Hence, there exists a subvariety $\Sigma_0(n,d)\subset \mathrm{Sym}(n,d,\mathbb{C})$ such that for $\mathcal{T}\in \mathrm{Sym}(n,d,\mathbb{C})\setminus(\Sigma(n,d)\cup \Sigma_0(n,d))$ condition \ref{devensig1con} holds.

Let $\Sigma_1(n,d)=\Sigma(n,d)\cup\Sigma_0(n,d)$ and $\mathcal{T}\in \mathrm{Sym}(n,d)\setminus \Sigma_1(n,d)$. Assume that $\lambda\ne 0$ is a critical value of $\langle\mathcal{T},\otimes^d \mathbf{z}\rangle$ on $\mathrm{S}^{n-1}$ with a corresponding critical point $\mathbf{y}\in\mathrm{S}^{n-1}$, so that $\mathcal{T}\times\otimes^{d-1}\mathbf{y}=\lambda\mathbf{y}$.

Assume first that $d\ge 3$ is odd. Clearly, $-\mathbf{y}$ is a critical point corresponding to the critical value $-\lambda$. Without loss of generality we may assume that $\lambda>0$. Then $\mathbf{x}:=\lambda^{-\frac{1}{d-2}}\mathbf{y}$ is a real solution of \eqref{symeigensys}. Vice versa, any real solution $\mathbf{x}$ of \eqref{symeigensys} gives rise to the critical point $\mathbf{y}=\frac{1}{\|\mathbf{x}\|}\mathbf{x}$ with critical value $\|\mathbf{x}\|^{-(d-2)}$. Recall that for $\mathcal{T}\in\mathrm{Sym}(n,d)\setminus\Sigma_1(n,d)$, \eqref{symeigensys} has exactly $(d-1)^n-1$ different complex solutions, with $(d-1)^n-1$ corresponding different values of $\mathbf{x}^\top\mathbf{x}$. Hence \eqref{symeigensys} has at most $\frac{(d-1)^n-1}{d-2}$ real solutions, and the lengths of all these solutions are distinct. Therefore, the number of positive critical values of $\langle\mathcal{T},\otimes^d\mathbf{y}\rangle$ on $\mathrm{S}^{n-1}$ is at most $\frac{(d-1)^n-1}{d-2}$, and to each positive critical value corresponds a unique critical point. These arguments prove the theorem for $d$ odd.

Assume now that $d$ is even. Then $-\mathbf{y}$ is a critical point corresponding to the same critical value $\lambda$.
Let $\mathbf{x}_+:=\lambda^{-\frac{1}{d-2}}\mathbf{y}$ if $\lambda>0$, and $\mathbf{x}_-:=(-\lambda)^{-\frac{1}{d-2}}\mathbf{y}$ if $\lambda<0$. Then $\mathbf{x}_+$ satisfies \eqref{symeigensys} and $\mathbf{x}_-$ satisfies \eqref{symeigensys1}. Theorem \ref{numbcritsymten} yields that the number of nonzero critical values of $\langle\mathcal{T},\otimes^d \mathbf{z}\rangle$ is at most $\frac{2((d-1)^n-1)}{d-2}$. It is left to show that the absolute values of the nonzero critical values are distinct. Let $\mathbf{x},\mathbf{y}$ be two solutions of either \eqref{symeigensys} or \eqref{symeigensys1}, and assume that $\mathbf{x}\ne \pm \mathbf{y}$. Then $\mathbf{u}:=\frac{1}{\|\mathbf{x}\|}\mathbf{x}$ and $\mathbf{v}:=\frac{1}{\|\mathbf{y}\|}\mathbf{y}$ are critical points of $\langle\mathcal{T},\otimes^d \mathbf{z}\rangle$ on $\mathrm{S}^{n-1}$, corresponding to critical values $\lambda$ and $\mu$ respectively. Clearly $|\lambda|=\|\mathbf{x}\|^{-(d-2)}$ and $|\mu|=\|\mathbf{y}\|^{-(d-2)}$. Since $\mathcal{T}\not\in \Sigma_1(n,d)$ we deduce that $\mathbf{x}^\top\mathbf{x}\ne \mathbf{y}^\top\mathbf{y}$. So $|\lambda|\ne |\mu|$. \qed

\begin{corol}\label{uniquerank1symap}
Let $d\ge 3$ be an integer and assume that $\mathcal{T}\in\mathrm{Sym}(n,d)\setminus\Sigma_1(n,d)$. Then $\mathcal{T}$ has a unique symmetric best rank one approximation.
\end{corol}

\proof A best rank one approximation corresponds to a critical point $\mathbf{y}$ of $\langle\mathcal{T},\otimes^d\mathbf{z}\rangle$ whose critical value $\lambda$ maximizes $\lambda^2$. The corresponding best rank one symmetric approximation is $\mathcal{A}:=\langle\mathcal{T},\otimes^d\mathbf{y}\rangle\otimes^d\mathbf{y}$, and $-\mathbf{y}$ gives the same $\mathcal{A}$. For $d$ odd all positive critical values are distinct, hence $\mathcal{A}$ is unique. For $d$ even, each nonzero critical value has exactly the two critical points $\pm \mathbf{y}$, and the absolute values of the critical values are distinct. Hence $\mathcal{A}$ is unique. \qed
\end{document}
\begin{document}

\title{Character and object\footnote{This is an expanded version of the corresponding journal publication. In the latter, Section~\ref{metaphysics:section}}}

\begin{abstract}
In 1837, Dirichlet proved that there are infinitely many primes in any arithmetic progression in which the terms do not all share a common factor. Modern presentations of the proof are explicitly higher-order, in that they involve quantifying over and summing over \emph{Dirichlet characters}, which are certain types of functions. The notion of a character is only implicit in Dirichlet's original proof, and the subsequent history shows a very gradual transition to the modern mode of presentation. In this essay, we describe an approach to the philosophy of mathematics in which it is an important task to understand the roles of our ontological posits and assess the extent to which they enable us to achieve our mathematical goals. We use the history of Dirichlet's theorem to understand some of the reasons that functions are treated as ordinary objects in contemporary mathematics, as well as some of the reasons one might want to resist such treatment. We also use these considerations to illuminate the formal treatment of functions and objects in Frege's logical foundation, and we argue that his philosophical and logical decisions were influenced by many of the same factors.
\end{abstract}

\tableofcontents

\section{Introduction}
\label{introduction:section}

The philosophy of mathematics has long been concerned with the nature of mathematical objects, and the proper methods for acquiring mathematical knowledge. But as of late some philosophers of mathematics have begun to raise questions of a broader epistemological character: What does it mean to properly \emph{understand} a piece of mathematics? In what sense can a proof be said to \emph{explain} a mathematical fact? In what senses can one proof be viewed as better than another one that establishes the same theorem? What makes a concept fruitful, and what makes one definition more natural than another? Why are certain historical developments viewed as important advances?

Questions like these are sometimes classified as pertaining to the \emph{methodology} of mathematics, in contrast to more traditional ontological concerns. One of our goals in this essay is to argue that methodology and ontology cannot be so cleanly separated. Certainly part of the justification for our ontological commitments stems from the positive effects those commitments have on the practice, and, conversely, ``internal'' methodological shifts are influenced by a broader conception as to what is permissible. In Section~\ref{metaphysics:section}, we describe a model for historical change that closely links ontological and methodological considerations.

One of the hallmarks of the nineteenth century transition to modern mathematics was the adoption of implicit or explicit set-theoretic language and methods. For Gauss \cite{gauss:01}, the number-theoretic relation of congruence modulo $m$ was a relation that was similar to equality, and addition and multiplication modulo $m$ were operations on integers that respect that relation. Today, however, we can form the quotient structure of integers modulo $m$, which consists of classes of integers that are equivalent modulo $m$. Addition and multiplication then lift to operations on these classes. This amounts to \emph{reifying} the property of being equivalent to an integer $a$ modulo $m$ to an object, $[a]$, the equivalence class of $a$.
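
To make the lifting concrete, here is the standard observation behind it (the particular example is ours, not Gauss's). One sets
\[
[a] + [b] := [a + b], \qquad [a] \cdot [b] := [a \cdot b],
\]
and these definitions are legitimate because congruence respects the operations: if $a \equiv a'$ and $b \equiv b'$ modulo $m$, then $a + b \equiv a' + b'$ and $a b \equiv a' b'$ modulo $m$, so the right-hand sides do not depend on the representatives chosen. For instance, modulo $3$ one has $[2] + [2] = [4] = [1]$, whichever representatives of $[2]$ one adds.
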
Similarly, to restore the property of unique factorization to the algebraic integers in a cyclotomic field, Kummer \cite{kummer:46} introduced properties $P(\alpha)$ that were meant to be interpreted as the assertion that $\alpha$ is divisible by a certain ``ideal divisor.'' Dedekind \cite{dirichlet:63b} later reified the property $P$ to the class of $\alpha$ that satisfy it, thereby giving rise to the modern notion of an ideal in a ring of integers. Other nineteenth century examples include the construction of quotient groups, or the lifting of Gauss' operation of ``composition'' of binary quadratic forms to equivalence classes of such forms. What these instances have in common is that they involve treating certain higher-order entities --- classes of integers, classes of algebraic integers, classes of quadratic forms, or classes of elements in a group or a ring --- as objects in their own right. By this we mean that, in particular, one can quantify over them, sum over them, and define operations on them. Moreover, one can consider algebraic structures whose elements are such classes, much as one can consider algebraic structures whose elements are integers or real or complex numbers. Much of what can be said about the treatment of classes as objects in the nineteenth century applies to the treatment of functions as objects as well. In 1837, Dirichlet proved that there are infinitely many prime numbers in any arithmetic progression in which the terms do not all share a common factor. Our goal here is to study the role that certain types of functions, called \emph{Dirichlet characters}, play in contemporary presentations of Dirichlet's proof, and the historical process that has led to our contemporary understanding. In Section~\ref{metaphysics:section}, we present a framework for assessing the ontological commitments of a body of mathematics, one which is informed by, and can inform, the history of mathematics. In Section~\ref{overview:section}, we provide an overview of Dirichlet's proof, and in Section~\ref{functions:section}, we clarify the senses in which contemporary presentations treat characters as ordinary mathematical objects. Despite the name, the notion of a Dirichlet character is not present in Dirichlet's original presentation. In Sections~\ref{dirichlet:section} and~\ref{transition:section}, we describe the history of presentations of Dirichlet's theorem, which shows a fitful and gradual transition to modern terminology and usage. In doing so, we draw on a detailed historical study that we have carried out in another work \cite{avigad:morris:unp}, which we will refer to as ``Concept'' in the presentation below. In Section~\ref{analysis:section}, we argue that, as per the model presented in Section~\ref{metaphysics:section}, the gradual adoption of the modern treatment of characters is best viewed as an ontological response to pragmatic mathematical concerns, and we explore some of the considerations that bear on the rationality of the outcome. Thus we use the history to help us understand and assess some of the reasons that we treat functions as objects in current mathematical practice. Complementing the mathematical narrative, in Section~\ref{frege:section}, we consider Frege's conflicted attitudes towards the treatment of functions as objects, and in Section~\ref{frege:section:b}, we argue that key choices in the design of his formal system were motivated by the same sorts of considerations. 
This is not to say that Frege's logico-philosophical concerns should be seen as properly mathematical, or vice versa. Rather, they both stem from the need to balance two key desiderata: the desire, on the one hand, for flexible and uniform ways of dealing with higher-order entities in the many guises in which they appear, and the desire, on the other hand, to make sure that the methods of doing so are clear, coherent, and meaningful.

\section{From methodology to ontology}
\label{metaphysics:section}

Let us start by distinguishing between two kinds of questions one can ask, having to do with the existence of mathematical objects. On the one hand, we can ask questions such as:
\begin{itemize}
\item Is there a nontrivial zero of the Riemann zeta function whose real part is not equal to $1/2$?
\item Are there noncyclic simple groups of odd order?
\end{itemize}
These are fundamentally \emph{mathematical} questions. Answering them is not easy: the Riemann hypothesis posits a negative answer to the first, while the Feit-Thompson theorem, a landmark in finite group theory, provides a negative answer to the second. But even in the first case, where we do not know the answer to the question, we feel that we have a clear sense as to what kind of argument would settle the issue one way or another. Put simply, questions like these can be addressed using conventional mathematical methods.

In contrast, there are questions like these:
\begin{itemize}
\item Do the natural numbers (really) exist, and what sorts of things are they?
\item Are there infinite totalities?
\item What kinds of sets and functions exist (if any), and what properties do they have?
\item Are there infinitesimals, fluxions, fluents, and ultimate ratios?
\end{itemize}
These are questions as to the ultimate nature of mathematics and its objects of study, and seem to call for a more general, open-ended \emph{philosophical} analysis. What is sought is not just an axiomatization of mathematics or an enumeration of the mathematical objects that exist, but also an explanation as to why we are justified in asserting their existence, with an overall account that squares with broader epistemological and scientific concerns.

The distinction between the two types of questions may call to mind the logical positivists' distinction between questions that are ``internal'' to a linguistic framework, and ``external'' or ``pragmatic'' questions pertaining to the choice of a framework itself. Some take this distinction to have been repudiated, decisively, by the criticisms of W.~V.~O.~Quine \cite{quine:51}. But keep in mind that Quine's arguments, which were directed against the claim that there is a sharp, principled distinction between the two sorts of questions, were not meant to show that there is no difference between them at all. In locating both kinds of questions on the common continuum of scientific inquiry, he did not deny that different kinds of questions call for different sorts of answers; indeed, his influential \emph{Word and Object} \cite{quine:60} is an extended exploration of the considerations that he took to bear on ``philosophical'' questions of the latter sort. Nothing we say below commits us to a sharp distinction, and it seems relatively uncontroversial to say that insofar as any rational arguments can be brought to bear on the second group of questions, these will look different from the kinds of arguments that are brought to bear on the first.
Despite their different characterizations of the philosophical project, Carnap and Quine shared the view that ontological questions come down to pragmatic questions as to the choice of a conceptual framework. Here is what Carnap had to say about our scientific commitments to abstract objects: \begin{quote} The acceptance cannot be judged as being either true or false because it is not an assertion. It can only be judged as being more or less expedient, fruitful, conducive to the aim for which the language is intended. Judgments of this kind supply the motivation for the decision of accepting or rejecting the kind of entities.\footnote{Here and below, when the bibliographic entry of a work includes a reprinted version, page numbers in the references refer to the reprinted version. Similarly, when the bibliographic entry includes an English translation, our translations are taken from that source, unless we indicate otherwise. Where no translation is listed, the translations are our own. The original versions of most of the mathematical sources quoted here can be found in ``Concept,'' so we have not reproduced them here.} \cite[p.~250]{carnap:50} \end{quote} Quine offered the following amendment: \begin{quote} Consider the question whether to countenance classes as entities. This, as I have argued elsewhere, is the question whether to quantify with respect to variables which take classes as values. Now Carnap has maintained that this is a question not of matters of fact but of choosing a convenient language form, a convenient conceptual scheme or framework for science. With this I agree, but only on the proviso that the same be conceded regarding scientific hypotheses generally. \cite[p.~43]{quine:51} \end{quote} We take these views seriously here, seeing it as an important philosophical task to clarify the role of our ontological posits with respect to ordinary mathematical activity, and evaluate their efficacy towards achieving our mathematical goals. This amounts to something like the naturalist approaches to the philosophy of mathematics advocated by Kitcher \cite{kitcher:88}, Burgess \cite{burgess:08}, and Maddy \cite{maddy:97}, focused on specific aspects of mathematical practice. How, then, should such an analysis proceed? It is instructive to consider those historical situations in which the mathematical community faced possibilities for methodological or ontological expansion and reacted accordingly. For example, it is helpful to consider the ancient Greek idealizations of number and magnitude, and the theory of proportion; the gradual acceptance of negative numbers, and then complex numbers, in the Western tradition; the use of algebraic methods in geometry, infinitesimals in the calculus, points at infinity in projective geometry; the development of the function concept from Euler to modern times; the gradual set-theoretic treatment of algebraic objects like cosets, ideals, equivalence classes in the nineteenth century; and so on. By studying the historical concerns regarding these expansions as well as the pressures that led to their ultimate acceptance, we can hope to better understand the factors that influence such developments. Indeed, at junctures like these, historical developments tend to follow a common pattern. First, expansions are met with resistance, or at least, extreme caution. 
Sometimes, the expansions can be explained in terms of the more conservative practice; for example, complex numbers can be interpreted as ordered pairs, algebraic solutions to geometric problems can be reinterpreted geometrically, and equations can be rewritten to avoid consideration of negative quantities. In other cases, the expansions are not generally conservative, but, at least, can be explained away in particular instances; for example, arguments involving infinitesimals can sometimes be interpreted in terms of ``ultimate ratios'' in a geometric diagram, and operations on abstract objects can sometimes be understood as operations on explicit representations. This makes it possible to adopt the expansions, tentatively, as convenient shorthand for more tedious but conservative arguments. Over time, the rules and norms that govern the expansions are clarified, and the expansions themselves prove to be convenient, or even indispensable, while they do not cause serious problems. Over time, the mathematical community grows used to them, to the point where they become part of the usual business of mathematics. Whiggish narratives tend to dismiss such historical hand-wringing and shilly-shallying as short-sighted conservativism that stands in the way of mathematical progress. We, however, prefer to view it as a rational response to the proposed expansions, whereby the benefits are carefully weighed against the concerns. In hindsight, we tend to make too little of the pitfalls associated with an ontological or methodological expansion. To start with, there are concerns about the \emph{consistency} and \emph{coherence} of the new methods, that is, worries as to whether the changes will lead to mistakes, false results, or utter nonsense, perhaps when employed in situations that have not even been imagined. Kenneth Manders has also emphasized the importance of maintaining \emph{control} of our mathematical practices \cite{manders:08}. Mathematics requires us to be able to come to agreement as to whether a proof is correct, or whether a given inference is valid or not. If new objects come with rules of use that are not fully specified, or vague, or unclear, the practice is in danger of breaking down. In a sense, this concern comes prior to concerns of consistency: if it is not clear what properties abstract magnitudes, negative numbers, complex numbers, infinitesimals, sets, and ``arbitrary'' functions have, it doesn't even make sense to ask whether using them correctly will lead to contradictions.\footnote{Mathematics, however, often gets by surprisingly well with concepts that are problematic, incompletely specified, and not fully understood, something which has been emphasized by Wilson \cite{wilson:94} and Urquhart \cite{urquhart:08}.} And then there are further concerns as to whether the new methods are \emph{meaningful} and \emph{appropriate} to mathematics. Even if a body of methods is consistent and clearly specified, it may still fail to provide us with the results we are after. If we expect an existence proof to yield certain kinds of information about the object that is asserted to exist, methods that fail to provide that sort of information do not constitute mathematics---or, at least, not the kind of mathematics we should be doing. If you expect a mathematical theory to make scientific predictions that we can act on rationally, it is a serious concern as to whether the new methods can deliver. In short, the concerns are not easily set aside. 
What, then, are the factors that might sway a decision in favor of an expansion? Mathematicians tend to wax poetic in their praise of conceptual advances, highlighting the power of new methods, the elegance and naturality of the resulting theory, and the insight and depth of the associated ideas. Part of our goal here is to de-romanticize these virtues and gain clarity as to what might be achieved. In many instances, the virtues in question have a lot to do with efficiency and economy of thought:\footnote{The phrase is borrowed from Ernst Mach's \emph{The Science of Mechanics} \cite{mach:93}; we are grateful to Michael Detlefsen for bringing this to our attention.} we tend to value methods that make it possible to solve problems that were previously unsolvable, or simplify proofs and calculations that were previously tedious, complex, and error-prone. Below we will consider specific ways in which ontological and methodological expansions help us manage complex tasks by suppressing irrelevant detail, making key features of a problem salient, and keeping key information ready-to-hand. We will also try to understand the way they make it possible to generalize and extend results, and facilitate the transfer of ideas to other domains. To summarize our high-level historical model: when mathematics is faced with methodological expansion, benefits such as simplicity, generality, and efficiency are invariably weighed against concerns as to the consistency, cogency, and appropriateness of the new methods. Sufficient benefit encourages us to entertain the changes cautiously, while trying to minimize the dangers involved. Cogency is obtained by working out the norms and conventions that govern the new methods. Consistency may not be guaranteed, but our experiences over time can bolster our faith that the new methods do not cause problems. In this regard, initial checks that the new methods are partially conservative over the old ones helps preserve mathematical meaning, and reassures us that even if the new methods turn out to be problematic, one will be able to restrict their scope in such a way that preserves their utility.\footnote{Wittgenstein's discussion of contradiction is interesting in this regard; see \cite[Lectures XI--XII]{wittgenstein:89}.} The philosophy of mathematics should give us better means to evaluate such expansions: to talk about the cogency of a mathematical argument and whether it delivers the desired result, and to understand the ways in which our ontological posits and methodological expansions improve our ability to reason effectively. A salient feature of our approach is that we aim to take mathematics at face value: when our best mathematical theories tell us that numbers and functions exist, our best philosophical theories should not repudiate those claims. This feature is common to other approaches to the philosophy of mathematics, such as the platonism espoused by William Tait \cite[\S 5]{tait:86}, or the non-eliminative structuralism proposed by Charles Parsons \cite[\S 18]{parsons:08}. On the other hand, we recognize an ongoing need for sustained reflection on our mathematical goals and methods, in order to better understand and improve that practice. Taking mathematics at face value doesn't mean viewing it as fixed and unchanging; mathematics has evolved for centuries and will continue to do so, guided, we hope, by thoughtful reflection of this sort. 
At times, it may seem that our treatment of ontological questions verges on a kind of formalist instrumentalism, for example, the view that there is nothing more to mathematics than linguistic conventions, which are to be adjudicated on the basis of ``pragmatic'' concerns. To be sure, we take pragmatic concerns to be an important target of philosophical study, but insofar as there is anything to be made of the realism/anti-realism debate with respect to mathematics, nothing we say here should preclude a realist position. For example, Hilary Putnam has argued that \begin{quote} \ldots at least when it comes to the theories that scientists regard as most fundamental\ldots we should regard all of the rival theories as candidates for truth or approximate truth, and that \emph{any philosophy of mathematics that would be inconsistent with so regarding them should be rejected}. \cite[p.~184]{putnam:12} \end{quote} Moreover: \begin{quote} \ldots a \emph{prima facie} attractive position---realism with respect to the theoretical entities postulated by physics, combined with \emph{antirealism} with respect to mathematical entities and/or modalities---doesn't work. [\emph{ibid.}, p.~188] \end{quote} Considering our mathematical and scientific theories as ``candidates for truth or approximate truth'' does not preclude reflecting on those theories and bringing pragmatic considerations to bear on the choices among them. Indeed, that is an integral part of the scientific enterprise, and it is the kind of activity we hope to support. \section{An overview of Dirichlet's theorem} \label{overview:section} Two integers, $m$ and $k$, are said to be \emph{relatively prime}, or \emph{coprime}, if they have no common factor. In 1837, Dirichlet proved the following: \begin{theorem} \label{dirichlet:theorem} If $m$ and $k$ are relatively prime, the arithmetic progression $m, m + k, m + 2k, \ldots$ contains infinitely many primes. \end{theorem} In other words, if $m$ and $k$ are relatively prime, there are infinitely many primes congruent to $m$ modulo $k$. In 1798, Legendre had assumed this, without justification, in a purported proof of the law of quadratic reciprocity. Gauss pointed out this gap, and presented two proofs of quadratic reciprocity in his \emph{Disquisitiones Arithmeticae} of 1801, which do not rely on that fact. He ultimately published six proofs of quadratic reciprocity, and left two more in his \emph{Nachlass}, but he never proved the theorem on primes in an arithmetic progression. Dirichlet's proof is notable not only for settling a longstanding open problem, but also for its sophisticated use of analytic methods to prove a number-theoretic statement. \subsection{Euler's proof that there are infinitely many primes} As Dirichlet himself made clear, the conceptual starting point for his proof lies in the work of Euler. In the \emph{Elements}, Euclid proved that there are infinitely many primes, but his proof does not provide much information about how they are distributed. Euler, in his \emph{Introductio in Analysin Infinitorum} \cite{Euler48}, proved the following: \begin{theorem} \label{euler:thm} The series $\sum_{q}\frac{1}{q}$ diverges, where the sum is over all primes $q$. \end{theorem} This implies that there are infinitely many primes, but also says something more about their density. For example, since we know that the series $\sum_{n}\frac{1}{n^{2}}$ is convergent, it tells us that, in a sense, there are ``more'' primes than there are squares. 
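
As a purely illustrative aside (not part of Euler's argument, and not needed in what follows), one can watch this divergence numerically. The short Python sketch below, using a naive sieve, tabulates the partial sums $\sum_{q \le N} 1/q$ over primes; the point is only that the sums keep growing, though extremely slowly.
\begin{verbatim}
# Illustrative sketch: partial sums of 1/q over the primes q <= N.
# The (very slow) growth of these sums reflects Euler's theorem that
# the full series diverges.

def primes_up_to(n):
    """Return the list of primes up to n, via the sieve of Eratosthenes."""
    sieve = [True] * (n + 1)
    sieve[0] = sieve[1] = False
    for p in range(2, int(n ** 0.5) + 1):
        if sieve[p]:
            for multiple in range(p * p, n + 1, p):
                sieve[multiple] = False
    return [p for p, is_prime in enumerate(sieve) if is_prime]

for bound in (10**3, 10**4, 10**5, 10**6):
    partial_sum = sum(1.0 / q for q in primes_up_to(bound))
    print(f"sum of 1/q over primes q <= {bound}: {partial_sum:.4f}")
\end{verbatim}
Nothing in the sequel depends on this computation; it merely illustrates the sense in which the primes are ``dense enough'' for the series to diverge.
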
Euler's proof of Theorem~\ref{euler:thm} centers around his famous zeta function, \[ \zeta(s) = \sum_{n=1}^{\infty}n^{-s} = 1 + \frac{1}{2^s} + \frac{1}{3^s} + \ldots, \] defined for a real variable $s$. (The zeta function was later extended by Riemann to the entire complex plane via analytic continuation.) It is not hard to show that the series $\zeta(s)$ converges whenever $s > 1$. In that case, the infinite sum can also be expressed as an infinite product: \begin{equation} \label{euler:product:eqn} \sum_{n=1}^{\infty} n^{-s}= \prod_{q}\left(1-\frac{1}{q^{s}}\right)^{-1}, \end{equation} where the product is over all primes $q$. This is known as the \emph{Euler product formula}. Roughly, this holds because we can write each term of the product as the sum of a geometric series, \[ \left(1-\frac{1}{q^{s}}\right)^{-1} = 1 + q^{-s} + q^{-2s} + \ldots \] and then expand the product into a sum. The unique factorization theorem tells us that every integer $n > 1$ can be written uniquely as a product $q_1^{i_1} \cdot q_2^{i_2} \cdots q_k^{i_k}$. This means that the term $n^{-s} = q_1^{-i_1 s} \cdot q_2^{-i_2 s} \cdots q_k^{-i_k s}$ will occur exactly once in the expansion, corresponding to the choice of the $i_j$th element of the sum for each $q_j$, and the choice of $1$ in every other sum. Since we are dealing with infinite sums and products, the Euler product formula implicitly makes a statement about limits, and some care is necessary to make the argument precise; but this is not hard to do. If we take the logarithm of each side of the product formula and appeal to properties of the logarithm function, we obtain \[ \log\sum_{n=1}^{\infty}n^{-s}=\sum_{q}-\log\left(1-\frac{1}{q^{s}}\right). \] Using the Taylor series expansion \[ \log(1 - x) = -x - x^2 / 2 - x^3 / 3 - \ldots \] and changing the order of summations yields \[ \log\sum_{n=1}^{\infty}n^{-s}=\sum_{q}\frac{1}{q^{s}} + \sum_{n=2}^{\infty}\frac{1}{n}\sum_{q}\frac{1}{q^{ns}}. \] Remember that we want to show that $\sum_{q}\frac{1}{q}$ diverges. Notice that the first term on the right-hand side of the above equation is $\sum_{q}\frac{1}{q^{s}}$. Thus we should consider what happens as $s$ tends to 1 from above. One can show that the second term on the right-hand side is bounded by a constant that is independent of $s$, a fact that can be expressed using ``big O'' notation as follows: \begin{equation} \label{euler:primes:eqn} \log\sum_{n=1}^{\infty}\frac{1}{n^s}=\sum_{q}\frac{1}{q^{s}} + O(1). \end{equation} As $s$ approaches $1$ from above, the left-hand side clearly tends to infinity. Thus, the right-hand side, $\sum_{q}\frac{1}{q^{s}}$, must also tend to infinity, which implies that $\sum_{q}\frac{1}{q}$ diverges. \subsection{Dirichlet's approach} \label{dirichlet:approach:section} To make the ideas more perspicuous, Dirichlet first considered Theorem~\ref{dirichlet:theorem} in the special case where the common difference is a prime number $p$. Any prime $q$ other than $p$ leaves a remainder of $1, \ldots, p-1$ when divided by $p$. Splitting up the sum in (\ref{euler:primes:eqn}) we then have \begin{equation} \label{euler:primes:eqn:b} \log\sum_{n=1}^{\infty}\frac{1}{n^s}=\sum_{q \equiv 1 \bmod p} \frac{1}{q^{s}} + \sum_{q \equiv 2 \bmod p} \frac{1}{q^{s}} + \ldots + \sum_{q \equiv p-1 \bmod p} \frac{1}{q^{s}} + O(1). 
\end{equation} This shows that (\ref{euler:primes:eqn}) is too crude to prove Theorem~\ref{dirichlet:theorem}: to show that there are infinitely many primes congruent to $m$ modulo $p$, we need to show that the $m$th term on the right-hand side tends to infinity, not just the sum of all such terms. More work is therefore needed to tease apart the contribution of the primes modulo $m$, for each nonzero residue $m$ modulo $p$. Dirichlet sketched his proof in a three-page note announcing the result in 1837 \cite{Dirichletshort}, before spelling out the details in a later publication \cite{Dirichlet37}. The method relies on a trick that seems to come out of nowhere. We describe the trick here, and in the Appendix offer an explanation as to how Dirichlet may have come upon this approach. It is a fact from number theory that for any prime number $p$, there is a number $g$, such that the powers $g^0, g^1, g^2, \ldots, g^{p-2}$ modulo $p$ are exactly the nonzero residues $1, 2, 3, \ldots, p-1$ modulo $p$ in some order. Such an element $g$ is called a \emph{primitive root modulo $p$}. For example, when $p = 11$, we can choose $g = 2$. In that case, the powers of $g$ modulo 11 are \[ 1, 2, 4, 8, 5, 10, 9, 7, 3, 6, \] which are just the numbers from 1 to 10 listed in a different order. Notice that the next element on the list would be $1$ again, and the list cycles. In general, if $g$ is a primitive root modulo $p$, then $g^{p-1}$ is equal to $1$ modulo $p$. The statement that $g$ is a primitive root modulo $p$ means that for each nonzero residue $m$ modulo $p$, there is an exponent $\gamma$ between $0$ and $p-2$, with the property that $g^\gamma$ is equal to $m$ modulo $p$. We will denote this exponent $\gamma_m$ and call it the \emph{index} of $m$ modulo $p$ with respect to $g$, as Dirichlet did. For example, consulting the list above, we see that the index of $10$ is $5$, because $2^5$ is equal to $10$ modulo $11$. The function $n \mapsto \gamma_n$ behaves like a logarithm, in the sense that if $m$ and $n$ are nonzero residues modulo $p$, $\gamma_{mn}$ is equal to $\gamma_m + \gamma_n$ modulo $p - 1$. This is because we have \[ g^{\gamma_m + \gamma_n} = g^{\gamma_m} g^{\gamma_n} = m n \bmod p, \] and so $\gamma_m + \gamma_n$ modulo $p - 1$ is the exponent corresponding to $mn$. We now turn our attention from integer roots modulo a prime to the notion of a complex root of unity. In general, if $n$ is any integer, the equation $x^n = 1$ will have $n$ distinct roots in the complex numbers. Moreover, we can choose such a root, $\omega$, that is primitive in the sense that $\omega^0, \omega^1, \omega^2, \ldots, \omega^{n-1}$ are all such roots; taking $\omega = e^{2 \pi i / n}$ will do. Notice that we are now using the phrase ``primitive root'' in two distinct, but related, senses: to refer to primitive roots modulo a prime, and to refer to primitive roots of unity. For future reference, notice also that the expression $x^n - 1$ factors as $(x - 1) (x^{n - 1} + \ldots + x^2 + x+ 1)$. So, for any complex number $x$, if $x$ is a solution to $x^n = 1$ other than $1$, we have $x^{n - 1} + \ldots + x^2 + x+ 1 = 0$. Returning to Dirichlet's theorem, let $p$ be any prime, fix a primitive root $g$ modulo $p$, and let $\omega$ be any $(p - 1)$st root of 1, primitive or not. Consider the function $\chi(n)$ which maps any nonzero residue $n$ to the value $\omega^{\gamma_n}$. The function $\chi$ is \emph{multiplicative}, which is to say, $\chi(mn) = \chi(m) \chi(n)$ for any two nonzero residues $m$ and $n$. 
This holds because
\[
\chi(mn) = \omega^{\gamma_{mn}} = \omega^{\gamma_m + \gamma_n} = \omega^{\gamma_m} \omega^{\gamma_n} = \chi(m) \chi(n).
\]
In the next section, we will see that the functions $\chi$ are exactly the \emph{characters} on the group of nonzero residues modulo $p$. Here, following Dirichlet, we will avoid writing $\chi(n)$ and stick with the notation $\omega^{\gamma_n}$.

A crucial ingredient in Dirichlet's proof is the observation that the Euler product formula can be generalized. What makes Euler's argument work is the fact that $(1 / m^s) \cdot (1 / n^s) = 1 / (mn)^s$, that is, the fact that the function which maps $n$ to $1 / n^s$ is multiplicative. The same argument goes through if we replace the quantity $1 / n^s$ by the function
\[
\psi(n) = \left\{ \begin{array}{ll} \omega^{\gamma_n} / n^s & \mbox{if $n$ is not divisible by $p$} \\ 0 & \mbox{otherwise.} \end{array} \right.
\]
Thus, generalizing (\ref{euler:product:eqn}), we obtain
\[
\sum_{p \nmid n} \frac{\omega^{\gamma_n}}{n^s}= \prod_{q \neq p}\left(1-\frac{\omega^{\gamma_q}}{q^{s}}\right)^{-1}.
\]
The sum on the left-hand side ranges over numbers $n$ that are not divisible by $p$, and the product on the right ranges over prime numbers $q$ other than $p$. Euler's calculation then shows that we have
\[
\log\sum_n\frac{\omega^{\gamma_n}}{n^s} =\sum_{q}\frac{\omega^{\gamma_q}}{q^{s}} + O(1),
\]
in place of (\ref{euler:primes:eqn}). Here the first sum ranges over the same values of $n$, and the second sum ranges over the same values of $q$ as before.

\emph{Now} decompose the sum on the right in terms of the remainder that $q$ leaves when divided by $p$, and notice that, by definition, $\gamma_q$ only depends on this remainder. In other words, we have
\begin{multline} \label{euler:character:eqn}
\log\sum_n\frac{\omega^{\gamma_n}}{n^s} = \Bigg(\sum_{q \equiv 1 \bmod p} \frac{ 1 }{q^{s}}\Bigg) \omega^{\gamma_1} + \Bigg(\sum_{q \equiv 2 \bmod p } \frac{ 1 }{q^{s}}\Bigg) \omega^{\gamma_2} + \ldots + \\
\Bigg(\sum_{q \equiv p - 1 \bmod p } \frac{ 1 }{q^{s}}\Bigg) \omega^{\gamma_{p-1}} + O(1).
\end{multline}
The next step involves the trick we alluded to above. Remember, to show that there are infinitely many primes congruent to $m$ modulo $p$, we want to show that the coefficient of the $m$th term in the preceding equation, $\sum_{q \equiv m \bmod p } \frac{ 1 }{q^{s}}$, approaches infinity as $s$ approaches $1$. If we let $\omega$ be a primitive $(p-1)$st root of 1, then all the roots are given by $\omega^0, \omega^1, \omega^2, \ldots, \omega^{p-2}$. The idea is to plug all these roots into the preceding equation, and use that to solve for the $m$th coefficient. Replacing $\omega$ by $\omega^i$ in the last equation yields
\begin{multline*}
\log\sum_n\frac{\omega^{i\gamma_n}}{n^s} = \Bigg(\sum_{q \equiv 1 \bmod p } \frac{ 1 }{q^{s}}\Bigg) \omega^{i \gamma_1} + \Bigg(\sum_{q \equiv 2 \bmod p } \frac{ 1 }{q^{s}}\Bigg) \omega^{i \gamma_2} + \ldots + \\
\Bigg(\sum_{q \equiv p - 1 \bmod p } \frac{ 1 }{q^{s}}\Bigg) \omega^{i \gamma_{p-1}} + O(1).
\end{multline*}
This yields $p-1$ many equations, as $i$ ranges from $0$ to $p - 2$. To solve for the $m$th coefficient, for each $i$, multiply the $i$th equation by $\omega^{-i \gamma_m}$, and add them. This is where the magic occurs.
If we write $L_i$ for the expression $\sum_{n=1}^{\infty}\frac{\omega^{i\gamma_n}}{n^s}$ that occurs on the left, then the sum of the left-hand sides can be written
\[
\log L_0 + \log L_1 \cdot \omega^{-\gamma_m} + \log L_2 \cdot \omega^{-2 \gamma_m} + \ldots + \log L_{p-2} \cdot \omega^{-(p-2) \gamma_m}.
\]
On the right-hand side, the $m$th term is exactly
\[
(p - 1) \cdot \Bigg(\sum_{q \equiv m \bmod p } \frac{ 1 }{q^{s}}\Bigg),
\]
because $\omega^{i \gamma_m} \cdot \omega^{-i \gamma_m} = 1$ for each $i$, and we are simply summing the same value, $\sum_{q \equiv m \bmod p } 1 / q^{s}$, $p - 1$ times. When $j$ is different from $m$, however, the $j$th term will be
\[
(\omega^{0 (\gamma_j - \gamma_m)} + \omega^{1 (\gamma_j - \gamma_m)} + \ldots + \omega^{(p-2) \cdot (\gamma_j - \gamma_m)}) \cdot \Bigg(\sum_{q \equiv j \bmod p } \frac{ 1 }{q^{s}} \Bigg).
\]
If we write $\eta = \omega^{\gamma_j - \gamma_m}$, then the coefficient in the last expression is
\[
1 + \eta + \eta^2 + \ldots + \eta^{p-2}.
\]
But since $\omega$ is a $(p-1)$st root of 1, so is $\eta$, and since $\gamma_j \neq \gamma_m$, $\eta$ is not equal to $1$. By the observation above, this sum is equal to $0$. In other words, all the other terms magically disappear. Thus we have shown that
\begin{multline} \label{main:equation:prime:case:a}
\log L_0 + \omega^{-\gamma_m} \log L_1 + \omega^{-2 \gamma_m} \log L_2 + \ldots + \omega^{-(p-2) \gamma_m} \log L_{p-2} = \\
(p - 1) \cdot \sum_{q \equiv m \bmod p } \frac{ 1 }{q^{s}} + O(1).
\end{multline}
Solving for $\sum_{q \equiv m \bmod p } 1 / q^{s}$ yields
\begin{multline} \label{main:equation:prime:case}
\sum_{q \equiv m \bmod p } \frac{ 1 }{q^{s}} = \frac{1}{p - 1} \Bigg(\log L_0 + \omega^{-\gamma_m} \log L_1 + \omega^{-2 \gamma_m} \log L_2 + \ldots + \\
\omega^{-(p-2) \gamma_m} \log L_{p-2} \Bigg) + O(1).
\end{multline}
As a result, we have managed to ``extricate'' the expression $\sum_{q \equiv m \bmod p } 1 / q^{s}$ from (\ref{euler:primes:eqn:b}). The goal is now to show that this expression approaches infinity as $s$ approaches $1$.

We now come to the analytic part of Dirichlet's proof: he showed that as $s$ approaches $1$, $L_0$ approaches infinity, but each of the other $L_i$'s approaches a nonzero limit. This implies that the right-hand side approaches infinity as $s$ approaches $1$. Thus the left-hand side approaches infinity as well, which is only possible if there are infinitely many primes congruent to $m$ modulo $p$.

The presentation here follows Dirichlet's short 1837 presentation fairly closely, though Dirichlet is more terse. As Dirichlet pointed out in that note, the argument can be pushed through for an arbitrary modulus $k$. But, as we will see in Section~\ref{dirichlet:section}, the details become unwieldy, and subsequent authors found more convenient ways to express the ideas. In the next section, we explain how the argument above can be described in terms of group characters, and then generalized to the case of an arbitrary modulus.

\subsection{Group characters}
\label{group:character:section}

Let $G$ be a finite abelian group. In contemporary terms, a \emph{character on $G$} is a function $\chi$ from $G$ to the set of nonzero complex numbers with the property that, for every $g_1, g_2 \in G$, $\chi(g_1 g_2) = \chi(g_1) \chi(g_2)$. If $g$ is an element of any finite abelian group, then there is an integer $n > 0$ such that $g^n$ is equal to the identity element of $G$. This implies that $\chi(g)^n = \chi(g^n) = \chi(1) = 1$.
This means that for every $g$, $\chi(g)$ is a complex root of 1. The notion of ``character'' introduced in the last section corresponds to the special case where $G$ is the group of nonzero residues modulo $p$, with the operation of multiplication. The point is that the key properties of the expressions $\omega^{\gamma_n}$ that came into play in the last section hold more generally of the set of characters on any finite abelian group. In particular, for any such group $G$, one can show that there are exactly $|G|$ many distinct characters on $G$, where $|G|$ denotes the number of elements of $G$. In the case where $G$ is the group of nonzero residues modulo $p$, $|G| = p - 1$, so the characters correspond to the $p - 1$ choices of $\omega$ in the previous section. More generally, for any $k \geq 1$, the set of residues $m$ modulo $k$ that have no common factor with $k$ forms a group under multiplication. The cardinality of this group is commonly denoted $\varphi(k)$, and $\varphi$ is known as the Euler phi function. Thus, for every $k$, there are $\varphi(k)$ many characters on the group of residues modulo $k$.

In fact, the set of characters itself has the structure of a group $\widehat G$, where the identity is the character $\chi_0$ that always returns 1, and the product of two characters is given pointwise, $(\chi \cdot \chi')(g) = \chi(g) \chi'(g)$ for every $g$. The following theorem expresses two important properties, known as the ``orthogonality relations'' for group characters.
\begin{theorem} \label{ortho}
Let $G$ be a finite abelian group. Then for any character $\chi$ in $\widehat{G}$, we have
\[
\sum_{g \in G}\chi(g) = \begin{cases} |G| & \mbox{if $\chi = \chi_{0}$} \\ 0 & \mbox{if $\chi \neq \chi_{0}$,} \end{cases}
\]
and for any element $g$ of $G$, we have
\[
\sum_{\chi \in \widehat{G}}\chi(g) = \begin{cases} |G| & \mbox{if $g=1_{G}$} \\ 0 & \mbox{if $g \neq 1_{G}$.} \end{cases}
\]
\end{theorem}
The remarkable fact is that it is no harder to prove these facts in the general case than in the specific case where $G$ is a group of residues modulo $p$. For example, the second equation clearly holds when $g$ is the identity of $G$, since, in this case, each term of the sum is equal to $1$. Otherwise, choose a character $\psi$ such that $\psi(g) \neq 1$ and note
\[
\psi(g) \sum_{\chi \in \widehat{G}} \chi(g) = \sum_{\chi \in \widehat{G}} \psi(g) \chi(g) = \sum_{\chi \in \widehat{G}} \chi(g),
\]
since multiplying each character $\chi$ in $\widehat{G}$ by $\psi$ simply permutes the elements of $\widehat{G}$. Subtracting the right side of the equation from the left, we see that $(\psi(g) - 1) \cdot \sum_{\chi \in \widehat{G}} \chi(g) = 0$, and since $\psi(g)$ is not equal to $1$, we have that $\sum_{\chi \in \widehat{G}} \chi(g) = 0$. The first equation can be established in a similar way.

The second orthogonality relation gives rise to the ``cancellation trick'' used in the last section, where we multiplied each identity by $\omega^{- i \gamma_m}$ and added them, to isolate a particular coefficient. The general phenomenon can be expressed as follows:
\begin{corollary} \label{orthocorrol}
For any $g, h \in G$ we have the following:
\[
\sum_{\chi \in \widehat{G}}\chi(g)\overline{\chi(h)}= \begin{cases} |G| & \ \mbox{\rm if} \ g=h \\ 0 & \ \mbox{\rm if} \ g \neq h.\end{cases}
\]
\end{corollary}
Here $\bar z$ denotes the complex conjugate of $z$, which is in fact equal to $1 / z$ when $z$ is a root of unity.
The corollary follows from the fact that we have
\[
\sum_{\chi \in \widehat{G}}\chi(g)\overline{\chi(h)} =\sum_{\chi \in \widehat{G}}\chi(g)\chi(h)^{-1}=\sum_{\chi \in \widehat{G}}\chi(gh^{-1})= \begin{cases} |G| & \ \mbox{if} \ g=h \\ 0 & \ \mbox{if} \ g \neq h. \end{cases}
\]
Notice that the abstract algebraic formulation simplifies matters by eliminating clutter. For example, the presentation in the last section depended on choices of a primitive element $g$ modulo $p$, and a primitive $(p-1)$st root of unity $\omega$. Although these played a role in the computations, any choice of $g$ and $\omega$ works just as well. The abstract version ``factors these out'' of the presentation. Recall also that the calculation in the last section required facts such as $\gamma_{m n} = \gamma_m + \gamma_n$. Once again, the abstract version factors this out of the computation; the requisite property of $\gamma$ is subsumed by the more general fact that $\widehat{G}$ is a group, and only the latter fact enters into the proof.

\subsection{A modern formulation of Dirichlet's proof}
\label{modern:formulation:section}

With the notion of a group character in mind, we can now describe Dirichlet's original proof of Theorem~\ref{dirichlet:theorem} in modern terms. Let $k$ be an integer greater than or equal to $1$. It is a fundamental theorem of number theory that an integer $n$ is relatively prime to $k$ if and only if $n$ has a multiplicative inverse modulo $k$; in other words, if and only if there is some $n'$ such that $n n' \equiv 1 \bmod{k}$. This implies that the residues of integers modulo $k$ that are relatively prime to $k$ form a group, denoted $(\mathbb{Z}/k\mathbb{Z})^*$, with multiplication modulo $k$. As noted above, the cardinality of $(\mathbb{Z}/k\mathbb{Z})^*$, that is, the number of residues relatively prime to $k$, is denoted $\varphi(k)$.

A character $\chi$ on the group of residues modulo $k$ can be viewed as a function defined on all integers by
\[
X(n) = \begin{cases} \chi(n \bmod k) &\text{if $n$ is relatively prime to $k$} \\ 0 &\text{otherwise.} \end{cases}
\]
Such a function is called a \emph{Dirichlet character modulo $k$}. Dirichlet characters are \emph{completely multiplicative}, which is to say, $X(1) = 1$ and $X(mn) = X(m)X(n)$ for every $m$ and $n$ in $\mathbb{Z}$. Mathematicians typically use the symbol $\chi$ to range over Dirichlet characters, blurring the distinction between such functions and their group-character counterparts. This is harmless, since there is a one-to-one correspondence between the two, and so we will adopt this practice as well.

Recall that in the case where $k$ is a prime number $p$, Dirichlet considered certain expressions $L_i(s)$, analogues of Euler's zeta function, where $i$ is an integer between $0$ and $p - 2$. Each such $i$ corresponds to a choice of a character $\chi$ modulo $p$. In the modern formulation, then, we define
\[
L(s, \chi)= \sum_{n=1}^{\infty}\frac{\chi(n)}{n^{s}},
\]
where $\chi$ is such a character. The function $L(s, \chi)$ is called the \emph{Dirichlet $L$-function}, or \emph{$L$-series}. The calculation in Section~\ref{dirichlet:approach:section} can be generalized to show:
\[
\log L(s, \chi) = \sum_{q \nmid k}\frac{\chi(q)}{q^{s}} \ + \ O(1).
\]
Now comes the crucial use of Corollary~\ref{orthocorrol} to pick out the primes in the relevant residue class. We multiply each side of the above equation by $\overline{\chi(m)}$ and then take the sum of these over all the Dirichlet characters modulo $k$.
(Recall that we can identify each Dirichlet character with the corresponding group character, that is, the corresponding element of $\widehat{(\mathbb{Z}/k \mathbb{Z})^*}$.) Thus we have:
\begin{equation*}
\sum_{\chi \in \widehat{(\mathbb{Z}/k \mathbb{Z})^*}} \overline{\chi(m)}\log L(s, \chi) = \sum_{\chi \in \widehat{(\mathbb{Z}/k \mathbb{Z})^*}} \overline{\chi(m)}\sum_{q \nmid k}\frac{\chi(q)}{q^{s}}\ + \ O(1).
\end{equation*}
To simplify this expression, we exchange the summations on the right-hand side, and appeal to Corollary~\ref{orthocorrol}. Since the cardinality of the group $(\mathbb{Z} / k \mathbb{Z})^*$ is $\varphi(k)$, we obtain
\begin{equation} \label{sumoverchar2}
\sum_{\chi \in \widehat{(\mathbb{Z} / k \mathbb{Z})^*}}\overline{\chi(m)}\log L(s, \chi) = \varphi(k) \sum_{q \equiv m \pmod{k}}\frac{1}{q^{s}} \ + \ O(1).
\end{equation}
This is analogous to the equation (\ref{euler:primes:eqn}) in Euler's proof, and equation (\ref{main:equation:prime:case:a}) in Section~\ref{dirichlet:approach:section}. Our goal is once again to show that the left-hand side tends to infinity as $s$ approaches 1 from above. This implies that the right-hand side tends to infinity, which, in turn, implies that there are infinitely many primes $q$ that are congruent to $m$ modulo $k$.

To show that $\sum_{\chi \in \widehat{(\mathbb{Z} / k \mathbb{Z})^*}}\overline{\chi(m)}\log L(s, \chi)$ tends to infinity as $s$ approaches $1$, we divide the characters into three classes, as follows:
\begin{enumerate}
\item The first class contains only the principal character $\chi_0$, which takes the value of 1 for all arguments that are relatively prime to $k$, and 0 otherwise.
\item The second class consists of all those characters which take only real values (i.e.\ 0 or $\pm 1$), other than the principal character.
\item The third class consists of those characters which take at least one complex value.
\end{enumerate}
It is not difficult to show that $L(s, \chi_{0})$ has a simple pole at $s=1$, which implies that the term $\overline{\chi_0(m)} \log L(s,\chi_0)$ approaches infinity as $s$ approaches $1$. The real work involves showing that for all the other characters $\chi$, $L(s,\chi)$ has a finite nonzero limit. This implies that the other terms in the sum approach a finite limit, and so the entire sum approaches infinity. For characters in the third class, that is, the characters that take on at least one complex value, the result is not difficult. For characters in the second class, the result is much harder, and Dirichlet used deep techniques from the theory of quadratic forms to obtain it. In the years that followed, other mathematicians found alternative, and simpler, ways of handling this case. But even in modern presentations, this case remains the most substantial and technically involved part of the proof.

\section{Functions as objects}
\label{functions:section}

In Section~\ref{dirichlet:section} below, we will discuss, in greater detail, the implicit treatment of characters in Dirichlet's original proof, and in Section~\ref{transition:section}, we will summarize the gradual historical transition to the modern formulation. The general theme will be that, over time, characters came to be treated as objects in their own right.
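
As a purely illustrative aside, it may help to see what this modern usage looks like when mimicked in a setting where functions are unproblematically values. The following Python sketch is ours, not part of any of the historical presentations; the names \texttt{index}, \texttt{make\_character}, and \texttt{L\_truncated} are our own, and for simplicity the modulus is taken to be prime. It constructs the characters modulo $p$ as in Section~\ref{dirichlet:approach:section}, sums over them as in Corollary~\ref{orthocorrol}, and passes them as arguments to a truncated $L$-series.
\begin{verbatim}
# Illustrative sketch only: the characters modulo a prime p, realized as
# Python function objects that can be collected in a list, summed over,
# and passed as arguments.
import cmath

p, g = 11, 2   # a prime modulus and a primitive root modulo p

def index(n):
    """The index gamma_n: the exponent with g**gamma_n congruent to n mod p."""
    k, power = 0, 1
    while power % p != n % p:
        k, power = k + 1, power * g
    return k

def make_character(i):
    """The character chi_i(n) = omega**(i * gamma_n), and 0 when p divides n."""
    omega = cmath.exp(2j * cmath.pi / (p - 1))
    return lambda n: 0 if n % p == 0 else omega ** (i * index(n))

characters = [make_character(i) for i in range(p - 1)]   # all p - 1 characters

# Orthogonality: summing over the characters singles out a residue class.
for m in (3, 1):
    total = sum(chi(3) * chi(m).conjugate() for chi in characters)
    print(m, round(abs(total), 6))   # prints p - 1 for m = 3 and 0 for m = 1

def L_truncated(s, chi, terms=10000):
    """A finite truncation of the Dirichlet L-series L(s, chi)."""
    return sum(chi(n) / n ** s for n in range(1, terms + 1))
\end{verbatim}
Here the characters are, quite literally, values: they can be stored in a list, bound by a summation, and handed to $L$ as arguments. It is just this sort of usage whose gradual emergence is traced in the sections that follow.
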
Before surveying the history, however, it will be helpful for us to provide some general background information on the nineteenth-century concept of ``function,'' and begin to spell out what it means to treat functions like characters as ``objects.'' In ``Concept,'' we discussed a number of nineteenth-century methodological changes that are clustered around the function concept. These include what we termed the ``unification'' or ``generalization'' of the function concept, whereby particular instances (including real- and complex-valued functions, number-theoretic functions, sequences, permutations, transformations, automorphisms, and so on) gradually came to be subsumed under a general notion; the ``liberalization'' of the function concept, whereby mathematicians adopted novel means of defining particular functions, such as Dirichlet's 1827 example of a real-valued function that takes one value on the rationals, and another value on the irrationals; the ``extensionalization'' of the function concept, whereby functions gradually came to be viewed less as syntactic or algebraic expressions, and more as the abstract entities denoted by such expressions; and the ``reification'' of the function concept, whereby functions were gradually treated as \emph{bona fide} mathematical objects.

The notion of ``reification'' is vague. The claim that over the course of the century characters gradually came to be treated as new sorts of objects supports our contention that the transformation has ontological overtones, but it raises serious questions as to what, exactly, it means to treat certain entities as objects. To start with, consider the fact that in our presentation of Dirichlet's theorem we identified the concept of a ``character,'' reasoned about the entities falling under this concept, and ascribed various properties to them. This seems to be a bare-minimum requirement to support the claim that a mathematical text sanctions certain entities as objects, namely, that it recognizes them as being entities of a certain \emph{sort}, capable of bearing predicates and being the target of certain operations. It does not matter whether we take this sort as fundamental (for example, as we take the notion of ``integer'' in most contexts) or as derived from a broader sort (for example, when we view characters as functions of a certain kind). What is important is that the entities belong to a grammatically recognized category, and this category helps determine the predicates and operations that can be meaningfully ascribed to them. For example, one can talk about one integer being larger than another, but not about one character being larger than another. In sum, our first criterion of objecthood is whether the entities in question have a recognizable role in the grammar of the language.

The fact that we took characters to be ``represented'' by certain symbolic expressions provides another clue, insofar as we generally speak of a representation \emph{of} something or other. For example, we think of expressions like ``$6$'' and ``$2 \times 3$'' as representing an integer. As Michael Detlefsen has pointed out to us, one common view is that an ``object'' is what remains invariant under all its representations; in other words, what is left over when one has ``squeezed out'' all the features that are contingent on particular representations. When it comes to the notion of a function, what is the underlying invariant?
There may be lots of ways of describing a particular function, but what makes them representations of the \emph{same} function is surely that they take the same value on any given input. Thus treating function expressions extensionally is a sign that one is reasoning about functions as objects, rather than reasoning about the expressions themselves.\footnote{Recall Quine's dictum that ``there is no entity without identity,'' for example in \cite{quine:69}.}

A third hallmark of objecthood on our list is the fact that we can \emph{sum} over characters, just as we can sum over natural numbers. Notice that in an expression $\sum_{\chi} \ldots \chi \ldots$, the variable $\chi$ is a bound variable that ranges over the entities in question. Similar considerations hold for the universal and existential quantifiers. If we view the natural numbers as quintessential mathematical objects, a sign that entities of a given sort have attained the status of objecthood is that it is possible to quantify over them in theorems and definitions, just as one quantifies over the natural numbers.\footnote{This echoes another Quine dictum, ``to be is to be the value of a bound variable'' \cite[p.~15]{quine:48}.} This consideration admits of degrees: whereas the bare-minimum requirement discussed above may allow us to state theorems about, and define operations on, ``arbitrary'' entities of the sort, a more full-blown notion of objecthood will give us more latitude in the kinds of quantification and binding that are allowed.

A fourth criterion for objecthood is evidenced by the fact that characters are allowed to appear as \emph{arguments} to the $L$-functions, for example, in the expression $L(s,\chi)$. To avoid making this consideration depend on the modern notion of a function, let us note that what is essential here is that an expression denoting a recognized mathematical object (in this case, a complex number) is allowed to \emph{depend} on a character, much the way that a real number $(s)_i$ in a sequence depends on the value of the index $i$, or a value $\varphi(n)$ of the phi function depends on $n$. What makes this more potent than the mere ability to define operations on characters is that the dependent expressions are treated as objects in their own right. $L(s,\chi)$ is not just an operation on $s$ and $\chi$: fixing $\chi$, the function $s \mapsto L(s,\chi)$ is an object that one can integrate and differentiate, and fixing $s$, we can sum over the values obtained by varying $\chi$.

It is also notable that the characters can be components in the construction of other mathematical objects and structures. For example, one can form sets and sequences of characters, in much the same way that one forms sets and sequences of numbers, and one can define a group whose elements are characters, in much the same way that one can form a group whose elements are residues modulo some number $m$.

To summarize, here are some of the various senses in which one might say that characters are treated ``as objects'' in our presentation of Dirichlet's proof:
\begin{enumerate}
\item Characters fall under a recognized grammatical category, which allows us to state things about them and define operations and predicates on them.
\item There is a clear understanding of what it means for two expressions to represent the \emph{same} character, and conventions ensure that the expressions occurring in a proof respect this ``sameness.''
\item One can quantify and sum over characters; in short, they can fall under the range of a bound variable.
\item One can define functions which take characters as arguments.
\item One can construct new mathematical entities, like sets and sequences, whose elements are characters. In particular, characters can be elements of an algebraic structure like a group.
\end{enumerate}

We recognize that determining the ``ontological commitments'' of a practice may not be as clear-cut as Quine's writings suggest. Our goal here is not to explicate what it means to say that a certain manner of discourse is committed to treating some entity as an object. In particular, we do not claim to have given a precise sense to the question as to whether a particular mathematical proof is committed to functions as objects. We do claim, however, to have identified various important senses in which contemporary proofs of Dirichlet's theorem treat functions as ordinary mathematical objects, whereas Dirichlet's original proof did not.

It may be helpful to compare the way we treat functions today to the way we treat natural numbers. For example, the expressions ``$2 + 2$'' and ``$4$'' both denote integers, but we think of the number as the object denoted, rather than the expression. Thus we can send numbers as arguments to functions, and when we write $f(2 + 2)$ and $f(4)$, it is understood that the function $f$ cannot distinguish the mode of presentation. We can form sets of numbers, like the set of even numbers or the set of prime numbers, and we can consider algebraic structures on these sets; for example, the ring of integers, or the field of integers modulo $7$. We can quantify over numbers in definitions, such as when we say $n$ divides $m$ if there is some $k$ such that $n k = m$, and in theorems, such as when we assert that every integer greater than one has a prime divisor. If $S$ is a finite set of integers and $f$ is a function from the integers to the integers or the reals, we can readily form the sum $\sum_{x \in S} f(x)$.

In contemporary mathematics, nothing goes awry if you replace integers with functions in the examples in the last paragraph. In other words, one can define functionals $F(f)$ that depend only on the extension of $f$, and not its manner of presentation. We can consider sets of functions, rings of functions, and spaces of functions. We quantify over functions in definitions and theorems, and, if $S$ is a finite set of functions, we think nothing of considering a sum $\sum_{f \in S} F(f)$. In the proof of Dirichlet's theorem, these ``higher order'' operations are manifest when we consider the group of characters $\chi$, define the Dirichlet $L$ series $L(s, \chi)$, and form the sum $\sum_\chi \overline{\chi(m)}\log L(s, \chi)$.

In ``Concept,'' we argued in detail that these very features of the modern treatment of functions were alien to early nineteenth-century mathematics, and that the history of presentations of Dirichlet's theorem shows a very gradual evolution, in fits and starts, towards the contemporary manner of thought. We will highlight some of the key features of the historical development in Sections~\ref{dirichlet:section} and \ref{transition:section}, and, in Section~\ref{analysis:section}, explore what the history tells us about the nature of mathematics.
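To make the contrast vivid in contemporary terms, the following minimal sketch (ours, and not drawn from any of the historical texts discussed below) treats the four Dirichlet characters modulo $5$ as first-class values of a programming language: they are built from representing data, collected into a list, passed around as arguments, and summed over, and the orthogonality relation of Corollary~\ref{orthocorrol} is then checked by direct computation. The choices of the primitive root $2$, the root of unity $i$, and the helper \texttt{make\_character} are ours and purely illustrative.
\begin{verbatim}
# Illustrative sketch (not from any historical source): the Dirichlet
# characters modulo 5 as first-class Python values.

p = 5
g = 2  # a primitive root modulo 5, chosen arbitrarily for illustration

# index (discrete logarithm) of each unit modulo 5 with respect to g
index = {pow(g, j, p): j for j in range(p - 1)}

# exact values of the powers of i, to keep the arithmetic exact
i_power = {0: 1, 1: 1j, 2: -1, 3: -1j}

def make_character(a):
    """Return the character chi_a, with chi_a(n) = i**(a * index(n))."""
    def chi(n):
        if n % p == 0:
            return 0
        return i_power[(a * index[n % p]) % (p - 1)]
    return chi

# the group of characters, gathered into a list we can iterate and sum over
characters = [make_character(a) for a in range(p - 1)]

# orthogonality: the sum of chi(n) * conjugate(chi(m)) over all characters
# is p - 1 when n = m, and 0 otherwise
for n in range(1, p):
    for m in range(1, p):
        total = sum(chi(n) * chi(m).conjugate() for chi in characters)
        assert total == (p - 1 if n == m else 0)
\end{verbatim}
The point of the sketch is not the computation itself but the grammar it exhibits: once the list \texttt{characters} is in hand, the final check never mentions the representing data $g$ and $i$ at all; the characters are simply values to be applied, conjugated, and summed over.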
\section{Dirichlet's treatment of characters}
\label{dirichlet:section}

Contemporary mathematicians are often surprised to hear that there is no explicit notion of ``character'' in Dirichlet's 1837 proof. After all, the expressions $X(n)$ defined in Section~\ref{modern:formulation:section} are known as ``Dirichlet characters'' precisely because of their implicit use in that proof. But Dirichlet did not introduce notation for the characters or refer to them as such. When we speak of the ``characters'' in his proof, we are projecting a modern interpretation onto the symbolic expressions that appear there.

Recall how this works in the case where the common difference is a prime, $p$. Let $g$ be a primitive element modulo $p$, and for every $n$ coprime to $p$, let $\gamma_n$ denote the index of $n$ with respect to $g$, so that $g^{\gamma_n} \equiv n \bmod p$. Then each character $\chi$ corresponds to a $(p-1)$st root of unity $\omega$, with defining equation $\chi(n) = \omega^{\gamma_n}$. In that case, Dirichlet wrote $\omega^{\gamma_n}$ where we would write $\chi(n)$. We obtain all the characters by picking a primitive $(p-1)$st root of unity, $\Omega$, so that all the $(p-1)$st roots of unity are given by the sequence $\Omega^0, \ldots, \Omega^{p-2}$. This provides a convenient numbering scheme for the characters and $L$-series: Dirichlet used $L_m$ to denote the $L$-series based on the character $\chi$ that corresponds to $\Omega^m$, where we would write instead $L(s, \chi)$. And where we would form a summation over the set of all characters, Dirichlet instead took a summation over the values $0, \ldots, p-2$. For example, after demonstrating the Euler product formula,
\[
\prod \frac{1}{1-\omega^{\gamma}\frac{1}{q^{s}}}=\sum\omega^{\gamma}\frac{1}{n^{s}}=L,
\]
Dirichlet wrote:
\begin{quote}
The equation just found represents $p-1$ different equations that result if we put for $\omega$ its $p-1$ values. It is known that these $p-1$ different values can be written using powers of the same $\Omega$ when it is chosen correctly, to wit:
\[
\Omega^{0}, \ \Omega^{1}, \ \Omega^{2}, \ \ldots,\ \Omega^{p-2}
\]
According to this notation, we will write the different values $L$ of the series or product as:
\[
L_0,\ L_1,\ L_2,\ \ldots,\ L_{p-2}
\]
\end{quote}

In the case where the modulus $k$ is not prime, the procedure is more complicated. It is a fundamental theorem of group theory that every finite abelian group can be represented as a product of cyclic groups, but that theorem was first proved by Kronecker in 1870 \cite{kronecker:70}. Dirichlet instead used the particular instance of this fact for the group $(\mathbb{Z} / k\mathbb{Z})^*$ of residues modulo $k$ that are relatively prime to $k$ (these are sometimes called the ``units'' modulo $k$). The structure of that group was known to Gauss. First, write $k$ as a product of primes,
\[
k = 2^\lambda p_1^{\pi_1} p_2^{\pi_2} \cdots p_j^{\pi_j}
\]
where each $p_i$ is an odd prime and $\pi_i$ is greater than or equal to 1. Then the group of units modulo $k$ is isomorphic to the product of the groups of units modulo each term in the factorization. If $p$ is an odd prime and $\pi$ is an integer greater than or equal to $1$, then, more generally, one can find a primitive element $c$ modulo $p^\pi$. This means that the residue class of $c$ generates the cyclic group $(\mathbb{Z} / p^\pi \mathbb{Z})^*$, or, equivalently, for every $n$ relatively prime to $p$ there is a $\gamma_n$ such that $c^{\gamma_n} \equiv n \bmod p^\pi$.
Thus we can choose primitive elements $c_1, \ldots, c_j$ corresponding to $p_1^{\pi_1}, p_2^{\pi_2}, \ldots, p_j^{\pi_j}$. If $\lambda \geq 3$, however, there is no primitive element modulo $2^\lambda$. Rather, $(\mathbb{Z}/ 2^\lambda \mathbb{Z})^*$ is a product of two cyclic groups, and for every $n$ relatively prime to $2^\lambda$ there are $\alpha_n$ and $\beta_n$ such that $(-1)^{\alpha_n} 5^{\beta_n} \equiv n \bmod 2^\lambda$. Thus for any $n$ relatively prime to $k$, we can write
\[
n \equiv (-1)^{\alpha_n} 5^{\beta_n} c_1^{\gamma_{1,n}} c_2^{\gamma_{2,n}} \cdots c_j^{\gamma_{j,n}} \bmod k
\]
where each $\gamma_{i,n}$ is the index of $n$ relative to $p_i^{\pi_i}$. As above, if we choose appropriate roots of unity $\theta, \varphi, \omega_1, \omega_2, \ldots, \omega_j$, we obtain a character
\begin{equation*}
\chi(n) = \theta^{\alpha_n} \varphi^{\beta_n} \omega_1^{\gamma_{1,n}} \omega_2^{\gamma_{2,n}} \cdots \omega_j^{\gamma_{j,n}}.
\end{equation*}
And, once again, every character is obtained in this way.

We should note that Dirichlet used the notation $p, p', \ldots$ rather than $p_1, \ldots, p_j$ to denote the sequence of odd primes. Moreover, he used the notation $\alpha, \beta, \gamma, \gamma', \ldots$ to denote the indices, suppressing the dependence on $n$. Thus, Dirichlet wrote $\theta^\alpha \varphi^\beta \omega^\gamma \omega'^{\gamma'} \ldots$ for the expression we have denoted $\chi(n)$ above, leaving it up to us to keep in mind that $\alpha, \beta, \ldots$ depend on $n$.

To summarize, in the simple case of a prime modulus $p$, Dirichlet fixed a primitive element $g$ modulo $p$, and represented each character $\chi$ in terms of a $(p-1)$st root of unity, $\omega$. In that case, the value $\chi(n)$ is given by $\omega^{\gamma_n}$. In the more general case of a composite modulus $k$, Dirichlet fixed primitive elements modulo the terms of the prime factorization of $k$, and represented each character $\chi$ in terms of a sequence $\theta, \varphi, \omega, \omega', \ldots$ of roots of unity. In that case, the value $\chi(n)$ was written $\theta^\alpha \varphi^\beta \omega^\gamma \omega'^{\gamma'} \ldots$, suppressing the information that the exponents $\alpha, \beta, \gamma, \gamma', \ldots$ depend on $n$. For example, he described the Euler product formula as follows:
\begin{quote}
\begin{align}
\prod \frac{1}{1-\theta^{\alpha}\varphi^{\beta}\omega^{\gamma}\omega'^{\gamma'}\ldots\frac{1}{q^{s}}}=\sum \theta^{\alpha}\varphi^{\beta}\omega^{\gamma}\omega'^{\gamma'}\ldots\frac{1}{n^{s}} = L, \label{dirichletEulergeneral}
\end{align}
where the multiplication sign ranges over all primes, with the exclusion of $2, p, p', \ldots$, and the summation ranges over all the positive integers that are not divisible by any of the primes $2, p, p', \ldots$. The system of indices $\alpha, \beta, \gamma, \gamma', \ldots$ on the left side corresponds to the number $q$, and on the right side to the number $n$. The general equation (\ref{dirichletEulergeneral}), in which the different roots $\theta, \varphi, \omega, \omega', \ldots$ can be combined with one another arbitrarily, clearly contains $K$-many particular equations. \cite[p.~17; equation number changed]{Dirichlet37}
\end{quote}
Note, again, Dirichlet's characterization of the general equation as ``containing'' the particular instances. Here, $K$ is what we have called $\varphi(k)$, the cardinality of the group $(\mathbb{Z} / k\mathbb{Z})^*$.
Dirichlet went on to observe that we can choose primitive roots of unity $\Theta, \Phi, \Omega, \Omega', \ldots$ so that all choices of $\theta, \varphi, \omega, \omega', \ldots$ can be expressed as powers of these,
\[
\theta = \Theta^\mathfrak{a}, \varphi = \Phi^\mathfrak{b}, \omega = \Omega^\mathfrak{c}, \omega' = \Omega'^{\mathfrak{c}'}, \ldots,
\]
just as in the simpler case. He wrote that we can thus refer to the $L$-series in a ``convenient'' (\emph{bequem}) way, as $L_{\mathfrak{a}, \mathfrak{b}, \mathfrak{c}, \mathfrak{c}', \ldots}$, where $\mathfrak{a}, \mathfrak{b}, \mathfrak{c}, \mathfrak{c}', \ldots$ are the exponents of the chosen primitive roots.

Notice that the representations just described depend on fixed, but arbitrary, choices of the primitive roots of unity, as well as fixed but arbitrary generators of the cyclic groups. Modulo those choices, we have parameters $\mathfrak{a}, \mathfrak{b}, \mathfrak{c}, \mathfrak{c}', \ldots$ that vary to give us all the characters; and for each choice of $\mathfrak{a}, \mathfrak{b}, \mathfrak{c}, \mathfrak{c}', \ldots$ we have an explicit expression that tells us the value of the character at $n$. For Dirichlet, summing over characters therefore amounted to summing over all possible choices of this representing data.

In the special case where the common difference is a prime, $p$, Dirichlet ran through calculations similar to those described in Section~\ref{modern:formulation:section} to obtain the following identity:
\begin{multline*}
\sum\frac{1}{q^{1 + \rho}} +\frac{1}{2}\sum\frac{1}{q^{2 +2 \rho}} + \frac{1}{3}\sum\frac{1}{q^{3 + 3 \rho}} + \ldots \\
= \frac{1}{p-1} (\log L_{0} + \Omega^{-\gamma_{m}} \log L_{1} + \Omega^{-2\gamma_{m}}\log L_{2} + \ldots + \Omega^{-(p-2)\gamma_{m}} \log L_{p-2}).
\end{multline*}
This is exactly equation (\ref{main:equation:prime:case}) above, with $\Omega$ in place of our $\omega$, $1 + \rho$ in place of $s$, and the ``$O(1)$'' expression left explicit. In the more general case, he arrived at the analogous result:
\begin{multline*}
\sum\frac{1}{q^{1 + \rho}} + \frac{1}{2}\sum\frac{1}{q^{2 +2 \rho}} + \frac{1}{3}\sum\frac{1}{q^{3 + 3 \rho}} + \ldots \\
= \frac{1}{K}\sum \Theta^{-\alpha_{m}\mathfrak{a}}\ \Phi^{-\beta_{m}\mathfrak{b}}\Omega^{-\gamma_{m}\mathfrak{c}}\Omega'^{-\gamma'_{m}\mathfrak{c}'} \cdots \log L_{\mathfrak{a},\mathfrak{b},\mathfrak{c},\mathfrak{c}', \ldots}.
\end{multline*}
Here the summation on the right-hand side of the equation is over the possible values of $\mathfrak{a}, \mathfrak{b}, \mathfrak{c}, \mathfrak{c'}, \ldots$. This corresponds to equation (\ref{sumoverchar2}) in Section~\ref{modern:formulation:section}.

Finally, recall from the sketch in Section~\ref{modern:formulation:section} that Dirichlet divided the $L$ functions into three classes, depending on whether the corresponding character was trivial (identically equal to 1), real-valued, or complex-valued. But in Dirichlet's presentation, the categorization was made in terms of the \emph{roots used to describe the character}. Thus the three classes of $L$ functions were characterized as follows:
\begin{enumerate}
\item the one in which all the roots contained in the expression are $1$
\item those, among the ones that remain, in which all the roots are real ($\pm 1$)
\item those in which at least one of the roots is not real
\end{enumerate}
Dirichlet showed that the first approaches infinity as $\rho$ approaches $0$, while the others approach finite limits, which establishes the desired conclusion.
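To make the correspondence between Dirichlet's indexing and the modern notation concrete, here is a small illustrative dictionary; the example, and the choices $g = 2$ and $\Omega = i$, are ours and do not appear in Dirichlet's paper. For the prime modulus $p = 5$ with primitive element $g = 2$, the indices are $\gamma_1 = 0$, $\gamma_2 = 1$, $\gamma_3 = 3$, and $\gamma_4 = 2$. Taking $\Omega = i$, the four characters are $\chi_j(n) = i^{j \gamma_n}$ for $j = 0, 1, 2, 3$, and Dirichlet's $L_j$ is the modern $L(s, \chi_j)$; for instance,
\[
L_1 = L(s, \chi_1) = 1 + \frac{i}{2^s} - \frac{i}{3^s} - \frac{1}{4^s} + \frac{1}{6^s} + \frac{i}{7^s} - \cdots.
\]
For the residue class $m = 2$, so that $\gamma_m = 1$, Dirichlet's combination on the right-hand side of the first identity above becomes
\[
\frac{1}{4}\bigl(\log L_0 - i \log L_1 - \log L_2 + i \log L_3\bigr),
\]
which is exactly the instance of $\frac{1}{\varphi(5)} \sum_{\chi} \overline{\chi(2)} \log L(s, \chi)$ obtained by writing out the four characters.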
Let us summarize the features of Dirichlet's presentation we wish to highlight. First, he did not name or identify the characters, and simply used the corresponding algebraic expressions. The corresponding $L$ functions were then characterized by the data that appeared in the expression, rather than in terms of a functional dependence on the character. In other words, Dirichlet wrote $L_m$ or $L_{\mathfrak{a},\mathfrak{b},\mathfrak{c},\mathfrak{c}', \ldots}$ where we would write $L(s, \chi)$. As a result, where we would sum an expression over all values of the characters $\sum_{\chi} \ldots$, he summed over the representing data $\sum_m \ldots$ or $\sum_{\mathfrak{a}, \mathfrak{b}, \mathfrak{c}, \mathfrak{c}', \ldots} \ldots$. Finally, in preparation for the analytic part of the proof, he sorted the $L$ functions in terms of this data, rather than in terms of the values of the corresponding characters. In the next section, we will see that, over time, all of these features were gradually eliminated from later expositions.

\section{The transition to the modern treatment of characters}
\label{transition:section}

In ``Concept,'' we studied the treatment of characters in subsequent work by Dirichlet (1840, 1841), Dedekind (1863, 1879), Kronecker (1870's), Weber (1882), Hadamard (1896), de la Vall\'ee Poussin (1897), and Landau (1909, 1927). We will not review all the details here, but, rather, summarize the salient features of the history.

\subsection{Reification}

We have seen that in Dirichlet's original proof, characters are present only in the form of the algebraic expressions $\omega^{\gamma_n}$ in the simple case, and in the form $\theta^{\alpha_n} \varphi^{\beta_n} \omega^{\gamma_n} \omega'^{\gamma'_n} \ldots$ in the case of an arbitrary modulus. In 1841, however, Dirichlet considered expressions
\[
\Omega_{n}=\varphi^{\alpha_{n}}\varphi'^{\alpha'_{n}}\times\ldots\times\psi^{\beta_{n}}\chi^{\gamma_{n}}\psi'^{\beta'_{n}}\chi'^{\gamma'_{n}}\times\ldots\times\theta^{\delta_{n}}\eta^{\varepsilon_{n}}
\]
analogous to the characters in his 1837 proof. In this case, he introduced the explicit notation $\Omega_n$, and isolated four key properties of these values:
\begin{enumerate}
\item $\Omega_{nn'} = \Omega_{n}\Omega_{n'}$ for every $n$ and $n'$.
\item $\Omega_{n'}=\Omega_{n}$ whenever $n'\equiv n \pmod{k}$.
\item $\sum\Omega_{l} = 0$ or $\sum\Omega_{l} = \frac{1}{4}\psi(k)$ depending on whether there is at least one root among the roots in $\Omega_{l}$ that is different from $1$, or whether they are all equal to $1$.
\item $S\Omega_{n} = \frac{1}{4}\psi(k)$ or $S\Omega_{n}=0$ depending on whether $n\equiv 1 \pmod{k}$ or $n\not\equiv 1 \pmod{k}$, where the sign ``$S$'' indicates a sum over all combinations of the roots that can occur in $\Omega$.
\end{enumerate}
In modern terms, the first clause asserts that the function $n \mapsto \Omega_n$ is a multiplicative function from the integers to the complex numbers, and the second asserts that the value $\Omega_n$ only depends on the value of $n$ modulo $k$. If we add the constraint that $\Omega_n$ is nonzero when $n$ is relatively prime to $k$ and zero otherwise, this is exactly the algebraic definition of character we presented in Section~\ref{group:character:section}. The third and fourth properties correspond to the two orthogonality relations we presented in Section~\ref{group:character:section}.
The 1841 article provided only a short sketch of a generalization of his 1837 proof, but it is notable that there Dirichlet went out of his way to flag these expressions as playing a key role, and to abstract away the general properties that are common to both proofs.

In 1863, Dedekind gave an exposition of Dirichlet's proofs in one of the appendices, or ``supplements,'' to the first edition of his presentation of Dirichlet's lectures on number theory \cite{dirichlet:63b}. When presenting the generalization of the Euler product formula, he took care to point out that the function
\[
\psi(n) = \frac{\theta^{\alpha}\eta^{\beta}\omega^{\gamma}\omega'^{\gamma'}\ldots}{n^s}
\]
is multiplicative, and that this is what makes the generalization hold. In the later 1871 edition of the work, he added a footnote, in which he singled out the numerator of this expression, and introduced the notation $\chi(n)$:
\begin{quote}
The numerator [of $\psi(n)$] $\chi(n)=\theta^{\alpha}\eta^{\beta}\omega^{\gamma}\omega'^{\gamma'}\ldots$ has the characteristic property $\chi(n)\chi(n')=\chi(nn')\ldots$\cite[\S 133, footnote]{dirichlet:63b}
\end{quote}
It is notable that he took the trouble to add this footnote, calling attention to the importance of these expressions.\footnote{In ``Concept,'' we mistakenly asserted that Dedekind did not alter the text of this supplement in later editions. He made very few such changes, however, making this particular addition especially interesting.}

In 1879, in the third edition of the lectures, Dedekind introduced the notion of a character in an entirely different context: his theory of ideals in an algebraic number field. Rather than considering characters on the multiplicative group of residues modulo an integer, he considered characters defined on another finite abelian group, namely, on the class group in an algebraic number field:
\begin{quote}
\ldots the function $\chi(\mathfrak a)$ also possesses the property that it takes the same value on all ideals $\mathfrak a$ belonging to the same class $A$; this value is therefore appropriately denoted by $\chi(A)$ and is clearly always an $h$th root of unity. Such functions $\chi$, which in an extended sense can be termed \emph{characters}, always exist; and indeed it follows easily from the theorems mentioned at the conclusion of \S 149 that the class number $h$ is also the number of all distinct characters $\chi_1, \chi_2, \ldots, \chi_h$ and that every class $A$ is completely characterized, i.e.~is distinguished from all other classes, by the $h$ values $\chi_1(A), \chi_2(A), \ldots, \chi_h(A)$.\footnote{The quotation appears in \S 178 in the 1879 edition of the \emph{Vorlesungen} \cite{dirichlet:63b}, and in \S 184 of the 1894 edition, which is reproduced in Dedekind's \emph{Werke} \cite{dedekind:68}. The translation above is by Hawkins \cite[p.~149]{hawkins:71}.}
\end{quote}
As we emphasize in ``Concept,'' this was not only the first use of the term ``character'' in its modern sense, but also, as far as we know, the earliest instance of the use of the term ``function'' for something defined on a domain other than the integers, real numbers, or complex numbers. (A similarly broad use of the term occurs in Frege's \emph{Begriffsschrift}, which was published in the same year.) We will discuss Frege's notion of function in detail in Sections~\ref{frege:section} and \ref{frege:section:b}.
Within three years, in an 1882 publication, Weber gave the general definition of a character of an abelian group and provided a thorough analysis of the properties of such characters. Thus, over time, the symbolic expressions appearing in Dirichlet's proof were named and flagged as entities worthy of attention. Their properties were stated abstractly, and developed in a manner that was independent of the original formulation. This, in turn, made it possible to apply the notion in other settings. As we have argued in Section~\ref{functions:section}, this provides at least a minimal sense in which characters can be viewed as objects, namely, as entities which can bear properties and be a target of assertions.

\subsection{Functional dependence and summation}

In Section~\ref{functions:section}, we also flagged it as notable that, in the modern view, functions can depend on characters, and we can form the sum of an expression with a variable ranging over the characters. Let us consider the way these features of the treatment of characters play out in the various presentations of Dirichlet's theorem. We have noted that one benefit of identifying the characters as such is that it facilitates extracting the central properties that play a role in the proof, such as the identity
\[
\sum_{\chi \in \widehat{G}}\chi(g) =
\begin{cases} |G| & \mbox{if $g=1_{G}$} \\ 0 & \mbox{if $g \neq 1_{G}$} \end{cases}
\]
in Theorem~\ref{ortho}, and the consequence expressed by Corollary~\ref{orthocorrol} that for every $g$ and $h$ in an abelian group $G$,
\[
\sum_{\chi \in \widehat{G}}\chi(g)\overline{\chi(h)}=
\begin{cases} |G| & \ \mbox{if} \ g=h \\ 0 & \ \mbox{if} \ g \neq h.\end{cases}
\]
In the case where $G$ is the group of nonzero residues modulo $p$, Dirichlet expressed the latter by saying that we have
\[
1+\Omega^{h\gamma - \gamma_{m}} + \Omega^{2(h\gamma - \gamma_{m})} + \ldots + \Omega^{(p-2)(h\gamma - \gamma_{m})}=0
\]
except when $h\gamma - \gamma_{m} \equiv 0 \bmod{p-1}$, in which case the sum is equal to $p - 1$. In the case of an arbitrary modulus, Dirichlet did not even extract the conclusion explicitly. Rather, it is implicitly contained in an argument in which he considered the sum $\frac{1}{h}\sum W\frac{1}{q^{h + h\rho}}$,
\begin{quote}
\ldots where the symbol $\sum$ ranges over all primes $q$ and $W$ denotes the product of the sums taken over $\mathfrak{a}, \mathfrak{b}, \mathfrak{c}, \mathfrak{c'}, \ldots$ or respectively over
\[
\sum\Theta^{(h\alpha - \alpha_{m})\mathfrak{a}}, \sum\Phi^{(h\beta - \beta_{m})\mathfrak{b}}, \sum\Omega^{(h\gamma - \gamma_{m})\mathfrak{c}}, \sum\Omega'^{(h\gamma' - \gamma'_{m})\mathfrak{c'}}, \ldots.
\]
\cite[p.~340]{Dirichlet37}
\end{quote}
This makes it harder to appreciate the nature of the cancellation trick. Moreover, although the values $\Theta, \Phi, \Omega, \Omega', \ldots$ can be used to define the individual characters, these tuples and the corresponding representation play no role in the subsequent proof, which depends only on the orthogonality relations and the multiplicative nature of the characters. It seems reasonable, then, to seek a manner of expression that abstracts away the details of the representation. We saw that in his 1841 paper on arithmetic progressions in the quadratic integers, Dirichlet briefly used the expression $S\Omega_n$ to denote the result of summing the values of $\Omega_n$ over all possible combinations of roots that occur in $\Omega$.
Kronecker maintained the dependence of the characters on the defining tuples of data, but found a much more elegant notation for expressing the dependence. He denoted the character corresponding to the tuple of parameters $(k)$ by $\Omega^{(k)}$, and in the case of a modulus $m$, he expressed the second orthogonality relation by writing
\[
\sum_{(k)} \Omega^{(k)}(r_0) = \varphi(m),
\]
when $r_0$ is congruent to $1$ modulo $m$, and
\[
\sum_{(k)} \Omega^{(k)}(r) = 0
\]
otherwise.

In his 1882 paper on general characters, Weber adopted a curious means of abstracting the representation of the characters: he simply assigned arbitrary indices to the characters, listing them as $\chi_1, \ldots, \chi_h$. He then expressed the second orthogonality principle without summation notation, as
\[
\chi_{1}(\Theta) + \chi_{2}(\Theta) + \ldots + \chi_{h}(\Theta) = 0,
\]
for each group element $\Theta$ other than the identity.

In 1896, however, de la Vall\'ee Poussin adopted the notation $S_\chi$ for summation over characters:
\begin{quote}
Consider \ldots the sum extending over all the characters, that is to say over all the systems of roots
\[
S_{\chi}\chi(n)=S_{\omega}\omega_{1}^{\nu_{1}}\omega_{2}^{\nu_{2}} \ldots
\]
\ldots\ \emph{For every number $n$, the sum extending over the totality of characters satisfies
\[
S_{\chi}\chi(n)=0,
\]
the only exception being the case where
\[
n\equiv 1 \pmod{M},
\]
because then all the indices are zero and one has
\[
S_{\chi}\chi(n)=\varphi(M).
\]}
\cite[pp.~14--15]{Poussin96}
\end{quote}
It is notable that he chose a symbol distinct from the usual summation symbol, $\sum$, which he used for sums ranging over natural numbers. Nonetheless, he seems to be the only nineteenth-century author to have taken summation over characters at face value.

Setting aside the orthogonality relations, let us consider the subsequent calculation, involving the $L$-series, where those identities are put to use. We have observed that the modern notation $L(s, \chi)$ allows us to express the dependence of an $L$-series on the character $\chi$, and that the notation $\sum_\chi \overline{\chi(m)}\log L(s, \chi)$ allows us to sum over characters, but these means of expression were not available to Dirichlet. In the case of a prime modulus $p$, Dirichlet defined the $L$ series
\[
L_0, L_1, \ldots, L_{p-2},
\]
where the index corresponds to a particular numeric parameter occurring in the algebraic expression that we now recognize as the value of the corresponding character, and wrote
\[
\log L_{0} + \Omega^{-\gamma_{m}} \log L_{1} + \Omega^{-2\gamma_{m}}\log L_{2} + \ldots + \Omega^{-(p-2)\gamma_{m}} \log L_{p-2}
\]
to sum over the $p - 1$ many $L$ series. In the case of a general modulus $k$, each $L$ series has a similar denotation
\[
L_{\mathfrak{a},\mathfrak{b},\mathfrak{c},\mathfrak{c}', \ldots}
\]
where $\mathfrak{a},\mathfrak{b},\mathfrak{c},\mathfrak{c}', \ldots$ are a sequence of numeric parameters that appear in the algebraic expression for the general character, and the summation is denoted
\[
\sum \Theta^{-\alpha_{m}\mathfrak{a}} \Phi^{-\beta_{m}\mathfrak{b}}\Omega^{-\gamma_{m}\mathfrak{c}}\Omega'^{-\gamma'_{m}\mathfrak{c}'} \cdots \log L_{\mathfrak{a},\mathfrak{b},\mathfrak{c},\mathfrak{c}', \ldots}
\]
where the summation ranges over the $\varphi(k)$ many choices of values of $\mathfrak{a}, \mathfrak{b}, \mathfrak{c}, \mathfrak{c'}, \ldots$.
Thus Dirichlet took the $L$ series to depend on particular tuples of numeric parameters involved in the definition of the characters, and took summations to range over these parameters. Dedekind's 1863 presentation followed Dirichlet in this respect, as did de la Vall\'ee Poussin's 1897 presentation. Hadamard in 1896 and Landau in 1909 adopted a tack similar to Weber's, assigning arbitrary indices to the characters, and then letting the $L$-series depend on those indices. For example, Hadamard wrote $\psi_1, \psi_2, \ldots, \psi_{\varphi(k)}$ for the list of characters modulo $k$, and defined the $L$-functions as follows:
\[
L_{v}(s)=\sum_{n=1}^{\infty}\frac{\psi_{v}(n)}{n^{s}}.
\]
The key summation over the characters is then written $\sum_{v}\frac{\log L_{v}(s)}{\psi_{v}(m)}$.

To the modern eye, it seems strange to assign otherwise meaningless indices to the characters in order to express the functional dependence of the $L$ series on a character and to sum over them, when one can just write $L(s, \chi)$ and $\sum_\chi$. But while it was perfectly natural in the nineteenth century to sum over integers, summing over the functions themselves may not even have occurred to these authors. It is not until 1897 that we first see $L$ series expressed as a functional dependence on characters, when de la Vall\'ee Poussin introduced the notation $Z(s, \chi)$. Subsequent authors adopted the notation $L(s, \chi)$, reverting to Dirichlet's use of the letter $L$. By 1927, for example, Landau was using $L(s, \chi)$ and $\sum_\chi$ just as we do today, and from then on the usage seems to have stuck.

\subsection{Extensionalization}

Let $f(x)$ be the function on the real numbers defined by $f(x) = 3 x^2 + 1$. In logical parlance, the \emph{intension} of this last expression is the manner of presentation, in some sense --- if not the purely syntactic string of symbols, something close to it. In contrast, the \emph{extension} is the abstract object denoted, that is, the abstract input-output relation. Today, when we refer to functions, we generally have their extensions in mind. A note of intensionality creeps in when we say things like ``the leading coefficient of $f$'' or ``the constant term of $f$,'' but when called on to explain what we mean, we are generally able to clarify that by ``$f$'' we really mean the expression for $f$ rather than the object itself. The extensional nature of the function concept is embodied in the fact that when we define a functional $F(f)$ on a collection of functions, we ensure that the definition does not depend on the manner of presentation of $f$, since $F$ is supposed to ``act'' on the extension, not the intension.

In ``Concept,'' we argued that this distinction was not as clearly drawn in the nineteenth-century treatment of functions. Early instances of functions --- not just functions on the real and complex numbers, but also objects like permutations, automorphisms, and so on --- were more tightly associated with a manner of expression. The history of the treatment of characters in Dirichlet's theorem shows exactly this sort of ambiguity, and a gradual move towards an extensional treatment.\footnote{A referee has suggested that ``abstraction'' and ``abstract treatment'' may be more apt than ``extensionalization'' and ``extensional treatment,'' since ``extensionality'' is often associated with a set-theoretic interpretation of functions.
As the referee concedes, however, the word ``abstract'' has multiple connotations, and so we have stuck with the more focused terminology.}

Consider, for example, the definition of the concept of character itself. For each $k$, the set of characters modulo $k$ can be defined extensionally, as the set of nonzero homomorphisms from $(\mathbb Z/ k \mathbb Z)^*$ to the complex numbers, or intensionally, as the set of functions defined by certain algebraic expressions involving certain primitive elements modulo the prime powers occurring in the factorization of $k$, and certain complex roots of unity. Even though the two definitions give rise to the same set of characters, proofs can differ in the extent to which they rely on the specific representations or the abstract characterizing property. Dirichlet's proof relied only on the symbolic representations, but we have seen that later proofs emphasized the key properties of the characters, which were extensional in nature.

Recall also that Dirichlet divided the $L$ series into three classes, according to a corresponding division of the characters on which they depend. Dirichlet described the division in terms of the tuples of roots appearing in the algebraic expressions, whereas a modern characterization describes the three kinds of characters as follows:
\begin{enumerate}
\item the character with constant value $1$
\item the (other) real-valued characters
\item the (other) complex-valued characters
\end{enumerate}
What is perhaps surprising is that even as later authors introduced notation like $\chi$ or $\psi_i$ to range over characters, they still carried out the classification in terms of the roots. For example, both Dedekind's and Hadamard's divisions of the characters into the trivial, real, and complex cases were also described in terms of the characters' representations, even though the distinction is naturally expressed in terms of the values they take. Kronecker and de la Vall\'ee Poussin provided both descriptions, and even though Kronecker made it clear that all operations and classifications could be carried out, algorithmically, in terms of the canonical representations, his careful choice of notation and organization made the extensional properties salient. By 1927, Landau clearly favored the extensional characterization in his textbook.

As yet another means of highlighting the difference between intensional and extensional ways of thinking about functions, we will close this section by noting that a number of the authors we considered adopted a strikingly similar means of describing identities parameterized by the characters. Recall that after stating the generalized version of the Euler product identity (\ref{dirichletEulergeneral}), Dirichlet wrote:
\begin{quote}
The general equation, in which the different roots $\theta, \varphi, \omega, \omega', \ldots$ can be combined with one another arbitrarily, clearly contains $K$-many particular equations.
\end{quote}
The notion of a single identity ``containing'' $K$-many particular equations sounds strange to us today. In contrast to thinking of an identity like $e^{x + y} = e^x e^y$ as a single equation in which $x$ and $y$ are taken to range over the real or complex numbers, it is almost as though Dirichlet conceived of the generalized Euler product formula as a \emph{template}, or a \emph{schema}, for the particular assertions obtained by instantiating the variables $\theta, \varphi, \omega, \omega', \ldots$ with the particular data representing each character.
In a similar way, when Dedekind defined the $L$ series in 1863, he wrote:
\begin{quote}
Since these roots can have $a, b, c, c', \ldots$ values, respectively, the form $L$ contains altogether $abcc'\ldots = \varphi(k)$ different particular series\ldots
\end{quote}
This manner of speaking persisted even after authors began using a single symbol $\chi$ to stand for an arbitrary character. For example, in 1882, Weber, after deriving a pair of identities involving an arbitrary character $\chi$, wrote:
\begin{quote}
Each of the formulas \ldots represents $h$ different formulas, corresponding to the $h$ different characters $\chi_1, \chi_2, \ldots, \chi_h$.
\end{quote}
And in a very similar situation, de la Vall\'ee Poussin wrote in 1897:
\begin{quote}
\ldots this equation (E) represents in reality $\varphi(M)$ distinct ones, which result from exchanging the characters amongst themselves.
\end{quote}
Such language suggests that, to some extent, authors thought of the act of ``instantiating'' a general identity involving characters at a particular character as somewhat different from instantiating a general identity over numbers at a particular number.

\section{Methodology and ontology revisited}
\label{analysis:section}

Let us review some of the general historical trends we have discerned in the treatment of characters. Over time, authors isolated certain symbolic expressions appearing in Dirichlet's proof, viewed them as functions of an integer parameter (or equivalence class) $n$, and baptized them ``characters.'' They isolated important properties of the characters and articulated them in a way that rendered them independent of the rest of the proof. Collaterally, this made it possible to generalize the notion of a character on a multiplicative group of residues to the notion of a character on any abelian group. Initially, each character was seen to be represented by a bundle of defining data, so what we now characterize as a functional dependence on the character was expressed as a dependence on the bundle of data, and a summation over the characters was expressed as a summation over a range of values of the bundle of data. But, over time, the role that the representing data had to play in the proof was diminished. Authors began to adopt notation and patterns of argumentation that suppressed that information, for example, by assigning arbitrary indices to the characters and letting expressions depend on those indices. Ultimately, authors simply began expressing functional dependences on, and summing over, the characters themselves.

Avoiding the need to refer to any particular representation of the characters meant relying instead on properties of the characters that can be expressed in terms of the values they take on suitable inputs. In other words, it amounted to adopting an extensional view of the characters, in which statements about the characters are cast purely in those terms. In contemporary proofs of Dirichlet's theorem, this is taken to the extreme when we define the set of characters as the set of nonzero homomorphisms from the group in question to the complex numbers, and carry out the proof without indicating any way of representing individual characters, let alone means of computing with them.

One might describe these changes as ``merely notational,'' or ``merely pragmatic.'' But dismissing them in that way belies the fact that these changes reflect a fundamentally different way of talking about, thinking about, and reasoning about the characters.
And this was by no means an isolated example. As we have noted in the introduction, during the nineteenth century the treatment of other mathematical entities that we now take to be instances of sets, functions, or structures evolved in similar ways, and for similar reasons. So the history we have traced here is but one instance of a general transformation in mathematical thought, with a new conception of the basic objects of mathematics and appropriate means of reasoning about them. It seems strange to resist seeing this as a change in ontology. (Gray \cite{gray:92} nicely emphasizes this point.)

According to the historical model described in Section~\ref{metaphysics:section}, we should view the history of Dirichlet's theorem as a response to fundamental methodological pressures, as mathematicians struggled to meet both intrinsic and extrinsic mathematical goals while respecting intrinsic and extrinsic methodological constraints. As philosophers, we should not be interested so much in the historical and psychological contingencies that shaped the process, but, rather, in the sense in which the outcome is rational and justified. In other words, we wish to understand the extent to which the methods of contemporary mathematics serve to achieve our mathematical goals, given some conception of those goals and what it means to do mathematics. Attention to the history can bring some of the goals and constraints to light, but then we are left to weigh their importance and assess the merits of the present solution. This is the point at which philosophical analysis must come into play. In broad terms, here we will view mathematics as a process by which finite beings attempt to impose a useful order on the complex and varied data that confronts them. The philosophical task is then to develop more refined characterizations of the mathematical process, in terms that adequately reflect the constraints we face as mathematical agents and the goals we pursue.

In ``Concept,'' we provided a detailed discussion of some of the various methodological benefits and concerns that accrue to the use of the modern function concept. Let us briefly review these here, and see what they have to tell us about the nature of mathematics. Treating characters as objects, in all the senses described in Section~\ref{functions:section}, brings a number of methodological benefits. Expressions become simplified, meaning that the reader has to keep track of less information when parsing them, and the author of a proof can record and convey the relevant information more compactly. Proofs become simplified as well, meaning that readers have to keep track of less information while following the argumentative structure of a proof, and authors have to keep track of less information while working out the details. Information that is irrelevant to the argument at hand, or can be made so, is suppressed, making key data and relationships more salient. Moreover, proofs become more modular, as properties of the characters are abstracted away and proved separately. This further supports the aim of reducing the amount of information in play at any given point. While developing a theory of the characters, we need only work with their defining properties, and when checking that particular instances of functions are characters, we need only check that these instances satisfy the defining properties.
Then, when reasoning about these particular characters, we can invoke results from the general theory, such as the orthogonality lemma, as ``black boxes.'' The fact that extraneous information has been filtered out means that expressions depend on fewer parameters, and inferences depend on fewer assumptions. This makes it easier to check details and avoid mistakes.

Modularity brings additional benefits, in that definitions and theorems that have been abstracted away from the body of the proof can be reused elsewhere. The process of abstraction clarifies the data that serves to parametrize a definition and the hypotheses that are required to establish a proposition. This facilitates not only using the definitions and propositions in other contexts, but also modifying the definitions and propositions by varying the parameters and hypotheses accordingly. In this way, modularity supports generality as well as reuse. Thus, with a modular structuring, dependencies between mathematical components are minimized, and the mathematics becomes easier to understand. It also becomes easier to ensure correctness, and components can be modified and reused. Notice, incidentally, that these are exactly the benefits associated with modularity in software engineering.\footnote{This topic is explored in greater detail in \cite{avigad:unp}.}

The key point is that treating characters as objects supports this modularity. To start with, identifying characters as ``things'' means that they can be objects of study. We can make assertions about them, and specify predicates and functions that take them as arguments. Moreover, notations, definitions, and theory designed to handle other ``things'' now apply: we can form sums that range over the characters and reason about them; we can form sets and sequences of characters and reason about them; we can consider groups of characters and reason about them; and so on. In short, all of the methods that are available to us for reasoning about mathematical objects become applicable to reasoning about characters.

Given the apparent benefits of treating characters as full-blooded objects, why did it take so long for the mathematical community to do so? When we look back at the history of mathematics, it is hard to appreciate the difficulties that accompany significant shifts in method, but they are substantial. Mathematics is a communal activity: when a mathematician writes a proof, his or her intention is that others will read it and judge it to be informative and correct. This requires that the author and the reader have a common understanding not only as to what is permissible, but also as to what is appropriate and desirable. In Section~\ref{metaphysics:section}, we enumerated some of the concerns that arise when new methods are introduced. In ``Concept,'' we explored the way these concerns apply specifically to the modern treatment of characters, and to functions more generally.

To start with, it is important that the new manner of speaking about functions come with clear rules of use. If there is no agreement as to which inferences are permissible --- for example, under what conditions it is legitimate to consider two expressions denoting functions as ``equal,'' and to substitute one expression for another in a given context --- then the mathematical enterprise falls apart, and mathematicians cannot read each other's proofs. Moreover, whether the rules of use are presented explicitly or implicitly, there is also the question as to whether they are consistent.
Even if we think of the new treatment of characters as a mere short cut to establishing Dirichlet's theorem, such short cuts are clearly illegitimate if they lead to false or nonsensical conclusions. It is by no means apparent that there are no hidden pitfalls in quantifying over characters, summing over characters, and treating characters as arguments to other functions. It would be mathematically reckless to adopt these devices out of sheer convenience, without some assurances that the results obtained are reliable. As suggested in Section~\ref{metaphysics:section}, to some extent it helps to know how the new methods can be interpreted in terms of the prior methods, bolstering the understanding that \emph{if} we view talk of characters as a short cut to proving new theorems, the long way is still, in principle, open to us.

Even if the new rules of use seem to be reliable, there is still the question as to whether they are meaningful. We argued in Section~\ref{transition:section} that early authors tended to think of characters as symbolic expressions of a certain kind, or at least, as entities with canonical representations as such symbolic expressions. If the new methods no longer support such a view, one has to come to terms with the question of how one \emph{should} think of a character. Put succinctly, once we have proved a statement about characters, what do we know?

And even if we come to believe that a certain manner of working with characters is consistent, legitimate, and meaningful, there is still the question as to whether it constitutes \emph{good mathematics}, which is to say, whether it furthers our epistemic goals and provides satisfactory answers to our questions. This issue becomes pressing when we try to reconcile a computational conception of mathematics with the new methods of abstraction. For most of its history, mathematics was essentially computational, supplying methods of calculation that could be used to predict the motion of the planets, succeed in games of chance, and compute lengths and magnitudes of all sorts. A central feature of the modern treatment of characters is that it suppresses details of how to represent and compute with individual characters, and often even eliminates these details entirely. We may feel as though we have an understanding of what it means for a function, viewed as a general procedure, to take a natural number as input, but what does it mean for a function to take a character, viewed abstractly, as input? If we expect a mathematical theory of characters to tell us how to represent them and compute with them, then a theory that fails to provide that information is simply defective.

Separating concerns as we have done here is somewhat artificial. For example, maintaining a computational view of characters is one way of interpreting their meaning, and the ability to ascribe any sort of meaning to mathematical objects tends to clarify the rules of use and support the belief that these rules are consistent. Notice, also, that on our analysis, the factors that ultimately support adopting a modern treatment of functions are an uneasy mix of pragmatic, empirical, and broadly philosophical considerations. That does not mean that they are not good reasons, however, nor that we have not made important philosophical progress by understanding them better.
In the latter half of the nineteenth century, Frege's development of formal logic was designed to represent mathematical language and methods of reasoning, and to offer clear recommendations as to proper usage. Famously, the notion of ``function'' is central to his account, as is an understanding of the relationship between ``function'' and ``object.'' In the remaining sections of this essay, we will consider Frege's analysis, and argue that his logical and philosophical choices were influenced by many of the same considerations that were faced by his mathematical peers.

\section{Frege's view of functions and objects}
\label{frege:section}

In 1940, Alonzo Church presented a formulation of type theory \cite{church:40}, now known as ``simple type theory.'' Simple type theory can serve as a foundation for a significant portion of mathematics, and, indeed, is the axiomatic foundation of choice for a number of computational interactive theorem provers today \cite{gordon:melham:93,harrison:07c,nipkow:et:al:02}. One starts with some basic types, say, a type $\mathbb{B}$ of Boolean truth values and a type $\mathbb{N}$ of natural numbers, and one forms more complex types $\sigma \times \tau$ and $\sigma \to \tau$ from any two types $\sigma$ and $\tau$. Intuitively, elements of type $\sigma \times \tau$ are ordered pairs, consisting of an element of type $\sigma$ and an element of type $\tau$, and elements of type $\sigma \to \tau$ are functions from $\sigma$ to $\tau$.

In a type-theoretic approach to the foundations of mathematics, one identifies sets of natural numbers with predicates, which is to say, elements of type $\mathbb{N} \to \mathbb{B}$. Binary relations on the natural numbers are then elements of type $\mathbb{N} \times \mathbb{N} \to \mathbb{B}$, and sequences of natural numbers are elements of type $\mathbb{N} \to \mathbb{N}$. Objects at this level are called \emph{type 1} elements, because they require one essential use of the function space arrow. Integers can be identified with pairs of natural numbers and rationals can be identified with pairs of integers in the usual ways. Real numbers are then Cauchy sequences of rationals (elements of type 1), or equivalence classes of such, which puts them at type 2. Functions from the reals to the reals and sets of reals are then elements of type 3, and sets of functions from the reals to reals or collections of sets of real numbers are then elements of type 4. For example, the collection of Borel sets of real numbers is an element of type 4, as is Lebesgue measure, which maps certain sets of real numbers to the real numbers. A set of measures on the Borel sets of the real numbers is an element of type 5. And so on up the hierarchy.

Simple type theory can be viewed as a descendant of the ramified type theory of Russell and Whitehead's \emph{Principia Mathematica} \cite{russell:whitehead:10}, which, in turn, was inspired by the formal system of Frege's \emph{Grundgesetze der Arithmetik} \cite{frege:grundgesetze}. Starting with a basic type of individuals, Frege's system also has variables ranging over higher-type functionals, and so can be seen as an incipient form of modern type theory. For that reason, it may come as a surprise to logicians familiar with the modern type-theoretic understanding that the foundational outlook just described is \emph{not at all} the image of mathematics that Frege had in mind. It is this image that we wish to explore here.
Frege took concepts to be instances of functions; for example, in ``Function and concept'' he wrote that ``a concept is a function whose value is always a truth value'' \cite[p.~139]{Frege91}.\footnote{We should note that in this section we will focus on his views from 1884 onwards. Prior to this, he seems to have held a different view of concepts, though he still maintained that they are not objects; see \cite[p.~136]{compthought}.} And, throughout his career, he was insistent that functions are not objects. The third ``fundamental principle'' in his \emph{Grundlagen der Arithmetik} of 1884 was ``never to lose sight of the distinction between concept and object'' \footnote{``\ldots der Unterschied zwischen Begriff und Gegenstand ist in Auge zu behalten.''} \cite[Introduction]{Frege84}, and he later asserted that ``it will not do to call a general concept word the name of a thing'' \cite[\S\,51]{Frege84}.\footnote{``\ldots ist es unpassend, ein allgemeines Begriffswort Namen eines Dinges zu nennen.''} The distinction features prominently in his essays ``Function and concept,'' ``Comments on \emph{Sinn} and \emph{Bedeutung}'' and ``Concept and object'' of 1891, 1891/2, and 1892, respectively. According to Frege, the proper distinction is tracked by linguistic usage: objects are denoted by words and phrases that can fill the subject role in a grammatical sentence, whereas concepts are denoted by words and phrases that can play the role of a predicate. In ``Concept and object'' he wrote: \begin{quote} We may say in brief, taking ``subject'' and ``predicate'' in the linguistic sense: a concept is the \emph{Bedeutung} of a predicate; an object is something that can never be the whole \emph{Bedeutung} of a predicate, but can be the \emph{Bedeutung} of a subject.\footnote{``Wir k\"{o}nnen kurz sagen, indem wir ``Pr\"{a}dikat'' und ``Subjekt'' im sprachlichen Sinne verstehen: Begriff ist Bedeutung eines Pr\"{a}dikates, Gegenstand ist, was nie die ganze Bedeutung Pr\"{a}dikates, wohl aber Bedeutung eines Subjekts sein kann.'' The word \emph{Bedeutung} is often translated as ``reference'' or ``denotation.'' But for difficulties in the translation, see \S4 of the introduction to Beaney \cite{FregeReader}.} \cite[pp.~198]{cando} \end{quote} And: \begin{quote} A concept---as I understand the word---is predicative. On the other hand, a name of an object, a proper name, is quite incapable of being used as a grammatical predicate.\footnote{``Der Begriff---wie ich das Wort verstehe---ist pr\"{a}dikativ. Ein Gegenstandsname hingegen, ein Eigenname ist durchaus unf\"{a}hig, als grammatisches Pr\"{a}dikat gebraucht zu werden.''} \cite[pp.~193]{cando} \end{quote} In the sentence, ``Frege is a philosopher,'' the word ``Frege'' denotes an object, and the phrase ``is a philosopher'' denotes a concept. Frege clarified the distinction by explaining that functional expressions, including concept expressions, are ``unsaturated,'' or incomplete. These stand in contrast to signs that are used to denote objects, which are complete in and of themselves. For example, in the sentence ``Frege is a philosopher,'' the expression ``Frege'' is saturated, and succeeds in picking out an object. 
In contrast, the expression ``\ldots is a philosopher'' contains a gap, and fails to name an object until one fills in the ellipsis, at which point the expression denotes a truth value.\footnote{While the distinction between saturated and unsaturated expressions is cast as a distinction between linguistic signs, in his 1904 essay ``What is a Function?'' Frege made it clear that the dichotomy extends to functions and objects themselves: ``The peculiarity of functional signs, which we here called `unsaturatedness', naturally has something answering to it in the functions themselves. They too may be called `unsaturated' \ldots'' (``Der Eigent\"{u}mlichkeit der Funktionszeichen, die wir Unges\"{a}ttigtheit genannt haben, entspricht nat\"{u}rlich etwas an den Funktionen selbst. Auch diese k\"{o}nnen wir unges\"{a}ttigt nennen \ldots'') \cite[p.~665]{Frege04}.} Having distinguished between concepts and objects in such a way, Frege had to deal with objections, such as the one he attributed to Benno Kerry in ``Concept and object.'' In the sentence ``The concept `horse' is a concept easily attained'' the concept denoted by ``horse'' does fill the subject role. Frege's surprising answer was to deny that the phrase ``the concept `horse' '' denotes a concept. He conceded that this sounds strange: \begin{quote} It must indeed be recognized that we are confronted by an awkwardness of language\ldots if we say that the concept \emph{horse} is not a concept\ldots.\footnote{``Es kann ja nicht verkannt werden, da{\ss} hier eine freilich unvermeidbare sprachliche H\"{a}rte vorliegt, wenn wir behaupten: der Begriff Pferd ist kein Begriff \ldots.''} \cite[pp.~196--197]{cando} \end{quote} Yet, he insisted, this is what we must do. He was already clear about this in the \emph{Grundlagen}: \begin{quote} The business of a general concept word is precisely to signify a concept. Only when conjoined with the definite article or a demonstrative pronoun can it be counted as the proper name of a thing, but in that case it ceases to count as a concept word. The name of a thing is a proper name.\footnote{``Ein allgemeines Begriffswort bezeichnet eben einen Begriff. Nur mit dem bestimmten Artikel oder einem Demonstrativpronomen gilt es als Eigenname eines Dinges, h\"{o}rt aber damit auf, als Begriffswort zu gelten. Der Name eines Dinges ist ein Eigenname.''} \cite[\S 51]{Frege84} \end{quote} And so, in ``Concept and object,'' he reminded us: \begin{quote} If we keep it in mind that in my way of speaking expressions like ``the concept $F$'' designate not concepts but objects, most of Kerry's objections already collapse.\footnote{``Wenn wir festhalten, da{\ss} in meiner Redeweise Ausdr\"{u}cke wie ``der Begriff $F$'' nicht Begriffe, sondern Gegenst\"{a}nde bezeichnen, so werden die Einwendungen \emph{Kerrys} schon gr\"{o}{\ss}tenteils hinf\"{a}llig.''} \cite[pp.~198--199]{cando} \end{quote} He similarly urged us to reconstrue expressions like ``all mammals have red blood'' as ``whatever is a mammal has red blood'' so as to avoid the impression that the predicate ``has red blood'' is being applied to an object, ``mammal.'' Although these examples deal with concepts, Frege's analysis makes it clear that he intended the linguistic separation to remain operant for other kinds of functions as well. At the same time, Frege was equally dogmatic in insisting that what we commonly take to be mathematical objects really \emph{are} mathematical objects as such.
The introduction to his \emph{Grundlagen} begins as follows: \begin{quote} When we ask someone what the number one is, or what the symbol 1 means, we get as a rule the answer ``Why, a thing.''\footnote{``Auf die Frage, was die Zahl Eins sei, oder was das Zeichen 1 bedeute, wird man meistens die Antwort erhalten: nun, ein Ding.'' All our translations from the \emph{Grundlagen} are taken from the Austin translation cited in the references.} \cite[Introduction]{Frege84} \end{quote} The claim is so curious as to give one pause.\footnote{We are grateful to Steve Awodey for this observation.} The fact that Frege used such a brazen rhetorical flourish to frame the whole project makes it clear just how central the issue is to his analysis. Once again, he took the distinction to be tracked by linguistic use. For example, because the number $7$ plays the role of a subject in the statement ``$7$ is odd,'' $7$ must be an object. But, once again, Frege had to deal with sentences where the syntactic role of a number is murkier. For example, he considered uses of number terms in language that are attributive and do not occur prefixed by the definite article, for example, ``Jupiter has four moons'' \cite[\S 57]{Frege84}. He wrote \begin{quote} ``\ldots our concern here is to arrive at a concept of number usable for the purposes of science; we should not, therefore, be deterred by the fact that in the language of everyday life number appears also in attributive constructions. That can always be got round.''\footnote{``Da es uns hier darauf ankommt, den Zahlbegriff so zu fassen, wie er f\"{u}r die Wissenschaft brauchbar ist, so darf es uns nicht st\"{o}ren, dass im Sprachgebrauche des Lebens die Zahl auch attributiv erscheint. Das l\"{a}sst sich immer vermeiden.''} \cite[\S 57]{Frege84} \end{quote} Specifically, it can be got round by writing an attributive statement such as ``Jupiter has four moons'' as ``the number of Jupiter's moons is the number 4, or 4'' \cite[\S 57]{Frege84}, thereby eliminating the attributive usage. So, for Frege, functions are not objects, but numbers are, because they play the subject role in mathematical statements and can be used with the definite article. There is clearly a difficulty lurking nearby. At least from a modern standpoint, we tend to view functions, sequences, sets, and structures as objects, and certainly in Frege's time locutions such as ``the function $f$'' and ``the series $s$'' were common. Frege's response was similar to his response to Kerry's objection, namely, to deny that expressions like these denote functions. To understand how this works, consider the fact that Frege's logical system includes an operator which takes any function $f$ from objects to objects and returns an object, $\overset{,}{\varepsilon} f (\varepsilon)$, intended to denote its ``course-of-values'' or ``value range.'' If $f$ is a concept, which is to say, a function which for each object returns a truth value, the course-of-values of $f$ is called the ``extension'' of the concept. Frege's Basic Law V asserts that two functions which are extensionally equal---that is, which return equal output values for every input---have the same courses-of-values. Frege used these courses-of-values and extensions as object-proxies for functions and concepts. This is how he analyzed the concept of a cardinal number. Let $F$, for example, be a second-level concept, such that $F$ holds of a first-level concept $f$ if and only if $f$ holds of exactly one object.
Frege took the number one to be the extension of $F$, thereby achieving the goal of making the number one, well, a thing. But this ``pushing down'' trick is central to the methodology of the \emph{Grundgesetze}: whenever the formal analysis of common mathematical objects seems to suggest identifying such objects as functions or concepts, Frege avoided doing so by replacing the function or concept with its extension. For example, in the \emph{Grundgesetze} he circumvented the need to define mathematical operations on sequences and relations construed as functions, defining the operations rather on the associated courses-of-values.\footnote{In fact, the definition of the number one earlier in the paragraph describes, more precisely, the construction in the \emph{Grundlagen}. In the \emph{Grundgesetze}, he took $F$ to be a \emph{first}-level concept that holds of classes, i.e.~extensions of concepts, that contain one element. This is a nice illustration of how the ``pushing down'' trick can be used repeatedly to avoid the use of higher types. See Reck \cite[Section 5]{reck:07} for a discussion of the two definitions, and Burgess \cite{burgess:05} for an overview of Frege's methodology.\label{number:footnote}} Referring to Frege's concepts as ``attributes'' and their extensions as ``classes,'' Quine described the difference as follows: \begin{quote} Frege treated of attributes of classes without looking upon such discourse as somehow reducible to a more fundamental form treating of attributes of attributes. Thus, whereas he spoke of attributes of attributes as \emph{second-level} attributes, he rated the attributes of classes as of first level; for he took all classes as rock-bottom objects on par with individuals. \cite[p.~147]{quine:55} \end{quote} Frege never got so far as developing mathematical analysis in his system, and we cannot say with certainty how he would have developed, for example, ordinary calculus on the real numbers. But there is a strong hint that here, too, he would have taken, for example, operations like integration and differentiation to operate on extensions, rather than functions, in his system. He touched on the history of analysis in his ``Function and concept'' of 1891, and noted that, for example, differentiation can be understood as a higher-type functional. \begin{quote} Now at this point people had particular second-level functions, but lacked the conception of what we have called second-level functions. By forming that, we make the next step forwards. One might think that this would go on. But probably this last step is not so rich in consequences as the earlier ones; for instead of second-level functions one can deal, in further advances, with first-level functions---as shall be shown elsewhere.\footnote{``Damit hatte man nun einzelne Funktionen zweiter Stufe, ohne jedoch das zu erfassen, was wir Funktion zweiter Stufe genannt haben. Indem man dies tut, macht man den n\"{a}chsten Fortschritt. Man k\"{o}nnte denken, dass dies so weiter ginge. Wahrscheinlich ist aber schon dieser letzte Schritt nicht so folgenreich wie die fr\"{u}heren, weil man statt der Funktionen zweiter Stufe im weiteren Fortgang Funktionen erster Stufe betrachten kann, wie an einem anderen Orte gezeigt werden soll.''} \cite[p.~31]{Frege91} \end{quote} Presumably, he had the method of replacing functions by their extensions in mind.
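To give a rough sense of how such a reduction might go---the notation here is ours, not Frege's, and the example is only an illustrative sketch, not a reconstruction of anything Frege actually carried out---consider differentiation. Instead of treating it as a second-level function $D$ that takes a first-level function $f$ to the first-level function $Df$, one can introduce a first-level function $d$ on courses-of-values satisfying
\[
d\bigl(\overset{,}{\varepsilon}\, f(\varepsilon)\bigr) \;=\; \overset{,}{\varepsilon}\, (Df)(\varepsilon),
\]
so that all subsequent talk of differentiation can be carried out at the level of objects.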
Notice, incidentally, that Frege's method of representing mathematical functions as courses-of-values has the effect that mathematical functions are treated extensionally. For example, defining the integral as an operation that applies to a course-of-values means that integration cannot distinguish mathematical functions that are extensionally equal, since any two descriptions of a function that satisfy extensional equality have the same course-of-values, by Basic Law V. There are other interesting features of Frege's treatment of functions that push us away from identifying them with the functions of ordinary mathematics. For example, for Frege, every function has to be defined on the entire domain of individuals; even if one is interested in the exponential function on the real numbers, one has to specify a particular (but arbitrary) value of this function for every object in existence.\footnote{However, Patricia Blanchette has argued \cite{blanchette:12} that Frege intended theories presented in his formal system to treat objects in the domain of a particular subject, in which case ``every object in existence'' really means ``every object in the theory's intended domain.''} And the separation of functions and objects has other effects on the system. There is only one basic type, so, for example, truth values live alongside everything else. There is no notion of identity between higher-type objects---the equality symbol can only be applied to equality between objects---even though Frege pointed out that one can define an extensional notion of ``sameness'' of functions and concepts, for example, saying that two functions from individuals to individuals are the ``same'' if their values are identical at each input. Frege's system, of course, includes the axiom of universal instantiation. In contemporary notation, this would be expressed as $\fa x \varphi(x) \rightarrow \varphi(a)$ where $x$ is a variable ranging over individuals and $a$ is any individual term. It also includes the corresponding axiom $\fa F \varphi(F) \rightarrow \varphi(A)$, where $F$ ranges over functions from objects to objects and $A$ is any expression for such a function. Notably, however, the system does not include analogous axioms for elements of the higher types: the ``pushing down'' trick obviates the need for these. All things considered, Frege's foundational treatment of mathematics seems closer to modern set-theoretic treatments, where there is one homogeneous universe of individuals. Truth values are individuals, numbers are individuals, mathematical sequences and series are individuals---all bona-fide mathematical objects are individuals. Functions are special sorts of entities that our partial expressions refer to when we make statements about objects, but they are not objects in their own right. As Marco Panza puts it: \begin{quote} \ldots according to Frege, appealing to functions is indispensable in order to fix the way his formal language is to run, but functions are not as such actual components of the language. More generally, functions manifest themselves in our referring to objects---either concrete or abstract---and making statements about them, but they are not as such actual inhabitants of some world of \emph{concreta} and \emph{abstracta}. \cite[p.~14]{panza:unp} \end{quote} This is not to say that functions are any less ``real'' or objective than mathematical objects like numbers, only that they play a distinct role: they allow us to define objects, say things about objects, and reason about objects, but they are not objects themselves.
\section{Frege's foundational concerns} \label{frege:section:b} We have seen that a curious tension lies at the core of Frege's formal representation of mathematics. On the one hand, Frege asserted, repeatedly, that functions, in the logical and linguistic sense, are not objects. On the other hand, when it comes to formalizing mathematical constructions, he clearly felt that functions, in the mathematical sense, \emph{have to be} objects. His course-of-values operator, together with his Basic Law V, allowed him to have his cake and eat it too, maintaining clear borders between the two realms while passing between them freely. But Frege is often taken to task for failing to realize that this strategy opens the door to Russell's paradox. Indeed, the strategy feels like a hack, a desperate attempt to satisfy the two central constraints. Why was he so committed to them? The goal of this section is to suggest that the concerns Frege was trying to address with the design of the logic of the \emph{Grundgesetze} parallel some of the informal mathematical concerns we were able to discern in the nineteenth century treatment of characters. When one speculates as to the philosophical and logical considerations that influenced the design of Frege's logic, two possibilities come to mind. One is that Frege determined that functions and objects should be separate on broad ontological grounds, and then designed the logic accordingly. The other is that he designed the logic, determined it worked out best with a separation of individuals and functions, and read off the ontological stance from that. But, in fact, there is no clear distinction between these two descriptions. Frege designed his logic to try to model scientific practice at its best, and account for and support its successes while combating and eliminating confusions. The examples in the previous section show that Frege had no qualms about reinterpreting ordinary locutions and reconstruing everyday language, so he was by no means a slave to naive ontological intuitions. But even when doing so he appealed to intuitions to convince us that the reconstruals are reasonable. Thus ``doing ontology'' meant analyzing the practice, sorting out intuitions, and trying to regiment and codify them in a coherent and effective way. From the other direction, ``getting the logic to work'' meant being able to account for the informal practice effectively and efficiently, and supporting our intuitions to the extent that they can be fashioned into a coherent system. So it is not a question as to whether the ontology or the logic comes first; working out the ontology and designing the logic are part and parcel of the same enterprise. The following questions therefore seem more appropriate: \begin{enumerate} \item What considerations pushed Frege to maintain the sharp distinction between function and object? \item What considerations pushed Frege to identify mathematical entities, including ordinary mathematical functions, as objects? \end{enumerate} Let us consider each in turn. It seems to us that the answer to the first question is simply that Frege felt that failure to respect the distinction results in linguistic confusion. \begin{quote} If it were correct to take ``one man'' in the same way as ``wise man,'' we should be able to use ``one'' also as a grammatical predicate, and to be able to say ``Solon was one'' just as much as ``Solon was wise.'' It is true that ``Solon was one'' can actually occur, but not in a way to make it intelligible on its own in isolation.
It may, for example, mean ``Solon was a wise man,'' if ``wise man'' can be supplied from the context. In isolation, however, it seems that ``one'' cannot be a predicate. This is even clearer if we take the plural. Whereas we can combine ``Solon was wise'' and ``Thales was wise'' into ``Solon and Thales were wise,'' we cannot say ``Solon and Thales were one.'' But it is hard to see why this should be impossible, if ``one'' were a property both of Solon and of Thales in the same way that ``wise'' is.\footnote{``Wenn `Ein Mensch' \"{a}hnlich wie `weiser Mensch' aufzufasen w\"{a}re, so sollte man denken, dass, `Ein' auch als Praedicat gebraucht werden k\"{o}nnte, sodass man wie `Solon war weise' auch sagen k\"{o}nnte `Solon war Ein' oder `Solon war Einer'. Wenn nun der letzte Ausdruck auch vorkommen kann, so ist er doch d\"{u}r sich allein nicht verst\"{a}ndlich. Er kann z.B. heissen: Solon war ein Weiser, wenn `Weiser' aus dem Zusammenhange zu erg\"{a}nzen ist. Aber allein scheint `Ein' nicht Praedicat sein zu k\"{o}nnen. Noch deutlicher zeigt sich dies beim Plural. W\"{a}hrend man `Solon war weise' und `Thales war weise' zusammenziehen kann in `Solon und Thales waren weise,' kann man nicht sagen `Solen und Thales waren Ein'. Hiervon w\"{a}re die Unm\"{o}glichkeit nicht einzusehen, wenn `Ein' sowie `weise' eine Eigenschaft sowohl des Solon als auch des Thales w\"{a}re''.} \cite[\S 29]{Frege84} \end{quote} In other words, even though in some contexts an object word like ``one'' can \emph{appear} to be used as a predicate, and in other contexts a concept can \emph{appear} to be used as a subject, closer inspection shows that these uses do not conform to the rules that govern the use of prototypical subjects and predicates, and so should not be categorized in the naive way. One of Frege's favorite pastimes was to show that assertions made by philosophical and mathematical colleagues degenerate into utter nonsense when they fail to maintain sufficient linguistic hygiene. For example, in his 1904 essay, ``What is a Function,'' Frege was critical of conventional mathematical accounts of variables and functions. It is a mistake, he said, to think of a variable as being an object that varies: \begin{quote} \ldots a number does \emph{not} vary; for we have nothing of which we could predicate the variation. A cube never turns into a prime number; an irrational number never becomes rational.\footnote{``Folglich ver\"{a}ndert sich die Zahl gar nicht; denn wir haben nichts, von dem wir die Ver\"{a}nderung aussagen k\"{o}nnten. Eine Kubikzahl wird nie zu einer Primzahl, und eine Irrationalzahl wird nie rational.''} \cite[p.~658]{Frege04} \end{quote} He took the mathematician Emanuel Czuber to task for giving such a sloppy account of variables and functions in an introductory mathematical text. For example, he criticized Czuber's terminology ``a variable assumes a number'' \cite[288]{Frege04} as being incomprehensible. On Czuber's account, a variable is an ``indefinite number,'' so the terminology can be rephrased ``an indefinite number assumes a (definite) number''; but where we may talk about an object assuming a property, what can it mean for an object to assume another object? \begin{quote} In other connections, indeed, we say that an object assumes a property, here the number must play both parts; as an object it is called a variable or a variable magnitude, and as a property it is called a value. 
That is why people prefer the word ``magnitude'' to the word ``number''; they have to deceive themselves about the fact that the variable magnitude and the value it is said to assume are essentially the same thing, that in this case we have \emph{not} got an object assuming different properties in succession, and that therefore there can be no question of a variation.\footnote{``Sonst sagt man wohl, da{\ss} ein Gegenstand eine Eigenschaft annehme; hier mu{\ss} die Zahl beide Rollen spielen; als Gegenstand wird sie Variable oder ver\"{a}nderliche Gr\"{o}{\ss}e, als Eigenschaft wird sie Wert genannt. Darum also zieht man das Wort `Gr\"{o}{\ss}e' dem Worte `Zahl' vor; man mu{\ss} sich dar\"{u}ber t\"{a}uschen, da{\ss} die ver\"{a}nderliche Gr\"{o}{\ss}e und der Wert, den sie angeblich annimmt, im Grunde dasselbe sind, da{\ss} man gar nicht den Fall hat, wo ein Gegenstand nacheinander verschiedene Eigenschaften annimmt, da{\ss} also von Ver\"{a}nderung in keiner Weise die Rede sein kann.''} \cite[pp.~660--661]{Frege04} \end{quote} The essay closes with the following assessment: \begin{quote} The endeavor to be brief has introduced many inexact expressions into mathematical language, and these have reacted by obscuring thought and producing faulty definitions. Mathematics ought properly to be a model of logical clarity. In actual fact there are perhaps no scientific works where you will find more wrong expressions, and consequently wrong thoughts, than in mathematical ones. Logical correctness should never be sacrificed to brevity of expression. It is therefore highly important to devise a mathematical language that combines the most rigorous accuracy with the greatest possible brevity. To this end a symbolic language would be best adapted, by means of which we could directly express thoughts in written or printed symbols without the intervention of spoken language.\footnote{``Das Streben nach K\"{u}rze hat viele ungenaue Ausdr\"{u}cke in die mathematische Sprache eingef\"{u}hrt, und diese haben r\"{u}ckwirkend die Gedanken getr\"{u}bt und fehlerhafte Definitionen zuwege gebracht. Die Mathematik sollte eigentlich ein Muster von logischer Klarheit sein. In Wirklichkeit wird man vielleicht in den Schriften keiner Wissenschaft mehr schiefe Ausdr\"{u}cke und infolgedessen mehr schiefe Gedanken finden als in den mathematischen. Niemals sollte man die logische Richtigkeit der K\"{u}rze des Ausdrucks opfern. Deshalb ist es von gro{\ss}er Wichtigkeit, eine mathematische Sprache zu schaffen, die mit strengster Genauigkeit m\"{o}glichste K\"{u}rze verbindet. Dazu wird wohl am besten eine Begriffsschrift geeignet sein, ein Ganzes von Regeln, nach denen man durch geschriebene oder gedruckte Zeichen ohne Vermittlung des Lautes unmittelbar Gedanken auszudr\"{u}cken vermag.''} \cite[p.~665]{Frege04} \end{quote} Frege aimed to give a clear account of the rules that govern proper logical reasoning. Although, in ordinary language, the line between concepts and objects is sometimes blurry, failure to diagnose and manage the blurriness opens the door to nonsensical reasoning. Even though words like ``one'' and ``horse'' sometimes seem to denote both concepts and objects, conflating the two causes problems. For Frege, the only viable solution was to analyze and regiment such uses in a way that cordons off problematic instances. He found that the best way to do this is to maintain a clear separation of concept and object, and then supplement the analysis with an explanation as to how some words seem to cross the divide in certain contexts.
Now let us turn to the second question: why was Frege so dogged in his insistence that mathematical entities like numbers have to be treated as objects, and so persistent, in practice, in pushing mathematical constructions down to that realm? We believe that the answer lies in an observation that we found in Heck \cite{heck:97}: Frege wanted his numbers to be able to count all sorts of entities, and the only way he could make that work was by treating all these entities as inhabitants of the same type. Consider the following statements: \begin{itemize} \item There are two truth values. \item There are two natural numbers strictly between 5 and 8. \item There are two constant functions taking values among the truth values. \item There are two characters on $(\mathbb{Z} / 4\mathbb{Z})^*$. \item There are two subsets of a singleton set. \end{itemize} Frege would have insisted that the word ``two'' in each of these statements refers to the same object. We would like to say that the number of truth values is equal to the number of natural numbers between 5 and 8, but if truth values and numbers were different types of entities, his analysis of number would not do that: even if Frege had a notion of identity for each type, we would have to define a different notion of two for each such type. In other words, for each type $\sigma$, we would have to define a concept $\mathrm{Two}_\sigma$ that holds of concepts of arguments of type $\sigma$ under which two elements fall.\footnote{In Frege's system, in which the equality symbol can only be used with objects, this would have to be expressed instead in terms of a ``sameness'' relation for elements of type $\sigma$, for any $\sigma$ other than the type of objects.} Taking extensions according to Frege's construction would yield an object $2_{\sigma}$ for each $\sigma$. But this results in a proliferation of twos, and since $2_\sigma$ and $2_\tau$ are not guaranteed to be the same object, one would have to exercise great care when reasoning about the relationship between them. This is clearly unworkable. Instead, Frege designed his numbers to count objects, \emph{simpliciter}: $2$ is the extension of the concept of being a concept of \emph{objects} under which exactly two objects fall.\footnote{See footnote~\ref{number:footnote}.} But this means that if you want to count a collection of things, those things have to be elements of the type of objects. This, in turn, provides a strong motivation to locate mathematical entities of all kinds among the type of objects. We have seen in Sections \ref{modern:formulation:section} to \ref{analysis:section} that what holds true of counting holds true of other mathematical operations, relations, and constructions as well. Contemporary proofs of Dirichlet's theorem have us sum over finite sets of characters just as we sum over finite sets of numbers. We view the general operation here as summation over a finite set of objects, viewing both characters and numbers as such. Contemporary proofs also have us consider groups of characters, just as we consider groups of residues. Once again, we consider these as instances of the general group concept, with the understanding that a group's underlying set can be any set of objects. This allows us to speak of a homomorphism between any two groups, without requiring a different notion of ``homomorphism'' depending on the type of objects of the groups' carriers. 
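To make the contrast with type-relative counting vivid, here is what a definition of $\mathrm{Two}_\sigma$ might look like in modern notation (the notation is ours, of course, not Frege's, and is meant only as an illustrative sketch):
\[
\mathrm{Two}_\sigma(F) \;\equiv\; \exists x\, \exists y\, \bigl(\neg(x \approx_\sigma y) \land F(x) \land F(y) \land \forall z\, (F(z) \to (z \approx_\sigma x \lor z \approx_\sigma y))\bigr),
\]
where the variables $x$, $y$, $z$ range over entities of type $\sigma$, $F$ ranges over concepts of such entities, and $\approx_\sigma$ is the appropriate ``sameness'' relation for the type. A separate such definition would be needed for each type $\sigma$, which is precisely the proliferation that Frege's uniform realm of objects is designed to avoid.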
Characters were not the only mathematical entities studied in the latter half of the nineteenth century that encouraged set-theoretic reification. Gauss' genera of quadratic forms, discussed briefly in ``Concept,'' also bear a group-theoretic structure, and these are sets of quadratic forms. Dedekind developed his theory of ideals in order to supplement rings of algebraic integers with ``ideal divisors,'' extending the unique factorization property of the ordinary integers to these more general domains. Dedekind found that these ideal divisors could be identified with sets of elements in the original ring, now known as ``ideals.'' Like the characters, the ideals of a ring of algebraic integers bear an algebraic structure, and Dedekind was adamant that they should be treated as bona-fide objects.\footnote{See Avigad \cite{avigad:06}, especially page 172, and Edwards \cite{edwards:80}.} Similarly, Dedekind constructed the real numbers by identifying each of them with a pair of sets of rational numbers \cite{dedekind:72}. By the end of the century, it was common to view a quotient group as a group whose elements are equivalence classes, or cosets.\footnote{See the detailed discussion in Schlimm \cite[Section 3]{schlimm:08}. Other nice examples of pieces of nineteenth century mathematics that push in favor of set-theoretic abstraction are discussed in Wilson~\cite{wilson:10,wilson:unp}.} The reasons given above to treat mathematical functions and sets as objects also speak in favor of treating them extensionally. The statements that ``there are two characters on $\mathbb{Z} / 2\mathbb{Z}$'' and ``there are $\varphi(m)$ characters on $(\mathbb{Z} / m\mathbb{Z})^*$'' are false if we take characters to be representations, as there are many different representations of the same character. We could, of course, develop notions of ``counting up to equivalence.'' In the early days of finite group theory, Camille Jordan described quotient groups as systems just like ordinary groups except that equality is replaced by an appropriate equivalence relation.\footnote{Again, see Schlimm \cite[Section 3]{schlimm:08}.} But, if we do that, mathematical statements become ``relativized'' to the appropriate equivalence relations, which constitute additional information that needs to be carried along and managed. The alternative is to extensionalize: then the only equivalence relation one has to worry about is equality. We do not know the extent to which Frege was familiar with examples like these. But Wilson \cite{wilson:10,wilson:unp} calls attention to an important example of abstraction with which Frege was quite well acquainted. Frege was trained as a geometer, and studied under Ernst Schering in G\"ottingen. His dissertation, completed in 1873, was titled ``\"Uber eine geometrische Darstellung der imagin\"aren Gebilde in der Ebene'' (``On a geometric representation of imaginary forms in the plane''). Early nineteenth century geometers found great explanatory value and simplification in extending the usual Euclidean plane with various ideal objects, like ``points at infinity'' and ``imaginary'' points of intersection. 
One of the few motivating examples that Frege provided in the \emph{Grundlagen} (\S64--\S68) is the fact that one can identify the ``direction'' of a line $a$ in the plane with the extension of the concept ``parallel to $a$.'' As Wilson points out (though Frege does not), these ``directions'' are exactly what is needed to serve as points at infinity, enabling one to embed the Euclidean plane in the larger projective plane, which has a number of pleasing properties. In the projective plane, all points have equal standing, and so it stands to reason that the concept-extensions used to introduce the new entities should be given the same ontological rights as the Euclidean points and lines used in their construction. Wilson characterizes such strategies for expansion as forms of ``relative logicism,'' since they provide a powerful means of relating the newly-minted objects to the more familiar ones.\footnote{See also Tappenden \cite{tappenden:06} for other ways that nineteenth century mathematics seems to have influenced Frege's philosophical views.} When it comes down to the nitty-gritty details, however, the only sustained formal development we have from Frege is his treatment of arithmetic. But even in this particular case, many of the issues we have raised come to the fore. In the \emph{Grundgesetze}, Frege defined a number of general operations and relations on tuples, sequences, functions, and relations. All of these can now be viewed as general set-theoretic constructions. What gives these constructions universal validity is that they can be applied to any domain of objects, and we now have great latitude in creating objects, as they are needed, to populate these domains. It is precisely the ability to bring a wide variety of mathematical constructions into the realm of objects, and the ability to define predicates and operations uniformly on this realm, that renders Frege's logic so powerful---too powerful, alas. But given Frege's goals, it should be clear why the extension operator held so much appeal.\footnote{The same uniformity is achieved in set theory by having a large universe of sets, and incorporating set-forming operations which return new elements of that universe. Russell introduced the notion of \emph{typical ambiguity} \cite{russell:08,feferman:82b} to allow ``polymorphic'' operations defined uniformly across types, and modern interactive theorem provers based on simple type theory follow such a strategy to obtain the necessary uniformities. For example, most such systems have operations $\mathit{card}_{\sigma}$ which map a finite set of elements of type $\sigma$ to its cardinality, a natural number. The systems include mechanisms that allow one to define this family of operations uniformly, once and for all, treating $\sigma$ as a parameter. One can then write $\mathit{card} \; A$, and let the system infer the relevant type parameter from the type of $A$.
This provides one means of coping with the nonuniformities that arise from a type-theoretic compartmentalization of the mathematical universe, but the difficulties that accrue to taking simple type theory as a mathematical foundation are complex; see, for example, \cite{avigad:12}.} To sum up, we have traced a central tension in Frege's work to the need to balance two competing desiderata: \begin{enumerate} \item the need for flexible but rigorous ways of talking about higher-type entities, like functions, predicates, and relations, without falling prey to incoherence; and \item the need for ways of dealing with mathematical objects uniformly, since mathematical constructions and operations have to be applied to many sorts of objects, many of which cannot be foreseen in advance. \end{enumerate} Compare this to the analysis of the mathematical pros and cons to treating functions as objects, as discussed in Section~\ref{analysis:section}, and note the similarities. At the end of an essay on Frege's treatment of concepts and objects, Thomas Ricketts briefly discusses aspects of nineteenth-century mathematical practice that may have had an influence on Frege. Agreeing with Wilson's assessment of the importance of being able to construct ideal elements in projective geometry, Ricketts writes: \begin{quote} Throughout his career, Frege is concerned with the introduction of new domains in mathematics, with the `creation' of new mathematical concepts. He vigorously polemicizes against formalist account of this practice and aims to develop an alternative to it. Frege's own approach here shines forth in a comment on Dedekind's account of the real numbers: \begin{quote} The most important thing for an arithmetician who recognizes in general the possibility of creation [of mathematical objects] will be to develop in an illuminating way [\emph{in einleuchender Weise}] the laws governing this in order to prove in advance of each individual creative act that the laws allow it. Otherwise, everything will be imprecise, and proofs will degenerate to a mere appearance, to a good-willed self-delusion. \cite[Vol.~2, \S 140]{frege:grundgesetze}. \end{quote} The desired foundation will be provided by formulating a logical law that, in the context of other logical laws, will yield as a theorem the existence of the desired new objects. \cite[pp.~217--218]{ricketts:10} \end{quote} What we have aimed to do here is to explain in greater detail why it is mathematically important to treat certain sorts of things as objects, and what, exactly, that amounts to. Not just Frege's work in geometry, but also his construction of the natural number system, would have impressed upon him the importance of having uniform operations and constructions on higher-order entities, and having a uniform way of making general assertions about these operations and constructions. In other words, Frege, like the various mathematical authors we have considered, was responding to methodological pressures that are inherent in the nature of the mathematical enterprise. As Ricketts emphasizes, Frege's entire foundational project was designed to address the important mathematical need of introducing clear means of expression, and developing general consensus as to the rules of use, while ensuring that the expressions and rules are meaningful, reliable, and consistent. While mathematicians from Dirichlet to Landau were focused on extending the edifice of mathematical knowledge, Frege's goal was to shore up the foundations. 
This difference translates to differences in perspective, focus, and method, but the distinctions are not sharp. Working from different ends of the spectrum, both Frege and his mathematical counterparts were working to clarify and extend mathematical method in powerful ways. In doing so, they addressed similar mathematical goals, and responded to similar mathematical constraints. Frege is often faulted for failing to recognize the simple inconsistency that arises from the formal means he introduced to resolve the tension between the two concerns enumerated above. Nonetheless, it is worth highlighting the extent to which these two concerns were central to the subsequent development of logic and foundations. Russell's paradox shows that Frege was perfectly right to worry that an overly naive treatment of functions, concepts, and objects would lead to problems in the most fundamental use of our language and methods of reasoning. And, going into the twentieth century, developments in all branches of mathematics called for liberal means of constructing new mathematical domains and structures, as well as uniform ways of reasoning about their essential properties. The most fruitful and appropriate means of satisfying these needs was by no means clear at the turn of the twentieth century. Indeed, these issues were at the heart of the tumultuous foundational debates that were looming on the horizon. \section*{Appendix: From cyclotomy to Dirichlet's theorem} \addcontentsline{toc}{section}{Appendix: From cyclotomy to Dirichlet's theorem} \label{appendix} In Section~\ref{overview:section}, we sketched Dirichlet's approach to proving his theorem on primes in an arithmetic progression. Our goal here is to explain how Dirichlet is likely to have come upon his method of modifying Euler's argument to tease apart the contribution of the primes in each residue class from the overall sum of their reciprocals. Recall that if we split up the sum in Euler's equation (\ref{euler:primes:eqn}), we obtain \begin{equation} \log\sum_{n=1}^{\infty}\frac{1}{n^s}=\sum_{q \equiv 1 \bmod p} \frac{1}{q^{s}} + \sum_{q \equiv 2 \bmod p} \frac{1}{q^{s}} + \ldots + \sum_{q \equiv p-1 \bmod p} \frac{1}{q^{s}} + O(1).\tag{\ref{euler:primes:eqn:b}} \end{equation} As explained in Section~\ref{dirichlet:approach:section}, this shows that (\ref{euler:primes:eqn}) is too crude to prove Theorem~\ref{dirichlet:theorem}: we need to know that each of the terms on the right-hand side tends to infinity, not just their sum. It is here that ideas from the theory of equations are helpful. They come into play specifically in the theory of cyclotomy from Gauss' \emph{Disquisitiones Arithmeticae}, work with which Dirichlet was intimately acquainted. Historical overviews of the relevant ideas can be found in excellent books by Edwards and Tignol on the history of the theory of equations \cite{edwards:84,tignol:01}, and Curtis' equally impressive history of representation theory \cite{curtis:99}. Curtis also explains the role of characters in the theory of cyclotomy and Dirichlet's proof. What we aim to do here is make the progression of ideas leading from cyclotomy to Dirichlet's proof as explicit as possible. An important concern in the field of algebra is the extent to which the roots of a polynomial can be expressed in terms of arithmetic operations on the coefficients together with the extraction of roots. The quadratic formula dates to antiquity, and solutions to the cubic and quartic were presented by Cardano in his \emph{Ars Magna} of 1545.
A natural challenge was then to determine a similar formula for the quintic. In 1770, Lagrange presented a general method of attacking this problem, using what has come to be known as the \emph{Lagrange resolvent}. Let $t_0, \ldots, t_{n-1}$ be the roots of the $n$th degree polynomial in question, and let $\omega$ be an $n$th root of unity, that is, a solution to the equation $\omega^n = 1$. Notice that 1 is always a solution to this equation, but there are $n - 1$ others. In fact, all of the roots can be taken to be powers of a single ``primitive'' root of unity; for example, taking $\omega$ to be the complex number $e^{2 \pi i / n}$ will do. Lagrange considered the quantity \[ t_0 + \omega t_1 + \omega^2 t_2 + \ldots + \omega^{n-1} t_{n-1}, \] as well as the quantities obtained by permuting the roots $t_0, \ldots, t_{n-1}$. Suppose $\omega$ is a primitive $n$th root of unity, and consider the values obtained by replacing $\omega$ in the previous expression with each of the values $1, \omega, \omega^2, \ldots, \omega^{n-1}$: \begin{align*} x_0 & = t_0 + t_1 + t_2 + \ldots + t_{n-1} \\ x_1 & = t_0 + \omega t_1 + \omega^2 t_2 + \ldots + \omega^{n-1} t_{n-1} \\ x_2 & = t_0 + \omega^2 t_1 + \omega^4 t_2 + \ldots + \omega^{2(n-1)} t_{n-1} \\ \vdots \\ x_{n-1} & = t_0 + \omega^{n - 1} t_1 + \omega^{2 (n - 1)} t_2 + \ldots + \omega^{(n-1)^2} t_{n-1} \end{align*} Lagrange observed that one can solve for each of $t_0, t_1, \ldots, t_{n-1}$ in terms of $x_0, x_1, \ldots, x_{n-1}$. For example, consider $x_0 + x_1 + \ldots + x_{n-1}$. Summing the first column gives $n \cdot t_0$. Summing the second column gives $t_1 \cdot (1 + \omega + \omega^2 + \ldots + \omega^{n-1})$. But because $\omega$ is a root of \[ \omega^n - 1 = (\omega - 1) (\omega^{n-1} + \ldots + \omega^2 + \omega + 1) \] and $\omega \neq 1$, we have $1 + \omega + \omega^2 + \ldots + \omega^{n-1} = 0$. Similarly, summing the third column gives $t_2 \cdot (1 + \omega^2 + \omega^4 + \ldots + \omega^{2(n-1)})$; but $\omega^2$ is also an $n$th root of unity, and if $\omega$ is primitive (and $n > 2$), $\omega^2$ is also not equal to $1$, and the same argument shows that this quantity sums to $0$. The same argument shows that the remaining columns also sum to $0$, so we have $t_0 = (x_0 + \ldots + x_{n-1}) / n$, which is the desired expression for $t_0$. A similar trick works to compute the other values $t_k$: multiplying the $i$th equation by $\omega^{-ik}$ simply ``rotates'' the powers of $\omega$, leaving $1$'s in the $k$th column. Thus we have \[ t_k = \frac{1}{n} \sum_{i = 0}^{n-1} \omega^{-ik} x_i, \] which provides an expression for $t_k$ in terms of $x_0, \ldots, x_{n-1}$. Lagrange went on to consider the values of $x_0, \ldots, x_{n-1}$ that are obtained by replacing $\omega$ with other roots of unity, and conditions under which one can solve for those values, and hence $t_0, \ldots, t_{n-1}$, in terms of radicals. In doing so, he was analyzing and generalizing methods of solving equations developed by Vi\`ete, Tschirnhaus, and others who had come before. He showed that these ideas can be used to account for the known solutions to the quadratic, cubic, and quartic equations. The methods break down for the general solution to the quintic, but variations on the method can be used to determine roots of \emph{particular} polynomials. Consider, for example, the polynomial $x^n - 1$ itself. We have already noted that $x^n - 1 = (x - 1) (x^{n-1} + x^{n-2} + \ldots + x^2 + x + 1)$.
If $n$ is not a prime number, the second term can be factored into polynomials of lower degree, until one reaches polynomials that can no longer be factored; these are called \emph{irreducible} polynomials. The task of determining the roots of these polynomials is known as ``cyclotomy,'' or ``circle division,'' because the $n$ complex roots of $x^n - 1$ are evenly spaced around the unit circle in the complex plane. The problem can be reduced to the case where $n$ is a prime number, which we will denote $p$ instead. In that case, $x^{p-1} + x^{p-2} + \ldots + x^2 + x + 1$ is irreducible, and if $\alpha$ is any root of this polynomial, the other $p-2$ roots are $\alpha^2, \alpha^3, \ldots, \alpha^{p-1}$. An expression for these roots in terms of radicals was provided by Vandermonde for the case where $p = 11$, and the general problem was taken up by Gauss in the last chapter of the \emph{Disquisitiones}.\footnote{Gauss was particularly interested in the case where $p$ is a prime number of the form $2^m + 1$, and showed that in that case, the solution enables one to carry out a geometric construction using compass and straightedge that divides the circle into $p$ equal parts. The \emph{Disquisitiones} hints at the solution to the general case, but both Edwards \cite{edwards:84} and Tignol \cite{tignol:01} observe that there are gaps in the presentation; a complete solution was provided by Galois.} The solution involves using the Lagrange resolvent, and taking the roots $t_0, t_1, \ldots, t_{p-2}$ to be the $p - 1$ roots $\alpha, \alpha^2, \ldots, \alpha^{p-1}$, but in a particular order. The proof involves choosing, for the prime $p$ in question, a primitive element $g$ modulo $p$. Recall from Section~\ref{dirichlet:approach:section} that this means that the powers $g^0, g^1, \ldots, g^{p-2}$ modulo $p$ are exactly the nonzero residues modulo $p$. The solution to the equation $x^{p-1} + x^{p-2} + \ldots + x^2 + x + 1 = 0$ is obtained by considering the Lagrange resolvent \begin{equation} \label{cyclotomy:equation} \alpha^{g^0} \cdot \omega^0 + \alpha^{g^1} \cdot \omega^1 + \alpha^{g^2} \cdot \omega^2 + \ldots + \alpha^{g^{p-2}} \cdot \omega^{p-2}, \end{equation} where $\omega$ is a $(p-1)$st root of unity. If we define $t_i$ to be $\alpha^{g^i}$, then this expression becomes \[ t_0 + \omega t_1 + \omega^2 t_2 + \ldots + \omega^{p-2} t_{p-2}, \] and we are in the situation analyzed above. Lagrange's trick tells us that if we can solve for the values of this expression when $\omega$ is replaced by $1, \omega, \omega^2, \ldots, \omega^{p-2}$ in succession, we can solve for all the values of $\alpha^{g^i}$, which are just the values $\alpha, \alpha^2, \ldots, \alpha^{p-1}$ written in a different order. The reason for writing the powers of $\alpha$ in the particular order they appear in (\ref{cyclotomy:equation}) is that, when they are written in that order, it \emph{is} possible to solve for each $t_i$, with an expression involving radicals. The details of the solution are not relevant to the proof of Dirichlet's theorem, but one particular aspect of the solution is. What makes the argument work is the careful pairing of $\alpha^{g^i}$ with $\omega^i$, which has the effect that for each $i$ and $j$, the element \[ (\alpha^{g^i})^{g^j} = \alpha^{g^i \cdot g^j} = \alpha^{g^{i + j}} \] is paired with $\omega^{i + j}$.
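As a small concrete illustration (the specific numbers are our own choice, made only for exposition): for $p = 5$ one may take the primitive root $g = 2$, since $g^0, g^1, g^2, g^3 \equiv 1, 2, 4, 3 \pmod{5}$. The resolvent (\ref{cyclotomy:equation}) is then
\[
\alpha + \alpha^{2}\,\omega + \alpha^{4}\,\omega^{2} + \alpha^{3}\,\omega^{3},
\]
where $\omega$ is a fourth root of unity, and, for instance, $(\alpha^{g^1})^{g^1} = (\alpha^2)^2 = \alpha^4 = \alpha^{g^2}$ is indeed paired with $\omega^{1+1} = \omega^2$. The whole mechanism is also easy to check numerically. The following Python sketch---purely illustrative, and of course no part of the historical material---lists the roots in the order $t_k = \alpha^{g^k}$, forms the resolvents $x_i$ obtained by replacing $\omega$ with its successive powers, and recovers each root via Lagrange's inversion formula $t_k = \frac{1}{p-1}\sum_i \omega^{-ik} x_i$ given above:
\begin{verbatim}
import cmath

p, g = 5, 2                                  # a prime and a primitive root modulo p
alpha = cmath.exp(2j * cmath.pi / p)         # a primitive p-th root of unity
omega = cmath.exp(2j * cmath.pi / (p - 1))   # a primitive (p-1)-st root of unity

# The roots alpha, alpha^2, ..., alpha^(p-1), listed in the order t_k = alpha^(g^k).
t = [alpha ** pow(g, k, p) for k in range(p - 1)]

# The resolvents x_i, obtained by replacing omega with omega^i in the expression above.
x = [sum(omega ** (i * k) * t[k] for k in range(p - 1)) for i in range(p - 1)]

# Lagrange's inversion: t_k = (1/(p-1)) * sum_i omega^(-i*k) * x_i.
for k in range(p - 1):
    recovered = sum(omega ** (-i * k) * x[i] for i in range(p - 1)) / (p - 1)
    assert abs(recovered - t[k]) < 1e-9
\end{verbatim}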
Using the notion of ``index'' defined in Section~\ref{dirichlet:approach:section}, we can express this as follows: for any $m$ and $n$, $\alpha^m$ is paired with $\omega^{\gamma_m}$, $\alpha^n$ is paired with $\omega^{\gamma_n}$, and $\alpha^{mn}$ is paired with $\omega^{\gamma_{mn}} = \omega^{\gamma_m + \gamma_n} = \omega^{\gamma_m} \omega^{\gamma_n}$. In other words, the key property used in the calculation of the roots of cyclotomic equations is that the map $m \mapsto \omega^{\gamma_m}$ is \emph{multiplicative} on the nonzero residues modulo $p$. Dirichlet's great insight is that these ideas can be applied in the number-theoretic setting at hand, using the fact that the Euler product formula holds more generally with such a multiplicative function in the numerator. In the case where the common difference is a prime number $p$, if we choose a primitive root $g$ modulo $p$ and define $t_i = \sum_{q \equiv g^i \bmod p} 1/ q^s$, then equation (\ref{euler:character:eqn}) in Section~\ref{dirichlet:approach:section} can be written \[ \log\sum_n\frac{\omega^{\gamma_n}}{n^s} = t_0 + t_1 \omega + \ldots + t_{p-2} \omega^{p-2} + O(1). \] The derivation of this equation relies on the generalized Euler formula, which requires that the map $m \mapsto \omega^{\gamma_m}$ is multiplicative. But once we have the equation in hand, we need only use the Lagrange trick, which is exactly what Dirichlet did. The more general case where $p$ is replaced by an arbitrary modulus $k$ is technically more difficult, but it builds on the same idea, combined with the behavior of the multiplicative group of residues modulo $k$ that are coprime to $k$. Once again, this is something which Dirichlet was intimately familiar with, from the work of Gauss. \nocite{Fregecomp} \end{document}
\begin{document} \begin{article} \begin{opening} \title{Decidability of Quantified Propositional\\ Intuitionistic Logic and S4 on Trees} \author{Richard \surname{Zach} \email{[email protected]}} \institute{Department of Philosophy\\ University of Calgary\\ 2500 University Drive NW\\ Calgary, Alberta T2N 1N4\\ Canada} \date{Draft, March 17, 2002---Comments welcome!} \begin{abstract} Quantified propositional intuitionistic logic is obtained from propositional intuitionistic logic by adding quantifiers $\forall p$, $\exists p$, where the propositional variables range over upward-closed subsets of the set of worlds in a Kripke structure. If the permitted accessibility relations are arbitrary partial orders, the resulting logic is known to be recursively isomorphic to full second-order logic \cite{Kremer:97}. It is shown that if the Kripke structures are restricted to trees, the resulting logics are decidable. The result also transfers to modal \ensuremath{\mathbf{S4}}{} and some G\"odel-Dummett logics with quantifiers over propositions. \end{abstract} \end{opening} \section{Introduction} Quantified propositional intuitionistic logic is obtained from propositional intuitionistic logic by adding quantifiers $\forall p$, $\exists p$ over propositions. In the context of Kripke semantics, a proposition is a subset of the worlds in a model structure which is upward closed, i.e., if $h \in P$, then $h' \in P$ for all $h' \ge h$. For propositional intuitionistic logic~\H, several classes of model structures are known to be complete, in particular the class of all partial orders, as well as the class of trees and some of its subclasses. When quantifiers over propositions are added, these results no longer hold. \inlinecite{Kremer:97} has shown that the quantified propositional intuitionistic logic \qp{\H} based on the class of all partial orders is recursively isomorphic to full second-order logic. He raised the question of whether the logic resulting from restriction to trees is axiomatizable. The main part of this note establishes that, in fact, it is decidable. It should be pointed out right away that the trees we consider here are all subtrees of the complete tree of height and arity~$\omega$. That is, trees of uncountable arity, or height more than~$\omega$ are excluded. This is in accord with Kripke's \shortcite{Kripke:65} intuitive interpretation of his possible world semantics for intuitionistic logic. In this interpretation, Kripke explains, the worlds in a structure correspond to ``points in time (or `evidential situations')'' and the accessibility relation $\le$ holds between worlds $h$, $h'$ if ``as far as we know, at time $h$, we may later gain enough information to advance to $h'$.'' If the language is countable, then at each point, there are only countably many sentences about which we could discover new information. So at each point, there are only countably many possibilities for advancing to a new evidentiary situation, i.e., the tree of evidentiary situations should have arity $\le \omega$. Allowing trees of transfinite height would correspond, in this interpretation, to allowing a transfinite process of gathering of evidence. A ``jump'' to a new evidentiary situation only after an infinite amount of time and investigation seems counter to the spirit of Kripke's interpretation; hence, trees should be of height at most~$\omega$. 
The rest of this note is organized as follows: Section~2 introduces the logics considered, and contains several observations regarding the relationship between the classes of formulas valid on various classes of trees. Section~3 presents the decidability result for quantified propositional intuitionistic logic. Section~4 outlines how the results transfer to a proof of decidability of modal \ensuremath{\mathbf{S4}}{} with propositional quantification on similar types of Kripke structures. (Propositionally quantified \ensuremath{\mathbf{S4}}{} on general partial orders is also known to be not axiomatizable.) Intermediate logics based on linear orders (i.e., 1-ary trees), which correspond to G\"odel-Dummett logics, are also considered. A concluding section discusses limitations and possible extensions of the method. \section{Quantified propositional intuitionistic logics} \begin{defn} A \emph{model structure}~$\langle g, K, \le\rangle$ is given by a set of worlds~$K$, an initial world $g \in K$, and a partial order $\le$ on~$K$, for which $g$ is a least element. Given a structure, an \emph{(intuitionistic) proposition} is a subset $P \subseteq K$ so that when $h \in P$ and $h' \ge h$, then also $h' \in P$. A \emph{valuation}~$\phi$ is a function mapping the propositional variables to propositions of the structure. A \emph{model}~$M = \langle g, K, \le, \phi\rangle$ is a structure together with a valuation. If $P$ is a proposition in the model $M$, then $M[P/p]$ is the model which is just like $M$ except that it assigns the proposition $P$~to~$p$. \end{defn} \begin{defn} If $M = \langle g, K, \le, \phi\rangle$ is a model, $h \in K$, and $A$ is a formula, we define what it means for \emph{$A$ to be true at $h$}, denoted $M, h \models A$, by induction on formulas as follows: \begin{enumerate} \item $M, h \models p$ if $h \in \phi(p)$; $M, h \nmodels \bot$. \item $M, h \models B \land C$ if $M, h \models B$ and $M, h \models C$. \item $M, h \models B \lor C$ if $M, h \models B$ or $M, h \models C$. \item $M, h \models B \impl C$ if, for all $h' \ge h$, either $M, h' \not\models B$ or $M, h' \models C$. \item $M, h \models \forall p\, B$, if, for all propositions $P$, $M[P/p], h \models B$. \item $M, h \models \exists p\, B$ if there is a proposition~$P$ so that $M[P/p], h \models B$. \end{enumerate} The constant $\bot$ is always assigned the empty proposition; $\neg A$ abbreviates $A \impl \bot$, hence, $M, h \models \neg B$ iff for all $h' \ge h$, $M, h' \nmodels B$. \end{defn} \begin{defn} Given a model~$M$ and a formula $A$, the \emph{proposition defined by $A$} is the set $M(A) = \{h : M, h \models A\}$. \end{defn} \begin{prop} $M(A)$ is a proposition. In fact we have: \[\begin{array}{rcl@{\qquad}rcl} M(p) & = & \phi(p) & M(\bot) & = & \emptyset\\ M(A \land B) & = & M(A) \cap M(B) & M(A \lor B) & = & M(A) \cup M(B)\\ M(\forall p\, A) & = & \bigcap_P M[P/p](A) & M(\exists p\, A) & = & \bigcup_P M[P/p](A)\\ \multicolumn{6}{c}{M(A \impl B) = \{h : \textrm{for\ all\ }h' \ge h, \textrm{\ if\ }h' \in M(A) \textrm{\ then\ }h' \in M(B)\}} \end{array}\] \end{prop} \begin{pf} By induction on the complexity of formulas.\qed \end{pf} \begin{defn} A model $M$ \emph{validates}~$A$, $M \models A$, if $M, g \models A$. A model structure $S$ validates $A$, if every model based on $S$ validates $A$. $A$ is \emph{valid in a class of model structures~$\mathfrak{C}$}, $\mathfrak{C} \models A$, if $M \models A$ for all models~$M$ based on structures in~$\mathfrak{C}$.
$A$ is \emph{valid} if $M \models A$ for any model~$M$.\end{defn}
\begin{defn} A tree~$T$ is a subset of $\omega^*$, the set of words over $\omega$, which is closed under initial segments. $T$ is partially ordered by the prefix ordering $\le$ defined as: $x \le y$ if $y = xz$ for some~$z$, and totally ordered by the lexicographic order~$\preceq$. The empty word~$\Lambda$ is the least element in both orderings. The set $T_\omega = \omega^*$ itself is a tree, the \emph{complete infinitary tree.} The set $T_n = \{i: 0 \le i < n\}^*$ ($n \le \omega$) is also a tree (called the \emph{complete $n$-ary tree}). \end{defn}
\begin{defn}\label{logics} We consider the following classes of model structures on trees:
\[\begin{array}{rclrcl} \mathfrak{T} & = &\{\langle \Lambda, T, \le\rangle : T \textrm{ is a tree}\} & \mathfrak{T}_{n} & = & \{T_n\}, \\ \multicolumn{6}{c}{\mathfrak{T}_\mathrm{fin} = \{\langle \Lambda, T, \le\rangle : T \textrm{ is a finite tree}\}.} \end{array}\]
These model structures give rise to the following quantified propositional logics:
\[\begin{array}{rclrcl} \qp{\H} & = & \{A : {}\models A\} & \qp{\mathbf{Ht}} & = & \{A : \mathfrak{T} \models A\} \\ \qp{\mathbf{Ht}_n} & = & \{A : \mathfrak{T}_{n} \models A\} & \qp{\mathbf{Ht}^\mathrm{fin}} & = & \{A : \mathfrak{T}_\mathrm{fin} \models A\}. \\ \end{array}\]
\end{defn}
To each of these quantified propositional logics $\qp{\L}$ corresponds a propositional logic~\L{} obtained by restriction to quantifier-free formulas. These all collapse to~$\H$, i.e., $\H = \mathbf{Ht} = \mathbf{Ht}_n = \mathbf{Ht}^\mathrm{fin}$ for $n \ge 2$ \cite{Gabbay:81}. The \emph{quantified} propositional logics, however, do not:
\begin{prop}\label{rels} 1. $\qp{\H} \subsetneq \qp{\mathbf{Ht}} \subsetneq \qp{\mathbf{Ht}_n}$ and $\qp{\mathbf{Ht}} \subsetneq \qp{\mathbf{Ht}^\mathrm{fin}}$.\\ 2. $\qp{\mathbf{Ht}^\mathrm{fin}} \not\subseteq \qp{\mathbf{Ht}_n}$ and $\qp{\mathbf{Ht}_n} \not\subseteq \qp{\mathbf{Ht}^\mathrm{fin}}$. \end{prop}
\begin{pf} The inclusions $\qp{\H} \subseteq \qp{\mathbf{Ht}} \subseteq \qp{\mathbf{Ht}_n}$ and $\qp{\mathbf{Ht}} \subseteq \qp{\mathbf{Ht}^\mathrm{fin}}$ are obvious. To show that the first inclusion is proper, consider
\begin{eqnarray*} A &=& \forall p(\neg p \lor \neg\neg p) \impl \forall p\forall q((p \impl q)\lor(q\impl p)). \end{eqnarray*}
Then $\qp{\H} \nmodels A$: the 4-element diamond is a countermodel.
On the other hand, $\qp{\mathbf{Ht}} \models A$, since any $h$ with $h \models \forall p(\neg p \lor \neg\neg p)$ is such that for all $h', h'' \ge h$, either $h' \ge h''$ or $h'' \ge h'$. To see this, suppose $h', h'' \ge h$ but neither $h' \le h''$ nor $h'' \le h'$. Consider the proposition $P = \{k: k \ge h'\}$. Then $M[P/p], h' \models p$, and hence $M[P/p], h \nmodels \neg p$. On the other hand, $M[P/p], k \nmodels p$ for any $k \ge h''$. Hence, $M[P/p], h'' \models \neg p$ and so $M[P/p], h \nmodels \neg\neg p$. In other words, the part of the model above $h$ is linearly ordered, and so $h \models \forall p\forall q((p \impl q) \lor (q \impl p))$. For the second inclusion, take $B = \forall p(p \lor \neg p)$. Since $\forall p(p \lor \neg p)$ is true at any $h$ which has no successor worlds in a model (a leaf node) and false otherwise, $\neg B$ will be true iff the model has no leaf node. Since complete trees don't have leaf nodes, $\qp{\mathbf{Ht}_n} \models \neg B$ but $\qp{\mathbf{Ht}} \nmodels \neg B$.\footnote{This example is due to Tomasz Po\l{}acik. Instead of $p \lor \neg p$ one can use any classical tautology which is not derivable in intuitionistic logic.} On the other hand, in a finite tree, every branch has a world with no successors. If $M$ is a model based on a finite tree, for every world $h$ there is a world $h' \ge h$ such that $M, h' \models \forall p(p \lor \neg p)$. Hence, for every world $h$, $M, h \nmodels \neg B$ and consequently $M, h \models \neg\neg B$. Thus,
$$\begin{array}[b]{rclrcl} \qp{\mathbf{Ht}^\mathrm{fin}} & \models & \neg\neg B,\textrm{ but} & \qp{\mathbf{Ht}}, \qp{\mathbf{Ht}_n} & \nmodels & \neg\neg B; \\ \qp{\mathbf{Ht}^\mathrm{fin}} & \nmodels & \neg B,\textrm{ but} & \qp{\mathbf{Ht}_n} & \models & \neg B. \end{array}\eqno\Box$$
\end{pf}
\section{Decidability results}
\begin{thm}[\opencite{Kremer:97}] \qp{\H} is recursively isomorphic to full second-order logic. \end{thm}
\begin{thm} Each logic from Definition~\ref{logics}, except \qp{\H}, is decidable. \end{thm}
\begin{pf} We use Rabin's tree theorem \cite{Rabin:69}. That theorem says that \ensuremath{\mathrm{S}\omega\mathrm{S}}, the monadic second-order theory of $T_\omega$, is decidable. We reduce validity of quantified propositional formulas to truth of formulas of \ensuremath{\mathrm{S}\omega\mathrm{S}}. The language of \ensuremath{\mathrm{S}\omega\mathrm{S}}{} contains two relation symbols $\le$ and $\preceq$, for the prefix ordering and the lexicographical ordering, respectively, and a constant $\Lambda$ for the empty word. \emph{Finiteness} is definable in \ensuremath{\mathrm{S}\omega\mathrm{S}}: $X$ is finite iff it has a largest element in the lexicographic ordering~$\preceq$. Let $x \le_1 y$ say that $y$ is an immediate successor of $x$.
Then we have:
\begin{eqnarray*} \mathfrak{T}ree(T) & = & \Lambda \in T \land \forall x \in T\, \forall y (y \le x \impl y \in T) \\ \mathrm{Prop}(T) & = & \forall x \in T\, \forall y (x \le y \impl y \in T) \\ \mathrm{Arity}_n(T) & = & \forall x \in T\, \exists^{= n} y (x \le_1 y) \textrm{ if $n < \omega$} \\ \mathrm{Fin}(T) & = & \exists x\, \forall y \in T\, (y \preceq x) \end{eqnarray*}
which say that $T$ is a tree (with root $\Lambda$), a proposition, has arity~$n$, and is finite, respectively. If $A$ is a formula of quantified propositional logic, define $A^x$ by:
\[\begin{array}{rclrcl} p^x & = & x \in X_p & (B \impl C)^x & = & \forall y \in T(x \le y \impl (B^y \impl C^y)) \\ \bot^x & = & \bot & (\forall p\, B)^x & = & \forall X_p((X_p \subseteq T \land \mathrm{Prop}(X_p)) \impl B^x) \\ (B \land C)^x & = & B^x \land C^x & (\exists p\, B)^x & = & \exists X_p(X_p \subseteq T \land \mathrm{Prop}(X_p) \land B^x),\\ (B \lor C)^x & = & B^x \lor C^x \end{array}\]
where $y$ is a new variable not previously used in the translation. Now let
\begin{eqnarray*} \Psi(A, \qp{\mathbf{Ht}}) & = & \forall T(\mathfrak{T}ree(T) \impl A^x[\Lambda/x]) \\ \Psi(A, \qp{\mathbf{Ht}_n}) & = & \forall T((\mathfrak{T}ree(T) \land \mathrm{Arity}_n(T)) \impl A^x[\Lambda/x]) \quad(n < \omega)\\ \Psi(A, \qp{\mathbf{Ht}^\mathrm{fin}}) & = & \forall T((\mathfrak{T}ree(T) \land \mathrm{Fin}(T)) \impl A^x[\Lambda/x]) \\ \Psi(A, \qp{\mathbf{Ht}_\omega}) & = & \forall T(\forall z(z \in T) \impl A^x[\Lambda/x]) \end{eqnarray*}
We may assume, without loss of generality, that $A$ is closed (no free propositional variables). We have to show that $\ensuremath{\mathrm{S}\omega\mathrm{S}} \models \Psi(A, \qp\L)$ iff $\qp\L \models A$. First, let $M = \langle \Lambda, K, \le, \phi\rangle$ be an \qp\L-model (obviously, we may assume that $\Lambda$ is the root). If $M, \Lambda \nmodels A$, then $M(A) \neq K$. Define a variable assignment~$s$ for second-order variables by $s(T) = K$. Then it is easy to see that $M(A) = \{x \in K: \ensuremath{\mathrm{S}\omega\mathrm{S}} \models A^x [s]\}$. Thus, $\Psi(A, \qp\L)$ is false in~\ensuremath{\mathrm{S}\omega\mathrm{S}}. Conversely, if $\ensuremath{\mathrm{S}\omega\mathrm{S}} \nmodels \Psi(A, \qp\L)$, then there is a counterexample witness~$X$ for the initial universal quantifier $\forall T$, which is a tree (in the respective class), $\Lambda \in X$, and $\ensuremath{\mathrm{S}\omega\mathrm{S}} \nmodels A^x[\Lambda/x] [s]$ for $s(T) = X$. (For the case of $\L = \qp{\mathbf{Ht}_\omega}$, $X = T_\omega$.) We show that for any $s$ with $s(T) = X$, the model $M = \langle \Lambda, X, \le, \phi\rangle$ with $\phi(p) = s(X_p)$ is such that $M(A) = \{x \in X: \ensuremath{\mathrm{S}\omega\mathrm{S}} \models A^x [s]\}$. This is obvious if $A = p$, $A = B \land C$ or $A = B \lor C$. Suppose $A = B \impl C$. Then $x \in M(A)$ iff for all $y \in X$ with $x \le y$, $y \notin M(B)$ or $y \in M(C)$. By induction hypothesis, $y \notin M(B)$ iff $\ensuremath{\mathrm{S}\omega\mathrm{S}} \nmodels B^y [s]$; similarly for $y \in M(C)$. So $x \in M(A)$ iff $\ensuremath{\mathrm{S}\omega\mathrm{S}} \models A^x [s]$.
If $A = \forall p\, B$, then $x \in M(A)$ iff for all propositions~$P$ in $X$, $x \in M[P/p](B)$. This is the case, by induction hypothesis, iff for all upward-closed subsets~$P$ of $X$, $\ensuremath{\mathrm{S}\omega\mathrm{S}} \models B^x [s']$ where $s'$ is like $s$ except $s'(X_p) = P$; but this is true just in case $\ensuremath{\mathrm{S}\omega\mathrm{S}} \models \forall X_p((X_p \subseteq T \land \mathrm{Prop}(X_p)) \impl B^x)$. (Similarly for the case of $A = \exists p\,B$.) Hence, if $A$ is closed and $\ensuremath{\mathrm{S}\omega\mathrm{S}} \nmodels \Psi(A, \qp\L)$, the structure $M = \langle \Lambda, X, \le, \phi\rangle$ is a countermodel for $A$.\qed
\end{pf}
\section{S4 and G\"odel-Dummett logics}
Modal logic \ensuremath{\mathbf{S4}}{} is closely related to intuitionistic logic, and its Kripke semantics is likewise based on partially ordered structures and trees. In the modal context, a proposition is any (not necessarily upward-closed) subset of the set of worlds. Adding quantifiers over propositions to \ensuremath{\mathbf{S4}}, we obtain the logic \qp{\ensuremath{\mathbf{S4}}}. Specifically, the semantics of \qp{\ensuremath{\mathbf{S4}}} is like that for \qp{\H}, except that an \emph{\ensuremath{\mathbf{S4}}-proposition} in $M$ is a subset $P \subseteq K$, and valuations $\phi$ map variables to \ensuremath{\mathbf{S4}}-propositions. We have the two modal operators $\Box$ and $\Diamond$. $M, h \models A$ is then defined by
\begin{enumerate}
\item $M, h \models p$ if $h \in \phi(p)$; $M, h \nmodels \bot$.
\item $M, h \models B \land C$ if $M, h \models B$ and $M, h \models C$.
\item $M, h \models B \lor C$ if $M, h \models B$ or $M, h \models C$.
\item $M, h \models B \impl C$ if $M, h \nmodels B$ or $M, h \models C$.
\item $M, h \models \Box B$ if for all $h' \ge h$, $M, h' \models B$.
\item $M, h \models \Diamond B$ if for some $h' \ge h$, $M, h' \models B$.
\item $M, h \models \forall p\, B$ if, for all propositions $P$, $M[P/p], h \models B$.
\item $M, h \models \exists p\, B$ if there is a proposition~$P$ so that $M[P/p], h \models B$.
\end{enumerate}
Depending on the class of Kripke structures considered, we obtain logics \qp{\ensuremath{\mathbf{S4}}}, \qp{\mathbf{S4t}}, \qp{\mathbf{S4t}_n}, \qp{\mathbf{S4t}^\mathrm{fin}} (for the class of partial orders, trees, $n$-ary trees, and finite trees, respectively). The McKinsey-Tarski $T$-embedding of \H{} into \ensuremath{\mathbf{S4}} \cite[Theorem 5.1]{McKinseyTarski:48} can be straightforwardly extended to the propositional quantifiers. For a formula $A$ in the language of \qp{\H}, define a formula $A^T$ of \qp{\ensuremath{\mathbf{S4}}} as follows:
\[ \begin{array}{rcl@{\qquad}rcl} p^T & = & \Box p & (B \impl C)^T & = & \Box(B^T \impl C^T)\\ \bot^T & = & \Box \bot & (\forall p\, B)^T & = & \forall p\, B^T \\ (B \land C)^T & = & B^T \land C^T & (\exists p\, B)^T & = & \exists p\, B^T\\ (B \lor C)^T & = & B^T \lor C^T \end{array}\]
\begin{prop} $\qp{\H} \models A$ iff $\qp{\ensuremath{\mathbf{S4}}} \models A^T$. \end{prop}
\begin{pf} Let $M = \langle g, K, \le, \phi\rangle$ be an intuitionistic structure, and suppose $M, h \nmodels A$. Consider the \ensuremath{\mathbf{S4}}-structure $M' = \langle g, K, \le, \phi'\rangle$ with $\phi'(p) = \phi(p)$. By induction on the complexity of formulas, $M', h \nmodels A^T$. Conversely, if $M' = \langle g, K, \le, \phi'\rangle$ is an \ensuremath{\mathbf{S4}}-structure and $M', h \nmodels A^T$, then $M'', h \nmodels A^T$, where $M'' = \langle g, K, \le, \phi''\rangle$ with $\phi''(p) = M'(\Box p)$.
\qed \end{pf}
Note that the order structure of $M$ and $M'$ was not changed, so the result holds also relative to any class of tree structures. We can therefore obtain separation results like those in Proposition~\ref{rels} for the propositionally quantified variants of \ensuremath{\mathbf{S4}}{} by considering the images under the $T$-embedding of the formulas $A$, $\neg B$, and $\neg\neg B$ from the proof of Proposition~\ref{rels}. \inlinecite{Fine:70} and \inlinecite{Kremer:93} showed that \qp{\ensuremath{\mathbf{S4}}}, like \qp{\H}, is not axiomatizable. By the same method used above, decidability can be established for the variants of \qp{\ensuremath{\mathbf{S4}}} based on trees.
\begin{prop} \qp{\mathbf{S4t}}, \qp{\mathbf{S4t}_n}, and \qp{\mathbf{S4t}^\mathrm{fin}} are decidable. \end{prop}
\begin{pf} We change the definition of $A^x$ as follows:
\[\begin{array}{rclrcl} p^x & = & x \in X_p & (\Diamond B)^x & = & \exists y \in T(x \le y \land B^y) \\ \bot^x & = & \bot & (\Box B)^x & = & \forall y \in T(x \le y \impl B^y) \\ (B \land C)^x & = & B^x \land C^x & (\forall p\,B)^x & = & \forall X_p(X_p \subseteq T \impl B^x) \\ (B \lor C)^x & = & B^x \lor C^x & (\exists p\,B)^x & = & \exists X_p(X_p \subseteq T \land B^x) \\ (B \impl C)^x & = & B^x \impl C^x \end{array}\]
(where $y$ is new.) The definition of $\Psi(A, \qp\L)$ and the proof that $\ensuremath{\mathrm{S}\omega\mathrm{S}} \models \Psi(A, \qp\L)$ iff $\qp\L \models A$ ($\L$ one of \qp{\mathbf{S4t}}, \qp{\mathbf{S4t}_n}, \qp{\mathbf{S4t}^\mathrm{fin}}) is the same as for the intuitionistic case, mutatis mutandis.\qed \end{pf}
Other logics which can be treated using the same method are G\"odel-Dummett logics. These logics were originally characterized as many-valued logics over subsets of~$[0, 1]$. Here, a \emph{valuation} is a mapping of propositional variables to truth values. A valuation~$v$ is extended to formulas by:
\[ \begin{array}{cc} \begin{array}{rcl} v(\bot) &=& 0 \\ v(A \land B) &=& \min(v(A), v(B)) \end{array} & \begin{array}{rcl} v(A \lor B) &=& \max(v(A), v(B))\\ v(A \impl B) &=& \left\{ \begin{array}{cl} 1 & {\rm if\ } v(A) \leq v(B) \\ v(B) & {\rm otherwise} \end{array}\right. \end{array} \end{array} \]
In the quantifier-free case, taking any infinite subset of $[0, 1]$ as the set of truth values results in the same set of tautologies, axiomatized by $\ensuremath{\mathbf{LC}} = \H + (A \impl B) \lor (B \impl A)$. This is no longer the case if we add propositional quantifiers. In the many-valued context, these can be introduced by:
\begin{eqnarray*} v(\exists p\, A) &=& \sup \{ v[w/p](A) : w \in V \}\\ v(\forall p\, A) &=& \inf \{ v[w/p](A) : w \in V\}, \end{eqnarray*}
where $v[w/p]$ is the valuation which is like $v$ except that it assigns the value~$w$ to $p$. The resulting class of tautologies depends on the order structure of $V \subseteq [0, 1]$. In fact, there are $2^{\aleph_0}$ different propositionally quantified G\"odel-Dummett logics. \ensuremath{\mathbf{LC}}{} is also characterized as the set of formulas valid on the infinite 1-ary tree $\mathfrak{T}_1$. The G\"odel-Dummett logic which corresponds to this characterization is $\ensuremath{\mathbf{G}}_\downarrow\pi$ based on the truth-value set $V_\downarrow = \{0\} \cup \{1/n : n \ge 1\}$, i.e., $\ensuremath{\mathbf{G}}_\downarrow\pi = \qp{\mathbf{Ht}_1}$ (\opencite{BaazZach:98}, Proposition 2.8).
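To make the quantifier clauses concrete, here is a small illustrative sketch (not taken from the text above): a naive evaluator for propositionally quantified formulas over a \emph{finite} truth-value set $V \subseteq [0,1]$, for which the supremum and infimum in the clauses for $\exists p$ and $\forall p$ reduce to maximum and minimum. The tuple encoding of formulas is a hypothetical choice made only for this example.
\begin{verbatim}
from fractions import Fraction

# Formulas are nested tuples (a hypothetical encoding chosen for this sketch):
#   ('var', 'p'), ('bot',), ('and', A, B), ('or', A, B), ('impl', A, B),
#   ('forall', 'p', A), ('exists', 'p', A)

def val(A, v, V):
    """Truth value of formula A under valuation v (a dict) over the finite set V."""
    tag = A[0]
    if tag == 'var':
        return v[A[1]]
    if tag == 'bot':
        return Fraction(0)
    if tag == 'and':
        return min(val(A[1], v, V), val(A[2], v, V))
    if tag == 'or':
        return max(val(A[1], v, V), val(A[2], v, V))
    if tag == 'impl':
        a, b = val(A[1], v, V), val(A[2], v, V)
        return Fraction(1) if a <= b else b
    if tag == 'forall':   # v(forall p A) = inf of v[w/p](A); min suffices for finite V
        return min(val(A[2], {**v, A[1]: w}, V) for w in V)
    if tag == 'exists':   # v(exists p A) = sup of v[w/p](A); max suffices for finite V
        return max(val(A[2], {**v, A[1]: w}, V) for w in V)
    raise ValueError(tag)

V = [Fraction(0), Fraction(1, 2), Fraction(1)]
neg = lambda A: ('impl', A, ('bot',))
# forall p (p or not p) evaluates to 1/2 over this three-valued set
print(val(('forall', 'p', ('or', ('var', 'p'), neg(('var', 'p')))), {}, V))
\end{verbatim}
The value $1/2$ (attained at $p = 1/2$) shows that $\forall p(p \lor \neg p)$ is not valid in this many-valued semantics.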
By contrast, the intersection of all finite-valued G\"odel-Dummett logics coincides with $\ensuremath{\mathbf{G}}_\uparrow\pi$ with truth-value set $V_\uparrow = \{1\} \cup \{1-1/n : n \ge 1\}$. Since $\ensuremath{\mathbf{G}}_\uparrow\pi \neq \ensuremath{\mathbf{G}}_\downarrow\pi$, this shows that the class of formulas valid on the infinite 1-ary tree is not identical to the class of formulas valid on all 1-ary trees of finite height. This latter logic was studied and axiomatized by \inlinecite{BCZ:00}.
\section{Conclusion}
As noted in the introduction, the notion of trees we consider is the only one which accords with Kripke's intuitive interpretation of intuitionistic model structures. It might nevertheless be interesting to consider more general classes of trees (i.e., partial orders with least element and where $h \not\le h'$ and $h' \not\le h$ guarantee that for no $g$ is $h, h' \le g$), or well-founded trees (every branch is well-ordered). The problem of the complexity of the resulting quantified propositional logics on such structures, however, remains open. It is not known whether the monadic second-order theory of such partial orders is decidable; in fact, it most likely is not. If it were, however, the reduction given here would immediately yield the decidability results for the quantified propositional logics on such structures. We can also easily obtain further decidability results for logics based on classes of trees which are definable in the language of \ensuremath{\mathrm{S}\omega\mathrm{S}}. This includes, e.g., trees of finite arity, trees of finite height, and trees of arity or height $\le n$ for some~$n$. \end{article} \end{document}
\begin{document} \title{SUPPRESSING THE SPURIOUS STATES OF THE CENTRE OF MASS} \author{P. Di\c t\u a and L. Micu} \affiliation{Department of Theoretical Physics\\ Horia Hulubei Institute for Physics and Nuclear Engineering\\ Bucharest POB MG-6, RO 077125} \thanks{[email protected]} \pacs{03.65.Ge; 21.60.Cs}
\begin{abstract} Following Dirac's ideas concerning the quantization of constrained systems, we suggest replacing the free centre of mass Hamiltonian $H_{CM}$ by another operator which commutes with all the elements of the algebra generated via the commutation relations by $H_{CM}$ and the constraints which fix the centre of mass position. We show that the new Hamiltonian is a multiple of the identity operator and, as a result, its unique effect is to raise the internal energy levels by a constant amount. \end{abstract} \maketitle \section*{}
In classical and nonrelativistic quantum mechanics (see e.g. \cite{am}) the first step in the standard approach to the two-body bound-state problem is to replace the individual variables $(\vec{r}_{1},~\vec{p}_{1}),~(\vec{r}_{2},~\vec{p}_{2})$ in the two-body Hamiltonian by the centre of mass variables $(\vec{R},\vec{P})$ and the relative ones $(\vec{r},\vec{p})$, where
\begin{eqnarray} &&\vec{R}=m_1/(m_1+m_2)~\vec{r}_1+m_2/(m_1+m_2)~\vec{r}_2\nonumber\\ &&\vec{P}=\vec{p}_1+\vec{p}_2\nonumber\\ &&\vec{r}=\vec{r}_1-\vec{r}_2\nonumber\\ &&\vec{p}=m_2/(m_1+m_2)~\vec{p}_1-m_1/(m_1+m_2)~\vec{p}_2.\nonumber \end{eqnarray}
Then, if the interaction potential $V$ depends only on the relative coordinates, the Hamiltonian separates into the free centre of mass part $H_{CM}$ and an internal Hamiltonian $H_{int}$:
\begin{equation}\label{H} H=H_{CM}+H_{int}=\frac{\vec{P}^2}{2M}+\frac{\vec{p}^2}{2m_r}+V(r) \end{equation}
where $M=m_1+m_2$ and $m_r=m_1m_2/(m_1+m_2)$. By this mathematical trick the two-body problem separates into two independent single-particle problems for two fictitious particles: the centre of mass problem and the relative problem. The first is the problem of a free particle of mass $M$ whose coordinates are those of the centre of mass, and the second is the problem of a particle with the reduced mass in the potential well $V(r)$. We mention that the separation procedure \cite{am} applied to an $N$-body Hamiltonian gives a result similar to (\ref{H}). The centre of mass Hamiltonian has the same form as $H_{CM}$ where now $M=\sum_{i=1}^Nm_i$ and $H_{int}$ depends only on translationally invariant (intrinsic) coordinates.
In the following we consider the centre of mass problem in classical and quantum mechanics in the case of a bound system at rest, {\it i.e.} when the position of the centre of mass is fixed. In the Hamiltonian formulation of classical mechanics this condition takes the form
\begin{equation}\label{cm} \dot{\vec{R}}(t)=\{H_{CM},\vec{R}(t)\}=\frac{1}{M}\vec{P}=0 \end{equation}
where $\{\cdot,\cdot\}$ is the Poisson bracket. This means that $\vec{R}(t)=\vec{R}_0$ is equivalent to $\vec{P}=0$ and hence the classical Hamiltonian which takes into account the constraint reads
\begin{equation}\label{Hrest} H=\frac{\vec{p}^2}{2m_r}+V(\vec{r}). \end{equation}
In quantum mechanics the classical Hamiltonian $H$ is replaced by an operator ${\mathcal H}$ whose expression is obtained with the aid of the correspondence principle. If $\mathcal H$ does not explicitly depend on time, the state of the physical system is described by a stationary wave function $\Psi(\vec{R},\vec{r})$.
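The kinetic part of the separation (\ref{H}) can be checked symbolically. The following minimal sketch (an illustration only; it assumes the {\tt sympy} library and works with a single Cartesian component of the momenta) verifies that the individual kinetic energies add up to the centre of mass term plus the relative term:
\begin{verbatim}
import sympy as sp

m1, m2 = sp.symbols('m1 m2', positive=True)
p1, p2 = sp.symbols('p1 p2', real=True)   # one Cartesian component of each momentum

M  = m1 + m2                              # total mass
mr = m1*m2/(m1 + m2)                      # reduced mass
P  = p1 + p2                              # centre of mass momentum
p  = (m2*p1 - m1*p2)/(m1 + m2)            # relative momentum

kinetic_individual = p1**2/(2*m1) + p2**2/(2*m2)
kinetic_separated  = P**2/(2*M) + p**2/(2*mr)

# The difference simplifies to zero, i.e. the kinetic energy separates exactly.
print(sp.simplify(kinetic_individual - kinetic_separated))
\end{verbatim}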
Following the separation procedure outlined before, the stationary wave function is the product of the centre of mass wave function $\psi_{CM}(\vec{R})$ and the internal wave function $\psi_{int}(\vec{r})$, which satisfy the Schr\"odinger equations
\begin{equation}\label{cm1} (H_{CM}-E_{CM})\psi_{CM}(\vec{R})=0 \end{equation}
and
\begin{equation} (H_{int}-E_{int})\psi_{int}(\vec{r})=0 \end{equation}
with specific boundary conditions. Considering the centre of mass equation (\ref{cm1}) we notice the following: If $\psi_{CM}(\vec{R})$ is an eigenfunction of $H_{CM}$, from the normalization condition it results that the probability to find the centre of mass in any finite volume $v$ is $v/[(2\pi)^3V]$ and tends to zero when $V\to\infty$, which does not agree with the real situation. On the contrary, if we require a certain localization and replace the classical condition $\vec{R}(t)=\vec{R}_0$ by the confinement of the centre of mass to a 3-dimensional box of side $l$ centred at $\vec{R}_0$, the translational invariance is destroyed. Moreover, solving the centre of mass equation (\ref{cm1}) with Dirichlet boundary conditions we get an infinite set of states
\begin{equation}\label{fct} \psi^{\{n\}}_{CM}(\vec{R})=\psi_{n_1}(X-X_0)\,\psi_{n_2}(Y-Y_0)\,\psi_{n_3}(Z-Z_0) \end{equation}
where $\psi_{n_i}(x)=\sin[n_i\pi(1/2+x/l)],~n_i=1,2,3,\dots$ and the energy of the centre of mass is
\begin{equation}\label{ecm} E_{CM}^{\{n\}}=\frac{\pi^2(n_1^2+n_2^2+n_3^2)}{2M~l^2}. \end{equation}
We remark that the smaller $l$ is, the higher the kinetic energy of the centre of mass of a system at rest! Obviously, these states are the result of the incompatibility of the position and momentum observables in quantum mechanics, not of a dynamical scheme. They have no classical analog, have not been observed experimentally, and hence are spurious states whose contribution must be eliminated from the final results.
In the two-body case the centre of mass problem is usually ignored \cite{am}, the interest being focused on the internal wave function $\psi_{int}$ and on the internal energy levels $E_{int}$. The problem has been extensively studied in connection with nuclear many-body models \cite{rs}, where the achievement of a translationally invariant, independent-particle picture requires a clear separation of the centre of mass wave function and energy from the internal ones. The solution proposed by Lipkin et al. \cite{lst} is to introduce in the internal Hamiltonian the contribution of the redundant (or superfluous) coordinates which ensure the independent treatment of the internal particles. These generate a new term in the internal Hamiltonian and new spurious solutions. The additional term in $H_{int}$ compensates the kinetic energy of the centre of mass in the approximation of equal masses, but finally its contribution has to be subtracted from $E_{int}$. The separation of the spurious solutions requires special techniques which, except in the case of the harmonic oscillator, are rather involved (see, e.g., Ref.\, \cite{rs} (par. 11.3), \cite{lst}, \cite{l}, \cite{pt}).
In this paper we propose a new solution. We are concerned only with the centre of mass Hamiltonian, so that the result is the same for any $N$-body bound system. Taking into account Dirac's conclusions concerning the quantization of constrained systems, quoted in his books (see Ref.
\cite{pamd}): {\it (i)} The Hamiltonian of a constrained system is not uniquely defined; {\it (ii)} The commutators (or the Poisson brackets) of the ``naive'' Hamiltonian with the constraints represent new constraints;
\noindent we suggest replacing the free centre of mass Hamiltonian by another operator which is compatible with the constraints. As demonstrated by one of us in \cite{pd}, an operator of this kind exists if the constraints are compatible and if the Poisson-Lie algebra generated by commutation or Poisson brackets from the naive Hamiltonian and the constraints is finite dimensional. It is a Casimir invariant of the algebra or a convenient element of its centre. This is the ``right'' Hamiltonian which has to be used in the quantization of constrained systems, since it commutes with the constraints and hence ensures their conservation in time. As can be seen from some simple examples, the right Hamiltonian actually ``absorbs'' the constraints, so that they are identically satisfied by the solutions of the equation of motion. It is important to notice that the procedures leading to the right Hamiltonian in classical and quantum mechanics are similar, so that the Hamiltonians obtained in this way are in perfect agreement.
A clear example in this sense is the motion of a particle on an $n$-dimensional sphere \cite{pd}, where the free Hamiltonian and the constraint read
\begin{equation} H=\frac{1}{2m}\sum_{i=1}^{n+1}p_i^2,~~~~~U=\frac{1}{r_0^2}\sum_{i=1}^{n+1}x_i^2-1=0. \end{equation}
$H$, $U$ and their Poisson brackets (or their commutators) generate a closed algebra
\begin{equation}\label{alg} \{H,U\}=2V,~~\{H,V\}=\frac{2}{mr_0^2}H,~~\{V,U\}=\frac{2}{mr_0^2}U, \end{equation}
which includes the new element
\begin{equation} V=\frac{1}{mr_0^2}\sum_{i=1}^{n+1}x_ip_i. \end{equation}
In this particular case, the right Hamiltonian is the Casimir invariant of the Poisson-Lie algebra (\ref{alg}) and has the form
\begin{equation} {\mathcal H}=(U+1)H-\frac{mr_0^2}{2}V^2=\frac{1}{2I}\sum_{i<j}^{n+1}L_{ij}^2 \end{equation}
where $L_{ij}=x_ip_j-x_jp_i$ are the components of the angular momentum and $I=mr_0^2$ is the moment of inertia. One easily checks that $\{{\mathcal H},H\}=\{{\mathcal H},U\}=\{{\mathcal H},V\}=0$, which means that all the constraints are preserved in time.
We apply the same procedure to the case of the centre of mass Hamiltonian. We consider the algebra generated by $H_{CM}$, the constraint $\vec{R}-\vec{R}_0=0$ and the elements obtained by commutation. We get the new elements
\begin{equation}\label{com} [H_{CM},\vec{R}]=i\frac{1}{M}\hat{\vec{P}} \end{equation}
and
\begin{equation}\label{M} [\hat{\vec{P}},\vec{R}]=-i{\bf I} \end{equation}
where ${\bf I}$ is the identity operator. The algebra closes under commutation and has a centre made of the multiples of the identity operator, which commute with all the elements. According to our prescription, a suitable element of this centre having the dimension of energy is the right centre of mass Hamiltonian in the quantum mechanical framework. This is an expected result, because the algebra generated by $\hat{\vec{P}},~\vec{R}$ is irreducible and hence, according to Schur's lemma, the only operator commuting with all its elements must be a multiple of ${\bf I}$. Then, in agreement with the classical result (\ref{Hrest}), the right Hamiltonian reads \begin{equation}\label{Hnew} {\mathcal H}=f(M)+H_{int}.
\end{equation}
From the absence of the centre of mass variables in (\ref{Hnew}) we infer that there is no centre of mass wave function and no spurious energy levels. The translational invariance is restored at the quantum level and the state of the bound system is described by the solution $\psi_{int}$ of the Schr\"odinger equation
\begin{equation}\label{ecS} \left(f(M)+H_{int}-{\mathcal E}\right)\psi_{int}(\vec{r})=0. \end{equation}
In conclusion, the unique effect of the right Hamiltonian is to raise the energy levels of $H_{int}$ by a constant amount denoted by $f(M)$. It cannot be directly measured, but on physical grounds one may suppose that $f(M)=M$. In this case ${\mathcal E}\to M$ when the strength of the interaction potential $V(\vec{r})$ tends to 0, and hence the negative eigenvalues $E_{int}$ of the internal Hamiltonian acquire the real meaning of a ``mass defect'' generated by the mutual attraction of the internal bodies. The conclusion may be immediately extended to an isolated $N$-body system at rest, where the right centre of mass Hamiltonian is $H_{CM}=\sum_{i=1}^{N}m_i$.
In closing, we mention that our result is the first step in the attempt to obtain a translationally invariant, independent-particle picture of bound systems where the place of the redundant variables is taken by the effective variables of the internal field. This is the subject of a forthcoming paper.
\begin{acknowledgments} The authors thank Fl. Stancu and H. Scutaru for valuable remarks and encouragement. The financial support from the Romanian Ministry of Education and Research under the contract PN06 35 01 01 is acknowledged. \end{acknowledgments} \vskip0.5cm \end{document}
\begin{document} \maketitle
{\let\thefootnote\relax \footnote{\hskip-1.2em \textbf{Key-words:} big and pseudoeffective adjoint divisors; stable, non-ample and non-nef base locus; rational curves.\\ \noindent \textbf{A.M.S.~classification:} 14J40. }}
\numberwithin{equation}{section}
\begin{abstract} We explain how to deduce from recent results in the Minimal Model Program a general uniruledness theorem for base loci of adjoint divisors. We also show how to recover special cases by extending a technique introduced by Takayama. \end{abstract}
\section*{Introduction}
Let $X$ be a normal projective variety defined over ${\mathbb C}$ (or any algebraically closed field of characteristic $0$) and let $D$ be an ${\mathbb R}$-divisor on $X$ (where ${\mathbb R}$-divisor will mean ${\mathbb R}$-Cartier ${\mathbb R}$-divisor unless otherwise specified). Following~\cite{BCHM} one introduces the (real) \emph{stable base locus} of $D$ as
\begin{equation}\label{equ:stable} {\mathbb B}(D):=\bigcap\{\mathop{\rm Supp}\nolimits E,\,E\text{ effective }{\mathbb R}\text{-divisor},\,E\sim_{\mathbb R} D\}, \end{equation}
where $E\sim_{\mathbb R} D$ means that $E$ is ${\mathbb R}$-linearly equivalent to $D$, i.e. $E-D$ is an ${\mathbb R}$-linear combination of principal divisors $\mathrm{div}(f),\,f\in{\mathbb C}(X)$. When $D$ is a ${\mathbb Q}$-divisor ${\mathbb B}(D)$ coincides with the usual stable base locus (cf.~Proposition~\ref{prop:stable} below). As in~\cite{ELMNP1} one then defines the \emph{augmented base locus} of $D$ by
\begin{equation}\label{equ:augmented} {\mathbb B}_+(D):=\bigcap_{m>0}{\mathbb B}(D-\tfrac{1}{m}A) \end{equation}
and the \emph{restricted base locus} of $D$ by
\begin{equation}\label{equ:restricted} {\mathbb B}_-(D):=\bigcup_{m>0}{\mathbb B}(D+\tfrac{1}{m}A) \end{equation}
where $A$ is an ample divisor, the definition being independent of $A$. We thus have the inclusions
$$ {\mathbb B}_-(D)\subset{\mathbb B}(D)\subset{\mathbb B}_+(D). $$
The augmented base locus ${\mathbb B}_+(D)$ is Zariski closed and satisfies
$$ {\mathbb B}_+(D)\subsetneq X\Longleftrightarrow D\text{ big}, $$
$$ {\mathbb B}_+(D)=\emptyset\Longleftrightarrow D\text{ ample}. $$
Augmented base loci are also known as \emph{non-ample loci} and have been extensively studied in relation to the asymptotic behavior of linear series (see~\cite{Nak,ELMNP1,ELMNP2} and~\cite{B} for the analytic counterpart). The restricted base locus ${\mathbb B}_-(D)$ is an at most countable union of Zariski closed sets; it might not be Zariski closed in general, even though no specific example seems to be known for the moment. We have
$$ {\mathbb B}_-(D)\subsetneq X\Longleftrightarrow D\text{ pseudoeffective}, $$
$$ {\mathbb B}_-(D)=\emptyset\Longleftrightarrow D\text{ nef}. $$
On the other hand the \emph{non-nef locus} $\nn(D)$ of an ${\mathbb R}$-divisor $D$~\cite{B,BDPP}, called the \emph{numerical base locus} in~\cite{Naka}, is defined in terms of the asymptotic or numerical vanishing orders attached to $D$ (cf. Definition~\ref{defn:nonnef} below).
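For orientation, consider the following standard example (included only as an illustration of the three loci): let $\pi:X\to{\mathbb P}^2$ be the blow-up of a point, let $E\subset X$ be the exceptional curve and set $D:=\pi^*H$, the pull-back of a line. Then $D$ is big and nef but not ample, and the linear system $|D|$ is base point free, so that
$$ {\mathbb B}_-(D)={\mathbb B}(D)=\emptyset \qquad\text{while}\qquad {\mathbb B}_+(D)=E, $$
showing that the three loci may indeed differ.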
We always have
$$ \nn(D)\subset{\mathbb B}_-(D) $$
and equality was shown to hold when $X$ is smooth in~\cite{ELMNP1}, but seems to be unknown when $X$ is an arbitrary normal variety. The goal of the present paper is to investigate the uniruledness properties of the above loci in the case of adjoint divisors. After having collected basic facts in Section 1, we explain in Section 2 how to obtain the following general result using known parts of the Minimal Model Program~\cite{Kaw,BCHM}.
\begin{thmA} Let $X$ be a normal projective variety and let $\Delta$ be an effective ${\mathbb R}$-Weil divisor such that $(X,\Delta)$ is klt.
\begin{enumerate}
\item[(i)] We have $\nn(K_X+\Delta)={\mathbb B}_-(K_X+\Delta)$ and each of its irreducible components is uniruled.
\item[(ii)] If $K_X+\Delta$ is furthermore big then
$$ \nn(K_X+\Delta)={\mathbb B}_-(K_X+\Delta)={\mathbb B}(K_X+\Delta) $$
and every irreducible component of ${\mathbb B}_+(K_X+\Delta)$ is uniruled as well.
\end{enumerate}
\end{thmA}
As already noticed in~\cite{taka2} the above uniruledness results both fail in general in the more general case where $(X,\Delta)$ has log-canonical singularities, even in the log-smooth case (cf. Example 6.4). The special case of Theorem A where $X$ is smooth and either $K_X$ or $\Delta$ vanishes was obtained by S.~Takayama in~\cite{taka2} by a completely different (and more direct) method, which combined his extension result for log-pluricanonical forms (see \cite[Theorem 4.5]{taka}) with the characterization of uniruled varieties in terms of the non-pseudoeffectivity of the canonical class~\cite{BDPP}. In Section 3 we show more generally how to obtain, using Takayama's method, the following special cases of Theorem A.
\begin{thmB} Let $X$ be a smooth projective variety and $L$ a line bundle on $X$ such that either $-K_X$ or $L-K_X$ is nef.
\begin{enumerate}
\item[(i)] If $L$ is pseudoeffective, then every irreducible component of the non-nef locus ${\mathbb B}_-(L)$ is uniruled.
\item[(ii)] If $L$ is furthermore big, then every irreducible component of the stable base locus ${\mathbb B}(L)$ or of the non-ample locus ${\mathbb B}_+(L)$ is uniruled.
\end{enumerate}
\end{thmB}
{\bf Acknowledgements.} The authors would like to thank St\'ephane Druel for interesting exchanges related to this work. Part of this work was done by G.P. during his stay at the Universit\`a di Roma ``La Sapienza''. He wishes to thank Kieran O'Grady for making this stay very pleasant and stimulating and for providing the financial support. A.B. thanks Laurent Bonavero for stimulating conversations on this subject.
\section{Preliminaries}\label{S:prel}
Unless otherwise specified we will use the standard notation, definitions and terminology (cf. for instance \cite{KM}). By convention divisor (resp. ${\mathbb Q}$-divisor, ${\mathbb R}$-divisor) will mean Cartier divisor (resp. ${\mathbb Q}$-Cartier, ${\mathbb R}$-Cartier) unless otherwise specified.
\subsection{Approximation by ${\mathbb Q}$-divisors}
Let $X$ be a normal projective variety. Recall that the stable base locus of a ${\mathbb Q}$-divisor, which we temporarily denote by ${\mathbb B}_{\mathbb Q}(D)$, can be described as follows:
$$ {\mathbb B}_{\mathbb Q}(D):=\bigcap\{\mathop{\rm Supp}\nolimits E,\,\,E\text{ effective }{\mathbb Q}\text{-divisor},\,E\sim_{\mathbb Q} D\}.
$$
\begin{prop}\label{prop:stable} Let $D$ be a ${\mathbb Q}$-divisor on $X$. Then its real stable base locus ${\mathbb B}(D)$ defined by (\ref{equ:stable}) coincides with the usual stable base locus ${\mathbb B}_{\mathbb Q}(D)$. \end{prop}
\begin{proof} It is obvious that ${\mathbb B}(D)\subset{\mathbb B}_{\mathbb Q}(D)$. Conversely let $E$ be an effective ${\mathbb R}$-divisor such that $E\sim_{\mathbb R} D$. By Lemma~\ref{lem:approx} below we may find an effective ${\mathbb Q}$-divisor $E'\sim_{\mathbb R} D$ with the same support as $E$ and the result follows. \end{proof}
\begin{lem}\label{lem:approx} Let $D$ be a ${\mathbb Q}$-Cartier divisor and let $E$ be an effective ${\mathbb R}$-Cartier divisor such that $E\sim_{\mathbb R} D$. Then $E$ may be written as a (coefficient-wise) limit of effective ${\mathbb Q}$-Cartier divisors $E_j$ with the same support as $E$ and such that $E_j\sim_{\mathbb R} D$. \end{lem}
\begin{proof} Denote by $W_{\mathbb R}(X)\supset C_{\mathbb R}(X)$ the space of ${\mathbb R}$-Weil divisors and the subspace of ${\mathbb R}$-Cartier divisors, respectively. Let $V$ be the finite dimensional ${\mathbb R}$-vector subspace of $W_{\mathbb R}(X)$ spanned by the irreducible components of $E$. Then $V$ is defined over ${\mathbb Q}$, and so is the affine space of all ${\mathbb R}$-Cartier divisors linearly equivalent to $D$, since the latter is a ${\mathbb Q}$-divisor. As a consequence
$$ W:=V\cap\{F\in C_{\mathbb R}(X),\,F\sim_{\mathbb R} D\} $$
is an affine subspace of $V$ defined over ${\mathbb Q}$. Since $W$ contains $E$, the latter may then be approximated inside $V$ by elements of $W\cap V({\mathbb Q})$, which yields the result. \end{proof}
\subsection{Augmented base loci}
We collect in this section some preliminary results regarding augmented base loci. We shall use the following common terminology.
\begin{defn}[Kodaira decompositions] Let $X$ be a normal projective variety and $D$ be a big ${\mathbb R}$-divisor on $X$. A \emph{Kodaira decomposition} of $D$ is a decomposition $D=A+E$ into ${\mathbb R}$-divisors with $A$ ample and $E$ effective. \end{defn}
By (\cite{ELMNP1}, Remark 1.3) the augmented base locus of a big ${\mathbb R}$-divisor $D$ can be described as
\begin{equation}\label{equ:bplus} {\mathbb B}_+(D):=\bigcap_{D=A+E}\mathop{\rm Supp}\nolimits E, \end{equation}
where the intersection runs over all Kodaira decompositions of $D$. The following result shows that one obtains the same locus by allowing Kodaira decompositions on birational models.
\begin{lem}\label{lem:birb} Let $X$ be a normal projective variety and let $D$ be a big ${\mathbb R}$-divisor on $X$. Then its augmented base locus satisfies
$$ {\mathbb B}_+(D)=\bigcap_{\pi^*D=A+E}\pi(\mathop{\rm Supp}\nolimits E) $$
where $\pi$ ranges over all birational morphisms $X'\to X$ and $\pi^*D=A+E$ over all Kodaira decompositions of $\pi^*D$ on $X'$. \end{lem}
\begin{proof} In view of (\ref{equ:bplus}) it is clear that
$$ \bigcap_{\pi^*D=A+E}\pi(\mathop{\rm Supp}\nolimits E)\subset{\mathbb B}_+(D). $$
Consider conversely a birational morphism $\pi:X'\to X$ and a Kodaira decomposition $$\pi^*D=A+E$$ on $X'$ and let $x\in X-\pi(\mathop{\rm Supp}\nolimits E)$.
We have to show that $x\in X-{\mathbb B}_+(D)$. Since $E=\pi^*D-A$ is both effective and $\pi$-antiample, its support must contain every curve contracted by $\pi$, i.e. the exceptional locus $\mathrm{Exc}(\pi)$ is contained in $\mathop{\rm Supp}\nolimits E$. Since $x\notin\pi(\mathop{\rm Supp}\nolimits E)$ it follows that there is a unique preimage $x'$ of $x$ by $\pi$ and that $x'\notin\mathop{\rm Supp}\nolimits E$. Now let $B$ be a small enough ample divisor on $X$, so that $A-\pi^*B$ is ample on $X'$. We then have ${\mathbb B}(A-\pi^*B)=\emptyset$, which means that there exists an effective ${\mathbb R}$-divisor $F$ on $X'$ with
$$ F\sim_{\mathbb R} A-\pi^*B $$
and such that $x'\notin\mathop{\rm Supp}\nolimits F$. As a consequence $x'$ doesn't belong to the support of the effective ${\mathbb R}$-divisor $G':=E+F$. Since $G'$ is ${\mathbb R}$-linearly equivalent to $\pi^*(D-B)$ there exists an effective ${\mathbb R}$-divisor $G\sim_{\mathbb R} D-B$ on $X$ such that $\pi^*G=G'$, and $x'\notin\mathop{\rm Supp}\nolimits G'$ implies $x=\pi(x')\notin\mathop{\rm Supp}\nolimits G$. We have thus constructed a Kodaira decomposition $D=B+G$ with $x\notin\mathop{\rm Supp}\nolimits G$, which shows that $x\notin{\mathbb B}_+(D)$ as desired. \end{proof}
The next result describes the behavior of augmented base loci under birational transforms.
\begin{prop}\label{prop:biratB+} Let $\pi : X\rightarrow Y$ be a birational morphism between normal projective varieties. Then for any big ${\mathbb R}$-divisor $D$ on $Y$ and any effective $\pi$-exceptional ${\mathbb R}$-divisor $F$ on $X$ we have
$$ {\mathbb B}_+(\pi^*D+F)=\pi^{-1}({\mathbb B}_+(D))\cup\mathrm{Exc}(\pi).$$
\end{prop}
\begin{proof} Let $x\in X-{\mathbb B}_+(\pi^*D+F)$, so that there exists a Kodaira decomposition $$\pi^*D+F=A+E$$ with $x\notin\mathop{\rm Supp}\nolimits E$. Then $G:=E-F$ is $\pi$-antiample and $\pi_*G=\pi_*E$ is effective since $F$ is $\pi$-exceptional, thus the so-called ``negativity lemma'' (\cite{KM}, Lemma 3.39) shows that $G$ is effective. Since it is also $\pi$-antiample it must contain $\mathrm{Exc}(\pi)$ in its support. We thus get a Kodaira decomposition
$$ \pi^*D=A+G $$
such that $\pi(x)\notin\pi(\mathop{\rm Supp}\nolimits G)$, and Lemma \ref{lem:birb} implies that $\pi(x)\notin{\mathbb B}_+(D)$. This shows that
$$ \pi^{-1}({\mathbb B}_+(D))\cup\mathrm{Exc}(\pi)\subset{\mathbb B}_+(\pi^*D+F). $$
In order to prove the reverse inclusion we first consider the special case where $D=A$ is an ample ${\mathbb Q}$-Cartier divisor on $Y$ and $F=0$. Our goal is then to show that ${\mathbb B}_+(\pi^*A)\subset\mathrm{Exc}(\pi)$. Pick $x\notin\mathrm{Exc}(\pi)$ and choose a hyperplane section $H$ of $X$ such that $x\notin H$. Since $\pi$ is an isomorphism above $\pi(x)$ it follows that $\pi(x)$ doesn't belong to the zero locus of the ideal sheaf ${\mathcal I}:=\pi_*{\mathcal O}_X(-H)$.
If we choose $k$ sufficiently large and divisible then ${\mathcal O}_Y(kA)\otimes{\mathcal I}$ is globally generated since $A$ is an ample ${\mathbb Q}$-divisor, and we get the existence of a section in $H^0(Y,{\mathcal O}_Y(kA)\otimes{\mathcal I})$ that doesn't vanish at $\pi(x)$, hence a section $s\in H^0(X,k\pi^*A-H)$ with $s(x)\neq 0$, which indeed shows that $x\notin{\mathbb B}_+(\pi^*A)$. We now treat the general case. We thus pick $x\in X-\mathrm{Exc}(\pi)$ such that $\pi(x)\notin{\mathbb B}_+(D)$, and we have to show that $x\notin{\mathbb B}_+(\pi^*D+F)$. Since $\pi(x)\notin{\mathbb B}_+(D)$ there exists a Kodaira decomposition
$$ D=A+E $$
with $\pi(x)\notin\mathop{\rm Supp}\nolimits E$, and we may assume that $A$ is ${\mathbb Q}$-Cartier by Lemma \ref{lem:approx}. By the special case treated above we have ${\mathbb B}_+(\pi^*A)\subset\mathrm{Exc}(\pi)$, so that there exists a Kodaira decomposition
$$ \pi^*A=B+G $$
with $B$ ample and $x\notin\mathop{\rm Supp}\nolimits G$. Putting everything together yields a Kodaira decomposition
$$ \pi^*D+F=B+(G+\pi^*E+F) $$
with $x\notin\mathop{\rm Supp}\nolimits(G+\pi^*E+F)$, which concludes the proof. \end{proof}
\subsection{Restricted base loci vs. non-nef loci}\label{sec:nonnef}
Let $D$ be a big ${\mathbb R}$-divisor on the normal projective variety $X$. Given a divisorial valuation $v$ on $X$ we may define the \emph{numerical vanishing order} of $D$ along $v$ by
$$ \mathop{v_{\rm num}}\nolimits(D):=\inf\{v(E),\,E\text{ effective }{\mathbb R}\text{-divisor},\,E\equiv D\}, $$
where $\equiv$ denotes numerical equivalence. It also satisfies
\begin{equation}\label{equ:vn} \mathop{v_{\rm num}}\nolimits(D)=\inf\{v(E),\,E\text{ effective }{\mathbb R}\text{-divisor},\,E\sim_{\mathbb R} D\} \end{equation}
by~\cite{ELMNP1} Lemma 3.3. The induced function on the open convex cone
$$ \text{Big}(X)\subset N^1(X) $$
of big classes is homogeneous and convex, hence continuous and sub-additive. When $D$ is a pseudoeffective ${\mathbb R}$-divisor we set following~\cite{Naka,B}
\begin{equation}\label{equ:num} \mathop{v_{\rm num}}\nolimits(D):=\lim_{\varepsilon\to 0}\mathop{v_{\rm num}}\nolimits(D+\varepsilon A) \end{equation}
with $A$ ample. This is easily seen to be independent of the choice of $A$. As shown in~\cite{Naka,B} the corresponding function on the pseudoeffective cone
$$ \text{Psef}(X)=\overline{\text{Big}(X)}\subset N^1(X) $$
is lower semicontinuous, but \emph{not} continuous up to the boundary of the pseudoeffective cone in general (cf. \cite{Naka} Example 2.8 p.135), and a pseudoeffective ${\mathbb R}$-divisor $D$ is nef iff $\mathop{v_{\rm num}}\nolimits(D)=0$ for every divisorial valuation $v$.
\begin{lem}\label{lem:bir} Let $\pi:X'\to X$ be a birational morphism and let $D$ be a pseudoeffective ${\mathbb R}$-divisor on $X$. Then we have $$\mathop{v_{\rm num}}\nolimits(\pi^*D)=\mathop{v_{\rm num}}\nolimits(D)$$ for every divisorial valuation $v$. \end{lem}
\begin{proof} This is clear when $D$ is big by (\ref{equ:vn}). Let now $D$ be pseudoeffective and pick an ample divisor $A$ on $X$.
For every $\varepsilon>0$, $D+\varepsilon A$ is big, thus we have
$$ \mathop{v_{\rm num}}\nolimits(D+\varepsilon A)=\mathop{v_{\rm num}}\nolimits(\pi^*D+\varepsilon\pi^*A)\le\mathop{v_{\rm num}}\nolimits(\pi^*D) $$
by subadditivity of $\mathop{v_{\rm num}}\nolimits$ since $\mathop{v_{\rm num}}\nolimits(\varepsilon\pi^*A)=0$, $\pi^*A$ being nef. On the other hand, the lower semicontinuity of $\mathop{v_{\rm num}}\nolimits$ on $\text{Psef}(X')$ implies that
$$ \mathop{v_{\rm num}}\nolimits(\pi^*D)\le\liminf_{\varepsilon\to 0}\mathop{v_{\rm num}}\nolimits(\pi^*D+\varepsilon\pi^*A) $$
and the result follows. \end{proof}
\begin{defn}\label{defn:nonnef} Let $D$ be an ${\mathbb R}$-divisor on $X$. When $D$ is pseudoeffective, the \emph{non-nef locus}~\cite{B} (or \emph{numerical base locus}~\cite{Naka}) of $D$ is defined by
$$\nn(D):=\bigcup\{c_X(v),\,\mathop{v_{\rm num}}\nolimits(D)>0\},$$
where $c_X(v)$ denotes the center on $X$ of a given divisorial valuation $v$. When $D$ is not pseudoeffective one sets $\nn(D)=X$. \end{defn}
The non-nef locus is always contained in the restricted base locus:
\begin{lem} For every ${\mathbb R}$-divisor $D$ we have
$$ \nn(D)\subset{\mathbb B}_-(D). $$
\end{lem}
\begin{proof} If $D$ is not pseudoeffective then ${\mathbb B}_-(D)=X$ by \cite{ELMNP1}. We may thus assume that $D$ is pseudoeffective. Let $x\notin{\mathbb B}_-(D)$. Given an ample divisor $A$ we have $x\notin{\mathbb B}(D+\varepsilon A)$ for each $\varepsilon>0$, thus there exists an effective ${\mathbb R}$-divisor $E_\varepsilon\sim_{\mathbb R} D+\varepsilon A$ such that $x\notin\mathop{\rm Supp}\nolimits E_\varepsilon$, and we infer that
$$ \mathop{v_{\rm num}}\nolimits(D+\varepsilon A)\le\mathop{v_{\rm num}}\nolimits(E_\varepsilon)=0 $$
for each divisorial valuation $v$ such that $x\in c_X(v)$. Letting $\varepsilon\to 0$ yields $\mathop{v_{\rm num}}\nolimits(D)=0$ for such divisorial valuations, and we conclude that $x\notin\nn(D)$ as desired. \end{proof}
When $X$ is \emph{smooth} it was shown in~\cite{ELMNP1} Proposition 2.8, using Nadel's vanishing theorem, that equality holds, i.e.
$$ \nn(D)={\mathbb B}_-(D) $$
for every pseudoeffective ${\mathbb R}$-divisor $D$. This shows in particular that $\nn(D)$ is an at most countable union of Zariski closed subsets of $X$. This property holds as well when $X$ is an arbitrary normal variety, since choosing a resolution of singularities $\pi:X'\to X$ yields $$\nn(D)=\pi(\nn(\pi^*D))$$ by Lemma~\ref{lem:bir}. On the other hand one may wonder whether the equality $\nn={\mathbb B}_-$ holds more generally on all normal projective varieties $X$. This is easily seen to be equivalent to the following:
\begin{con} Let $L$ be a big line bundle on a normal projective variety $X$. Let $x\in X$ be such that for each divisorial valuation $\nu$ centered at $x$ there exists an infinite sequence $\sigma_k\in H^0(kL)$ such that $\nu(\sigma_k)=o(k)$. Then there exists an ample divisor $A$ and an infinite sequence $\tau_k\in H^0(kL+A)$ such that $\tau_k(x)\neq 0$. \end{con}
Using~\cite{BCHM} we prove:
\begin{prop}\label{prop:baseloci} Let $(X,\Delta)$ be a klt pair.
Then we have $\nn(K_X+\Delta)={\mathbb B}_-(K_X+\Delta)$, which furthermore coincides with ${\mathbb B}(K_X+\Delta)$ when $K_X+\Delta$ is big. \end{prop}
\begin{proof} We may assume that $K_X+\Delta$ is pseudoeffective, since the result is clear otherwise. Given an irreducible component $V$ of ${\mathbb B}_-(K_X+\Delta)$ there exists an ample ${\mathbb R}$-divisor $A$ such that $V$ is a component of ${\mathbb B}(K_X+\Delta+2A)\subset{\mathbb B}_-(K_X+\Delta+A)$, and upon changing $A$ in its ${\mathbb R}$-linear equivalence class we may assume that $(X,\Delta+A)$ is klt. We thus see that we may assume that $K_X+\Delta$ is big to begin with, and it is then enough to show that $\nn(K_X+\Delta)={\mathbb B}(K_X+\Delta)$ since the latter contains ${\mathbb B}_-(K_X+\Delta)$. By~\cite{BCHM} $K_X+\Delta$ admits an ample model, which means that there exist birational morphisms $\pi:Y\to X$ and $\pi':Y\to X'$ such that
$$\pi^*(K_X+\Delta)=\pi'^*H+F$$
where $H$ is ample on $X'$ and $F$ is effective and $\pi'$-exceptional, and $Y$ may be assumed to be smooth. By the ``negativity lemma'' (\cite{KM}, Lemma 3.39) every effective ${\mathbb R}$-divisor $E$ on $Y$ such that $E\equiv\pi'^*H+F$ satisfies $E\geqslant F$, and it easily follows that
$$\nu_{\text{num}}(K_X+\Delta)=\nu_{\text{num}}(\pi^*(K_X+\Delta))=\nu(F)$$
for every divisorial valuation $\nu$, so that
$$ \nn(K_X+\Delta)=\pi(\mathop{\rm Supp}\nolimits F). $$
On the other hand we have
$$ {\mathbb B}(K_X+\Delta)=\pi({\mathbb B}(\pi^*(K_X+\Delta)))=\pi(\mathop{\rm Supp}\nolimits F) $$
and the result follows. \end{proof}
\section{Proof of Theorem A}
Let $X$ be a normal projective variety and let $\Delta$ be an effective ${\mathbb R}$-Weil divisor on $X$ such that $(X,\Delta)$ is klt. If $K_X+\Delta$ is not pseudoeffective, then by \cite{BDPP} $X$ is uniruled: in fact, considering a log-resolution $f : Y \rightarrow X$ of $(X,\Delta)$, and an effective divisor $\Gamma$ such that
$$K_Y + \Gamma = f^*(K_X +\Delta) +E,$$
with $E$ $f$-exceptional, we have that $K_Y + \Gamma$ is not pseudoeffective, since $E$ is $f$-exceptional and $f^*(K_X +\Delta)$ not pseudoeffective. As $\Gamma$ is effective, $K_Y$ is not pseudoeffective either, thus $Y$ is uniruled, hence so is $X$, and $X=\nn(K_X+\Delta)={\mathbb B}_-(K_X+\Delta)$. Now assume that $K_X+\Delta$ is pseudoeffective and let $V$ be an irreducible component of ${\mathbb B}_-(K_X+\Delta)$. By \cite{ELMNP1} we have
$$ {\mathbb B}_-(K_X+\Delta)=\bigcup\{{\mathbb B}_+(K_X+\Delta+A),\, A \text{ ample}\}, $$
thus there exists an ample ${\mathbb R}$-divisor $A$ such that $V$ is a component of ${\mathbb B}_+(K_X+\Delta+A)$. Since $A$ is ample we may furthermore assume that $(X,\Delta+A)$ is klt. Together with Proposition \ref{prop:baseloci} this reduces us to the following situation: assume that $(X,\Delta)$ is klt, $K_X+\Delta$ is big and let $V$ be an irreducible component of ${\mathbb B}_+(K_X+\Delta)$. We are then to show that $V$ is uniruled. Consider a commutative diagram of birational maps
\begin{equation}\label{equ:diag} \xymatrix{ X \ar[dr]_{\pi} \ar@{-->}[rr]^{\psi}& & X'\ar[dl]^{\pi'}\\ & Z. & \\ } \end{equation}
with $-(K_X+\Delta)$ $\pi$-ample, and either $\pi$ is a divisorial contraction and $\pi'$ is the identity, or $\pi$ is a small contraction and $\pi'$ is its flip.
Since $-(K_X+\Delta)$ is $\pi$-ample we have $\mathrm{Exc}(\pi)\subset{\mathbb B}_+(K_X+\Delta)$. If $V$ is contained in $\mathrm{Exc}(\pi)$ it must therefore be one of its irreducible components, and it follows that $V$ is uniruled by \cite{Kaw}. Otherwise we may consider its strict transform $V'$ on $X'$, since $\psi$ is in both cases an isomorphism away from $\mathrm{Exc}(\pi)$. If we denote by $\Delta'$ the strict transform of $\Delta$ on $X'$ then $(X',\Delta')$ is klt and $K_{X'}+\Delta'$ is big. We claim that $V'$ is a component of ${\mathbb B}_+(K_{X'}+\Delta')$. Indeed consider a resolution of the indeterminacies of $\psi$
\begin{equation}\label{eq:bigdiag} \xymatrix{ &Y\ar[dl]_{\mu} \ar[dr]^{\mu'}\\ X \ar@{-->}[rr]^{\psi}& & X' } \end{equation}
which may be chosen such that $\mu$ (resp. $\mu'$) is an isomorphism above the generic point of $V$ (resp. $V'$). We have
$$ \mu^*(K_X+\Delta)=\mu'^*(K_{X'}+\Delta')+F, $$
where $F$ is $\mu'$-exceptional and $-F$ is nef over $X'$ (since it is nef over $Z$), thus $F\geqslant 0$ by the Negativity Lemma. The claim now follows by Proposition \ref{prop:biratB+}. By \cite{BCHM} there exists a finite composition of maps $\psi$ as in (\ref{equ:diag}) such that $K_{X'}+\Delta'$ is nef at the final stage, and by what we have just shown either the strict transform of $V$ is contained in $\mathrm{Exc}(\pi)$ at some stage, in which case it is uniruled, or the strict transform $V'$ on the final $X'$ is a component of ${\mathbb B}_+(K_{X'}+\Delta')$. By the base point free theorem there exists a further birational morphism $\rho:X'\to W$ such that $K_{X'}+\Delta'=\rho^*A$ with $A$ ample on $W$, and Proposition \ref{prop:biratB+} shows that ${\mathbb B}_+(K_{X'}+\Delta')=\mathrm{Exc}(\rho)$, so that $V'$ is a component of $\mathrm{Exc}(\rho)$. We then conclude that $V'$ is uniruled as desired by a final application of \cite{Kaw}.
\section{Proof of Theorem B}
In this section we first explain how to infer Theorem B from Theorem A, and then give a direct proof following Takayama's approach and thus avoiding~\cite{BCHM}.
\subsection{Theorem A implies Theorem B}
We are actually going to show that Theorem A implies Theorem B when $L$ is an ${\mathbb R}$-divisor. As in the proof of Theorem A, we then have the flexibility to assume that $L$ is big upon adding to it a small multiple of an ample divisor. Assume first that $-K_X$ is nef. We then have
$$\varepsilon L=K_X+(\varepsilon L-K_X)$$
and $\varepsilon L-K_X$ is numerically equivalent to a klt divisor $\Delta$ for $\varepsilon>0$ small enough. Indeed we can write $L\equiv A+E$ where $A$ is ample and $E$ is effective, hence
$$\varepsilon L-K_X\equiv\varepsilon E+\varepsilon A-K_X$$
where $\varepsilon A-K_X$ is ample and $\varepsilon E$ is klt for $\varepsilon$ small enough. Since both ${\mathbb B}_-(L)$ and ${\mathbb B}_+(L)$ are invariant under scaling $L$ we thus get the result by Theorem A applied to $(X,\Delta)$. Now assume instead that $L-K_X=:N$ is nef. We can then write
$$\frac{1}{1-\varepsilon}L=K_X+N+\frac{\varepsilon}{1-\varepsilon} L$$
and $N+\frac{\varepsilon}{1-\varepsilon}L$ is numerically equivalent to a klt divisor $\Delta$ for $\varepsilon>0$ small enough just as before, and Theorem A again implies the desired result after scaling $L$.
\subsection{A (more) direct proof of Theorem B}

Takayama's key idea is that the proof of his extension result \cite[Theorem 4.5]{taka} may be used in combination with \cite{MM} and \cite{BDPP} to obtain the following criteria for uniruledness.

\begin{thm}[Takayama, \cite{taka2}, Corollary 3.3]\label{thm:takauniruled} Let $X$ be a smooth projective variety and $V\subset X$ be an irreducible subvariety. Let $D$ be a line bundle on $X$. Assume there exists a decomposition $D\equiv A+E$, where $A$ is an ample ${\mathbb Q}$-divisor and $E$ is an effective ${\mathbb Q}$-divisor such that $V$ is a maximal lc center for the pair $(X,E)$.
\begin{enumerate}
\item[(a)]\label{item:uni1} If $V$ is \emph{contained} in the stable base locus ${\mathbb B}(K_X+D)$, then $V$ is uniruled.
\item[(b)]\label{item:uni2} If $K_X+D$ is big and $V$ is a \emph{component} of the non-ample locus ${\mathbb B}_+(K_X+D)$, then $V$ is uniruled.
\end{enumerate}
\end{thm}

Recall that a maximal log-canonical (lc for short) center of $(X,E)$ is a subvariety along which the generic log-canonical threshold of $E$ is equal to $1$ (cf.~\cite{Laz}) and which is maximal for that property. We now consider the situation of Theorem B. Let thus $L$ be a line bundle and assume that either $-K_X$ or $L-K_X$ is nef. We begin with $(ii)$ of Theorem B. We thus assume that $L$ is big and let $V$ be an irreducible component of either ${\mathbb B}(L)$ or ${\mathbb B}_+(L)$ that is not contained in ${\mathbb B}_-(L)$. We try to apply Theorem~\ref{thm:takauniruled}. The desired Kodaira-type decomposition will be obtained thanks to the following result.

\begin{lem}[\cite{taka2}, Proposition 4.3]\label{lem:decomp} Let $X$ be a smooth projective variety and $D$ a big ${\mathbb Q}$-divisor on $X$. Assume that $V\subset X$ is an irreducible component of either ${\mathbb B}(D)$ or ${\mathbb B}_+(D)$. Then there exists a rational number $\alpha>0$ and a decomposition $\alpha D\equiv A+E$ with $A$ an ample ${\mathbb Q}$-divisor and $E$ an effective ${\mathbb Q}$-divisor on $X$ such that $V$ is a maximal lc center for $(X,E)$. \end{lem}

We can now extend \cite[Propositions 5.1 and 5.2]{taka2} as follows.

\begin{prop}\label{prop1} Let $X$ be a smooth projective variety and let $L$ be a big line bundle on $X$. Let $V$ be an irreducible component of either ${\mathbb B}(L)$ or ${\mathbb B}_+(L)$ which is not contained in ${\mathbb B}_-(L)$. Then $V$ is uniruled if either $-K_X$ or $L-K_X$ is nef. \end{prop}

\begin{proof} By Lemma \ref{lem:decomp} there exists a rational number $\alpha>0$ and a decomposition $$\alpha L\equiv A+E$$ with $A$ an ample ${\mathbb Q}$-divisor and $E$ an effective ${\mathbb Q}$-divisor such that $V$ is a maximal lc center for $(X,E)$. Suppose first that $V$ is a component of ${\mathbb B}(L)$. In case $-K_X$ is nef we write $$D:=mL-K_X=\left((m-\alpha)L+\frac 1 2 A\right)+\left(\frac 1 2 A-K_X\right)+E$$ and the result follows from Theorem~\ref{thm:takauniruled}, item (a), applied to $D$, since $\frac 1 2 A-K_X$ is ample and $V$ is not contained in ${\mathbb B}((m-\alpha)L+\frac 1 2 A)$ for $m\gg 1$, as it is not contained in ${\mathbb B}_-(L)$ by assumption.
In case $L-K_X$ is nef we write $$D:=(m+1)L-K_X=\left((m-\alpha)L+\frac 1 2 A\right)+\left(\frac 1 2 A+L-K_X\right)+E$$ and we conclude similarly, since $\frac 1 2 A +L-K_X$ is ample and $V$ is not contained in ${\mathbb B}((m-\alpha)L+\frac 1 2 A)$ for $m\gg 1$. Assume now that $V$ is a component of ${\mathbb B}_+(L)$ not contained in ${\mathbb B}(L)$. In case $-K_X$ is nef we write $$D:=mL-K_X=(m-\alpha)L+(A-K_X)+E$$ and in case $L-K_X$ is nef $$D:=(m+1)L-K_X=(m-\alpha)L+(A+L-K_X)+E,$$ and we conclude as above by applying Theorem~\ref{thm:takauniruled}, item (b), to $D$. \end{proof}

Proposition~\ref{prop1} already proves $(ii)$ of Theorem B in case the component $V$ of either ${\mathbb B}(L)$ or ${\mathbb B}_+(L)$ is not contained in ${\mathbb B}_-(L)$. We now focus on the case where $V$ is a component of ${\mathbb B}_-(L)$. It is then as before a component of ${\mathbb B}(L+\varepsilon A)$ if $A$ is ample and $\varepsilon>0$ is small enough, but this does not directly reduce case $(i)$ to case $(ii)$ since $L+\varepsilon A$ is not a line bundle anymore, and one thus has to exercise a little more care in the reduction trick. We will argue as in \cite[Proof of Proposition 6.1, (2)]{taka2}. Let thus $$t_0:= \inf\{t\in{\mathbb Q},\ V\subset{\mathbb B}(tL+A)\}.$$ Note that $t_0>0$ since $tL+A$ is ample for $0<t\ll 1$. On the other hand we also have $t_0<\infty$ by \cite[Lemma 2.5, item (1)]{taka2}.

\begin{lem}\label{claim} There exist two positive integers $m$ and $n$ such that \begin{eqnarray}\label{eq:xy0} \frac{m+1}{n}>t_0 \end{eqnarray} and such that \begin{eqnarray}\label{eq:xy1} mL+nA\sim_{\mathbb Q} A_1+E_1 \end{eqnarray} and \begin{eqnarray}\label{eq:xy2} (m+1)L+nA\sim_{\mathbb Q} A_2+E_2 \end{eqnarray} where, for each $i=1,2$, the divisor $A_i$ is an ample ${\mathbb Q}$-divisor and $E_i$ is an effective ${\mathbb Q}$-divisor such that $V$ is a maximal lc center for $(X,E_i)$. \end{lem}

Assume this result for the moment. By the definition of $t_0$, condition (\ref{eq:xy0}) guarantees that $V\subset{\mathbb B}((m+1)L+nA)$. If $-K_X$ is nef, we write $$D:=(m+1)L+nA-K_X\buildrel{(\ref{eq:xy2})\ \ }\over{\sim_{\mathbb Q}}(A_2-K_X)+E_2$$ and we conclude by Theorem~\ref{thm:takauniruled}, item (a), since $A_2-K_X$ is ample. If $L-K_X$ is nef we write $$D:=(m+1)L+nA-K_X\buildrel{(\ref{eq:xy1})\ \ }\over{\sim_{\mathbb Q}}(A_1+L-K_X)+E_1$$ and we conclude as before since $A_1+L-K_X$ is ample.

\begin{proof}[Proof of Lemma~\ref{claim}] Choose an integer $m_1>t_0+1$. Since $V$ is an irreducible component of ${\mathbb B}(m_1L+A)$ we can apply Lemma~\ref{lem:decomp} and write \begin{equation}\label{equ:D}\alpha(m_1L+A)\equiv H+F \end{equation} where $\alpha$ is a positive rational number, $H$ is ample and $V$ is a maximal lc center for $(X,F)$. Now choose two positive integers $m$ and $n$ such that \begin{eqnarray}\label{eq:xy3} m>\max\{\alpha m_1, t_0\}, \end{eqnarray} \begin{eqnarray}\label{eq:xy4} n> \max\{\alpha , 1\} \end{eqnarray} and \begin{eqnarray}\label{eq:xy5} -1<m-nt_0<\alpha.
\end{eqnarray} The existence of such $m$ and $n$ may be seen as follows: if $t_0\in{\mathbb Q}$, then take $m$ and $n$ to be two sufficiently divisible integers such that $t_0=m/n$. If $t_0\not\in{\mathbb Q}$, then the existence follows from elementary Diophantine approximation. Since $m_1>t_0+1$, by conditions (\ref{eq:xy3}) and (\ref{eq:xy5}), {\it for all} such integers $m,n$ we have that \begin{eqnarray}\label{eq:<t_0} \frac{m-\alpha m_1}{n-\alpha}<t_0. \end{eqnarray} Now notice that, for every $\epsilon >0$, among the integers satisfying (\ref{eq:xy3}) and (\ref{eq:xy5}) we can choose $n$ big enough such that $$ \frac{1}{n-\alpha}<\epsilon. $$ In conclusion there exist $m$ and $n$ satisfying (\ref{eq:xy3}), (\ref{eq:xy4}) and (\ref{eq:xy5}) and such that we also have \begin{eqnarray}\label{eq:<t_0bis} \frac{m-\alpha m_1}{n-\alpha}<\frac{(m+1)-\alpha m_1}{n-\alpha}= \frac{m-\alpha m_1}{n-\alpha}+\frac{1}{n-\alpha}<t_0. \end{eqnarray} From (\ref{eq:<t_0bis}) and the definition of $t_0$ one deduces (as in \cite[Lemma 2.5, item (2)]{taka2}) the existence of the following decompositions \begin{eqnarray}\label{eq:dec1} (m-\alpha m_1)L+(n-\alpha)A\sim_{\mathbb Q} A'_1+ E'_1 \end{eqnarray} and \begin{eqnarray}\label{eq:dec2} (m+1-\alpha m_1)L+(n-\alpha)A\sim_{\mathbb Q} A'_2+ E'_2 \end{eqnarray} where, for each $i=1,2$, the divisor $A'_i$ is an ample ${\mathbb Q}$-divisor and $E'_i$ is an effective ${\mathbb Q}$-divisor such that \begin{eqnarray}\label{eq:star} V\not\subset \mathop{\rm Supp}\nolimits(E'_i). \end{eqnarray} To conclude the proof of the Lemma, notice that thanks to (\ref{eq:dec1}) and (\ref{eq:dec2}) we can write \begin{eqnarray*} mL+nA=\alpha(m_1L+A)+ \big( (m-\alpha m_1)L +(n-\alpha)A \big) \equiv A'_1+H + (F+E'_1) \end{eqnarray*} and \begin{eqnarray*} (m+1)L+nA=\alpha(m_1L+A)+ \big( (m+1-\alpha m_1)L +(n-\alpha)A \big) \equiv A'_2+H + (F+E'_2). \end{eqnarray*} The proof is now concluded by setting $A_i:=A_i'+H$ and $E_i:=F+E_i'$ for $i=1,2$. \end{proof}

\begin{thebibliography}{ELMNP1}
\bibitem[BCHM]{BCHM} C.~Birkar, P.~Cascini, C.~Hacon, J.~McKernan, Existence of minimal models for varieties of log general type, {\it J. Amer. Math. Soc.} {\bf 23} (2010), 405--468.
\bibitem[Bou]{B} S.~Boucksom, Divisorial Zariski decompositions on compact complex manifolds, {\it Ann. Sci. \'Ecole Norm. Sup. (4)} {\bf 37} (2004), 45--76.
\bibitem[BDPP]{BDPP} S.~Boucksom, J.-P.~Demailly, M.~P\u{a}un, Th.~Peternell, The pseudoeffective cone of a compact K\"ahler manifold and varieties of negative Kodaira dimension, preprint {\tt math.AG/0405285}.
\bibitem[Corti+]{corti} A.~Corti et al., {\it Flips for 3-folds and 4-folds}, edited by Alessio Corti, Oxford Lecture Series in Mathematics and its Applications {\bf 35}, Oxford University Press, Oxford, 2007.
\bibitem[Cut]{Cut} S.~D.~Cutkosky, Zariski decomposition of divisors on algebraic varieties, {\it Duke Math. J.} {\bf 53} (1986), no. 1, 149--156.
\bibitem[ELMNP1]{ELMNP1} L.~Ein, R.~Lazarsfeld, M.~Musta\c{t}\u{a}, M.~Nakamaye, M.~Popa, Asymptotic invariants of base loci, {\it Ann. Inst. Fourier} {\bf 56} (2006), 1701--1734.
\bibitem[ELMNP2]{ELMNP2} L.~Ein, R.~Lazarsfeld, M.~Musta\c{t}\u{a}, M.~Nakamaye, M.~Popa, Restricted volumes and asymptotic intersection theory, {\it Amer. J. Math.} {\bf 131} (2009), no. 3, 607--651.
\bibitem[Kawa]{Kaw} Y.~Kawamata, On the length of an extremal rational curve, {\it Invent. Math.} {\bf 105} (1991), no. 3, 609--611.
\bibitem[KM]{KM} J.~Koll\'ar, S.~Mori, {\it Birational geometry of algebraic varieties}, Cambridge Tracts in Mathematics {\bf 134}, Cambridge University Press, Cambridge, 1998.
\bibitem[Laz]{Laz} R.~Lazarsfeld, {\it Positivity in algebraic geometry I, II}, Ergebnisse der Mathematik und ihrer Grenzgebiete {\bf 48} and {\bf 49}, Springer-Verlag, Heidelberg, 2004.
\bibitem[MM]{MM} Y.~Miyaoka, S.~Mori, A numerical criterion for uniruledness, {\it Ann. of Math.} {\bf 124} (1986), 65--69.
\bibitem[Nak]{Nak} M.~Nakamaye, Stable base loci of linear series, {\it Math. Ann.} {\bf 318} (2000), 837--847.
\bibitem[Naka]{Naka} N.~Nakayama, {\it Zariski-decomposition and abundance}, MSJ Memoirs {\bf 14}, Math. Soc. Japan, 2004.
\bibitem[Taka1]{taka} S.~Takayama, Pluricanonical systems on algebraic varieties of general type, {\it Invent. Math.} {\bf 165} (2006), 551--587.
\bibitem[Taka2]{taka2} S.~Takayama, On the uniruledness of stable base loci, {\it J. Diff. Geom.} {\bf 78} (2008), 521--541.
\end{thebibliography}

\vskip 30pt

\noindent {\small S\'ebastien Boucksom\\ CNRS--Universit\'e Paris 7\\ Institut de Math\'ematiques\\ F-75251 Paris Cedex, France\\ E-mail: {\tt [email protected]}}

\vskip 1em

\noindent {\small Ama\"el Broustet\\ Universit\'e Lille 1\\ UMR CNRS 8524\\ UFR de math\'ematiques\\ 59 655 Villeneuve d'Ascq Cedex, France\\ E-mail: {\tt [email protected]}}

\vskip 1em

\noindent {\small Gianluca Pacienza\\ Institut de Recherche Math\'ematique Avanc\'ee\\ Universit\'e de Strasbourg et CNRS\\ 7, Rue R. Descartes -- 67084 Strasbourg Cedex, France\\ E-mail: {\tt [email protected]}}

\end{document}
\begin{document}

\title{DECOHERENCE, EINSELECTION, AND THE EXISTENTIAL INTERPRETATION \\ (The Rough Guide)}
\maketitle

\begin{abstract}
The roles of decoherence and environment-induced superselection in the emergence of the classical from the quantum substrate are described. The stability of correlations between the einselected quantum pointer states and the environment allows them to exist almost as objectively as classical states were once thought to exist: There are ways of finding out what is the pointer state of the system which utilize redundancy of their correlations with the environment, and which leave einselected states essentially unperturbed. This {\it relatively objective existence\/} of certain quantum states facilitates an operational definition of probabilities in the quantum setting. Moreover, once there are states that `exist' and can be `found out', a `collapse' in the traditional sense is no longer necessary --- in effect, it has already happened. The records of the observer will contain evidence of an effective collapse. The role of the preferred states in the processing and storage of information is emphasized. The {\it existential interpretation}, based on the relatively objective {\it existence\/} of stable correlations between the einselected states of the observer's memory and those in the outside Universe, is formulated and discussed.
\end{abstract}

\section{INTRODUCTION}

The aim of the program of decoherence and einselection (environment-induced superselection) is to describe the consequences of the ``openness'' of quantum systems to their environments and to study the emergence of the effective classicality of some of the quantum states and of the associated observables. The purpose of this paper is to assess the degree to which this program has been successful in facilitating the interpretation of quantum theory and to point out open issues and problems. Much work in recent years has been devoted to the clarification and extension of the elements of the physics of decoherence and especially to the connections between measurements and environment-induced superselection.$^{1-10}$ This has included studies of the emergence of preferred states in various settings through the implementation of the predictability sieve,$^{11-14}$ refinements of master equations and analysis of their solutions,$^{15-17}$ and the study of related ideas (such as consistent histories,$^{18-20}$ quantum trajectories, and quantum state diffusion$^{21-23}$). A useful counterpoint to these advances was provided by various applications, including quantum chaos,$^{24-26}$ einselection in the context of field theories and in Bose-Einstein condensates,$^{27,16}$ and, especially, by the interplay of the original information-theoretic aspects$^{1,2,28}$ of the environment-induced superselection approach with the recent explosion of research on quantum computation$^{30-35}$ and related subjects$^{34-38}$. Last but not least, the first controlled experiment aimed at investigating decoherence is now in place, carried out by Brune, Haroche, Raimond and their collaborators,$^{39}$ and additional experiments may soon follow as a result of theoretical$^{40-43}$ and experimental$^{44}$ developments. In nearly all of the recent advances the emphasis was on specific issues which could be addressed by detailed solutions of specific models. This attention to detail was necessary, but may lead to the impression that practitioners of decoherence and einselection have lost sight of their original motivation --- the interpretation of quantum theory.
My aim here is to sketch ``the big picture'', to relate the recent progress on specific issues to the overall goals of the program. I shall therefore attempt to capture ``the whole'' (or at least large parts of it), but in broad brush strokes. Special attention will be paid to issues such as the implications of decoherence for the origin of quantum probabilities and to the role of information processing in the emergence of `objective existence', which significantly reduces and perhaps even eliminates the role of the ``collapse'' of the state vector. In short, we shall describe how decoherence converts quantum entanglement into classical correlations and how these correlations can be used by the observer for the purpose of prediction. What will matter is then encoded in the {\it relations\/} between states (such as a state of the observer's memory and of the quantum systems). Stability of similar {\it co-relations\/} with the environment allows observers to find out unknown quantum states without disturbing them. Recognition of this {\it relatively objective existence\/} of einselected quantum states and investigation of the consequences of this phenomenon are the principal goals of this paper. Relatively objective existence allows for the {\it existential interpretation\/} of quantum theory. Reduction of the wavepacket as well as the ``collapse'' emerge as a consequence of the assumption that the effectively classical states, including the states of the observer's memory, must exist over periods long compared to the decoherence time if they are to be useful as repositories of information. It will be emphasized that while significant progress has been made since the environment-induced superselection program was first formulated,$^{1-4}$ much more remains to be done on several fronts which all have implications for the overarching question of interpretation. We can mention two such open issues right away: Both the formulation of the measurement problem and its resolution through the appeal to decoherence require a Universe split into systems. Yet, it is far from clear how one can define systems given an overall Hilbert space ``of everything'' and the total Hamiltonian. Moreover, while the paramount role of information has been recognised, I do not believe that it has been, as yet, sufficiently thoroughly understood. Thus, while what follows is perhaps the most complete discussion of the interpretation implied by decoherence, it is still only a report of partial progress.

\section{OVERVIEW OF THE PROBLEM}

When special relativity was discovered, some twenty years before quantum mechanics in its modern guise was formulated, it in a sense provided a model of what a new theory should be. As a replacement of Newtonian kinematics and dynamics, it was a seamless extension. In the limit of the infinite speed of light, $c \to \infty$, equations and concepts of the old theory were smoothly recovered. When Bohr,$^{45}$ Heisenberg,$^{46}$ Born,$^{47}$ and Schr\"odinger$^{48}$ struggled to understand the implications of quantum theory,$^{49}$ one can sense that they had initially expected a similar seamless extension of classical physics. Indeed, in specific cases --- i.e., Bohr's correspondence, Heisenberg's uncertainty, Ehrenfest's theorem --- such hopes were fulfilled in the appropriate limits (i.e., large quantum numbers, $\hbar \to 0$, etc.). However, Schr\"odinger's wave packets did not travel along classical trajectories (except in the special case of the harmonic oscillator).
Instead, they developed into delocalized, nonclassical superpositions. And the tempting $\hbar \to 0$ limit did not allow for the recovery of classical locality --- it did not even exist, as the typical expression appearing in wavefunctions such as $\exp (ixp/\hbar)$ is not even analytic as $\hbar \to 0$. The culprit which made it impossible to recover classicality as a limiting case of quantum theory was at the very foundation of the quantum edifice: It was the quantum principle of superposition. It guarantees that any superposition of states is a legal quantum state. This introduced a whole Hilbert space ${\cal H}$ of possibilities, while only a small fraction of the states in ${\cal H}$ can be associated with the classically allowed states, and superpositions of such states are typically flagrantly nonclassical. Moreover, the number of possible nonclassical states in the Hilbert space increases exponentially with its dimensionality, while the number of classical states increases only linearly. This divergence (which is perhaps the key ingredient responsible for the exponential speedup of quantum computations$^{30-35}$) is a measure of the problem. Moreover, it can be argued that it is actually exacerbated in the $\hbar \to 0$ limit, as the dimensionality of the Hilbert space (of, say, a particle in a confined phase space) increases with $1/\hbar$ to some power. The first resolution (championed by Bohr$^{45}$) was to outlaw ``by fiat'' the use of quantum theory for the objects which were classical. This Copenhagen Interpretation (CI) had several flaws: It would have forced quantum theory to depend on classical physics for its very existence. It would have also meant that neither quantum nor classical theory were universal. Moreover, the boundary between them was never clearly delineated (and, according to Bohr, had to be ``movable'' depending on the whims of the observer). Last but not least, with the collapse looming on the quantum-classical border, there was little chance for a seamless extension. By contrast, Everett's ``Many Worlds'' Interpretation$^{50}$ (MWI) refused to draw a quantum-classical boundary. The superposition principle was the ``law of the land'' for the Universe as a whole. Branching wave functions described alternatives, all of which were realized in the deterministic evolution of the universal state vector. The advantage of Everett's original vision was to reinstate quantum theory as a key tool in the search for its own interpretation. The disadvantages (which were realized only some years later, after the original proposal became known more widely) include (i)~the ambiguity of what constitutes the ``branches'' (i.e., the specification of which of the states in the Hilbert spaces containing all of the conceivable superpositions are classically ``legal'') and (ii)~the re-emergence of the questions about the origin of probabilities (i.e., the derivation of Born's formula). Moreover, (iii)~it was never clear how to reconcile the unique experiences of observers with the multitude of alternatives present in the MWI wave function.

\section{DECOHERENCE AND EINSELECTION}

Decoherence is a process of continuous measurement-like interaction between the system and an (external or internal) environment. Its effect is to invalidate the superposition principle in the Hilbert space of an open system. It leads to very different stability properties for various pure states.
Interaction with the environment destroys the vast majority of the superpositions quickly, and --- in the case of macroscopic objects --- almost instantaneously. This leads to a negative selection which in effect bars most of the states and results in the singling out of a preferred stable subset of the einselected pointer states. Correlations are both the cause of decoherence and the criterion used to evaluate the stability of the states. The environment correlates (or, rather, becomes entangled) with the observables of the system while ``monitoring'' them. Moreover, the stability of the correlations between the states of the system monitored by their environment and of some other ``recording'' system (i.e., an apparatus or a memory of an observer) is a criterion of the ``reality'' of these states. Hence, we shall often talk about the {\it relatively objective existence\/} of states to emphasize that they are really defined only through their correlations with the states of the other systems, as well as to remind the reader that these states will never be quite as ``rock solid'' as classical states of a stone or a planet were (once) thought to be. Transfer of a single bit of information is a single ``unit of correlation,'' whether in communication, decoherence, or in measurement.\footnote{It is no accident that the setups used in modern treatments of quantum communication channels$^{34-36}$ bear an eerie resemblance to the by now ``old hat'' system-apparatus-environment ``trio'' used in the early discussions of environment-induced superselection$^{1,2}$. The apparatus ${\cal A}$ is a member of this trio which is supposed to preserve --- in the preferred pointer basis --- the correlation with the state of the system ${\cal S}$ with which it is initially entangled. Hence, ${\cal A}$ is a ``communication channel.''} It suffices to turn a unit of {\it quantum\/} correlation (i.e., entanglement, which can be established in the course of a (pre-)measurement-like interaction between two one-bit systems) into a {\it classical\/} correlation. This process is illustrated in Fig.~1 with a ``bit by bit'' measurement$^1$ --- a quantum controlled-not (or a {\tt c-not}). An identical {\tt c-not}, controlled by the previously passive ``target'' bit (which played the role of the apparatus pointer in the course of the initial correlation, Fig.~1a) and acting on a bit ``somewhere in the environment,'' represents the process of decoherence. Now, however, the former apparatus (target) bit plays the role of the control. As a result, a pure state of the system \begin{equation} \left|\sigma\right> = \alpha \left| 0\right> + \beta \left| 1\right> \end{equation} is ``communicated'' by first influencing the state of the apparatus; \begin{equation} \left|\Phi_{{\cal S A}} (0)\right> = \left|\sigma\right>_{\cal S} \left|0\right>_{\cal A} \rightarrow \alpha \left| 00\right>_{\cal SA} + \beta \left| 11\right>_{\cal SA} = \left| \Phi_{{\cal S A}} (t_1)\right>\;, \end{equation} and then by spreading that influence to the environment: \begin{equation} \left|\Psi_{{\cal SAE}} (t_1)\right> = \left(\alpha \left| 00\right> + \beta \left| 11\right>\right) \left| 0\right> \rightarrow \alpha \left| 000\right> + \beta \left| 111\right> = \left|\Psi_{{\cal S A E}} (t_2)\right>\;. \end{equation} Above, we have dropped the indices ${\cal SAE}$ for individual qubits (which would have appeared in the obvious order).
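For concreteness, the following short numerical sketch (added here as an illustration only; the specific amplitudes and helper function are ours and are not part of the original model) implements the two {\tt c-not}s of Eqs.~(2)--(3) on three qubits ${\cal S}$, ${\cal A}$, ${\cal E}$ and then traces out the environment, anticipating the reduced state discussed next.

\begin{verbatim}
import numpy as np

# Single-qubit basis states and a generic pure state of the system S.
ket0 = np.array([1.0, 0.0]); ket1 = np.array([0.0, 1.0])
alpha, beta = 0.6, 0.8          # illustrative amplitudes, |alpha|^2+|beta|^2 = 1
sigma = alpha * ket0 + beta * ket1

def cnot(n_qubits, control, target):
    """Matrix of a c-not on n_qubits qubits (qubit 0 is the leftmost factor)."""
    dim = 2 ** n_qubits
    U = np.zeros((dim, dim))
    for i in range(dim):
        bits = [(i >> (n_qubits - 1 - k)) & 1 for k in range(n_qubits)]
        if bits[control] == 1:
            bits[target] ^= 1
        j = sum(b << (n_qubits - 1 - k) for k, b in enumerate(bits))
        U[j, i] = 1.0
    return U

# |Psi_SAE(0)> = |sigma>_S |0>_A |0>_E
psi0 = np.kron(np.kron(sigma, ket0), ket0)
psi1 = cnot(3, 0, 1) @ psi0     # premeasurement (Eq. 2): alpha|000> + beta|110>
psi2 = cnot(3, 1, 2) @ psi1     # decoherence     (Eq. 3): alpha|000> + beta|111>

# Reduced density matrix of S+A after tracing out E:
rho_SAE = np.outer(psi2, psi2.conj())
rho_SA = rho_SAE.reshape(4, 2, 4, 2).trace(axis1=1, axis2=3)
print(np.round(rho_SA, 3))      # diagonal: |alpha|^2 |00><00| + |beta|^2 |11><11|
\end{verbatim}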
After the environment is traced out, only the correlation with the pointer basis of the apparatus (i.e., the basis in which the apparatus acts as a control) will survive$^{1-3,28}$: \begin{equation} \rho_{{\cal SA}} (t_2) = \left|\alpha\right|^2 \left|00 \right> \left< 00\right| + |\beta|^2 \left| 11 \right> \left< 11\right| \end{equation} Thus, the apparatus plays the role of the communication channel (memory) (i)~through its ability to retain correlations with the measured system, but also, (ii)~by ``broadcasting'' of these correlations into the environment which is the source of decoherence (see Fig. 1b). Such broadcasting of quantum correlations makes them --- and the observables involved in broadcasting --- effectively classical.$^{29}$ The ability to retain correlations is the defining characteristic of the preferred ``pointer'' basis of the apparatus. In simple models of measurement {\it cum\/} decoherence, the selection of the preferred basis of the apparatus can be directly tied to the form of the interaction with the environment. Thus, an observable $\hat O$ which commutes with the complete (i.e., self-, plus the interaction with the environment) Hamiltonian of the apparatus: \begin{equation} \left[\hat H_{{\cal A}} + \hat H_{{\cal A E}}, \hat O\right] = 0 \end{equation} will be the pointer observable. This criterion can be fulfilled only in the simplest cases: Typically, $\left[\hat H_{{\cal A}}, \hat H_{{\cal A E}}\right] \not= 0$, hence Eq.~(5) cannot be satisfied exactly. In more realistic situations one must therefore rely on more general criteria to which we have alluded above. One can start by noting that the einselected pointer basis is best at retaining correlations with the external stable states (such as pointer states of other apparatus or record states of the observers). The predictability sieve$^{11-14}$ is a convenient strategy to look for such states. It retains pure states which produce least entropy over a period of time long compared to the decoherence timescale. Such states avoid entanglement with the environment and, thus, can preserve correlations with the similarly selected states of other systems. In effect, predictability sieve can be regarded as a strategy to select stable correlations. A defining characteristic of {\it reality} of a state is the possibility of finding out what it is and yet leaving it unperturbed. This criterion of {\it objective existence} is of course satisfied in classical physics. It can be formulated operationally by devising a strategy which would let an observer previously unaware of the state find out what it is and later verifying that the state was (i)~correctly identified, and (ii)~not perturbed. In quantum theory, this is not possible to accomplish with an {\it isolated\/} system. Unless the observer knows in advance what observables commute with the state of the system, he will in general end up re-preparing the system through a measurement employing ``his'' observables. This would violate condition (ii) above. So --- it is said --- quantum states do not {\it exist objectively}, since it is impossible to find out what they are without, at the same time, ``remolding them'' with the questions posed by the measurement.$^{51}$ Einselection allows states of open quantum system to pass the ``existence test'' in several ways. The observer can, for example, measure properties of the Hamiltonian which generates evolution of the system and of the environment. 
Einselection determines that pointer states will appear on the diagonal of the density matrix of the system. Hence, the observer can know beforehand what (limited) set of observables can be measured with impunity. He will be able to select measurement observables that are already monitored by the environment. Using a set of observables co-diagonal in the Hilbert space of the system with the einselected states he can then perform a nondemolition measurement to find out what is the state without perturbing it. A somewhat indirect strategy which also works involves monitoring the environment and using a {\it fraction\/} of its state to infer the state of the system. This may not be always feasible, but this strategy is worth noting since it is the one universally employed by us, the real observers. Photons are one of the most pervasive environments. We gather most of our information by intercepting a small fraction of that environment. Different observers agree about reality based on a consensus reached in this fashion. That such a strategy is possible can be readily understood from the {\tt c-not} ``caricature'' of decoherence in Fig.~1. The einselected control observables of the system or of the apparatus are redundantly recorded in the environment. One can then ``read them off'' many times (even if each read-off may entail erasure of a part of the information from the environment) without interacting directly with the system of interest. It is important to emphasize that the relatively objective existence is attained at the price of partial ignorance. The observer should {\it not\/} attempt to intercept {\it all\/} of the environment state (which may be entangled with the system and, hence, could be used to redefine its state by sufficiently outrageous measurement$^{52}$). Objective existence is objective only because part of the environment has ``escaped'' with the information about the state of the system and can continue to serve as a ``witness'' to what has happened. It is also important that the fraction of the environment which escapes should not matter, except in the two limits when the observer can intercept all of the relevant environment (the entanglement limit), and in the case when the observer simply does not intercept enough (the ignorance limit). This robustness of the preferred (einselected) observables of the system can be quantified through redundancy$^{28}$, in a manner reminiscent of the recent discussions of the error correction strategies (see, e. g., Ref. 38 and references therein). Consider, for example, a correlated state \begin{equation} \left|\Psi_{\cal SE}\right> = \left(\left|0\right>_{\cal S} \left|000\right>_{\cal E} + \left|1\right>_{\cal S} \left|111\right>_{\cal E}\right)/\sqrt{2} \end{equation} which could have arisen from a sequence of three system-environment {\tt c-not}s. All errors afflicting individual qubits of the environment can be classified by associating them with Pauli matrices acting on individual qubits of the environment. We can now inquire about the number of errors which would destroy the correlation between various observables of the system and the state of the environment. 
It is quite obvious that the states $\{\left|0\right>_{\cal S}, \left|1\right>_{\cal S}\}$ are in this sense more robustly correlated with the environment than the states $\{\left|+\right>_{\cal S}, \left|-\right>_{\cal S}\}$ obtained by Hadamard transform: \begin{equation} \left|\Psi_{\cal SE}\right> = \left|+\right>_{\cal S} \left(\left|000\right>_{\cal E}+ \left|111\right>_{\cal E}\right)/\sqrt{2}\\ \ + \ \left|-\right>_{\cal S} \left(\left|000\right>_{\cal E}- \left|111\right>_{\cal E}\right)/\sqrt{2} \end{equation} For, a phase flip of any of the environment bits would destroy the ability of the observer to infer the state of the system in the $\{\left|+\right>_{\cal S}, \left|-\right>_{\cal S}\}$ basis. By contrast, a majority vote in a $\{\left|0\right>_{\cal E}, \left|1\right>_{\cal E}\}$ basis would still yield a correct answer concerning $\{\left|0\right>_{\cal S}, \left|1\right>_{\cal S}\}$ if {\it any\/} single error afflicted the state of the environment. Moreover, when there are $N$ bits in the environment, ${N \over 2} - 1$ errors can be in principle still tolerated in the $\{\left|0\right>_{\cal S}, \left|1\right>_{\cal S}\}$ basis, but in the $\{\left|+\right>_{\cal S}, \left|-\right>_{\cal S}\}$ basis a simple phase flip continues to have disastrous consequences. When we assume (as seems reasonable) that the probability of errors increases with the size of the environment ($N$), so that the ``specific error rate'' (i.e., probability of an error per bit of environment per second) is fixed, it becomes clear that the stability of pointer states is purchased at the price of the instability of their Hadamard-Fourier conjugates. This stabilization of certain observables at the expense of their conjugates may be achieved either through the deliberate amplification or as a consequence of accidental environmental monitoring, but in any case it leads to redundancy as it was pointed out already some time ago$^{28}$. This redundancy may be quantified by counting the number of ``flips'' applied to individual environment qubits which ``exchange'' the states of the environment corresponding to the two states of the system. Thus, we can compute the redundancy distance $d$ between the record states of the environment in the case corresponding to the two system states $\phi, \psi$ given by $\{\left|0\right>_{\cal S}, \left|1\right>_{\cal S}\}$ in the decomposition of Eq. (6): \begin{eqnarray*} d (\phi,\psi) = N \end{eqnarray*} while in the case of the complementary observable with $\phi, \psi$ given by $\{\left|+\right>_{\cal S}, \left|-\right>_{\cal S}\}$: \begin{eqnarray*} d (\phi, \psi) = 1\;. \end{eqnarray*} Or, in general, redundancy distance \begin{equation} d (\phi,\psi) = \min (n_x + n_y + n_z) \end{equation} is the least total number of ``flips'', where $n_x, n_y$ and $n_z$ are the numbers of $\hat \sigma_x$, $\hat \sigma_y$, and $\hat \sigma_z$ operations required to convert the state of the environment correlated with $\left|\phi\right>$, which is given, up to normalization constant, by: \begin{equation} \left|{\cal E}_\phi\right> = \left<\phi | \Psi_{\cal SE}\right> \end{equation} with the similarly defined $\left|{\cal E}_\psi\right>$. 
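The two values of $d$ quoted above can be checked with a small numerical sketch (ours, not part of the original text; an exhaustive search over products of single-qubit Paulis stands in for the minimization in Eq.~(8)): the environment records correlated with $\{\left|0\right>_{\cal S},\left|1\right>_{\cal S}\}$ are $N=3$ flips apart, while those correlated with $\{\left|+\right>_{\cal S},\left|-\right>_{\cal S}\}$ differ by a single flip.

\begin{verbatim}
import itertools
import numpy as np

I = np.eye(2, dtype=complex)
X = np.array([[0, 1], [1, 0]], dtype=complex)
Y = np.array([[0, -1j], [1j, 0]])
Z = np.array([[1, 0], [0, -1]], dtype=complex)
PAULIS = {'I': I, 'X': X, 'Y': Y, 'Z': Z}

ket = {'0': np.array([1, 0], dtype=complex), '1': np.array([0, 1], dtype=complex)}
def basis(bits):                       # |b1 b2 b3> of the three-qubit environment
    v = ket[bits[0]]
    for b in bits[1:]:
        v = np.kron(v, ket[b])
    return v

# Record states of Eq. (6) and their Hadamard-conjugate counterparts:
E0, E1 = basis('000'), basis('111')
Ep = (E0 + E1) / np.sqrt(2)
Em = (E0 - E1) / np.sqrt(2)

def redundancy_distance(phi, psi):
    """Least number of single-qubit Pauli 'flips' mapping phi to psi (up to phase)."""
    best = None
    for labels in itertools.product('IXYZ', repeat=3):
        U = PAULIS[labels[0]]
        for l in labels[1:]:
            U = np.kron(U, PAULIS[l])
        if np.isclose(abs(np.vdot(psi, U @ phi)), 1.0):
            flips = sum(l != 'I' for l in labels)
            best = flips if best is None else min(best, flips)
    return best

print(redundancy_distance(E0, E1))   # 3 = N  (pointer basis {|0>,|1>})
print(redundancy_distance(Ep, Em))   # 1      (conjugate basis {|+>,|->})
\end{verbatim}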
Redundancy defined in this manner is indeed a measure of distance, since it is nonnegative: \begin{equation} d (\phi,\psi) \geq 0\;, \end{equation} symmetric: \begin{equation} d(\phi,\psi) = d (\psi,\phi)\;, \end{equation} and satisfies the triangle inequality: \begin{equation} d(\phi,\psi) + d(\psi,\gamma) \geq d (\phi, \gamma) \end{equation} as the reader should be able to establish without difficulty. In simplest models which satisfy the commutation condition, Eq.~(5), the most predictable set of states will consist of the eigenstates of the pointer observable $\hat O$. They will not evolve at all and, hence, will be perfect memory states as well as the most (trivially) predictable classical states. In the more general circumstances the states which commute with $\hat H_{\cal SE}$ at one instant will be rotated (into their superpositions) at a later instant with the evolution generated by the self-Hamiltonian $\hat H_{\cal S}$. Thus, a near-zero entropy production at one instant may be ``paid for'' by an enormous entropy production rate a while later. An example of this situation is afforded by a harmonic oscillator, where the dynamical evolution periodically ``swaps'' the state vector between its position and momentum representation, and the two representations are related to each other by a Fourier transformation. In that case the states which are most immune to decoherence in the long run turn out to be the fixed points of the ``map'' defined by the Fourier transformation. Gaussians are the fixed points of the Fourier transformation (they remain Gaussian). Hence, coherent states which are unchanged by the Fourier transform are favored by decoherence.$^{11-14}$ In more general circumstances entropy production may not be minimized by an equally simple set of states, but the lessons drawn from the two extreme examples discussed above are nevertheless relevant. In particular, in the case of systems dominated by the environmental interaction, the Hamiltonian $\hat H_{{\cal SE}}$ will have a major say in selecting the preferred basis, while in the underdamped case of near-reversible ``Newtonian'' limit approximately Gaussian wave packets localized in both position and momentum will be optimally predictable, leading to the idealization of classical trajectories. In either case, einselection will pinpoint the stable set of states in the Hilbert space. These pointer states will be stable, but their superpositions will deteriorate into pointer state mixtures rather quickly --- on the decoherence timescale --- which tends to happen very much faster than relaxation$^3$. This eventual diagonality of the density matrix in the einselected basis is a byproduct, an important symptom, but not the essence of decoherence. I emphasize this because diagonality of $\rho_{\cal S}$ in some basis has been occasionally (mis-) interpreted as a key accomplishment of decoherence. This is misleading. Any density matrix is diagonal in some basis. This has little bearing on the interpretation. Well-known examples of such accidental diagonality are the unit density matrix (which is diagonal in every basis) and the situation where $\rho_{A\cup B} = p\rho_A + (1 - p) \rho_B$ describes a union of two ensembles $A$ and $B$ with density matrices $\rho_A$ and $\rho_B$ which are not co-diagonal (i.e., $[\rho_A, \rho_B] \not= 0$). In either of these two cases states which are on the diagonal of $\rho_{A\cup B}$ are in effect a mathematical accident and have nothing to do with the physical reality. 
Einselection chooses a preferred basis in the Hilbert space in recognition of its predictability. That basis will be determined by the dynamics of the open system in the presence of environmental monitoring. It will often turn out that it is overcomplete. Its states may not be orthogonal, and, hence, they would never follow from the diagonalization of the density matrix. Einselection guarantees that only those ensembles which consist of a mixture of pointer states can truly ``exist'' in the quasi-classical sense. That is, individual members of such ensembles are already immune to the measurement of pointer observables. These remarks cannot be made about an arbitrary basis which happens to diagonalize $\rho$, but they are absolutely essential if the quantum system is to be regarded as effectively classical. It is useful to contrast decoherence with the more familiar consequence of interactions with the environment --- the noise. Idealized decoherence [e.g., the case of Eq.~(5)] has absolutely no effect on the observable of interest. It is caused by the environment carrying out a continuous ``nondemolition measurement'' on the pointer observable $\hat O$. Thus, decoherence is caused by the system observables affecting the environment and by the associated transfer of information. Decoherence is, in this sense, a purely quantum phenomenon; information transfers have no effect on classical states. Noise, by contrast, is caused by the environment disturbing an observable. It is, of course, familiar in the classical context. The distinction between the two is illustrated in Fig.~1c, in the {\tt c-not} language we have adopted previously. Astute readers will point out that the distinction between noise and decoherence is a function of the observable in terms of which the {\tt c-not} is implemented. This is because a quantum {\tt c-not} is, in contrast with its classical counterpart, a ``two-way street.'' When the Hadamard transform, $\left|\pm\right> = (\left|0\right> \pm \left|1\right>)/\sqrt{2}$, is carried out, control and target swap their functions. Thus, loosely speaking, as the information about the states $\{\left|0\right>, \left|1\right>\}$ travels in one direction, the information about the relative phase (which is encoded in their Hadamard transforms) travels the other way. Thus, in quantum gates the direction of the information flow depends on the states which are introduced at the input. Typically, both noise and decoherence are present. One can reinterpret the predictability sieve$^{11}$ we have mentioned before as a search for the set of states which maximizes the ``control'' role of the system, while simultaneously minimizing its ``target'' role. Eigenstates of the pointer observable are a solution. The phases between them are a ``victim'' of decoherence and are rapidly erased by the interaction with the environment.

\section{PROBABILITIES}

The classical textbook of Gnedenko$^{53}$ distinguishes three ways of defining probability. These are: (i)~Definitions which introduce probability as a quantitative measure of the {\it degree of certainty\/} of the observer; (ii)~``Standard\footnote{``Classical'' is a more often used adjective.
We shall replace it by ``standard'' to avoid confusion with the other kind of classicality discussed here.} definitions'', which originate from the more primitive concept of {\it equal likelihood\/} (and which can be traced to the original applications of probability in gambling); (iii)~{\it Relative frequency\/} definitions, which attempt to reduce probability to a frequency of occurrence of an event in a large number of trials. In the context of the interpretation of quantum theory, the last of these three definitions has been invoked most often in attempts to derive probabilities from the universal quantum dynamics.$^{54-57}$ The argument starts with an ensemble of identical systems (e.g., spin ${1 \over 2}$ systems) in a pure state and a definition of a relative frequency operator for that ensemble. The intended role of the relative frequency operator was to act as a quantum equivalent of a classical ``counter,'' but in effect it was always a meta-observable of the whole ensemble, and, thus, it could not have been associated with the outcomes of measurements of the individual members of the ensemble. A useful insight into relative frequency observables is afforded by the physically transparent example of Farhi {\it et~al.}$^{56}$ They consider an ensemble of spin ${1 \over 2}$ ``magnets,'' all in an identical state, aligned with some axis $\vec a$. The relative frequency observable along some other direction $\vec b$ would correspond to a measurement of the deflection of an object with a known mass, and with the whole ensemble of spins attached to it, by a (meta) Stern-Gerlach apparatus with a field gradient parallel to $\vec b$. The angle of deflection would then be proportional to $\vec a \cdot \vec b$, and ${1 + \vec a \cdot \vec b \over 2}$ would be an eigenvalue of the frequency operator. However, none of the spins individually would be required to choose its ``answer.'' This approach is of interest, as it sheds light on the properties of collective observables in quantum physics, but it does not lend itself to the required role of supplying the probability interpretation in the MWI context. A true ``frequency'' with a classical interpretation cannot be defined at a level which does not allow ``events'' --- quantum evolutions which lead to objectively existing states --- to be associated with the individual members of that ensemble. This criticism was made already, in a somewhat different guise, by several authors (see Kent$^{57}$ and references therein). The problem is in part traceable to the fact that the relative frequency observables do not eliminate superpositions between the branches of the universal wave function and do not even define what these branches are. Decoherence has obvious implications for the probability interpretation. The reduced density matrix $\rho$ which emerges following the interaction with the environment and a partial trace will always be diagonal in the {\it same\/} basis of einselected pointer states $\{\left|i\right>\}$. These states help define elementary ``events.'' Probabilities of such events can be inferred from their coefficients in $\rho$, which have the desired ``Born's rule'' form. Reservations about this straightforward reasoning have been expressed. Zeh$^{58}$ has noted that interpreting the coefficients of the diagonal elements of the density matrix as probabilities may be circular.
Here we shall therefore examine this problem more closely and prove the operational equivalence of two ensembles --- the original one {\bf o} associated with the set of identical decohering systems, and an artificial one {\bf a}, constructed to have the same density matrix $(\rho_{\bf o} = \rho_{\bf a})$, but for a much more classical reason, which allows for a straightforward interpretation in terms of relative frequencies. This will also shed light on the sense in which the origin of quantum probabilities can be associated with the ignorance of observers. The density matrix alone does not provide a prescription for constructing an ensemble. This is in contrast with the classical setting, where a probability distribution (i.e., in the phase space) suffices. However, a density matrix plus a guarantee that the ensemble is a mixture of the pointer states does give such a prescription. Let us consider $\rho_{\bf o}$ along with the einselected set of states $\{\left|i\right>\}$ which emerge as a result of the interaction with the environment. We consider an artificially prepared ensemble {\bf a} with a density matrix $\rho_{\bf a}$ which we make ``classical by construction.'' Ensemble {\bf a} consists of systems identical to the ones described by {\bf o}. They are continuously monitored by an appropriate measuring apparatus which can interact with and keep records of each system in {\bf a}. Let us first focus on the case of pure decoherence. Then, in the einselected basis: \begin{equation} \rho_{\bf o} (t = 0) = \sum_{i,j} \alpha_i \alpha_j^\ast \left|i \right> \left< j\right| \rightarrow \sum_i \left|\alpha_i\right|^2 \left|i \right> \left< i\right| = \rho_{\bf o} (t \gg t_D)\;, \end{equation} where $t_D$ is the decoherence timescale. This very same evolution shall occur for both $\rho_{\bf o}$ and $\rho_{\bf a}$. We can certainly arrange this by adjusting the interactions in the two cases. In particular, the (pointer) states $\{\left|i\right>\}$ shall remain untouched by decoherence. In the artificial case {\bf a}, the interpretation is inescapable. Each member of {\bf a} comes with a ``certificate'' of its state (which can be found in the memory of the recording device). Following the initial measurement (which establishes the correlation and the first record), the subsequent records will reveal a very boring ``history'' (i.e., $\left|i = 17\right>_{@t_1}$, $\left|i = 17\right>_{@t_2}$, \dots $\left|i = 17 \right>_{@t_n}$, etc.). Moreover, the observer --- any observer --- can remeasure members of {\bf a} in the basis $\{\left|i\right>\}$ and count the numbers of outcomes corresponding to distinct states of each of the $N$ members. There is no ``collapse'' or ``branching'' and no need to invoke Born's rule. All of the outcomes are in principle pre-determined, as can be eventually verified by comparing the record of the observer with the on-going record of the monitoring carried out by the measuring devices permanently attached to the systems. Individual systems in {\bf a} have ``certified'' states, counting is possible, and, hence, probability can be arrived at through the frequency interpretation for the density matrix $\rho_{\bf a}$. But, at the level of density matrices, $\rho_{\bf a}$ and $\rho_{\bf o}$ are indistinguishable by construction. Moreover, they have the {\it same\/} pointer states, $\{\left|i\right>\}_{\bf o} = \{\left|i\right>\}_{\bf a}$.
Since all the physically relevant elements are identical for {\bf o} and {\bf a}, and since, in {\bf a}, the frequency interpretation leads to the identification of the coefficients of $\left|i \right> \left< i\right|$ with probabilities, the same must be true for the eigenvalues of $\rho_{\bf o}$. The ``ignorance'' interpretation of the probabilities in {\bf a} is also obvious. The state of each and every system is known, but until the ``certificate'' for a {\it specific\/} system is consulted, its state shall remain unknown. Similarly, each system in {\bf o} can be said to have a state recorded by the environment, waiting to be discovered by consulting the record dispersed between the environmental degrees of freedom. This statement should not be taken too far --- the environment is only {\it entangled\/} with the system --- but it is surprising how little difference there is between the statements one can make about {\bf o} and {\bf a}. In fact, there is surprisingly little difference between this situation and the case where the system is completely classical. Consider the familiar Szilard's engine,$^{59}$ where the observer (Szilard's demon) makes a measurement of the location of a classical particle. The correlation between the particle and the records of the demon can be undone (until or when the demon's record is copied). Thus, ``collapse'' may not be as purely quantum as it is usually supposed. And information transfer is at the heart of the issue in both classical and quantum contexts. In any case, our goal here has been a frequentist justification of probabilities. And that goal we have accomplished using an approach very different from the frequency-operator attempts to derive Born's formula put forward to date.$^{54-57}$ To apply the strategy of the {\it standard definition\/} of probabilities in quantum physics, we must identify circumstances under which possibilities --- mutually exclusive ``events'' --- can be permuted without having any noticeable effect on their likelihoods. We shall start with the decohered density matrix which has all of the diagonal coefficients equal: \begin{equation} \rho = N^{-1} \sum_{k=1}^N \left|k \right> \left< k\right| = {\bf 1}/N \end{equation} Exchanging any two $k$'s has obviously no effect on $\rho$ and, therefore, on the possible measurement outcomes. Thus, when we assume that the total probability is normalized and equal to unity, the probability of any individual outcome $\left|k\right>$ must be given by: \begin{equation} Tr \rho \left|k \right> \left< k\right| = N^{-1}\;. \end{equation} It also follows that the probability of a combination of several ($n$) such elementary events is \begin{equation} Tr \rho \left(\left| k_1 \right> \left< k_1\right| + \left| k_2 \right> \left< k_2\right| + \cdots + \left| k_n \right> \left< k_n\right|\right) = n/N\;. \end{equation} Moreover, when before the onset of decoherence the system was described by the state vector \begin{equation} \left|\psi\right> = N^{-1/2} \sum_{k=1}^N e^{i\phi_k} \left|k\right>\;, \end{equation} the probabilities of the alternatives after decoherence in the basis $\{\left|k\right>\}$ will be \begin{equation} p_{\left|k\right>} = \left|\left<k | \psi\right>\right|^2 = N^{-1} \ . \end{equation} However, in order to be able to add or to permute different alternatives without any operational implications, we must have assumed decoherence. For, as long as $\left|\psi\right>$ is a superposition, Eq.~(17), one can easily invent permutations which will affect measurement outcomes.
Consider, for example, \begin{equation} \left|\psi\right> = \left(\left|1\right> + \left| 2\right> - \left| 3\right>\right)/\sqrt{3} \end{equation} A measurement could involve the alternatives $\{\left| 1\right>, \left| 2\right> + \left| 3\right>\;,\; \left| 2\right> - \left| 3\right>\}$ and would easily distinguish between the $\left|\psi\right>$ above and the permuted: \begin{equation} \left|\psi'\right> = \left(\left| 3\right> + \left| 2\right> - \left| 1\right>\right)/\sqrt{3} \end{equation} The difference between $\left| \psi\right>$ and $\left|\psi'\right>$ is the relative phase. Thus, decoherence and a preferred basis with identical coefficients are {\it both\/} required to implement the standard definition in the quantum context. The case of unequal probabilities is dealt with by reducing it to (or at least approximating it with) the case of equal probabilities. Consider a density matrix of the system \begin{equation} \rho_{\cal S} = \sum_{k=1}^N p_k \left| k\right> \left< k\right|\;. \end{equation} We note that it can be regarded as an average of an equal-probability density matrix of a composite system consisting of ${\cal S}$ and ${\cal R}$: \begin{equation} \rho_{\cal SR} \cong \sum_{k=1}^N\, \sum_{j=n_k}^{n_{k+1}-1} \left| k,j \right> \left< k,j\right|/M \end{equation} Here $M$ is the total number of states in ${\cal H}_{\cal SR}$, and the degeneracies $n_{k}$ are selected so that $p_k \simeq n_k/M$, i.e., \begin{equation} n_k \cong \sum_{k' = 1}^k p_{k'} \cdot M\;,\quad \sum_{k=1}^N n_k = M\;. \end{equation} For sufficiently large $M$ (typically $M \gg N$) the ``coarse-grained'' $\tilde \rho_{\cal S}$ and $\rho_{\cal S}$ will become almost identical: \begin{equation} \tilde \rho_{\cal S} =\lim_{{M \over N}\to \infty} \sum_{k=1}^N \sum_{j=n_k}^{n_{k+1}-1} \left<j\right|\rho_{\cal SR}\left|j\right> = \lim_{{M \over N}\to \infty} Tr_{\cal R} \rho_{\cal SR} = \rho_{\cal S} \end{equation} One can now use the ``standard'' argument to obtain, first, the probability interpretation for $\rho_{\cal SR}$ (based on the invariance of $\rho_{\cal SR}$ under the permutations $(kj) \leftrightarrow (k'j')$), and then use it [and Eq.~(16)] to deduce the probabilistic interpretation of $\rho_{\cal S}$. Note that in the above sum, Eq.~(24), we did not have to appeal to the actual numerical values of the eigenvalues of $\rho_{\cal SR}$, but only to their equivalence under the permutations. Thus, we are not assuming a probabilistic interpretation of $\rho_{\cal SR}$ to derive it for $\rho_{\cal S}$. (We also note that the sum over auxiliary states above is, strictly speaking, not a conventional trace, since the dimensions of the subspaces traced out for distinct $k$'s will in general differ. For those concerned with such matters we point out that one can deal with subspaces of equal dimensionality provided that the ``dimension deficit'' is made up by auxiliary states which have zero probability.) This completes the second approach to the quantum probabilities. Again, we have reduced the problem to counting. This time, it was a count of equivalent alternatives (rather than of events). In both of these approaches decoherence played an important role. In the standard definition, decoherence got rid of the distinguishability of the permuted configurations and einselection defined what they were. In the frequency interpretation einselection was essential --- it singled out states which were stable enough to be counted and verified.
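The role of decoherence in the permutation argument can be checked with a short numerical sketch (ours, added for illustration; the measurement basis is the one of Eqs.~(19)--(20)): the coherent states $\left|\psi\right>$ and $\left|\psi'\right>$ give different statistics, while the corresponding decohered density matrix is permutation invariant and assigns $1/3$ to every alternative.

\begin{verbatim}
import numpy as np

k1, k2, k3 = np.eye(3)                        # |1>, |2>, |3>
psi   = (k1 + k2 - k3) / np.sqrt(3)           # Eq. (19)
psi_p = (k3 + k2 - k1) / np.sqrt(3)           # Eq. (20): |1> and |3> permuted

# Measurement alternatives {|1>, (|2>+|3>)/sqrt(2), (|2>-|3>)/sqrt(2)}
alts = [k1, (k2 + k3) / np.sqrt(2), (k2 - k3) / np.sqrt(2)]

def probabilities(state_or_rho):
    if state_or_rho.ndim == 1:                # pure state
        return [abs(np.vdot(b, state_or_rho)) ** 2 for b in alts]
    return [np.real(np.vdot(b, state_or_rho @ b)) for b in alts]   # density matrix

print(probabilities(psi))     # [1/3, 0, 2/3]  -- coherent superposition
print(probabilities(psi_p))   # [1/3, 2/3, 0]  -- the permutation is detectable

# After decoherence in the {|k>} basis both states give the same diagonal rho,
# invariant under permutations of the |k>'s, with probability 1/3 per alternative:
rho = np.diag(np.abs(psi) ** 2)
print(probabilities(rho))     # [1/3, 1/3, 1/3]
\end{verbatim}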
Our last approach starts from a point of departure which does not rely on counting. Gnedenko was least sympathetic to the definitions of probability as a measure of a ``degree of certainty,'' which he regarded as a ``branch of psychology'' rather than a foundation of a branch of mathematics. We shall also find our attempt in this direction least concrete of the three, but some of the steps are nevertheless worth sketching. Gnedenko's discomfort with the ``degree of certainty'' might have been alleviated if he had been familiar with the paper of Cox$^{60}$ who has, in effect, derived basic formulae of the theory of probability starting from such a seemingly subjective foundation by insisting that the ``measure'' should be consistent with the laws of Boolean logic. Intuitively, this is a very appealing demand. Probability emerges as an extension of the two-valued logic into a continuum of the ``degrees of certainty.'' The assumption that one should be able to carry classical reasoning concerning ``events'' and get consistent estimates of the conditional degree of certainty leads to algebraic rules which must be followed by the measure of the degree of certainty. This implies that an information processing observer who employs classical logic states and classical memory states which do not interfere will be forced to adopt calculus of probabilities essentially identical to what we have grown accustomed to. In particular, likelihood of $c$ and $b$ (i.e., ``proposition $c\cdot b$'') will obey a multiplication theorem: \begin{equation} \mu \left(c \cdot b | a\right) = \mu \left(c | b \cdot a\right) \mu \left(b | a\right)\;. \end{equation} Above $\mu (b|a)$ designates a conditional likelihood of $b$ given that $a$ is certain. Moreover, $\mu$ should be normalized: \begin{equation} \mu \left(a|b\right) + \mu \left(\sim a | b\right) = 1\;, \end{equation} where $\sim a$ is the negation of the proposition $a$. Finally, likelihood of $c$ or $b \ (c\cup b)$ is: \begin{equation} \mu \left(c\cup b | a\right) = \mu (c | a) + \mu (b | a) - \mu (c \cdot b|a)\;, \end{equation} which is the ordinary rule for the probability that at least one of two events will occur. In short, if classical Boolean logic is valid, then the ordinary probability theory follows. We are halfway through our argument, as we have not yet established the connection between the $\mu$'s and the state vectors. But it is important to point out that the assumption of the validity of Boolean logic in the derivation involving quantum theory is nontrivial. As was recognized by Birkhoff and von~Neumann,$^{61}$ the distributive law $a \cdot (b\cup c) = (a \cdot b) \cup (a \cdot c)$ is {\it not\/} valid for quantum systems. Without this law, the rule for the likelihood of the logical sum of alternatives, Eqs.~(26), (27) would not have held. The physical culprit is quantum interference, which, indeed, invalidates probability sum rules (as is well appreciated in the examples such as the double slit experiment). Decoherence destroys interference between the einselected states. Thus, with decoherence, Boolean logic, and, consequently, classical probability calculus with its sum rules are recovered. 
Once it is established that ``likelihood'' must be a measure (which, in practice, means that $\mu$ is nonnegative, normalized, satisfies sum rules, and that it depends only on the state of the system and on the proposition) Gleason's theorem$^{62}$ implies that \begin{equation} \mu (a|b) = Tr \left(\left|a \right> \left< a\right| \rho_b\right)\;, \end{equation} where $\rho_b$ is a density matrix of the system, and $\left|a \right> \left< a\right|$ is a projection operator corresponding to the proposition $a$. Thus, starting from an assumption about the validity of {\it classical\/} logic (i.e., absence of interference) we have arrived, first, at the sum rule for probabilities and, subsequently, at the Born's formula. Of the three approaches outlined in this section the two ``traditional'' are more direct and --- at least to this author --- more convincing. The last one is of interest more for its connection between logic and probability than as a physically compelling derivation of probabilities. We have described it in that spirit. These sorts of logical considerations have played an important part in the motivation and the subsequent development of the ``consistent histories'' approach.$^{18-20}$ \section{RELATIVELY OBJECTIVE EXISTENCE: IN WHAT SENSE IS THE MOON THERE WHEN NOBODY LOOKS?} The subjective nature of quantum states is at the heart of the interpretational dilemmas of quantum theory. It seems difficult to comprehend how quantum fuzziness could lead to the hard classical reality of our everyday experience. A state of a classical system can be in principle measured without being perturbed by an observer who knew nothing about it beforehand. Hence, it is said that classical physics allows states to exist objectively. Operationally, when observer A prepares a classical ensemble ${\bf a_c}$ and hides the list ${\cal L}_A^c$ with the records of the state of each system in ${\bf a_c}$ from the observer B, it will be still possible for B to find out the states of each member of ${\bf a_c}$ through a measurement, with no {\it a~priori\/} knowledge. To verify this, B could supply his list ${\cal L}_B^c$ for inspection. Classical physics allows ${\cal L}_A^c$ and ${\cal L}_B^c$ to be always identical. Moreover, both lists will be the same as the new list ${\cal L}_A^{c'}$ with the states of ${\bf a_c}$ remeasured by A to make sure that ${\bf a_c}$ was not perturbed by B's measurements. Indeed, it is impossible for A to find out, just by monitoring the systems in ensemble ${\bf a_c}$, whether some enterprising and curious B has discovered all that is to know about ${\bf a_c}$. Measurements carried out on a classical ${\bf a_c}$ can be accomplished without leaving an imprint. This gedankenexperiment shall be the criterion for the ``objective existence.'' When all of the relevant lists match, we shall take it as operational evidence for the ``objective nature of measured states.'' In the case of a quantum ensemble ${\bf a_q}$ this experiment cannot succeed {\it when it is carried out on a closed system}. Observer A can of course prepare his list ${\cal L}_A^q$ --- a list of Hilbert space states of all the systems in ${\bf a_q}$. B could attempt to discover what these states are, but in the absence of any prior knowledge about the observable selected by A in the preparation of ${\bf a_q}$ he will fail --- he will reprepare members of ${\bf a_q}$ in the eigenstates of the observables he has selected. 
Hence, unless by sheer luck B has elected to measure the same observables as A for each member of ${\bf a_q}$, ${\cal L}_A^q$ and ${\cal L}_B^q$ will not match. Moreover, when A remeasures the quantum ensemble using his ``old'' observables (in the Heisenberg picture, if necessary) following the measurement carried out by B, he will discover that his new list ${\cal L}_A^{q'}$ and his old list ${\cal L}_A^q$ do not match either. This illustrates the sense in which states of quantum systems are subjective --- they are inevitably shaped by measurements. In a closed quantum system it is impossible to just ``find out'' what the state is. Asking a question (choosing the to-be-measured observable) guarantees that the answer (its eigenstate) will be consistent with the question posed.$^{51}$ Before proceeding, we note that in the above discussions we have used a ``shorthand'' to describe the course of events. What was really taking place should have been properly described in the language of correlations. Thus, especially in the quantum case, the objectivity criterion concerned the correlation between a set of several lists (${\cal L}_A^q$, ${\cal L}_B^q$, ${\cal L}_A^{q'}$) which were presumably imprinted in effectively classical (i.e., einselected with the help of appropriate environment) sets of record states. The states of the systems in the ensemble ${\bf a_q}$ played a role similar to the communication channels. The operational definition of objective existence of the state of the system hinges on the ability of the state of the system to serve as a ``template,'' which can remain unperturbed while it is being imprinted onto the records of the initially ignorant observers. States of isolated quantum systems cannot serve this purpose --- they are too malleable! (Energy eigenstates are somewhat of an exception, but that is a different story (Paz and Zurek, in preparation).) We shall see below that the einselected states of decohering quantum systems are less fragile and can be used as a ``template''. Again, we shall use a shorthand, talking about states, while the real story is played out at the level of multipartite {\it correlations}. We assume the reader shall continue to translate the shorthand into the ``full version.'' Consider ${\bf a_e}$, an ensemble of quantum systems subject to ideal einselection caused by the interaction with the environment. If the systems are to retain their states in spite of decoherence, the observer A has very little choice in the observables he can use for preparation. The menu of stable correlations between the states of systems in ${\bf a_e}$ and his records is limited to these involving einselected pointer states. Only such states will be preserved by the environment for periods of time long enough to contemplate the gedankenexperiment described above. The task of the observer B (who is trying to become correlated with the stable states of ${\bf a_e}$ without destroying pre-existing stable correlations established by the observer A) is simplified. As soon as he finds out what are the pointer observables, he can measure at will. He can be sure that -- in order to get sets of records with predictive power -- A must have selected the same pointer observables to prepare ${\bf a_e}$. And as soon as the pointer observables are known, their eigenstates can be found without being perturbed. 
Moreover, B will be measuring observables which are already being monitored by the environment, so his measurements will have no discernible effect on the states of the systems in ${\bf a_e}$. Hence, either A was smart enough to choose pointer states (in which case his lists ${\cal L}_A^e$, ${\cal L}_A^{e'}$, \dots will all be identical) and B's spying will not be detected, {\it or\/} A chooses to measure and prepare arbitrary states in the Hilbert space, guaranteeing their deterioration into mixtures of pointer states at a rapid decoherence rate. In this second case A's lists will reflect a steady increase of entropy caused by the mismatch between the observable he has elected to measure and the pointer observables he should have measured. And B's spying will most likely still be undetected (especially if he is smart enough to focus on the pointer observables). Let us now compare the three variants of the gedankenexperiment above. In the classical case, anyone can find out states of the systems in ${\bf a_c}$ without perturbing them. Prior information is unnecessary, but only classical (i.e., localized, etc.) states can be used. In the case of a quantum isolated system an enormous variety of quantum states --- including all conceivable superpositions of the classical states --- can be prepared by A in ${\bf a_q}$. Now B's measurement will almost inevitably reprepare these states, unless somehow B knows beforehand what to measure (i.e., what observables has A measured). In the third case --- quantum, but with decoherence and einselection --- the choices of A are limited to the preferred pointer states. Only a very small subset of all the superpositions in the Hilbert space ${\cal H}$ is available. Moreover, the environment is already carrying out continuous monitoring of the observables it has elected to ``measure.'' B can use the correlations established between the system and the state of the environment to find out what are the preferred observables and to measure them. He will of course discover that his list matches A's lists and that A could not have detected B's ``spying.'' When the states can be ``revealed'' without being reprepared in the process, they can be thought to exist objectively. Both the classical case and the quantum plus einselection case share this feature. The environment-induced superselection simultaneously decreases the number of states in ${\cal H}$ while allowing the einselected states to ``exist objectively'' --- to be found out without the necessity of repreparing them in the process. In fact, the measurements we carry out as observers are taking an even more immediate advantage of the monitoring carried out by the environment. Our measurements are almost never direct --- nearly without exception they rely on interception of the information already present in the environment. For instance, all of the visual information we acquire comes from a tiny fraction of the photon environment intercepted by the rod and cone cells in our eyes. Indeed, this is perhaps the best strategy observer B could have used in the third version of the gedankenexperiment above. Rather than directly interacting with the system in ${\bf a_e}$, he could have monitored the environment. 
An imprint left in a small fraction of its state is usually enough to determine the state of the system --- the environment contains a vastly redundant record of the pointer observables.$^{28}$ Thus, perception of classical reality seems inevitable for the observers who --- like us --- rely on the second-hand information, on the correlations acquired indirectly, from the environment. In a sense the environment plays the role of a commonly accessible ``internet--like'' data base which allows one to make copies of the records concerning the state of the system. There is no need to measure the system directly --- it suffices to consult the relevant ``web page''. And there is no danger of altering the state of the system: Nonseparability and other such manifestations of quantum theory could reappear only if, somehow, all of the widely disseminated copies of the information were captured and processed in the appropriate (quantum) manner. The difficulty of such an enterprise in the macroscopic domain (which we have quantified before by the redundancy distance, Eqs. (6)-(12)) is a measure of irreversibility of the decoherence--induced ``reduction of the wavepacket''. We have just established that states of quantum systems interacting with their environments exist much like the classical states were presumed to exist. They can be {\it revealed\/} by measurements of the pointer observables which can be ascertained without prior knowledge. In particular, indirect measurements --- observers monitoring the environment in search of the imprints of the state of the system --- seem to be the safest and at the same time most realistic way to reveal that state. Moreover, there are many fewer possible einselected states than there are states in the Hilbert space. Thus, the relative objectivity based on the system-environment correlations and, hence, on decoherence and einselection, comes at a price: The choice is severely limited.\footnote{How limited? There are of course infinitely many superpositions in ${\cal H}$ of finite dimensionality, but that is already true of a spin ${1 \over 2}$ Hilbert space, and it does not capture the reason for the incredible proliferation of superpositions. In the Hilbert space of a decohering system, there will be $N \sim {\rm Dim} ({\cal H})$ pointer states $\{\left|k\right>\}$. For a typical superposition state $\left|\psi\right>$ composed of all $N$ states, with the possibilities ``coarse grained'' by assuming that all $\left|\psi\right>$ have the form: \begin{displaymath} \left|\psi\right> = {1 \over \sqrt{N}} \sum_k (-)^{i_k} \left|k\right> \end{displaymath} where $i_k$ is 0 or 1, there will be \begin{displaymath} {\cal W} \sim 2^N \end{displaymath} such superpositions. That is, even when we set all the coefficients to have the same absolute value, and coarse-grain phases to the least significant (binary) digit, we will have exponentially many possibilities.} In the above operational approach to the definition of existence, we have made several simplifying assumptions. We have (i)~neglected the evolution; (ii)~assumed perfect decoherence; and (iii)~focused on observers with ``perfect knowledge,'' i.e., used pure states rather than mixtures as initial conditions. All of these assumptions can be relaxed with relatively little pain. Hamiltonian evolution {\it alone\/} would not be a problem --- the system could be described in Heisenberg's picture. 
But the combination of evolution and decoherence will lead to complications, resulting in a preferred basis which is imperfect$^{12-14,17}$ --- even the optimal pointer states would eventually deteriorate into mixtures, albeit on a timescale long compared to the decoherence timescale for random superposition in ${\cal H}$. This difference between the einselected states and arbitrary superpositions could be quantified by defining the predictability horizon: \begin{equation} t_p = \int\limits_0^\infty \left(H_{EQ} - H (t)\right)\,dt/ \left(H_{EQ} - H (0)\right) \end{equation} This characterizes the timescale over which the initial information $H_{EQ} - H(0)$ is lost as the von~Neumann entropy, $H(\rho)= -Tr \rho \log \rho$, approaches the long-term (equilibrium) value $H_{EQ}$. Easier to compute (and similarly motivated) \begin{equation} t'_p = \int\limits_0^\infty Tr \left(\rho_t^2 - \rho^2 (\infty)\right)\, dt \end{equation} should supply equivalent information. Thus $t'_p$ would be short (and of the order of the decoherence time) for a typical initial state in the Hilbert space. By contrast, the predictability horizon may be long (and, perhaps, even infinite) for pointer states of integrable systems, while for chaotic systems one would anticipate predictability timescales determined by the Lyapunov exponents when decoherence dominates.$^{24}$ The gedankenexperiment at the foundation of our ``operational definition of existence'' is based on the comparison of records of two observers A and B\null. It could now be repeated, provided that the duration of the experiment is brief compared to the predictability timescale, or that the natural rate of information loss is accounted for when evaluating the final results. In fact, the predictability sieve could be implemented using this strategy. Einselected pure states will maximize $t_p$. Moreover, such procedure based on the predictability timescale can be easily applied to compare pure and mixed states. That is, one can find out how much more durable are various correlations between the observer's records of the coarse-grained measurements. The key difference from the original predictability sieve,$^{11}$ which has been successfully used to demonstrate the special role of Gaussians,$^{11-14}$ is a somewhat different sieve criterion, which may even have certain advantages. All these caveats and technicalities should not obscure the central point of our discussion. Environment-induced superselection allows observers to anticipate what states in the Hilbert space have a ``relatively objective existence'' and can be revealed by measurements without being simultaneously reprepared. {\it Relatively objective existence\/} is a deliberate double entendre, trying to point out both the relative manner in which existence is defined (i.e., through correlations, similar in spirit to the relative states of Everett$^{50}$) and a reminder that the existence is not absolutely stable but, rather, that it is purchased at the price of decoherence and based on the monitoring by the environment. Concerns about the predictability timescale do not imply that, on timescales long compared to $t_p$, the states of the systems in question do not ``exist.'' Rather, $t_p$ indicates the predictability horizon on which evolution and decoherence destroy the relevance of the ``old'' data (the record-state correlation). 
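As a simple consistency check of Eq.~(29) (with an assumed, purely illustrative relaxation law), suppose the entropy approaches equilibrium exponentially, $H(t) = H_{EQ} - \left(H_{EQ} - H(0)\right) e^{-t/\tau}$. Then
\begin{displaymath}
t_p = \int\limits_0^\infty e^{-t/\tau}\, dt = \tau\;,
\end{displaymath}
so the predictability horizon coincides with the relaxation time of the initial information; an analogous exponential decay of the purity in Eq.~(30) yields $t'_p = \tau' \left(Tr \rho^2_0 - Tr \rho^2 (\infty)\right)$, again proportional to the corresponding decay time.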
Even on timescales long compared to $t_p$, however, the essence of our definition of reality --- the ability of the observer to ``reveal'' the state --- captures the essence of ``existence.'' \section{THE EXISTENTIAL INTERPRETATION} The interpretation based on the ideas of decoherence and einselection has not really been spelled out to date in any detail. I have made a few half-hearted attempts in this direction,$^{11,63}$ but, frankly, I was hoping to postpone this task, since the ultimate questions tend to involve such ``anthropic'' attributes of the ``observership'' as ``perception,'' ``awareness,'' or ``consciousness,'' which, at present, cannot be modelled with a desirable degree of rigor. It was my hope that one would be able to point to the fact that decoherence and einselection allow for existence (as defined operationally through relative states and correlations in the preceding section) and let those with more courage than I worry about more esoteric matters. I have been gradually changing my mind as a result of penetrating questions of my colleagues and the extrapolations (attempted by others) of the consequences of decoherence and einselection which veered in directions quite different from the one I had anticipated. (See references 64--67 for a sample of questions, criticisms, and attempts at an interpretation.) Moreover, while there are ``deep'' questions which may be too ambiguous to attack with the tools used by physicists, there are aspects of information processing which bear direct relevance for these deeper issues, and which can be profitably analysed in a reasonably concrete setting. Here I intend to amplify some of the points I have made before and to provide the ``next iteration'' by investigating consequences of environmental monitoring a step or two beyond the operational definition of ``existence.'' I shall proceed in the general direction indicated earlier,$^{11, 63}$ and focus on the stability of the einselected correlations. We start by noting that the relatively objective existence of certain states ``negotiated'' with the environment has significant consequences for the observers and their information-processing abilities. In the gedankenexperiments involving observers~A and~B in the preceding section we could have equally well argued for the objective existence of the states of their memory cells. Again, superpositions of all the possibilities are ruled out by einselection, and the brain of an observer can preserve, for long periods of time, only the pointer states of its neurons. These states exist in the same relatively objective sense we have defined before --- they can be revealed by (i.e., become correlated with) the states of other neurons without having to be simultaneously re-prepared. Indeed, real neurons are coupled very strongly to their environments and certainly cannot exist in superpositions. Their two stable states are characterized by different rates of firing, each a consequence of nonequilibrium, dissipation-dominated phenomena, which are bound to leave a very strong imprint on the environmental degrees of freedom not directly involved in the information processing. 
In an obviously overdamped system operating at a relatively high temperature, the inability to maintain superpositions is not a surprise.\footnote{Neurons work more like diodes or transistors, relying on two stable steady states (characterized by different firing rates) for stability of the logical ``0'' and ``1,'' rather than like the two-state spin ${1 \over 2}$-like systems which often end up playing the neuron's role in models of neural networks invented by theoretical physicists. I believe, however, that for the purpose of the ensuing discussion this distinction is not essential and I will continue to invoke {\it licentia physica theoretica\/} and consider spin ${1 \over 2}$-like neurons for the remainder of this paper.} When we assume, as seems reasonable, that the states of neurons are the ``seat'' of memory and that their interactions lead to information processing (which eventually results in ``awareness,'' and other such ``higher functions''), we have tangible hardware issues to analyze. Indeed, at this level of discussion there is little fundamental difference between a brain and a massively parallel, neural network--like, {\it effectively classical\/} computer. The ability to process information concerning states of objects external to memory (for, say, the purpose of prediction) is then based on the stable existence of correlations between the record bits of memory and the state of the object. It is fairly easy to imagine how such a correlation can be initially established either by a direct measurement or, as we have noted previously, through an indirect process involving the environment. For the reliability of memories, it is absolutely crucial that this correlation be immune to further correlations with the environment, i.e., to decoherence. Following a measurement (and the initial bout of decoherence), the reduced joint density matrix of the system and the relevant part of the memory (with the environment traced out) will have the form: \begin{equation} \rho_{\cal SM} = \sum_i p_i \left|s_i \right> \left< s_i\right| \left|\mu_i\right> \left< \mu_i\right| \end{equation} The predictability horizon can be defined as before through \begin{equation} t_p^{(i)} = {\int\limits_0^\infty \left(H (s_i,\mu_i;t) - H (s_i, \mu_i;\infty)\right)\, dt \over H(s_i, \mu_i;0) - H(s_i,\mu_i; \infty)} \end{equation} for individual outcomes. Here $H$ can stand for either the Shannon--von~Neumann or the linear (or still another) measure of the information content of the conditional (re-)normalized $\left<\mu_i \left|\rho_{\cal SM}\right| \mu_i\right>$. After a perfect measurement there is a one-to-one correlation between the outcome and the record (Eq. (31)). It will, however, deteriorate with time as a result of dynamics and the openness of the system, even if the record--keeping memory states are perfectly reliable.$^{68}$ The predictability timescale for memory-system joint density matrices has a more specific interpretation than the one defined by Eqs. (29) and (30). It is also safe to assume that memories use stable states to preserve records. In this ``no amnesia'' case, $t_p^{(i)}$ will measure the timescale on which the acquired information becomes useless ``old news'' because of the unpredictable evolution of the open system. The predictability horizon can (and typically will) depend on the outcome. We note that more often than not, both the states of memory and the states of the measured systems will be mixed, coarse-grained states inhabiting parts of large Hilbert spaces, rather than pure states. 
Thus the record $\mu_i$ will correspond to $\rho_{\mu_i}$ and $Tr (\rho_{\mu_i} \rho_{\mu_j}) \cong \delta_{ij}$. It is straightforward to generalize Eqs.~(31) and~(32) to cover this more realistic case. Indeed, it is likely that the memory states will be redundant, so that the likely perturbations to the ``legal'' memory states shall retain orthogonality. This would allow for classical error correction, such as is known to be implemented, for example, in neural circuits responsible for photodetection in mammalian eyes, where several (approximately seven) rods must fire more or less simultaneously to result in a record of detection of a light source. Observers will be able to make accurate predictions for as long as a probabilistic equivalent of logical implication shall be valid, that is, as long as the conditional probability $g(t)$ defined by: \begin{equation} p \left(\sigma_i (t)|\mu_i\right) = p \left(\sigma_i (t), \mu_i\right)/p (\mu_i) = g(t) \end{equation} is close to unity. Here $\sigma_i (t)$ is a ``proposition'' about the state of the system at a time $t$. One example of such a formal equivalent of a proposition would be a projection operator onto a subspace of a Hilbert space. When $\sigma_i (t)$ is taken to be a pure state $\left|s_i (t)\right>$, and $t \ll t_p^{(i)}$ so that $g(t) \cong 1$, Eq.~(33) becomes in effect a formal statement of the ``collapse'' axiom of quantum measurement theory. For, memory will continue to rediscover the system in the same state upon repeated re-measurements. Again --- as in the preceding section --- relatively objective existence of {\it correlations\/} (established in contact with the environment, with their stability purchased at the expense of the limitation of the possible states of memory and the measured system) is decisive for the predictive utility of the records. These records must be repeatedly and reliably accessible for the other parts of memory to allow for information processing. This is why the record states which {\it exist\/} (at least in the relatively objective operational sense employed before) are essential for the reliability of memories inscribed in open systems. They can be re-measured by the other memory cells spreading the useful correlation throughout the information processing network of logical gates but suffer no ill effects in the process. The record state $\left| \mu_i \right> $ must then obviously be decoherence resistant, but the same should be true for the measured states $\left|s_i(0)\right>$ and (hopefully) for their near-future descendants $\left|s_i(t)\right>$. Only then will the correlation between memory and the state of the system be useful for the purpose of predictions. One can analyze this persistence of quasi-classical correlations from various points of view, including the algorithmic information content one. We shall mention this idea here only briefly as a more complete account is already available.$^{68}$ In essence, when $\left| s_i (t)\right>$ evolves predictably, a sequence of repeated measurements of the appropriate observables yields a composite record $R = \{\mu_{@t_1}^{(1)}, \mu_{@t_2}^{(2)},..., \mu_{@t_n}^{(n)}\}$, which will be all derivable from the initial $\mu_{@t_0}^{(0)}$ and from the algorithm for the evolution of the monitored system. This predictability could be expressed from the viewpoint of the observer by comparing the algorithmic information content of $R$ with its size in bits. 
When the whole $R$ can be computed from the initial condition, the algorithmic information content $K(R)$ (defined as the size of the shortest program for a universal computer with the output $R$$^{69}$) is much less than the size of the ``uncompressed'' $R$ in bits. An illustrative (if boring) example of this would be a sequence of records of a fixed state of an object such as a stone. Then $R$ would simply be the same record, repeated over and over. This is of course trivially algorithmically compressible. This immobility of objects such as stones is the basic property which, when reflected in the perfectly predictable sequence of records, provides a defining example of (the most basic type of) perception of existence, of permanence which defines ``classical reality''. In this case, the same set of observables ``watched'' by the environment is also being watched by the observer. In general, the state of a system evolving in contact with the environment will become less and less closely correlated with its initial state. Therefore, entropy will increase, and the reliability of the implication measured by the conditional probability $p(\sigma_i (t)|\mu_i)$ will decrease. Alternatively, one may want to retain the safety of predictions (i.e., have $p(\sigma_i(t)|\mu_i)$ close to unity at the price of decreased accuracy). This could be accomplished by choosing a safer (but less precise) prediction $\tilde \sigma_i(t)$ which includes $\sigma_i (t)$ with some ``error margin'' and thus supplies the requisite redundancy. For a judiciously selected initial measurement the conditional probability $p (\sigma_i (t)|\mu_i)$ will decrease relatively slowly (for example, on a dynamically determined Lyapunov timescale in the case of chaotic systems$^{68}$), while for measurements which result in the preparation of a ``wrong'' initial condition --- a state at odds with einselection --- the conditional probability would diminish suddenly, on a near-instantaneous decoherence timescale. Typically, the prediction $\sigma_i$ will not be a pure state but a suitably macroscopic patch in the phase space (and a corresponding ``chunk'' of the Hilbert space). Increase in the size of the patch will help extend the relative longevity of the predictive power of the records at the expense of the accuracy. Nevertheless, even in this case predictive power of the old records shall eventually be lost with time. The memory must be stored in robust record states, which will persist (ideally forever, or, at least, until they are deliberately erased). Deliberate erasure is an obvious strategy when the records outlive their usefulness. In this picture of a network of effectively classical memory elements interconnected with logical gates, stability of the records is essential. It is purchased at the price of ``censorship'' of the vast majority of all of the superpositions which are in principle available in the Hilbert space. It is in an obvious contrast with quantum computers,$^{30-35}$ where all of the superpositions are available for information processing. and where the states of memory are unstable and prone to the environment-induced decoherence and other errors. Let us now consider how such a quasi-classical, environmentally stable memory ``perceives'' the Universe. To avoid generalities, we consider --- from the point of view of this memory --- the task of determining what are the classical branches of the universal state vector. 
That is, we shall ask the observer to find out what are the pointer states in his branch of the Universe. In effect, we have already presented all of the elements necessary for the definition of the branches in the course of the discussion of existence in the preceding section. A branch is defined by its predictability --- by the fact that the correlations between its state and the records of the observer are stable. In other words, a branch is defined by the fact that it does not split into branches as rapidly as a randomly selected state. The observer is ``attached'' to the branch by the correlations between its state and the einselected states which characterize the branch. Indeed, the observer is a part of his branch. In the case of perfect predictability (no entropy production; initial records of the observer completely determine the future development of the branch), there would be no further branching. An observer will then be able --- on the basis of his perfect predictability --- to develop a view that the evolution of the Universe is completely deterministic and that his measurements either confirm perfect predictability (when carried out on the system he has already measured in the past) or that they help reveal the pre-existing state of affairs within ``his'' branch. This classically motivated and based on Newtonian intuitions ``single branch'' (single trajectory) limit of quantum physics is responsible for the illusion that we live in a completely classical Universe. It is an excellent approximation in the realm of the macroscopic, but it begins to fail as the resolution of the measurements increases. One example of failure is supplied by quantum measurements and, more generally, by the situations where the state of the macroscopic object is influenced by the state of a quantum object. Then the past initial condition is exhaustive --- the observer knows a pure state, i.e., all that is possible to know --- and yet this is demonstrably insufficient to let him determine the future development of his branch. For, when the past correlation is established in a basis incompatible either with the monitoring carried out by the environment, or when it prepares a state which is not an eigenstate of the new measurement, the new outcome cannot be inferred from the old initial condition. Relatively objective existence is at the core of the definition of branches. Stability of the correlations between the state of the observer and the branch is responsible for the perception of classicality. Stability of the record states of the observer is an obvious precondition. The observer with a given set of records is firmly attached to the branch which has induced these records --- he ``hangs on'' to the branch by the correlations. He may even be regarded as a part of that branch, since mutual correlations between systems define branches. Observers described here are quite different from the aloof observers of classical physics, which simply ``note'' outcomes of their measurements by adding to their abstract and immaterial repository of information. In a quantum universe {\it information is physical}$^{70}$ --- there is simply {\it no information without representation}.$^{63}$ In our context this implies that an observer who has recorded one of the potential outcomes is {\it physically distinct\/} from an observer who has detected an alternative outcome. 
Moreover, these two states of the observer are objectively different --- they can be ``revealed'' from the outside (i.e., by monitoring the environment in which the record states are immersed) without disturbing the observer's records. The question ``why don't we perceive superpositions'' (which has been repeated also by some of those who investigate and support ideas of decoherence$^{71}$) has a straightforward answer. The very physical state of the observer and, thus, his {\it identity\/} is a reflection of the information he has acquired. Hence, the acquisition of information is not some abstract, physically insignificant act, but a cause of reshaping of the state of the observer. An exaggerated example is afforded by the famous case of Schr\"odinger's cat.$^{48}$ A cat that dies as the result of an amplified quantum event will certainly be easily distinguishable from the cat that lives on (and can continue observations). Similarly, an effectively classical computer playing the role of an observer will be measurably distinct depending on what outcome was recorded in its data bank. Coherent superpositions of two memory states will disappear on the decoherence timescale in the presence of the environment. Hence, a coherent superposition of two distinct identities of an observer does not exist in the relatively objective operational sense introduced previously. Even in the rare cases when a memory bit of an observer enters into a bona fide entanglement with an isolated quantum system, decoherence will intervene and turn that entanglement into an ordinary classical correlation in the basis defined by the einselected record states. The interpretation which recognizes that decoherence and environment-induced superselection allow for the {\it existence\/} of states at the expense of the superposition principle is known as the {\it existential interpretation}.$^{11,63}$ It accounts for the inability of the observers to ``perceive'' arbitrary superpositions. The (relatively) objective existence of the records is a precondition for their classical processing and, therefore, for perception.\footnote{It is amusing to speculate that a truly quantum observer (i.e., an observer processing quantum information in a quantum computer-like fashion) might be able to perceive superpositions of branches which are inaccessible to us, beings limited in our information processing strategies to the record states ``censored'' by einselection.} It is easy to design tests which would distinguish between objectively existing records and accidental states. Redundancy is the key. Thus, when an entanglement between a (two-state) system and a memory cell develops, \begin{equation} \left|\phi_{{\cal S}{\mu}}\right> = \alpha \left|\uparrow\right> \left| 1\right> + \beta \left|\downarrow\right> \left| 0\right>\;, \end{equation} and, under the influence of the environmental decoherence, rapidly deteriorates to a classical correlation, \begin{equation} \rho_{{\cal S}{\mu}} = \left|\alpha\right|^2 \left|\uparrow\right> \left< \uparrow\right| \left| 1\right> \left< 1\right| + \left|\beta\right|^2 \left|\downarrow\right> \left<\downarrow\right| \left| 0 \right> \left< 0\right|\;, \end{equation} the reliability of the record state in an arbitrary basis can in principle be tested by the other parts of the memory. 
Repeatedly measuring the same memory cell in different bases and comparing the longevity of the state in the $\{\left|0\right>, \left| 1\right>\}$ basis with the (lack of) longevity of the state in the $\{\left|+\right>, \left|-\right>\}$ basis would do the trick. Thus, as a consequence of decoherence and einselection, \begin{eqnarray} \rho_{\cal S M} & = & \left|\alpha\right|^2 \left|\uparrow\right> \left<\uparrow\right| \left| 1\right> \left< 1\right| \left|1'\right> \left< 1'\right| \left| 1''\right> \left< 1''\right|\ldots \nonumber \\ &+ & \left|\beta\right|^2 \left|\downarrow\right> \left<\downarrow\right| \left| 0\right> \left< 0\right| \left|0'\right> \left< 0'\right| \left| 0''\right> \left< 0''\right|\ldots\;, \end{eqnarray} in the transparent notation, while in the case of measurements of the $\{\left|+\right>, \left|-\right>\}$ record states there would be no correlation between the consecutive measurements carried out at intervals exceeding the decoherence timescale. Instead of the two branches of Eq.~(36), there would be $2^N$ branches, where $N$ is the number of two-state memory cells implicated. And a typical branch would be algorithmically random, easily betraying the unreliability of the $\{\left|+\right>, \left|-\right>\}$ record states. This simplistic model has no pretense to realism. Rather, its aim is to demonstrate a strategy for testing what is reliably known by the observer. A slightly more realistic model would entail the redundant records we have already mentioned, Eqs.~(6)-(12). Thus, the initial correlations would involve several $(n)$ memory cells: \begin{eqnarray} \rho_{{\cal S}{\mu^n}} & = & \left|\alpha\right|^2 \left|\uparrow\right> \left<\uparrow\right| \left| 1\right> \left< 1\right|_1 \left|1\right> \left< 1\right|_2 \ldots \left| 1\right> \left< 1\right|_n \nonumber \\ &+ & \left|\beta\right|^2 \left|\downarrow\right> \left<\downarrow\right| \left| 0\right> \left< 0\right|_1 \left|0\right> \left< 0\right|_2 \ldots \left| 0\right> \left< 0\right|_n\;. \end{eqnarray} Then the reliability of the records can be tested by looking for the basis in which all of the records are in accord. This can be accomplished without destroying all of the original redundant correlation between some of the records and the system. These toy models establish that, in the presence of decoherence, it is possible to make records and to find out what information is reliable (which correlations are immune to decoherence). Again, we emphasize that the above toy models have no pretense to even a remote kinship with real-world observers.\footnote{Indeed, one could argue that if some unhappy evolutionary mutation resulted in creatures which were bred to waste their time constantly questioning the reliability of their records, they would have become nourishment for other, more self-assured creatures which did not need to pose and settle such philosophical questions before making a useful prediction.} \section{CONCLUSION} What we have described above is a fairly complete sketch of the physics involved in the transition from quantum to classical. Whether one would now claim that the emerging picture fits better with Bohr's ``Copenhagen'' framework or with Everett's ``Many Worlds'' interpretation seems to be a semantic rather than a substantial issue. To begin with, decoherence was {\it not\/} a part of either of these interpretations. Thus, what we have presented here is clearly beyond either CI or MWI. 
The existential interpretation owes Bohr the central question which was always implicit in the early discussions. This question --- about the location of the quantum-classical border --- is really very similar to questions about ``existence.'' We have posed and settled these questions operationally and, thus, provided a quantum justification for some of the original CI program. On the other hand, we owe Everett the observation that quantum theory should be the key tool in the search for its interpretation. The question and concern may be traced to Bohr, but the language of branches and the absence of explicit collapses and {\it ab~initio\/} classicality are very much in tune with MWI. We believe that the point of view based on decoherence settles many of the questions which were left open by MWI and CI\null. This includes the origin of probabilities as well as the emergence of ``objective existence'', although more needs to be done. In particular, one issue which has been often taken for granted is looming big, as a foundation of the whole decoherence program. It is the question of what are the ``systems'' which play such a crucial role in all the discussions of the emergent classicality. This issue was raised earlier,$^{2,28}$ but the progress to date has been slow at best. Moreover, replacing ``systems'' with, say, ``coarse grainings'' does not seem to help at all --- we have at least tangible evidence of the objectivity of the existence of systems, while coarse-grainings are completely ``in the eye of the observer.'' It should be emphasized that reliance on systems does not undermine the progress achieved to date in the study of the role of decoherence and einselection. As noted before,$^{11}$ the problem of measurement cannot be even stated without a recognition of the existence of systems. Therefore, our appeal to the same assumption for its resolution is no sin. However, a compelling explanation of what are the systems --- how to define them given, say, the overall Hamiltonian in some suitably large Hilbert space --- would be undoubtedly most useful. I would like to thank Chris Jarzynski, Michael Nielsen, and Chris Zalka for comments on the manuscript. \eject \begin{center} \mbox{\psfig{figure=f1a.eps,height=0.75in,width=2.0in}} \begin{center} \mbox{\psfig{figure=f1b.eps,height=1.5in,width=3.2in}} \end{center} \mbox{\psfig{figure=f1c.eps,height=2.00in,width=4.0in}} \end{center} \noindent Fig. 1. Information transfer in measurements and in decoherence. a) Controlled not ({\tt c-not}) as an elementary bit-by-bit measurement. Its action is described by the ``truth table'' according to which the state of the target bit (apparatus memory in the quantum measurement vocabulary) is ``flipped'' when the control bit (measured system) is $|1>$ and untouched when it is $|0>$ (Eq. (2)). This can be accomplished by the unitary Schr\"odinger evolution (see, Refs. 1, 28, or 31 for the information theoretic discussion). b) Decoherence process ``caricatured'' by means of {\tt c-not}s. Pointer state of the apparatus (and, formerly, target bit in the pre-measurement, Fig. 1a) now acts as a control in the continuous monitoring by the {\tt c-not}s of the environment. This continuous monitoring process is symbolically ``discretized'' here into a sequence of {\tt c-not}s, with the state of the environment assuming the role of the multi-bit target. Monitored observable of the apparatus -- its pointer observable -- is in the end no longer entangled with the system, but the classical correlation remains. 
Decoherence is associated with the transfer of information about the to-be-classical observables to the environment. Classically, such information transfer is of no consequence. In quantum physics it is, however, absolutely crucial, as it is responsible for the effective classicality of certain quantum observables, and for the relatively objective existence of preferred pointer states. c) Noise is a process in which a pointer observable of the apparatus is perturbed by the environment. Noise differs from the purely quantum decoherence -- now the environment acts as a control, and the {\tt c-not}s which represent it carry information in the direction opposite to the decoherence {\tt c-not}s. Usually, both decoherence and noise are present. Preferred pointer observables and the associated pointer states are selected so that the noise is minimized. \end{document}
\begin{document} \title{ Steerability detection of arbitrary 2-qubit state via machine learning} \author{Changliang Ren}\thanks{These authors contributed equally.} \affiliation{Center for Nanofabrication and System Integration, Chongqing Institute of Green and Intelligent Technology, Chinese Academy of Sciences, Chongqing 400714, People's Republic of China}\email{ [email protected]}\affiliation{CAS Key Laboratory of Quantum Information, University of Science and Technology of China, Hefei 230026, PR China} \author{Changbo Chen}\thanks{These authors contributed equally.} \affiliation{Chongqing Key Laboratory of Automated Reasoning and Cognition, Chongqing Institute of Green and Intelligent Technology, Chinese Academy of Sciences, Chongqing 400714, People's Republic of China}\email{[email protected]} \date{\today} \begin{abstract} Quantum steering is an important nonclassical resource for quantum information processing. However, even though many steering criteria exist, it is still very difficult to efficiently determine whether an arbitrary two-qubit state shared by Alice and Bob is steerable or not, because Alice's optimal measurement directions are unknown. In this work, we provide an efficient quantum steering detection scheme for arbitrary 2-qubit states with the help of machine learning, where Alice and Bob only need to measure in a few fixed measurement directions. In order to demonstrate the validity of this method, we first realize a high-performance quantum steering classifier that uses the whole information of the state. Furthermore, a high-performance quantum steering classifier with partial information is realized, where Alice and Bob only need to measure in three fixed measurement directions. Our method outperforms the existing methods in generic cases in terms of both speed and accuracy, opening up an avenue to explore quantum steering via the machine learning approach. \end{abstract} \maketitle \section{I. Introduction} In the great debate on quantum mechanics in the 1930s, Einstein, Podolsky, and Rosen (EPR) \cite{Einstein} challenged the completeness of quantum mechanics (QM) on the basis of local realism, in what is usually called the EPR paradox. It points out a way to investigate deeply the difference, or conflict, between classical theory and quantum theory. In particular, three types of quantum correlations originating from the EPR paradox have been put forward: quantum entanglement \cite{Horodecki}, Bell nonlocality \cite{Brunner}, and EPR steering \cite{Cavalcanti}. Within the hierarchy of nonlocalities, the set of EPR steerable states is a subset of entangled states and a superset of Bell nonlocal states. Quantum entanglement and Bell nonlocality have seen flourishing development since 1964. However, a rigorous formulation of the concept of EPR steering was not given until 2007 \cite{Wiseman}. Recently, it has gained increasing interest in the quantum optics and quantum information communities \cite{Wiseman,Jones,Skrzypczyk}. For instance, EPR steering can provide security in one-sided device-independent quantum key distribution (1SDI-QKD) \cite{Branciard,Gehring,Walk} and play an operative role in channel discrimination \cite{Piani} and teleamplification \cite{He}. Naturally, detection and characterization of steering have attracted increasing attention \cite{Wiseman,Reid,Reid1,Jones,Piani,Cavalcanti1,Cavalcanti2,Walborn,Schneeloch,Pusey,Pramanik,Kogias,Skrzypczyk,Kogias1,Zhu,Nguyen}. 
Various steering criteria and inequalities have been derived, such as linear steering inequalities \cite{Cavalcanti1,Cavalcanti2,Pusey}, inequalities based on multiplicative variances \cite{Reid,Reid1,Cavalcanti1}, entropic uncertainty relations \cite{Walborn,Schneeloch}, fine-grained uncertainty relations \cite{Pramanik}, and a hierarchy of steering criteria based on moments \cite{Kogias}. In particular, a necessary and sufficient condition for a two-qubit state to be steerable with respect to projective measurements has been exhibited \cite{Nguyen}. Actually, these criteria can be computed through semidefinite programming \cite{Cavalcanti3}. For an arbitrary quantum state shared by Alice and Bob, determining whether Alice can steer Bob with these criteria boils down to finding Alice's optimal measurement directions, which is resource demanding, as explained below. In a real experimental test, if Alice and Bob share an unknown state, there are two ways to identify the steerability of this state. One is to compute the steering criteria after a complete quantum state tomography measurement; the other is to try to directly observe a characteristic phenomenon (such as the violation of a steering inequality) which can distinguish steerability from non-steerability. Obviously, the former needs to measure the whole information of the state, while the latter has to try many times by choosing a large number of measurement directions until the characteristic phenomenon is observed. Hence, both of them need a lot of measurements and are not efficient. It is even worse when there is a large number of different states to be detected, which is typical if one wants to detect steerability of a sequence of distinct, rapidly generated states. Thus it remains challenging to develop an efficient approach to detect steerability in experimental tests. Recently, the successful applications of the machine learning approach to entanglement \cite{Lu,Ma} and nonlocality discrimination \cite{Deng} shed new light on this problem. Machine learning possesses the capability to instantly make predictions on new data with reasonable accuracy after learning from a large amount of existing data. In the past few decades, there has been rapidly growing interest not only in theoretical studies, but also in a variety of applications of machine learning. Interestingly, besides its extensive applications in industry, machine learning has also been employed to investigate physics-related problems in recent years. Nowadays, many quantum implementations of machine learning have been introduced to achieve better performance for quantum information processing \cite{Deng,Lu,Ma,Assion,Sasaki,Bisio,Hentschel,Bang,Wiebe,Krenn,Schoenholz,Zhang}, such as Hamiltonian learning \cite{Wiebe}, automated search for quantum experiments \cite{Krenn}, phase transition identification \cite{Schoenholz}, identification of topological phases of matter \cite{Zhang}, and entanglement classification \cite{Deng,Lu,Ma}, just to name a few. Certainly, these works motivate us to adopt machine learning as an alternative approach for the investigation of various quantum tasks. Different from previous research, in this paper we employ machine learning techniques to tackle the bipartite steering detection problem by recasting it as a learning task. We build several new steerability classifiers underpinned by machine learning techniques. 
First, an efficient steerability classifier built with the whole information demonstrates the validity of steering classification by machine learning. Second, and more importantly, we provide a quantum steering detection scheme for arbitrary two-qubit states with the help of machine learning, where Alice and Bob only need to measure in three fixed measurement directions. These efficient steerability classifiers, which work for arbitrary 2-qubit states, are exhibited and fully analyzed. Both for arbitrary 2-qubit states and for special states, they can perform better than the traditional semidefinite programming (SDP) approach. More importantly, compared with the classical method, this approach is much less resource demanding and can quickly determine whether a state is steerable with a well-trained classifier. Hence, it provides a simpler and more efficient way to detect steerability, which sheds new light on the classification of quantum steering with limited resources, and represents a step towards large-scale machine-learning-based applications in quantum information processing. \section{II. Quantum steering} We start by defining the scenario in which quantum steering is discussed. For the sake of convenience, let us only consider the simplest case --- a two-qubit system. Consider a bipartite situation composed of Alice and Bob sharing an arbitrary quantum state $\rho$. Suppose Alice performs measurement $\hat{A}$ with outcome $a$ and Bob performs measurement $\hat{B}$ with outcome $b$. These outcomes are thus in general governed by a joint probability distribution $P(a,b\mid\hat{A},\hat{B},\rho)$. The joint probability distribution predicted by quantum theory is \begin{eqnarray}\label{QM} P(a,b\mid\hat{A},\hat{B},\rho)=\mathrm{Tr}(\hat{M}^{a}_{A}\otimes \hat{M}^{b}_{B}\rho), \end{eqnarray} where $\hat{M}^{a}_{A}$ and $\hat{M}^{b}_{B}$ are the projective measurement operators for Alice and Bob, respectively. It is well known that Wiseman, Jones and Doherty formally defined quantum steering as the possibility of remotely generating ensembles that could not be produced by a local hidden state (LHS) model. The mathematical formulation of the LHS model adds an extra requirement on Bob's probabilities, which can be expressed as \begin{equation}\label{LHS} \begin{array}{rcl} P(a,b\mid\hat{A},\hat{B},\rho)&=&\sum_{\lambda}P(\lambda)P(a\mid\hat{A},\lambda)P_{Q}(b\mid\hat{B},\lambda)\quad\\ P_{Q}(b\mid\hat{B},\lambda)&=&\mathrm{Tr}(\rho_{\lambda}\hat{M}^{b}_{B}), \end{array} \end{equation} where $\rho_{\lambda}$ is a qubit state specified by $\lambda$. If the joint probability can be decomposed in the form of Eq. (\ref{LHS}), then we say that Alice cannot steer Bob's state. Otherwise $P(a,b\mid\hat{A},\hat{B},\rho)$ demonstrates a quantum steering correlation (in the sense that Alice steers Bob). The steering scenario corresponds to the situation where no characterization of Alice's measurements is assumed, while Bob has full control of his measurements and can thus access the unnormalized conditional states $\sigma_{a\mid A}$, where $\sigma_{a\mid A}=\mathrm{Tr}_A[(\hat{M}^{a}_{A}\otimes I) \rho]$. In other words, deciding whether an assemblage $\sigma_{a\mid A}$ demonstrates steering amounts to checking whether there exists a collection of quantum states $\rho_{\lambda}$ and probability distributions $P(\lambda)$ and $P(a\mid\hat{A},\lambda)$ such that (\ref{LHS}) holds. Obviously this is in principle a hard problem, since the variable $\lambda$ could assume infinitely many values. 
However, if the number of measurements and outputs is finite, this problem becomes much simpler, and it was shown in \cite{Cavalcanti} that the problem can be solved through semidefinite programming (SDP) \cite{Vandenberghe}. Next, we briefly review this approach. Suppose that Alice performs $m$ measurements, labeled as $x=0,1,\ldots,m-1$. One can write an SDP that determines if Alice can steer Bob \cite{Cavalcanti}, \begin{equation} \label{eq1} \begin{array}{lll} &\mbox{given} & \{\sigma_{a|x}\}, \{D(a|x, \lambda')\}_{\lambda'}\\ &\underset{\{F_{a|x}\}}{\mbox{min}} & \mathrm{Tr}\sum_{ax} F_{a|x}\sigma_{a|x}\\ &\mbox{s.t.} &\sum_{ax}F_{a|x}D(a|x,\lambda')\geq 0\;\;\;\forall \lambda'\\ & &\mathrm{Tr}\sum_{ax,\lambda'} F_{a|x}D(a|x,\lambda')=1, \end{array} \end{equation} where $F_{a|x}$ are Hermitian matrices, $\lambda'$ is a map from $\{x\}$ to $\{a\}$ and $D(a|x,\lambda')=\delta_{a,\lambda'(x)}$, that is $D(a|x,\lambda')=1$ if $\lambda'(x)=a$ and $D(a|x,\lambda')=0$ if $\lambda'(x)\neq a$. If the objective value of~(\ref{eq1}) is negative for some set of measurements $x$, then $\rho$ is steerable from Alice to Bob. On the other hand, a non-negative value means that there exists an LHS model for the given assemblage. \section{III. Quantum steering classifier with whole information} Theoretically, SDP allows us to detect steering more and more precisely as the number of measurements increases \cite{Cavalcanti3}. However, there is a noticeable drawback of the above SDP approach from the perspective of the tradeoff between accuracy and time consumption. Boosting the accuracy means adding additional extreme points to enlarge the convex hull, which requires more time to determine if a point is inside the enlarged convex hull or not. To overcome this, we combine SDP with supervised learning, as machine learning has the power to speed up such computations. Naturally, the bipartite steering detection problem can be formulated as a supervised binary classification task. Here, the datasets are generated by adopting the following procedure: \begin{itemize} \item First, generate two random $4\times 4$ matrices $M$ and $N$, which are used to build a Hermitian matrix $H := (M+iN)(M+iN)^{\dag}$, where $\dag$ means taking the conjugate transpose, and a density matrix $\rho_{AB} := H/{\rm Tr}(H)$. \item Since $\rho_{AB}$ is a $4\times 4$ density matrix, it is enough to use the first $3$ elements on the diagonal and the real and imaginary parts of the $6$ elements below the diagonal to form the vector of features, which is a real vector of $15$ numbers in the interval $(-1, 1)$, denoted by $F_1$. \item For a given density matrix $\rho_{AB}$, we run SDP Program~(\ref{eq1}) $100$ times with different choices of the measurements. If the objective value is negative, we assign the label $-1$; otherwise we assign the label $+1$, which means that we do not know if Alice can steer Bob. \end{itemize} For each $m=2,\ldots,8$, we generate the corresponding dataset until at least $5000$ samples with label $+1$ and $5000$ samples with label $-1$ are obtained. Generating the datasets for all the different settings $m=2,\ldots,8$ takes several months on a workstation. Here, we should emphasize that the collected states for different settings $m$ are totally random and independent. Finally, we collected over $70000$ samples in total \cite{Chen}. Actually, it becomes harder and harder to obtain a dataset as the number of measurements increases. For example, when $m=8$, it took about $63$ days to collect the dataset. 
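To make one step of this procedure concrete, a minimal, purely illustrative sketch is given below. It is not the code used to produce the datasets reported here; the random choice of projective measurement directions, the numerical tolerance, and the use of the open-source numpy and cvxpy libraries (with an SDP-capable solver) are assumptions made only for the purpose of illustration of Program~(\ref{eq1}).
\begin{verbatim}
# Illustrative sketch of one data-generation step (not the authors' code):
# draw a random two-qubit state, build its F1 feature vector, and label it
# with the steering SDP, program (3).  Requires numpy and cvxpy.
import itertools
import numpy as np
import cvxpy as cp

I2 = np.eye(2)
PAULI = [np.array([[0, 1], [1, 0]], dtype=complex),
         np.array([[0, -1j], [1j, 0]], dtype=complex),
         np.array([[1, 0], [0, -1]], dtype=complex)]

def random_state():
    M = np.random.randn(4, 4) + 1j * np.random.randn(4, 4)
    H = M @ M.conj().T                      # Hermitian, positive semidefinite
    return H / np.trace(H).real             # density matrix rho_AB

def features_F1(rho):
    # 3 diagonal elements + Re/Im of the 6 elements below the diagonal = 15 numbers
    idx = [(i, j) for i in range(4) for j in range(i)]
    return np.concatenate(([rho[i, i].real for i in range(3)],
                           [rho[i, j].real for i, j in idx],
                           [rho[i, j].imag for i, j in idx]))

def assemblage(rho, directions):
    # sigma_{a|x} = Tr_A[(M^a_x (x) I) rho] for spin measurements along unit vectors
    sig = {}
    for x, n in enumerate(directions):
        sn = sum(c * P for c, P in zip(n, PAULI))
        for a in (0, 1):
            proj = (I2 + (1 - 2 * a) * sn) / 2
            big = np.kron(proj, I2) @ rho
            sig[a, x] = big.reshape(2, 2, 2, 2).trace(axis1=0, axis2=2)
    return sig

def steering_objective(sig, m):
    # Program (3): a negative optimum flags the assemblage as steerable (label -1)
    F = {(a, x): cp.Variable((2, 2), hermitian=True) for x in range(m) for a in (0, 1)}
    cons = []
    for lam in itertools.product((0, 1), repeat=m):        # deterministic strategies
        cons.append(sum(F[lam[x], x] for x in range(m)) >> 0)
    norm = sum(cp.trace(F[lam[x], x])
               for lam in itertools.product((0, 1), repeat=m) for x in range(m))
    cons.append(cp.real(norm) == 1)
    obj = cp.real(sum(cp.trace(F[a, x] @ sig[a, x]) for a, x in sig))
    return cp.Problem(cp.Minimize(obj), cons).solve()

m = 3
rho = random_state()
dirs = [v / np.linalg.norm(v) for v in np.random.randn(m, 3)]   # random directions
label = -1 if steering_objective(assemblage(rho, dirs), m) < -1e-6 else +1
print(features_F1(rho), label)
\end{verbatim}
A state is labeled $-1$ (steerable) as soon as one choice of measurements yields a negative objective value; repeating the last few lines with many choices of directions reproduces, in spirit, the labeling rule described above.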
For each $m=2,\ldots,8$, the last $1000$ positive samples and the last $1000$ negative samples are reserved for testing. The remaining $4000$ positive samples and $4000$ negative samples are kept as the training set used to learn a classifier. We employ a $4$-fold cross-validation technique and a grid search to select the best hyperparameters. The machine learning method we use is the support vector machine (SVM). The SVM is a supervised learning model used for classification and regression analysis, which requires solving the following optimization problem: \begin{equation} \label{svm} \begin{array}{lll} &\mbox{given} & ({\bf x}_i, y_i), i=1,\ldots,\ell \\ &\underset{{\bf w}, b, \mathbf{\xi}}{\mbox{min}} & \frac{1}{2}{\bf w}^T{\bf w}+C\sum_{i=1}^{\ell}\xi_i\\ &\mbox{s.t.} &y_i({\bf w}^T\phi({\bf x}_i)+b)\geq 1-\xi_i\\ & &{\xi}_i\geq 0. \end{array} \end{equation} Here $\ell$ is the number of samples, $y_i$ and ${\bf x}_i$ are, respectively, the label and the feature vector of sample $i$, and $\phi$ is a mapping implicitly defined by a kernel function. We choose the radial basis function (RBF) kernel $K({\bf x}_i,{\bf x}_j)=\phi({\bf x}_i)^T\phi({\bf x}_j)={\rm exp}(-\gamma\lVert{{\bf x}_i-{\bf x}_j}\rVert^2)$, with the parameters $C$ and $\gamma$ determined by a grid search when training the model. In the rest of this section, the models are trained with feature vectors of type $F_1$, which encode the full information of a two-qubit quantum state. After the SVM model is trained, we test its performance on a newly generated set of quantum states that is distinct from the data employed for training. The classification accuracy of the learned model for each $m$ is illustrated in Fig.~\ref{fig:accuracy}. All training and cross-validation accuracies are higher than $0.95$, which clearly shows that the models are well trained. \begin{figure} \caption{Classification accuracies of the steering classifiers trained with $F_1$ features for each number of measurement settings $m$.} \label{fig:accuracy} \end{figure} If the steerability classifiers are well trained, they should become more precise as $m$ increases. To verify this, we use the classifiers learned for different $m$ to test against the random data generated for $m=8$. As illustrated in Fig.~\ref{fig:test8}, where the blue-circle line corresponds to the $F_1$ features, the error drops rapidly with $m$ (except at $m=6$, which may be due to imperfections in the learning process). Overall, the trend agrees with the theoretical expectation (the more measurement settings, the more precise the prediction), which further confirms that these are well-trained classifiers of quantum steering. \begin{figure} \caption{Prediction error of the classifiers trained for different $m$ when tested on the random data generated for $m=8$ ($F_1$ and $F_3$ features).} \label{fig:test8} \end{figure} To demonstrate the generalization ability of the steering classifiers, we study how well they classify a special family of states with an unambiguous steerability bound. The state can be written as \begin{eqnarray}\label{target state} \rho_{\mathrm{W}} &=& p\mid\psi\rangle\langle\psi\mid + (1-p)\rho_{\mathrm{A}} \otimes I/2, \end{eqnarray} where $\mid\psi\rangle =\cos \xi \mid 00\rangle + \sin \xi \mid 11\rangle$ and $\rho_{\mathrm{A}}=\mathrm{Tr_B}(\mid\psi\rangle\langle\psi\mid )$. This two-qubit state can be one-way steerable and was exhibited by Bowles \emph{et al.} in \cite{Bowles} recently. It reduces to the Werner state when $\xi=\frac{\pi}{4}$. For simplicity, we refer to it as the ``generalized Werner state''.
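As an aside, the training and model-selection protocol described above (an RBF-kernel SVM with $C$ and $\gamma$ chosen by grid search under $4$-fold cross-validation) can be sketched in a few lines. The sketch below assumes \texttt{numpy} and \texttt{scikit-learn}; the function names (\texttt{f1\_features}, \texttt{train\_classifier}), the hyperparameter grid, and the exact ordering of the $F_1$ entries are our own illustrative choices.
\begin{verbatim}
# Illustrative sketch (not the authors' code) of the training protocol:
# RBF-kernel SVM, grid search over C and gamma, 4-fold cross-validation.
import numpy as np
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC

def f1_features(rho):
    """F_1: 3 leading diagonal entries plus real/imaginary parts of the
    6 entries below the diagonal of a 4x4 density matrix (15 numbers)."""
    idx = [(i, j) for i in range(4) for j in range(i)]   # below diagonal
    feats = [rho[i, i].real for i in range(3)]
    feats += [rho[i, j].real for i, j in idx]
    feats += [rho[i, j].imag for i, j in idx]
    return np.array(feats)

def train_classifier(X, y):
    """X: (n_samples, 15) feature matrix, y: labels in {-1, +1}."""
    grid = {"C": [1, 10, 100, 1000], "gamma": [0.01, 0.1, 1, 10]}
    search = GridSearchCV(SVC(kernel="rbf"), grid, cv=4)
    search.fit(X, y)
    return search.best_estimator_, search.best_params_

# Hypothetical usage with a pre-computed dataset:
# X = np.array([f1_features(rho) for rho in states]); y = labels
# clf, params = train_classifier(X, y)
# print(params, clf.score(X_test, y_test))
\end{verbatim}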
Unlike entanglement and nonlocality, steering is asymmetric: Alice's and Bob's qubits play different roles in the steering scenario. In particular, one-way quantum steering exists; that is, there are entangled states for which steering can occur from Alice to Bob but not from Bob to Alice. One-way steerable states have attracted particular attention because of this special characterization. The state in Eq.~(\ref{target state}) is unsteerable from Alice to Bob \cite{Bowles} if \begin{eqnarray}\label{one-way} \cos^{2}2\xi\geq\frac{2p-1}{(2-p)p^3}, \end{eqnarray} a criterion that has also been demonstrated experimentally very recently \cite{Xiao}. The bound on the parameter $p$ beyond which Alice can steer Bob's state is therefore determined by Eq.~(\ref{one-way}). Here we apply our classifiers to predict the steerability of such states. The test states are constructed by sampling $p$ uniformly; for each $\xi\in\{\frac{\pi}{4},\frac{\pi}{6},\frac{\pi}{8},\frac{\pi}{12}\}$ we generate $10000$ test samples. We predict the steerability bounds using both the learned classifiers and the SDP. \begin{figure} \caption{Steerability bounds for the generalized Werner state predicted by the learned classifiers (trained with $F_1$ features) and by the SDP for $m=2,\ldots,8$; the four panels correspond to $\xi=\frac{\pi}{4},\frac{\pi}{6},\frac{\pi}{8},\frac{\pi}{12}$.} \label{fig:angle} \end{figure} As illustrated in Fig.~\ref{fig:angle}, the four panels correspond to $\xi=\{\frac{\pi}{4},\frac{\pi}{6},\frac{\pi}{8},\frac{\pi}{12}\}$, respectively. In each panel, the blue line is the bound predicted by the learned classifiers for $m=2,\ldots,8$, the red line is the bound predicted by the SDP with $m=2,\ldots,8$, and the orange line is the steerability bound from Alice to Bob defined by Eq.~(\ref{one-way}). Clearly, the learned classifiers perform better than the traditional SDP. In particular, for $\xi=\frac{\pi}{4}$, the Werner state, the learned classifiers show the best performance. In Fig.~\ref{fig:accuracy}, the third (green) column depicts the classification accuracy on the Werner state; it is interesting to notice that for $m>4$ the classification accuracy on the Werner state is even higher than on random data, despite the fact that the model is trained with random data. As $\xi$ decreases, the prediction errors of both the learned classifiers and the SDP increase. This is a reasonable phenomenon, since states close to the steerability boundary become harder and harder to classify. In Fig.~\ref{fig:error-Fs}, the first panel (for $F_1$ features) shows the classification error for generalized Werner states with different angles; the error increases as the angle drops, which is consistent with the above analysis. Another interesting phenomenon is that, even though the learned classifiers can be more effective than the SDP, they may occasionally predict a value of $p$ below the steerability bound, which almost never happens for the SDP. The reason is that a machine learning classifier can misclassify in both directions (positive as negative and vice versa), whereas the main error of the SDP occurs in one direction only, namely predicting negative cases to be positive. Hence, this difference can even be used to distinguish which method was used. \begin{figure} \caption{Classification errors for the generalized Werner states with different angles $\xi$, for classifiers trained with $F_1$, $F_2$, and $F_3$ features (first, second, and third panels, respectively).} \label{fig:error-Fs} \end{figure} The above results clearly demonstrate the validity of steerability detection by machine learning. Even though this scheme still requires the whole state information, just like the traditional SDP method, the machine learning method is much more efficient than the SDP in data processing.
Take $m=8$ as an example: the learned classifier needs about $10^{-2}\,\mathrm{s}$ to predict an unknown state, while the SDP with $m=8$ takes about $10^{2}\,\mathrm{s}$. Comparing the time for a single state may seem unfair, since the cost of the machine learning classifier should include both training and prediction time. When the task is to predict a large number of unknown states, however, the time advantage of the machine learning classifier is obvious. \section{IV. Quantum steering classifiers with partial information} Although the above machine-learning steering classifier boosts the performance of state classification compared with the traditional SDP method, it still has the disadvantage of requiring the whole information of the state as input features. However, the description of a quantum state grows exponentially with the system size, which makes large-scale quantum state tomography intractable. It will therefore become more and more difficult to extend the method to higher dimensions, and it is important to explore the possibility of learning with only partial information about the quantum state. Here we introduce an efficient quantum steering detection scheme for arbitrary two-qubit states with the help of machine learning, in which Alice and Bob only need to measure along a few fixed measurement directions. Steerability is unaffected by local unitaries for Alice and by ``local filters''/``stochastic local operations'' for Bob. Hence the relevant information for steerability could be encoded in a smaller feature vector. Indeed, an arbitrary two-qubit state can be expressed in the local Pauli basis {\small \begin{eqnarray}\label{state} \rho= \frac{1}{4} \biggr(I + \sum_{i=1}^{3}r_{i}\sigma_{i} \otimes I + \sum_{j=1}^{3} s_{j}I\otimes \sigma_j + \sum_{k,l=1}^3 \tau_{kl} \sigma_k \otimes \sigma_l \biggr). \end{eqnarray} } In principle, steerability is determined by all the parameters $r_{i}$, $s_{j}$, $\tau_{kl}$ in Eq.~(\ref{state}), but it is intuitively expected to be dominated by the correlation terms $\tau_{kl}$ between the two qubits. Hence, it is natural to extract the coefficients of the correlation terms, $\{\tau_{kl}\}$, as features. More precisely, the partial information is extracted by computing $\mathrm{Tr}[(\sigma_k\otimes \sigma_l)\rho]$ as features, denoted by $F_2$. We repeat the same training and test process as for $F_1$. The classification accuracy of the learned models for each $m$ is illustrated in Fig.~\ref{fig:accuracy-F2}. Interestingly, even though the classification accuracy on random quantum states is fairly high, the accuracy for classifying Werner states is rather low. Similarly, the second panel of Fig.~\ref{fig:error-Fs} shows that the classification errors for generalized Werner states are high. Thus the models trained with the $F_2$ features have poor generalization ability. As a result, these classifiers perform poorly compared with the traditional SDP method, as illustrated by Fig.~\ref{fig:errorBound-F2}. Therefore, constructing a high-performance classifier from partial information is not trivial, and this simple, crude choice of features is impracticable.
\begin{figure} \caption{Classification accuracies of the classifiers trained with $F_2$ features for each $m$.} \label{fig:accuracy-F2} \end{figure} \begin{figure} \caption{Steerability bounds for the generalized Werner state predicted by the classifiers trained with $F_2$ features and by the SDP.} \label{fig:errorBound-F2} \end{figure} To further explore high-performance classifiers with partial information, we convert the state $\rho$ into a canonical form $\rho_{0}$ that preserves its steerability. As proved in \cite{Bowles}, the map \begin{eqnarray}\label{map} \rho_{0}=(\mathrm{I}\otimes\rho_{B}^{1/2}) \rho (\mathrm{I}\otimes\rho_{B}^{1/2}), \end{eqnarray} where $\rho_{B}=\mathrm{Tr}_{A}[\rho]$, preserves the steerability of $\rho$. An appealing property of this map is that, when applied to an arbitrary state $\rho$, it can be realized by a local operation on Bob's side alone. As before, we extract the coefficients of the correlation terms of the resulting state $\rho_{0}$ to form a real vector of $9$ numbers; more precisely, we compute $\mathrm{Tr}[(\sigma_k\otimes \sigma_l)\rho_{0}]$ as features, denoted by $F_3$. The classification accuracy of the learned models for each $m$ is illustrated in Fig.~\ref{fig:accuracy-F3}. All accuracies increase with $m$ and exceed $0.95$, which clearly shows that the resulting learning machines are well trained. \begin{figure} \caption{Classification accuracies of the classifiers trained with $F_3$ features for each $m$.} \label{fig:accuracy-F3} \end{figure} Moreover, we observe the same behaviour as with the full-information features. As illustrated in Fig.~\ref{fig:test8}, where the green-star line corresponds to the $F_3$ features, the error drops rapidly with $m$, so the trend again agrees with the theoretical expectation (the more measurement settings, the more precise the prediction). Fig.~\ref{fig:errorBound-F3} shows the steerability bounds predicted by the machine learning classifiers and by the SDP for the angles $\xi=\{\frac{\pi}{4},\frac{\pi}{6},\frac{\pi}{8},\frac{\pi}{12}\}$, respectively. The learned classifiers outperform the traditional SDP except for the states with $\xi=\frac{\pi}{12}$, which lie near the boundary of steerability. As $\xi$ decreases, the prediction errors of both the learned classifiers and the SDP increase; as mentioned for the $F_1$ features, this is reasonable since states near the boundary become harder and harder to classify. In Fig.~\ref{fig:error-Fs}, the third panel shows the classification error for generalized Werner states (with different angles) for the $F_3$ features; the error increases as the angle drops, which is consistent with the above analysis. Overall, this classifier performs better than the traditional SDP method. \begin{figure} \caption{Steerability bounds for the generalized Werner state predicted by the classifiers trained with $F_3$ features and by the SDP for $\xi=\frac{\pi}{4},\frac{\pi}{6},\frac{\pi}{8},\frac{\pi}{12}$.} \label{fig:errorBound-F3} \end{figure} Note that in this scheme, for any unknown state, Alice and Bob only need to measure along three fixed measurement directions. Therefore, it is more efficient than the SDP in both the physical measurement process and the data processing. In particular, it should be very efficient for testing a large number of arbitrary states in quantum information processing tasks such as one-sided device-independent quantum key distribution (1SDI-QKD), channel discrimination and teleamplification, \emph{etc}.
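For completeness, the partial-information features can be computed directly from a given density matrix. Below is a minimal sketch, assuming only \texttt{numpy}, of the $F_2$ features and of the canonical-form map of Eq.~(\ref{map}) used for the $F_3$ features; the helper names (\texttt{f2\_features}, \texttt{canonical\_form}, \texttt{f3\_features}) are ours and the code is purely illustrative, not the implementation behind the reported results.
\begin{verbatim}
# Illustrative sketch (not the authors' code) of the partial-information
# feature vectors F_2 and F_3.
import numpy as np

PAULI = [np.array([[0, 1], [1, 0]]),
         np.array([[0, -1j], [1j, 0]]),
         np.array([[1, 0], [0, -1]])]

def f2_features(rho):
    """F_2: the nine correlation coefficients Tr[(sigma_k (x) sigma_l) rho]."""
    return np.array([np.trace(np.kron(sk, sl) @ rho).real
                     for sk in PAULI for sl in PAULI])

def canonical_form(rho):
    """Eq. (9): rho_0 = (I (x) rho_B^{1/2}) rho (I (x) rho_B^{1/2}),
    with rho_B = Tr_A[rho] (partial trace over Alice's qubit)."""
    rho_B = rho.reshape(2, 2, 2, 2).trace(axis1=0, axis2=2)
    w, V = np.linalg.eigh(rho_B)          # rho_B is positive semidefinite
    sqrt_B = (V * np.sqrt(np.clip(w, 0, None))) @ V.conj().T
    K = np.kron(np.eye(2), sqrt_B)
    return K @ rho @ K

def f3_features(rho):
    """F_3: the nine correlation coefficients of the canonical state rho_0."""
    return f2_features(canonical_form(rho))
\end{verbatim}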
To explore the performance of machine-learning-based steering detection with even less information, we exploit symmetry and drop the coefficients of the correlation terms $\sigma_y\otimes \sigma_x$, $\sigma_z\otimes \sigma_x$, $\sigma_z\otimes \sigma_y$ from $F_3$; the remaining features are denoted $F_4$. The training process was carried out as before. As illustrated in Fig.~\ref{fig:accuracy-F4}, the classification accuracy of these classifiers on random states is acceptable but lower than with the $F_3$ features. Interestingly, they perform better on Werner states, as illustrated in Fig.~\ref{fig:errorBound-F4}, except for $m=8$; the accuracy drop at $m=8$ may be caused by overfitting, as suggested by Fig.~\ref{fig:accuracy-F4}. As before, predictions for states near the boundary become harder and harder. Even though the classifiers with $F_4$ features perform worse than those with $F_3$ features, they are still more effective than those with $F_2$ features. Hence, correctly extracting partial information is crucial for realizing high-performance steering classifiers via machine learning. \begin{figure} \caption{Classification accuracies of the classifiers trained with $F_4$ features for each $m$.} \label{fig:accuracy-F4} \end{figure} \begin{figure} \caption{Steerability bounds for the generalized Werner state predicted by the classifiers trained with $F_4$ features and by the SDP.} \label{fig:errorBound-F4} \end{figure} \section{V. conclusion} In this work, we have applied machine learning to a quantum state classification problem in quantum information science, obtaining several reliable steerability classifiers by combining supervised learning with the SDP method. First, we built a high-performance quantum steering classifier using the whole state information and used it to test random unknown states and the generalized Werner states. The prediction performance of this classifier and of the SDP is analyzed and discussed in detail, clearly demonstrating the validity and efficiency of steering classification by machine learning. Secondly, we investigated the possibility of constructing steering classifiers from partial information, showing that correctly extracting partial information is very important for realizing high-quality steering classifiers via machine learning. Finally, an efficient quantum steering detection scheme for arbitrary two-qubit states via machine learning is realized, in which Alice and Bob only need to measure along three fixed measurement directions. It should be very efficient for testing the steerability of a large number of arbitrary states in quantum information processing tasks such as one-sided device-independent quantum key distribution (1SDI-QKD), channel discrimination and teleamplification, \emph{etc}. \emph{Acknowledgement.-}C.L.R. is supported by National key research and development program (No. 2017YFA0305200), the Youth Innovation Promotion Association (CAS) (No. 2015317), the National Natural Science Foundation of China (No. 11605205), the Natural Science Foundation of Chongqing (No. cstc2015jcyjA00021, cstc2018jcyjAX0656), the Entrepreneurship and Innovation Support Program for Chongqing Overseas Returnees (No.cx017134), the fund of CAS Key Laboratory of Microscale Magnetic Resonance, and the fund of CAS Key Laboratory of Quantum Information. C.C. is supported by the National Natural Science Foundation of China (No. 11771421, 11471307, 61572024, 11671377), cstc2018jcyj-yszxX0002 of Chongqing, and the Key Research Program of Frontier Sciences of CAS (QYZDB-SSW-SYS026). C. L. Ren and C. B. Chen contributed equally to this work.
\begin{appendices} \section{Appendix: The vector of features} For an arbitrary quantum state $\rho$, the four different feature vectors used in this work are summarized below: \begin{table}[htbp] \begin{center} \begin{tabular}{|c|c|} \hline \hline \small{$\mathrm{F}_1$} & \small{$\rho_{ii}, i\in\{1,2,3\}$, the real and imaginary part of $\rho_{ij}, j>i$} \\ \hline \small{$\mathrm{F}_2$} &\small{$\mathrm{Tr}[(\sigma_k\otimes \sigma_l)\rho]$, $\{k,l\}\in\{x,y,z\}$}\\ \hline \small{$\mathrm{F}_3$} &\small{$\rho\rightarrow\rho_0$, $\mathrm{Tr}[(\sigma_k\otimes \sigma_l)\rho_0]$, $\{k,l\}\in\{x,y,z\}$} \\ \hline \small{$\mathrm{F}_4$} &\small{$\mathrm{F}_3$ except for the terms of $\{\sigma_y\otimes \sigma_x,\sigma_z\otimes \sigma_x,\sigma_z\otimes \sigma_y\}$} \\ \hline \hline \end{tabular} \end{center} \end{table} \end{appendices} \end{document}
\begin{document} \title{Stochastic Proximal Gradient Methods for Nonconvex Problems in Hilbert Spaces} \begin{abstract} For finite-dimensional problems, stochastic approximation methods have long been used to solve stochastic optimization problems. Their application to infinite-dimensional problems is less understood, particularly for nonconvex objectives. This paper presents convergence results for the stochastic proximal gradient method applied to Hilbert spaces, motivated by optimization problems with partial differential equation (PDE) constraints with random inputs and coefficients. We study stochastic algorithms for nonconvex and nonsmooth problems, where the nonsmooth part is convex and the nonconvex part is the expectation, which is assumed to have a Lipschitz continuous gradient. The optimization variable is an element of a Hilbert space. We show almost sure convergence of strong limit points of the random sequence generated by the algorithm to stationary points. We demonstrate the stochastic proximal gradient algorithm on a tracking-type functional with a $L^1$-penalty term constrained by a semilinear PDE and box constraints, where input terms and coefficients are subject to uncertainty. We verify conditions for ensuring convergence of the algorithm and show a simulation. \end{abstract} \section{Introduction} In this paper, we focus on stochastic approximation methods for solving a stochastic optimization problem on a Hilbert space $H$ of the form \begin{equation} \label{eq:ProblemFormulation-basic} \tag{P} \min_{u \in H} \{f(u) = j(u) + h(u) \}, \end{equation} where the expectation $j(u) = \mathbb{E} [J(u, \xi)]$ is generally nonconvex with a Lipschitz continuous gradient and $h$ is a proper, lower semicontinuous, and convex function that is generally nonsmooth. Our work is motivated by applications to PDE-constrained optimization under uncertainty, where a nonlinear PDE constraint can lead to an objective function that is nonconvex with respect to the Hilbert-valued variable. To handle the (potentially infinite-dimensional) expectation, algorithmic approaches for solving such problems involve either some discretization of the stochastic space or an ensemble-based approach with sampling or carefully chosen quadrature points. Stochastic discretization includes polynomial chaos and the stochastic Galerkin method; cf.~\cite{Keshavarzzadeh2017,Kunoth2016,Lee2013,Rosseel2012}. For ensemble-based methods, the simplest method is sample average approximation (SAA), where the original problem is replaced by a proxy problem with a fixed set of samples, which can then be solved using a deterministic solver. A number of standard improvements to Monte Carlo sampling have been applied to optimal control problems in, e.g.,~\cite{Ali2017,VanBarel2017}. Another ensemble-based approach is the stochastic collocation method, which has been used in optimal control problems in e.g.~\cite{Rosseel2012,Tiesler2012}. Sparse-tensor discretization has been used for optimal control problems in, for instance, \cite{Kouri2013,Kouri2014a}. The approach we use is an ensemble-based approach called stochastic approximation, which is fundamentally different in the sense that sampling takes place dynamically as part of the optimization procedure, leading to an algorithm with low complexity and computational effort when compared to other approaches. 
Stochastic approximation originated in a groundbreaking paper by \cite{Robbins1951}, where an iterative method to find the root of an unknown function using noisy estimates was proposed. The authors of \cite{Kiefer1952} used this idea to solve a regression problem using finite differences subject to noise. Algorithms of this kind, with bias in addition to stochastic noise, are sometimes called stochastic quasi-gradient methods; see, e.g., \cite{Ermoliev1969,Uryasev1992}.Basic versions of these algorithms rely on positive step sizes $t_n$ of the form $\sum_{n=1}^\infty t_n = \infty$ and $\sum_{n=1}^\infty t_n ^2 < \infty$. The (almost sure) asymptotic convergence of stochastic approximation algorithms for convex problems is classical in finite dimensions; we refer to the texts by \cite{Duflo2013,Kushner1978}. There have been a number of contributions with proofs of convergence of the stochastic gradient method for unconstrained nonconvex problems; see \cite{Bottou1998,Bottou2018,Shapiro1996,Wardi1989}. Fewer results exist for constrained and/or nonsmooth nonconvex problems. A randomized stochastic algorithm was proposed by \cite{Ghadimi2016}; this scheme involves running a stochastic approximation process and randomly choosing an iterate from the generated sequence. There have been some contributions involving constant step sizes with increasing sampling; see \cite{Lei2018,Reddi2016a}. Convergence of projection-type methods for nonconvex problems was shown in \cite{Kushner2003} and for prox-type methods by \cite{Davis2018}. As far as stochastic approximation on function spaces is concerned, many contributions were motivated by applications with nonparametric statistics. Perhaps the oldest example is from \cite{Venter1966}. Goldstein \cite{Goldstein1988} studied an infinite-dimensional version of the Kiefer--Wolfowitz procedure. A significant contribution for unconstrained problems was by \cite{Yin1990}. Projection-type methods were studied by \cite{Barty2007,Chen2002,Culioli1990,Nixdorf1984}. In this paper, we prove convergence results for nonconvex and nonsmooth problems in Hilbert spaces. We present convergence analysis that is based on the recent contributions in \cite{Davis2018,Lei2018}. Applications of the stochastic gradient method to PDE-constrained optimization have already been explored by \cite{Geiersbach2019,Martin2018}. In these works, however, convexity of the objective function is assumed, leaving the question of convergence in the more general case entirely open. We close that gap by making the following contributions: \begin{itemize} \item For an objective function that is the sum of a smooth, generally nonconvex expectation and a convex, nonsmooth term, we prove that strong accumulation points of iterates generated by the method are stationary points. \item We show that convergence holds even in the presence of systematic additive bias, which is relevant for the application in mind. \item We demonstrate the method on an application to PDE-constrained optimization under uncertainty and verify conditions for convergence. \end{itemize} The paper is organized as follows. In Sect.~\ref{sec:background}, notation and background is given. Convergence of two related algorithms is proven in Sect.~\ref{sec:convergence}. In Sect.~\ref{sec:numerical-experiments}, we introduce a problem in PDE-constrained optimization under uncertainty, where coefficients in the semilinear PDE constraint are subject to uncertainty. 
The problem is shown to satisfy conditions for convergence, and numerical experiments demonstrate the method. We finish the paper with closing remarks in Sect.~\ref{sec:conclusion}. \section{Notation and Background} \label{sec:background} We recall some notation and background from convex analysis and stochastic processes; see \cite{Bauschke2011,Clarke1990,Metivier2011,Pisier2016}. Let $H$ be a Hilbert space with the scalar product $\langle \cdot, \cdot \rangle$ and norm $\lVert \cdot \rVert$. The symbols $\rightarrow$ and $\rightharpoonup$ denote strong and weak convergence, respectively. The set of proper, convex, and lower semicontinuous functions $h:H \rightarrow (-\infty, \infty]$ is denoted by $\Gamma_0(H).$ Given a function $h \in \Gamma_0(H)$ and $t > 0$, the proximity operator $\textup{prox}_{th}:H \rightarrow H$ is given by \begin{equation*} \label{eq:prox-definition} \textup{prox}_{t h}(u) := \argmin_{v \in H} \left( h(v) + \frac{1}{2t} \lVert v-u\rVert^2\right). \end{equation*} We recall that for a proper function $h:H \rightarrow (-\infty, \infty]$, the subdifferential (in the sense of convex analysis) is the set-valued operator $$\partial h: H \rightrightarrows H: u \mapsto \{ v \in H: \langle y - u, v \rangle + h(u) \leq h(y) \quad \forall y \in H \}.$$ For any $h \in \Gamma_0(H)$, the subdifferential $\partial h$ is maximally monotone. The domain of $h$ is denoted by $\textup{d}om (h)$. The indicator function of a set $C$ is denoted by $\textup{d}elta_C$, where $\textup{d}elta_C(u) = 0$ if $u \in C$ and $\textup{d}elta_C(u) = \infty$ otherwise. The sum of two sets $A$ and $B$ with $\lambda \in \mathbb{R}$ is given by {$A+\lambda B:=\{ a+\lambda b: a \in A, b \in B\}.$} The distance of a point $u$ to a nonempty, closed set $A$ is denoted by $d(u,A):=\inf_{a \in A} \lVert u-a\rVert$ and the diameter of $A$ is denoted by the symbol $\text{diam}(A):=\sup_{u, v \in A} \lVert u - v \rVert.$ For a nonempty and convex set $C$, the normal cone $N_C(u)$ at $u \in C$ is defined by $$N_C(u):= \{ z \in H: \langle z, w-u \rangle \leq 0, \quad \forall w \in C\}.$$ We set $N_C(u) := \emptyset$ if $u \notin C$. We recall that $\partial \textup{d}elta_C(u) = N_C(u)$ for all $u \in C$. If $h_1,h_2 \in \Gamma_0(H)$ and $\textup{dom} (h_2) = H$, then $\partial [h_1(u) + h_2(u)] = \partial h_1(u) +\partial h_2(u)$. If $h$ is proper and $u \in \textup{dom}(h)$, then $\partial h(u)$ is closed and convex. We recall that the graph of $ \partial h$ for a function $h \in \Gamma_0(H)$, given by the set $\textup{gra}(\partial h) = \{ (u,\partial h(u)): u \in H\}$, is sequentially closed in the strong-to-weak topology, meaning that for $u_n \rightarrow u$, $\zeta_n \in \partial h(u_n)$, and $\zeta_n \rightharpoonup \zeta$, it follows that $\zeta \in \partial h(u)$. The normal cone $N_C(u)$ is strong-to-weak sequentially closed if $C$ is convex. Throughout, $(\Omega, \mathcal{F}, \mathbb{P})$ will denote a probability space, where $\Omega$ represents the sample space, $\mathcal{F} \subset 2^{\Omega}$ is the $\sigma$-algebra of events on the power set of $\Omega$, denoted by $2^{\Omega}$, and $\mathbb{P}\colon \Omega \rightarrow [0,1]$ is a probability measure. Given a random vector $\xi:\Omega \rightarrow \Xi \subset \mathbb{R}^m$, we write $\xi \in \Xi$ to denote a realization of the random vector. 
The operator $\mathbb{E}[\cdot]$ denotes the expectation with respect to this distribution; for a parametrized functional $J: H \times \Xi \rightarrow \mathbb{R}$, this is defined as the integral over all elements in $\Omega$, i.e., $$\mathbb{E}[J(u,\xi)] = \int_\Omega J(u,\xi(\omega)) \,\mathrm{d} \mathbb{P}(\omega).$$ A filtration is a sequence $\{ \mathcal{F}_n\}$ of sub-$\sigma$-algebras of $\mathcal{F}$ such that {$\mathcal{F}_1 \subset \mathcal{F}_2 \subset \cdots \subset \mathcal{F}.$} We define a discrete $H$-valued stochastic process as a collection of $H$-valued random variables indexed by $n$, in other words, the set $\{ \beta_n: \Omega \rightarrow H \, \vert \, n \in \mathbb{N}\}.$ The stochastic process is said to be adapted to a filtration $\{ \mathcal{F}_n \}$ if and only if $\beta_n$ is $\mathcal{F}_n$-measurable for all $n$. The natural filtration is the filtration generated by the sequence $\{\beta_n\}$ and is given by $\mathcal{F}_n = \sigma(\{\beta_1, \textup{d}ots ,\beta_n\})$.\footnote{The $\sigma$-algebra generated by a random variable $\beta:\Omega \rightarrow \mathbb{R}$ is given by $\sigma(\beta) = \{ \beta^{-1}(B): B \in \mathcal{B}\}$, where $\mathcal{B}$ is the Borel $\sigma$-algebra on $\mathbb{R}$. Analogously, the $\sigma$-algebra generated by the set of random variables $\{ \beta_1, \textup{d}ots, \beta_n\}$ is the smallest $\sigma$-algebra such that $\beta_i$ is measurable for all $i=1, \textup{d}ots, n$.} If for an event $F \in \mathcal{F}$ it holds that $\mathbb{P}(F) = 1$, or equivalently, $\mathbb{P}(\Omega\backslash F) = 0$, we say $F$ occurs almost surely (a.s.). Sometimes we also say that such an event occurs with probability one. A sequence of random variables $\{\beta_n\}$ is said to converge almost surely to a random variable $\beta$ if and only if $$\mathbb{P}\left(\left\lbrace \omega \in \Omega: \lim_{n \rightarrow \infty} \beta_n(\omega) = \beta(\omega) \right\rbrace\right) = 1.$$ For an integrable random variable $\beta:\Omega \rightarrow \mathbb{R}$, the conditional expectation is denoted by $\mathbb{E}[\beta | \mathcal{F}_n]$, which is itself a random variable that is $\mathcal{F}_n$-measurable and which satisfies $\int_A \mathbb{E}[\beta | \mathcal{F}_n](\omega) \,\mathrm{d} \mathbb{P}(\omega) = \int_A \beta(\omega) \,\mathrm{d} \mathbb{P}(\omega)$ for all $A \in \mathcal{F}_n$. Almost sure convergence of $H$-valued stochastic processes and conditional expectation are defined analogously. Given a random operator $F:X \times \Omega \rightarrow Y$, where $X$ and $Y$ are Banach spaces, we will sometimes use the notation $F_\omega:=F(\cdot, \omega):X \rightarrow Y$ for a fixed (but arbitrary) $\omega \in \Omega$. For a Banach space $(X,\lVert \cdot \rVert_X)$, the Bochner space $L^p(\Omega,X)$ is the set of all (equivalence classes of) strongly measurable functions $u:\Omega \rightarrow X$ having finite norm, where the norm is defined by $$\lVert u \rVert_{L^p(\Omega,X)}:= \begin{cases} (\int_\Omega \lVert u(\omega) \rVert_X^p \,\mathrm{d} \mathbb{P}(\omega))^{1/p}, \quad &p < \infty\\ \esssup_{\omega \in \Omega} \lVert u(\omega) \rVert_X, \quad &p=\infty \end{cases}. $$ A sequence $\{\beta_n\}$ in $L^1(\Omega, X)$ is called a martingale if a filtration $\{ \mathcal{F}_n\}$ exists such that $\beta_n$ is $\mathcal{F}_n$-measurable and $\mathbb{E}[\beta_{n+1}|\mathcal{F}_n] = \beta_{n}$ is satisfied for all $n$. 
For an open subset $U$ of a Banach space $X$ and a function $J_\omega:U \rightarrow \mathbb{R}$, we denote the G\^{a}teaux derivative at $u \in U$ in the direction $v \in X$ by $dJ_\omega(u; v).$ The Fr\'echet derivative at $u$ is denoted by $J_\omega':U \rightarrow \mathcal{L}(X,\mathbb{R})$, where $\mathcal{L}(X,\mathbb{R})$ is the set of bounded and linear operators mapping $X$ to $\mathbb{R}$. We recall this is none other than the dual space $X^*$ and we denote the dual pairing by $\langle \cdot, \cdot \rangle_{X^*,X}$. For an open subset $U$ of a Hilbert space $H$ and a Fr\'echet differentiable function $j:U \rightarrow \mathbb{R}$, the gradient $\nabla j:U \rightarrow H$ is the Riesz representation of $j':U \rightarrow H^*$, i.e.,~it satisfies $\langle \nabla j(u), v \rangle = \langle j'(u),v \rangle_{H^*,H}$ for all $u \in U$ and $v \in H.$ In Hilbert spaces, the Riesz representation relates elements of the dual space to the Hilbert space itself, allowing us to drop the dual pairing notation and use simply $\langle \cdot, \cdot \rangle$. The notation $C_L^{1,1}(U)$ is used to denote the set of continuously differentiable functions on $U \subset H$ with an $L$-Lipschitz gradient, meaning $\lVert \nabla j(u) - \nabla j(v) \rVert \leq L \lVert u - v \rVert$ is satisfied for all $u,v \in U.$ The following lemma gives a classical Taylor estimate for such functions. \begin{lemma}\label{lemma:lipschitzderivative-Hilbert} Suppose $j \in C_L^{1,1}(U)$, $U\subset H$ open and convex. Then for all $u, v \in U$, $$j(v) + \langle \nabla j(v), u-v\rangle - \frac{L}{2} \lVert u - v \rVert^2 \leq j(u) \leq j(v) + \langle \nabla j(v), u-v\rangle + \frac{L}{2} \lVert u-v\rVert^{2}.$$ \end{lemma} \section{Asymptotic Convergence Results} \label{sec:convergence} In this section, we show asymptotic convergence results for two variants of the stochastic proximal gradient method in Hilbert spaces for solving Problem \eqref{eq:ProblemFormulation-basic}. Let $G:H \times \Xi \rightarrow H$ be a parametrized operator (the \textit{stochastic gradient}) approximating (in a sense to be specified later) the gradient $\nabla j:H \rightarrow H$ and let $t_n$ be a positive step size. Both algorithms in this section will share the basic iterative form $$u_{n+1} := \textup{prox}_{t_n h}(u_n - t_n G(u_n,\xi_n)),$$ where $h$ is the nonsmooth term from Problem \eqref{eq:ProblemFormulation-basic}. The following assumptions will be in force in all sections. \begin{assumption} \label{asu1} Let $\{\mathcal{F}_n \}$ be a filtration and let $\{ u_n\}$ and $\{G(u_n,\xi_n)\}$ be sequences of iterates and stochastic gradients. We assume \\ \subasu \label{asu1i} The sequence $\{ u_n\}$ is a.s.~contained in a bounded set $V \subset H$ and $u_n$ is adapted to $\mathcal{F}_n$ for all $n$.\\ \subasu \label{asu1ii} On an open and convex set $U$ such that $V \subset U \subset H$, the expectation $j\in C_L^{1,1}(U)$ is bounded below.\\ \subasu \label{asu1iii} For all $n$, the $H$-valued random variable $r_n := \mathbb{E}[G(u_n,\xi_n) | \mathcal{F}_n] - \nabla j(u_n)$ is adapted to $ \mathcal{F}_n$ and for $K_n:=\esssup_{\omega \in \Omega} \lVert r_n(\omega)\rVert$, {$ \sum_{n=1}^\infty t_n K_n< \infty$} and $\sup_{n} K_n<\infty$ are satisfied. \\ \subasu \label{asu1iv} For all $n$, $\mathfrak{w}_n := G(u_n, \xi_n) - \mathbb{E}[G(u_n, \xi_n) | \mathcal{F}_n]$ is an $H$-valued random variable. 
\end{assumption} \begin{remark} \label{remark:general-assumptions} The assumption that the sequence $\{ u_n\}$ stays bounded with probability one is by no means automatically fulfilled, but can be verified or enforced in different ways. We refer to \cite[Section 5.2]{Bottou1998} and \cite[Section 6.1]{Davis2018} for conditions on the function, constraint set, and/or regularizers that ensure boundedness of iterates. The conditions in Assumption~\ref{asu1} allow for additive bias $r_n$ in the stochastic gradient in addition to zero-mean error $\mathfrak{w}_n$. The requirement that $u_n$ and $r_n$ are adapted to $\mathcal{F}_n$ is automatically fulfilled if $\{ \mathcal{F}_n\}$ is chosen to be the natural filtration generated by $\{\xi_1, \textup{d}ots, \xi_n \}$. Together, Assumption~\ref{asu1iii} and Assumption~\ref{asu1iv} imply $$G(u_n,\xi_n) = \nabla j(u_n) + r_n + \mathfrak{w}_n$$ and $\mathbb{E}[\mathfrak{w}_n | \mathcal{F}_n] = 0.$ Notice that a single realization $\xi_n \in \Xi$ can be replaced by $m_n$ independently drawn realizations $\xi_{n}^1, \textup{d}ots, \xi_{n}^{m_n} \in \Xi$ since \begin{equation*} \mathbb{E}[G(u_n,\xi_n)|\mathcal{F}_n] =\frac{1}{m_n} \mathbb{E} \left[\sum_{i=1}^{m_n} G(u_n,\xi_{n}^{i}) |\mathcal{F}_n \right]. \end{equation*} This set of $m_n$ samples is sometimes called a ``batch''; batches clearly reduce the variance of the stochastic gradient. \end{remark} The result in Sect.~\ref{subsection:SPGM-Variance-Reduced} shows asymptotic convergence of the proximal gradient method with constant step sizes and increasing sampling. In Sect.~\ref{subsection:ODE-proof}, we switch to the versatile ordinary differential equation (ODE) method to prove convergence of the stochastic proximal gradient method with decreasing step sizes. We emphasize that the convergence results generalize existing convergence theory from the finite-dimensional case. Our analysis includes convergence in possibly infinite-dimensional Hilbert spaces. Additionally, we allow for stochastic gradients subject to additive bias, which is not covered by existing results. This theory can be used to develop mesh refinement strategies in applications with PDEs \cite{Geiersbach2020b}. \subsection{Variance-Reduced Stochastic Proximal Gradient Method} \label{subsection:SPGM-Variance-Reduced} In this section, we show under what conditions the variance-reduced stochastic proximal gradient method converges to stationary points for Problem \eqref{eq:ProblemFormulation-basic}. With $\xi_n = (\xi_{n}^1,\textup{d}ots, \xi_{n}^{m_n}),$ the stochastic gradient is given by the average $$G(u_n,\xi_n) = \frac{\sum_{i=1}^{m_n}G(u_n,\xi_{n}^i)}{m_n}$$ over an \textit{increasing} number of samples $m_n$. The algorithm is presented below, which uses constant step sizes $t_n \equiv t$ depending on the Lipschitz constant $L$ from Assumption~\ref{asu1ii}. 
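To fix ideas before the formal statement below, the following minimal finite-dimensional sketch (in Python, assuming only \texttt{numpy}) applies the update $u_{n+1} = \textup{prox}_{t h}\big(u_n - \tfrac{t}{m_n}\sum_{i=1}^{m_n} G(u_n,\xi_{n}^i)\big)$ to a toy quadratic expectation with $h$ an $\ell^1$ penalty plus a box indicator, for which the proximity operator is componentwise soft-thresholding followed by clipping. The toy problem, the parameter choices, and all names in the sketch are our own illustrative assumptions; this is not the PDE-constrained implementation discussed later in the paper.
\begin{verbatim}
# Minimal finite-dimensional sketch (ours) of the variance-reduced
# stochastic proximal gradient update with increasing batch sizes.
# Model problem: j(u) = E[0.5*||A u - b(xi)||^2],  h = lam*||.||_1 + box.
import numpy as np

rng = np.random.default_rng(0)
d = 50
A = rng.standard_normal((d, d)) / np.sqrt(d)
b_mean = rng.standard_normal(d)
lam, lo, hi = 0.1, -1.0, 1.0      # h = lam*||.||_1 + indicator of [lo,hi]^d

def stochastic_gradient(u, batch):
    """Batch average of G(u, xi) = A^T (A u - b(xi)), b(xi) = b_mean + noise."""
    grads = [A.T @ (A @ u - (b_mean + 0.1 * rng.standard_normal(d)))
             for _ in range(batch)]
    return np.mean(grads, axis=0)

def prox_h(v, t):
    """prox of t*h for the separable choice h = lam*||.||_1 + box indicator:
    soft-thresholding followed by clipping (each 1-d subproblem is convex)."""
    return np.clip(np.sign(v) * np.maximum(np.abs(v) - t * lam, 0.0), lo, hi)

L = np.linalg.norm(A.T @ A, 2)    # Lipschitz constant of grad j
t = 0.9 / (2.0 * L)               # constant step size, t < 1/(2L)
u = np.zeros(d)
for n in range(1, 50):
    m_n = n * n                   # increasing batch size, sum 1/m_n < infinity
    u = prox_h(u - t * stochastic_gradient(u, m_n), t)
print("final iterate norm:", np.linalg.norm(u))
\end{verbatim}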
\begin{algorithm}[H] \begin{algorithmic}[0] \STATE \textbf{Initialization:} $u_1 \in H$, $0<t<\tfrac{1}{2L}$ \FOR{$n=1,2,\textup{d}ots$} \STATE Generate independent $\xi_{n}^1, \textup{d}ots, \xi_{n}^{m_n} \in \Xi$, independent of $\xi_{1}^1, \textup{d}ots, \xi_{n-1}^{m_{n-1}}$ \STATE $u_{n+1}:=\textup{prox}_{t h}\left( u_n - t \frac{\sum_{i=1}^{m_n}G(u_n,\xi_{n}^i)}{m_n}\right)$ \mathbb{E}NDFOR \end{algorithmic} \captionof{algorithm}{Variance-Reduced Stochastic Proximal Gradient Method} \label{alg:PSG_Hilbert_Nonconvex} \end{algorithm} \begin{remark} \label{remark:indicator-projection} If $h(u)=\textup{d}elta_C(u)$ and $\pi_C$ denotes the projection onto $C$, then the algorithm reduces to $u_{n+1}:=\pi_C\left( u_n - t \frac{\sum_{i=1}^{m_n}G(u_n,\xi_{n}^i)}{m_n}\right),$ i.e., the variance-reduced projected stochastic gradient method. \end{remark} In addition to Assumption~\ref{asu1}, the following assumptions will be in force in this section. \begin{assumption} \label{assumption:well-posedness-constrained} Let $\{ u_n\}$ and $\{G(u_n,\xi_n)\}$ be generated by Algorithm~\ref{alg:PSG_Hilbert_Nonconvex}. We assume\\ \setcounter{subassumption}{0} \subasu \label{asu3i} The function $h$ satisfies $h \in \Gamma_0(H)$.\\ \subasu \label{asu3ii} For all $n$, $$w_n : = \frac{\sum_{i=1}^{m_n}G(u_n,\xi_{n}^i)}{m_n} - \nabla j(u_n)$$ an $H$-valued random variable and there exists an $M \geq 0$ such that $\mathbb{E}[\lVert w_n\rVert^2 | \mathcal{F}_n] \leq \frac{M}{m_n}$ and $\sum_{n=1}^\infty \tfrac{1}{m_n} < \infty$. \end{assumption} \begin{remark} We use assumptions similar to those found in \cite{Lei2018}, but we do not require the effective domain of $h$ to be bounded; we instead use boundedness of the iterates by Assumption~\ref{asu1i}. Notice that $w_n = r_n + \mathfrak{w}_n$ from Assumption~\ref{asu1iv}, hence Assumption~\ref{asu3ii} also provides a condition on the rate at which $r_n$ and $\mathfrak{w}_n$ must decay. \end{remark} For the convergence result, we need the following lemma \cite{Robbins1971}. \begin{lemma}[Robbins--Siegmund] \label{lemma:Robbins-Siegmund-Chapter-3} Assume that $\{\mathcal{F}_n\}$ is a filtration and $v_n$, $a_n$, $b_n$, $c_n$ nonnegative random variables adapted to $\mathcal{F}_n.$ If \begin{equation*}\label{eq:Robbins-Siegmund} \mathbb{E}[v_{n+1} | \mathcal{F}_n] \leq v_n(1+a_n)+ b_n-c_n \quad \text{a.s.} \end{equation*} and $\sum_{n=1}^\infty a_n < \infty, \sum_{n=1}^\infty b_n < \infty$ a.s., then with probability one, $\{v_n\}$ is convergent and $\sum_{n=1}^\infty c_n < \infty$. \end{lemma} To show convergence, we first present a technical lemma. \begin{lemma} \label{lemma:fundamental-inequality-prox} Let $u\in U$ and $t>0$. Suppose $v:=\textup{prox}_{t h}(u-t g) \in U$ for a given $g \in H$. Then for any $z \in U$, \begin{equation} \label{eq:fundamental-inequality-prox} \begin{aligned} f(v) &\leq f(z) + \langle v-z, \nabla j(u) - g\rangle + \left( \frac{L}{2}- \frac{1}{2t}\right) \lVert v-u \rVert^2\\ &\quad\quad + \left( \frac{L}{2} + \frac{1}{2t}\right) \lVert z-u \rVert^2 - \frac{1}{2t} \lVert v-z \rVert^2. \end{aligned} \end{equation} \end{lemma} \begin{proof} We first claim that for all $y,z\in H$, $t>0$ and $p=\textup{prox}_{th}(y)$, \begin{equation} \label{eq:prox-inequality} h(p) + \frac{1}{2t} \lVert p-y \rVert^2 \leq h(z) + \frac{1}{2t} \lVert z-y \rVert^2 - \frac{1}{2t} \lVert p-z \rVert^2. \end{equation} This follows by definition of the $\textup{prox}$ operator. 
Indeed, for $t > 0$, $p = \textup{prox}_{t h}(y)$ if and only if for all $z \in H$, \begin{equation} \label{eq:first-inequality-prox} h(z) \geq h(p) + \frac{1}{t} \langle y -p, z-p \rangle. \end{equation} It is straightforward to verify the following equality (the law of cosines) \begin{equation} \label{eq:second-inequality-cosine-law} \lVert z - y \rVert^2 = \lVert z-p\rVert^2 + \lVert p-y\rVert^2 - 2 \langle y-p, z-p\rangle. \end{equation} Multiplying \eqref{eq:second-inequality-cosine-law} by $\tfrac{1}{2t}$ and adding it to \eqref{eq:first-inequality-prox}, we get \eqref{eq:prox-inequality}. Now, since $j\in C^{1,1}_L(U)$, it follows by Lemma~\ref{lemma:lipschitzderivative-Hilbert} for $u,v,z\in U$ that \begin{align} j(v)& \leq j(u) + \langle \nabla j(u), v-u \rangle + \frac{L}{2} \lVert v - u \rVert^2, \label{eq:Lipschitz-inequality1}\\ j(u)& \leq j(z) + \langle \nabla j(u), u-z \rangle + \frac{L}{2} \lVert z - u \rVert^2. \label{eq:Lipschitz-inequality2} \end{align} Combining \eqref{eq:Lipschitz-inequality1} and \eqref{eq:Lipschitz-inequality2}, we get \begin{equation} \label{eq:Lipschitz-inequality-combined} j(v) \leq j(z) + \langle \nabla j(u), v-z \rangle + \frac{L}{2} \lVert v - u \rVert^2 + \frac{L}{2} \lVert z - u \rVert^2. \end{equation} Now, by \eqref{eq:prox-inequality} applied to $v = \textup{prox}_{th}(u-tg)$, \begin{align*} h(v) + \frac{1}{2t} \lVert v-(u-tg)\rVert^2 &\leq h(z) +\frac{1}{2t} \lVert z-(u-tg)\rVert^2 -\frac{1}{2t} \lVert v-z\rVert^2 \end{align*} if and only if \begin{equation} \label{eq:prox-inequality-with-values} \begin{aligned} & h(v) + \frac{1}{2t} \lVert v-u \rVert^2 + \langle v-u,g\rangle\\ &\quad \quad \leq h(z)+\frac{1}{2t} \lVert z-u\rVert^2+\langle z-u,g\rangle -\frac{1}{2t} \lVert v-z\rVert^2. \end{aligned} \end{equation} Finally, adding \eqref{eq:Lipschitz-inequality-combined} and \eqref{eq:prox-inequality-with-values}, and using that $f = j+h$, we get \eqref{eq:fundamental-inequality-prox}. \end{proof} In the following, we define \begin{equation} \label{eq:prox-point-full-gradient} \bar{u}_{n+1} := \textup{prox}_{t h}(u_n - t \nabla j(u_n) ) \end{equation} as the iterate at $n+1$ if the true gradient were used. \begin{lemma} \label{lemma:martingale-inequality-fundamental-constant-steps} For all $n$, \begin{equation} \label{inequality-prox-sequence} \mathbb{E}[f(u_{n+1}) |\mathcal{F}_n] \leq f(u_n) - \left( \frac{1}{2t} - L \right) \lVert \bar{u}_{n+1} - u_n \rVert^2 + \frac{t}{2} \mathbb{E}[ \lVert w_n \rVert^2 | \mathcal{F}_n] \quad \textup{a.s}. \end{equation} \end{lemma} \begin{proof} Using Lemma~\ref{lemma:fundamental-inequality-prox} with $v = \bar{u}_{n+1}$, $u=z=u_n$, and $g = \nabla j(u_n)$, we have \begin{equation} \label{eq:martingale-inequality-proof-inequality1} f(\bar{u}_{n+1}) \leq f(u_n) + \left( \frac{L}{2} - \frac{1}{t}\right) \lVert \bar{u}_{n+1} - u_n \rVert^2. \end{equation} Again using Lemma~\ref{lemma:fundamental-inequality-prox}, with $v=u_{n+1}$, $z=\bar{u}_{n+1}$, $u=u_n$, and $g = \nabla j(u_n) + w_n$, we get \begin{equation} \label{eq:martingale-inequality-proof-inequality2} \begin{aligned} f(u_{n+1}) &\leq f(\bar{u}_{n+1}) - \langle u_{n+1}-\bar{u}_{n+1}, w_n\rangle + \left( \frac{L}{2} - \frac{1}{2t}\right) \lVert u_{n+1} - u_n \rVert^2 \\ & \qquad + \left( \frac{L}{2} + \frac{1}{2t}\right) \lVert \bar{u}_{n+1} - u_n \rVert^2 - \frac{1}{2t} \lVert u_{n+1}-\bar{u}_{n+1}\rVert^2. 
\end{aligned} \end{equation} By Young's inequality, $ \langle u_{n+1}-\bar{u}_{n+1}, w_n\rangle \leq \frac{1}{2t}\lVert u_{n+1}-\bar{u}_{n+1} \rVert^2 + \frac{t}{2} \lVert w_n \rVert^2, $ so combining \eqref{eq:martingale-inequality-proof-inequality1} and \eqref{eq:martingale-inequality-proof-inequality2}, we obtain since $0 < t < \tfrac{1}{2L}$ that \begin{equation} \begin{aligned} \label{eq:martingale-inequality-proof-inequality3} f(u_{n+1}) &\leq f(u_n) + \left( L-\frac{1}{2t} \right) \lVert \bar{u}_{n+1}-u_{n}\rVert^2 + \left( \frac{L}{2}-\frac{1}{2t} \right) \lVert u_{n+1}-u_{n}\rVert^2 \\ & \quad \quad + \frac{t}{2} \lVert w_n \rVert^2 \\ & \leq f(u_n) + \left( L-\frac{1}{2t} \right) \lVert \bar{u}_{n+1}-u_{n}\rVert^2 + \frac{t}{2} \lVert w_n \rVert^2. \end{aligned} \end{equation} Taking conditional expectation on both sides of \eqref{eq:martingale-inequality-proof-inequality3}, and noting that $\bar{u}_{n+1}$ is $\mathcal{F}_n$-measurable by $\mathcal{F}_n$-measurability of $u_n$, we get \eqref{inequality-prox-sequence}. \end{proof} \begin{remark} Any bounded sequence $\{ u_n\}$ in $H$ contains a weakly convergent subsequence $\{ u_{n_k}\}$ such that $u_{n_k} \rightharpoonup u$ for a $u \in H.$ Generally this convergence is not strong, so we cannot conclude from $\lVert \bar{u}_{n+1}-u_{n}\rVert^2 \rightarrow 0$ that there exists a $\tilde{u}$ such that, for a subsequence $\{ u_{n_k}\}$, {$\lim_{k \rightarrow \infty} \bar{u}_{n_k+1} = \lim_{k \rightarrow \infty} u_{n_k} = \tilde{u}.$} Therefore, to obtain convergence to stationary points, we will assume that $\{ u_n\}$ has a strongly convergent subsequence. \end{remark} We are ready to state the convergence result for sequences generated by~Algorithm~\ref{alg:PSG_Hilbert_Nonconvex}. \begin{theorem} \label{theorem:convergence-variance-reduced-stochastic-gradient} Let Assumption~\ref{asu1} and Assumption~\ref{assumption:well-posedness-constrained} hold. Then \begin{enumerate} \item The sequence $\{ f(u_n)\}$ converges a.s. \item The sequence $\{ \lVert \bar{u}_{n+1} - u_n \rVert \}$ converges to zero a.s. \item Every strong accumulation point of $\{ u_n\}$ is a stationary point with probability one. \end{enumerate} \end{theorem} \begin{proof} The sequence $\{ u_n\}$ is contained in a bounded set $V$ by Assumption~\ref{asu1i}. By Assumption~\ref{asu3i}, $h \in \Gamma_0(H)$ must therefore be bounded below on $V$ \cite[Corollary 9.20]{Bauschke2011}; $j$ is bounded below by Assumption~\ref{asu1ii}. W.l.o.g.~we can thus assume $f \geq 0$. Since $\frac{1}{2t} > L $ and $\sum_{n=1}^\infty \mathbb{E}[\lVert w_n \rVert^2 |\mathcal{F}_n] < \infty$ by Assumption~\ref{asu3ii}, we can apply Lemma~\ref{lemma:Robbins-Siegmund-Chapter-3} to \eqref{inequality-prox-sequence} to conclude that $f(u_n)$ converges almost surely. The second statement follows immediately, since by Lemma~\ref{lemma:Robbins-Siegmund-Chapter-3}, \begin{equation} \label{eq:finite-sum-difference-iterates} \sum_{n=1}^\infty \lVert \bar{u}_{n+1} - u_n \rVert^2 < \infty \quad \text{a.s.}, \end{equation} which implies that for almost every sample path, $\lim_{n \rightarrow \infty} \lVert \bar{u}_{n+1} - u_n \rVert^2 = 0.$ For the third statement, we have that there exists a subsequence $\{ u_{n_k}\}$ such that $u_{n_k} \rightarrow u$. We argue that then $\bar{u}_{n_k+1} \rightarrow u$. Since $\{ \bar{u}_{n_k+1}\}$ is bounded, there exists a weak limit point $\tilde{u}$ (potentially on a subsequence with the same labeling). 
Then, using weak lower semicontinuity of the norm as well as the rule $\langle a_n, b_n \rangle \rightarrow \langle a,b\rangle$ for $a_n \rightharpoonup a$ and $b_n \rightarrow b$, \begin{align*} 0 &= \lim_{k \rightarrow \infty} \lVert \bar{u}_{n_k+1} - u_{n_k}\rVert^2 = \lim_{k \rightarrow \infty} \lVert \bar{u}_{n_k+1} \rVert^2 - 2 \langle \bar{u}_{n_k+1}, u_{n_k} \rangle + \lVert u_{n_k} \rVert^2\\ & = \liminf_{k \rightarrow \infty} \lVert \bar{u}_{n_k+1} \rVert^2 - 2 \langle \bar{u}_{n_k+1}, u_{n_k} \rangle + \lVert u_{n_k} \rVert^2 \\ &\geq \lVert \tilde{u} \rVert^2 - 2 \langle \tilde{u}, u \rangle + \lVert u \rVert^2 = \lVert \tilde{u} - u \rVert^2 \geq 0, \end{align*} implying $u=\tilde{u}.$ It follows $\bar{u}_{n_k+1} \rightarrow u$ by assuming {$\lim_{k \rightarrow \infty} \lVert \bar{u}_{n_k+1} \rVert^2 \neq \lVert u \rVert^2$} and arriving at a contradiction. Now, by definition of the $\textup{prox}$ operator, \begin{align*} \bar{u}_{n_k+1} &= \textup{prox}_{t h}(u_{n_k} - t \nabla j(u_{n_k}) )\\ &= \argmin_{v \in H} \Big\lbrace h(v) + \frac{1}{2t} \lVert v- u_{n_k} + t \nabla j(u_{n_k}) \rVert^2 \Big\rbrace\\ & = \argmin_{v \in H} \Big\lbrace h(v) + \langle \nabla j(u_{n_k}), v \rangle + \frac{1}{2t} \lVert v \rVert^2- \frac{1}{t} \langle v,u_{n_k}\rangle =:H(v)\Big\rbrace. \end{align*} Clearly, $\partial H(v)=\partial h(v) + \nabla j(u_{n_k}) + \tfrac{1}{t} (v-u_{n_k})$. By optimality of $\bar{u}_{n_k+1}$ (see Fermat's rule, \cite[Theorem 16.2]{Bauschke2011}), $0 \in \partial H(\bar{u}_{n_k+1})$, or equivalently, $$-\frac{1}{t} (\bar{u}_{n_k+1} - u_{n_k}) \in \nabla j(u_{n_k}) + \partial h(\bar{u}_{n_k+1}).$$ Taking the limit as $k \rightarrow \infty$, and using continuity of $\nabla j$, we conclude by strong-to-weak sequential closedness of $\textup{gra}(\partial h)$ that \begin{equation} \label{eq:KKT-nonsmooth} 0 \in \nabla j(u) + \partial h(u), \end{equation} so therefore $u$ is a stationary point. \end{proof} \subsection{Stochastic Proximal Gradient Method - Decreasing Step Sizes} \label{subsection:ODE-proof} An obvious drawback of Algorithm~\ref{alg:PSG_Hilbert_Nonconvex} is the fact that step sizes are restricted to small steps bounded by a factor depending on the Lipschitz constant, which in applications might be difficult to determine. Additionally, the algorithm requires increasing batch sizes to dampen noise, which is unattractive from a complexity standpoint. In this section, we obtain convergence with a nonsmooth and convex term $h$ using the step size rule \begin{equation}\label{eq:Robbins-Monro-stepsizes} t_n \geq 0, \quad \sum_{n=1}^\infty t_n = \infty, \quad \sum_{n=1}^\infty t_n^2 < \infty. \end{equation} This step size rule dampens noise enough so that increased sampling is not necessary. We observe Problem \eqref{eq:ProblemFormulation-basic} with $$h(u) := \eta(u) + \textup{d}elta_C(u).$$ For asymptotic arguments, it will be convenient to treat the term $\textup{d}elta_C$ separately. To that end, we define $$\varphi(u):=j(u)+\eta(u)$$ and note that $f(u) = \varphi(u) + \textup{d}elta_C(u).$ The stochastic gradient $G(u,\xi):H\times \Xi \rightarrow H$ can be comprised of one or more samples as in the unconstrained case; see Remark~\ref{remark:general-assumptions}. The algorithm is now stated below. 
\begin{algorithm}[H] \begin{algorithmic}[0] \STATE \textbf{Initialization:} $u_1 \in C$ \FOR{$n=1,2,\textup{d}ots$} \STATE Generate $\xi_{n} \in \Xi$, independent of $\xi_{1}, \textup{d}ots, \xi_{n-1}$ \STATE Choose $t_n$ satisfying \eqref{eq:Robbins-Monro-stepsizes} \STATE $u_{n+1}:=\textup{prox}_{t_n h}\left( u_n - t_n G(u_n,\xi_{n})\right)$ \mathbb{E}NDFOR \end{algorithmic} \captionof{algorithm}{Stochastic Proximal Gradient Method} \label{alg:PSG_Hilbert_Nonconvex_Decreasing_Steps} \end{algorithm} To prove convergence of Algorithm~\ref{alg:PSG_Hilbert_Nonconvex_Decreasing_Steps}, we will use the ODE method, which dates back to \cite{Kushner1978,Ljung1977}. While we use many ideas from \cite{Davis2018}, we emphasize that we generalize results to (possibly infinite-dimensional) Hilbert spaces and moreover, we handle the case when $j$ is the expectation. We define the set-valued map $S:C \rightrightarrows H$ by $$S(u) := - \nabla j(u) - \partial \eta(u) - N_C(u).$$ Additionally, we define the sequence of (single-valued) maps $S_n:C \rightarrow H$ for all $n$ by $$S_n (u):= - \nabla j(u) - \frac{1}{t_n} \mathbb{E}[u - t_n G(u,\xi) - \textup{prox}_{t_n h}(u - t_n G(u,\xi))]. $$ In addition to Assumption~\ref{asu1}, the following assumptions will apply in this section. \begin{assumption} \label{assumptions:general-convergence-proof} Let $\{ u_n\}$ and $\{G(u_n,\xi_n)\}$ be generated by Algorithm~\ref{alg:PSG_Hilbert_Nonconvex_Decreasing_Steps}. We assume\\ \setcounter{subassumption}{0} \subasu \label{asu4i} The set $C$ is nonempty, bounded, convex, and closed.\\ \subasu \label{asu4ii} The function $\eta \in \Gamma_0(H)$ with $\textup{dom}(\eta) = H$ is locally Lipschitz and bounded below on $C$, and there exists a function $L_{\eta}: H \rightarrow \mathbb{R}$, which is bounded on bounded sets, satisfying \begin{equation} \label{eq:local-Lipschitz-bound-h} L_\eta(u) \geq \sup_{z:\eta(z) \leq \eta(u)} \frac{\eta(u)-\eta(z)}{\lVert u - z\rVert}. \end{equation} \subasu \label{asu4iii} There exists a function $M:H \rightarrow [0,\infty)$, which is bounded on bounded sets, such that $\mathbb{E}[\lVert G(u,\xi) \rVert^2] \leq M(u).$\\ \subasu \label{asu4iv} For any strongly convergent sequence $\{u_n\}$, $\mathbb{E}[ \sup_n \lVert G(u_n,\xi) \rVert] < \infty$ holds.\\ \subasu \label{asu4v} The set of critical values $\{ f(u): 0 \in \partial f(u)\}$ does not contain any segment of nonzero length. \end{assumption} \begin{remark} To handle the infinite-dimensional case, we use assumptions that are generally more restrictive than in \cite{Davis2018}; we restrict ourselves to the case where $C$ and $\eta$ are convex and we assume higher regularity of $j$ in Assumption~\ref{asu1ii} to handle the case $j(u) = \mathbb{E}[J(u,\xi)]$. However, we allow for bias $r_n$, which is not covered in \cite{Davis2018}. We note that $C$ does not need to be bounded if $\eta$ is Lipschitz continuous over $C$. Assumption~\ref{asu4ii} is satisfied if $\textup{dom}(\partial \eta) = H$ and $\partial \eta$ maps bounded sets to bounded sets; see also \cite[Proposition 16.17]{Bauschke2011} for equivalent conditions. The last assumption is technical but standard; see \cite[Assumption H4]{Ruszczynski1983}. \end{remark} The main result is the following, which we will prove in several parts. Throughout, we use the notation $g_n:=G(u_n,\xi_n)$. \begin{theorem} \label{theorem:convergence-variance-reduced-stochastic-gradient-decreasing-steps} Let Assumption~\ref{asu1} and Assumption~\ref{assumptions:general-convergence-proof} hold. 
Then \begin{enumerate} \item The sequence $\{ f(u_n) \}$ converges a.s. \item Every strong accumulation point $u$ of the sequence $\{u_n\}$ is a stationary point with probability one, namely, $0 \in \partial f(u)$ a.s. \end{enumerate} \end{theorem} \begin{lemma} \label{lemma:recursion-relation} The sequence $\{ u_n\}$ satisfies the recursion \begin{equation} \label{eq:fundamental-recursion} u_{n+1} = u_n + t_n (y_n - r_n +w_n), \end{equation} where $y_n = S_n(u_n)$ and $w_n=-\frac{1}{t_n}\mathbb{E}[\textup{prox}_{t_n h}(u_n - t_n g_n)|\mathcal{F}_n] + \frac{1}{t_n}\textup{prox}_{t_n h}(u_n - t_n g_n)$. \end{lemma} \begin{proof} Note that $u_n$ and $r_n$ are $\mathcal{F}_n$-measurable, so $\mathbb{E}[g_n|\mathcal{F}_n] = \nabla j(u_n) + r_n$. Then \begin{align*} &u_{n+1} - u_n =\textup{prox}_{t_n h}(u_n - t_n g_n) - u_n\\ & \quad= -t_n \mathbb{E}[g_n |\mathcal{F}_n] - \mathbb{E}[u_n - t_n g_n - \textup{prox}_{t_n h}(u_n - t_n g_n)|\mathcal{F}_n] \\ &\quad \quad \quad - \mathbb{E}[\textup{prox}_{t_n h}(u_n - t_n g_n)|\mathcal{F}_n] + \textup{prox}_{t_n h}(u_n - t_n g_n)\\ &\quad= t_n S_n(u_n) - t_n r_n - \mathbb{E}[\textup{prox}_{t_n h}(u_n - t_n g_n)|\mathcal{F}_n] + \textup{prox}_{t_n h}(u_n - t_n g_n), \end{align*} where we used that $\xi_n$ is independent from $\xi_1, \textup{d}ots, \xi_{n-1}$, so \begin{equation} \label{eq:measurability-yn} \begin{aligned} &\mathbb{E}[u_n - t_n g_n - \textup{prox}_{t_n h}(u_n - t_n g_n)|\mathcal{F}_n]\\ & \quad\quad = \mathbb{E}[u_n - t_n G(u_n,\xi) - \textup{prox}_{t_n h}(u_n - t_n G(u_n,\xi))]. \end{aligned} \end{equation} By definition of $y_n$ and $w_n$, we arrive at the conclusion. \end{proof} \begin{lemma} \label{lemma:inequality-single-step} For any $u \in C$, $g \in H$ and $t > 0$, we have for $\bar{u} = \textup{prox}_{t h}(u - t g)$ that $$\frac{1}{t} \lVert \bar{u} - u \rVert \leq 2 L_\eta(u) + 2 \lVert g \rVert.$$ \end{lemma} \begin{proof} By definition of the proximity operator, $$ \eta(\bar{u}) + \textup{d}elta_C(\bar{u})+\frac{1}{2t} \lVert \bar{u} - (u-tg) \rVert^2 \leq \eta(u)+ \textup{d}elta_C({u}) +\frac{1}{2t} \lVert u - (u-tg) \rVert^2,$$ or equivalently (note $\bar{u}, u \in C$), $$ \eta(\bar{u}) + \frac{1}{2t} \lVert \bar{u} -u \rVert^2 + \langle \bar{u}-u, g\rangle \leq \eta(u). $$ By \eqref{eq:local-Lipschitz-bound-h}, in the case $\eta(u) \geq \eta(\bar{u})$, we obtain \begin{equation} \label{lemma-inequality-single-step-proof1} \frac{1}{t} \lVert \bar{u} - u \rVert^2 \leq 2 (\eta(u) - \eta(\bar{u})) - 2\langle \bar{u} - u, g\rangle \leq 2L_\eta(u) \lVert \bar{u} - u \rVert + 2\lVert \bar{u} - u \rVert \lVert g \rVert. \end{equation} Notice that the last inequality \eqref{lemma-inequality-single-step-proof1} is trivial whenever $\eta(u) \leq \eta(\bar{u})$. This yields the conclusion. \end{proof} \begin{lemma} \label{lemma:vanishing-diff-inclusion-approximation} The sequence $\{ y_n\}$ is bounded a.s. 
\end{lemma} \begin{proof} By the characterization of $y_n=S_n(u_n)$ from Lemma~\ref{lemma:recursion-relation} and \eqref{eq:measurability-yn}, followed by Jensen's inequality, and the application of Lemma~\ref{lemma:inequality-single-step} in the fourth inequality, we get \begin{equation} \label{eq:y_n_bounded} \begin{aligned} \lVert y_n \rVert &\leq \lVert \nabla j(u_n)\rVert + \lVert\tfrac{1}{t_n}\mathbb{E}[u_n - t_n g_n - \textup{prox}_{t_n h}(u_n - t_n g_n)|\mathcal{F}_n] \rVert\\ &\leq \lVert \nabla j(u_n)\rVert + \mathbb{E} \big[\lVert\tfrac{1}{t_n}\big(u_n - t_n g_n - \textup{prox}_{t_n h}(u_n - t_n g_n)\big) \rVert |\mathcal{F}_n\big]\\ &\leq \lVert \nabla j(u_n)\rVert + \mathbb{E}[\lVert g_n\rVert |\mathcal{F}_n]+ \mathbb{E} \big[ \lVert\tfrac{1}{t_n}\big(u_n - \textup{prox}_{t_n h}(u_n - t_n g_n)\big) \rVert |\mathcal{F}_n\big]\\ &\leq \lVert \nabla j(u_n)\rVert + \mathbb{E}[\lVert g_n\rVert |\mathcal{F}_n]+ 2 L_{\eta}(u_n) + 2 \mathbb{E}[\lVert g_n \rVert |\mathcal{F}_n]\\ &\leq \lVert \nabla j(u_n)\rVert + 3\sqrt{M(u_n)} + 2 L_{\eta}(u_n). \end{aligned} \end{equation} The last step follows by $\mathbb{E}[\lVert g_n\rVert | \mathcal{F}_n] = \mathbb{E}[\lVert G(u_n,\xi)\rVert]$ and Assumption~\ref{asu4iii} with Jensen's inequality. We have from Assumption~\ref{asu1i} that $\{ u_n\}$ is bounded a.s.; therefore, all terms on the right-hand side of \eqref{eq:y_n_bounded} are bounded a.s. \end{proof} For Lemma~\ref{lemma:vanishing-white-noise-terms}, we need the following result, which is a generalization of a convergence theorem for quadratic variations from~\cite[p.~111]{Williams1991} to Bochner spaces. The proof can be found in Sect.~\ref{subsection:auxiliary-proofs}. \begin{lemma} \label{lemma:quadratic-variations-bounded-imply-convergence} Let $\{v_n\}$ be an $H$-valued martingale. Then $\{v_n\}$ is bounded in $L^2(\Omega,H)$ if and only if \begin{equation} \label{eq:quadratic-variations-proof} \sum_{n=1}^\infty \mathbb{E}[\lVert v_{n+1}-v_n\rVert^2] < \infty, \end{equation} and when this is satisfied, $v_n \rightarrow v_\infty$ a.s.~as $n \rightarrow \infty$. \end{lemma} \begin{lemma} \label{lemma:vanishing-white-noise-terms} The series $\sum_{j=1}^N t_j w_j $ a.s.~converges to a limit as $N \rightarrow \infty$. \end{lemma} \begin{proof} Recall the elementary inequality $\mathbb{E}[\lVert X - \mathbb{E}[X|\mathcal{F}_n]\rVert^2|\mathcal{F}_n]\leq \mathbb{E}[\lVert X\rVert^2|\mathcal{F}_n]$, which holds for any random variable $X$. By Lemma~\ref{lemma:recursion-relation} with $$X:=\tfrac{1}{t_n}(\textup{prox}_{t_n h}(u_n - t_n g_n)-u_n),$$ followed by Lemma~\ref{lemma:inequality-single-step} and Assumption~\ref{asu4iii}, we get \begin{equation} \label{eq:bounds-second-moment-white-noise} \begin{aligned} \mathbb{E}[\lVert w_n \rVert^2 | \mathcal{F}_n] &\leq \tfrac{1}{t_n^2}\mathbb{E}[\lVert \textup{prox}_{t_n h}(u_n - t_n g_n) - u_n\rVert^2 | \mathcal{F}_n] \\ &\leq 4 (L_{\eta}(u_n))^2 +4M(u_n) <\infty. \end{aligned} \end{equation} Let $v_n := \sum_{j=1}^n t_j w_j$. 
We show that $v_n$ is a square integrable martingale, i.e.,~$v_n \in L^2(\Omega, H)$ for every $n$ and $\sup_{n} \mathbb{E}[\lVert v_n \rVert^2]<\infty.$ It is clearly a martingale, since for all $n$, $\mathbb{E}[w_n|\mathcal{F}_n] = 0$ and thus $$\mathbb{E}[v_n|\mathcal{F}_n] = \mathbb{E}[t_n w_n |\mathcal{F}_n] + \sum_{j=1}^{n-1} t_j w_j = v_{n-1}.$$ To show that $v_n$ is square integrable, we use \eqref{eq:bounds-second-moment-white-noise} and the fact that $\mathbb{E}[v_n]=0$ for all $n$ to conclude that its quadratic variations are bounded. Indeed, \begin{align*} A_n &:= \sum_{j=2}^n \mathbb{E}[\lVert v_{j} - v_{j-1} \rVert^2 | \mathcal{F}_{j}]= \sum_{j=2}^{n} t_j^2 \mathbb{E}[\lVert w_j \rVert^2|\mathcal{F}_j]. \end{align*} Because of the condition \eqref{eq:Robbins-Monro-stepsizes}, we have that $\sup_n \mathbb{E}[A_n] < \infty.$ We have obtained that $\{v_n\}$ is square integrable, so by Lemma~\ref{lemma:quadratic-variations-bounded-imply-convergence}, it follows that $\{v_n\}$ converges a.s.~to a limit as $n\rightarrow \infty$. \end{proof} \begin{lemma} \label{lemma:easy-lemma} The following is true with probability one: \begin{equation} \label{eq:Cauchy-sequence_u_n} \lim_{n \rightarrow \infty} \lVert u_{n+1} - u_n \rVert = 0. \end{equation} \end{lemma} \begin{proof} This is a simple consequence of \eqref{eq:fundamental-recursion} and a.s.~boundedness of $ y_n$, $r_n $, and $ w_n$ for all $n$ by Lemma~\ref{lemma:vanishing-diff-inclusion-approximation}, Assumption~\ref{asu1iii}, and Lemma~\ref{lemma:vanishing-white-noise-terms}, respectively. \end{proof} \begin{lemma} \label{lemma:distance-between-sets-to-zero} For any sequence $\{z_n\}$ in $C$ such that $z_n \rightarrow z$ as $n \rightarrow \infty$, it follows that \begin{equation} \label{eq:dist_statement} \lim_{m\rightarrow \infty} d\left( \frac{1}{m} \sum_{n=1}^{m} S_{n}(z_{n}), S(z) \right) = 0 \quad \text{a.s.} \end{equation} \end{lemma} \begin{proof} Notice that $C$ is closed, so $z\in C$. The fact that $S(z)$ is nonempty, closed, and convex follows by these properties of $\nabla j(z)$, $\partial \eta(z)$, and $N_C(z)$. We define $g_n^\xi:=G(z_n,\xi)$ and \begin{equation} \label{eq:S_n_tilde} \tilde{S}_n(z_n, \xi):=- \nabla j(z_n) - \tfrac{1}{t_n} (z_n -t_n g_n^\xi - \textup{prox}_{t_n h}(z_n - t_n g_n^\xi)). \end{equation} Clearly, $\mathbb{E}xi[\tilde{S}_n(z_n, \xi)] = S_n(z_n).$ Now, by Jensen's inequality and convexity of the mapping $u \mapsto d (u,S(z))$, \begin{align*} d\left( \frac{1}{m} \sum_{n=1}^{m} S_{n}(z_{n}),S(z) \right) &\leq \frac{1}{m} \sum_{n=1}^{m} d(S_n(z_n),S(z))\\ &\leq \frac{1}{m} \sum_{n=1}^{m} \mathbb{E}xi \left[d(\tilde{S}_n(z_n, \xi),S(z))\right]. \end{align*} Notice that $\bar{z} = \textup{prox}_{t h}(u)$ if and only if $0 \in \partial \eta (\bar{z})+ N_C(\bar{z}) +\tfrac{1}{t}(\bar{z}-u)$, so with \begin{equation} \label{eq:ch3-z-bar} \bar{z}_n:=\textup{prox}_{t_n h}(z_n -t_n g_n^\xi), \end{equation} there exist $\zeta_{\eta,n} \in \partial \eta(\bar{z}_n)$ and $\zeta_{C,n} \in N_C(\bar{z}_n)$ such that \begin{equation} \label{eq:optimality-prox-step-in-dist-proof} -(\zeta_{\eta,n} + \zeta_{C,n}) = \tfrac{1}{t_n}(\bar{z}_n - z_n + t_n g_n^\xi). \end{equation} Because $\{ z_n\}$ converges, it is contained in a bounded set. 
Hence, by Lemma~\ref{lemma:inequality-single-step}, we get \begin{equation} \label{eq:boundedness_of_zeta_h_zeta_C} \begin{aligned} \lVert \zeta_{\eta,n} + \zeta_{C,n} \rVert &= \tfrac{1}{t_n} \lVert \bar{z}_n - z_n + t_n g_n^\xi \rVert \leq 2 L_{\eta}(z_n) + 3\lVert g_n^\xi\rVert, \end{aligned} \end{equation} which must be almost surely finite by Assumption~\ref{asu4iv}. Now, by \eqref{eq:S_n_tilde} and \eqref{eq:ch3-z-bar}, followed by \eqref{eq:optimality-prox-step-in-dist-proof}, \begin{align*} d(\tilde{S}_n(z_n, \xi),S(z)) &= d(- \nabla j(z_n) + \tfrac{1}{t_n} (\bar{z}_n - z_n +t_n g_n^\xi),S(z))\\ & = d(- \nabla j(z_n) - \zeta_{\eta,n} - \zeta_{C,n}, S(z)). \end{align*} By the simple rule $d(u+v,A+B) \leq d(u,A)+d(v,B)$ for sets $A$ and $B$ and points $u, v\in H$, we get by the definition of $S(z)$ that $$ d(\tilde{S}_n(z_n, \xi),S(z)) \leq \lVert \nabla j(z_n)-\nabla j(z)\rVert+ d(\zeta_{\eta,n},\partial \eta(z)) + d(\zeta_{C,n},N_C(z)).$$ By strong-to-weak sequential closedness of $\textup{gra}(\partial \eta)$ and $\textup{gra}( N_C)$ as well as continuity of $\nabla j$, it follows that \begin{equation} \label{eq:distance-sequence-to-set-of-solutions-diff-incl.} \lim_{n \rightarrow \infty} d(\tilde{S}_n(z_n, \xi),S(z)) = 0 \quad \text{a.s.} \end{equation} We show that $d(\tilde{S}_n(z_n, \xi),S(z))$ is almost surely bounded by an integrable function $\tilde{M}(z)$ for all $n$. Using elementary arguments and \eqref{eq:boundedness_of_zeta_h_zeta_C} in the third inequality, \begin{align*} & d(\tilde{S}_n(z_n, \xi),S(z))\\ &\leq \quad d(-\nabla j(z_n) -\zeta_{\eta,n} - \zeta_{C,n},S(z))\\ &\leq \quad\lVert \nabla j(z_n) -\nabla j(z)\rVert + d (\zeta_{\eta,n} + \zeta_{C,n},\partial \eta(z) + N_C(z))\\ & \leq \quad\lVert \nabla j(z_n) - \nabla j(z)\rVert + 2 L_{\eta}(z_n) + 3\lVert g_n^\xi\rVert + d(0,\partial \eta(z) + N_C(z))\\ &\leq \quad\sup_{n \in \mathbb{N}} \left\lbrace\lVert \nabla j(z_n) - \nabla j(z)\rVert + 2 L_{\eta}(z_n) + 3\lVert g_n^\xi\rVert + d(0,\partial \eta(z) + N_C(z))\right\rbrace, \end{align*} which is almost surely bounded by Assumption~\ref{asu4ii} and Assumption~\ref{asu4iv}. By the dominated convergence theorem, it follows from \eqref{eq:distance-sequence-to-set-of-solutions-diff-incl.} that $\mathbb{E}_\xi [d(\tilde{S}_n(z_n, \xi),S(z))]\rightarrow 0$ as $n \rightarrow\infty$. Finally, \eqref{eq:dist_statement} follows from the fact that if $a_n \rightarrow 0$ as $n \rightarrow \infty$, then $\tfrac{1}{m}\sum_{n=1}^m a_n \rightarrow 0$ as $m \rightarrow \infty$. \end{proof} Now we will show a compactness result, adapted from \cite{Duchi2018}, namely that in the limit, the time shifts of the linear interpolation of the sequence $\{ u_n\}$ can be made arbitrarily close to trajectories, or solutions, of the differential inclusion \begin{equation} \label{eq:differential-inclusion} \dot{z}(t) \in S(z(t)). \end{equation} The set $C(I,H)$ denotes the space of continuous functions from $I$ to $H$. We recall that if $z(\cdot) \in C([0,\infty),H)$ satisfies \eqref{eq:differential-inclusion} and is absolutely continuous on any compact interval $[a,b] \subset (0,\infty)$, it is called a strong solution. The existence and uniqueness of this solution are guaranteed by the following result. \begin{proposition} \label{thm:Brezis-well-posedness-differential-inclusion} For every $z_0=z(0)\in C$ there exists a unique strong solution $z \in C([0,\infty),H)$ to the differential inclusion \eqref{eq:differential-inclusion}.
\end{proposition} \begin{proof} The function $u \mapsto \eta(u) + \delta_C(u)$ is proper, convex, and lower semicontinuous and $B := - \nabla j$ is Lipschitz continuous. Therefore, by \cite[Proposition 3.12]{Brezis1973}, the statement follows. \end{proof} For the next result, we set $s_n := \sum_{j=1}^{n-1} t_j$ and define the linear interpolation $u:[0,\infty) \rightarrow H$ of iterates as well as the piecewise constant extension $y:[0,\infty) \rightarrow H$ of the sequence $\{y_n\}$ via \begin{equation} \label{eq:interpolation-sequences-un-yn} u(t) := u_n + \frac{t-s_n}{s_{n+1}-s_n} (u_{n+1} - u_n), \quad y(t) := y_n, \quad \forall t \in [s_n,s_{n+1}), \forall n \in \mathbb{N}. \end{equation} The time shifts of $u(\cdot)$ are denoted by $u(\cdot+\tau)$ for $\tau>0$. We define $u^\tau:[0,\infty) \rightarrow H$ by \begin{equation} \label{eq:absolutely-continuous-trajectory} u^\tau(t):=u(\tau) + \int_\tau^{t} y(s) \,\mathrm{d} s \end{equation} as the solution to the ODE $$\dot{u}^\tau(\cdot) = y(\cdot), \quad u^\tau(\tau) = u(\tau),$$ which is guaranteed to exist by \cite[Theorem 1.4.35]{Cazenave1998}. \begin{theorem} \label{theorem:compactness-result} For any $T>0$ and any nonnegative sequence $\{ \tau_n\}$, the sequence of time shifts $\{ u(\cdot +\tau_n)\}$ is relatively compact in $C([0,T],H)$. If $\tau_n \rightarrow \infty$, all limit points $\bar{u}(\cdot)$ of the time shifts $\{ u(\cdot+\tau_n)\}$ are in $C([0,T],H)$ and there exists a $\bar{y}:[0,T] \rightarrow H$ such that $\bar{y}(t) \in S(\bar{u}(t))$ and $\bar{u}(t) = \bar{u}(0) + \int_0^t \bar{y}(s) \,\mathrm{d} s.$ \end{theorem} \begin{proof} \textbf{Relative compactness of time shifts.} We first claim that for all $T>0$, \begin{equation} \label{eq:compactness-result-claim1} \lim_{\tau \rightarrow \infty} \sup_{t \in [\tau, \tau+T]} \lVert u^{\tau}(t)-u(t) \rVert = 0 \quad \text{a.s.} \end{equation} We consider a fixed (but arbitrary) sample path $\omega = (\omega_1, \omega_2, \dots)$ throughout the proof. Let $p:=\min\{n:s_n \geq \tau\}$ and $q:=\max\{n:s_n \leq t\}$. By \eqref{eq:absolutely-continuous-trajectory} and \eqref{eq:interpolation-sequences-un-yn}, \begin{equation} \label{eq:proof-compactness-absolutely_continuous} \begin{aligned} u^\tau(t) &= u(\tau) + \int_{\tau}^t y(s) \,\mathrm{d} s = u(\tau) + \int_{\tau}^{s_{p}} y(s) \,\mathrm{d} s + \sum_{\ell=p}^{q-1} t_\ell y_\ell + \int_{s_{q}}^t y(s) \,\mathrm{d} s. \end{aligned} \end{equation} Notice that due to the recursion~\eqref{eq:fundamental-recursion}, \begin{equation} \label{eq:proof-compactness-2} \sum_{\ell = p}^{q-1} t_\ell y_\ell = u_{q} - u_{p} - \sum_{\ell=p}^{q-1} t_\ell (w_\ell-r_\ell). \end{equation} Plugging \eqref{eq:proof-compactness-2} into \eqref{eq:proof-compactness-absolutely_continuous}, we get \begin{align*} u^\tau(t) - u(t) &= u(\tau) + u_{q} - u_{p} - u(t) + \int_{\tau}^{s_{p}} y(s) \,\mathrm{d} s \\ &\quad \quad - \sum_{\ell=p}^{q-1} t_\ell (w_\ell - r_\ell)+ \int_{s_{q}}^t y(s) \,\mathrm{d} s. \end{align*} Therefore, \begin{align*} \lVert u^\tau(t) - u(t) \rVert &\leq \left\lVert u(\tau) - u_{p} +\int_{\tau}^{s_{p}}y(s) \,\mathrm{d} s \right\rVert + \left\lVert u_{q} - u(t) +\int_{s_{q}}^{t} y(s) \,\mathrm{d} s\right\rVert \\ &\quad \quad + \left\lVert \sum_{\ell=p}^{q-1} t_\ell w_\ell\right\rVert + \left\lVert \sum_{\ell=p}^{q-1} t_\ell r_\ell\right\rVert.
\end{align*} Note that by \eqref{eq:interpolation-sequences-un-yn}, it follows that \begin{align*} \lVert u(\tau) - u_{p} \rVert &\leq \lVert u_{p-1} - u_{p}\rVert = t_{p-1} \lVert y_{p-1} - r_{p-1} + w_{p-1}\rVert, \\ \lVert u_{q} - u(t)\rVert &\leq \lVert u_{q} - u_{q+1}\rVert = t_{q}\lVert y_{q} - r_{q} + w_{q}\rVert. \end{align*} Moreover, by \eqref{eq:interpolation-sequences-un-yn}, we have $$\left\lVert \int_{\tau}^{s_{p}} y(s) \,\mathrm{d} s\right\rVert \leq t_{p-1} \lVert y_{p-1}\rVert \quad \text{and} \quad \left\lVert \int_{s_{q}}^{t} y(s) \,\mathrm{d} s\right\rVert \leq t_{q} \lVert y_{q}\rVert.$$ Therefore, \begin{equation} \label{eq:inequality-ODE-solutions-interpolation} \begin{aligned} \lVert u^\tau(t) - u(t) \rVert &\leq t_{p-1} (2\lVert y_{p-1}\rVert +\lVert r_{p-1}\rVert+ \lVert w_{p-1}\rVert)\\ & \qquad +t_{q} (2\lVert y_{q}\rVert +\lVert r_{q}\rVert+ \lVert w_{q}\rVert) + \left\lVert \sum_{\ell=p}^{q-1} t_\ell w_\ell\right\rVert + \left\lVert \sum_{\ell=p}^{q-1} t_\ell r_\ell\right\rVert. \end{aligned} \end{equation} We take the limit $p,q \rightarrow \infty$ on the right-hand side of \eqref{eq:inequality-ODE-solutions-interpolation} and observe that by Lemma~\ref{lemma:vanishing-diff-inclusion-approximation}, $\lim_{n \rightarrow \infty} \sup_{m \geq n} t_m \lVert y_m \rVert = 0$ and by Lemma~\ref{lemma:vanishing-white-noise-terms}, we have $\lim_{n \rightarrow \infty} \sup_{m \geq n} \lVert \sum_{\ell=n}^{m-1} t_\ell w_\ell\rVert = 0$ as well as $\lim_{n \rightarrow \infty} \sup_{m\geq n} t_m \lVert w_m\rVert$. By Assumption~\ref{asu1iii}, we have $\lim_{n \rightarrow \infty} \sup_{m \geq n} \left\lVert \sum_{\ell=n}^{m-1} t_\ell r_\ell \right\rVert = 0.$ We have shown \eqref{eq:compactness-result-claim1}, so it follows that the set $$A:=\{u^\tau(\cdot): \tau \in [0,\infty)\}$$ is a family of equicontinuous functions. To invoke the Arzel\`{a}--Ascoli theorem, we first show that the set $$A(t):=\{ u^{\tau}(t): \tau \in [0,\infty)\}$$ is relatively compact for all $t\in [0,T]$, $T>0$. We show this by proving that arbitrary sequences in $A(t)$ have a Cauchy subsequence, which converge in $H$ by completeness of $H$. To this end, let $\varepsilon>0$ be arbitrary and observe first the case $\tau_n \rightarrow \infty.$ Let $n_k$ be the index such that $\tau_k \in [s_{n_k}, s_{n_k+1})$ and $$u^{\tau_k}(t) = u_{n_k} + \frac{\tau_k - s_{n_k}}{s_{n_k+1}-s_{n_k}} (u_{n_k+1} - u_{n_k}) + \int_{\tau_k}^t y(s) \,\mathrm{d} s.$$ Similarly, let $m_j$ be the index such that $\tau_j \in [s_{m_j},s_{m_j+1})$. Thus we have \begin{equation} \label{eq:first-cauchy-inequality} \begin{aligned} &\lVert u^{\tau_k}(t) - u^{\tau_j}(t)\rVert \\ &\qquad \leq \left\lVert \frac{\tau_k - s_{n_k}}{s_{n_k+1}-s_{n_k}} ( u_{n_k+1} - u_{n_k}) - \frac{\tau_j - s_{m_j}}{s_{m_j+1}-s_{m_j}} ( u_{m_j+1} - u_{m_j}) \right\rVert \\ &\qquad \qquad + \left\lVert u_{n_k} - u_{m_j} + \int_{\tau_k}^{\tau_j} y(s) \,\mathrm{d} s\right\rVert. \end{aligned} \end{equation} Using \eqref{eq:proof-compactness-2}, we get (w.l.o.g.~$\tau_k \leq \tau_j$) \begin{equation} \label{eq:second-cauchy-inequality} \begin{aligned} \left\lVert u_{n_k} - u_{m_j} + \int_{\tau_k}^{\tau_j} y(s) \,\mathrm{d} s\right\rVert &\leq \lVert u_{n_k} - u_{n_k+1} \rVert + \left\lVert \int_{\tau_k}^{s_{n_k+1}} y(s) \,\mathrm{d} s \right\rVert\\ & \quad\quad +\left\lVert \int_{s_{m_j}}^{\tau_j} y(s) \,\mathrm{d} s \right\rVert +\left\lVert \sum_{\ell=n_k+1}^{m_j-1} t_\ell (w_\ell - r_\ell)\right\rVert. 
\end{aligned} \end{equation} Combining \eqref{eq:first-cauchy-inequality} and \eqref{eq:second-cauchy-inequality}, and observing that $\left|\tfrac{\tau_k - s_{n_k}}{s_{n_k+1}-s_{n_k}}\right| \leq 1$ as well as $\left|\tfrac{\tau_j - s_{m_j}}{s_{m_j+1}-s_{m_j}}\right| \leq 1$, we obtain \begin{equation} \begin{aligned} \label{eq:third-cauchy-inequality} \lVert u^{\tau_k}(t) - u^{\tau_j}(t)\rVert &\leq 2 \lVert u_{n_k+1} - u_{n_k}\rVert + \lVert u_{m_j+1} - u_{m_j}\rVert + t_{n_k} \lVert y_{n_k}\rVert \\ & \quad\quad + t_{m_j} \lVert y_{m_j}\rVert + \left \lVert \sum_{\ell=n_k+1}^{m_j-1} t_\ell (w_\ell - r_\ell)\right\rVert. \end{aligned} \end{equation} By Lemma~\ref{lemma:easy-lemma} as well as convergence of the other terms on the right-hand side of \eqref{eq:third-cauchy-inequality}, for $\varepsilon >0$ there exists a $N$ such that for all $k, j > N$, $\lVert u^{\tau_k}(t) - u^{\tau_j}(t)\rVert \leq \varepsilon$ for all $k, j > N$ and thus $\{ u^{\tau_n}(t)\}$ has a Cauchy subsequence for $\tau_n \rightarrow \infty$. Now we observe the case where the sequence $\{\tau_n \}$ is bounded. Then $\tau_n \rightarrow \bar{\tau}$ for some $\bar{\tau}>0$ at least on a subsequence (with the same labeling). By convergence of $\{\tau_n\}$ we get that $m_j=n_k$ for $k, j \geq N$ and $N$ large enough. Therefore \eqref{eq:first-cauchy-inequality} reduces to \begin{equation} \label{eq:fifth-cauchy-inequality} \lVert u^{\tau_k}(t) - u^{\tau_j}(t)\rVert \leq \left\lvert \frac{\tau_k - \tau_j}{s_{n_k+1}-s_{n_k}}\right\rvert \lVert u_{n_k+1}-u_{n_k} \rVert + \left\lVert \int_{\tau_k}^{\tau_j} y(s) \,\mathrm{d} s \right\rVert. \end{equation} We can bound terms on the right-hand side of \eqref{eq:fifth-cauchy-inequality} as before to obtain that $\{ u^{\tau_n}(t) \}$ has a Cauchy subsequence. We have shown that $A(t)$ is relatively compact for all $t\in [0,T]$, $T>0$, so by the Arzel\`{a}--Ascoli theorem, it follows that the set $A$ is relatively compact. Now, the relative compactness of the set of time shifts $\{ u(\cdot +\tau): \tau \in [0,\infty)\}$ follows from the relative compactness of the set $A$. Indeed, for any sequence $\{u^{\tau_n}(\cdot + \tau_n)\}$ there exists a convergent subsequence such that $u^{\tau_{n_k}}(\cdot + \tau_{n_k}) \rightarrow \bar{u}(\cdot)$ for some $\bar{u}(\cdot) \in C([0,T],H)$. Now, for the time shift $u(\cdot +\tau_{n_k})$, we have \begin{align*} &\sup_{t \in [0,T]}\lVert u(t+\tau_{n_k}) - \bar{u}(t)\rVert \\ &\qquad \leq \sup_{t \in [0,T]}\lVert u(t+\tau_{n_k}) - u^{\tau_{n_k}}(t+\tau_{n_k})\rVert + \sup_{t \in [0,T]}\lVert u^{\tau_{n_k}}(t+\tau_{n_k}) - \bar{u}(t)\rVert,\end{align*} so it follows that $u(\cdot +\tau_{n_k}) \rightarrow \bar{u}(\cdot)$ in $C([0,T],H)$ as $\tau_{n_k}\rightarrow \infty$ by convergence of $u^{\tau_{n_k}}(\cdot)$ and \eqref{eq:compactness-result-claim1}. If $\tau_{n_k} \rightarrow \bar{\tau}$, then $u(\cdot+\tau_{n_k}) \rightarrow u(\cdot+\bar{\tau})$ by uniform continuity of $u(\cdot)$ on $[0,\bar{\tau}+T].$ \textbf{Limit points are trajectories of the differential inclusion.} Let $\{ \tau_n\}$ be a sequence such that as $\tau_n \rightarrow \infty$, $u^{\tau_n}(\cdot+\tau_n) \rightarrow \bar{u}(\cdot)$ in $C([0,T],H)$ (potentially on a subsequence). 
The sequence $\{ y(\cdot+\tau_n) \} \subset L^2([0,T], H)$ is bounded by boundedness of $\{ y_n\}$, and since $L^2([0,T], H)$ is a Hilbert space, there exists a subsequence $\{ n_k\}$ such that $y(\cdot+\tau_{n_k}) \rightharpoonup \bar{y}(\cdot)$ in $L^2([0,T],H)$ for some $\bar{y} \in L^2([0,T],H)$. Notice that for $\{\tau_{n_k}\}$, by \eqref{eq:absolutely-continuous-trajectory} it follows that \begin{equation} \label{eq:time-shifted-subsequence} u^{\tau_{n_k}}(t+\tau_{n_k}) = u^{\tau_{n_k}}(\tau_{n_k}) + \int_0^t y(s+\tau_{n_k}) \,\mathrm{d} s. \end{equation} By \eqref{eq:compactness-result-claim1}, $u^{\tau_{n_k}}( \cdot+\tau_{n_k}) \rightarrow \bar{u}(\cdot)$ in $C([0,T],H)$ as $k \rightarrow \infty$. Taking $k \rightarrow \infty$ on both sides of \eqref{eq:time-shifted-subsequence} we get, due to $y(\cdot+\tau_{n_k}) \rightharpoonup \bar{y}(\cdot)$ for $ t \in [0,T]$, that $$\bar{u}(t) = \bar{u}(0) + \int_0^t \bar{y}(s) \,\mathrm{d} s.$$ Now, we will show that $\bar{y}(t) \in S(\bar{u}(t))$ for a.e.~$t \in [0,T]$. By the Banach-Saks theorem (cf.~\cite{Okada1984}), there exists a subsequence of $\{ y(\cdot+\tau_{n_k})\}$ (where we use the same notation for the sequence as its subsequence) such that \begin{equation} \label{eq:Banach-Saks-sum} \lim_{m \rightarrow \infty}\frac{1}{m} \sum_{k=1}^m y(\cdot+\tau_{n_k}) = \bar{y}(\cdot). \end{equation} Recall that $y_n = S_n(u_n)$ by Lemma~\ref{lemma:recursion-relation} and set $\ell_k^t:= \max\{ \ell: s_\ell \leq t+\tau_{n_k}\}.$ Then we have $$y(t+\tau_{n_k}) = y(s_{\ell_k^t}) = y_{\ell_k^t} = S_{\ell_k^t}(u_{\ell_k^t}).$$ Therefore, since $t+\tau_{n_k} \in [\ell_k^t, \ell_k^t+1]$, \begin{equation} \label{eq:convergence-of-iterates-to-limit} \begin{aligned} \lVert u(s_{\ell_k^t}) - \bar{u}(t)\rVert &\leq \lVert u(s_{\ell_k^t}) - u(t+\tau_{n_k}) \rVert + \lVert u(t+\tau_{n_k}) - \bar{u}(t) \rVert\\ & \leq \lVert u(s_{\ell_k^t}) - u(s_{\ell_k^t+1}) \rVert + \lVert u(t+\tau_{n_k}) - \bar{u}(t) \rVert\\ & \leq t_{\ell_k^t} (\lVert y_{\ell_k^t}\rVert + \lVert r_{\ell_k^t}\rVert+ \lVert w_{\ell_k^t}\rVert) +\lVert u(t+\tau_{n_k}) - \bar{u}(t) \rVert, \end{aligned} \end{equation} which a.s.~converges to zero as $k \rightarrow \infty$, since $u(\cdot+\tau_{n_k}) \rightarrow \bar{u}(\cdot)$ and the fact that $t_{n} \rightarrow 0$ by \eqref{eq:Robbins-Monro-stepsizes} (combined a.s.~boundedness of $y_n, r_n$, and $w_n$ for all $n$ by Lemma~\ref{lemma:vanishing-diff-inclusion-approximation}, Assumption~\ref{asu1iii}, and Lemma~\ref{lemma:vanishing-white-noise-terms}, respectively). Now, using $y(t+\tau_{n_k}) = y_{\ell_k^t}$, we get \begin{align*} &d(\bar{y}(t),S(\bar{u}(t)))\\ &\qquad \leq \left\lVert \frac{1}{m} \sum_{k=1}^m y(t+\tau_{n_k}) - \bar{y}(t) \right\rVert + d\left(\frac{1}{m} \sum_{k=1}^m y(t+\tau_{n_k}), S(\bar{u}(t))\right) \\ &\qquad \leq \left\lVert \frac{1}{m} \sum_{k=1}^m y(t+\tau_{n_k}) - \bar{y}(t) \right\rVert + d\left(\frac{1}{m} \sum_{k=1}^m S_{\ell_k^t}(u(s_{\ell_k^t})), S(\bar{u}(t))\right), \end{align*} which converges to zero as $m \rightarrow \infty$ by \eqref{eq:Banach-Saks-sum} and Lemma~\ref{lemma:distance-between-sets-to-zero}, where we note that {$u(s_{\ell_k^t}) \rightarrow \bar{u}(t)$} as $k \rightarrow \infty$ by \eqref{eq:convergence-of-iterates-to-limit}. Since $S(\bar{u}(t))$ is a closed set and the sample path was chosen to be arbitrary, we have that the statement must be true with probability one. 
\end{proof} Now, we show that there is always a strict decrease in $\varphi$ along a trajectory that originates at a noncritical point $z(0)$. \begin{lemma} \label{lemma:descent-property} If $z: [0,\infty) \rightarrow C$ is a trajectory satisfying the differential inclusion \eqref{eq:differential-inclusion} and $0 \not\in S(z(0))$, then there exists a $T>0$ such that \begin{equation} \label{eq:descent-property} \varphi(z(T)) < \sup_{t \in [0,T]} \varphi(z(t)) \leq \varphi(z(0)). \end{equation} \end{lemma} \begin{proof} We modify the proof from \cite[Lemma 5.2]{Davis2018}. Let $\delta, \tau$ with $0<\delta<\tau$ be fixed but arbitrary. From Theorem~\ref{thm:Brezis-well-posedness-differential-inclusion} we have that $z$ is absolutely continuous on $[\delta,\tau]$. It is straightforward to show that $\varphi\circ z: [\delta,\tau] \rightarrow \mathbb{R}$ is absolutely continuous, since $C$ is bounded and $\varphi \circ z$ is a composition of a locally Lipschitz map with an absolutely continuous function. Therefore, by Rademacher's theorem, it is differentiable for almost every $t\in [\delta,\tau].$ On the other hand, notice that since $\eta$ is locally Lipschitz near $z(t)$ and convex, it is Clarke regular, so the chain rule $\partial(\eta \circ z)(t) = \partial \eta(z(t)) \circ \dot{z}(t)$ holds by \cite[Theorem 2.3.10]{Clarke1990}. The chain rule for $j$ holds by differentiability. Therefore, for almost every $t$, it follows for all $v \in \partial \varphi(z(t))$ that \begin{equation} \label{eq:chain-rule-step} (\varphi \circ z)'(t) = \partial (\varphi \circ z)(t) = (\nabla j(z(t)) + \partial \eta(z(t)))\circ \dot{z}(t) = \langle v, \dot{z}(t) \rangle. \end{equation} We now observe the following property for the subdifferential of $\delta_C$, namely, \begin{equation}\label{eq:chain-rule-step2} \langle v, \dot{z}(t)\rangle =0 \quad \forall v\in N_C(z(t)). \end{equation} Indeed, since $z(\cdot)$ takes values in $C$ and by the definition of the subdifferential, for all $r\geq 0$ it follows that \begin{equation*} 0=\delta_C(z(t+r))-\delta_C(z(t)) \geq \langle v, z(t+r)-z(t)\rangle. \end{equation*} Hence, \begin{equation*} 0\geq \lim_{r\rightarrow 0^+} \left\langle v,\frac{z(t+r)-z(t)}{r}\right\rangle = \langle v,\dot{z}(t)\rangle. \end{equation*} The reverse inequality can be obtained by using the left limit of the difference quotient, and we get \eqref{eq:chain-rule-step2}. By \eqref{eq:chain-rule-step} and \eqref{eq:chain-rule-step2}, we obtain for a.e.~$t$ that \begin{equation}\label{eq:chain_rule} \langle v, \dot{z}(t)\rangle = \partial (\varphi \circ z)(t) \quad \forall v\in -S(z(t)). \end{equation} We now show that $\lVert \dot{z}(t) \rVert = d(0,S(z(t)))$. Trivially, $d(0,S(z(t))) \leq \lVert \zeta -0\rVert$ for all {$\zeta \in S(z(t))$,} so it follows that $d(0,S(z(t))) \leq \lVert \dot{z}(t)\rVert.$ Notice that for all $v, w \in \partial \varphi(z(t))$, by \eqref{eq:chain-rule-step}, $0 = \langle v-w, \dot{z}(t)\rangle.$ Setting {$W := \text{span}(\partial \varphi(z(t)) - \partial \varphi(z(t)))$,} we get $\dot{z}(t) \in W^\perp$. Clearly, {$-\dot{z}(t) \in (-\dot{z}(t) + W) \cap W^\perp$} so $\lVert \dot{z}(t)\rVert \leq d(0, -\dot{z}(t)+W)$.
Since $\partial \varphi(z(t)) \subset \dot{z}(t)+W$, it follows that {$\lVert \dot{z}(t) \rVert \leq d(0, \partial \varphi(z(t)))$}, and we get $\lVert \dot{z}(t) \rVert = d(0,S(z(t)))$. Now, notice that by \eqref{eq:chain_rule} and the fact that $\dot{z}(t) \in S(z(t))$, we have for a.e.~$t$ that $$ \partial({\varphi} \circ z)(t) = -\lVert \dot{z}(t) \rVert^2 = -d(0,S(z(t)))^2.$$ Since $\varphi \circ z$ is absolutely continuous on $[\delta,\tau]$, \begin{equation} \label{eq:distance-in-proof-for-chain-rule} \varphi( z (\tau)) = \varphi( z (\delta)) - \int_{\delta}^\tau d(0, S(z(s)))^2 \,\mathrm{d} s \end{equation} and hence $\varphi(z(\delta)) \geq \varphi(z(\tau))$. Using the continuity of $\varphi \circ z$ and the fact that $0<\delta<\tau$ were chosen arbitrarily, we get $\varphi(z(0)) \geq \varphi(z(t))$ for all $t>0$. To finish the proof, we must find some $T>0$ such that $\varphi(z(T)) < \sup_{t \in [0,T]}\varphi(z(t))$. Suppose that $d(0,S(z(t))) = 0$ for a.e.~$t \in [0,T]$ and all $T>0$. Since $\lVert \dot{z}(t) \rVert = d(0,S(z(t)))$, it follows that $z \equiv z(0)$. This is a contradiction, since $\dot{z}(\cdot) \in S(z(\cdot))$ and $0 \not\in S(z(0))$. By \eqref{eq:distance-in-proof-for-chain-rule}, we conclude that there exists a $T>0$ such that \eqref{eq:descent-property} holds. \end{proof} The following proof is standard, but several arguments need to be adapted to the infinite-dimensional setting. We will proceed as in \cite{Davis2018}. We define the level sets of $\varphi$ as $$\mathcal{L}_r := \{u\in H: \varphi(u) \leq r \}.$$ \begin{proposition} For all $\varepsilon>0$ there exists an $N$ such that for all $n\geq N$, if $u_n \in \mathcal{L}_\varepsilon$, then $u_{n+1} \in \mathcal{L}_{2 \varepsilon}$ a.s. \end{proposition} \begin{proof} First, we remark that $\varphi$ is uniformly continuous on $V$, since $\eta(\cdot)$ satisfies \eqref{eq:local-Lipschitz-bound-h} and is therefore Lipschitz continuous on $V$, and since $j$ is Lipschitz continuous on $V$. Therefore, for any $\varepsilon>0$ there exists a $\delta>0$ such that if $\lVert u_{n+1}- u_n \rVert < \delta$, then $ |\varphi(u_{n+1}) - \varphi(u_n)| < \varepsilon.$ Now, we choose $N$ such that $\lVert u_{n+1} - u_n \rVert < \delta$ for all $n\geq N$, which is possible by Lemma~\ref{lemma:easy-lemma}. Then it must follow that $|\varphi(u_{n+1}) - \varphi(u_n)| < \varepsilon$ for all $n\geq N$ as well. Now, since $u_n \in \mathcal{L}_\varepsilon$, it follows that $\varphi(u_{n+1}) \leq 2\varepsilon$ and therefore $u_{n+1} \in \mathcal{L}_{2\varepsilon}$. \end{proof} \begin{lemma} \label{lemma:same-limits} The following equalities hold: \begin{equation} \label{eq:limit-sequence-limit-trajectory} \liminf_{n \rightarrow \infty} \varphi(u_n) = \liminf_{t\rightarrow \infty} \varphi(u(t)) \quad \text{and} \quad \limsup_{n \rightarrow \infty} \varphi(u_n) = \limsup_{t\rightarrow \infty} \varphi(u(t)). \end{equation} \end{lemma} \begin{proof} We argue that $\liminf_{n \rightarrow \infty} \varphi(u_n) \leq \liminf_{t\rightarrow \infty} \varphi(u(t))$; the other direction is clear by the construction of $u(\cdot)$ from \eqref{eq:interpolation-sequences-un-yn}. Let $\{\tau_n\}$ be a sequence such that $\tau_n \rightarrow \infty$, $\lim_{n \rightarrow \infty} u(\tau_n) = \bar{u}$ for some $\bar{u} \in H$, and $\liminf_{n \rightarrow \infty} \varphi(u(\tau_n)) = \varphi(\bar{u})$.
With $k_n := \max \{ k: s_k \leq \tau_n\}$, we get \begin{align*} \lVert u_{k_n} - \bar{u} \rVert \leq \lVert u_{k_n} - u(\tau_n) \rVert + \lVert u(\tau_n) - \bar{u} \rVert \leq \lVert u_{k_n} - u_{k_n+1} \rVert + \lVert u(\tau_n) - \bar{u} \rVert, \end{align*} which converges to zero as $n\rightarrow \infty$ by \eqref{eq:Cauchy-sequence_u_n} and convergence of the sequence $\{ u(\tau_n)\}.$ Therefore $u_{k_n} \rightarrow \bar{u}$, and so by continuity of $\varphi$, it follows that $$\liminf_{t \rightarrow \infty} \varphi(u(t)) = \varphi(\bar{u}) = \lim_{n \rightarrow \infty} \varphi(u_{k_n}) \geq \liminf_{n \rightarrow \infty} \varphi(u_n).$$ Analogous arguments can be made for the claim $$\limsup_{n \rightarrow \infty} \varphi(u_n) = \limsup_{t\rightarrow \infty} \varphi(u(t)).$$ \end{proof} \begin{lemma} \label{lemma:exit-lemma} Only finitely many iterates $u_n$ are contained in $H \backslash \mathcal{L}_{2\varepsilon}.$ \end{lemma} \begin{proof} We choose $\varepsilon>0$ such that $\varepsilon \notin \varphi(S^{-1}(0)),$ which is possible for arbitrarily small $\varepsilon$ by Assumption~\ref{asu4v}, where we note that $\varphi(S^{-1}(0)) = f(S^{-1}(0))$. We construct the process given by the recursion \begin{align*} i_1 & := \min \{n : u_n \in \mathcal{L}_\varepsilon \text{ and } u_{n+1}\in \mathcal{L}_{2\varepsilon} \backslash \mathcal{L}_{\varepsilon}\},\\ e_1 & := \min \{ n: n > i_1 \text{ and } u_n \in H\backslash \mathcal{L}_{2\varepsilon}\}, \\ i_2 & := \min \{n: n > e_1 \text{ and } u_n \in \mathcal{L}_\varepsilon \}, \end{align*} and so on. We argue by contradiction and recall that $s_n = \sum_{j=1}^{n-1} t_j$. Suppose that infinitely many iterates $u_n$ lie in $H \backslash \mathcal{L}_{2\varepsilon}$; then it must follow that $i_j \rightarrow \infty$ as $j \rightarrow \infty$. By Theorem~\ref{theorem:compactness-result}, $\{u(\cdot+s_{i_j})\}$ is relatively compact in $C([0,T], H)$ for all $T>0$ and there exists a subsequence (with the same labeling) and a limit point $z(\cdot)$ such that $z(\cdot)$ is a trajectory of \eqref{eq:differential-inclusion}. Now, since by construction $\varphi(u_{i_j}) \leq \varepsilon$ and $\varphi(u_{i_j+1}) > \varepsilon$, it follows that \begin{equation} \label{eq:simple-inequality-proof-exit-lemma} \begin{aligned} \varepsilon \geq \varphi(u_{i_j}) &= \varphi(u_{i_j+1}) + \varphi(u_{i_j}) - \varphi(u_{i_j+1})\\ &\geq \varepsilon + \varphi(u_{i_j})- \varphi(u_{i_j+1}). \end{aligned} \end{equation} Recall that $u_{i_j} = u(s_{i_j})$ and $u(\cdot+s_{i_j}) \rightarrow z(\cdot)$, so $\lim_{j \rightarrow \infty} u_{i_j} = z(0)$. Taking the limit $j \rightarrow \infty$ on both sides of \eqref{eq:simple-inequality-proof-exit-lemma}, by continuity of $\varphi$, we get $$\lim_{j \rightarrow \infty} \varphi(u_{i_j}) = \varphi(z(0)) = \varepsilon,$$ and since $\varepsilon \notin \varphi(S^{-1}(0))$, $z(0)$ is not a critical point of $\varphi$. Thus we can invoke Lemma~\ref{lemma:descent-property} to get the existence of a $T>0$ such that \begin{equation} \label{eq:descent-at-T} \varphi(z(T)) < \sup_{t \in [0,T]} \varphi(z(t)) \leq \varphi(z(0)) = \varepsilon.
\end{equation} By uniform convergence of $u(\cdot+s_{i_j})$ to $z(\cdot)$, it follows for $j$ sufficiently large that $$\sup_{t \in [0,T]} |\varphi(u(t+s_{i_j})) - \varphi(z(t))| < \varepsilon,$$ so $$\sup_{t \in [0,T]} \varphi(u(t+s_{i_j})) \leq \sup_{t \in [0,T]} |\varphi(u(t+s_{i_j})) - \varphi(z(t))| + \sup_{t \in [0,T]} \varphi(z(t)) \leq 2\varepsilon.$$ Therefore it must follow that \begin{equation} \label{eq:statement-to-be-contradicted} s_{e_j} > s_{i_j} + T \end{equation} for $j$ sufficiently large. We now find a contradiction to the statement \eqref{eq:statement-to-be-contradicted}. This is done by considering the sequence $\ell_j := \max \{ \ell: s_{i_j} \leq s_\ell \leq s_{i_j} + T\}.$ From \eqref{eq:descent-at-T}, we have that there exists a $\delta > 0$ such that $\varphi(z(T)) \leq \varepsilon - 2 \delta.$ Observe that \begin{align*} \lVert u_{\ell_j} - u(T+s_{i_j}) \rVert = \lVert u(s_{\ell_j}) - u(T+s_{i_j})\rVert \leq \lVert u_{\ell_j} - u_{\ell_j + 1}\rVert \rightarrow 0 \quad \text{ as } j \rightarrow \infty. \end{align*} Therefore $\lVert u_{\ell_j} - u(T+s_{i_j}) \rVert \rightarrow 0$, and since $u(T+s_{i_j}) \rightarrow z(T)$, we get $u_{\ell_j} \rightarrow z(T)$ as $j \rightarrow \infty$. By continuity, we get $\lim_{j \rightarrow \infty} \varphi(u_{\ell_j}) = \varphi(z(T)).$ Thus $\varphi(u_{\ell_j}) < \varepsilon - \delta$ for $j$ sufficiently large, a contradiction to \eqref{eq:statement-to-be-contradicted}. \end{proof} \begin{proposition} \label{prop:non-escape-argument} The limit $\lim_{t \rightarrow \infty} \varphi(u(t))$ exists. \end{proposition} \begin{proof} W.l.o.g.~assume $\liminf_{t\rightarrow \infty}\varphi(u(t))=0$; this is possible by the fact that $j$ and $\eta$ are bounded below. Choosing $\varepsilon>0$ such that $\varepsilon \notin \varphi(S^{-1}(0)),$ we have by Lemma~\ref{lemma:exit-lemma} that for $N$ sufficiently large, $u_n \in \mathcal{L}_{2\varepsilon}$ for all $n \geq N$. Since $\varepsilon$ can be chosen to be arbitrarily small, we conclude that $\lim_{t\rightarrow \infty} \varphi(u(t)) = 0.$ \end{proof} \paragraph{Proof of Theorem~\ref{theorem:convergence-variance-reduced-stochastic-gradient-decreasing-steps}.} The fact that $\{\varphi(u_n)\}$ converges follows from Proposition~\ref{prop:non-escape-argument} and Lemma~\ref{lemma:same-limits}. Since $\{u_n \} \subset C$, it trivially follows that $\{ f(u_n)\}$ converges a.s. Let $\bar{u}$ be a limit point of $\{ u_n\}$ and suppose that $0 \notin S(\bar{u})$. Let $\{ u_{n_k}\}$ be a subsequence converging to $\bar{u}$ and let $z(\cdot)$ be the limit of $\{u(\cdot+s_{n_k})\}$. Then, by Lemma~\ref{lemma:descent-property}, there exists a $T>0$ such that \begin{equation} \label{eq:assumption-to-be-contradicted-convergence} \varphi(z(T)) < \sup_{t \in [0,T]} \varphi(z(t)) \leq \varphi(\bar{u}). \end{equation} However, it follows from Proposition~\ref{prop:non-escape-argument} that $$\varphi(z(T)) = \lim_{k \rightarrow \infty} \varphi(u(T+s_{n_k})) = \lim_{t\rightarrow \infty} \varphi(u(t)) = \varphi(\bar{u}),$$ which is a contradiction to \eqref{eq:assumption-to-be-contradicted-convergence}. \section{Application to PDE-Constrained Optimization under Uncertainty} \label{sec:numerical-experiments} In this section, we apply the algorithm presented in Sect.~\ref{subsection:ODE-proof} to a nonconvex problem from PDE-constrained optimization under uncertainty. In Sect.~\ref{subsection:ModelProblem}, we set up the problem and verify conditions for convergence of the stochastic proximal gradient method.
We show numerical experiments in Sect.~\ref{subsection:experiments}. \subsection{Model Problem} \label{subsection:ModelProblem} We first introduce notation and concepts specific to our application; see \cite{Troeltzsch2009,Evans1998}. Let $D \subset \mathbb{R}^d$, $d \leq 3$, be an open and bounded Lipschitz domain. The inner product between vectors $x, y \in \mathbb{R}^d$ is denoted by $x \cdot y = \sum_{i=1}^d x_i y_i$. For a function $v:\mathbb{R}^d \rightarrow \mathbb{R}$, let $\nabla v(x) = ({\partial v(x)}/{\partial x_1}, \dots, {\partial v(x)}/{\partial x_d})^\top$ denote the gradient, and for $w: \mathbb{R}^d \rightarrow \mathbb{R}^d$, let $\nabla \cdot w(x) = {\partial w_1(x)}/{\partial x_1} + \cdots + {\partial w_d(x)}/{\partial x_d}$ denote the divergence. We define the Sobolev space $H^1(D) := \{ u\in L^2(D) : u \text{ has weak derivatives } {\partial u}/{\partial x_i} \in L^2(D), \ i =1, \dots, d\}$ and denote the closure of $C_c^\infty(D)$ in $H^1(D)$ by $H_0^1(D)$. We will focus on a semilinear diffusion-reaction equation with uncertainties, which describes transport phenomena at equilibrium and is motivated by \cite{Nouy2018}. We assume that there exist random fields $a: D \times \Omega \rightarrow \mathbb{R}$ and $r: D \times \Omega \rightarrow \mathbb{R}$, which are the diffusion and reaction coefficients, respectively. To facilitate simulation, we make a standard finite-dimensional noise assumption, meaning the random fields have the form $$a(x,\omega) = a(x,\xi(\omega)), \quad r(x,\omega) = r(x,\xi(\omega)) \quad \text{ in } D \times \Omega,$$ where $\xi(\omega) = (\xi_1(\omega), \dots, \xi_m(\omega))$ is a vector of real-valued uncorrelated random variables $\xi_{i}:\Omega \rightarrow \Xi_i \subset\mathbb{R}$. The support of the random vector will be denoted by $\Xi := \prod_{i=1}^m \Xi_i$. We consider the following PDE constraint, to be satisfied for almost every $\xi \in \Xi$: \begin{equation} \label{eq:semilinear-PDE} \begin{aligned} - \nabla \cdot (a(x,\xi) \nabla y(x,\xi)) + r(x,\xi) (y(x,\xi))^3 &= u(x), \qquad (x,\xi) \in D \times \Xi, \\ y(x,\xi) &= 0, \phantom{tex}\qquad (x,\xi) \in \partial D \times \Xi.\\ \end{aligned} \end{equation} Optimal control problems with semilinear PDEs involving random coefficients have been studied in, for instance, \cite{Kouri2016,Kouri2019a}. We include a nonsmooth term as in \cite{Reyes2015} with the goal of obtaining sparse solutions. In the following, we assume that $\lambda_1 \geq 0$, $\lambda_2 \geq 0$, and $y_D \in L^2(D)$. The model problem we solve is given by \begin{equation} \label{eq:model-problem-semilinear}\tag{P'} \begin{aligned} &\min_{u \in C} \quad \left\lbrace\varphi(u):= \frac{1}{2} \mathbb{E} [ \lVert y(\xi) - y_D\rVert_{L^2(D)}^2 ] + \frac{\lambda_2}{2} \lVert u \rVert_{L^2(D)}^2 +\lambda_1 \lVert u \rVert_{L^1(D)}\right\rbrace \\ & \quad \text{s.t.} \quad - \nabla \cdot (a(x,\xi) \nabla y) + r(x,\xi) y^3 = u(x), \qquad (x,\xi) \in D \times \Xi, \\ & \quad \phantom{\text{s.t. } \quad - \nabla \cdot (a(x,\xi) \nabla y) + R(x,\xi)} y = 0, \phantom{tex}\qquad (x,\xi) \in \partial D \times \Xi,\\ &\quad \quad \quad \quad \quad C:= \{ u \in L^2(D): \,u_a(x) \leq u(x) \leq u_b(x)\,\,\text{ a.e. } x\in D\}. \end{aligned} \end{equation} The following assumptions will apply in this section. In particular, we do not require uniform bounds on the coefficient $a(\cdot,\xi)$, which allows for modeling with log-normal random fields.
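To make the structure of the state equation \eqref{eq:semilinear-PDE} and of a single objective sample concrete, the following sketch solves a one-dimensional finite-difference analogue of \eqref{eq:semilinear-PDE} for one fixed realization of the coefficients by Newton's method and evaluates $\tfrac12\lVert y-y_D\rVert^2+\tfrac{\lambda_2}{2}\lVert u\rVert^2$ for that sample. The grid, the coefficient samples, and the data below are illustrative placeholders; they are not the discretization used later in the experiments.
\begin{verbatim}
import numpy as np

def solve_state_1d(u, a, r, tol=1e-10, max_iter=50):
    # Newton solve of the 1D analogue -(a y')' + r y^3 = u on (0,1), y(0) = y(1) = 0,
    # discretized with central finite differences; u and r live on the n interior nodes,
    # a on the full grid of n+2 nodes.
    n = u.size
    h = 1.0 / (n + 1)
    a_mid = 0.5 * (a[:-1] + a[1:])                    # coefficient at the n+1 midpoints
    A = (np.diag(a_mid[:-1] + a_mid[1:])
         - np.diag(a_mid[1:-1], 1) - np.diag(a_mid[1:-1], -1)) / h**2
    y = np.zeros(n)
    for _ in range(max_iter):
        F = A @ y + r * y**3 - u                      # nonlinear residual
        if np.linalg.norm(F) < tol:
            break
        y -= np.linalg.solve(A + np.diag(3.0 * r * y**2), F)   # Newton step
    return y

def sample_objective(u, a, r, y_D, lam2):
    # One sample of J(u, xi) = 0.5*||y(xi) - y_D||^2 + lam2/2*||u||^2 (discrete L^2 norms).
    h = 1.0 / (u.size + 1)
    y = solve_state_1d(u, a, r)
    return 0.5 * h * np.sum((y - y_D) ** 2) + 0.5 * lam2 * h * np.sum(u ** 2)

# Illustrative usage with one fixed "sample" of the coefficients.
n = 100
xg = np.linspace(0.0, 1.0, n + 2)                     # full grid
x = xg[1:-1]                                          # interior nodes
a = 0.5 + 0.1 * np.cos(np.pi * xg)                    # diffusion coefficient sample
r = 0.5 + 0.1 * np.sin(np.pi * x)                     # reaction coefficient sample
u = np.ones(n)                                        # control
y_D = np.sin(np.pi * x)                               # target state
print(sample_objective(u, a, r, y_D, lam2=1e-3))
\end{verbatim}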
\begin{assumption} \label{assumption:bilinearform} We assume $y_D \in L^2(D)$, $u_a, u_b \in L^2(D)$, and $u_a \leq u_b$. There exist $a_{\min}(\cdot), a_{\max}(\cdot)$ such that $0< a_{\min}(\xi) < a(\cdot,\xi) < a_{\max}(\xi)< \infty$ in $D$ a.s.~and $a_{\min}^{-1}, a_{\max} \in L^p(\Xi)$ for all $p \in [1,\infty)$. Furthermore, there exists $r_{\max}(\cdot)$ such that $0 \leq r(\cdot,\xi) \leq r_{\max}(\xi)<\infty$ a.s.~and $r_{\max} \in L^p(\Xi)$ for all $p \in [1,\infty)$. \end{assumption} Existence of a solution to Problem~\eqref{eq:model-problem-semilinear} follows by applying \cite[Proposition 3.1]{Kouri2019a}. The following result holds by \cite[Proposition 2.1]{Kouri2019a} combined with standard a priori estimates for a fixed realization $\xi$ to obtain \eqref{eq:bounds-y} and \eqref{eq:continuous-dependence-y}. \begin{lemma} \label{lemma:well-posedness-PDE} For almost every $\xi \in \Xi$, \eqref{eq:semilinear-PDE} has a unique solution $y(\xi)=y(\cdot,\xi) \in H_0^1(D)$ and there exists a positive random variable $C_1 \in L^p(\Xi)$ for all $p \in [1,\infty)$ independent of $u$ such that for almost every $\xi \in \Xi$, \begin{equation} \label{eq:bounds-y} \lVert y(\xi) \rVert_{L^2(D)} \leq C_1(\xi) \lVert u \rVert_{L^2(D)}. \end{equation} Additionally, for $y_1(\xi)$ and $y_2(\xi)$ solving \eqref{eq:semilinear-PDE} with $u=u_1$ and $u=u_2$, respectively, we have for almost every $\xi \in \Xi$ that \begin{equation} \label{eq:continuous-dependence-y} \lVert y_1(\xi) - y_2(\xi) \rVert_{L^2(D)} \leq C_1(\xi) \lVert u_1 - u_2 \rVert_{L^2(D)}. \end{equation} \end{lemma} By Lemma~\ref{lemma:well-posedness-PDE}, the control-to-state operator $T(\xi):L^2(D) \rightarrow H_0^1(D), u \mapsto T(\xi)u$ is well-defined for almost every $\xi$ and all $u \in L^2(D)$. Additionally, for almost every $\xi \in \Xi$, this mapping is in fact continuously Fr\'echet differentiable; this can be argued by verifying \cite[Assumption 1.47]{Hinze2009} as in \cite[pp.~76-78]{Hinze2009}. With that, we define the reduced functional $J:L^2(D) \times \Xi \rightarrow \mathbb{R}$ by $J(u,\xi):= \frac{1}{2} \lVert T(\xi)u - y_D\rVert_{L^2(D)}^2 + \frac{\lambda_2}{2} \lVert u \rVert_{L^2(D)}^2$ and we can define the stochastic gradient. \begin{proposition} \label{proposition:random-J-is-differentiable} $J:L^2(D)\times\Xi \rightarrow \mathbb{R}$ is continuously Fr\'echet differentiable and the stochastic gradient is given by \begin{equation} \label{eq:stochastic-gradient-application} G(u,\xi) := \lambda_2 u - p(\cdot,\xi), \end{equation} where, given a solution $y = y(\cdot,\xi)$ to \eqref{eq:semilinear-PDE}, the function $p = p(\cdot,\xi) \in H_0^1(D)$ is the solution to the adjoint equation \begin{equation} \label{eq:adjoint-equation} \begin{aligned} -\nabla \cdot (a(x,\xi) \nabla p) + 3 r(x, \xi)y^2 p &= y_D-y, \quad (x,\xi) \in D \times \Xi \\ p &=0,\phantom{- y_D} \quad \phantom{t}(x,\xi) \in \partial D \times \Xi. \end{aligned} \end{equation} Furthermore, for almost every $\xi \in \Xi$, with the same $C_1 \in L^p(\Xi)$ for all $p \in [1,\infty)$ as in Lemma~\ref{lemma:well-posedness-PDE}, \begin{equation} \label{eq:a-priori-p} \lVert p(\cdot,\xi) \rVert_{L^2(D)} \leq C_1(\xi) \lVert y_D - y(\xi)\rVert_{L^2(D)}. 
\end{equation} Additionally, for $p_1(\xi)$ and $p_2(\xi)$ solving \eqref{eq:adjoint-equation} with $y=y_1(\xi)$ and $y=y_2(\xi)$, respectively (where $y_i(\xi)$ solves \eqref{eq:semilinear-PDE} with $u=u_i$), \begin{equation} \label{eq:continuous-dependence-p} \lVert p_1(\xi) - p_2(\xi) \rVert_{L^2(D)} \leq C_1(\xi) \lVert y_1(\xi) - y_2(\xi) \rVert_{L^2(D)}. \end{equation} \end{proposition} The proofs of the above and following proposition are in Sect.~\ref{subsection:auxiliary-proofs-application}. We define $j:L^2(D) \rightarrow \mathbb{R}$ by $j(u):= \mathbb{E}[J(u,\xi)]$ for all $u\in L^2(D)$ and show that it is continuously Fr\'echet differentiable in the following proposition. \begin{proposition} \label{proposition:j-is-differentiable} The function $j:L^2(D) \rightarrow \mathbb{R}$ is continuously Fr\'echet differentiable and $\mathbb{E}[G(u,\xi)] = \nabla j(u)$ for all $u\in L^2(D)$. \end{proposition} Now, we present the main result of this section, which is the verification of assumptions for the convergence of Algorithm~\ref{alg:PSG_Hilbert_Nonconvex_Decreasing_Steps}. \begin{theorem} Problem \eqref{eq:model-problem-semilinear} satisfies Assumption~\ref{asu1ii} as well as Assumption~\ref{asu4i}--Assumption~\ref{asu4iv}. \end{theorem} \begin{proof} For Assumption~\ref{asu1ii}, we note that by Proposition~\ref{proposition:j-is-differentiable}, $j$ is continuously Fr\'echet differentiable and $\mathbb{E}[G(u,\xi)] = \nabla j(u)$ for all $u \in L^2(D)$. Now, for arbitrary $u_1,u_2 \in L^2(D)$, we have by Jensen's inequality, \eqref{eq:stochastic-gradient-application}, and H\"older's inequality applied to \eqref{eq:continuous-dependence-p} and \eqref{eq:continuous-dependence-y} that \begin{align*} &\lVert \nabla j(u_1) - \nabla j(u_2)\rVert_{L^2(D)} \leq \mathbb{E}[\lVert G(u_1,\xi)-G(u_2,\xi)\rVert_{L^2(D)}]\\ & \quad\leq \lambda_2 \lVert u_1 - u_2 \rVert_{L^2(D)} + \mathbb{E}[\lVert p_1(\xi) - p_2(\xi)\rVert_{L^2(D)} ]\\ & \quad\leq \lambda_2 \lVert u_1 - u_2 \rVert_{L^2(D)} + \left(\mathbb{E}[(C_1(\xi))^2]\right)^{1/2}\left(\mathbb{E}[\lVert y_1(\xi) - y_2(\xi)\rVert_{L^2(D)}^2 ]\right)^{1/2}\\ & \quad\leq \lambda_2 \lVert u_1 - u_2 \rVert_{L^2(D)} + \lVert C_1\rVert_{L^2(\Xi)}^2 \lVert u_1 - u_2\rVert_{L^2(D)}. \end{align*} Since $\lVert C_1\rVert_{L^2(\Xi)}^2 < \infty$ it follows that $j \in C^{1,1}_L(L^2(D))$. Assumption~\ref{asu4i} is obviously satisfied. For Assumption~\ref{asu4ii}, we have that the function $\eta(u)=\lambda_1 \lVert u\rVert_{L^1(D)} \in \Gamma_0(L^2(D))$ and is clearly bounded below; additionally, $\eta$ is globally Lipschitz and therefore satisfies \eqref{eq:local-Lipschitz-bound-h}. For Assumption~\ref{asu4iii}, we have by \eqref{eq:stochastic-gradient-application}, \eqref{eq:a-priori-p}, and \eqref{eq:bounds-y} the bound \begin{equation} \label{eq:stochastic-gradient-bound-example} \lVert G(u,\xi) \rVert_{L^2(D)} \leq \lambda_2 \lVert u \rVert_{L^2(D)} + C_1(\xi) \lVert y_D \rVert_{L^2(D)} + (C_1(\xi))^2 \lVert u\rVert_{L^2(D)} \end{equation} and furthermore $\mathbb{E}[\lVert G(u,\xi) \rVert_{L^2(D)}^2] =:M(u)<\infty$ by integrability of $\xi \mapsto C_1(\xi)$. Assumption~\ref{asu4iv} follows for any $u \in C$ (and hence any convergent sequence $\{u_n\}$ in $C$) by \eqref{eq:stochastic-gradient-bound-example}. \end{proof} The last assumption from Assumption~\ref{assumptions:general-convergence-proof} is technical and difficult to verify for general functions in infinite dimensions. 
Indeed, \cite{Kupka1965} gave an example of a $C^\infty$-function whose critical values make up a set of positive measure. In finite dimensions the situation is simpler: the Morse--Sard theorem guarantees that Assumption~\ref{asu4v} holds if $f:\mathbb{R}^n \rightarrow \mathbb{R}$ and $f \in C^k$ for $k \geq n$. In infinite dimensions, certain well-behaved mappings, in particular Fredholm operators, satisfy this assumption; see \cite{Smale2000}. \subsection{Numerical Experiments} \label{subsection:experiments} In this section, we demonstrate Algorithm~\ref{alg:PSG_Hilbert_Nonconvex_Decreasing_Steps} on Problem \eqref{eq:model-problem-semilinear}. Simulations were run using FEniCS by \cite{Alnes2015} on a laptop with Intel Core i7 Processor (8 x 2.6 GHz) with 16 GB RAM. Let the domain be given by $D=(0,1)\times(0,1)$ and the constraint set be given by {$C= \{ u \in L^2(D) \,|\, -0.5 \leq u(x) \leq 0.5 \,\, \forall x \in D\}.$} We modify \cite[Example 6.1]{Reyes2015}, with $y_D(x)=\sin(2 \pi x_1)\sin (2\pi x_2) \exp(2 x_1)/6$, $\lambda_1 = 0.008$, and $\lambda_2 = 0.001.$ We generate random fields using a Karhunen-Lo\`eve expansion, with means $a_0 = 0.5$ and $r_0 = 0.5$, number of summands $m = 20$, and $\xi^{a,i},\xi^{r,i} \sim U(-\sqrt{0.5},\sqrt{0.5})$, where $U(a,b)$ denotes the uniform distribution between real numbers $a$ and $b$, $a<b$. The eigenfunctions and eigenvalues are given by $$\tilde{\phi}_{j,k}(x):= 2\cos(j \pi x_2)\cos(k \pi x_1), \quad \tilde{\lambda}_{j,k}:=\frac{1}{4} \exp(-\pi(j^2+k^2)l^2), \quad j,k \geq 1,$$ where we reorder terms so that the eigenvalues appear in descending order (i.e., $\phi_1 = \tilde{\phi}_{1,1}$ and $\lambda_1 = \tilde{\lambda}_{1,1}$) and we choose the correlation length $l=0.5$. Thus \begin{equation} \label{eq:random-field-expansion} a(x,\xi) = a_0 + \sum_{i=1}^m \sqrt{\lambda_i} \phi_i \xi^{a,i}, \quad r(x,\xi) = r_0 + \sum_{i=1}^m \sqrt{\lambda_i} \phi_i \xi^{r,i}. \end{equation} For Algorithm~\ref{alg:PSG_Hilbert_Nonconvex_Decreasing_Steps}, we generate samples with $\xi_n = (\xi_n^{a,1}, \dots, \xi_n^{a,m}, \xi_n^{r,1}, \dots,\xi_n^{r,m})$ at each iteration $n$. The step size is chosen to be $t_n = \theta/n$ with $\theta = 100$, where the scaling was chosen such that $\theta \approx 1/\lVert G(u_1, \xi_1)\rVert$. The initial point was $u_1(x) = \sin(4\pi x_1) \sin (4 \pi x_2).$ A uniform mesh $\mathcal{T}$ with 9800 shape regular triangles $T$ was used. We denote the mesh fineness by $\hat{h} = \max_{T \in \mathcal{T}}\operatorname{diam}(T)$. The state and adjoint were discretized using piecewise linear finite elements (where $\mathcal{P}_i$ denotes the space of polynomials of degree up to $i$), given by the set \begin{align*} V_{\hat{h}} &:= \lbrace v \in H_0^1(D): v|_{T} \in \mathcal{P}_1(T) \text{ for all } T\in \mathcal T \rbrace. \end{align*} For the controls, we choose a discretization of $L^2(D)$ by piecewise constants, given by the set \begin{align*} U_{\hat{h}} &:= \lbrace u \in L^2(D): u|_{T} \in \mathcal{P}_0(T) \text{ for all } T\in \mathcal{T} \rbrace,\quad C_{\hat{h}} := U_{\hat{h}}\cap C. \end{align*} We use the $L^2$-projection $P_{\hat{h}}\colon L^2(D) \rightarrow U_{\hat{h}}$ defined for each $v \in L^2(D)$ by \[ P_{\hat{h}}(v)\bigl\lvert_T := \frac{1}{|T|}\int_T v\,\mathrm{d}x. \] This is done to project the stochastic gradient onto the control space $U_{\hat{h}}$, as in \cite{Geiersbach2020b}; a sketch of the field sampling \eqref{eq:random-field-expansion} and of this cellwise projection is given below.
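The following sketch samples the truncated expansions \eqref{eq:random-field-expansion} and performs the cellwise projection $P_{\hat{h}}$ by averaging point values over each cell. The point sets, the cell-to-point map, and the simple averaging quadrature are simplified stand-ins for the finite element data structures used in the actual FEniCS implementation.
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(0)
m, corr_len = 20, 0.5          # number of KL terms and correlation length l

# Index pairs (j, k), ordered by descending eigenvalue 0.25*exp(-pi*(j^2+k^2)*l^2)
pairs = sorted(((j, k) for j in range(1, 7) for k in range(1, 7)),
               key=lambda p: -np.exp(-np.pi * (p[0]**2 + p[1]**2) * corr_len**2))[:m]
lams = np.array([0.25 * np.exp(-np.pi * (j**2 + k**2) * corr_len**2) for j, k in pairs])

def kl_field(x1, x2, xi, mean=0.5):
    # Evaluate mean + sum_i sqrt(lambda_i)*phi_i(x)*xi_i with
    # phi_{j,k}(x) = 2*cos(j*pi*x2)*cos(k*pi*x1).
    phis = np.array([2.0 * np.cos(j * np.pi * x2) * np.cos(k * np.pi * x1)
                     for j, k in pairs])
    return mean + np.tensordot(np.sqrt(lams) * xi, phis, axes=1)

def project_piecewise_constant(values, cells):
    # Cellwise averaging as a stand-in for P_h(v)|_T = |T|^{-1} int_T v dx.
    return np.array([values[idx].mean() for idx in cells])

# Illustrative usage: one sample xi^a, field values at stand-in quadrature points.
xi_a = rng.uniform(-np.sqrt(0.5), np.sqrt(0.5), size=m)
x1, x2 = rng.random(1000), rng.random(1000)
a_vals = kl_field(x1, x2, xi_a, mean=0.5)
cells = np.array_split(np.arange(1000), 50)        # stand-in cell-to-point map
a_pc = project_piecewise_constant(a_vals, cells)
\end{verbatim}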
With this projection, the last line of Algorithm~\ref{alg:PSG_Hilbert_Nonconvex_Decreasing_Steps} is given by the expression $u_{n+1}:=\textup{prox}_{t_n h}\left( u_n - t_n P_{\hat{h}} G(u_n,\xi_{n})\right).$ For the computation of the proximity operator $\textup{prox}_{t(\eta+\delta_C)}(z) = \argmin_{-0.5 \leq v \leq 0.5} \{ \lambda_1 \lVert v \rVert_{L^1(D)} + \frac{1}{2t} \lVert v - z \rVert_{L^2(D)}^2\}$, we use the formula from \cite[Example 6.22]{Beck2017}, defined piecewise on each element of the mesh. For each $T \in \mathcal{T}$, it is given by $$\textup{prox}_{t(\eta+\delta_C)}(z|_T) = \min \{ \max \{ \lvert z|_T\rvert-t \lambda_1,0\},0.5\}\textup{sgn}(z|_T).$$ For convergence plots, we use a heuristic to approximate the objective function and the measure of stationarity by increasing the sampling as the control approaches stationarity. To be more precise, we use a sequence of sample sizes $\{m_n\}$ with $m_{n} = 10\lfloor \tfrac{n}{50}\rfloor +1$ newly generated i.i.d.~samples $(\xi_{n,1}, \dots, \xi_{n,m_n})$ and compute \begin{align*} \hat{f}_n &:= \frac{1}{m_n}\sum_{j=1}^{m_n} J(u_n, \xi_{n,j}) + \eta(u_n), \\ {r}_n &:= \left\lVert u_n - \textup{prox}_{\eta+\delta_C}\left(u_n - \frac{1}{m_n}\sum_{j=1}^{m_n}P_{\hat{h}} G(u_n,\xi_{n,j})\right)\right\rVert_{L^2(D)}. \end{align*} The algorithm is terminated for $n\geq 50$ if $\hat{r}_n := \sum_{k=n-50}^n {r}_k \leq \text{tol}$ with $\text{tol}=2\cdot 10^{-4}$. The parameters for our heuristic termination rule were tuned, for illustration purposes only, so that the algorithm stopped after several hundred iterations. A plot of the control after termination is shown in~Fig.~\ref{fig:control}. The effect of the sparse term $\eta$ as well as the constraint set $C$ can be seen clearly. The decay of the objective function value and of the stationarity measure is shown in Fig.~\ref{fig:experiment1}. We see convergence of the objective function values, and the stationarity measure tends to zero as expected. Additionally, we conduct an experiment to demonstrate mesh independence of the algorithm by running the algorithm once for each of several meshes and comparing the number of iterations needed until the tolerance $\text{tol}$ is reached. In Table~\ref{table:mesh-independence}, we see that these iteration numbers are of the same order. The estimate for the objective function $\hat{f}_N$ at the final iteration $N$ is also included, demonstrating how solutions become more exact on finer meshes. \begin{figure} \caption{The control $u$ after 251 iterations} \label{fig:control} \end{figure} \begin{figure} \caption{Behavior of the objective function (left) and the stationarity measure (right)} \label{fig:experiment1} \end{figure} \begin{table} \begin{center} \begin{tabular}{| c | c | c |c |} \hline $\hat{h}$ & \# triangles & objective function $\hat{f}_N$ & \# iterations $N$ until $\hat{r}_N \leq$ tol \\ \hline $ 7.1\cdot 10^{-2}$ & $800$ & $4.160\cdot 10^{-2}$ & $191$ \\ $ 4.7\cdot 10^{-2}$ & $1800$ & $4.157\cdot 10^{-2}$ & $295$\\ $3.5\cdot 10^{-2}$ & $3200$ & $4.157\cdot 10^{-2}$ & $233$ \\ $2.8\cdot 10^{-2}$ & $5000$ & $4.156\cdot 10^{-2}$ & $257$ \\ $2.4\cdot 10^{-2}$ & $7200$ & $4.156\cdot 10^{-2}$ & $271$ \\ $2.0\cdot 10^{-2}$ & $9800$ & $4.155\cdot 10^{-2}$ & $251$ \\ \hline \end{tabular} \end{center} \caption{Experiment showing mesh independence} \label{table:mesh-independence} \end{table} \section{Conclusion} \label{sec:conclusion} In this paper, we presented asymptotic convergence analysis for two variants of the stochastic proximal gradient algorithm in Hilbert spaces.
The main results address the asymptotic convergence to stationary points of general functions defined over a Hilbert space. Moreover, we presented an application to the theory in the form of a problem from PDE-constrained optimization under uncertainty. Assumptions for convergence were verified for a tracking-type problem with a $L^1$-penalty term subject to a semilinear elliptic PDE with random coefficients and box constraints. Numerical experiments demonstrated the effectiveness of the method. The ODE method from Sect.~\ref{subsection:ODE-proof} allowed us to prove a more general result with weaker assumptions on the objective function. However, we needed to introduce an assumption on the set of critical values in the form of Assumption~\ref{asu4v}. While we did not verify this assumption for our model problem, it would be interesting to know whether this assumption is verifiable for this class of problems. We had to be slightly more restrictive on the nonsmooth term in Sect.~\ref{subsection:ODE-proof} than we were in Sect.~\ref{subsection:SPGM-Variance-Reduced}. The advantages in terms of computational cost of Algorithm~\ref{alg:PSG_Hilbert_Nonconvex_Decreasing_Steps} over Algorithm~\ref{alg:PSG_Hilbert_Nonconvex} are clear: the use of decreasing step sizes in Algorithm~\ref{alg:PSG_Hilbert_Nonconvex_Decreasing_Steps} means that increased sampling is not needed. Additionally, there is no need to determine the Lipschitz constant for the gradient, which in the application depends on (among other things) the Poincar\'e constant and the lower bound on the random fields, and thus lead to a prohibitively small constant step size. This phenomenon has been demonstrated in \cite{Geiersbach2020b}. How to scale the decreasing step size $t_n$ remains an open question. In practice, the scaling of the step size can be tuned offline. An improper choice of the scaling $c$ in the step size $t_n = c/n^\alpha$ for $0.5 < \alpha \leq 1$ can lead to arbitrarily slow convergence; this was demonstrated in \cite{Nemirovski2009}. While this was not the focus of our work, efficiency estimates for nonconvex problems might also be possible following the work by \cite{Bottou2018,Lei2018,Ghadimi2016}. In lieu of efficiency estimates, it would be desirable to have better termination conditions that do not rely on increased sampling as our heuristic did in the numerical experiments. Finally, it would be natural to investigate mesh refinement strategies as in \cite{Geiersbach2020b}. For more involved choices of nonsmooth terms, the $\textup{prox}$ computation is also subject to numerical error and should be treated. \appendix \section{Auxiliary Results}\label{subsection:auxiliary-proofs} To prove Lemma~\ref{lemma:quadratic-variations-bounded-imply-convergence}, we first need the following result. \begin{proposition} \label{proposition:radon-nikodym-property} For $1 \leq p \leq \infty$, every $H$-valued martingale that is bounded in the Bochner space $L^p(\Omega,H)$ converges a.s. \end{proposition} \begin{proof} Since $H$ is a Hilbert space, it is reflexive and therefore has the Radon--Nikodym property by \cite[Corollary 2.11]{Pisier2016}. The rest of the proof can be found in \cite[Theorem 2.5]{Pisier2016}. 
\end{proof} \paragraph{Proof of Lemma~\ref{lemma:quadratic-variations-bounded-imply-convergence}} \begin{proof} It is straightforward to show that $\mathbb{E}[\lVert v_n\rVert^2] = \mathbb{E}[\lVert v_1 \rVert^2] + \sum_{k=1}^{n-1} \mathbb{E}[\lVert v_{k+1} - v_{k}\rVert^2],$ and therefore boundedness of $\{v_n\}$ in $L^2(\Omega,H)$ follows from \eqref{eq:quadratic-variations-proof} and vice versa. Supposing now that \eqref{eq:quadratic-variations-proof} holds, the fact that $\{v_n\}$ converges to a limit $v_\infty$ follows by Proposition~\ref{proposition:radon-nikodym-property}. \end{proof} \section{Auxiliary Proofs for Application}\label{subsection:auxiliary-proofs-application} \paragraph{Proof of Proposition~\ref{proposition:random-J-is-differentiable}} \begin{proof} Continuous differentiability of $J(\cdot,\xi):L^2(D)\rightarrow \mathbb{R}$ follows from continuous differentiability of $u \mapsto T(\xi)u$ and the fact that $(u,y) \mapsto \tilde{J}(u,y) = \tfrac{1}{2} \lVert y-y_D\rVert_{L^2(D)}^2 + \tfrac{\lambda_2}{2}\lVert u\rVert_{L^2(D)}^2$ is continuously Fr\'echet differentiable. One obtains \eqref{eq:stochastic-gradient-application} and \eqref{eq:adjoint-equation} by fixing a realization $\xi \in \Xi$ and computing the derivative of $u \mapsto J(u,\xi)$ as in, e.g., \cite[p.~58-59]{Hinze2009}. Bounds \eqref{eq:a-priori-p} and \eqref{eq:continuous-dependence-p} follow from standard a priori estimates. \end{proof} \paragraph{Proof of Proposition~\ref{proposition:j-is-differentiable}} \begin{proof} We verify the conditions of Lemma~\ref{lemma:frechet-exchange-derivative-expectation} from Sect.~\ref{subsection:differentiability-expectation}. Fr\'echet differentiability of $J:L^2(D) \times \Xi \rightarrow \mathbb{R}$ for almost every $\xi$ follows from Proposition~\ref{proposition:random-J-is-differentiable}. The function $j$ is well-defined and finite-valued for all $u \in L^2(D)$, since \begin{align*} j(u) &= \frac{1}{2}\mathbb{E}[\lVert y - y_D \rVert^2_{L^2(D)}] + \frac{\lambda_2}{2} \lVert u \rVert_{L^2(D)}^2 \leq \mathbb{E}[\lVert T(\xi)u \rVert_{L^2(D)}^2] + \lVert y_D\rVert_{L^2(D)}^2 + \frac{\lambda_2}{2} \lVert u \rVert_{L^2(D)}^2 \end{align*} is finite by $T(\xi)u = y(\xi)$ and \eqref{eq:bounds-y} along with the assumption that $y_D \in L^2(D)$. Now, for every $v \in C$, there exists a $y_v(\xi)$ satisfying \eqref{eq:semilinear-PDE} with $u=v$ and a $p_v(\xi)$ satisfying \eqref{eq:adjoint-equation} with $y=y_v(\xi)$. Thus by \eqref{eq:a-priori-p} followed by \eqref{eq:bounds-y}, \begin{align*} \lVert G(v,\xi) \rVert_{L^2(D)} &= \lVert \lambda_2v - p_v(\xi) \rVert_{L^2(D)} \leq \lambda_2 \lVert v \rVert_{L^2(D)} + C_1(\xi)\lVert y_D - y_v(\xi)\rVert_{L^2(D)}\\ & \leq \lambda_2 \lVert v \rVert_{L^2(D)} + C_1(\xi) \lVert y_D \rVert_{L^2(D)} + (C_1(\xi))^2 \lVert v\rVert_{L^2(D)}=:C(\xi). \end{align*} Notice that $C \in L^p(\Xi)$ for all $p \in [1,\infty)$ by the nature of the mapping $\xi \mapsto C_1(\xi)$. Therefore, the conditions of Lemma~\ref{lemma:frechet-exchange-derivative-expectation} are satisfied and we have proven Fr\'echet differentiability of $j$. \end{proof} \section{Differentiability of Expectation Functionals} \label{subsection:differentiability-expectation} Let $(X, \lVert \cdot \rVert_X)$ be a Banach space and let $J: X \times \Omega \rightarrow \mathbb{R}$ be a random functional.
We summarize under what conditions we can exchange the integral and the derivative for the functional $j: X \rightarrow \mathbb{R}$, where $j(u) = \int_\Omega J(u,\omega) \,\mathrm{d} \mathbb{P}(\omega)$. The following definition gives the minimal requirement for exchanging the derivative and expectation, namely, requiring $J:X \times \Omega \rightarrow \mathbb{R}$ to be $L^1$-Fr\'echet differentiable. \begin{definition} \label{definition-Lp-differentiable} A $p$-times integrable random functional $J:X \times \Omega \rightarrow \mathbb{R}$ is called $L^p$-Fr\'echet differentiable at $u$ if there exist an open set $U \subset X$ containing $u$ and a random operator $A:U \times \Omega \rightarrow X^*$ such that $\lim_{h \rightarrow 0} \lVert J_{\omega}(u + h) - J_{\omega}(u) - A(u,\omega)h \rVert_{L^p(\Omega)} / \lVert h \rVert_X = 0$. \end{definition} By H\"older's inequality, if $u \mapsto J(u,\cdot)$ is $L^p$-differentiable and $1 \leq r < p$, then it is also $L^r$-differentiable with the same derivative. In particular, $L^1$-Fr\'echet differentiability already implies that $j:X \rightarrow \mathbb{R}$ is Fr\'echet differentiable at $u$. The condition in Definition~\ref{definition-Lp-differentiable} might be difficult to verify directly. For this reason, we consider other assumptions on an open subset $U$ of $X$ containing $u$. We denote the functional $J(\cdot,\omega):X \rightarrow \mathbb{R}$ for a fixed realization $\omega \in \Omega$ by $J_\omega:X \rightarrow \mathbb{R}$. \begin{assumption} \label{asu-expectation} \subasu \label{subasu-expectation1} The expectation $j(v)$ is well-defined and finite-valued for all $v \in U.$\\ \subasu \label{subasu-expectation2} For almost every $\omega \in \Omega$, the functional $J_\omega:X \rightarrow \mathbb{R}$ is Fr\'echet differentiable at $u$. Moreover, there exists a positive random variable $C(\cdot) \in L^1(\Omega)$ such that for all $v \in U$ and almost every $\omega \in \Omega$, \begin{equation} \label{eq:randomLipschitz} \lVert J'_\omega(v) \rVert_{X^*} \leq C(\omega). \end{equation} \end{assumption} \begin{lemma} \label{lemma:frechet-exchange-derivative-expectation} Suppose Assumption~\ref{asu-expectation} holds. Then $j$ is Fr\'echet differentiable at $u$ and \begin{equation} \label{eq:expectation-frechet-ch3} j'(u) = \mathbb{E}[J'_\omega(u)]. \end{equation} \end{lemma} \begin{proof} By the mean value theorem, for $\lVert h \rVert_X$ small enough that $u+h \in U$, there exists a $z$ on the line segment between $u$ and $u+h$ that satisfies $| J_{\omega}(u + h) - J_{\omega}(u) | \leq \lVert J'_\omega(z)\rVert_{X^*} \lVert h\rVert_{X}.$ Now, we have for almost every $\omega \in \Omega$ that \begin{equation*} \begin{aligned} \frac{|J_{\omega}(u+h) - J_{\omega}(u) - J'_{\omega}(u)h|}{\lVert h \rVert_X} &\leq \frac{|J_{\omega}(u+h) - J_{\omega}(u) |}{\lVert h \rVert_X} + \frac{|J'_{\omega}(u)h|}{\lVert h \rVert_X}\\ & \leq \lVert J'_{\omega}(z) \rVert_{X^*} + \lVert J'_{\omega}(u) \rVert_{X^*} \leq 2 C(\omega).
\end{aligned} \end{equation*} By Assumption~\ref{subasu-expectation2}, $C(\cdot)$ is integrable, so by Lebesgue's dominated convergence theorem, it follows that \begin{equation} \label{eq:switch-expectation-proof} \begin{aligned} &\lim_{h \rightarrow 0} \frac{\int_{\Omega} |J_{\omega}(u+h) - J_{\omega}(u) - J'_{\omega}(u)h| \,\mathrm{d} \mathbb{P}(\omega)}{\lVert h \rVert_X} \\ & \quad \quad= \int_{\Omega} \lim_{h \rightarrow 0} \frac{ |J_{\omega}(u+h) - J_{\omega}(u) - J'_{\omega}(u)h|}{\lVert h \rVert_X} \,\mathrm{d} \mathbb{P}(\omega) = 0, \end{aligned} \end{equation} where the last equality follows by Assumption~\ref{subasu-expectation2}. Now consider the mapping {$F: h \mapsto \int_{\Omega} J_{\omega}'(u)h \,\mathrm{d} \mathbb{P}(\omega).$} It is straightforward to show that this is a bounded linear operator. Therefore, we use Assumption~\ref{subasu-expectation1} to get \begin{align*} &\lim_{h \rightarrow 0} \frac{|\int_{\Omega} J_{\omega}(u+h) \,\mathrm{d} \mathbb{P}(\omega) - \int_{\Omega}J_{\omega}(u) \,\mathrm{d} \mathbb{P}(\omega) - F(h)| }{\lVert h \rVert_X} \\ &\quad \quad= \lim_{h \rightarrow 0} \frac{|\int_{\Omega} (J_{\omega}(u+h) - J_{\omega}(u) - J'_{\omega}(u)h) \,\mathrm{d} \mathbb{P}(\omega)|}{\lVert h \rVert_X} = 0, \end{align*} where the second equality holds by the triangle inequality and \eqref{eq:switch-expectation-proof}. Therefore $j$ is Fr\'echet differentiable at $u$ with derivative $F = \int_{\Omega} J'_{\omega}(u) \,\mathrm{d} \mathbb{P}(\omega)$. \end{proof} \newenvironment{acknowledgements} {\renewcommand\abstractname{Acknowledgements}\begin{abstract}} {\end{abstract}} \end{document}
\begin{document} \title{A Weight-scaling Algorithm for $f$-factors of Multigraphs} \iffalse (Including Matchings) of General Graphs} with a Revisit to Ordinary Matching Scaling Algorithms for Weighted Matching and Small $f$-factors of General Graphs} \fi \author{ Harold N.~Gabow \thanks{Department of Computer Science, University of Colorado at Boulder, Boulder, Colorado 80309-0430, USA. E-mail: {\tt [email protected]} } } \maketitle \iffalse {\bf \parindent=0pt \hbox to \textwidth{ \today} } \fi \input prelude \begin{abstract} The challenge for graph matching algorithms, and their generalizations to $f$-factors, is to extend known time bounds for bipartite graphs to general graphs. We discuss combinatorial algorithms for finding a maximum weight $f$-factor on an arbitrary multigraph, for given integral weights of magnitude at most $W$. For simple bipartite graphs the best-known time bound is $O(n^{2/3}\, m\, \log nW)$ (\cite{GT89}; $n$ and $m$ are respectively the number of vertices and edges). A recent algorithm of Duan, He, and Zhang \cite{DHZ} for $f$-factors of simple graphs comes within logarithmic factors of this bound, $\widetilde{O} (n^{2/3}\, m\, \log W)$. The best-known bound for bipartite multigraphs is $O(\sqrt {\Phi}\, m\, \log \Phi W)$ ($\Phi\le m$ is the size of the $f$-factor, $\Phi=\sum_{v\in V}f(v)/2$). This bound is not restricted to simple graphs, and is even superior on ``small'' simple graphs, i.e., when $\Phi=o(n^{4/3})$. We present an algorithm that comes within a $\sqrt {\log \Phi}$ factor of this bound, i.e., $O(\sqrt {\Phi \log \Phi}\,m \,\log \Phi W)$. The algorithm is a direct generalization of the algorithm of Gabow and Tarjan \cite{GT} for the special case of ordinary matching ($f\equiv 1$). We present our algorithm first for ordinary matching, as the analysis is a simplified version of \cite{GT}. Furthermore, the algorithm and analysis are both incorporated without modification into the multigraph algorithm. To extend these ideas to $f$-factors, the first step is ``expanding'' edges (i.e., replacing an edge by a length-3 alternating path). The algorithm of \cite{DHZ} uses a one-time expansion of the entire graph. Our algorithm keeps the graph small by only expanding selected edges, and ``compressing'' them back to their original source when no longer needed. Several other ideas are needed, including a relaxation of the notion of ``blossom'' to e-blossom (``expanded blossom''). \end{abstract} \iffalse We present an algorithm to find a maximum weight $f$-factor of a multigraph in time $O(\sqrt {\Phi\log \Phi}\; m\log \Phi W)$, where $\Phi$ is the size of the $f$-factor, i.e., $\Phi=\sum{v\in V}f(v)$, $m\ge \Phi/2$ is the number of edges, and $W$ is an upper bound on the given integral edge weights. This is within a factor $\sqrt \Phi$ of the best-known bound for the simple special case of bipartite multigraphs. \fi \ifcase 0 \input intro \input malg \input goaleqn \input credit \input logbound \input fFactor \input Fgoaleqn \input app \overliner \input exit \fi \ifcase 1 \overliner \section*{Acknowledgments} The author thanks Seth Pettie for helpful conversations regarding reference \cite{DPS}. \begin{thebibliography}{99} \footnotesize \input bmacros \def\talg #1,{{\it ACM Trans.~on Algorithms}, #1,} \bibitem{CCPS} W.J. Cook, W.H. Cunningham, W.R. Pulleyblank, and A.~Schrijver, {\it Combinatorial Optimization}, Wiley and Sons, NY, 1998. \bibitem{CMSV} M.B. Cohen, A. Madry, P. Sankowski, and A.
Vladu, "Negative-weight shortest paths and unit capacity minimum cost flow in $\widetilde O (m^{10/7} \log W)$ time", \soda 28th, 2017, \pp 752-771. \bibitem{DHZ} R. Duan, H. He, and T. Zhang, "A scaling algorithm for weighted f-factors in general graphs", arXiv:2003.07589v1, 2020. \bibitem{DPS} R. Duan, S. Pettie, and H-H. Su, "Scaling algorithms for weighted matching in general graphs", {\em ACM Trans. Algorithms} 14, 1, 2018, Article 8, 35 pages. \bibitem{E} J. Edmonds, ``Maximum matching and a polyhedron with 0,1-vertices'', {\it J.\ Res.\ Nat.\ Bur.\ Standards 69B}, 1965, \pp 125-130. \bibitem{ET} S. Even and R.E. Tarjan, ``Network flow and testing graph connectivity'', \sicomp 4, 1975, \pp 507-518. \bibitem{FT} M.L. Fredman and R.E. Tarjan, ``Fibonacci heaps and their uses in improved network optimization algorithms'', \jacm 34, 3, 1987, \pp 596-615. \bibitem{G76} H.N. Gabow, "An efficient implementation of Edmonds’ algorithm for maximum matching on graphs", \jacm 23, 2, 1976, \pp 221-234. \iffalse \bibitem{G83} H.N. Gabow, ``An efficient reduction technique for degree-constrained subgraph and bidirected network flow problems'', \stoc 15th, 1983, \pp 448-456. \fi \bibitem{G} H.N. Gabow, ``A scaling algorithm for weighted matching on general graphs,'' \focs 26th, 1985, \pp 90-100. \bibitem{G17} H.N. Gabow, "The weighted matching approach to maximum cardinality matching," {\it Fundamenta Informaticae} 154, 1-4, 2017, \pp 109-130. \bibitem{G18} H.N.~Gabow, "Data structures for weighted matching and extensions to $b$-matching and $f$-factors," \talg 14, 3, 2018, Article 39, 80 pages. \bibitem{GT85} H.N. Gabow and R.E. Tarjan, ``A linear-time algorithm for a special case of disjoint set union'', \jcss 30, 2, 1985, \pp 209-221. \bibitem{GT89} H.N. Gabow and R.E. Tarjan, ``Faster scaling algorithms for network problems,'' \sicomp 18, 5, 1989, \pp 1013-1036. \bibitem{GT} H.N. Gabow and R.E. Tarjan, ``Faster scaling algorithms for general graph matching problems'', {\it J.\ ACM} 38, 4, 1991, \pp 815-853. \bibitem{GMG} Z. Galil, S. Micali and H.N. Gabow, ``An $O(EV \log V)$ algorithm for finding a maximal weighted matching in general graphs'', \sicomp 15, 1, 1986, \pp 120-130. \iffalse \bibitem{GoT} A.V. Goldberg and R.E. Tarjan, ``Finding minimum-cost circulations by successive approximation'', {\it Math. of Oper. Res.} 15, 3, 1990, \pp 430-466. \fi \bibitem{HK} J. Hopcroft and R. Karp, ``An $n^{5 / 2}$ algorithm for maximum matchings in bipartite graphs'', \sicomp 2, 4, 1973, \pp 225-231. \bibitem{HP} D. Huang and S. Pettie, ``Approximate generalized matching: $f$-matchings and $f$-edge covers", arXiv:1706.05761, 2017. \bibitem{K} H.W. Kuhn, ``The Hungarian method for the assignment problem", {\em Naval Research Logistics Quarterly} 2, 1955, \pp 83-97. \bibitem{L} E.L. Lawler, {\it Combinatorial Optimization: Networks and Matroids}, Holt, Rinehart and Winston, New York, 1976. \bibitem{LP} L. Lov\'asz and M.D. Plummer, {\it Matching Theory}, North-Holland Mathematic Studies 121, North-Holland, New York, 1986. \iffalse \bibitem{LS} DOESNT DO WEIGHTED Y.T. Lee and A. Sidford, ``Path finding methods for linear programming: Solving linear programs in $o(vrank)$ $\sqrt {rank}$ iterations and faster algorithms for maximum flow", \focs 55th, 2014, \pp 424-433. \fi \bibitem{MV} S. Micali and V.V. Vazirani, ``An $O(\sqrt {|V|} \cdot |E|)$ algorithm for finding maximum matching in general graphs'', \focs 21st, 1980, \pp 17-27. 
\bibitem{S} A.~Schrijver, {\it Combinatorial Optimization: Polyhedra and Efficiency}, Springer, NY, 2003. \bibitem{T} R.E. Tarjan, ``Applications of path compression on balanced trees'', \jacm 26, 4, 1979, \pp 690-715. \bibitem{Th} M. Thorup, ``Undirected single-source shortest paths with positive integer weights in linear time'', \jacm 46, 3, 1999, \pp 362-394. \end{thebibliography} \fi \end{document}
\begin{document} \begin{abstract} Couples of proper, non-empty real projective conics can be classified \emph{modulo} rigid isotopy and ambient isotopy. We characterize the classes by equations, inequations and inequalities in the coefficients of the quadratic forms defining the conics. The results are well-adapted to the study of the relative position of two conics defined by equations depending on parameters. \keywords{arrangements of conics, rigid isotopy, relative position of two conics, classical invariant theory.} \end{abstract} \maketitle {\bf MSC2000:} 13A50 (invariant theory), 13J30 (real algebra). {\emph{To appear in \emph{Applicable Algebra in Engineering, Communication and Computing}.}} \section{Introduction}\label{section:introduction} Couples of proper real projective conics, admitting real points, can be classified \emph{modulo} ambient isotopy. The goal of this paper is to provide equations, inequations and inequalities characterizing each class. This is particularly well-suited for the following problem: given two conics whose equations depend on parameters, for which values of the parameters are these conics in a given ambient isotopy class? Such problems are of interest in geometric modeling. They are considered for instance in the articles \cite{E:GV:dR,Wang:Krasauskas} (and \cite{Wang:Wang:Kim} for the similar problem for ellipsoids). We regard this paper as a systematization of their main ideas. Specifically, in \cite{E:GV:dR}, an algorithm was proposed to determine the configuration of a pair of ellipses, by means of calculations of Sturm--Habicht sequences. Our approach is different: whereas there the computations were performed for each particular case, we perform them once and for all in the most general case. The formulas obtained behave well under specialization. Instead of working with ambient isotopy, we consider another equivalence relation, \emph{rigid isotopy}\footnote{We follow the terminology used in real algebraic geometry in similar situations.}, corresponding to a real deformation of the equations of the conics that does not change the nature of the (complex) singularities (definition \ref{definition:rigid_isotopy}, following the ideas of \cite{Gudkov}). Figures \ref{fig:generic} and \ref{fig:singular} provide a drawing of a representative of each class. \begin{figure} \caption{The rigid isotopy classes for generic pairs of conics.} \label{fig:generic} \end{figure} \begin{figure} \caption{The rigid isotopy classes for non-generic pairs of proper conics.} \label{fig:singular} \end{figure} Rigid isotopy happens to be an equivalence relation just slightly finer than ambient isotopy. So we get the classification under ambient isotopy directly from the one under rigid isotopy. The classification of pairs of real projective conics under rigid isotopy was first obtained by Gudkov and Polotovskiy \cite{Gudkov:Polotovskiy:1,Gudkov:Polotovskiy:2,Gudkov:Polotovskiy:3} in their work on quartic real projective curves. Nevertheless, we start (section 2) by re-establishing this classification. We emphasize the following key ingredient: that any rigid isotopy decomposes into a path in one orbit of the space of pencils of conics under projective transformations, and a rigid isotopy stabilizing some pencil of conics (Lemma \ref{lemma:basic_ri}). As a consequence, each rigid isotopy class is determined by an orbit of pencils of conics and the position of the two conics with respect to the degenerate conics in the pencil they generate.
This has two direct applications. First, because there are finitely many orbits of pencils of conics under projective transformations, we easily get a finite set of couples of conics meeting each rigid isotopy class at least once. Second, it indicates clearly how to derive the equations, inequations and inequalities characterizing the classes, which is done in section 3. The determination of the position of the conics with respect to the degenerate conics in a pencil essentially reduces to problems of location of roots of univariate polynomials. They can be treated using standard tools from real algebra, namely Descartes' law of signs and subresultant sequences. This contributes both to the characterization of the orbits of pencils of conics, and to the characterization of the rigid isotopy class associated to each orbit of pencils of conics. Classical invariant theory is also used for the first task. Finally, section 4 provides some examples of computations using the previous results. \subsection*{Generalities and notations} The real projective space of dimension $k$ will be denoted with $\R\P^k$; in particular, $\RP^2$ denotes the projective plane. The space of real ternary quadratic forms will be denoted with $S^2 {\R^3}^*$. We will consider $\P(S^2 {\R^3}^*)$, the associated projective space (see \cite{CLO} for the definitions of the notions of projective geometry needed here). The term \emph{conic} will be used with two meanings: \begin{itemize} \item an algebraic meaning: an element of $\P(S^2 {\R^3}^*)$. The algebraic conic associated to the quadratic form $f$ will be denoted with $[f]$. \item a geometric meaning: the zero locus, in $\R\P^2$, of a non-zero quadratic form $f$. It will be denoted with $[f=0]$. \end{itemize} A (geometric or algebraic) conic is said to be \emph{proper} if it comes from a non-degenerate quadratic form, and \emph{degenerate} if it comes from a degenerate quadratic form. Note that, with this definition, the empty set is a proper (geometric) conic. Algebraic and geometric proper non-empty conics are in bijection, and can be identified. We define the \emph{discriminant of the quadratic form $f$} to be \[ \Disc(f) = \det(\operatorname{Matrix}(f)). \] Any proper non-empty conic cuts the real projective plane into two connected components. They are topologically non-equivalent: one is homeomorphic to a M\"obius strip, the other to an open disk. The former is the \emph{outside} of the conic, the latter is its \emph{inside}. Let $f_0=x^2 + y^2 - z^2$. The inside of $[f_0=0]$ is the solution set of the inequation $f_0 < 0$, or, equivalently, the set of points where $f_0$ has the sign of $\Disc(f_0)$. These two signs change consistently under linear changes of coordinates. Now any proper, non-empty conic is obtained from $[f_0=0]$ by means of a transformation of $PGL(3,\R)$. Thus \emph{the inside of $[f=0]$ is the set of points where $f$ takes the sign of $\Disc(f)$}. The \emph{tangential quadratic form} associated to the quadratic form $f$ on $\R^3$ is the quadratic form $\tilde{f}$ on ${\R^3}^*$ whose matrix is the matrix of the cofactors of the matrix of $f$. The \emph{tangential conic} associated to $[f]$ (resp. $[f=0]$) is $[\tilde{f}]$ (resp. $[\tilde{f}=0]$). A \emph{pencil of quadratic forms} is a plane (through the origin) in $S^2 {\R^3}^*$; the associated \emph{(projective) pencil of conics} is the corresponding line in $\P(S^2 {\R^3}^*)$.
It is said to be \emph{non-degenerate} if it contains proper conics\footnote{Contrary, for instance, to the pencil of the zero loci of the forms $f(x,y,z)=\lambda x y + \mu x z$.}. The common points of all conics of a given pencil are called the \emph{base points} of the pencil. They are also the common points of any two distinct conics of the pencil. A non-degenerate pencil of conics always has four common points in the complex projective plane, counted with multiplicities. Note that we will distinguish between (ordered) \emph{couples} of conics ($(C_1,C_2)$ distinct from $(C_2,C_1)$, except when $C_1=C_2$) and (unordered) \emph{pairs} of conics ($\{C_1,C_2\}=\{C_2,C_1\}$). The \emph{characteristic form} of the couple $(f,g)$ of real ternary quadratic forms is the binary cubic in $(t,u)$: \[ \Phi(f,g;t,u) := \Disc(t f + u g). \] Its coefficients will be denoted as follows: \[ \Phi(f,g;t,u) = \Phi_{30} t^3 +\Phi_{21} t^2 u +\Phi_{12} t u^2 +\Phi_{03} u^3. \] We will also consider the de-homogenized polynomial obtained from $\Phi$ by setting $u=1$. It will be denoted with $\phi(f,g;t)$, or $\phi(t)$ when there is no ambiguity about $f,g$. So: \[ \phi(t):=\Disc(t f + g). \] Note that $\Phi_{30}=\Disc(f)$ and $\Phi_{03}=\Disc(g)$. An \emph{isotopy} of a manifold $M$ is a continuous mapping $\theta: I\times M \rightarrow M$, where $I$ is an interval containing $0$, such that for each $t\in I$, the mapping $x \mapsto \theta(t,x)$ is a homeomorphism of $M$ onto itself, and $x\mapsto \theta(0,x)$ is the identity of $M$. Two subsets $N_1,N_2$ of $M$ are \emph{ambient isotopic} if there is an isotopy of $M$ such that, at some instant $t\in I$, $\theta(t,N_1)=N_2$. This definition is immediately generalized to couples of subsets: $(N_1,N'_1)$ and $(N_2,N'_2)$ are ambient isotopic if there is an isotopy of $M$ such that, at some instant $t$, $\theta(t,N_1)=N_2$ and $\theta(t,N'_1)=N'_2$. \section{Classification} \subsection{Rigid isotopy} To classify the couples of conics up to ambient isotopy, we introduce a slightly finer equivalence relation, \emph{rigid isotopy}, corresponding to a continuous path in the space of couples of distinct proper conics that does not change the nature of the complex singularities of the union of the conics. Before stating the definition formally (definition \ref{definition:rigid_isotopy} below), we clarify this point. The complex singularities of the union of the conics correspond to the (real and imaginary) intersections of the conics. For a given multiplicity, there is only one analytic type of intersection point of two conics. Thus the nature of the singularities for the union of two distinct proper conics is determined by the numbers of real and imaginary intersections of each multiplicity. This is closely connected to the projective classification of pencils of conics, which can be found in \cite{Degtyarev,Levy}. The connection is the following theorem. \begin{theorem}\label{pencils:base_points}\emph{(\cite{Degtyarev,Levy})} Two non-degenerate pencils of conics are equivalent \emph{modulo} $PGL(3,\R)$ if and only if they have the same numbers of real and imaginary base points of each multiplicity. \end{theorem} The space of couples of distinct real conics is an algebraic fiber bundle over the variety of pencils, which is the Grassmannian of $\RP^1$'s in $\RP^5$. The fibers are isomorphic to the space of couples of distinct points in $\RP^1$.
The sets of couples of distinct conics with given numbers of real and imaginary intersections of each multiplicity are, after Theorem \ref{pencils:base_points}, exactly the inverse images of the orbits of the variety of pencils under $PGL(3,\R)$, and are thus also smooth real algebraic submanifolds. We can now state the following definition. \begin{definition}\label{definition:rigid_isotopy} Two couples of distinct proper conics are \emph{rigidly isotopic} if they are connected by a path in the space of couples of distinct proper conics, along which the numbers of real and imaginary intersections of each multiplicity don't change. \end{definition} We will now show that \emph{rigidly isotopic} implies \emph{ambient isotopic}. We first show it for some special rigid isotopies. \begin{definition} Let $f,g$ be two non-degenerate non-proportional quadratic forms. We define a \emph{sliding} for $[f]$ and $[g]$ as a path of the form $t \mapsto ([f+t kg],[g])$ (or $t \mapsto ([f],[g+t kf])$), where $k$ is some real number, $t$ ranges over a closed interval containing $0$, and no $t$ in this interval makes $f+t k g$ (resp. $g+t k f$) degenerate. \end{definition} Let $\alpha$ be a homeomorphism of $\RP^2$. For a couple $([f],[g])$ of non-empty proper conics, we write $\alpha([f],[g])$ for the couple of algebraic conics corresponding to $(\alpha([f=0]),\alpha([g=0]))$. \begin{lemma}\label{lemma:riap} Any sliding, for a couple of non-empty conics, lifts to an ambient isotopy. \end{lemma} For a sliding: \[ t \mapsto ([f+t k g],[g]),\quad t\in I \] with $[f=0]$ and $[g=0]$ non-empty, this means that there exists a family of homeomorphisms $\beta_t$ of $\RP^2$, with $\beta_0=\id$ and $\beta_t([f],[g])= ([f+t k g],[g])$. \begin{proof2} Let $B$ be the set of the base points of the pencil of $[f]$ and $[g]$. A stratification of $\RP^2 \times I$ is given by: \begin{align*} S_1&=B \times I\\ S_2&=\left([g=0]\times I\right) \setminus S_1\\ S_3&= \left\lbrace ({\bf p};t) \; | \; (f+t k g)({\bf p})=0 \right\rbrace \setminus S_1\\ S_4&= \left(\RP^2 \times I\right) \setminus \left(S_1 \cup S_2 \cup S_3\right) \end{align*} One checks that this stratification satisfies the Whitney conditions. The projection from $\RP^2\times I$ to $I$ is a proper stratified submersion. The lemma now follows, by direct application of Thom's isotopy lemma, as stated in \cite{Goresky:MacPherson}. \end{proof2} \begin{lemma}\label{lemma:basic_ri} Consider two couples of distinct proper non-empty conics. If they are rigidly isotopic, then they can also be connected by a rigid isotopy $\alpha_t(s_t)$ where \begin{itemize} \item $s_t$ is a sequence of slidings along one given pencil. \item $\alpha_t$ is a path in $PGL(3,\R)$ with $\alpha_0=\id$. \end{itemize} \end{lemma} \begin{proof2} Let $(C_0,D_0)$ and $(C_1,D_1)$ be the couples of conics, and $t\mapsto (C_t,D_t)$, $t\in [0,1]$ be the rigid isotopy that connects them. It projects to a path in one $PGL(3,\R)$-orbit of the variety of pencils. This path lifts to a path $\alpha_t$ of $PGL(3,\R)$ with $\alpha_0=\id$ (indeed, the group is a principal fiber bundle over each orbit; in particular, it is a locally trivial fiber bundle: \cite{Brocker:tomDieck}, ch. I, 4). The mapping $t \mapsto \alpha_t^{-1}(C_t,D_t)$ is a rigid isotopy drawn inside one pencil of conics. Such an isotopy is easy to describe: the pencil is a space $\RP^1$ with a finite set $\Gamma$ of degenerate conics. Let $E=\RP^1\setminus \Gamma$. A rigid isotopy inside the pencil is exactly a path in $E \times E \setminus \Diag(E \times E)$.
There exists a finite sequence $s_t$ of horizontal and vertical paths, \emph{i.e.} of slidings, with the same origin $(C_0,D_0)$ and extremity $\alpha_1^{-1}(C_1,D_1)$. Consider now $\alpha_t(s_t)$. This is a rigid isotopy connecting $(C_0,D_0)$ to $(C_1,D_1)$. \end{proof2} \begin{theorem}\label{rigid:ambiant} Two couples of distinct proper non-empty conics that are rigidly isotopic are also ambient isotopic. \end{theorem} \begin{proof2} Let $(C_0,D_0)$ and $(C_1,D_1)$ be rigidly isotopic. Consider a path $t \in [0,1] \mapsto \alpha_t(s_t)$ connecting them, as in Lemma \ref{lemma:basic_ri}. After Lemma \ref{lemma:riap}, $s_t$ lifts to an ambient isotopy $\beta_t$ with $\beta_0=\id$. Then $\alpha_t \circ \beta_t$ is an ambient isotopy carrying $(C_0,D_0)$ to $(C_1,D_1)$. \end{proof2} \subsection{Orbits of pencils of conics} After \cite{Degtyarev,Levy}, there are nine orbits of non-degenerate pencils of conics under the action of $PGL(3,\R)$. We follow Levy's nomenclature \cite{Levy} for them. It is presented in the following table, where the second and third rows display the multiplicities of the real and imaginary base points. For instance, $211$ stands for one base point of multiplicity $2$ and two base points of multiplicity $1$. \[ \aaeccarray{|c|c|c|c|c|c|c|c|c|c|} {\text{\small Orbit} & {\PI} & {{\PIa}} & {\PIb} & {\PII}& {\PIIa} & {\PIII} & {\PIIIa} & {\PIV} & {\PV}} { \text{\small real\ points} & 1111 & - & 11 & 211 & 2 & 22 & - & 31 & 4\\ \hline \text{\small imaginary\ points} & - & 1111 & 11 & - & 11 & - & 22 & - & - \\ } \] We will also use the representatives of the orbits provided by Levy \cite{Levy}. Each representative is given by a pair of generators of the corresponding pencil of quadratic forms. They are presented in Table \ref{table:orbit_representatives}. \begin{table}[t] \[ \aaeccarray{|c||c|c|c|} {\text{\ Orbit\ } & f_0 & g_0} { {\PI}& x^2-y^2 & x^2-z^2 \\ {\PIa} & x^2+y^2+z^2 & xz \\ {\PIb} & x^2+y^2-z^2 & xz \\ {\PII}& yz & x(y-z) \\ {\PIIa} & y^2+z^2 & xz \\ {\PIII} & xz & y^2\\ {\PIIIa} & x^2+y^2 & z^2 \\ {\PIV} & xz-y^2 & xy \\ {\PV} & xz-y^2 & x^2\\ } \] \caption{Levy's representatives for each orbit of pencils. Each representative is the pencil generated by $[f_0]$ and $[g_0]$.}\label{table:orbit_representatives} \end{table} We provide, in figures \ref{pencils:partI} and \ref{pencils:partII}, graphical representations of characteristic features of the pencils in each orbit. This has two goals: finding how to discriminate between the different orbits of pencils, and determining the possible rigid isotopy classes corresponding to each orbit of pencils. Each pencil is displayed as a circle, which is its topological shape. In addition, the following information is represented: \begin{itemize} \item the degenerate conics of the pencil. Each is given by a root of the characteristic form, and the multiplicity of this root is also indicated, following the encoding shown in Figure \ref{multiplicities}. \begin{figure} \caption{Degenerate conics corresponding to multiple roots of the discriminant, in the representations of the pencils.} \label{multiplicities} \end{figure} \item the nature of the proper conics (empty or non-empty) and of the degenerate conics (pair of lines, line or isolated point). The nature of the proper conics is constant on each arc between two degenerate conics.
\begin{figure} \caption{Nature of the conics, in the representations of the pencils.} \label{nature} \end{figure} \item In the case where the conics of one arc are nested, we indicate, by means of an arrow, which are the inner ones. \end{itemize} \begin{figure} \caption{Pencils of conics up to projective equivalence (beginning).} \label{pencils:partI} \end{figure} \begin{figure} \caption{Pencils of conics up to projective equivalence (end).} \label{pencils:partII} \end{figure} These features are preserved under projective equivalence. Thus the representations are established by considering Levy's representatives. \subsection{Rigid isotopy classification for pairs} We first classify \emph{pairs} of proper conics, that is: couples of distinct proper conics, under rigid isotopy \emph{and permutation of the two conics}. Later, for each pair class, we will check whether it is also a couple class (that is: the exchange of the two conics corresponds to a rigid isotopy) or whether it splits into two couple classes. Any pencil of conics is cut into arcs by its degenerate conics. Two proper conics are either on the same arc or on distinct arcs of the pencil they generate. \begin{lemma}\label{lemma:pairs} If a pencil of conics has (at least) two arcs of non-empty conics, then there are two equivalence classes for pairs of conics generating it. They correspond to the following situations: \begin{itemize} \item the conics are on the same arc. \item the conics are on distinct arcs. \end{itemize} If the pencil has only one arc with proper non-empty conics, there is only one class. \end{lemma} \begin{proof2} The orbit of pencils is assumed to be fixed. Because of Lemma \ref{lemma:riap}, to get (at least) one representative for each class, it is enough: \begin{itemize} \item to choose arbitrarily one conic on each arc and consider all the possible pairs of these conics. \item to choose arbitrarily two conics on each arc and consider these pairs for each arc. \end{itemize} But one observes that for a pencil in one of the orbits {\PIa}, {\PII}, {\PIIa}, {\PIII}, there is a projective automorphism that leaves it globally invariant and exchanges its two arcs (the two arcs bearing non-empty conics for orbit {\PIa}). Again, this is proved by considering only Levy's representatives: the reflection $x \mapsto -x$ is suitable. Similarly, a pencil in orbit {\PI} is left globally invariant by some projective automorphism that cyclically permutes the three arcs. For Levy's representative, one can take the cyclic permutation of coordinates: $x \mapsto y \mapsto z \mapsto x$. Pencils in the four other orbits have only one arc with non-empty proper conics. We have shown it is enough: \begin{itemize} \item to choose arbitrarily one arc with non-empty conics and two conics on this arc. \item to choose arbitrarily two arcs and one conic on each arc. \end{itemize} This gives nine representatives for the pairs of conics on the same arc, denoted with {{\CIN}}, \ldots, {\CVN} ({\CN} as in \emph{neighbors}) and five representatives for pairs of conics on distinct arcs, denoted with {\CIS}, {\CIaS}, {\CIIS}, {\CIIaS}, {\CIIIS} ({\CS} as in \emph{separated}). Now it remains to check that for orbits {\PI}, {\PIa}, {\PII}, {\PIIa} and {\PIII}, the {\CS}--representative and the {\CN}--representative are not equivalent. We use that a rigid isotopy preserves the topological type of $(\RP^2,[f=0],[g=0])$, after Theorem \ref{rigid:ambiant}.
To distinguish between {{\CIN}} and {\CIS}, one can count the number of connected components of the complement of $[f=0] \cup [g=0]$: there are $6$ in the first case and $5$ in the second; the topological types are different, and so are the rigid isotopy classes. For the other four orbits of pencils, one conic lies in the inside of the other (at least in a neighborhood of the double point for {\PII}) for the {\CN}-representative, while there is no such inclusion for the {\CS}-representative. \end{proof2} \begin{corollary} There are $14$ equivalence classes for pairs of proper non-empty conics under rigid isotopy and exchange. Representatives for them are given in Table \ref{table:r_isotopy}. They correspond to the graphical representations displayed in Figures \ref{fig:generic} and \ref{fig:singular}. \end{corollary} \begin{table} \[ \aaeccarray{|c|c|c|} { \text{\ class\ } & f & g }{ {{\CIN}} & 3x^2-2y^2-z^2 & 3x^2-y^2-2z^2 \\ {\CIS} & 3x^2-2y^2-z^2 & x^2-2 y^2+z^2 \\ {\CIaN} (*) & x^2+y^2+z^2+3xz & x^2+y^2+z^2+4xz \\ {\CIaS} & x^2+y^2+z^2+3xz & x^2+y^2+z^2-3xz \\ {\CIbN} & x^2+y^2-z^2+xz & x^2+y^2-z^2-xz \\ {\CIIN} (*) & yz+xy-xz & yz+2xy-2xz \\ {\CIIS} & yz+xy-xz & yz-xy+xz \\ {\CIIaN} (*) & y^2+z^2+xz & y^2+z^2+2 xz \\ {\CIIaS} & y^2+z^2+xz & y^2+z^2-xz \\ {\CIIIN} (*) & xz+y^2 & xz+2 y^2 \\ {\CIIIS} & xz+y^2 & xz-y^2 \\ {\CIIIaN} (*) & x^2+y^2-z^2 & x^2+y^2-2 z^2 \\ {\CIVN} & xz-y^2+xy & xz-y^2-2 xy \\ {\CVN} (*) & xz-y^2-x^2 & xz-y^2+x^2 \\ } \] \caption{The rigid isotopy classes.}\label{table:r_isotopy} \end{table} \subsection{Rigid isotopy classification for couples} We now derive from our classification for pairs of conics the classification for couples of conics. \begin{lemma} For each of the following representatives: {{\CIN}}, {\CIS}, {\CIaS}, {\CIbN}, {\CIIS}, {\CIIaS}, {\CIIIS}, {\CIVN}, there is a rigid isotopy that swaps the two conics. As a consequence, each of these classes for pairs is also a class for couples. \end{lemma} \begin{proof2} For {{\CIN}}, {\CIS}, {\CIaS}, {\CIIS}, {\CIIaS}, {\CIIIS}, it is enough to exhibit projective automorphisms that stabilize Levy's representative of the corresponding orbit and swap two arcs of non-empty proper conics. It was already done in the proof of Lemma \ref{lemma:pairs}, except for {{\CIN}} and {\CIS}. For them, the reflection $y \leftrightarrow z$ is convenient. For {\CIbN} and {\CIVN}, it is enough to exhibit projective automorphisms that stabilize Levy's representative of the corresponding orbit and reverse the pencil's orientation. For {\CIbN}, the reflection $x \mapsto -x$ is convenient; for {\CIVN}, one may use the transformation $x\mapsto -x, z \mapsto -z$. \end{proof2} \begin{lemma}\ \begin{itemize} \item Each of the classes of pairs {\CIaN}, {\CIIaN}, {\CIIIN}, {\CIIIaN}, {\CVN} splits into two classes for couples, corresponding to one conic lying inside the other (except for the base points). \item The class of pairs {\CIIN} also splits into two classes for couples, corresponding to one conic lying inside the other in a neighborhood of the double point (except the double point itself). \end{itemize} \end{lemma} \begin{proof2} The property that one conic lies inside the second is preserved under ambient homeomorphisms, and thus under rigid isotopy. The same holds for inclusion in a neighborhood of a double intersection point. Thus it is enough to consider the representatives of the given pair classes and check the inclusion to prove the lemma. The computations are trivial, hence we omit them.
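For instance (one instance of the omitted computations, included here only as an illustration), take the representatives of {\CIIIaN} from Table \ref{table:r_isotopy}: $f=x^2+y^2-z^2$ and $g=x^2+y^2-2z^2$. Neither conic meets the line $z=0$ in real points, and in the affine chart $z=1$ they are the circles of radii $1$ and $\sqrt{2}$ centered at the origin. Every point of $[f=0]$ satisfies $g=x^2+y^2-2z^2=-z^2<0$, which is the sign of $\Disc(g)$, so $[f=0]$ lies inside $[g=0]$; conversely, every point of $[g=0]$ satisfies $f=z^2>0$, which is not the sign of $\Disc(f)$, so $[g=0]$ does not lie inside $[f=0]$.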
\end{proof2} \begin{theorem} There are $20$ classes of couples under rigid isotopy. A set of representatives is given by Table \ref{table:r_isotopy}, where the reader should add the couple obtained by swapping $f$ and $g$ for each of the lines marked with $(*)$. \end{theorem} \begin{corollary} The ambient isotopy classes for couples of conics are the following unions of rigid isotopy classes: \begin{itemize} \item classes where the two conics can be swapped: {{\CIN}}, {\CIS}, {\CIaS}, ${\CIbN} \cup {\CIVN}$, {\CIIS}, {\CIIaS} and {\CIIIS}. \item pair classes splitting into two classes for couples, one with $[f=0]$ inside $[g=0]$, one with $[g=0]$ inside $[f=0]$: ${\CIaN} \cup {\CIIIaN}$, {\CIIN}, ${\CIIaN} \cup {\CVN}$ and {\CIIIN}. \end{itemize} \end{corollary} \begin{proof2} \emph{(sketch)} One shows that {\CIbN} and {\CIVN} are ambient isotopic by building explicitly a homeomorphism\footnote{This is enough. Indeed, the group of homeomorphisms of $\RP^2$ is connected, so any homeomorphism is the extremity of some ambient isotopy.} of $\RP^2$ sending a representative of the first to a representative of the second. The details are tedious, so we skip them. \emph{Idem} (with $[f=0]$ inside $[g=0]$) for {\CIaN} and {\CIIIaN}, and for {\CIIaN} and {\CVN}. Next, one shows that the displayed rigid isotopy classes or unions of rigid isotopy classes are not equivalent \emph{modulo} ambient isotopy. This is done by considering topological invariants of the triples $(\RP^2, [f=0],[g=0])$ which take different values on the $15$ representatives. Let $C$ be the conic $[f=0]$ (resp. $D$ the conic $[g=0]$), $I$ (resp. $J$) its inside and $\bar{I}$ (resp. $\bar{J}$) the topological closure of this inside. Then one checks that the numbers of connected components of the four following sets are suitable for separating the ambient isotopy classes: \[ C \cap D,\quad \RP^2 \setminus (C \cup D), \quad I \setminus \bar{J}, \quad J \setminus \bar{I} \] \end{proof2} \begin{remark} It is natural to compare these isotopy classes of couples of conics with the isotopy classes of projective quartic curves presented in \cite{Korchagin:Weinberg}, the union of two conics being a quartic. One then observes that {{\CIN}} corresponds to $17p$, {\CIS} to $16p$, {\CIaS} to $22p$, {\CIIS} to $34p$, {\CIIaS} to $44p$, {\CIIIS} to $38p$, ${\CIaN} \cup {\CIIIaN}$ to $21p$, {\CIIN} to $36p$ and ${\CIIaN} \cup {\CVN}$ to $43p$. Finally both ${\CIbN} \cup \CIVN$ and {\CIIIN} correspond to $18p$. \end{remark} \section{Characterizing the isotopy classes by equations, inequations and inequalities} \subsection{Preliminaries} \subsubsection{The invariants and covariants of two ternary quadratic forms}\label{subsection:invariants} Invariants and covariants (see \cite{Olver} for a modern reference about classical invariant theory) are convenient objects for discriminating, by means of equations and inequalities, between the different orbits for couples of complex conics under the group $PGL(3,\C)$. Invariants and covariants of a couple of ternary quadratic forms have been calculated by the classics, and can be found in Glenn's book \cite{Glenn} or Casey's treatise \cite{Casey}. \begin{proposition} The algebra of invariants of a couple of ternary quadratic forms is freely generated by the coefficients of the characteristic form\footnote{The analogous assertion is still true for a couple of quadratic forms in $n$ variables, for any $n$.}.
\end{proposition} The invariants alone are not sufficient to discriminate between the complex orbits. One has to consider the covariants. Some remarkable covariants of a couple of ternary quadratic forms are: \begin{itemize} \item The \emph{apolar covariant of the tangential quadratic forms} $\tilde{f}$ and $\tilde{g}$. We will denote it with $F$. This is a quadratic form that depends quadratically on $f$ as well as on $g$. \item The \emph{autopolar triangle covariant} $G$, a cubic form that is also cubic in $f$ as well as in $g$, and that always factorizes as a product of three linear forms. When $[f=0]$ and $[g=0]$ have four distinct intersections, these linear forms are equations of the sides of the unique autopolar triangle associated to them (see \cite{Berger}, 14.5.4 and 16.4.10). \end{itemize} \begin{proposition} The algebra of covariants of a couple of ternary quadratic forms $(f,g)$ is generated by the invariants, the ground forms $f$ and $g$, the apolar covariant $F$ and the autopolar triangle covariant $G$. \end{proposition} The covariant $F$ will not be needed in this paper, but $G$ will be used. We now explain how to derive a formula for it. Consider a generic couple of forms $f,g$. Let $t_1,t_2,t_3$ be the three roots of $\Disc(t f+g)$. Each of the $t_i f+g$ has rank two. Their associated tangential quadratic forms all have rank one: they are the squares of three linear forms $p_1,p_2,p_3$ on the dual space, and the associated points $[p_1],[p_2],[p_3]$ of $\P(\R^3)$ are exactly the vertices of the autopolar triangle. The sides of the triangle are obtained as the zero loci of the product of determinants: \[ \det(p_1,p_2,p)\det(p_1,p_3,p)\det(p_2,p_3,p). \] Working in coordinates, with $p_i=p_{i1}X+p_{i2}Y+p_{i3}Z$, where $X,Y,Z$ are coordinates on ${\R^3}^*$ dual to $x,y,z$, one expands this and replaces the products $p_{ij} p_{ik}$ by the corresponding term given by the equality \[ p_i^2=\widetilde{f t_i+g}. \] This product is antisymmetric in $t_1,t_2,t_3$, and thus can be divided by the Vandermonde determinant $(t_1-t_2)(t_1-t_3)(t_2-t_3)$. The quotient happens to be free of $t_i$'s: it is, up to a rational factor, the covariant $G$. One finds that the formula for this covariant can be displayed compactly. Denote: \[ \widetilde{t f+g} = \tilde{f} t^2 + \Omega(f,g) t + \tilde{g} \] and \[ \tilde{f}= \sum \tilde{a}_{ijk} X^i Y^j Z^k,\quad \Omega(f,g)= \sum \omega_{ijk} X^i Y^j Z^k,\quad \tilde{g}= \sum \tilde{b}_{ijk} X^i Y^j Z^k. \] Consider the matrix of their coefficients: \[ M= \left[ \begin{matrix} \tilde{a}_{200} & \tilde{a}_{020} & \tilde{a}_{002} & \tilde{a}_{011} & \tilde{a}_{101} & \tilde{a}_{110} \\ \omega_{200} & \omega_{020} & \omega_{002} & \omega_{011} & \omega_{101} & \omega_{110} \\ \tilde{b}_{200} & \tilde{b}_{020} & \tilde{b}_{002} & \tilde{b}_{011} & \tilde{b}_{101} & \tilde{b}_{110} \\ \end{matrix} \right] \] and label its columns with $1,2,3,\bar{1},\bar{2},\bar{3}$. Label the maximal minor corresponding to columns $i,j,k$ with $[ijk]$. Then the autopolar triangle covariant is, up to a rational factor, \begin{multline*} - [\bar{1}\,2\,3] x^3 - [1\,\bar{2}\,3] y^3 - [1\,2\,\bar{3}] z^3+ ([1\,\bar{1}\,3]+2[\bar{3}\,\bar{2}\,3])x y^2 \\ + ([1\,2\,\bar{1}]+2[\bar{2}\,2\,\bar{3}])x z^2 + ([\bar{2}\,2\,3]+2[\bar{1}\,\bar{3}\,3])y x^2 + ([1\,2\,\bar{2}]+2[1\,\bar{1}\,\bar{3}])y z^2\\ + ([\bar{3}\,2\,3]+2[\bar{1}\,2\,\bar{2}])z x^2 + ([1\,\bar{3}\,3]+2[1\,\bar{2}\,\bar{1}])z y^2 + ([1\,2\,3]+4[\bar{1}\,\bar{2}\,\bar{3}]) xyz.
\end{multline*} \subsubsection{Resultants} Let $U(t)$ and $V(t)$ be two univariate polynomials. Remember that their resultant $\Res(U,V)$ is the determinant of their Sylvester matrix: the matrix of the coefficients of degree $\deg(U)+\deg(V)-1$ down to $0$ of \[t^{\deg(V)-1}U,t^{\deg(V)-2}U, \ldots, U, t^{\deg(U)-1}V,t^{\deg(U)-2}V, \ldots, V. \] A few classical formulas about resultants will be needed. \begin{lemma}\label{res:exchange} One has \[ \Res(U,V)= (-1)^{\deg(U)\deg(V)} \Res(V,U). \] \end{lemma} \begin{lemma}\label{res:roots} Let $c$ be the leading coefficient of $U$. Then \[ \Res(U,V)=c^{\deg(V)} \prod V(\rho) \] where the product is taken over the complex roots $\rho$ of $U$, counted with multiplicities\footnote{\emph{e.g.} a double real root is counted here as two roots.}. \end{lemma} \begin{lemma}\label{res:product} Let $U(t),V(t),W(t)$ be three univariate polynomials. Then \[ \Res(U,VW)=\Res(U,V)\Res(U,W). \] \end{lemma} And last: \begin{lemma}\label{res:remainder} Let $U(t),V(t)$ be two univariate polynomials, and $W$ the remainder in the euclidean division of $U$ by $V$. Let $c$ be the leading coefficient of $V$. Then \[ \Res(U,V)=(-1)^{\deg(U)\deg(V)} c^{\deg(U)-\deg(W)} \Res(V,W). \] \end{lemma} This is Lemma 4.27 in \cite{Basu:Pollack:Roy}, where a proof is provided. \subsubsection{Descartes' law of signs} Let $U(t)$ be a univariate polynomial. Then Descartes' law of signs gives some insight into its number $\mathcal{N}(U)$ of positive real roots, counted with multiplicities. Consider the sequence of the signs ($+$'s and $-$'s) of the (non-zero) coefficients of $U$ and denote with $\mathcal{V}(U)$ the number of sign changes between consecutive terms. The following lemma is Descartes' law of signs. It can be found as Theorem 2.34 in \cite{Basu:Pollack:Roy}. \begin{lemma} One has $\mathcal{V}(U) \geq \mathcal{N}(U)$, and $\mathcal{V}(U)-\mathcal{N}(U)$ is even. \end{lemma} Only the following particular consequence will be needed in the sequel: \begin{lemma}\label{Descartes:hyperbolic} Let $U(t)=u_3 t^3 + u_2 t^2 + u_1 t + u_0$ be of degree $3$. Suppose $U$ has all its roots real and non-zero. Then they all have the same sign if and only if $u_3 u_1 >0$ and $u_2 u_0 >0$. \end{lemma} It is obtained by applying Descartes' law of signs to $U(t)$ and $U(-t)$. \subsubsection{Subresultant sequences}\label{subsection:subresultants} Here we briefly introduce another tool: subresultant sequences. More details about them can be found in the book \cite{Basu:Pollack:Roy}. Let $U(t),V(t)$ be two univariate polynomials. One wants to know on how many\footnote{When dealing with subresultant sequences, the multiplicities of the roots are not taken into account, \emph{e.g.} a double root will be counted as one root.} of the (real) roots of $V$ the polynomial $U$ is positive, negative, or zero. The \emph{Sturm query of $U$ for $V$} is defined as the number of roots of $V$ making $U>0$, \emph{minus} the number of roots of $V$ making $U<0$. This information is easily accessible once one knows the signs of the $\deg(V)+1$ \emph{signed subresultant principal coefficients} of $V$ and $W$, where $W$ is the remainder in the euclidean division of $U \cdot V'$ by $V$. We give the formulas for these signed subresultant principal coefficients, and the procedure for getting the Sturm query from their signs, only for the particular case needed: when $V$ has degree $3$. Write \[ \begin{array}{cll} V &=v_3 t^3 + & v_2 t^2 + v_1 t + v_0\\ W &= & w_2 t^2 + w_1 t + w_0.
\end{array} \] Then \[ \begin{array}{c@{\qquad}c} \sr_3(V,W)=v_3,& \sr_2(V,W)=w_2, \\ \sr_1(V,W)= \left\vert \begin{matrix} v_3 & v_2 & v_1 \\ 0 & w_2 & w_1 \\ w_2 & w_1 & w_0 \end{matrix} \right\vert,& \sr_0(V,W)= \left\vert \begin{matrix} v_3 & v_2 & v_1 & v_0 & 0 \\ 0 & v_3 & v_2 & v_1 & v_0 \\ 0 & 0 & w_2 & w_1 & w_0 \\ 0 & w_2 & w_1 & w_0 & 0 \\ w_2 & w_1 & w_0 & 0 & 0 \end{matrix} \right\vert. \end{array} \] Note that $\sr_0(V,W)=-\Res(V,W)$, the opposite of the resultant of $V$ and $W$. The Sturm query is obtained from the sequence of the signs of $\sr_3,\sr_2,\sr_1,\sr_0$ in the following way\footnote{for this specific case with $4$ terms in the sign sequence.}: \begin{enumerate} \item If there is a pair of consecutive zeros, remove it and change the signs that follow to their opposites. \item From the resulting sequences of consecutive non-zero terms, compute the difference: number of sign permanences (identical consecutive terms, $++$ or $--$) \emph{minus} number of sign exchanges (opposite consecutive terms, $+-$ or $-+$). This gives the Sturm query\footnote{So for instance, the sign sequence $+0-0$ has neither a sign permanence nor a sign change (because there are no consecutive non-zero terms). For the sign sequence $+00-$, the Sturm query is computed as for $++$: one permanence and no change, which gives $1$.}. \end{enumerate} \subsection{Discriminating between the orbits of pencils} In this section, we give the equations and inequations characterizing the couples $(f,g)$ of non-degenerate quadratic forms generating a pencil in each of the orbits. We first use invariants and covariants whose vanishing depends only on the generated pencil, that is, those $C$ that, besides behaving well with respect to the action of $SL(3,\C)$: \[ C(f \circ \theta, g \circ \theta; x,y,z;t,u)=C(f,g;\theta(x,y,z);t,u) \qquad \forall \theta \in SL(3,\C) \] are covariant with respect to combinations of $f$ and $g$: \[ C(\theta(f,g); x,y,z;\theta(t,u))=C(f,g;x,y,z;t,u) \qquad \forall \theta \in SL(2,\C). \] Such objects are called \emph{combinants}. Obviously, the characteristic form $\Phi$ and its covariants are combinants. Remember that the algebra of the covariants of a binary cubic form $\Phi(t,u)$ is generated by the ground form $\Phi$, its discriminant\footnote{The discriminant of the characteristic form, $\Disc(\Phi)$, is called the \emph{Tact invariant} by the classics, because it vanishes exactly when the two conics are tangent \cite{Casey}.}, and its Hessian determinant, which are \[ \Disc(\Phi)= \frac{\operatorname{Res}(\phi,\phi')}{27 \Phi_{30}}, \quad H(t,u) = \left\vert \begin{matrix} \frac{\partial^2 \Phi}{\partial t^2} & \frac{\partial^2 \Phi}{\partial t\, \partial u} \\ \frac{\partial^2 \Phi}{\partial t \, \partial u} & \frac{\partial^2 \Phi}{\partial u^2} \end{matrix} \right\vert \] (the division is a simplification in the definition of the discriminant, that is, no $\Phi_{30}$ remains in the denominator). The covariant $G$ is also a combinant. The vanishing or non-vanishing of each of the combinants is a property of the orbits of pencils of conics. The sign of $\Disc(\Phi)$ is also invariant on each orbit of pencils of conics (because $\Disc(\Phi)$ has even degree in $f$ as well as in $g$). Thus we just evaluate the combinants on Levy's representatives, and we get the following result: \begin{proposition} Let $f,g$ be two non-proportional non-degenerate ternary quadratic forms. \begin{itemize} \item If $\Disc(\Phi)<0$ then $f,g$ generate a pencil in orbit {\PI} or {\PIa}.
\item If $\Disc(\Phi)>0$ then $f,g$ generate a pencil in orbit {\PIb}. \item If $\Disc(\Phi)=0$ then $f,g$ generate a pencil in one of the six other orbits. The following table indicates how the vanishings of $H$ and $G$ discriminate further between the orbits of pencils (under the hypothesis that the discriminant vanishes): \[ \begin{array}{|c|c|c|} \cline{2-3} \multicolumn{1}{c|}{} & H \neq 0 & H=0 \\ \hline G\neq 0 & {\PII},{\PIIa} & {\PIV} \\ \hline G=0 & {\PIII},{\PIIIa} & {\PV} \\ \hline \end{array}. \] \end{itemize} \end{proposition} \begin{remark} The fact that the coefficients of $G$ are linear combinations of maximal minors of the matrix $M$ defined in Section~\ref{subsection:invariants} suggests that the vanishing of $G$ is equivalent to: \emph{$M$ has rank two}. This is true. To see this, consider the image of the (complex) pencil generated by $[f]$ and $[g]$ by the quadratic mapping ``tangential quadratic form'' from $\P({S^2 \C^3}^*)$ to $\P(S^2 \C^3)$. It is an irreducible conic, thus either a proper conic or a line. One checks on Levy's representatives that it is a line exactly when $G=0$ (see also \cite{Berger}, 16.5.6.2). Finally, remark that the rows of $M$ are the coordinates of generators of the linear span of this conic. \end{remark} It now remains to discriminate between {\PI} and {\PIa}, between {\PII} and {\PIIa} and between {\PIII} and {\PIIIa}. For this we use that the numbers of degenerate conics of each type (pair of lines, isolated point or double line) in a pencil characterize its orbit, as shown in the table below, established by considering figures \ref{pencils:partI} and \ref{pencils:partII}. \[ \begin{tabular}{|c||c|c||c|c||c|c|} \hline orbit of pencils &{\PI}&{\PIa}&{\PII}&{\PIIa}&{\PIII}&{\PIIIa}\\[3pt] \hline num. pairs of lines & 3 & 1 & 2 & 1 & 1 & 0 \\ \hline num. isolated points & 0 & 2 & 0 & 1 & 0 & 1 \\ \hline num. (double) lines & 0 & 0 & 0 & 0 & 1 & 1 \\ \hline \end{tabular} \] This has an algebraic translation. Consider \[ \det(v \cdot I - \operatorname{Matrix}(t f + g)) \] that expands into \[ v^3 - \mu(t) v^2 + \psi(t) v - \phi(t). \] A degenerate conic of the pencil corresponds to a parameter $t$ that annihilates $\phi$, and is \begin{itemize} \item a pair of lines when $\operatorname{Matrix}(t f + g)$ has one eigenvalue positive, one negative, and one zero. Then $\psi(t)<0$. \item an isolated point when the matrix has one eigenvalue zero and the other two both positive or both negative. Then $\psi(t)>0$. \item a single line when the matrix has two eigenvalues zero, and one non-zero. Then $\psi(t)=0$. \end{itemize} Thus the discriminations can be performed by a Sturm query of $\psi$ for $\phi$. In order not to introduce denominators, we consider the euclidean division of $\Phi_{30} \psi \phi'$ by $\phi$, instead of the division of $\psi \phi'$ by $\phi$ suggested in Section~\ref{subsection:subresultants}. Set \begin{align*} P&=\operatorname{Remainder}(\Phi_{30} \cdot \psi \cdot \phi',\phi)\\ &=p_2 t^2 + p_1 t + p_0 \end{align*} and \[ A_i=\sr_i(\phi,P) \] for $i$ between $0$ and $3$. The consideration of the sign permanences and sign exchanges in $\Phi_{30}=A_3,A_2,A_1,A_0$ gives the Sturm query of $\Phi_{30} \psi$ for $\phi$. The Sturm query of $\psi$ for $\phi$ is the same as the Sturm query of $\Phi_{30}^2 \psi$ for $\phi$.
Using that $\sr_i(\phi,\Phi_{30} P)=\Phi_{30}^{4-i}\sr_i(\phi,P)$, we get that this Sturm query is obtained by considering the sign permanences and sign exchanges in $\Phi_{30},\Phi_{30}A_2,A_1,\Phi_{30}A_0$; or, more simply, those in $1,A_2,\Phi_{30}A_1,A_0$. The polynomial $A_1$ is \[ \left\vert \begin{matrix} \Phi_{30} & \Phi_{21} & \Phi_{12} \\ 0 & p_2 & p_1 \\ p_2 & p_1 & p_0 \end{matrix} \right\vert. \] And \[ A_0=-\operatorname{Res}(\phi,P). \] This simplifies. Applying Lemma \ref{res:remainder}, one gets \[ \operatorname{Res}(\Phi_{30} \psi \phi', \phi) = \Phi_{30}^2 \operatorname{Res}(\phi,P). \] And, on the other hand, from Lemma \ref{res:product} and the definition of $\Disc(\Phi)$: \[ \operatorname{Res}(\Phi_{30} \psi \phi', \phi) = 27\, \Phi_{30}^4 \cdot \operatorname{Res}(\psi, \phi) \cdot \Disc(\Phi). \] Thus \[ A_0=- 27\, \Phi_{30}^2 \Res(\psi,\phi) \Disc(\Phi). \] \subsubsection{Discriminating between {\PI} and {\PIa}} Suppose $f$ and $g$ generate a pencil in orbit {\PI} or {\PIa}. The Sturm query of $\psi$ for $\phi$ is $-3$ for orbit {\PI} and $1$ for orbit {\PIa}. The assumption that $f,g$ generate a pencil in orbit {\PI} or {\PIa} gives more information: \begin{lemma} If $f,g$ generate a pencil in orbit {\PI} or {\PIa}, then $A_0<0$. \end{lemma} \begin{proof2} We had established that $A_0=- 27\, \Phi_{30}^2 \Res(\psi,\phi) \Disc(\Phi)$. For orbit {\PI} or {\PIa}, one has $\Disc(\Phi)<0$. Moreover, $\operatorname{Res}(\psi,\phi)<0$ because, from Lemmas \ref{res:exchange} and \ref{res:roots}, \[ \operatorname{Res}(\psi,\phi)= \operatorname{Res}(\phi,\psi)= \Phi_{30}^2 \prod \psi(\rho), \] where the product is taken over the three roots $\rho$ of $\phi$. They make $\psi$ either three times negative (orbit {\PI}), or once negative and twice positive (orbit {\PIa}). In both cases, the product is negative. \end{proof2} There is only one sign sequence giving Sturm query $-3$, beginning with $+$ and finishing with $-$, namely $+-+-$. There are several sign sequences giving Sturm query $1$, beginning with $+$, finishing with $-$: \[ +++-\quad ++-- \quad ++0- \quad +--- \quad +0-- \quad +00-. \] We deduce from this the criterion stated in the following proposition. \begin{proposition} Let $f,g$ be non-degenerate quadratic forms generating a pencil in orbit {\PI} or {\PIa}. \begin{itemize} \item if it is orbit {\PI} then $p_2 < 0$ and $\Phi_{30}A_1 > 0$. \item if it is orbit {\PIa}, then $p_2 > 0$, or $\Phi_{30} A_1 < 0$, or $p_2=A_1=0$. \end{itemize} \end{proposition} \subsubsection{Discriminating between {\PII} and {\PIIa}} Suppose $f$ and $g$ generate a pencil in orbit {\PII} or {\PIIa}. Note first that $A_0=0$. The Sturm query of $\psi$ for $\phi$ is $-2$ for orbit {\PII} and $0$ for orbit {\PIIa}. There is only one sign sequence beginning with $+$ and finishing with $0$ that gives Sturm query $-2$, namely $+-+0$. Those giving Sturm query $0$ are \[ ++-0 \quad +--0 \quad +0+0 \quad +0-0 \quad +000. \] \begin{proposition} Let $f,g$ be non-degenerate quadratic forms generating a pencil in orbit {\PII} or {\PIIa}. \begin{itemize} \item if it is in orbit {\PII} then $p_2<0$ and $\Phi_{30} A_1 >0$. \item if it is in orbit {\PIIa}, then $p_2=0$ or $\Phi_{30} A_1 <0$. \end{itemize} \end{proposition} \subsubsection{Discriminating between {\PIII} and {\PIIIa}} Suppose $f$ and $g$ generate a pencil in orbit {\PIII} or {\PIIIa}. Once again, $A_0=0$. The Sturm query of $\psi$ for $\phi$ is $-1$ for orbit {\PIII}, $1$ for orbit {\PIIIa}.
The only sign sequence (beginning with $+$, terminating with $0$) giving Sturm query $-1$ is $+-00$. There is also only one giving Sturm query $1$, namely $++00$. \begin{proposition} Let $f,g$ be non-degenerate quadratic forms generating a pencil in orbit {\PIII} or {\PIIIa}. \begin{itemize} \item if it is in orbit {\PIII}, then $p_2 < 0$. \item if it is in orbit {\PIIIa}, then $p_2 > 0$. \end{itemize} \end{proposition} \subsection{Characterizing the rigid isotopy classes for pairs inside each pencil} Given $f,g$ two non-proportional non-degenerate quadratic forms, we suppose we know the orbit of the pencil they generate. By Lemma \ref{lemma:pairs}, one decides to which class $\{[f],[g]\}$ belongs by checking whether or not $[f]$ and $[g]$ lie on the same arc of their pencil. This corresponds to $\phi(t)$ having, or not, all its real roots of the same sign. The simplest way to translate this into algebraic identities is by using Descartes' rule of signs (precisely Lemma \ref{Descartes:hyperbolic}, because $\phi$ has all its roots real and non-zero in the considered cases). \begin{proposition} Let $[f],[g]$ be two distinct proper non-empty conics, generating a pencil in one of the orbits: {\PI}, {\PIa}, {\PII}, {\PIIa}, {\PIII}. Then $\{[f],[g]\}$ is in the class {\CN} if and only if \[ \Phi_{30}\Phi_{12}>0 \wedge \Phi_{03}\Phi_{21}>0. \] \end{proposition} \subsection{Which is inside?} Suppose the pair of conics is in one of the classes: {\CIaN}, {\CIIN}, {\CIIaN}, {\CIIIN}, {\CIIIaN}, {\CVN}. Which conic lies inside the other? Stated otherwise, for each class of nested pairs, we want to characterize the corresponding classes of couples. \subsubsection{The antisymmetric invariant solves the problem for pair classes {\CIIN}, {\CIIaN}, {\CIIIN}, {\CIIIaN}} The \emph{antisymmetric invariant} is \[ \mathcal{A}=\Phi_{30} \Phi_{12}^3-\Phi_{03}\Phi_{21}^3. \] First, it is homogeneous of even degree, namely $6$, in $f$ as well as in $g$. So its sign depends only on the algebraic conics, not on the quadratic forms defining them. Consider again Table \ref{table:orbit_representatives}. Set \begin{equation}\label{t:V} f=f_0 + t_1 g_0, \qquad g=f_0 + t_2 g_0. \end{equation} From figures \ref{pencils:partI} and \ref{pencils:partII}, for the cases {\PIa}, {\PII}, {\PIIa}, {\PIII}, {\PIIIa}, the inner conic is the one nearer to $f_0$, that is, the one whose parameter ($t_1$ or $t_2$) has the smaller absolute value. For case {\PV}, it is the one whose parameter is smaller. Evaluate the antisymmetric invariant on $(f,g)$. For {\PII}, {\PIIa}, {\PIII}, {\PIIIa}, we get each time a positive rational number times \[ \left(t_1 t_2 (t_1 - t_2)\right)^2 (t_1^2-t_2^2). \] This proves the following proposition. \begin{proposition} Suppose $([f],[g])$ is a couple of distinct proper non-empty conics, such that $\{[f],[g]\}$ is in class {\CIIN}, {\CIIaN}, {\CIIIN} or {\CIIIaN}. Then $[f=0]$ lies inside\footnote{Only in the neighborhood of the double intersection point for class {\CIIN}.} $[g=0]$ if and only if $\mathcal{A}(f,g)<0$. \end{proposition} For {\CVN}, the evaluation of the antisymmetric invariant gives zero, and for {\CIaN} it gives the expression \[ (t_1^2 -t_2^2)(t_1-t_2)^2 \left( (t_1+t_2)^2-(t_1 t_2 - 3)^2 \right) \] whose sign is not clear. We need other methods to solve the question in these two cases.
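The quantities entering this criterion are easily evaluated in a computer algebra system. The following minimal sketch (Python with SymPy; it is not part of the derivation) computes the coefficients of the characteristic form $\phi(t)=\det(\operatorname{Matrix}(tf+g))$ and the antisymmetric invariant $\mathcal{A}$ for an example pair of forms. The symmetric-matrix convention with halved off-diagonal entries, and the example pair itself, are assumptions of the sketch; no isotopy class is asserted for the example.
\begin{verbatim}
# Minimal SymPy sketch: characteristic form Phi and antisymmetric invariant.
from sympy import symbols, Matrix, Poly, Rational

t = symbols('t')

def mat(q):
    # q = (a200, a020, a002, a110, a101, a011); symmetric matrix of the form,
    # with off-diagonal entries equal to half the mixed coefficients (assumed).
    a200, a020, a002, a110, a101, a011 = [Rational(c) for c in q]
    h = Rational(1, 2)
    return Matrix([[a200,   h*a110, h*a101],
                   [h*a110, a020,   h*a011],
                   [h*a101, h*a011, a002]])

def phi_coeffs(f, g):
    # phi(t) = det(t*Mat(f) + Mat(g)) = Phi30 t^3 + Phi21 t^2 + Phi12 t + Phi03
    return Poly((t*mat(f) + mat(g)).det(), t).all_coeffs()

f = (1, 1, -4, 0, 0, 0)          # x^2 + y^2 - 4 z^2 (example only)
g = (4, 9, -1, 0, 0, 0)          # 4 x^2 + 9 y^2 - z^2 (example only)
P30, P21, P12, P03 = phi_coeffs(f, g)
A = P30*P12**3 - P03*P21**3      # the antisymmetric invariant
print(P30, P21, P12, P03, A)
\end{verbatim}
The coefficients obtained this way can then be fed into the decision procedure recapitulated below.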
\subsubsection{The antisymmetric covariant solves the problem for class {\CVN}} Instead of considering the antisymmetric invariant, we can consider the following \emph{antisymmetric covariant}\footnote{This is a quadratic form, and actually the antisymmetric invariant of the previous paragraph is its discriminant.}: \[ \mathcal{B}(f,g)= \Phi_{12} f - \Phi_{21} g. \] We consider its value on $f$, $g$ generating a pencil in orbit {\PV}. It is enough to look at Levy's representative. Consider $f$, $g$ as in (\ref{t:V}) for Levy's representative of orbit {\PV}. Then \[ \mathcal{B}(xz-y^2+t_1 x^2,xz-y^2+ t_2 x^2)=\frac{t_1-t_2}{4} x^2. \] Thus $\mathcal{B}(f,g)$ is a semi-definite quadratic form, negative when $t_1<t_2$ (that is, when $[f=0]$ lies inside $[g=0]$) and positive in the opposite case. For the purpose of calculation, we use the fact that one decides whether a semi-definite quadratic form is negative or positive merely by considering the sign of the trace of its matrix. Define $T=\operatorname{tr}(\mathcal{B}(f,g))$. \begin{proposition} If $f,g$ generate a pencil in orbit {\PV}, then the conic $[f=0]$ lies inside the conic $[g=0]$ if and only if $T<0$. \end{proposition} \subsubsection{Case {\CIaN}} This case is more difficult than the previous ones. Suppose $(f,g)$ is in class {\CIaN}. By Figure \ref{pencils:partI}, $\phi(t)$ has three roots of the same sign, two making $\psi>0$ (conics of the pencil degenerating into isolated points) and one making $\psi<0$ (the conic degenerating into a pair of lines). Denote them by $t_1,t_2,t_3$, with $|t_1|<|t_2|<|t_3|$. Denote also by $\nu$ their common sign (note that it is obtained as the sign of $-\Phi_{30}\Phi_{03}$). The sign of $\Phi_{30}\phi''(t_1)$ is $-\nu$ and the sign of $\Phi_{30}\phi''(t_3)$ is $\nu$ (because $\Phi_{30}\phi''$ is linear, with leading coefficient $6\;\Phi_{30}^2$, positive, so it is increasing; its root lies between $t_1$ and $t_3$). The sign of $\Phi_{30}\phi''(t_2)$ is unknown; denote it by $\varepsilon$. By Figure \ref{pencils:partI}, $[f=0]$ (resp. $[g=0]$) is inside the other \emph{iff} $\psi(t_1)<0$ (resp. $\psi(t_3)<0$). Thus we have the following table of signs: \[ \begin{array}{|c|c|c|c|c|} \cline{3-5} \multicolumn{2}{c|}{} & t_1 & t_2 & t_3 \\ \hline \multicolumn{2}{|c|}{\Phi_{30}\phi''} & -\nu & \varepsilon & \nu \\ \hline \psi & [f=0] \text{\ inside} & - & + & + \\ \cline{2-5} & [g=0] \text{\ inside} & + & + & - \\ \hline \hline \Phi_{30}\phi''\psi & [f=0] \text{\ inside} & \nu & \varepsilon & \nu \\ \cline{2-5} & [g=0] \text{\ inside} & -\nu & \varepsilon & -\nu \\ \hline \end{array} \] One sees that a Sturm query of $\Phi_{30}\phi''\psi$ for $\phi$ will give $3$, $2$ or $1$ in one situation and $-3$, $-2$ or $-1$ in the other (which is which depends on $\nu$), allowing one to obtain the relative position of the conics. Precisely, the Sturm queries corresponding to the situations \emph{$[f=0]$ inside \emph{vs.} $[g=0]$ inside} are given by the following table: \[ \begin{array}{|c|c|c|} \cline{2-3} \multicolumn{1}{c|}{} & \nu=+ & \nu=- \\ \hline \varepsilon=+ & 3\; \text{vs.\ } -1 & -1\; \text{vs.\ } 3 \\ \hline \varepsilon=- & 1\; \text{vs.\ } -3 & -3\; \text{vs.\ } 1 \\ \hline \varepsilon=0 & 2\; \text{vs.\ } -2 & -2\; \text{vs.\ } 2 \\ \hline \end{array} \] Let \begin{align*} Q&=\frac{1}{2}\operatorname{Remainder}(\Phi_{30}\phi''\phi'\psi,\phi)\\ &= q_2 t^2 + q_1 t + q_0.
\end{align*} Note that $Q$ can be defined in a simpler way from the already introduced polynomial $P=\operatorname{Remainder}(\Phi_{30}\phi'\psi,\phi)$, that is: $Q=\frac{1}{2}\operatorname{Remainder}(\phi''P,\phi)$. Define $B_i=\sr_i(\phi,Q)$ for $i$ between $0$ and $3$. Then \begin{align*} B_3&=\Phi_{30}\\ B_2&=q_2\\ B_1&= \left| \begin{matrix} \Phi_{30} & \Phi_{21} & \Phi_{12} \\ 0 & q_2 & q_1 \\ q_2 & q_1 & q_0 \end{matrix} \right|. \end{align*} Finally \[ B_0=-\Res(\phi,Q). \] This last polynomial simplifies. Using Lemma \ref{res:remainder}, one gets: \[ \Res(P\phi'',\phi)=- 8 \Phi_{30}\Res(\phi,Q)= 8 \Phi_{30} B_0. \] On the other hand, from Lemma \ref{res:product}, \[ \Res(P\phi'',\phi)=\Res(P,\phi)\Res(\phi'',\phi). \] From Lemma \ref{res:exchange}, $\Res(P,\phi)=\Res(\phi,P)$, and this is $-A_0$, which was proved to be equal to: \[ \Phi_{30}^2 \Res(\psi, \phi) \Disc(\Phi). \] Gathering this information, we get that: \[ B_0=\frac{1}{8} \Phi_{30} \Res(\psi,\phi) \Res(\phi'',\phi) \Disc(\Phi). \] It is convenient to remark here that $\Phi_{30}$ divides $\Res(\phi'',\phi)$. We will define \[ R:=\frac{\Res(\phi'',\phi)}{8 \Phi_{30}}. \] Thus \[ B_0= \Phi_{30}^2 \Res(\psi,\phi) R \Disc(\Phi). \] From lemmas \ref{res:exchange} and \ref{res:roots}, it follows that $\Res(\psi,\phi)=\Res(\phi,\psi)<0$ and $\Res(\phi'',\phi)=-\Res(\phi,\phi'')$ has the sign $\varepsilon$. Last, $\Disc(\Phi)<0$. Thus $B_0$ has the sign of $\varepsilon \Phi_{30}$. The sign sequences $s_1,s_2,s_3,s_4$ giving $3$ or $-3$ are characterized by $s_1 s_3 >0$ and $s_2 s_4 >0$. The sign sequences giving $2$ are $+++0$ and $---0$, those giving $-2$ are $+-+0$ and $-+-0$. The former are distinguished from the latter by $s_1 s_2>0$. If $\varepsilon \nu >0$, then $[f=0]$ is inside iff $\Phi_{30} B_1 >0$ and $q_2 \varepsilon \Phi_{30}>0$. If $\varepsilon \nu <0$, then this characterizes \emph{$[g=0]$ inside}. If $\varepsilon=0$, $[f=0]$ is inside iff $\nu \Phi_{30} q_2 >0$. Using that $\varepsilon$ is obtained as the sign of $\Res(\phi'',\phi)$: \begin{proposition} Suppose $f,g$ are two non-degenerate quadratic forms generating a pencil in orbit {\PIa}. Suppose that their zero loci are nested, that is, $\{[f],[g]\}$ is in class {\CIaN}. The following are necessary and sufficient conditions for $[f=0]$ to lie inside $[g=0]$: \begin{itemize} \item when $\Phi_{03} R < 0$, it is \[ \Phi_{30} B_1 >0 \text{\ and\ } \Phi_{03} q_2 <0; \] \item when $\Phi_{03} R >0$, it is \[ \Phi_{30} B_1 \leq 0 \text{\ or\ } \Phi_{03} q_2 \leq 0; \] \item when $R=0$, it is \[ \Phi_{03} q_2 <0. \] \end{itemize} \end{proposition} \subsection{Recapitulation} Here we display the explicit definitions of the polynomials appearing in the description of the rigid isotopy classes. Note that all these formulas are short: the complicated polynomials are expressed simply in terms of the less complicated ones. We also display the explicit description of the rigid isotopy classes. \subsubsection{Formulas} We will denote the two forms as follows: \[ \begin{array}{l} f(x,y,z)=a_{200}x^2+ a_{020} y^2 + a_{002} z^2 + a_{110} xy + a_{101} xz + a_{011} yz\\ g(x,y,z)=b_{200}x^2+ b_{020} y^2 + b_{002} z^2 + b_{110} xy + b_{101} xz + b_{011} yz. \end{array} \] We will denote similarly the coefficients of $\tilde{f},\tilde{g},\Omega$ with $\tilde{a}_{ijk},\tilde{b}_{ijk},\omega_{ijk}$ respectively.
One has: \begin{align*} \tilde{a}_{200} = \left| \begin{matrix} a_{020} & a_{011}/2 \\ a_{011}/2 & a_{002} \end{matrix} \right|, \qquad & \tilde{a}_{011} = -2\; \left| \begin{matrix} a_{200} & a_{110}/2 \\ a_{101}/2 & a_{011}/2 \end{matrix} \right|, \\ \tilde{a}_{020} = \left| \begin{matrix} a_{200} & a_{101}/2 \\ a_{101}/2 & a_{002} \end{matrix} \right|, \qquad & \tilde{a}_{101} = - 2\;\left| \begin{matrix} a_{020} & a_{110}/2 \\ a_{011}/2 & a_{101}/2 \end{matrix} \right|, \\ \tilde{a}_{002} = \left| \begin{matrix} a_{200} & a_{110}/2 \\ a_{110}/2 & a_{020} \end{matrix} \right|, \qquad & \tilde{a}_{110} = -2\; \left| \begin{matrix} a_{002} & a_{011}/2 \\ a_{101}/2 & a_{110}/2 \end{matrix} \right|. \end{align*} Similarly the $\tilde{b}_{ijk}$'s are defined from the $b_{ijk}$'s, and \begin{align*} \omega_{200}&= a_{020}b_{002}+a_{002}b_{020}-a_{011}b_{011}/2, \\ \omega_{020}&= a_{002}b_{200}+a_{200}b_{002}-a_{101}b_{101}/2, \\ \omega_{002}&= a_{020}b_{200}+a_{200}b_{020}-a_{110}b_{110}/2, \\ \omega_{011}&= a_{200}b_{011}+a_{011} b_{200} - a_{110}b_{101}/2 - a_{101}b_{110}/2, \\ \omega_{101}&= a_{020}b_{101}+a_{101} b_{020} - a_{011}b_{110}/2 - a_{110}b_{011}/2, \\ \omega_{110}&= a_{002}b_{110}+a_{110} b_{002} - a_{011}b_{101}/2 - a_{101}b_{011}/2. \end{align*} The (de-homogenized) characteristic form is \[ \phi(t)=\Phi_{30}t^3+\Phi_{21} t^2 + \Phi_{12} t + \Phi_{03} =\Disc(t f+g). \] Note that: \[ \Phi_{30}= a_{200}\tilde{a}_{200} + a_{110}\tilde{a}_{110} + a_{101}\tilde{a}_{101}, \] and \[ \Phi_{21}= b_{200}\tilde{a}_{200} + b_{002}\tilde{a}_{002}\\ + b_{020}\tilde{a}_{020} + b_{110}\tilde{a}_{110}\\ + b_{101}\tilde{a}_{101} + b_{011}\tilde{a}_{011}. \] There are similar formulas for $\Phi_{03}$ and $\Phi_{12}$, by exchanging $a$ and $b$. The discriminant of the characteristic form can be obtained as \[ \Disc(\Phi)= \frac{1}{81} \left\vert \begin{matrix} 3 \Phi_{30} & 2 \Phi_{21} & \Phi_{12} & 0\\ 0 & 3 \Phi_{30} & 2 \Phi_{21} & \Phi_{12} \\ \Phi_{21} & 2 \Phi_{12} & 3 \Phi_{03} & 0 \\ 0 & \Phi_{21} & 2\Phi_{12} & 3 \Phi_{03} \end{matrix} \right\vert, \] and its Hessian determinant as \begin{align} H &= H_{20}t^2 + H_{11} tu + H_{02} u^2 \\ &= 4 \left\vert \begin{matrix} 3 \Phi_{30} & \Phi_{21} \\ \Phi_{21} & \Phi_{12} \end{matrix} \right\vert \; t^2 +4 \left\vert \begin{matrix} 3 \Phi_{30} & \Phi_{12} \\ \Phi_{21} & 3 \Phi_{03} \end{matrix} \right\vert\; t\;u +4 \left\vert \begin{matrix} \Phi_{21} & \Phi_{12} \\ \Phi_{12} & 3 \Phi_{03} \end{matrix} \right\vert \; u^2. \end{align} The autopolar triangle covariant is: \begin{multline*} G=- [\bar{1}\,2\,3] x^3 - [1\,\bar{2}\,3] y^3 - [1\,2\,\bar{3}] z^3 + ([1\,\bar{1}\,3]+2[\bar{3}\,\bar{2}\,3])x y^2 \\ + ([1\,2\,\bar{1}]+2[\bar{2}\,2\,\bar{3}])x z^2 + ([\bar{2}\,2\,3]+2[\bar{1}\,\bar{3}\,3])y x^2 + ([1\,2\,\bar{2}]+2[1\,\bar{1}\,\bar{3}])y z^2\\ + ([\bar{3}\,2\,3]+2[\bar{1}\,2\,\bar{2}])z x^2 + ([1\,\bar{3}\,3]+2[1\,\bar{2}\,\bar{1}])z y^2 + ([1\,2\,3]+4[\bar{1}\,\bar{2}\,\bar{3}]) xyz, \end{multline*} where $[i\,j\,k]$ denote the maximal minors of the matrix \[ M= \left[ \begin{matrix} \tilde{a}_{200} & \tilde{a}_{020} & \tilde{a}_{002} & \tilde{a}_{011} & \tilde{a}_{101} & \tilde{a}_{110} \\ \omega_{200} & \omega_{020} & \omega_{002} & \omega_{011} & \omega_{101} & \omega_{110} \\ \tilde{b}_{200} & \tilde{b}_{020} & \tilde{b}_{002} & \tilde{b}_{011} & \tilde{b}_{101} & \tilde{b}_{110} \\ \end{matrix} \right] \] whose columns have been labeled $1,2,3,\bar{1},\bar{2},\bar{3}$.
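These coefficient formulas are straightforward to transcribe into a computer algebra system. The following sketch (Python with SymPy, illustrative only, with an arbitrary example pair) does so and reports the rank of $M$, which, by the remark following the classification of orbits of pencils, is two exactly when $G$ vanishes. The example pair is an assumption of the sketch.
\begin{verbatim}
# SymPy transcription of the adjugate and mixed coefficients; assembles M.
from sympy import Rational, Matrix

def adjugate_coeffs(q):
    a200, a020, a002, a110, a101, a011 = q
    return [a020*a002 - a011**2/4,                       # order of entries:
            a200*a002 - a101**2/4,                       # 200, 020, 002,
            a200*a020 - a110**2/4,                       # 011, 101, 110
            -a200*a011 + a110*a101/2,
            -a020*a101 + a110*a011/2,
            -a002*a110 + a011*a101/2]

def omega_coeffs(a, b):
    a200, a020, a002, a110, a101, a011 = a
    b200, b020, b002, b110, b101, b011 = b
    return [a020*b002 + a002*b020 - a011*b011/2,
            a002*b200 + a200*b002 - a101*b101/2,
            a020*b200 + a200*b020 - a110*b110/2,
            a200*b011 + a011*b200 - a110*b101/2 - a101*b110/2,
            a020*b101 + a101*b020 - a011*b110/2 - a110*b011/2,
            a002*b110 + a110*b002 - a011*b101/2 - a101*b011/2]

f = [Rational(c) for c in (1, 1, -4, 0, 0, 0)]   # x^2 + y^2 - 4 z^2 (example)
g = [Rational(c) for c in (4, 9, -1, 0, 0, 0)]   # 4 x^2 + 9 y^2 - z^2 (example)
M = Matrix([adjugate_coeffs(f), omega_coeffs(f, g), adjugate_coeffs(g)])
print('rank of M =', M.rank())                   # rank two exactly when G = 0
\end{verbatim}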
\noindent Denote the coefficients of $\psi$ as follows: \[ \psi(t)=\Psi_{20}\;t^2+ 2 \; \Psi_{11} \;t + \Psi_{02}, \] (note that the coefficient of $t$ is $2 \Psi_{11}$) then \[ \Psi_{20}= \tilde{a}_{200}+\tilde{a}_{020}+\tilde{a}_{002}, \] $\Psi_{02}$ is the corresponding expression with $b$ instead of $a$, and \[ \Psi_{11}= \frac{1}{2} \left( \omega_{200}+\omega_{020}+\omega_{002} \right). \] There is also $\mu=\mu_{10}t+\mu_{01}$. Then \[ \mu_{10}=a_{200}+a_{020}+a_{002} \] and $\mu_{01}$ is defined by the corresponding formula with $b$ instead of $a$. \noindent The polynomial $P$ for the Sturm query of $\psi$ for $\phi$ is \[ P=\operatorname{Remainder}(\Phi_{30}\phi'\psi,\phi) =p_2 \;t^2 + p_1\; t + p_0 \] with \begin{align*} p_2&= 3 \Phi_{30}^2 \Psi_{02} - 2 \Phi_{21} \Phi_{30} \Psi_{11} -2 \Phi_{12} \Phi_{30} \Psi_{20} + \Phi_{21}^2 \Psi_{20},\\ p_1&= 2 \Phi_{21} \Phi_{30} \Psi_{02} - 4 \Phi_{12} \Phi_{30} \Psi_{11} + \Phi_{12} \Phi_{21} \Psi_{20} - 3 \Phi_{03} \Phi_{30} \Psi_{20},\\ p_0&= \Phi_{12} \Phi_{30} \Psi_{02} - 6 \Phi_{03} \Phi_{30} \Psi_{11} + \Phi_{03} \Phi_{21} \Psi_{20}. \end{align*} The subresultant $A_1$ is \[ A_1 = \left\vert \begin{matrix} \Phi_{30} & \Phi_{21} & \Phi_{12} \\ 0 & p_2 & p_1 \\ p_2 & p_1 & p_0 \end{matrix} \right\vert. \] The antisymmetric invariant is \[ \mathcal{A}=\Phi_{30}\Phi_{12}^3-\Phi_{03}\Phi_{21}^3, \] and the trace of the antisymmetric covariant is \[ T= \Phi_{12}\mu_{10} - \Phi_{21}\mu_{01}. \] The polynomial $Q$ for the Sturm query of $\Phi_{30} \phi'' \psi$ for $\phi$ is: \[ Q=\frac{1}{2}\operatorname{Remainder}(P\;\phi'',\phi) = \frac{1}{2}\left(P\;\phi''-6 p_2 \phi\right) = q_2 \;t^2 + q_1\; t + q_0. \] Its coefficients are \begin{align*} q_2 &= 3 p_1 \Phi_{30} - 2 p_2 \Phi_{21}, \\ q_1 &= 3 p_0 \Phi_{30} + p_1 \Phi_{21} -3 p_2 \Phi_{12}, \\ q_0 &= p_0 \Phi_{21} - 3 p_2 \Phi_{03}. \end{align*} The subresultant $B_1$ is \[ B_1= \left\vert \begin{matrix} \Phi_{30} & \Phi_{21} & \Phi_{12} \\ 0 & q_2 & q_1 \\ q_2 & q_1 & q_0 \end{matrix} \right\vert. \] The last quantity to consider is \[ R= 27 \Phi_{30}^2 \Phi_{03} + 2 \Phi_{21}^3 -9 \Phi_{30} \Phi_{21} \Phi_{12}. \] Each of these expressions is homogeneous in the coefficients of $f$ as well as in the coefficients of $g$. The following table gives their bi-degree. \[ \begin{array}{cl@{\qquad}cl@{\qquad}cl} \tilde{a}_{\alpha} &: (2,0) & H_{i,j} &: (2+i,2+j) & \mathcal{A} &: (6,6) \\ \omega_{\alpha} &: (1,1) & G &: (3,3) & T &: (2,2) \\ \tilde{b}_{\alpha} &: (0,2) & \Psi_{i,j} &: (i,j) & q_2 &: (8,3) \\ \Phi_{ij} &: (i,j) & p_2 &: (6,2) & B_1 &: (17,8) \\ \Disc(\Phi) &: (6,6) & A_1 &: (13,6) & R &: (6,3) \end{array} \] \subsubsection{The decision procedure} \paragraph{First step: decide the orbit of pencils.} Here are the descriptions of the sets of couples of distinct proper conics generating a pencil in a given orbit. \begin{align*} {\PI}:\quad& \Disc(\Phi)<0 \wedge p_2<0 \wedge \Phi_{30} A_1 > 0 \\ {\PIa}:\quad& \Disc(\Phi)<0 \wedge \left[ p_2 > 0 \vee \Phi_{30}A_1 < 0 \vee \left[ A_1=0 \wedge p_2=0 \right] \right] \\ {\PIb}:\quad& \Disc(\Phi) > 0 \\ {\PII}:\quad& \Disc(\Phi)=0 \wedge H \neq 0 \wedge G \neq 0 \wedge p_2<0 \wedge \Phi_{30} A_1 > 0 \\ {\PIIa}:\quad& \Disc(\Phi)=0 \wedge H \neq 0 \wedge G \neq 0 \wedge \left[ p_2 = 0 \vee \Phi_{30} A_1 < 0 \right] \\ {\PIII}:\quad& \Disc(\Phi)=0 \wedge H \neq 0 \wedge G = 0 \wedge p_2 < 0 \\ {\PIIIa}:\quad& \Disc(\Phi)=0 \wedge H \neq 0 \wedge G = 0 \wedge p_2 > 0 \\ {\PIV}:\quad& H=0 \wedge G\neq 0 \\ {\PV}:\quad& H=0 \wedge G=0.
\end{align*} \paragraph*{Second step: decide the class of pairs.} There is only one rigid isotopy class for pairs (class {\CN}) corresponding to each of the orbits of pencils {\PIb}, {\PIIIa}, {\PIV}, {\PV}. There are two classes ({\CN} or {\CS}) corresponding to {\PI}, {\PIa}, {\PII}, {\PIIa}, {\PIII}. The criterion for being in the class {\CN} is: \[ \Phi_{30}\Phi_{12}>0 \wedge \Phi_{03}\Phi_{21}>0. \] \paragraph{Third step (nested cases): decide which of the conics is inside the other.} The classes of pairs splitting into two classes of couples are: {\CIaN}, {\CIIN}, {\CIIaN}, {\CIIIN}, {\CIIIaN}, {\CVN}. The criteria for $[f=0]$ to lie inside $[g=0]$ are the following: \begin{itemize} \item {\CIIN}, {\CIIaN}, {\CIIIN}, {\CIIIaN}: $\mathcal{A}<0$. \item {\CVN}: $T<0$. \item {\CIaN}: \[ \begin{array}{|c|c|c|c|} \cline{2-4} \multicolumn{1}{c|}{}& \multicolumn{3}{|c|}{\text{\ sign\ of\ }\Phi_{03} R}\\ \cline{2-4} \multicolumn{1}{c|}{}& - & + & 0 \\ \hline \text{criterion\ } & \Phi_{30}B_1>0 & \Phi_{30}B_1\leq 0 & \\ \text{for\ }[f=0]& \wedge & \vee & \Phi_{03}q_2<0 \\ \text{inside}& \Phi_{03}q_2<0 & \Phi_{03}q_2 \leq 0& \\ \hline \end{array} \] \end{itemize} \section{Examples and applications}\label{section:examples} We now consider examples and applications of our work. In all of them, we specialize the above general formulas to pairs of quadratic forms depending on parameters. We obtain a complicated description of the partition of the parameter space into the subsets corresponding to the isotopy classes. We then use Christopher Brown's program \emph{SLFQ} for the simplification of large quantifier-free formulas \cite{SLFQ} to get simpler descriptions. \subsection{Two ellipsoids} We consider two ellipsoids given by the equations (example 2 in \cite{Wang:Wang:Kim}): \begin{eqnarray*} x^2+y^2+z^2-25=0,\\ \frac{(x-6)^2}{9}+\frac{y^2}{4}+\frac{z^2}{16}-1=0. \end{eqnarray*} We then consider them as the equations in $x,y$ of two affine conics depending on a parameter $z$. This corresponds to using a sweeping plane to explore the two ellipsoids. We homogenize the equations in $x,y$ with $t$, thus considering: \begin{eqnarray*} f=&x^2+y^2+t^2(z^2-25),\\ g=&\frac{(x-6t)^2}{9}+\frac{y^2}{4}+t^2\left(\frac{z^2}{16}-1\right). \end{eqnarray*} The quantity $\Disc(\Phi)$ is here $h=49 z^4 + 2516 z^2 - 229376$. One checks easily that $h$ has two simple real roots $z_0,-z_0$ with $0<z_0<4$. When the two conics are proper and non-empty, that is when $-4<z<4$, one finds, using our formulas, that the following classes can occur: \begin{itemize} \item[$\bullet$] {\CIaS} when $h>0$, that is $-4<z<-z_0$ or $z_0<z<4$. \item[$\bullet$] {\CIIaS} when $h=0$, that is $z=\pm z_0$. \item[$\bullet$] {\CIbN} when $h<0$, that is $-z_0<z<z_0$. \end{itemize} Each ellipsoid passes through the other. \subsection{A paraboloid and an ellipsoid} Our equations, inequations and inequalities can determine the relative position of any two conics, not only ellipses, thanks to the choice of working in the projective plane. Thus we can also apply the method of the previous example to any kind of quadric.
In the following example, one considers a paraboloid and an ellipsoid: \begin{eqnarray*} 4 x^2-4 xy + 2 y^2 - 4 xz +14 x - 6y +2z^2-10 z +12=0,\\ 3 x^2-4 xy+2y^2 -4 xz+16 x + 2 yz - 12 y +2 z^2-16 z+ 39=0. \end{eqnarray*} As before, we consider $z$ as a parameter and homogenize the equations in $x,y$ with $t$, thus considering: \begin{eqnarray*} f=&4 x^2-4 xy + 2 y^2 +t(- 4 xz +14 x - 6y) +t^2(2z^2-10 z +12),\\ g=&3 x^2-4 xy+2y^2 +t(-4 xz+16 x + 2 yz - 12 y) +t^2(2 z^2-16 z+ 39). \end{eqnarray*} We specialize our equations, inequations and inequalities and run \emph{SLFQ}. Let $z_0=-1/4$ and $z_1<z_2$ be the two roots of $z^2-12 z + 34$. One finds that $z_0 < z_1$, and that $[f=0]$ is proper and non-empty when $z>z_0$, while $[g=0]$ is proper and non-empty when $z_1<z<z_2$. When both are proper and non-empty, one finds that the isotopy class is always {\CIaN}. Thus the ellipsoid is inside the paraboloid. \subsection{Uhlig's canonical forms} In \cite{Uhlig}, Uhlig presented representatives for the orbits under $GL(n,\R)$ of couples of quadratic forms generating a non-degenerate pencil. For conics ($n=3$), it follows from Uhlig's presentation that any couple of conics can be transformed, by means of $PSL(3,\R)$, into one with associated couple of matrices among: \[ \begin{array}{l@{,}l@{\quad}l@{\qquad}l@{,}l@{\quad}l} \left[ \begin{matrix} 1 & & \\ & 1 & \\ & & 1 \end{matrix} \right] & \left[ \begin{matrix} \lambda_1 & & \\ &\lambda_2 & \\ & & \lambda_3 \end{matrix} \right] & {(U_{11})}; & \left[ \begin{matrix} 1 & & \\ & 1 & \\ & & -1 \end{matrix} \right] & \left[ \begin{matrix} \lambda_1 & & \\ &\lambda_2 & \\ & & -\lambda_3 \end{matrix} \right] & {(U_{12})}; \\ \left[ \begin{matrix} & 1 & \\ 1 & & \\ & & 1 \end{matrix} \right] & \left[ \begin{matrix} & \lambda_1 & \\ \lambda_1 & 1 & \\ & & \lambda_2 \end{matrix} \right] & {(U_{21})}; & \left[ \begin{matrix} & 1 & \\ 1 & & \\ & & -1 \end{matrix} \right] & \left[ \begin{matrix} & \lambda_1 & \\ \lambda_1 & 1 & \\ & & -\lambda_2 \end{matrix} \right] & {(U_{22})}; \\ \left[ \begin{matrix} & 1 & \\ 1 & & \\ & & 1 \end{matrix} \right] & \left[ \begin{matrix} b & a & \\ a & -b & \\ & & \lambda \end{matrix} \right] & {(U_{31})}; & \left[ \begin{matrix} & 1 & \\ 1 & & \\ & & -1 \end{matrix}\right] & \left[ \begin{matrix} b & a & \\ a & -b & \\ & & -\lambda \end{matrix} \right] & {(U_{32})}; \\ \multicolumn{6}{c}{ \begin{array}{l@{,}l@{\quad}l} \left[ \begin{matrix} & & 1\\ & 1 & \\ 1 & & \end{matrix} \right] & \left[ \begin{matrix} & & \lambda\\ & \lambda & 1 \\ \lambda & 1 & \end{matrix}\right] & {(U_4)}. \end{array} } \end{array} \] To which configuration does each of these normal forms correspond? We find simple descriptions of the subsets of each parameter space corresponding to the isotopy classes. As an illustration, we show the result for $U_{21}$. \begin{itemize} \item $g$ is degenerate when $\lambda_1=0$ or $\lambda_2=0$. \item the conics are in class {\CVN} when $\lambda_1=\lambda_2 \neq 0$. In this case, $[g=0]$ lies inside $[f=0]$. \item in the other cases, the conics are in class {\CIIN}, {\CIIS}, {\CIIaN} or {\CIIaS}, as shown in Figure \ref{U21}. \end{itemize} \begin{figure} \caption{Isotopy classes for the representatives $U_{21}$.} \label{U21} \end{figure} \section{Final remarks} For clarity of the exposition, we have not considered the case when one conic, or both conics, are degenerate; but it is easy to list the corresponding isotopy classes and describe them with equations, inequations and inequalities.
Remark that the polynomials involved in the description of the classes, especially the invariants and covariants, often have very compact expressions in terms of the less complicated ones. Thus they can be evaluated with substantial savings in arithmetic operations, as was pointed out in \cite{D:F:M:T}. The next, more ambitious, step is the classification of couples of quadrics in $\RP^3$. Hopefully some of the methods developed in the present paper will be useful for this task, to which we wish to return in another paper. It follows from our study that the rigid isotopy classes for couples of conics are almost entirely characterized by the behavior of the signature function on the pencil generated by the quadratic forms. For the non-generic classes, this provides a precise answer to a question formulated in \cite{Wang:Krasauskas}. We also plan to develop this point in a forthcoming paper with B. Mourrain. Finally, the reader will find some implementations and complements on the subject on the author's web page devoted to the paper: \newline \texttt{http://emmanuel.jean.briand.free.fr/publications/twoconics} \end{document}
\begin{document} \interfootnotelinepenalty=10000 \title{Quantum information causality} \author{Dami\'an Pital\'ua-Garc\'ia} \affiliation{Centre for Quantum Information and Foundations, DAMTP, Centre for Mathematical Sciences, University of Cambridge, Wilberforce Road, Cambridge, CB3 0WA, United Kingdom} \begin{abstract} How much information can a transmitted physical system fundamentally communicate? We introduce the principle of \emph{quantum information causality}, which states the maximum amount of quantum information that a quantum system can communicate as a function of its dimension, independently of any previously shared quantum physical resources. We present a new quantum information task, whose success probability is upper bounded by the new principle, and show that an optimal strategy to perform it combines the quantum teleportation and superdense coding protocols with a task that has classical inputs. \end{abstract} \maketitle Quantum information science studies how information can fundamentally be encoded, processed and communicated via systems described by quantum physics \cite{NielsenandChuangbook}. Interesting features of information arise with this approach. The no-cloning theorem states that unknown quantum states cannot be copied perfectly \cite{WZ82,D82}. Unknown quantum states can be teleported \cite{teleportation}. Two classical bits can be encoded in one qubit via the superdense coding protocol \cite{sdc}. Fundamentally-secure cryptography can be achieved with quantum information protocols \cite{BB84,E91,BHK05}. Many of the quantum information protocols are possible due to quantum entanglement: two systems are entangled if their global quantum state cannot be expressed as a convex combination of individual states in a tensor product form. Another interesting property is quantum nonlocality, that is, measurement outcomes of separate systems can exhibit correlations that cannot be described by local classical models \cite{EPR35,Bell}. Since the value of quantum correlations does not vary with the time difference of the measurements and the distance between the systems, one could think that they can be used to communicate arbitrarily-fast messages. However, quantum physics obeys the no-signaling principle. No-signaling says that a measurement outcome obtained by a party (Bob) does not provide him with any information about what measurement is performed by another party (Alice) at a distant location, despite any nonlocal correlations previously shared by them \cite{GRW80}. If any information that Alice has is to be learned by Bob, no-signaling requires that a physical system sharing correlations with Alice's system must be transmitted to him. Thus, an interesting question to ask is: how much information can a physical system fundamentally communicate? In the scenario in which Alice has a classical random variable $X$, she encodes its value in a quantum state that she sends Bob and Bob applies a quantum measurement on the received state in order to obtain a classical random variable $Y$ as the output, the Holevo theorem \cite{K73} provides an upper bound on the classical mutual information between $X$ and $Y$. In the scenario in which Alice sends Bob $m$ classical bits, information causality states that the increase of the mutual information between Bob's and Alice's systems is upper bounded by $m$, independently of any no-signaling physical resources that Alice and Bob previously shared \cite{ic}. 
Information causality has important implications for the set of quantum correlations \cite{ic,ABPS09,CSS10,GWAN11,YCATS12}. For example, it implies the Cirel'son bound \cite{C80}, while the no-signaling principle does not \cite{PR94}. Here we consider the scenario in which Bob receives a quantum system from Alice, who possibly shares quantum correlations with another party, Charlie, and ask the question: how much quantum information can Bob obtain about Alice's or Charlie's data? \footnote{A different question, investigated in Refs.~\cite{CMMPPP12,SKB12}, is how much entanglement can increase under local operations and quantum communication.} We introduce a new principle that we call \emph{quantum information causality}, which states that the maximum amount of quantum information that a quantum system can communicate is limited by its dimension, independently of any quantum physical resources previously shared by the communicating parties. Namely, the principle says that \emph{the increase of the quantum mutual information between Bob's and Charlie's systems, after a quantum system of $m$ qubits is transmitted from Alice to Bob, is upper bounded by $2m$}. In order to illustrate quantum information causality, we introduce a new quantum task that we call the \emph{quantum information causality (QIC) game} (see Fig.~\ref{fig1}). \emph{The QIC game (version I)}. Initially, Alice and Bob may share an arbitrary entangled state. However, they do not share any correlations with Charlie. Let $A'$ and $B$ denote the quantum systems at Alice's and Bob's locations, respectively. Charlie prepares the qubits $A_j$ and $C_j$ in the singlet state $\lvert\Psi^-\rangle$, for $j = 0, 1,\ldots, n-1$. Charlie keeps the system $C \equiv C_0C_1\cdots C_{n-1}$ and sends Alice the system $A\equiv A_0A_1\cdots A_{n-1}$. Charlie generates a random integer $k\in\lbrace 0,1,\ldots,n-1\rbrace$ and gives it to Bob. Bob gives Charlie a qubit $B_k$, whose joint state with the qubit $C_k$, denoted as $\omega_k$, must be as close as possible to the singlet. Alice and Bob may play any strategy allowed by quantum physics as long as the following constraint is satisfied: their communication is limited to a single message from Alice to Bob only, encoded in a quantum system $T$ of $m < n$ qubits, with no extra classical communication allowed. Let $B'$ denote the joint system $BT$ after Bob's quantum operations. In general, the qubit $B_k$ is obtained by Bob from $B'$. Charlie applies a Bell measurement (BM) on the joint system $C_kB_k$. Alice and Bob win the game if Charlie obtains the outcome corresponding to the singlet. The success probability is \begin{equation} \label{eq:m2} P\equiv\frac{1}{n}\sum_{k=0}^{n-1}\langle\Psi^-\rvert\omega_k\lvert\Psi^-\rangle. \end{equation} \begin{figure} \caption{\label{fig1} The QIC game.} \end{figure} In version II of the QIC game, Charlie does not prepare singlets. Instead, Charlie prepares $n$ qubits in the pure states $\lbrace\lvert\psi_j\rangle\rbrace_{j=0}^{n-1}$ that he gives Alice. Bob outputs a qubit $B_k$ in the state $\rho_k$. Charlie measures $B_k$ in the orthonormal basis $\lbrace\lvert\psi_k\rangle,\lvert\psi_k^\bot\rangle\rbrace$. Alice and Bob win the game if Charlie's outcome corresponds to the state $\lvert\psi_k\rangle$. This version is equivalent to version I and its success probability $p$ satisfies: $p=(1+2P)/3$ (see details in the Supplemental Material). For convenience, in what follows we only refer to version I of the QIC game, unless otherwise stated.
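As a quick numerical illustration of this equivalence, the following sketch (Python with NumPy; it is not part of the protocol) models the overall action on the qubit $A_k$ by a depolarizing channel with an example parameter $\lambda$, computes the singlet fidelity $P$ of version I, estimates the average fidelity $p$ of version II over Haar-random input states, and checks that $p\approx(1+2P)/3$. The specific channel and the parameter value are assumptions of the sketch.
\begin{verbatim}
# NumPy sketch: check p = (1 + 2P)/3 for an example (depolarizing) strategy.
import numpy as np

rng = np.random.default_rng(0)
I2 = np.eye(2)
X = np.array([[0, 1], [1, 0]], dtype=complex)
Y = np.array([[0, -1j], [1j, 0]])
Z = np.array([[1, 0], [0, -1]], dtype=complex)

lam = 0.8                                 # example value, 1/4 <= lam <= 1
kraus = [np.sqrt(lam) * I2] + [np.sqrt((1 - lam) / 3) * S for S in (X, Y, Z)]
chan = lambda rho: sum(K @ rho @ K.conj().T for K in kraus)

# Version I: singlet fidelity P of (id x channel) applied to |Psi^->
psi_m = np.array([0, 1, -1, 0], dtype=complex) / np.sqrt(2)
rho_s = np.outer(psi_m, psi_m.conj())
omega = sum(np.kron(I2, K) @ rho_s @ np.kron(I2, K).conj().T for K in kraus)
P = np.real(psi_m.conj() @ omega @ psi_m)

# Version II: average input-output fidelity p over Haar-random pure states
fid = []
for _ in range(2000):
    v = rng.normal(size=2) + 1j * rng.normal(size=2)
    v /= np.linalg.norm(v)
    fid.append(np.real(v.conj() @ chan(np.outer(v, v.conj())) @ v))
print(P, np.mean(fid), (1 + 2 * P) / 3)   # the last two numbers should agree
\end{verbatim}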
Consider the following \emph{naive} strategy to play the QIC game. Alice simply sends Bob $m$ of the $n$ received qubits from Charlie without applying any operations on these. Alice and Bob previously agree on which qubits Alice would send Bob, for example, those with index $0 \leq j < m$. If Bob receives from Charlie a number $k < m$, he outputs the correct state; in this case, $\langle\Psi^-\rvert\omega_k\lvert\Psi^-\rangle=1$. However, if $m \leq k$, Bob does not have the correct state, hence, he can only give Charlie a fixed state, say $\lvert 0 \rangle$; in this case, $\langle\Psi^-\rvert\omega_k\lvert\Psi^-\rangle=1/4$. Thus, this strategy succeeds with probability $P_{\text{N}}=(1+3m/n)/4$, where the label N stands for \emph{naive}. There are other strategies that achieve success probabilities higher than $P_{\text{N}}$. However, it turns out that in general, $P < 1$, if $m < n$. We show that this follows from quantum information causality. The principle of \emph{quantum information causality} states an upper bound on the amount of quantum information that $m$ qubits can communicate: \begin{equation} \label{eq:m4} \Delta I(C:B) \leq 2m, \end{equation} where $\Delta I(C:B) \equiv I(C:B')- I(C:B)$ is Bob's gain of quantum information about $C$, $I(C:B) \equiv S(C) + S(B)-S(CB)$ is the quantum mutual information \cite{NielsenandChuangbook} between $C$ and $B$, $S(C)$ is the von Neumann entropy \cite{NielsenandChuangbook} of $C$, etc., $B'$ denotes the joint system $BT$ after Bob's quantum operations. Since the quantum mutual information quantifies the total correlations between two quantum systems \cite{HV01,OZ02,GPW05}, we consider $\Delta I(C:B)$ to be a good measure for the communicated quantum information \footnote{Note that Refs.~\cite{HV01,OZ02,GPW05} propose measures for the purely classical and purely quantum parts of the correlations between two quantum systems, whose sum is equal to the quantum mutual information (see Ref. \cite{MBCPV12} for a review). We do not consider such a classification in our discussion.}. The proof is very simple. By definition, $I(C:BT) = S(C) + S(BT)-S(CBT)$. Subadditivity \cite{LR68} states that $S(BT) \leq S(B) + S(T)$. The triangle inequality \cite{AL70}, $\lvert S(CB) - S(T)\rvert \leq S(CBT)$, implies that $-S(CBT) \leq S(T) - S(CB)$. Hence, we have that $I(C:BT) \leq 2S(T) + I(C:B)$. The data-processing inequality states that local operations cannot increase the quantum mutual information \cite{NielsenandChuangbook}. Thus, $I(C:B') \leq I(C:BT)$, which implies that $I(C:B') \leq 2S(T) + I(C:B)$. Therefore, we obtain that $\Delta I(C:B) \leq 2S(T)$. Finally, since $S(T) \leq \log_2 (\text{dim}T)$, the quantum information that $T$ can communicate is limited by its dimension. Therefore, if $T$ is a system of $m$ qubits, Eq.~(\ref{eq:m4}) follows because in this case $S(T) \leq m$. Achievability of equality in Eq.~(\ref{eq:m4}) requires that $T$ is maximally entangled with $C$ (see details in the Supplemental Material). It is easy to see that the naive strategy in the QIC game saturates this bound. We notice that in the previous proof we did not require to mention Alice's system. This means that Eq.~(\ref{eq:m4}) is valid independently of how much entanglement Alice and Bob share. This also means that Eq.~(\ref{eq:m4}) is valid too if we consider that Alice and Charlie are actually the same party. 
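The inequality $I(C:BT)-I(C:B)\leq 2S(T)$, which is the core of the above argument, can also be tested numerically. The following sketch (Python with NumPy, illustrative only) draws a random mixed state of three qubits $C$, $B$, $T$ and evaluates both sides of the inequality; the random-state construction is an assumption of the sketch.
\begin{verbatim}
# NumPy sketch: test I(C:BT) - I(C:B) <= 2 S(T) on a random three-qubit state.
import numpy as np

rng = np.random.default_rng(1)

def vn_entropy(rho):
    # von Neumann entropy in bits
    p = np.linalg.eigvalsh(rho)
    p = p[p > 1e-12]
    return float(-np.sum(p * np.log2(p)))

G = rng.normal(size=(8, 8)) + 1j * rng.normal(size=(8, 8))
rho = G @ G.conj().T
rho = rho / np.trace(rho).real           # random mixed state of qubits C, B, T
r = rho.reshape(2, 2, 2, 2, 2, 2)        # indices (c, b, t, c', b', t')

S_CBT = vn_entropy(rho)
S_CB = vn_entropy(np.einsum('abtcdt->abcd', r).reshape(4, 4))
S_BT = vn_entropy(np.einsum('abcade->bcde', r).reshape(4, 4))
S_C = vn_entropy(np.einsum('abcdbc->ad', r))
S_B = vn_entropy(np.einsum('abcadc->bd', r))
S_T = vn_entropy(np.einsum('abcabd->cd', r))

delta = (S_C + S_BT - S_CBT) - (S_C + S_B - S_CB)   # I(C:BT) - I(C:B)
print(delta, '<=', 2 * S_T)
\end{verbatim}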
Thus, quantum information causality shows: \emph{the maximum possible increase of the quantum mutual information between Charlie's and Bob's systems is only a function of the dimension of the system $T$ received by Bob, independently of whether it is Alice or Charlie who sends Bob the system $T$ and of how much entanglement Bob shares with them}. If the transmitted system $T$ is classical, equality in Eq.~(\ref{eq:m4}) cannot be achieved. Information causality states that in this case, $\Delta I(C:B) \leq m$, where $C$ is a classical system, $B$ is a quantum system and $I(C:B)$ denotes their quantum mutual information \cite{ic}. In fact, this bound is valid even if both systems $C$ and $B$ are quantum (see details in the Supplemental Material). As stated above, quantum information causality follows from three properties of the von Neumann entropy: subadditivity, the data-processing and the triangle inequalities. The concept of entropy in mathematical frameworks for general probabilistic theories \cite{H01,B07,BBLW07} and its implication for information causality have been recently investigated \cite{SW10,BBCLSSWW10,AS11,DLR12}. Particularly, it has been shown that a physical condition on the measure of entropy implies subadditivity and the data-processing inequality, and hence that information causality follows from this condition \cite{AS11}. It would be interesting to investigate whether physically-sensible definitions of entropy for more general probabilistic theories satisfy the three mentioned properties, and hence a generalized version of quantum information causality. A different version of information causality in more general probabilistic theories has been considered in Ref.~\cite{MMAP12}. Quantum information causality implies an upper bound on the success probability in the QIC game: \begin{equation} \label{eq:m5} P \leq P', \end{equation} where we define $P'$ to be the maximum solution of the equation $h(P')+(1-P')\log_23=2(1-m/n)$ and $h(x)=-x\log_2x-(1-x)\log_2(1-x)$ denotes the binary entropy. The value of $P'$ is a strictly increasing function of the ratio $m/n$, achieving $P' = 1/4$ if $m = 0$ and $P' = 1$ if $m = n$. Therefore, we have that $P < 1$ if $m < n$. A plot with some values of $P'$ and the complete proof of Eq.~(\ref{eq:m5}) are given in the Supplemental Material. Below we present a sketch of the proof. Firstly, we notice that for any strategy that Alice and Bob may play that achieves success probability $P$, there exists a covariant strategy achieving the same value of $P$ that Alice and Bob can perform. By covariance, we mean the following: in version II of the QIC game, if, when Alice's input qubit $A_k$ is in the state $\lvert\psi_k\rangle$, Bob's output qubit state is $\rho_k$, then, when $A_k$ is in the state $U\lvert\psi_k\rangle$, Bob's output state is $U\rho_kU^\dagger$, for any qubit state $\lvert\psi_k\rangle\in\mathbb{C}^2$ and unitary operation $U\in\text{SU(2)}$. Recall that $k$ is the number that Charlie gives Bob. Therefore, without loss of generality, we consider that a covariant strategy is implemented. This means that the Bloch sphere of the qubit $A_k$ is contracted uniformly and output in the qubit $B_k$. In version I, this means that the joint system $C_kB_k$ is transformed into the state \begin{equation} \label{eq:m7} \omega_k=\lambda_k\Psi^-+\frac{1-\lambda_k}{3}\bigl(\Psi^++\Phi^++\Phi^-\bigr), \end{equation} where $1/4 \leq \lambda_k \leq 1$ and $\Psi^- $ denotes $\lvert\Psi^-\rangle\langle\Psi^-\rvert$, etc. 
That is, the depolarizing map \cite{NielsenandChuangbook} is applied to the qubit $A_k$, and output by Bob in the qubit $B_k$. Then, we use the data-processing inequality and the fact that the qubits $C_j$ and $C_{j'}$ are in a product state for every $j \neq j'$ in order to show that $\sum_{k=0}^{n-1}I(C_k:B_k)\leq I(C:B')$. We notice that since Charlie's and Bob's systems are initially uncorrelated, Eq.~(\ref{eq:m4}) reduces to $I(C:B') \leq 2m$. Thus, we have that $\sum_{k=0}^{n-1}I(C_k:B_k)\leq 2m$. From this inequality and the concavity property of the von Neumann entropy, we obtain an upper bound on $\sum_{k=0}^{n-1}\lambda_k/n$, which from Eqs.~(\ref{eq:m2}) and~(\ref{eq:m7}) equals $P$. Below we show that an optimal strategy to play the QIC game reduces to an optimal strategy to perform the following task. \emph{The IC-2 game}. Alice is given random numbers $x_j \equiv (x_j^0,x_j^1)$, where $x_j^0,x_j^1\in\lbrace 0,1\rbrace$, for $j = 0, 1,\ldots, n- 1$. Bob is given a random value of $k = 0, 1,\ldots, n-1$. The game's goal is that Bob outputs $x_k$. Alice and Bob can perform any strategy allowed by quantum physics with the only condition that communication is limited to a single message of $2m < 2n$ bits from Alice to Bob. In particular, Alice and Bob may share an arbitrary entangled state. Let $y_k\equiv (y_k^0,y_k^1)$ be Bob's output, where $y_k^0,y_k^1\in\lbrace 0,1\rbrace$. We define the success probability as \begin{equation} \label{eq:m8} Q\equiv \frac{1}{n}\sum_{k=0}^{n-1}P\left(y_k=x_k\right). \end{equation} We call this task the \emph{IC-2} game. The version we call the \emph{IC-1} game, in which the inputs and output are one bit values and Alice's message is of $m < n$ bits, was considered in the paper that introduced information causality \cite{ic}. The strategies to play the IC-1 game in which no entanglement is used were first considered by Wiesner in 1983 with the name of conjugate coding \cite{W83}. They were investigated further in 2002 with the name of random access codes (RACs) \cite{ANTV02}. The most general quantum strategy, in which Alice and Bob share an arbitrary entangled state, is called an entanglement-assisted random access code (EARAC) \cite{PZ10}. Let $Q_{\text{max}}$ be the maximum value of $Q$ over all possible strategies to play the IC-2 game. Below we show that $P\leq Q_{\text{max}}$. Consider the following strategy to play the IC-2 game. Alice and Bob initially share a singlet state in the qubits $A_j$ and $C_j$, for $j = 0, 1,\ldots, n-1$. Alice has the system $A \equiv A_0A_1\cdots A_{n-1}$, while Bob has the system $C \equiv C_0C_1\cdots C_{n-1}$. Alice applies the unitary operation $\sigma_{x_j}$ on the qubit $A_j$, for every $j$, where $\sigma_{0,0}\equiv I$ is the identity operator acting on $\mathbb{C}^2$ and $\sigma_{0,1}\equiv \sigma_{1}$, $\sigma_{1,0}\equiv \sigma_{2}$, $\sigma_{1,1}\equiv \sigma_{3}$ are the Pauli matrices. Then, Alice and Bob play the QIC game, applying some operation on the input system $A$, which includes a message of $m$ qubits from Alice to Bob. However, instead of sending these $m$ qubits directly, Alice teleports \cite{teleportation} them to Bob. Thus, communication consists of $2m$ bits only, as required. At this stage, Bob does not apply any operations on the system $C$, which is consistent with the QIC game. As previously indicated, we can consider that in a general strategy in the QIC game the depolarizing map is applied to the qubit $A_k$. 
Therefore, Bob outputs the qubit $B_k$ in the joint state $\Omega_k=(I\otimes\sigma_{x_k})\omega_k(I\otimes\sigma_{x_k})$ with the qubit $C_k$, where $\omega_k$ is given by Eq.~(\ref{eq:m7}). Then, Bob measures $\Omega_k$ in the Bell basis. Bob learns the encoded value $x_k$ with probability $\lambda_k$. Thus, from Eq.~(\ref{eq:m8}) we have that $Q=\sum_{k=0}^{n-1}\lambda_k/n$, which equals $P$, as we can see from Eqs.~(\ref{eq:m2}) and~(\ref{eq:m7}). Since by definition $Q \leq Q_{\text{max}}$, we have that $P \leq Q_{\text{max}}$, as claimed. Consider the following class of strategies to play the QIC game that combine quantum teleportation \cite{teleportation}, superdense coding \cite{sdc} (SDC) and the IC-2 game. \emph{Teleportation strategies in the QIC game}. Alice and Bob share a singlet state in the qubits $A'_j$, at Alice's site, and $B_j$, at Bob's site, for $j = 0, 1,\ldots, n - 1$. Alice applies a Bell measurement on her qubits $A_jA'_j$ and obtains the two bit outcome $x_j \equiv (x_j^0,x_j^1)$. Thus, the state of the qubit $A_j$ is teleported to Bob's qubit $B_j$, up to the Pauli error $\sigma_{x_j}$. This means that the joint state of the system $C_jB_j$ transforms into one of the four Bell states, according to the value of $x_j$. Alice and Bob play the IC-2 game with Alice's and Bob's inputs being $x \equiv (x_0, x_1, \ldots, x_{n-1})$ and $k$, respectively. However, instead of sending Bob the $2m-$bits message directly, Alice encodes it in $m$ qubits via SDC. Bob receives the $m$ qubits and decodes the correct $2m$-bits message, which he inputs to his part of the IC-2 game. Bob outputs the two bit number $y_k \equiv (y_k^0,y_k^1)$ and applies the Pauli correction operation $\sigma_{y_k}$ on the qubit $B_k$, which then he outputs and gives to Charlie. If $y_k = x_k$, the output state $\omega_k$ of the system $C_kB_k$ is the singlet; otherwise, we have that $\langle\Psi^-\rvert\omega_k\lvert\Psi^-\rangle=0$. Thus, from the definition of $P$, Eq.~(\ref{eq:m2}), we see that $P = Q$, where $Q$ is given by Eq.~(\ref{eq:m8}). Therefore, since $P\leq Q_{\text{max}}$, we see that an optimal strategy in the QIC game is a teleportation strategy in which the IC-2 game is played achieving the maximum success probability $Q = Q_{\text{max}}$. We have obtained an upper bound on $Q$ for a particular class of strategies in the case $m = 1$ (see Supplemental Material). The best strategy that we have found to play the QIC game in the case $m = 1$ is a teleportation strategy in which the IC-2 game is played with two equivalent and independent protocols in the IC-1 game. In both protocols Bob inputs the number $k$, while Alice inputs the bits $\lbrace x_j^0\rbrace_{j=0}^{n-1}$ in the first protocol and the bits $\lbrace x_j^1\rbrace_{j=0}^{n-1}$ in the second one. If Bob outputs the correct value of $x_k^0$ with probability $q$ in the first protocol, and similarly, he outputs the correct value of $x_k^1$ with probability $q$ in the second protocol, for any $k$, then the success probability in the IC-2 game is $Q = q^2$. The maximum value of $q$ that has been shown \cite{AS11,PZ10} is $q = (1 + n^{-1/2})/2$. Explicit strategies to achieve this value are given by EARACs in the case in which $n = 2^r3^l$ and $r, l$ are nonnegative integers \cite{PZ10}. With this value of $Q$ we achieve a success probability in the QIC game of $P_{\text{T}}=\bigl(1+n^{-1/2}\bigr)^2/4$, where the label T stands for \emph{teleportation}. 
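For concreteness, the following sketch (plain Python, illustrative only) compares, for $m=1$ and a few values of $n$ of the form $2^r3^l$, the naive probability $P_{\text{N}}$, the teleportation-strategy probability $P_{\text{T}}$, and the upper bound $P'$ of Eq.~(\ref{eq:m5}), obtained here by bisection, since the left-hand side of the defining equation is decreasing in $P'$ on $[1/4,1]$. The choice of values of $n$ is an assumption of the sketch.
\begin{verbatim}
# Plain-Python sketch: naive strategy, teleportation strategy and the bound P'.
from math import log2, sqrt

def h(x):
    # binary entropy
    return 0.0 if x in (0.0, 1.0) else -x * log2(x) - (1 - x) * log2(1 - x)

def P_bound(m, n, tol=1e-12):
    # largest solution of h(P) + (1 - P) log2(3) = 2 (1 - m/n);
    # the left-hand side decreases from 2 to 0 as P runs over [1/4, 1]
    target = 2.0 * (1.0 - m / n)
    lo, hi = 0.25, 1.0
    while hi - lo > tol:
        mid = 0.5 * (lo + hi)
        if h(mid) + (1 - mid) * log2(3) > target:
            lo = mid
        else:
            hi = mid
    return 0.5 * (lo + hi)

m = 1
for n in (2, 4, 9, 16):                   # n of the form 2^r 3^l
    P_N = (1 + 3 * m / n) / 4             # naive strategy
    P_T = (1 + 1 / sqrt(n)) ** 2 / 4      # teleportation + EARAC strategy
    print(n, round(P_N, 4), round(P_T, 4), round(P_bound(m, n), 4))
\end{verbatim}
In all cases one should find $P_{\text{N}} \leq P_{\text{T}} \leq P'$.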
Here we have introduced the quantum information causality principle as satisfaction of an upper bound on the quantum information that Bob can gain about Charlie's data as a function of the number of qubits $m$ that Alice (who shares correlations with Charlie) sends Bob, Eq.~(\ref{eq:m4}). We have presented a new quantum information task, the QIC game, whose success probability is limited by quantum information causality, Eq.~(\ref{eq:m5}). We have shown that an optimal strategy to play the QIC game combines the quantum teleportation and the quantum superdense coding protocols, with an optimal strategy to perform another task that has classical inputs, the IC-2 game. An optimal strategy in the IC-2 game remains as an interesting open problem. \section{Supplemental Material} \subsection{An equivalent version of the QIC game} \emph{The QIC game (version II)}. This version is similar to version I, presented in the main text, with the following differences. Charlie does not prepare singlet states. Instead, Charlie prepares $n$ qubits in the pure states $\lbrace\lvert\psi_j\rangle\rbrace_{j=0}^{n-1}$, completely randomly. Charlie sends Alice the qubit $A_j$ in the quantum state $\lvert\psi_j\rangle$, for $j = 0, 1,\ldots, n-1$, and keeps a classical record of the states. We denote the global system that Alice receives from Charlie as $A \equiv A_0A_1\cdots A_{n-1}$. Bob gives Charlie a qubit $B_k$ in the state $\rho_k$, which must be as close as possible to $\lvert\psi_k\rangle$. Charlie measures the received state $\rho_k$ in the orthonormal basis $\lbrace\lvert\psi_k\rangle,\lvert\psi_k^\bot\rangle\rbrace$, where $\lvert\psi_k^\bot\rangle$ is the qubit state with Bloch vector antiparallel to that one of $\lvert\psi_k\rangle$. Alice and Bob win the game if Charlie's measurement outcome corresponds to the state $\lvert\psi_k\rangle$. The success probability is \begin{equation} \label{eq:m1} p\equiv \int d\mu_0 \int d\mu_1 \cdots \int d\mu_{n-1} \biggl(\frac{1}{n}\sum_{k=0}^{n-1}\langle\psi_k\rvert\rho_k\lvert\psi_k\rangle\biggr), \end{equation} where $\int d\mu_j$ is the normalized integral over the Bloch sphere corresponding to the state $\lvert\psi_j\rangle$. Now we show that both versions of the QIC game are equivalent and that their success probabilities satisfy the relation $p = (1 + 2P)/3$. More precisely, we show that if Alice and Bob play a strategy in version I of the QIC game that achieves a success probability $P$, the same strategy applied to version II achieves a success probability $p$ that satisfies the relation $p = (1 + 2P)/3$, for any strategy that they may play, and vice versa. We change to a more convenient notation, $\lvert\psi_k\rangle\equiv\lvert\uparrow_{\vec{r}_k}\rangle$, $\lvert\psi_k^\bot\rangle\equiv\lvert\downarrow_{\vec{r}_k}\rangle$, in order to make clear that $\lvert\psi_k\rangle$ and $\lvert\psi_k^\bot\rangle$ correspond to pure qubit states with Bloch vectors $\vec{r}_k$ and $-\vec{r}_k$, respectively. Version II of the QIC game is equivalent to the following. Charlie initially prepares the pair of qubits $A_j$ and $C_j$ in the singlet state $\lvert\Psi^-\rangle$, he gives Alice the qubit $A_j$ and keeps the qubit $C_j$, for $j = 0, 1, \ldots, n-1$. Charlie generates a random integer $k\in\lbrace 0, 1, \ldots, n-1\rbrace$ and gives it to Bob. 
Charlie measures the joint state $\omega_k$ of his qubit $C_k$ and the one received by Bob $B_k$ in the orthonormal basis $\mathcal{B}_{\vec{r}_k}\equiv\lbrace\lvert\uparrow_{\vec{r}_k}\rangle\lvert\uparrow_{\vec{r}_k}\rangle,\lvert\downarrow_{\vec{r}_k}\rangle\lvert\downarrow_{\vec{r}_k}\rangle,\lvert\uparrow_{\vec{r}_k}\rangle\lvert\downarrow_{\vec{r}_k}\rangle,\lvert\downarrow_{\vec{r}_k}\rangle\lvert\uparrow_{\vec{r}_k}\rangle\rbrace$ for some vector $\vec{r}_k$ that he chooses completely randomly from the Bloch sphere. Opposite outcomes correspond to success. Therefore, the success probability $p$ that Alice and Bob achieve in version II of the QIC game, given by Eq.~(\ref{eq:m1}), equals the following in this version: \begin{align} \label{eq:1} p=&\int\!\! d\mu_0\!\int\!\! d\mu_1\cdots\!\int\!\! d\mu_{n-1}\!\biggl[\frac{1}{n}\sum_{k=0}^{n-1}\bigl(\langle\uparrow_{\vec{r}_k}\rvert\langle\downarrow_{\vec{r}_k}\rvert\omega_k\lvert\uparrow_{\vec{r}_k}\rangle\lvert\downarrow_{\vec{r}_k}\rangle \biggr. \biggr.\nonumber\\ &\qquad \biggl. \bigl. +\langle\downarrow_{\vec{r}_k}\rvert\langle\uparrow_{\vec{r}_k}\rvert\omega_k\lvert\downarrow_{\vec{r}_k}\rangle\lvert\uparrow_{\vec{r}_k}\rangle\bigr)\biggr], \end{align} where $\int\! d\mu_j$ is the normalized integral over the Bloch sphere corresponding to the Bloch vector $\vec{r}_j$. The Bell states defined in the basis $\mathcal{B}_{\vec{r}_k}$ are \begin{eqnarray*} \lvert\Phi_{\vec{r}_k}^\pm\rangle&\equiv&\frac{1}{\sqrt{2}}\bigl(\lvert\uparrow_{\vec{r}_k}\rangle\lvert\uparrow_{\vec{r}_k}\rangle\pm\lvert\downarrow_{\vec{r}_k}\rangle\lvert\downarrow_{\vec{r}_k}\rangle\bigr),\\ \lvert\Psi_{\vec{r}_k}^\pm\rangle&\equiv&\frac{1}{\sqrt{2}}\bigl(\lvert\uparrow_{\vec{r}_k}\rangle\lvert\downarrow_{\vec{r}_k}\rangle\pm\lvert\downarrow_{\vec{r}_k}\rangle\lvert\uparrow_{\vec{r}_k}\rangle\bigr). \end{eqnarray*} Consider that instead of measuring the state $\omega_k$ in the basis $\mathcal{B}_{\vec{r}_k}$, Charlie measures it in this Bell basis. Since the singlet state is the same in any basis, this corresponds to version I of the QIC game. Therefore, versions I and II of the QIC game are equivalent. Below we show that their success probabilities satisfy the claimed relation. Using the Bell basis, we obtain from Eq.~(\ref{eq:1}) that \begin{align} \label{eq:2} p=&\int\!\! d\mu_0\!\int\!\! d\mu_1\cdots\!\int\!\! d\mu_{n-1}\!\biggl[\frac{1}{n}\sum_{k=0}^{n-1}\bigl(\langle\Psi_{\vec{r}_k}^-\rvert\omega_k\lvert\Psi_{\vec{r}_k}^-\rangle \bigr. \biggr.\nonumber\\ &\qquad \biggl. \bigl. +\langle\Psi_{\vec{r}_k}^+\rvert\omega_k\lvert\Psi_{\vec{r}_k}^+\rangle\bigr)\biggr]. \end{align} Since the singlet state $\lvert\Psi_{\vec{r}_k}^-\rangle$ is the same in any basis, by the definition of $P$ (Eq.~(\ref{eq:m2}) of the main text), we have that \begin{equation} \label{eq:3} \int\!\! d\mu_0\!\int\!\! d\mu_1\cdots\!\int\!\! d\mu_{n-1}\frac{1}{n}\sum_{k=0}^{n-1}\langle\Psi_{\vec{r}_k}^-\rvert\omega_k\lvert\Psi_{\vec{r}_k}^-\rangle=P. \end{equation} On the other hand, we have that \begin{eqnarray} \label{eq:4} \lefteqn{\int\!\! d\mu_0\!\int\!\! d\mu_1\cdots\!\int\!\! d\mu_{n-1}\langle\Psi_{\vec{r}_k}^+\rvert\omega_k\lvert\Psi_{\vec{r}_k}^+\rangle}\nonumber\\ &=& \int\!\! d\mu_0\!\int\!\! d\mu_1\cdots\!\int\!\! d\mu_{n-1}\text{Tr}\bigl(\omega_k\lvert\Psi_{\vec{r}_k}^+\rangle\langle\Psi_{\vec{r}_k}^+\lvert\bigr)\nonumber\\ &=&\text{Tr}\biggl(\int\!\! d\mu_0\!\int\!\! d\mu_1\cdots\!\int\!\! 
d\mu_{n-1}\omega_k\lvert\Psi_{\vec{r}_k}^+\rangle\langle\Psi_{\vec{r}_k}^+\lvert\biggr)\nonumber\\ &=&\text{Tr}\biggl(\omega_k\int\!\! d\mu_0\!\int\!\! d\mu_1\cdots\!\int\!\! d\mu_{n-1}\lvert\Psi_{\vec{r}_k}^+\rangle\langle\Psi_{\vec{r}_k}^+\lvert\biggr)\nonumber\\ &=&\text{Tr}\biggl(\omega_k\int\!\! d\mu_k\lvert\Psi_{\vec{r}_k}^+\rangle\langle\Psi_{\vec{r}_k}^+\lvert\biggr), \end{eqnarray} where in the third line we have used the linearity of the trace; in the fourth line we have used the fact that $\omega_k$ does not depend on the Bloch vector $\vec{r}_k$ because Charlie chooses it completely randomly to define the measurement basis $\mathcal{B}_{\vec{r}_k}$, and can do so after Bob gives him the qubit $B_k$, and naturally does not depend on the Bloch vectors $\vec{r}_j$ with $j\ne k$ for the same reason; and in the last line we have used that the state $\lvert\Psi_{\vec{r}_k}^+\rangle$ is defined in terms of the Bloch vector $\vec{r}_k$, which is parameterized by $\mu_k$, and so is independent of the parameters $\mu_j$ with $j\neq k$. It is easy to obtain that \begin{equation} \label{eq:5} \int\!\! d\mu_k\lvert\Psi_{\vec{r}_k}^+\rangle\langle\Psi_{\vec{r}_k}^+\rvert=\frac{1}{3}\left(I-\lvert\Psi^-\rangle\langle\Psi^-\rvert\right), \end{equation} where $\lvert\Psi^-\rangle\equiv\bigl(\lvert 01\rangle-\lvert 10\rangle\bigr)/\sqrt{2}$ is the singlet state in the computational basis and $I$ is the identity operator acting on $\mathbb{C}^4$. From Eqs.~(\ref{eq:4}) and (\ref{eq:5}) and the definition of $P$ we have that \begin{equation} \label{eq:6} \frac{1}{n}\sum_{k=0}^{n-1}\int\!\! d\mu_0\!\int\!\! d\mu_1\cdots\!\int\!\! d\mu_{n-1}\langle\Psi_{\vec{r}_k}^+\rvert\omega_k\lvert\Psi_{\vec{r}_k}^+\rangle=\frac{1}{3}-\frac{1}{3}P. \end{equation} Finally, we substitute Eqs.~(\ref{eq:3}) and~(\ref{eq:6}) into Eq.~(\ref{eq:2}) to obtain that $p = (1 + 2P)/3$, as claimed. \subsection{Achievability of the quantum information causality bound} We show that equality in Eq.~(\ref{eq:m4}) of the main text, $\Delta I(C:B)\leq 2m$, requires that the transmitted system $T$ is maximally entangled with Charlie's system $C$. Following the proof of Eq.~(\ref{eq:m4}) of the main text, we note that equality requires the following conditions to be satisfied. The transmitted system $T$ cannot be entangled with Bob's system $B$ in order to satisfy $S(BT)=S(B)+S(T)$. The system $T$ can only be entangled with the joint system $CB$ so that we have $-S(CBT)=S(T)-S(CB)$, as shown below. The state of the system $T$ has to be completely mixed so that its entropy is maximum: $S(T)=m$. This means that $T$ has to be maximally entangled with the system that purifies it. Together, these conditions imply that $T$ has to be maximally entangled with $C$. We also require that the quantum mutual information between $BT$ and $C$ does not decrease by Bob's operations: $I(C:B') = I(C:BT)$. Now we show that satisfaction of the equation $-S(CBT)=S(T)-S(CB)$ is achieved if and only if $T$ is entangled only with the joint system $CB$ \cite{NielsenandChuangbook}. Let $A$ be the quantum system that Charlie gives Alice, and hence is initially maximally entangled with $C$. Let any other physical system that Alice has to be denoted by $A'$. In particular, $A'$ can be entangled with Bob's system $B$, but not with Charlie's system $C$. Let $T$ be the system that Alice sends Bob. Since the systems $A'$ and $B$ are arbitrarily big, without loss of generality, we can consider that the global system $AA'CBT$ is in a pure state. 
Alice applies some quantum operation on the system $TAA'$, which in general can be represented by a unitary operation followed by a projective measurement. Thus, after Alice's operation, the global system $AA'CBT$ remains in a pure state. Due to the Schmidt decomposition of a bipartite pure state, we have that \begin{eqnarray} \label{eq:7} S(CB)&&=S(TAA'),\nonumber\\ S(AA')&&=S(CBT). \end{eqnarray} We apply the subadditivity property to obtain \begin{equation} \label{eq:8} S(TAA')\leq S(AA')+S(T), \end{equation} which from Eq.~(\ref{eq:7}) implies that \begin{equation} \label{eq:9} S(CB)\leq S(CBT)+S(T). \end{equation} Equality in Eq.~(\ref{eq:9}) is achieved if and only if equality in Eq.~(\ref{eq:8}) is satisfied, which occurs if and only if $T$ is in a product state with $AA'$. Therefore, the relation $-S(CBT)=S(T)- S(CB)$ is satisfied if and only if $T$ is entangled only with the system $CB$, as claimed. \subsection{The information causality bound} If the transmitted system $T$ is classical, equality in Eq.~(\ref{eq:m4}) of the main text, $\Delta I(C:B)\leq2m$, can no longer be achieved. If $T$ represents a classical variable of $m$ bits then the smaller upper bound $\Delta I(C:B)\leq m$ is satisfied. The only difference in the proof of this bound compared to the one of $\Delta I(C:B)\leq2m$ is that if $T$ is classical then the bound $-S(CBT)\leq S(T)-S(CB)$ can no longer be saturated. In fact, in this case the smaller upper bound $-S(CBT)\leq-S(CB)$ is satisfied. A way to see this is that, if $T$ is a classical variable, the state of the joint system $CBT$ is a distribution over all possible values $x$ of $T$ and states of $CB$ for each $x$. Therefore, there exists a transformation $x\rightarrow(CB)_x$. From the data-processing inequality we have that $I(CB:T)\leq I(T:T)$. Hence, since $I(CB:T)=S(CB)+S(T)-S(CBT)$ and $I(T:T)=S(T)$, we obtain $S(CB)\leq S(CBT)$ \cite{ic}. \subsection{Reduction of a general strategy in the QIC game to a covariant strategy} For convenience, consider version II of the QIC game in which Charlie gives Alice $n$ pure qubits in the product state $\vec{\psi}\equiv\otimes_{j=0}^{n-1}\bigl(\lvert\psi_j\rangle\langle\psi_j\rvert\bigr)_{A_j}\in\mathcal{D}\Bigl(\bigl(\mathbb{C}^2\bigr)^{\otimes n}\Bigr)$, where we define $\mathcal{D}(\mathcal{H})$ to be the set of density operators acting on the Hilbert space $\mathcal{H}$. Let $\Gamma_k:\mathcal{D}\Bigl(\bigl(\mathbb{C}^2\bigr)^{\otimes n}\Bigr)\rightarrow\mathcal{D}\bigl(\mathbb{C}^2\bigr)$ be the map that Alice and Bob apply to the state $\vec{\psi}$, which outputs the state $\rho_k\equiv\Gamma_k\bigl(\vec{\psi}\!~\bigr)$ that Bob gives Charlie. Recall that $k$ is the number that Charlie gives Bob. After averaging over all possible input pure product states of qubits with index $j\neq k$, the output only depends on the state $\psi_k\equiv\lvert\psi_k\rangle\langle\psi_k\rvert$, which we identify with the map \begin{multline} \label{eq:10} \bar{\Gamma}_k(\psi_k)\\ \equiv\!\int\!\!\! d\mu_0\!\!\int\!\!\! d\mu_1\!\cdots\!\!\int\!\!\! d\mu_{k-1}\!\!\int\!\!\! d\mu_{k+1}\!\!\int\!\!\! d\mu_{k+2}\!\cdots\!\!\int\!\!\! d\mu_{n-1}\Gamma_k\bigl(\vec{\psi}\!~\bigr), \end{multline} where $\int\!\! d\mu_j$ is the normalized integral over the Bloch sphere corresponding to the state $\lvert\psi_j\rangle$. We define the map \begin{equation} \label{eq:11} \bar{\Gamma}_k^{\text{cov}}(\phi)\equiv\int\!\! 
\begin{equation} \label{eq:11} \bar{\Gamma}_k^{\text{cov}}(\phi)\equiv\int\!\! d\nu\, U_{\nu}^{\dagger}\bar{\Gamma}_k\bigl(U_{\nu}\phi U_{\nu}^{\dagger}\bigr)U_{\nu}, \end{equation} where $\phi\in\mathcal{D}\bigl(\mathbb{C}^2\bigr)$, $U_\nu\in\text{SU}(2)$ and $d\nu$ is the Haar measure on SU(2). It is easy to see that this map is covariant, that is, $\bar{\Gamma}_k^{\text{cov}}\bigl(U\phi U^\dagger\bigr)=U\bar{\Gamma}_k^{\text{cov}}(\phi)U^\dagger$, for all $\phi\in\mathcal{D}\bigl(\mathbb{C}^2\bigr)$ and $U\in\text{SU}(2)$. In principle, for any map $\Gamma_k$ that Alice and Bob perform, they can implement the covariant map $\bar{\Gamma}_k^{\text{cov}}$ as follows. Alice and Bob initially share randomness: with uniform probability, they obtain a random number $\nu$ in the range $d\nu$, corresponding to an, ideally, infinitesimal region of the Haar measure on SU(2). This can be done, for example, if Alice and Bob share a maximally entangled state of arbitrarily large dimension and both apply a local projective measurement in the Schmidt basis to their part of the state; their measurement outcome indicates the number $\nu$. Alice applies the unitary operation $U_\nu$ parameterized by the obtained number $\nu$ to each of her input qubit states $\lvert\psi_j\rangle$. Then, Alice and Bob apply the map $\Gamma_k$ to the input state $\otimes_{j=0}^{n-1}\bigl(U_\nu\lvert\psi_j\rangle\langle\psi_j\rvert U_\nu^\dagger\bigr)_{A_j}$. Finally, Bob applies the unitary $U_\nu^\dagger$ to his output qubit. From Eq.~(\ref{eq:10}) we obtain that, after averaging over all possible input pure qubit states with index distinct from $k$ and after Bob's final unitary operation $U_\nu^\dagger$, Bob's output state is $U_\nu^\dagger\bar{\Gamma}_k\bigl(U_\nu\psi_kU_\nu^\dagger\bigr)U_\nu$. Averaging over all shared random numbers $\nu$, we obtain $\bar{\Gamma}_k^\text{cov}(\psi_k)$, as defined by Eq.~(\ref{eq:11}). It is straightforward to see that the map $\bar{\Gamma}_k^\text{cov}$ satisfies \begin{equation} \int\!\! d\mu_k\langle\psi_k\rvert\bar{\Gamma}_k^{\text{cov}}(\psi_k)\lvert\psi_k\rangle=\int\!\! d\mu_k \langle\psi_k\rvert\bar{\Gamma}_k(\psi_k)\lvert\psi_k\rangle.\nonumber \end{equation} Therefore, it achieves the same value of $p$ (see Eq.~(\ref{eq:m1})) as $\bar{\Gamma}_k$. Thus, for convenience we may assume that Alice and Bob implement the covariant map $\bar{\Gamma}_k^{\text{cov}}$. In general, this is the depolarizing map \cite{NielsenandChuangbook}: \begin{equation} \bar{\Gamma}_k^{\text{cov}}(\phi)=\sum_{i=0}^{3}E_i\phi E_i^\dagger,\nonumber \end{equation} where $\phi\in\mathcal{D}\bigl(\mathbb{C}^2\bigr)$, $E_0=\sqrt{\lambda_k}\, I$, $E_i=\sqrt{(1-\lambda_k)/3}\,\sigma_i$ with $1/4\leq \lambda_k\leq 1$, and $\sigma_i$ are the Pauli matrices, for $i = 1, 2, 3$. Application of the depolarizing map to a qubit that is in the singlet state with another qubit, as in version I of the QIC game, gives as output the state $\omega_k$ given by Eq.~(\ref{eq:m7}) of the main text. \subsection{A useful bound} We show the bound \begin{equation} \label{eq:12} \sum_{k=0}^{n-1}I\left(C_k:B_k\right)\leq I\left(C:B'\right), \end{equation} which will be useful to deduce an upper bound on $P$. The proof is equivalent to the one for classical bits \cite{ic}. We notice that \begin{eqnarray} \label{eq:13} I\left(C:B'\right)&\equiv& I\left(C_0C_1\cdots C_{n-1}:B'\right)\nonumber\\ &=&I\left(C_0:B'\right)+I\left(C_1C_2\cdots C_{n-1}:B'C_0\right)\nonumber\\ &&-\: I\left(C_1C_2\cdots C_{n-1}:C_0\right).
\end{eqnarray} Since Charlie's qubits are in a product state with each other, we have that \begin{equation} \label{eq:14} I\left(C_1C_2\cdots C_{n-1}:C_0\right)=0. \end{equation} The data-processing inequality implies that \begin{equation} \label{eq:15} I\left(C_1C_2\cdots C_{n-1}:B'C_0\right)\geq I\left(C_1C_2\cdots C_{n-1}:B'\right). \end{equation} From Eqs.~(\ref{eq:13})--(\ref{eq:15}) we obtain that \begin{multline} I\left(C_0C_1\cdots C_{n-1}:B'\right)\\ \geq I\left(C_0:B'\right)+I\left(C_1C_2\cdots C_{n-1}:B'\right).\nonumber \end{multline} After iterating these steps $n-1$ times, we have \begin{equation} \label{eq:16} I\left(C:B'\right)\geq \sum_{k=0}^{n-1}I\left(C_k:B'\right). \end{equation} Since the system $B_k$ is output by Bob after local operations on his system $B'$, applying the data-processing inequality, we obtain $I(C_k:B')\geq I(C_k:B_k )$, which from Eq.~(\ref{eq:16}) implies Eq.~(\ref{eq:12}). \subsection{Upper bound on $P$ from quantum information causality} We show an upper bound on the success probability $P$ in the QIC game from quantum information causality: \begin{equation} \label{eq:17} P\leq P', \end{equation} where we define $P'$ to be the maximum solution of the equation \begin{equation} \label{eq:18} h(P')+(1-P')\log_23=2\left(1-\frac{m}{n}\right), \end{equation} and $h(x)=-x\log_2x-(1-x)\log_2(1-x)$ denotes the binary entropy. Some values of $P'$ are plotted in Fig.~\ref{fig2}. We notice that since Charlie's and Bob's systems are initially uncorrelated, the quantum information causality bound (Eq.~(\ref{eq:m4}) of the main text) reduces to $I(C:B')\leq 2m$. Thus, from the bound given by Eq.~(\ref{eq:12}) we have that \begin{equation} \label{eq:19} \sum_{k=0}^{n-1}I(C_k:B_k)\leq 2m. \end{equation} Charlie initially prepares the qubits $C_k$ and $A_k$ in the singlet state $\lvert\Psi^-\rangle_{C_kA_k }$, which after Alice's and Bob's operations is transformed into some state $\omega_k$, now in the joint system $C_kB_k$. We have shown that in general we can consider $\omega_k$ to be of the form given by Eq.~(\ref{eq:m7}) of the main text: \begin{equation} \omega_k=\lambda_k\Psi^-+\frac{1-\lambda_k}{3}\bigl(\Psi^++\Phi^++\Phi^-\bigr).\nonumber \end{equation} Thus, we have that $I(C_k:B_k)=2-S(\omega_k)$. Hence, from Eq.~(\ref{eq:19}) we have that \begin{equation} \label{eq:20} \frac{1}{n}\sum_{k=0}^{n-1}S(\omega_k)\geq 2\left(1-\frac{m}{n}\right). \end{equation} We define the state $\omega\equiv\sum_{k=0}^{n-1}\omega_k/n$. From the concavity of the von Neumann entropy \cite{NielsenandChuangbook}, we obtain $S(\omega)\geq \sum_{k=0}^{n-1}S(\omega_k)/n$, which together with Eq.~(\ref{eq:20}) implies \begin{equation} \label{eq:21} S(\omega)\geq 2\left(1-\frac{m}{n}\right). \end{equation} From the definitions of $P$ (Eq.~(\ref{eq:m2}) of the main text) and $\omega$, and the form of $\omega_k$ (Eq.~(\ref{eq:m7}) of the main text) we have that \begin{equation} \label{eq:22} \omega=P\Psi^-+\frac{1-P}{3}\bigl(\Psi^++\Phi^++\Phi^-\bigr), \end{equation} which has von Neumann entropy $S(\omega)=h(P)+(1-P)\log_23$, where $h(x)=-x\log_2x-(1-x)\log_2(1-x)$ is the binary entropy. Thus, from Eq.~(\ref{eq:21}) we have that \begin{equation} \label{eq:23} h(P)+(1-P)\log_23\geq 2\left(1-\frac{m}{n}\right), \end{equation} which implies Eq.~(\ref{eq:17}). This can be seen as follows. 
The function $h(P)+(1-P)\log_23$ corresponds to the Shannon entropy of a random variable taking four values, one with probability $P$ and the others with probability $(1-P)/3$ \cite{NielsenandChuangbook}. It is a strictly increasing function of $P$ in the range $[0,1/4]$ and a strictly decreasing function in the range $[1/4,1]$. It takes the value $\log_23$ at $P=0$ and at $P\approx0.609$, the value 2 at $P=1/4$, and the value 0 at $P=1$. If $2(1-m/n)\geq \log_23$, Eq.~(\ref{eq:18}) has two solutions, one in the range $[0, 1/4]$ and the other in the range $[1/4,0.609]$. Otherwise, Eq.~(\ref{eq:18}) has a single solution in the range $(0.609, 1]$. Therefore, the maximum solution of Eq.~(\ref{eq:18}) is in the range $[1/4, 1]$. Since in this range the function $h(P)+(1-P)\log_23$ is strictly decreasing, Eq.~(\ref{eq:23}) implies Eq.~(\ref{eq:17}). In particular, we can easily see from Eq.~(\ref{eq:21}) that if $m< n$ then $S(\omega)>0$. Therefore, in this case $\omega$ cannot be a perfect singlet, which from Eq.~(\ref{eq:22}) implies that $P<1$. \begin{figure} \caption{\label{fig2} Values of the upper bound $P'$, defined as the maximum solution of Eq.~(\ref{eq:18}).} \end{figure} \subsection{Upper bound on $Q$ for nonlocal strategies} We have obtained an upper bound on the success probability $Q$ in the IC-2 game, defined in the main text, for a particular class of strategies in the case $m=1$: \begin{equation} \label{eq:24} Q\leq Q', \end{equation} where $Q'\equiv\bigl(1 + 3n^{-1/2}\bigr)/4$. The considered class of strategies is the following. \emph{Nonlocal strategies in the IC-2 game}. Alice and Bob share an entangled state $\lvert\psi\rangle\in\mathcal{H}$. They perform a local projective measurement on their part of $\lvert\psi\rangle$. Alice chooses her measurement according to her value of $x\equiv(x_0,x_1,\ldots,x_{n-1})$. Recall that $x_j\equiv (x_j^0,x_j^1)$, for $j=0, 1, \ldots, n-1$. Bob chooses his measurement according to his number $k$. Their measurement outcomes are the two-bit numbers $(a_k^0,a_k^1)$ and $(b_k^0,b_k^1)$, respectively. Alice sends Bob her outcome. Bob outputs the two-bit value $y_k\equiv (y_k^0,y_k^1)$, where $y_k^j=a_k^j\oplus b_k^j$, for $j = 0, 1$, and $\oplus$ denotes addition modulo 2. The success probability is \begin{equation} Q=\frac{1}{n}\sum_{k=0}^{n-1}P\left(y_k^0=x_k^0,y_k^1=x_k^1\right).\nonumber \end{equation} This class of strategies is not general. For example, a more general strategy would be one in which Bob uses Alice's message in order to choose his measurement. It can easily be computed that for $m = 1$ and $n \geq 50$, $P' < Q',$ where $P'$ is defined by Eq.~(\ref{eq:18}). Therefore, the bound given by Eq.~(\ref{eq:24}) cannot be achieved for $n \geq 50$; otherwise Eq.~(\ref{eq:m5}) of the main text, and hence quantum information causality, could be violated by a teleportation strategy achieving $P = Q'$. Now we present the proof of Eq.~(\ref{eq:24}). This is an extension of the one given in Ref.~\cite{AS11} for the IC-1 game. Let $\mathcal{H}=\mathcal{H}_A\otimes\mathcal{H}_B$. Alice and Bob measure their respective systems, $A$ and $B$, in the orthonormal bases $\lbrace\lvert\nu_{r,s}^{x}\rangle\rbrace_{r,s=0}^1$ and $\lbrace\lvert w_{t,u}^{k}\rangle\rbrace_{t,u=0}^1$. After the measurement is completed, the state $\lvert\psi\rangle$ is projected onto the state $\lvert\nu_{a_k^0,a_k^1}^{x}\rangle\lvert w_{b_k^0,b_k^1}^{k}\rangle$.
We define the Hermitian operators \begin{eqnarray} \hat{A}_x&\equiv &\sum_{r=0}^1\sum_{s=0}^1(-1)^{r+s}\lvert\nu_{r,s}^x\rangle\langle\nu_{r,s}^x\rvert,\nonumber\\ \hat{B}_k&\equiv &\sum_{t=0}^1\sum_{u=0}^1(-1)^{t+u}\lvert w_{t,u}^k\rangle\langle w_{t,u}^k\rvert, \nonumber \end{eqnarray} acting on $\mathcal{H}_A$ and $\mathcal{H}_B$, respectively. We also define $E_{x,k}\equiv(-1)^{x_k^0+x_k^1}\langle\psi\rvert\hat{A}_x\hat{B}_k\lvert\psi\rangle$. Writing the state $\lvert\psi\rangle$ in the basis $\lbrace\lvert\nu_{r,s}^{x}\rangle\lvert w_{t,u}^{k}\rangle\rbrace_{r,s,t,u=0}^1$, using that $y_k^j=a_k^j\oplus b_k^j$, for $j=0,1$, and noticing that $x$ is a completely random variable of $4^n$ possible values, it is easy to obtain that \begin{multline} \label{eq:25} \frac{1}{n}\sum_{k=0}^{n-1}\left[P\left(y_k^0=x_k^0,y_k^1=x_k^1\right)+P\left(y_k^0\neq x_k^0,y_k^1\neq x_k^1\right)\right]\\ =\frac{1}{2}\biggl(1+\frac{1}{n4^n}\sum_{x,k}E_{x,k}\biggr). \end{multline} Following the procedure of Ref.~\cite{AS11}, it is obtained that \begin{equation} \frac{1}{2}\biggl(1+\frac{1}{n4^n}\sum_{x,k}E_{x,k}\biggr)\leq\frac{1}{2}\biggl(1+\frac{1}{\sqrt{n}}\biggr),\nonumber \end{equation} which from Eq.~(\ref{eq:25}) implies \begin{multline} \label{eq:26} \frac{1}{n}\sum_{k=0}^{n-1}\left[P\left(y_k^0=x_k^0,y_k^1=x_k^1\right)+P\left(y_k^0\neq x_k^0,y_k^1\neq x_k^1\right)\right]\\ \leq\frac{1}{2}\biggl(1+\frac{1}{\sqrt{n}}\biggr). \end{multline} Following a similar procedure, by defining $E_{x,k}^j\equiv(-1)^{x_k^j}\langle\psi\rvert\hat{A}_x^j\hat{B}_k^j\lvert\psi\rangle$, for $j = 0, 1$, in terms of the operators \begin{eqnarray} \hat{A}_x^0&\equiv&\sum_{r=0}^1\sum_{s=0}^1(-1)^{r}\lvert\nu_{r,s}^x\rangle\langle\nu_{r,s}^x\rvert,\nonumber\\ \hat{B}_k^0&\equiv&\sum_{t=0}^1\sum_{u=0}^1(-1)^{t}\lvert w_{t,u}^k\rangle\langle w_{t,u}^k\rvert, \nonumber\\ \hat{A}_x^1&\equiv&\sum_{r=0}^1\sum_{s=0}^1(-1)^{s}\lvert\nu_{r,s}^x\rangle\langle\nu_{r,s}^x\rvert,\nonumber\\ \hat{B}_k^1&\equiv&\sum_{t=0}^1\sum_{u=0}^1(-1)^{u}\lvert w_{t,u}^k\rangle\langle w_{t,u}^k\rvert,\nonumber \end{eqnarray} it can be shown that \begin{multline} \label{eq:27} \frac{1}{n}\sum_{k=0}^{n-1}\left[P\left(y_k^0=x_k^0,y_k^1=x_k^1\right)+P\left(y_k^0=x_k^0,y_k^1\neq x_k^1\right)\right]\\ \leq\frac{1}{2}\biggl(1+\frac{1}{\sqrt{n}}\biggr), \end{multline} and that \begin{multline} \label{eq:28} \frac{1}{n}\sum_{k=0}^{n-1}\left[P\left(y_k^0=x_k^0,y_k^1=x_k^1\right)+P\left(y_k^0\neq x_k^0,y_k^1= x_k^1\right)\right]\\ \leq\frac{1}{2}\biggl(1+\frac{1}{\sqrt{n}}\biggr). \end{multline} Adding Eqs.~(\ref{eq:26})--(\ref{eq:28}), using normalization of probabilities and arranging terms we obtain that \begin{equation} \frac{1}{n}\sum_{k=0}^{n-1}P\left(y_k^0=x_k^0,y_k^1=x_k^1\right)\leq\frac{1}{4}\biggl(1+\frac{3}{\sqrt{n}}\biggr),\nonumber \end{equation} as claimed. \end{document}
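The comparison between $P'$ and $Q'$ invoked above can be checked numerically. The following short script is only an illustrative sketch (it is not part of the proofs and assumes standard NumPy/SciPy routines); it computes the maximum solution of Eq.~(\ref{eq:18}) by root finding on $[1/4,1]$, where the left-hand side is strictly decreasing, and compares it with $Q'=\bigl(1+3n^{-1/2}\bigr)/4$ for $m=1$.
\begin{verbatim}
import numpy as np
from scipy.optimize import brentq

def lhs(P):
    # Shannon entropy of the distribution (P, (1-P)/3, (1-P)/3, (1-P)/3),
    # which equals h(P) + (1 - P) log2(3).
    probs = np.array([P, (1 - P) / 3, (1 - P) / 3, (1 - P) / 3])
    probs = probs[probs > 0]
    return float(-(probs * np.log2(probs)).sum())

def P_prime(m, n):
    # Maximum solution of Eq. (18): lhs is strictly decreasing on [1/4, 1],
    # with lhs(1/4) = 2 and lhs(1) = 0, so the root is bracketed.
    rhs = 2.0 * (1.0 - m / n)
    return brentq(lambda P: lhs(P) - rhs, 0.25, 1.0 - 1e-12)

for n in (10, 50, 100):
    Pp = P_prime(1, n)
    Qp = (1.0 + 3.0 / np.sqrt(n)) / 4.0
    print(n, round(Pp, 4), round(Qp, 4), Pp < Qp)
# P' < Q' holds for n = 50 and n = 100, but not for n = 10.
\end{verbatim}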
\begin{document} \title{ Convergence of Phase-Field Free Energy and Boundary Force for Molecular Solvation} \author{ Shibin Dai\thanks{Department of Mathematical Sciences, New Mexico State University, Las Cruces, NM 88003, USA. Email: [email protected].} \and Bo Li\thanks{Department of Mathematics and Quantitative Biology Graduate Program, University of California, San Diego, 9500 Gilman Drive, Mail code: 0112, La Jolla, CA 92093-0112, USA. Email: [email protected].} \and Jianfeng Lu \thanks{Department of Mathematics, Department of Physics, and Department of Chemistry, Duke University, Box 90320, Durham, NC 27708-0320, USA. Email: [email protected].} } \date{June 14, 2016} \maketitle \begin{abstract} We study a phase-field variational model for the solvation of charged molecules with an implicit solvent. The solvation free-energy functional of all phase fields consists of the surface energy, solute excluded volume and solute-solvent van der Waals dispersion energy, and electrostatic free energy. The surface energy is defined by the van der Waals--Cahn--Hilliard functional with squared gradient and a double-well potential. The electrostatic part of the free energy is defined through the electrostatic potential governed by the Poisson--Boltzmann equation in which the dielectric coefficient is defined through the underlying phase field. We prove the continuity of the electrostatics---its potential, free energy, and dielectric boundary force---with respect to the perturbation of the dielectric boundary. We also prove the $\Gamma$-convergence of the phase-field free-energy functionals to their sharp-interface limit, and the equivalence of the convergence of total free energies to that of all individual parts of the free energy. We finally prove the convergence of phase-field forces to their sharp-interface limit. Such forces are defined as the negative first variations of the free-energy functional and arise from stress tensors. In particular, we obtain the force convergence for the van der Waals--Cahn--Hilliard functionals with minimal assumptions. \noindent {\bf Key words and phrases}: solvation free energy, phase field, van der Waals--Cahn--Hilliard functional, Poisson--Boltzmann equation, $\Gamma$-convergence, convergence of boundary force. \end{abstract} {\allowdisplaybreaks \section{Introduction} \label{s:introduction} We study the convergence of a phase-field variational model to its sharp-interface limit for the solvation of charged molecules. In this section, we present first the sharp-interface and then the phase-field models of molecular solvation. We also describe our main results and discuss their connections to existing studies. To ease the presentation, the quantities are only formally defined in this section; their precise definitions are given in Section~\ref{s:MainResults}. \subsection{A Sharp-Interface Variational Model of Solvation} \label{ss:SharpInterfaceModel} We denote by $\Omega \subset \mathbb{R}^3$ the entire solvation region. It is divided into a solute (e.g., protein) region $\Omega_{\rm p}$ (p for protein) that contains solute atoms located at $x_1, \dots, x_N$, and a solvent region $\Omega_{\rm w}$ (w for water), separated by a solute-solvent (e.g., protein-water) interface $\Gamma$. The solute atomic positions $x_1, \dots, x_N$ are given and fixed. A solute-solvent interface is treated as a dielectric boundary, as it separates the low-dielectric solute from the high-dielectric solvent.
In a variational implicit-solvent model, an optimal solute-solvent interface is defined as one that minimizes the solvation free-energy functional over all possible interfaces $\Gamma \subset \Omega$ that enclose $x_1,\dots, x_N$ \cite{DSM_PRL06, DSM_JCP06, Wang_VISMCFA_JCTC12, Zhou_VISMPB_JCTC14}: \begin{align} \label{FGamma} F[\Gamma] & = P_0 \mbox{Vol}\, (\Omega_{\rm p}) + \gamma_0 \mbox{Area}\, (\Gamma) + \rho_0 \int_{\Omega_{\rm w}} U(x) \, dx + F_{\rm ele}[\Gamma]. \end{align} The first term of $F[\Gamma]$ describes the work it takes to create the solute region $\Omega_{\rm p}$ in a solvent medium at hydrostatic pressure $P_0,$ where $\mbox{Vol}\,(\Omega_{\rm p})$ is the volume of $\Omega_{\rm p}.$ The second term is the solute-solvent interfacial energy, where $\gamma_0$ is an effective, macroscopic surface tension. The third term, in which $\rho_0 $ is the constant bulk solvent density, is the solute-solvent interaction energy described by a potential $U$ that accounts for the solute-excluded volume and the solute-solvent van der Waals attraction. The interaction potential $U$ is often given by \[ U(x) = \sum_{i=1}^N U_{\text{LJ}}^{(i)}(|x-x_i|), \] where each \[ U_{\text{LJ}}^{(i)}(r) = 4\varepsilon_i\left[\left(\frac{\sigma_i}{r}\right)^{12} - \left(\frac{\sigma_i}{r}\right)^6 \right] \] is a Lennard-Jones potential with an energy parameter $\varepsilon_i$ and a length parameter $\sigma_i$. The last term is the electrostatic free energy. In the classical Poisson--Boltzmann theory, it is defined to be \cite{Li_SIMA09, DavisMcCammon_ChemRev90, SharpHonig_Rev90, CDLM_JPCB08, AndelmanHandbook95, ZhouJCP94, Zhou_VISMPB_JCTC14} \begin{equation} \label{FeleGamma} F_{\rm ele}[\Gamma] = \int_\Omega \left[ -\frac{\varepsilon_\Gamma }{2} |\nabla \psi_{\Gamma}|^2 + \rho \psi_{\Gamma} - \chi_{\Omega_{\rm w}} B(\psi_{\Gamma} ) \right] dx, \end{equation} where $\psi = \psi_{\Gamma }$ is the electrostatic potential. It solves the boundary-value problem of the Poisson--Boltzmann equation \cite{CDLM_JPCB08, AndelmanHandbook95, ZhouJCP94, Zhou_VISMPB_JCTC14} \begin{align} \label{PBE} & \nabla\cdot\varepsilon_\Gamma \nabla\psi - \chi_{\Omega_{\rm w}} B'(\psi ) = - \rho \qquad \text{in } \Omega, \\ \label{BC} &\psi = \psi_\infty \qquad \mbox{on } \partial \Omega. \end{align} Here, the dielectric coefficient $\varepsilon_\Gamma$ (in units of the vacuum permittivity) is defined by $\varepsilon_\Gamma(x) = \varepsilon_{\rm p} $ if $x \in \Omega_{\rm p}$ and $\varepsilon_\Gamma(x) = \varepsilon_{\rm w} $ if $x \in \Omega_{\rm w}$, where $\varepsilon_{\rm p}$ and $\varepsilon_{\rm w}$ are the dielectric coefficients (relative permittivities) of the solute and solvent regions, respectively. In general, $\varepsilon_{\rm p} \approx 1$ and $\varepsilon_{\rm w} \approx 80.$ The function $\rho: \Omega \to \mathbb{R}$ is the density of solute atomic charges.
It is an approximation of the point charges $\sum_{i=1}^N Q_i \delta_{x_i}$, where $Q_i$ is the partial charge carried by the $i$th atom at $x_i$ and $\delta_{x_i}$ denotes the Dirac mass at $x_i$ $(1 \le i \le N).$ The function $\chi_A$ is the characteristic function of $A.$ The function $\psi_\infty: \partial \Omega \to \mathbb{R}$ is a given boundary value of $\psi_\Gamma.$ The term $B(\psi_{\Gamma})$ models the ionic effect, and the function $B$ is given by \[ B(s) = k_{\rm B} T \sum_{j=1}^M c_j^\infty \left( e^{- q_j s/ (k_{\rm B} T) } - 1 \right), \] where $k_{\rm B}$ is the Boltzmann constant, $T$ is the absolute temperature, and $c_j^{\infty}$ and $q_j = z_j e$ are the bulk concentration and charge of the $j$th ionic species, respectively, with $z_j$ the valence and $e$ the elementary charge. We assume there are $M$ species of ions in the solvent. Note that $B'' > 0$ on $\mathbb{R}$, so $B$ is strictly convex. Moreover, in the bulk, charge neutrality holds: $ \sum_{j=1}^M q_j c_j^\infty = 0. $ This implies that $B'(0) = 0,$ and hence $B$ is also minimized at $0.$ For a smooth dielectric boundary $\Gamma$, we denote by $\nu$ its unit normal pointing from the solute region $\Omega_{\rm p}$ to the solvent region $\Omega_{\rm w}$. We define the normal component of the boundary force (per unit surface area) as the negative variation, $-\delta_\Gamma F[\Gamma]: \Gamma \to \mathbb{R}$, of the solvation free energy $F[\Gamma]$ (cf.~\reff{FGamma}). It is given by \cite{CDLM_JPCB08,Zhou_VISMPB_JCTC14,LiChengZhang_SIAP11, Luo_PCCP12, CXDMCL_JCTC09, ChengChengLi_Nonlinearity11,XiaoLuo_JCP2013} \begin{align} \label{BoundaryForce} -\delta_{\Gamma}F[\Gamma] &= -P_0 - 2 \gamma_0 H + \rho_0 U - \frac{1}{2} \left( \frac{1}{\varepsilon_{\rm p} } - \frac{1}{\varepsilon_{\rm w} } \right) \left( \varepsilon_\Gamma \frac{\partial \psi_\Gamma }{\partial \nu } \right)^2 \nonumber \\ &\quad - \frac{1}{2} ( \varepsilon_{\rm w} - \varepsilon_{\rm p} ) \left| \nabla_\Gamma \psi_\Gamma \right|^2 - B (\psi_\Gamma) \qquad \mbox{on } \Gamma, \end{align} where $H$ is the mean curvature, defined as the average of the principal curvatures and positive if $\Omega_{\rm p}$ is convex, $\psi_\Gamma$ is the electrostatic potential defined by \reff{PBE} and \reff{BC}, and $\nabla_\Gamma = (I - \nu \otimes \nu ) \nabla $, with $I$ the identity matrix, is the surface gradient along $\Gamma$. \subsection{A Phase-Field Variational Model of Solvation} \label{ss:PhaseFieldModel} To incorporate more detailed physical and chemical properties in the solute-solvent interfacial region, such as the asymmetry of the dielectric environment, Li and Liu \cite{LiLiu_SIAP15}, and Sun {\it et al.} \cite{Sun_PFVISM_JCP15} constructed and implemented a related phase-field model for the solvation of charged molecules (cf.\ also \cite{LiZhao_SIAP13,PhaseField_JCP13}). In such a model, a phase field $\phi: \Omega \to \mathbb{R},$ a continuous function that takes values close to $0$ and $1$ in $\Omega$ except in a thin transition layer, is used to describe the solvation system.
The solute and solvent regions (or phases) are approximated by $\{ \phi \approx 1 \}$ and $\{ \phi \approx 0 \}$, respectively, and the thin transition layer is the diffuse solute-solvent interface. Let $\xi > 0$ be a small number. The phase-field solvation free-energy functional of phase fields $\phi: \Omega\to \mathbb{R}$ is \cite{PhaseField_JCP13,LiZhao_SIAP13,Sun_PFVISM_JCP15, LiLiu_SIAP15}: \begin{align} \label{Fxiphi} F_\xi [\phi] & = P_0 \int_\Omega \phi^2 \, d x + \gamma_0 \int_\Omega \left[ \frac{\xi }{2} |\nabla\phi|^2 + \frac{1}{\xi } W(\phi ) \right] dx + \rho_0 \int_\Omega ( \phi - 1 )^2 U \, dx + F_{\rm ele}[\phi], \end{align} where \begin{align} \label{Felephi} F_{\rm ele}[\phi] = \int_\Omega \left[ -\frac{\varepsilon (\phi) }{2} |\nabla \psi_{\phi}|^2 + \rho \psi_{\phi} - (\phi-1)^2 B(\psi_{\phi} ) \right] dx, \end{align} and $\psi = \psi_{\phi}$ solves the boundary-value problem of the phase-field Poisson--Boltzmann equation \begin{align} \label{PhaseFieldPBE} & \nabla\cdot\varepsilon(\phi)\nabla\psi - (\phi-1)^2 B'(\psi ) = - \rho \qquad \text{in } \Omega, \\ \label{PhaseFieldBC} &\psi = \psi_\infty \qquad \mbox{on } \partial \Omega. \end{align} All the four terms in \reff{Fxiphi} correspond to those in the sharp-interface free-energy functional \reff{FGamma}. The second integral term, in which \begin{equation} \label{W} W(\phi) = 18 \phi^2 ( 1 - \phi )^2, \end{equation} is the van der Waals--Cahn--Hilliard functional \cite{vdW1893,Rowlinson_vdWtrans79,CahnHilliard58} (sometimes called the Allen--Cahn functional \cite{AllenCahn79}) that is known to $\Gamma$-converge to the area of the solute-solvent interface as $\xi \to 0$ \cite{Modica_ARMA87,Sternberg_ARMA88}. The pre-factor $18$ is so chosen that \[ \int_0^1 \sqrt{2 W(t)} \, dt = 1. \] In the last term of electrostatic free energy, the dielectric coefficient $\varepsilon = \varepsilon(\phi)$ is constructed to be a smooth function, taking the values $\varepsilon_{\rm p} $ and $\varepsilon_{\rm w} $ in the solute region $\{ \phi \approx 1 \}$ and solvent region $\{ \phi \approx 0 \}$, respectively \cite{LiLiu_SIAP15,Sun_PFVISM_JCP15}. The first variation of the functional $F_\xi[\phi]$ is given by \cite{LiLiu_SIAP15, Sun_PFVISM_JCP15} \begin{align} \label{deltaphi} \delta_\phi F_\xi [\phi] &= 2 P_0\, \phi + \gamma_0 \left[-\xi \Delta\phi+\dfrac{1}{\xi } W'(\phi)\right] + 2\rho_0 (\phi-1)U \nonumber \\ &\quad - \frac{1}{2} \varepsilon'(\phi) |\nabla\psi_\phi|^2 - 2 (\phi-1) B(\psi_\phi). \end{align}
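For the reader's convenience, we record the elementary computation behind this normalization, together with the one-dimensional optimal profile that underlies the canonical phase fields used as recovering sequences below (a standard calculation, not specific to our model): since $W(t) = 18 t^2(1-t)^2$, we have $\sqrt{2W(t)} = 6t(1-t)$ for $0 \le t \le 1$, so that \[ \int_0^1 \sqrt{2 W(t)}\, dt = 6 \int_0^1 t (1-t)\, dt = 1, \] and the equipartition relation $\xi \phi' = -\sqrt{2W(\phi)}$, which balances the gradient and double-well terms in the surface energy, is solved by \[ \phi(t) = \frac{1}{1 + e^{6t/\xi}} = \frac{1}{2}\left( 1 - \tanh\frac{3t}{\xi} \right), \] whose one-dimensional van der Waals--Cahn--Hilliard energy on the line equals $\int_0^1 \sqrt{2W(s)}\, ds = 1$.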
We remark that the van der Waals--Cahn--Hilliard functional in the phase-field model \reff{Fxiphi} is exactly the interfacial free energy defined through the macroscopic component of the water density in the Lum--Chandler--Weeks solvation theory \cite{LCW99}, although the electrostatics is not included there. It has been recognized that such interfacial free energy is crucial in the description of hydrophobic interactions \cite{Chandler05,BerneWeeksZhou_Rev09,LCW99}. \subsection{Main Results and Connections to Existing Studies} \label{ss:MainResults} In this work, we study the limit properties of the phase-field free-energy functionals \reff{Fxiphi} in terms of their sharp-interface limit. We prove the following: \begin{compactenum} \item[(1)] The convergence of the phase-field Poisson--Boltzmann electrostatics to the corresponding sharp-interface limit. More precisely, if a sequence of phase fields converges to the characteristic function of a subset of $\Omega$, then the corresponding sequences of electrostatic potentials, electrostatic free energies, and forces converge to their respective sharp-interface counterparts; cf.~Theorem~\ref{t:PBenergy} and Theorem~\ref{th:f_ele-conv}; \item[(2)] The free-energy convergence. There are two main results concerning such convergence. First, the $\Gamma$-convergence of the phase-field free-energy functionals to the corresponding sharp-interface limit; cf.\ Theorem~\ref{t:EnergyConvergence}. The existence of a global minimizer of the sharp-interface free-energy functional $F$ is then a consequence of this $\Gamma$-convergence; cf.\ Corollary~\ref{c:existenceF0}. The proof of $\Gamma$-convergence is similar to that for the van der Waals--Cahn--Hilliard functional. Care needs to be taken for the solute-solvent interaction part, i.e., the third term in \reff{FGamma} and that in \reff{Fxiphi}. In particular, we construct the recovering sequence from the same canonical phase fields used for the van der Waals--Cahn--Hilliard functional \cite{Modica_ARMA87,Sternberg_ARMA88}. Second, the equivalence of the convergence of total free energies and that of the individual parts of the free energy (volume, surface, solute-solvent van der Waals interaction, and electrostatics); cf.\ Theorem~\ref{t:individual}; \item[(3)] The force convergence: if a sequence of phase fields converges to a characteristic function and the corresponding solvation free energies converge to the sharp-interface free energy, then the corresponding phase-field forces converge to their sharp-interface counterpart. In fact, each individual part of the force converges to the corresponding sharp-interface part; cf.~Theorem~\ref{t:ForceConvSolvation}. There are two non-trivial parts in the proof of this force convergence.
One is the proof of the electrostatic force convergence, which is Theorem~\ref{th:f_ele-conv}. The other is the proof of the surface force convergence, i.e., the force convergence for the van der Waals--Cahn--Hilliard functional. Due to its general interest, we state and prove a separate theorem, Theorem~\ref{th:CH-force-conv}, for the surface force convergence. All the different kinds of forces are defined as the first variations of the corresponding parts of the free-energy functionals. These forces are shown to arise from stress tensors. Our results on force convergence are then stated in terms of the weak convergence of the corresponding stress tensors. \end{compactenum} Our work is closely related to the analysis in \cite{LiZhao_SIAP13} and \cite{LiLiu_SIAP15}. In \cite{LiZhao_SIAP13}, Li and Zhao study a similar but simpler phase-field model in which the electrostatic free energy is described by the Coulomb-field approximation \cite{ChengChengLi_Nonlinearity11,Wang_VISMCFA_JCTC12}, without the need of solving a dielectric Poisson or Poisson--Boltzmann equation. They obtain the $\Gamma$-convergence of the phase-field free-energy functionals to the respective sharp-interface functional. They also prove the existence of a global minimizer of the sharp-interface free-energy functional. In \cite{LiLiu_SIAP15}, the authors obtain the well-posedness of the phase-field Poisson--Boltzmann equation and derive the variation \reff{deltaphi}. Using matched asymptotic analysis, they also show that, in the sharp-interface limit as $\xi \to 0$, the relaxation dynamics $ \phi_t = - \delta_\phi F_\xi [\phi]$ approaches the sharp-interface dynamics governed by $v_n = -\delta_\Gamma F[\Gamma]$, where $v_n$ is the normal velocity of the sharp boundary. We shall use some of the results on the Poisson--Boltzmann electrostatics obtained in \cite{LiLiu_SIAP15}. We remark that the force convergence for (a subsequence of) van der Waals--Cahn--Hilliard functionals is proved in \cite{RogerSchatzle06} under the assumption that the corresponding sequence of free energies is bounded and that \begin{equation} \label{phasefieldH2} \sup_{0 < \xi \ll 1} \int_\Omega \frac{1}{\xi} \left[ - \xi \Delta \phi_\xi + \frac{1}{\xi} W'(\phi_\xi) \right]^2 dx < \infty, \end{equation} where $\phi_\xi $ $(0 < \xi \ll 1)$ is the underlying family of phase fields; cf.\ also \cite{Sato_IndianaUnivJ08, Ilmanen1993, PadillaTonegawa_CPAM98, HutchinsonTonegawa_2000, MizunoTonegawa_SIAM15, RogerSchatzle06} and the references therein.
These assumptions provide additional regularity that allows one to show the equi-partition of the free energy, the existence of the first variation of the varifold corresponding to the limit of the Radon measures \[ \left[ \frac{\xi}{2} |\nabla \phi_\xi |^2 + \frac{1}{\xi} W(\phi_\xi) \right] dx, \] and the rectifiability of the varifold. Here, we only assume the convergence of the phase fields to a characteristic function and the corresponding convergence of the van der Waals--Cahn--Hilliard free energies to that of the sharp-interface counterpart, i.e., the perimeter of the limit set. The free-energy convergence is a natural assumption, as the free energies can converge to a different number even if the sequence of phase fields converges to the same limit characteristic function; see the example constructed in Subsection~\ref{ss:Force}. Our proof of force convergence involves no varifolds. It is rather based on the observation that the free-energy convergence implies the asymptotic equi-partition of energy, and that the gradients of phase fields are controlled asymptotically by their projections onto the direction normal to the limit interface. Note that, without the additional assumption \reff{phasefieldH2}, we do not have the necessary regularities, and in turn we have to define the limit force in a weak sense through stress tensors. Consequently, the force convergence is proved as the weak convergence of stress tensors. \subsection{Organization of the Rest of the Paper} \label{ss:Organization} In Section~\ref{s:MainResults}, we state our assumptions and main theorems. We also define forces and their corresponding stresses. In Section~\ref{s:PB}, we present results on the Poisson--Boltzmann electrostatics. These include a unified result on the well-posedness of the Poisson--Boltzmann equation, the continuity of the electrostatic free energy with respect to the change of dielectric regions, and the convergence of the phase-field dielectric boundary force to its sharp-interface limit. In Section~\ref{s:FreeEnergyConvergence}, we prove the $\Gamma$-convergence of the phase-field free-energy functionals to their sharp-interface limit. We also prove that the convergence of total free energies is equivalent to that of the individual parts of the free energy. Finally, in Section~\ref{s:ForceConvergenceSolvation}, we first prove the convergence of all the individual and total phase-field forces to their sharp-interface counterparts for the solvation free-energy functional, except for the surface force. We then focus on the proof of the convergence of the surface force, which corresponds to the van der Waals--Cahn--Hilliard functional, in a general $n$-dimensional space with $n \ge 2.$ \section{Main Theorems} \label{s:MainResults} \subsection{Assumptions} \label{ss:Assumptions} Unless otherwise stated, we assume the following throughout the rest of the paper: \begin{compactenum} \item[(A1)] The set $\Omega \subset\mathbb{R}^3$ is nonempty, open, connected, and bounded with a $C^2$ boundary $\partial \Omega.$ The integer $N \ge 1$ and all points $x_1, \dots, x_N$ in $\Omega $ are given. All $P_0$, $\gamma_0$, and $\rho_0 $ are positive numbers.
The functions $\rho \in H^1(\Omega)\cap L^\infty(\Omega)$ and $\psi_\infty \in W^{2,\infty}(\Omega)$ are given; \item[(A2)] The function $U: \mathbb{R}^3 \to \mathbb{R}\cup \{ +\infty \}$ satisfies \[ U(x_i) = +\infty \quad \mbox{and} \quad \lim_{x\to x_i} U(x) = +\infty \quad ( i = 1, \dots, N), \quad \mbox{and} \quad \lim_{x \to \infty} U(x) = 0. \] Restricted to $\mathbb{R}^3 \setminus \{ x_1, \dots, x_N \}$, $U$ is a $C^1$-function with \[ U_{\rm min}: = \inf \{ U(x): x\in \mathbb{R}^3 \} \in (-\infty, 0]. \] Moreover, $U$ is not integrable near any of the points $x_i$ $(1\le i \le N)$ in the following sense: for any measurable subset $\omega \subset \mathbb{R}^3$, \[ \int_{\omega} U \, dx = +\infty \quad \mbox{if there exists } i\in\{ 1, \dots, N \} \mbox{ such that } \inf_{r > 0} \frac{| \omega \cap B(x_i, r)|}{r^3} > 0, \] where $B(x_i, r)$ denotes the open ball of radius $r$ centered at $x_i$ and $|Q|$ denotes the Lebesgue measure of $Q$ in $\mathbb{R}^3.$ (In what follows, measure means the Lebesgue measure, unless otherwise stated.) \item[(A3)] The numbers $\varepsilon_{\rm p}$ and $\varepsilon_{\rm w}$ are positive and distinct. The function $\varepsilon \in C^1(\mathbb{R})$ satisfies $ \varepsilon(\phi) = \varepsilon_{\rm w}$ if $\phi \le 0$, $ \varepsilon(\phi) = \varepsilon_{\rm p}$ if $\phi \ge 1$, and $\varepsilon(\phi) $ is monotonic in $(0, 1).$ (Two examples of such a function $\varepsilon$ are given in \cite{LiLiu_SIAP15}.) \item[(A4)] The function $B\in C^2(\mathbb{R})$ is strictly convex with $B(0) = \min_{s \in \mathbb{R}} B(s) = 0.$ Moreover, $B(\pm \infty) = \infty$ and $B'(\pm \infty) = \pm \infty.$ \end{compactenum}
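A standard example satisfying (A4) is obtained from the formula for $B$ in Subsection~\ref{ss:SharpInterfaceModel} by taking a symmetric $1{:}1$ electrolyte: with $M = 2$, $q_1 = -q_2 = q > 0$, and $c_1^\infty = c_2^\infty = c^\infty > 0$ (so that the bulk charge neutrality holds), a direct computation gives \[ B(s) = 2 k_{\rm B} T c^\infty \left( \cosh \frac{q s}{k_{\rm B} T} - 1 \right), \] which is strictly convex, vanishes together with $B'$ at $s = 0$, and satisfies $B(\pm\infty) = \infty$ and $B'(\pm \infty) = \pm \infty.$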
\subsection{Theorems on Free-Energy Convergence} \label{ss:FreeEnergyConvergence} We denote \begin{equation} \label{A} {\mathcal A} = \left\{ u \in H^1(\Omega): u = \psi_\infty \mbox{ on } \partial \Omega \right\}. \end{equation} For any $\phi \in L^4(\Omega),$ we define $E_\phi: {\mathcal A} \to \mathbb{R} \cup \{ \infty, -\infty \}$ by \begin{equation} \label{Ephiu} E_\phi [ u ] = \int_\Omega \left[ \frac{\varepsilon (\phi) }{2} |\nabla u |^2 - \rho u + (\phi-1)^2 B( u ) \right] dx. \end{equation} Since $B(u) \ge 0$, $E_\phi [u] > -\infty$ for any $u\in {\mathcal A} .$ By Theorem~\ref{t:phiPB}, the functional $E_\phi: {\mathcal A} \to \mathbb{R} \cup \{ +\infty \}$ has a unique minimizer $\psi_\phi \in {\mathcal A} $ that is also the unique weak solution of the corresponding boundary-value problem of the Poisson--Boltzmann equation: \reff{PBE} and \reff{BC} if $\phi$ is the characteristic function of the solute region with boundary $\Gamma$; and \reff{PhaseFieldPBE} and \reff{PhaseFieldBC} if $\phi \in H^1(\Omega)$ is a general phase field. Moreover, in both cases, \[ F_{\rm ele}[\phi] = -E_\phi[\psi_\phi] = -\min_{u\in {\mathcal A} } E_\phi [u]. \] This is exactly the electrostatic free energy $F_{\rm ele}[\Gamma]$ defined in \reff{FeleGamma} in the sharp-interface setting or $F_{\rm ele}[\phi]$ in \reff{Felephi} in the phase-field setting. Let us fix $\xi_0 \in (0, 1).$ We consider the phase-field functionals $ F_\xi: L^1(\Omega)\to \mathbb{R} \cup \{\pm\infty\} $ for all $\xi \in (0, \xi_0]$ \cite{LiLiu_SIAP15,Sun_PFVISM_JCP15}: \begin{equation} \label{newFxiphi} F_\xi [\phi] = \left\{ \begin{aligned} & P_0 \int_\Omega \phi^2 \, d x + \gamma_0 \int_\Omega \left[ \frac{\xi }{2} |\nabla\phi|^2 + \frac{1}{\xi } W(\phi ) \right] dx + \rho_0 \int_\Omega ( \phi - 1 )^2 U \, dx + F_{\rm ele}[\phi] \\ & \qquad \qquad \, \mbox{if } \phi \in H^1(\Omega), \\ & +\infty \qquad \mbox{otherwise}. \end{aligned} \right. \end{equation} Note that $F_\xi$ never takes the value $-\infty$, as $U$ is bounded below and $F_{\rm ele}[\phi]$ is finite for any $\phi \in H^1(\Omega).$ Let $D$ be a nonempty, bounded, and open subset of $\mathbb{R}^n$ for some $n \ge 2.$ We recall that a function $u\in L^1(D) $ has bounded variation in $D$ if \begin{equation*} |\nabla u|_{{BV}(D)} := \sup\left\{\int_{D} u \, \mbox{div}\, g \, dx : g\in C_{\rm c}^1(D, \mathbb{R}^n), |g|\le 1 \ \mbox{in } D \right\} < \infty, \end{equation*} where $C_{\rm c}^1(D, \mathbb{R}^n)$ denotes the space of all $C^1$-mappings from $D$ to $\mathbb{R}^n$ that are compactly supported inside $D$; cf.\ \cite{Giusti84,Ziemer_Book89,EvansGariepy_Book92}.
If $u \in W^{1, 1}(D)$ then $ |\nabla u|_{BV(D)} = \| \nabla u \|_{L^1(D)}$. The space $BV(D)$ of all $L^1(D)$-functions that have bounded variation in $D$ is a Banach space with the norm \begin{align*} \|u\|_{BV(D)}: = \|u\|_{L^1( D) } + |\nabla u|_{{BV}(D)} \qquad \forall u \in BV(D). \end{align*} For any Lebesgue-measurable subset $A \subseteq \mathbb{R}^n$, the perimeter of $A$ in $D$ is defined by \cite{Giusti84,Ziemer_Book89,EvansGariepy_Book92} \[ P_{D}(A):= |\nabla \chi_A|_{{BV}(D)}. \] We define the sharp-interface free-energy functional $F_0: L^1(\Omega) \to \mathbb{R} \cup \{ \infty, -\infty\}$ by \begin{equation} \label{def-F} F_0[\phi] = \left\{ \begin{aligned} & \displaystyle{ P_0 |A|+\gamma_0 P_\Omega(A) + \rho_0 \int_{\Omega\setminus A} U \, dx + F_{\rm ele}[\phi] } & & \quad \mbox{if } \phi=\chi_A\in BV(\Omega), & \\ & +\infty & & \quad \mbox{otherwise}. & \end{aligned} \right. \end{equation} If $\phi = \chi_A \in BV(\Omega),$ where $A\subset\Omega$ is an open subset with a smooth boundary $\Gamma$ and the closure $\overline{A} \subset \Omega$, then $F_0[\phi] = F[\Gamma]$ as defined in \reff{FGamma}. Note that the functional $F_0$ never takes the value $-\infty.$ We use the notation $\xi_k \searrow 0$ to indicate that $\{ \xi_k \}$ is a sequence of real numbers such that $\xi_1 > \xi_2 > \cdots$ and $\xi_k \to 0$ as $k \to \infty.$ We always assume that $\xi_1 \in (0,\xi_0]$. The following theorem on free-energy convergence is proved in Section~\ref{s:FreeEnergyConvergence}: \begin{theorem}[$\Gamma$-convergence of free-energy functionals] \label{t:EnergyConvergence} For any sequence $\xi_k \searrow 0$, the sequence of functionals $F_{\xi_k}: L^1(\Omega)\to \mathbb{R} \cup \{ +\infty \}$ $(k = 1, 2, \dots)$ $\Gamma$-converges to the functional $F_0: L^1(\Omega) \to \mathbb{R} \cup \{ + \infty \}$ with respect to the $L^1(\Omega)$-convergence. This means precisely that the following two properties hold true: \begin{compactenum} \item[\rm (1)] {\rm The liminf condition.} If $\phi_k \to \phi$ in $L^1(\Omega)$ then \begin{equation} \label{liminf-ineq} \liminf_{k \to \infty} F_{\xi_k}[\phi_k] \ge F_0[\phi]; \end{equation} \item[\rm (2)] {\rm The recovering sequence.} For any $\phi \in L^1(\Omega)$, there exist $\phi_k \in L^1(\Omega)$ $(k = 1, 2, \dots)$ such that $\phi_k \to \phi$ in $L^1(\Omega)$ and \begin{equation} \label{limsup-ineq} \limsup_{k \to \infty} F_{\xi_k}[\phi_k] \le F_0[\phi]. \end{equation} \end{compactenum} \end{theorem} We remark that this result does not follow immediately from the stability of $\Gamma$-convergence under continuous perturbations.
In fact, the solute-solvent interaction term (i.e., the third term) and the electrostatics term (i.e., the fourth term) in the phase-field functional \reff{newFxiphi} are not simple continuous perturbations of the van der Waals--Cahn--Hilliard functionals. The convergence of those terms requires more than the $L^1(\Omega)$-convergence of the underlying phase fields. The following corollary of the above theorem provides the existence of minimizers of the corresponding sharp-interface free-energy functional: \begin{corollary} \label{c:existenceF0} There exists a measurable subset $G \subseteq \Omega$ with finite perimeter $P_\Omega(G)$ in $\Omega$ such that $F_0[\chi_G] = \min_{\phi \in L^1(\Omega)} F_0[\phi],$ which is finite. \end{corollary} The next result, also proved in Section~\ref{s:FreeEnergyConvergence}, is of independent interest. It states that each component of the free energy converges to its sharp-interface analog if the total free energy converges. \begin{theorem} \label{t:individual} Let $\xi_k \searrow 0,$ $\phi_k\in H^1(\Omega)$ $(k = 1, 2, \dots),$ and $G\subseteq \Omega$ be measurable with $P_\Omega(G) < \infty.$ Assume that $\phi_k \to \chi_G $ a.e.\ in $ \Omega $ and $F_{\xi_k}[\phi_k] \to F_0 [\chi_G] $ with $F_0[\chi_G]$ finite. Then \begin{align} \label{volume} &\lim_{k\to \infty} \int_\Omega \phi_k^2 dx = |G|, \\ \label{surface} &\lim_{k\to \infty} \int_\Omega \left[ \frac{\xi_k}{2} |\nabla \phi_k|^2 + \frac{1}{\xi_k} W(\phi_k) \right] dx = P_\Omega(G), \\ \label{LJ} &\lim_{k\to \infty} \int_\Omega (\phi_k-1)^2 U \, dx = \int_{\Omega\setminus G} U\, dx, \\ \label{Ele} &\lim_{k\to \infty} F_{\rm ele}[\phi_k] = F_{\rm ele}[\chi_G]. \end{align} All the limits are finite. \end{theorem}
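The surface-energy convergence \reff{surface} can be illustrated by a quick one-dimensional caricature (the theorem itself is stated for $\Omega \subset \mathbb{R}^3$). The following short script is only an illustrative sketch, not used anywhere in the analysis, and assumes NumPy; it evaluates the van der Waals--Cahn--Hilliard energy of the profile $\phi_\xi(x) = \bigl(1 + e^{6(x-1/2)/\xi}\bigr)^{-1}$ on $(0,1)$, which satisfies $\xi \phi_\xi' = -\sqrt{2W(\phi_\xi)}$, for several values of $\xi$. For this particular family the computed energies are already close to the limit value $1 = P_{(0,1)}(\{ x < 1/2 \})$ for each $\xi$, consistent with \reff{surface}.
\begin{verbatim}
import numpy as np

def W(p):
    # Double-well potential W(phi) = 18 phi^2 (1 - phi)^2.
    return 18.0 * p**2 * (1.0 - p)**2

x = np.linspace(0.0, 1.0, 200001)
for xi in (0.1, 0.05, 0.02, 0.01):
    # Canonical transition profile centered at x = 1/2; the argument is
    # clipped only to avoid harmless overflow warnings in exp.
    z = np.clip(6.0 * (x - 0.5) / xi, -50.0, 50.0)
    phi = 1.0 / (1.0 + np.exp(z))
    dphi = np.gradient(phi, x)
    energy = np.trapz(0.5 * xi * dphi**2 + W(phi) / xi, x)
    print(f"xi = {xi:5.2f}   surface energy = {energy:.4f}")
# Each printed value is close to 1, the perimeter of {x < 1/2} in (0, 1).
\end{verbatim}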
\subsection{Definition of Force and Theorems on Force Convergence} \label{ss:Force} \subsubsection{Force in the Phase-Field Model} Let $\xi \in (0, \xi_0]$. We define the individual forces as vector-valued functions on $\Omega$ as follows: \begin{align*} & f_{\rm vol} (\phi) = 2 P_0 \phi \nabla \phi & & \mbox{if } \phi \in H^1(\Omega), \\ & f_{\xi, {\rm sur}} (\phi) = \gamma_0\left[ -\xi \Delta \phi + \frac{1}{\xi } W'(\phi) \right] \nabla \phi & & \mbox{if } \phi \in H^2(\Omega), \\ & f_{\rm vdW}(\phi) = 2 \rho_0 (\phi - 1) U \nabla \phi & & \mbox{if } \phi \in H^1(\Omega), \\ & f_{\rm ele}(\phi) = \left[ - \frac{\varepsilon'(\phi)}{2}|\nabla \psi_\phi|^2 - 2 (\phi - 1)B(\psi_\phi) \right] \nabla \phi & & \mbox{if } \phi \in H^1(\Omega), \end{align*} where $\psi_\phi \in {\mathcal A} $ is the electrostatic potential corresponding to $\phi$, i.e., the solution to the boundary-value problem of the Poisson--Boltzmann equation \reff{PhaseFieldPBE} and \reff{PhaseFieldBC}; cf.~Theorem~\ref{t:phiPB}. If $\phi \in H^2(\Omega)$, we define the total force \begin{equation} \label{f_xi_total} f_\xi(\phi) = f_{\rm vol} (\phi) + f_{\xi, {\rm sur}} (\phi) + f_{\rm vdW}(\phi) + f_{\rm ele}(\phi). \end{equation} Note that these forces are given as $-\nabla \phi$ multiplied by the negative first variations of the volume, surface, van der Waals solute-solvent interaction, electrostatic, and total free energies, respectively; cf.~\reff{deltaphi}. Note also that a phase field $\phi$ of lower free energy is close to the characteristic function of the solute region. The direction $-\nabla \phi$ then points from the solute to the solvent region, the same as the direction $\nu$ in the sharp-interface force \reff{BoundaryForce}. The forces can also be defined by the method of domain variations. Given $V\in C_c^1(\Omega, \mathbb{R}^n)$, we define $x = x(t,X)$ with $t \in (-t_0, t_0)$, for some small $t_0 > 0$, and $X \in \Omega$ by $\dot{x} = V(x)$ and $ x(0,X) = X. $ This defines a family of transformations $T_t: \Omega \to \Omega$ with $T_t(X) = x (t,X). $ For a smooth phase field $\phi$, these transformations define the perturbations $\phi\circ T_t$ of $\phi.$ For the phase-field functional $F_\xi $, one then naturally defines the force to be $-(d/dt)|_{t=0} F_\xi [\phi \circ T_t]$, the negative variation of the phase-field free-energy functional $F_\xi$ at $\phi$ with respect to these perturbations. Note that \[ T_t(X) = X + t V(X) + o(t) \quad \mbox{as } t \to 0. \] Hence, \[ (\phi \circ T_t)(X) = \phi (X) + t \nabla \phi (X) \cdot V(X) + o(t) \qquad\mbox{as } t \to 0. \]
Therefore, \[ - \frac{d}{dt}\biggr|_{t=0} F_\xi [\phi \circ T_t] = - \frac{d}{dt}\biggr|_{t=0} F_\xi [\phi + t \nabla \phi \cdot V + o(t)] = - \int_\Omega \delta_\phi F_\xi [\phi]\, \nabla \phi \cdot V \, dx. \] By \reff{f_xi_total}, the integrand differs from $f_\xi (\phi) \cdot V$ only by a sign. This sign difference results from our choice of the force direction as discussed above. We now define the corresponding individual stress tensors (with respect to the underlying coordinate system) by \begin{align} \label{stressphivol} & T_{\rm vol}(\phi) = P_0 \phi^2 I && \mbox{if }\phi \in L^4(\Omega), \\ \label{stressphisurf} &T_{\xi, {\rm sur} } (\phi) = \gamma_0\left\{ \left[ \frac{\xi}{2} |\nabla \phi|^2 + \frac{1}{\xi} W(\phi)\right] I- \xi \nabla \phi \otimes \nabla \phi \right\} && \mbox{if }\phi \in H^1(\Omega),\\ \label{stressphivdW} &T_{\rm vdW}(\phi) = \rho_0 (\phi-1)^2 U I && \mbox{if }\phi \in L^4(\Omega), \\ \label{stressphielec} &T_{\rm ele}(\phi) = \varepsilon(\phi) \nabla \psi_\phi \otimes \nabla \psi_\phi - \left[ \frac{\varepsilon(\phi)}{2} | \nabla \psi_\phi |^2 + (\phi - 1)^2 B(\psi_\phi) \right] I && \mbox{if }\phi \in L^4(\Omega). \end{align} Note that we assume $\phi \in L^4(\Omega)$, as our double-well potential $W = W(\phi)$ defined in \reff{W} is a polynomial of degree $4$.
Moreover, $\phi\in L^4(\Omega)$ is necessary for the term $(\phi-1)^2$ in the functional $F_{\xi}[\phi] $ defined in \reff{Fxiphi} and in $F_{\rm ele}[\phi] $ defined in \reff{Felephi} to be in $L^2(\Omega)$. Note also that we have the Sobolev embedding $H^1(\Omega) \hookrightarrow L^4(\Omega).$ We recall that the divergence of a tensor field $T = (T_{ij})$, denoted $\nabla \cdot T$ or $\mbox{div}\, T$, is the vector field with components $\partial_j T_{ij}$ $(i = 1, 2, 3)$, if they exist. For a differentiable vector field $V: \Omega \to \mathbb{R}^3$ that has components $V_i $ $(i = 1, 2, 3)$, the gradient $\nabla V$ is the matrix-valued function with the $(i,j)$-entry $\partial_j V_i.$ For any $3 \times 3$ matrices $A$ and $B,$ we define $A : B = \sum_{i,j=1}^3 A_{ij} B_{ij}.$ We also define $|A|$ by $|A|^2 = {\sum_{i,j=1}^3 |A_{ij}|^2}.$ It is straightforward to generalize these definitions and this notation to $\mathbb{R}^n$ for any $n \ge 2.$ The following lemma indicates that the phase-field forces defined above arise from the corresponding stress tensors. Moreover, the stress tensors require lower regularity of the phase field $\phi$ than the forces do: \begin{lemma} \label{l:Stress} We have for almost all points in $\Omega$ that \begin{align} \label{Tfvol} &f_{\rm vol}(\phi) = \nabla\cdot T_{\rm vol}(\phi) & & \mbox{if } \phi \in H^1(\Omega), & \\ \label{Tfsur} &f_{\xi, {\rm sur}}(\phi) =\nabla \cdot T_{\xi, {\rm sur}}(\phi) && \mbox{if } \phi \in H^2(\Omega), & \\ \label{TfvdW} &f_{\rm vdW}(\phi) = \nabla \cdot T_{\rm vdW }(\phi) - \rho_0 (\phi-1)^2 \nabla U && \mbox{if } \phi \in H^1(\Omega), & \\ \label{Tfele} &f_{\rm ele}(\phi)=\nabla \cdot T_{\rm ele}(\phi) +\rho \nabla \psi_\phi && \mbox{if } \phi \in W^{1,\infty}(\Omega). \end{align}
&
\end{align}
Moreover, we have for any $V \in C_c^1(\Omega,\mathbb{R}^3)$ that
\begin{align}
\label{weak-f_vol}
& \int_\Omega f_{\rm vol}(\phi) \cdot V \, dx = - \int_\Omega T_{\rm vol}(\phi): \nabla V\, dx \qquad \mbox{if } \phi \in H^1(\Omega), \\
\label{weak-f_sur}
& \int_\Omega f_{\xi, {\rm sur}} (\phi) \cdot V \, dx = - \int_\Omega T_{\xi, {\rm sur}}(\phi) : \nabla V\, dx \qquad \mbox{if } \phi \in H^2(\Omega), \\
& \int_\Omega f_{\rm vdW} (\phi) \cdot V \, dx = - \int_\Omega \left[ T_{\rm vdW} (\phi): \nabla V + \rho_0 (\phi-1)^2 \nabla U \cdot V\right] dx \nonumber \\
& \qquad \qquad \qquad \qquad \qquad \mbox{if } \{ x_1, \dots, x_N \} \cap \mbox{\rm supp}\, (V) = \emptyset \quad \mbox{and} \quad \phi \in H^1(\Omega), \label{weak-f_vdW} \\
\label{weak-f_ele}
& \int_\Omega f_{\rm ele}(\phi) \cdot V \, dx = - \int_\Omega \left[ T_{\rm ele}(\phi) :\nabla V -\rho \nabla \psi_\phi \cdot V\right] \, dx \qquad \mbox{if } \phi \in W^{1, \infty} (\Omega).
\end{align}
\end{lemma}

\begin{proof}
The identities \reff{Tfvol} and \reff{TfvdW} follow from direct calculations. All the identities \reff{weak-f_vol}--\reff{weak-f_ele} follow from \reff{Tfvol}--\reff{Tfele} and integration by parts. Therefore, it remains only to prove \reff{Tfsur} and \reff{Tfele}.

Let $\phi\in H^2(\Omega)$ and $i \in \{ 1, 2, 3\}.$ We have by the definition of $T_{\xi, {\rm sur}}(\phi)$ and using the summation convention that
\begin{align*}
\partial_j T_{\xi, {\rm sur}, ij}(\phi)
&= \gamma_0 \partial_j \left\{ \left[ \frac{\xi}{2} \partial_k \phi \partial_k \phi + \frac{1}{\xi} W(\phi)\right] \delta_{ij} - \xi \partial_i \phi \partial_j \phi \right\} \\
& = \gamma_0\left\{ \xi \partial_{ik} \phi \partial_k \phi + \frac{1}{\xi} W'(\phi) \partial_i \phi - \xi \partial_{ij}\phi \partial_j \phi - \xi \partial_i \phi \Delta \phi \right\}\\
& = \gamma_0 \left[ - \xi \Delta \phi + \frac{1}{\xi} W'(\phi) \right] \partial_i \phi,
\end{align*}
where $\delta_{ij} = 1$ if $i = j$ and $0$ otherwise. This is the $i$th component of the force vector $f_{\xi, {\rm sur}}$; \reff{Tfsur} is thus proved.

Now let $\phi\in W^{1,\infty}(\Omega).$ By Theorem~\ref{t:phiPB}, $\psi_\phi$ is bounded on the set $\{ \phi \ne 1\}$.
Since $\phi \in W^{1,\infty}(\Omega),$ we have
\[
\varepsilon(\phi) \Delta \psi_\phi = -\rho - \varepsilon'(\phi) \nabla \phi \cdot \nabla \psi_\phi + (\phi-1)^2 B'(\psi_\phi) \in L^2(\Omega).
\]
Hence $\psi_\phi \in H^2(\Omega).$ By direct calculations using the fact that $\psi_\phi$ solves the Poisson--Boltzmann equation, we obtain
\begin{align*}
\partial_j T_{{\rm ele}, ij}
& = \partial_j ( \varepsilon(\phi) \partial_i \psi_\phi \partial_j \psi_\phi ) - \delta_{ij} \partial_j \left[ \frac12 \varepsilon(\phi) \partial_k \psi_\phi \partial_k \psi_\phi + (\phi-1)^2 B(\psi_\phi) \right] \\
& = \varepsilon'(\phi) \partial_j \phi \partial_i \psi_\phi \partial_j \psi_\phi + \varepsilon(\phi) \partial_{ij} \psi_\phi \partial_j \psi_\phi + \varepsilon(\phi) \partial_i \psi_\phi \Delta \psi_\phi \\
&\qquad - \frac12 \varepsilon'(\phi) \partial_i \phi |\nabla \psi_\phi|^2 - \varepsilon(\phi) \partial_{ik} \psi_\phi \partial_k \psi_\phi - 2 (\phi-1) \partial_i \phi B(\psi_\phi) - (\phi-1)^2 B'(\psi_\phi) \partial_i \psi_\phi \\
& = \left[ \nabla \cdot \left( \varepsilon(\phi) \nabla \psi_\phi \right) - (\phi-1)^2 B'(\psi_\phi) \right] \partial_i \psi_\phi - \left[ \frac{\varepsilon'(\phi)}{2} | \nabla \psi_\phi |^2 + 2 (\phi - 1 ) B(\psi_\phi) \right] \partial_i \phi \\
& = - \rho \partial_i \psi_\phi - \left[ \frac{\varepsilon'(\phi)}{2} | \nabla \psi_\phi |^2 + 2 (\phi - 1 ) B(\psi_\phi) \right] \partial_i \phi, \qquad i = 1, 2, 3,
\end{align*}
proving \reff{Tfele}.
\end{proof}

\subsubsection{Force in the Sharp-Interface Model}

Let $G$ be an open subset of $\Omega$ such that the closure $\overline{G} \subset \Omega$, the boundary $\partial G$ is $C^2,$ and $x_i \in G$ $(i = 1, \dots, N).$ Denote by $\nu$ the unit normal on $\partial G$ that points from $G$ to $G^c = \Omega \setminus G.$ Following \reff{FGamma} (with $\Gamma = \partial G$) or \reff{def-F} (with $A = G$), and \reff{BoundaryForce} (with $\Gamma = \partial G$), we define the individual volume, surface, van der Waals, and electrostatic boundary forces as vector-valued functions on $\partial G$ as follows:
\begin{align}
& f_{0,{\rm vol}}[\partial G] = -P_0 \nu, \label{f0_vol}\\
& f_{0, {\rm sur}} [\partial G] = - 2\gamma_0 H\nu, \label{f0_sur} \\
& f_{0,{\rm vdW}}[\partial G] = \rho_0 U \nu, \label{f0_vdW} \\
& f_{0, {\rm ele}}[\partial G] = \left[ - \frac{1}{2} \left( \frac{1}{\varepsilon_{\rm p}} - \frac{1}{\varepsilon_{\rm w}} \right) \left|\varepsilon (\chi_G) \nabla \psi_{\chi_G} \cdot \nu \right|^2 \right. \nonumber \\
&\qquad \qquad \quad \left. - \frac12 (\varepsilon_{\rm w} - \varepsilon_{\rm p}) \left| (I - \nu \otimes \nu ) \nabla \psi_{\chi_G} \right|^2 - B(\psi_{\chi_G}) \right] \nu.
\label{f0_ele}
\end{align}
We also define the total boundary force to be
\begin{align*}
f_0[\partial G] &= f_{0,{\rm vol}}[\partial G] + f_{0,{\rm sur}}[\partial G] + f_{0,{\rm vdW}}[\partial G] + f_{0, {\rm ele}}[\partial G].
\end{align*}
In \qref{f0_sur}, $H$ is the mean curvature of $\partial G$, defined as the average of the principal curvatures, and is positive if $G$ is convex. In \reff{f0_ele}, $\psi_{\chi_G}\in {\mathcal A}$ is the electrostatic potential corresponding to $\chi_G$; cf.~Theorem~\ref{t:phiPB}. It satisfies $\psi_{\chi_G} |_G \in H^2(G)$ and $\psi_{\chi_G} |_{G^c} \in H^2(G^c)$. Moreover (cf.~\cite{Li_SIMA09,LiChengZhang_SIAP11}),
\begin{align}
\label{psiGp}
& - \varepsilon_{\rm p} \Delta \psi_{\chi_G} = \rho && \mbox{in } G, & \\
\label{psiGw}
& - \varepsilon_{\rm w} \Delta \psi_{\chi_G} + B'(\psi_{\chi_G}) = \rho && \mbox{in } G^c, & \\
\label{psiGcont}
& \psi_{\chi_G} |_{G} = \psi_{\chi_G} |_{G^c} && \mbox{on } \partial G, & \\
\label{psiGgrad}
& \varepsilon_{\rm p} \nabla \psi_{\chi_G} |_{G}\cdot \nu = \varepsilon_{\rm w} \nabla \psi_{\chi_G} |_{G^c} \cdot \nu && \mbox{on } \partial G. &
\end{align}
The quantity $\varepsilon(\chi_G) \nabla \psi_{\chi_G} \cdot \nu$ in \reff{f0_ele} is the common value of both sides of \reff{psiGgrad}. By \reff{psiGcont}, the tangential gradient $(I-\nu \otimes \nu)\nabla \psi_{\chi_G}$ in \reff{f0_ele} is the same when $\psi_{\chi_G}$ is restricted to either side of the boundary $\partial G.$ We recall that the stress tensors $T_{\rm vol}(\chi_G)$, $T_{\rm vdW}(\chi_G)$, and $T_{\rm ele}(\chi_G)$ are defined in \reff{stressphivol}, \reff{stressphivdW}, and \reff{stressphielec}, respectively, with $\phi$ replaced by $\chi_G.$

The following lemma indicates that the forces defined above in \reff{f0_vol}--\reff{f0_ele} also arise from stress tensors in the sharp-interface model and that less regularity of the set $G$ is needed to define the stresses:

\begin{lemma}
\label{l:sharpboundaryforce}
Let $G$ be an open subset of $\Omega$ such that the closure $\overline{G} \subset \Omega$ and the boundary $\partial G$ is $C^2.$ Let $\nu$ denote the unit normal on $\partial G$ that points from $G$ to $G^c.$ We have for any $V\in C_c^1(\Omega,\mathbb{R}^n)$ that
\begin{align}
& \int_{\partial G} f_{0, {\rm vol}}[\partial G] \cdot V \, dS = -\int_\Omega T_{\rm vol}(\chi_G):\nabla V \,dx, \label{weak-f0_vol}\\
& \int_{\partial G} f_{0, {\rm sur}}[\partial G] \cdot V \, dS = -\gamma_0\int_{\partial G} (I-\nu\otimes\nu):\nabla V\,dS, \label{weak-f0_sur} \\
& \int_{\partial G} f_{0, {\rm vdW}}[\partial G] \cdot V \, dS = -\int_{\Omega} \left[ T_{\rm vdW}(\chi_G):\nabla V +\rho_0(1-\chi_G)^2\nabla U\cdot V\right] \,dx \nonumber \\
& \qquad \qquad \qquad \qquad \qquad \qquad \qquad \mbox{if } \{ x_1, \dots, x_N \} \cap \mbox{\rm supp}\, (V) = \emptyset, \label{weak-f0_vdW}\\
& \int_{\partial G} f_{0, {\rm ele}}[\partial G] \cdot V \, dS = - \int_\Omega \left[ T_{\rm ele}(\chi_G) : \nabla V -\rho \nabla \psi_{\chi_G} \cdot V \right] \, dx. \label{weak-f0_ele}
\end{align}
\end{lemma}

\begin{proof}
Eq.~\reff{weak-f0_vol} follows from the identity $I:\nabla V = \nabla \cdot V$ and an application of the divergence theorem. Eq.~\reff{weak-f0_sur} follows from our definition of the force $f_{0, {\rm sur}}$ and the known result (cf.\ Lemma~10.8 in \cite{Giusti84}):
\[
\int_{\partial G} 2 H \nu \cdot V \, dS = \int_{\partial G} (I - \nu \otimes \nu) : \nabla V \, dS.
\]
Assume each $x_i \not\in \mbox{supp}\,(V)$ $(1 \le i \le N).$ Noticing that $\nu$ points from $G$ to $G^c=\Omega\setminus G$, we have by the definition of $T_{\rm vdW}(\chi_G)$ (cf.\ \reff{stressphivdW}) and the divergence theorem that
\begin{align*}
&\int_{\Omega} \left[ T_{\rm vdW}(\chi_G):\nabla V +\rho_0(1-\chi_G)^2\nabla U\cdot V\right] \,dx \\
&\qquad = \rho_0 \int_{G^c} ( U \nabla \cdot V + \nabla U \cdot V) \, dx \\
&\qquad = \rho_0 \int_{G^c} \nabla \cdot ( U V) \, dx \\
&\qquad = -\rho_0 \int_{\partial G} U \nu \cdot V\, dS,
\end{align*}
leading to \reff{weak-f0_vdW}. Finally, Eq.~\reff{weak-f0_ele} is part of Theorem~\ref{th:f_ele-conv}, which is proved in Section~\ref{s:PB}.
\end{proof}
\subsubsection{Force Convergence}

Let $D$ be a nonempty, open, and bounded subset of $\mathbb{R}^n$ with $n \ge 2.$ For any measurable subset $G$ of $D$ with $\overline{G} \subset D$ and $P_D (G) < \infty,$ we denote by $\partial^*G$ the reduced boundary of $G$ and by $\| \partial G\| = {\mathcal H}^{n-1}\mres(\partial^*G\cap D)$ the perimeter measure of $G$ in $D$, where ${\mathcal H}^{n-1}$ denotes the $(n-1)$-dimensional Hausdorff measure \cite{Giusti84,EvansGariepy_Book92,Ziemer_Book89}. We also denote by $\nu: D \to \mathbb{R}^n$ the unit outer normal of $\partial^*G$. We recall that $|\nu| = 1$ $\|\partial G\|$-a.e.\ and
\begin{equation}
\label{perimetermeasure}
\int_G \nabla \cdot g \, dx = \int_{\partial^*G} g \cdot \nu \, d{\mathcal H}^{n-1} \qquad \forall g \in C_c^1(D, \mathbb{R}^n).
\end{equation}

The following result states that the convergence of the total force is equivalent to that of the individual forces; its proof is given in Section~\ref{s:ForceConvergenceSolvation}:

\begin{theorem}[Force convergence for the solvation free-energy functional]
\label{t:ForceConvSolvation}
Let $G$ be a measurable subset of $\Omega$ such that $\overline{G} \subset\Omega$, $P_\Omega(G) < \infty,$ and $F_0[\chi_G]$ is finite. Let $\xi_k\searrow 0$ and $\phi_k \in H^1(\Omega)$ $(k = 1, 2, \dots)$ be such that $\phi_k \to \chi_G$ a.e.\ in $\Omega$ and $F_{\xi_k} [\phi_k] \to F_0[\chi_G].$ Then we have for any $V \in C_c^1(\Omega, \mathbb{R}^3)$ that
\begin{align}
& \lim_{k \to \infty} \int_\Omega T_{\rm vol} (\phi_k) : \nabla V \, dx = \int_{\Omega} T_{\rm vol}(\chi_G):\nabla V \, dx, \label{f_vol-conv}\\
& \lim_{k \to \infty} \int_\Omega T_{\xi_k, {\rm sur}} (\phi_k) : \nabla V \, dx = \gamma_0\int_{\partial^* G} \left( I-\nu\otimes\nu \right):\nabla V\, d{\mathcal H}^{2}, \label{CH-force-conv} \\
&\lim_{k \to \infty} \int_\Omega \left[ T_{\rm vdW} (\phi_k) : \nabla V + \rho_0 ( \phi_k - 1)^2 \nabla U \cdot V \right] dx \nonumber\\
&\quad = \int_\Omega \left[ T_{\rm vdW} (\chi_G) : \nabla V + \rho_0 ( \chi_G - 1)^2 \nabla U \cdot V \right] dx \quad \mbox{if } \{ x_1, \dots, x_N \} \cap \mbox{\rm supp}\, (V) = \emptyset, \label{f_vdW-conv}\\
& \lim_{k\to \infty} \int_\Omega \left[ T_{\rm ele}(\phi_k) : \nabla V -\rho \nabla \psi_{\phi_k} \cdot V \right] \, dx = \int_\Omega \left[ T_{\rm ele}(\chi_G) : \nabla V -\rho \nabla \psi_{\chi_G} \cdot V \right] \, dx. \label{f_ele-conv}
\end{align}
\end{theorem}

The force convergence for the van der Waals--Cahn--Hilliard functional is the main part of the above theorem. Since this functional is a rather general model, we state its force convergence separately, for a general $n$-dimensional space.
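To indicate how Theorem~\ref{t:ForceConvSolvation} relates to the boundary forces defined in the previous subsection, we record the following formal consequence. It is not part of the theorem: it additionally assumes that $G$ is open with a $C^2$ boundary, that each $\phi_k$ has the extra regularity required in Lemma~\ref{l:Stress} (namely $\phi_k \in H^2(\Omega)\cap W^{1,\infty}(\Omega)$), and that $f_{\xi_k}(\phi_k)$ denotes, as in \reff{f_xi_total}, the sum of the four individual phase-field forces. Under these assumptions, summing the four limits in the theorem and combining \reff{weak-f_vol}--\reff{weak-f_ele} with \reff{weak-f0_vol}--\reff{weak-f0_ele} (note that $\partial^* G = \partial G$ for a $C^2$ boundary) gives, for any $V \in C_c^1(\Omega, \mathbb{R}^3)$ with $\{ x_1, \dots, x_N \} \cap \mbox{\rm supp}\,(V) = \emptyset$,
\[
\lim_{k\to\infty} \int_\Omega f_{\xi_k}(\phi_k) \cdot V \, dx = \int_{\partial G} f_0[\partial G] \cdot V \, dS,
\]
i.e., the total phase-field force converges to the total sharp-interface boundary force in the sense of distributions.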
For simplicity of notation, we define the stress tensor $T_{\xi}(\phi)$ to be the same as $T_{\xi,{\rm sur}}(\phi)$ defined in \reff{stressphisurf}, except that we take $\gamma_0 = 1$; i.e., we define, for a function $\phi$ of $n$ variables,
\begin{equation*}
T_{\xi} (\phi) = \left[ \frac{\xi}{2} |\nabla \phi|^2 + \frac{1}{\xi} W(\phi)\right] I - \xi \nabla \phi \otimes \nabla \phi,
\end{equation*}
where $I$ is the $n\times n$ identity matrix.

\begin{theorem}[Force convergence for the van der Waals--Cahn--Hilliard functional]
\label{th:CH-force-conv}
Let $\Omega$ be a nonempty, bounded, and open subset of $\mathbb{R}^n.$ Let $G$ be a nonempty, measurable subset of $\Omega$ such that $\overline{G} \subset \Omega$ and $P_\Omega (G) < \infty$. Assume $\xi_k \searrow 0$ and $\phi_k \in H^1(\Omega)$ $(k = 1, 2, \dots)$ satisfy that $\phi_k \to \chi_G$ a.e.\ in $\Omega$ and that
\begin{equation}
\label{important}
\lim_{k \to \infty} \int_\Omega \left[ \frac{\xi_k}{2} |\nabla \phi_k|^2 + \frac{1}{\xi_k} W(\phi_k) \right] dx = P_\Omega(G).
\end{equation}
Then we have for any $\Psi \in C_c(\Omega, \mathbb{R}^{n\times n})$ that
\begin{align}
\label{CH-force-conv1}
\lim_{k \to \infty} \int_\Omega T_{\xi_k} (\phi_k) : \Psi \, dx = \int_{\partial^* G} \left(I- \nu\otimes\nu\right):\Psi\, d{\mathcal H}^{n-1}.
\end{align}
If, in addition, $\phi_k\in W^{2,2}(\Omega)$ $(k = 1, 2, \dots)$, $G$ is open, and $\partial G$ is of class $C^2$, then we have for any $V\in C_c^1(\Omega,\mathbb{R}^n)$ that
\begin{align}\label{CH-force-conv2}
\lim_{k\to\infty}\int_\Omega\left[ -\xi_k\Delta\phi_k + \frac1{\xi_k} W'(\phi_k)\right] \nabla\phi_k \cdot V\;dx = - (n-1)\int_{\partial G} H\nu\cdot V \, dS.
\end{align}
\end{theorem}
We remark that the hypotheses of the above theorem include the convergence \reff{important} of the free energies. Such convergence is not guaranteed by the assumptions that $\phi_k \to \chi_G$ a.e.\ in $\Omega$ and $\phi_k \to \chi_G$ in $L^1(\Omega)$. This is to be expected, as not every such sequence is a recovery sequence for the $\Gamma$-convergence. In particular, let $G$ be an open subset of $\Omega$ with a smooth boundary $\partial G$ and $\overline{G} \subset \Omega$, and let $\beta$ be any real number such that
\[
\beta \geq \sigma := \int_0^1\sqrt{2W(s)}\;ds.
\]
(We have $\sigma = 1$ for our choice of $W$.) We show that, given any $\xi_k \searrow 0$, there exist $\phi_k\in H^1(\Omega)$ $(k = 1, 2, \dots)$ such that
\begin{enumerate}
\item[(1)] $\phi_k\to \chi_G$ a.e.\ in $\Omega$ and $\phi_k\to \chi_G$ in $L^1(\Omega)$;
\item[(2)] $\displaystyle \lim_{k\to\infty} \int_\Omega \left[ \frac{\xi_k}{2}|\nabla\phi_k|^2 +\frac{W(\phi_k)}{\xi_k} \right] dx = \beta P_{\Omega}(G).$
\end{enumerate}
Let $a > 0$ and define $W_a (s) = W(s)/a$ $(s\in \mathbb{R}).$ For each $k \ge 1$, we define $q_k: [0,1]\to \mathbb{R}$ by
\[
q_k (t) = \int_0^t \frac{\xi_k}{\sqrt{2 [ W_a(\tau) + \xi_k]}}\, d\tau \qquad \forall t\in[0,1].
\]
Clearly, $q_k$ is a strictly increasing function of $t \in [0, 1]$ with $q_k (0) = 0,$ $\lambda_k := q_k (1) \in (0, \sqrt{\xi_k /2}),$ and $q_k(t) \le t$ for any $t \in [0,1].$ Let $g_k : [0,\lambda_k]\rightarrow[0,1]$ be the inverse of $q_k: [0,1]\to [0,\lambda_k].$
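For completeness, we note the elementary bound behind these properties of $q_k$: since $W_a \ge 0$, the integrand in the definition of $q_k$ satisfies
\[
0 < \frac{\xi_k}{\sqrt{2 [ W_a(\tau) + \xi_k]}} \le \sqrt{\frac{\xi_k}{2}} \qquad \forall \tau \in [0,1],
\]
with strict inequality wherever $W_a(\tau) > 0$. Integrating gives $\lambda_k = q_k(1) < \sqrt{\xi_k/2}$, and, once $\xi_k \le 2$ (as we may assume, since $\xi_k \searrow 0$), also $q_k(t) \le t$ for all $t \in [0,1]$.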
By the formula for the derivative of an inverse function, we obtain
\begin{equation*}
g_k'(s) = \frac{1}{\xi_k} \sqrt{ 2 [ W_a (g_k(s)) + \xi_k] } \qquad \forall s \in[0,\lambda_k].
\end{equation*}
We extend $g_k$ to the entire real line by defining $g_k (s) = 0$ for any $s < 0$ and $g_k (s) = 1$ for any $s > \lambda_k.$ Denote now by $d: \Omega \to\mathbb{R}$ the signed distance function to the boundary $\partial G$: $d(x) = \mbox{dist}\,(x, \partial G)$ if $x \in G$ and $d(x) = -\mbox{dist}\,(x, \partial G)$ if $x \in G^c$. Define $\phi_k: \Omega \to [0, 1]$ by $\phi_k (x) = g_k (d(x))$ $(x\in \Omega)$. Then $\phi_k \to \chi_G$ a.e.\ in $\Omega$ and $\phi_k \to \chi_G$ in $L^1(\Omega)$ \cite{Modica_ARMA87,Sternberg_ARMA88}. Moreover, since $\partial G$ is smooth, we have for a.e.\ $x\in \Omega$ and $k$ large enough that
\begin{align*}
|\nabla\phi_k(x)| = | g_k'(d(x))\nabla d(x)| = \frac{1}{\xi_k}\sqrt{ 2 \left[ W_a (\phi_k(x)) +\xi_k \right] }.
\end{align*}
Note for any $s \in [0, 1]$ that $\phi_k(x) = s$ if and only if $d(x) = q_k(s)$, and $q_k(s) \le \lambda_k \to 0$ as $k \to \infty.$ Since $\partial G$ is smooth, we have (cf.\ Lemma~4 in \cite{Modica_ARMA87} and Lemma~2 in \cite{Sternberg_ARMA88}) that
\begin{align*}
& \lim_{k\to \infty} \sup_{0 \le s \le 1} {\mathcal H}^{n-1}( \{ x\in \Omega: \phi_k (x) = s \} ) \\
&\qquad = \lim_{k\to \infty} \sup_{0 \le s \le 1} {\mathcal H}^{n-1}( \{ x\in \Omega: d(x) = q_k(s) \} ) \\
&\qquad = P_\Omega(G).
\end{align*}
Consequently, applying the co-area formula and the Lebesgue Dominated Convergence Theorem, we obtain that
\begin{align*}
\lim_{k\to\infty} & \int_\Omega \left[ \frac{\xi_k}{2}|\nabla \phi_k|^2 + \frac{W(\phi_k)}{\xi_k} \right] dx \\
&=\lim_{k\to\infty}\int_{\Omega} \left( \frac{ \sqrt{ W_a (\phi_k) +\xi_k } }{\sqrt 2} + \frac {a W_a (\phi_k)}{\sqrt {2 \left[ W_a (\phi_k) +\xi_k \right] }} \right) |\nabla\phi_k|\, dx \\
&=\lim_{k\to\infty}\int_0^1 {\mathcal H}^{n-1}( \{ x\in \Omega: \phi_k (x) = s \} ) \left( \frac{\sqrt{ W_a (s) +\xi_k } }{\sqrt 2} + \frac {a W_a (s)}{\sqrt {2 \left[ W_a (s) +\xi_k \right] }} \right)\;ds \\
&=P_{\Omega}(G)\int_0^1 \frac{1+a}{\sqrt 2} \sqrt{ W_a (s)}\;ds \\
&= \frac{1+a}{2\sqrt{a}}\,\sigma \, P_{\Omega}(G).
\end{align*}
If $\beta = \sigma$, we can take $a=1$. If $\beta>\sigma$, we have two choices of $a>0$ such that $\beta = (1+a)\sigma /(2\sqrt{a})$.
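Concretely, the admissible values of $a$ can be written down explicitly: with $t = \sqrt{a}$ and $r = \beta/\sigma \ge 1$, the relation $\beta = (1+a)\sigma/(2\sqrt{a})$ becomes $t^2 - 2rt + 1 = 0$, so that
\[
a = \left( r \pm \sqrt{r^2 - 1} \right)^2, \qquad r = \frac{\beta}{\sigma}.
\]
The two roots are reciprocals of each other, and they coincide (with $a = 1$) exactly when $\beta = \sigma$.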
Thus for any $\beta\geq \sigma$ we can find $\phi_k$ $(k = 1, 2, \dots)$ that satisfy (1) and (2).
\section{The Poisson--Boltzmann Electrostatics}
\label{s:PB}

We first present some basic results regarding the boundary-value problem for the Poisson--Boltzmann equation and the corresponding electrostatic free energy for a function $\phi: \Omega \to \mathbb{R}$ that describes the dielectric environment. These results unify and improve those of Theorem~2.1 in \cite{LiChengZhang_SIAP11} and Theorem~2.1 in \cite{LiLiu_SIAP15}. We recall that the set ${\mathcal A}$ and the functional $E_\phi$ are defined in \reff{A} and \reff{Ephiu}, respectively.

\begin{theorem}
\label{t:phiPB}
Let $\phi \in L^4(\Omega).$ There exists a unique $\psi_{\phi} \in {\mathcal A}$ such that
\begin{equation}
\label{Ephipsiphi}
E_\phi [\psi_\phi] = \min_{u \in {\mathcal A}} E_\phi [u],
\end{equation}
which is finite. Moreover, $\psi_\phi \in {\mathcal A}$ is the unique weak solution to the boundary-value problem for the Poisson--Boltzmann equation \reff{PhaseFieldPBE} and \reff{PhaseFieldBC}, i.e., $\psi_{\phi} \in {\mathcal A}$ and
\begin{equation}
\label{psiphiweak}
\int_\Omega \left[ \varepsilon(\phi) \nabla \psi_{\phi} \cdot \nabla \eta + (\phi-1)^2 B'(\psi_{\phi}) \, \eta \right] dx = \int_\Omega \rho \eta \, dx \qquad \forall \eta \in H_0^1(\Omega).
\end{equation}
Finally, $\psi_\phi \in L^\infty(\Omega)$ and there exists a constant $C > 0$ independent of $\phi \in L^4(\Omega)$ such that
\begin{align*}
& \|\chi_{\{ \phi \ne 1\}} \psi_{\phi} \|_{L^\infty(\Omega)} \le C, \\
& \|\psi_{\phi} \|_{H^1(\Omega)} \le C \left( 1 + \| \phi \|_{L^2(\Omega)} \right), \\
& \|\psi_{\phi} \|_{L^\infty(\Omega)} \le C \left( 1 + \| \phi \|_{L^4(\Omega)}^2 \right).
\end{align*}
\end{theorem}

\begin{proof}
The proof is similar to that of Theorem~2.1 in \cite{LiLiu_SIAP15}. First, note that $B \in C^2(\mathbb{R})$ is convex and nonnegative. By the direct method in the calculus of variations, there exists a unique $\psi_\phi \in {\mathcal A}$ that satisfies \reff{Ephipsiphi}. The minimum value is finite, as it is bounded above by $E_\phi [\psi_\infty] < \infty.$ Next, by a comparison argument using the growth property and convexity of $B$ (cf.\ the proof of Theorem~2.1 in \cite{LiLiu_SIAP15}), we have $| \psi_\phi | \le C$ a.e.\ on $\{\phi \ne 1\}$ for some constant $C > 0$ independent of $\phi.$ This is the first desired estimate.
This estimate, together with the Lebesgue Dominated Convergence Theorem, allows us to obtain \reff{psiphiweak} for $\eta\in H_0^1(\Omega)\cap L^\infty(\Omega).$ By approximation, \reff{psiphiweak} is true for all $\eta \in H^1_0(\Omega).$ Finally, the fact that $\psi_\phi \in L^\infty(\Omega)$ and the other two desired estimates follow from the regularity theory for elliptic problems; cf.\ Theorem~8.3 and Theorem~8.16 in \cite{GilbargTrudinger83} and the proof of Theorem~2.1 in \cite{LiLiu_SIAP15}. In particular, the estimate (10) in \cite{LiLiu_SIAP15} provides the bound $C (1 + \| \phi \|^2_{L^4(\Omega)})$ for $\| \psi_\phi \|_{L^\infty(\Omega)}.$
\end{proof}

The following theorem indicates that the electrostatic potential and the electrostatic free energy are continuous with respect to the change of the dielectric boundary:

\begin{theorem}
\label{t:PBenergy}
Let $\phi_k \in L^4(\Omega)$ $(k = 1, 2, \dots)$ and $\phi \in L^4(\Omega)$ be such that
\begin{equation}
\label{L4L1}
\sup_{k \ge 1} \| \phi_k \|_{L^4(\Omega)} < \infty \quad \mbox{and} \quad \phi_k \to \phi \quad \mbox{in } L^1(\Omega).
\end{equation}
Let $\psi_{\phi_k}\in {\mathcal A}$ $(k = 1, 2, \dots)$ and $\psi_\phi \in {\mathcal A}$ be the corresponding electrostatic potentials, i.e.,
\[
E_{\phi_k} [\psi_{\phi_k}] = \min_{u \in {\mathcal A}} E_{\phi_k} [u] \quad (k = 1, 2, \dots) \quad \mbox{and} \quad E_{\phi} [\psi_\phi] = \min_{u \in {\mathcal A}} E_{\phi} [u],
\]
respectively. Then $\psi_{\phi_k} \to \psi_\phi$ in $H^1(\Omega)$ and $E_{\phi_k} [\psi_{\phi_k}] \to E_\phi [\psi_\phi].$
\end{theorem}

To prove this and other theorems, we need the following lemma, which holds true for any measurable subset $\Omega \subset \mathbb{R}^n$ of finite measure $|\Omega|$:

\begin{lemma}
\label{l:Lq}
Let $1 < p < \infty$ and $\phi_k \in L^p(\Omega)$ $(k = 1, 2, \dots)$ be such that
\begin{equation}
\label{pnormbound}
\sup_{k \ge 1} \| \phi_k \|_{L^p(\Omega)} < \infty.
\end{equation}
Let $\phi \in L^1(\Omega)$. Assume either $\phi_k \to \phi$ a.e.\ in $\Omega$ or $\phi_k \to \phi$ in $L^1(\Omega).$ Then $\phi \in L^p(\Omega)$ and $\phi_k \to \phi$ in $L^q(\Omega)$ for any $q \in [1, p).$
\end{lemma}

\begin{proof}
Assume $\phi_k \to \phi$ a.e.\ in $\Omega$. Fatou's lemma then leads to
\[
\int_\Omega |\phi|^p dx \le \liminf_{k\to\infty} \int_\Omega |\phi_{k}|^p dx < \infty.
\]
Hence $\phi \in L^p(\Omega).$ Let $\varepsilon > 0.$ Egoroff's Theorem implies that there exists a measurable subset $A \subseteq \Omega$ such that $|A| < \varepsilon$ and $\phi_k \to \phi$ uniformly on $A^c = \Omega \setminus A$. Therefore, it follows from H\"older's inequality, the triangle inequality, and \reff{pnormbound} that for any $q \in [1, p)$
\begin{align*}
\limsup_{k\to \infty} \int_\Omega | \phi_k - \phi |^q dx
& = \limsup_{k\to \infty} \left[ \int_{A} | \phi_k - \phi |^q dx + \int_{A^c} | \phi_k - \phi |^q dx \right] \\
& \le \limsup_{k\to \infty} |A|^{(p-q)/p} \left( \| \phi_k \|_{L^p(\Omega)} + \| \phi \|_{L^p(\Omega)} \right)^q + \limsup_{k \to \infty} \int_{A^c} | \phi_k - \phi |^q dx\\
& \le \varepsilon^{(p-q)/p} \left( \sup_{k \ge 1} \| \phi_k \|_{L^p(\Omega)} + \| \phi \|_{L^p(\Omega)} \right)^q.
\end{align*}
Since $\varepsilon > 0$ is arbitrary, $\phi_k \to \phi$ in $L^q(\Omega).$

Assume now $\phi_k \to \phi$ in $L^1(\Omega)$. Then there exists a subsequence of $\{ \phi_k \}$ that converges to $\phi$ a.e.\ in $\Omega$.
Applying Fatou's lemma to this subsequence, we also get $\phi \in L^p(\Omega).$ Let $q \in [1, p)$; the case $q = 1$ is just the assumption, so we may take $1 < q < p.$ Every subsequence of $\{ \phi_k \}$ has a further subsequence that converges to $\phi$ a.e.\ in $\Omega$, and hence, as proved above, converges to $\phi$ in $L^q(\Omega)$. Thus $\phi_k\to \phi$ in $L^q(\Omega)$.
\end{proof}

We are now ready to prove Theorem~\ref{t:PBenergy}. We use the symbol $\rightharpoonup$ to denote weak convergence.

\begin{proof}[Proof of Theorem~\ref{t:PBenergy}]
For notational convenience, let us write $\psi_k = \psi_{\phi_k}$ and $\psi = \psi_\phi.$

We first prove that $\psi_k \to \psi$ in $H^1(\Omega)$. It suffices to prove that any subsequence of $\{ \psi_k \}$ has a further subsequence that converges to $\psi$ in $H^1(\Omega).$ Note by Theorem~\ref{t:phiPB} and \reff{L4L1} that
\begin{align}
\label{PBk}
&\int_\Omega \left[ \varepsilon(\phi_k) \nabla \psi_{k} \cdot\nabla \eta + ( \phi_k - 1)^2 B'(\psi_{k}) \eta \right] dx = \int_\Omega \rho \eta \, dx \qquad \forall \eta \in H^1_0(\Omega) \quad \forall k \ge 1, \\
\label{PBphi}
&\int_\Omega \left[ \varepsilon(\phi) \nabla \psi \cdot\nabla \eta + ( \phi - 1)^2 B'(\psi) \eta \right] dx = \int_\Omega \rho \eta \, dx \qquad \forall \eta \in H^1_0(\Omega), \\
\label{supsup}
& \sup_{k \ge 1} \left( \| \psi_{k} \|_{H^1(\Omega)} +\| \psi_{k} \|_{L^\infty(\Omega)}\right)<\infty \quad \mbox{and} \quad \psi \in L^\infty(\Omega).
\end{align}
By \reff{L4L1} and \reff{supsup}, any subsequence of $\{\psi_k\}$ has a further subsequence $\{\psi_{k_j}\}$ that converges to some $\hat{\psi} \in H^1(\Omega)$ weakly in $H^1(\Omega)$, strongly in $L^2(\Omega),$ and a.e.\ in $\Omega$, and such that the corresponding sequence $\{ \phi_{k_j} \}$ converges to $\phi$ a.e.\ in $\Omega$. We prove that $\hat{\psi} = \psi$ in $H^1(\Omega)$ and that $\psi_{k_j} \to \psi$ strongly in $H^1(\Omega).$

Since ${\mathcal A}$ is convex and strongly closed in $H^1(\Omega),$ it is sequentially weakly closed. Hence $\hat{\psi} \in {\mathcal A}.$ Since $\psi_{k_j} \to \hat{\psi}$ a.e.\ in $\Omega$, by \reff{supsup}, $\hat{\psi} \in L^\infty(\Omega).$ By Lemma~\ref{l:Lq}, $\phi_{k_j} \to \phi$ in $L^q(\Omega)$ for any $q \in [1, 4).$ Hence, $\varepsilon(\phi_{k_j}) \to \varepsilon(\phi)$ in $L^2(\Omega).$ Similarly,
\begin{equation}
\label{L3over2}
(\phi_{k_j} -1)^2 \to (\phi - 1)^2 \quad \mbox{in } L^{3/2}(\Omega).
\end{equation}
By the compact embedding $H^1(\Omega) \hookrightarrow L^3(\Omega)$ and the weak convergence $\psi_{k_j} \rightharpoonup \hat{\psi}$ in $H^1(\Omega)$, we have that $\psi_{k_j} \to \hat{\psi}$ in $L^3(\Omega),$ and hence that
\begin{equation}
\label{BprimeL3}
B'(\psi_{k_j}) \to B'(\hat{\psi}) \quad \mbox{in } L^3(\Omega).
\end{equation}
Therefore, replacing $\phi_k$ and $\psi_k$ in \reff{PBk} by $\phi_{k_j}$ and $\psi_{k_j}$, respectively, and then sending $j \to \infty$, we obtain for any $\eta \in C_c^1(\Omega)$ that
\[
\int_\Omega \left[ \varepsilon(\phi) \nabla \hat{\psi} \cdot\nabla \eta + (\phi-1)^2 B'(\hat{\psi}) \eta \right] dx = \int_\Omega \rho\eta \, dx.
\]
Since $C_c^1(\Omega)$ is dense in $H_0^1(\Omega),$ this identity holds true also for any $\eta \in H_0^1(\Omega)$. This and \reff{PBphi}, together with the uniqueness of the weak solution established in Theorem~\ref{t:phiPB}, imply that $\hat{\psi} =\psi$ in $H^1(\Omega).$

We now prove $\psi_{k_j} \to \psi$ in $H^1(\Omega).$ By our assumptions on $\varepsilon$, the fact that $\psi_{k_j} - \psi\in H_0^1(\Omega)$ $(j = 1, 2, \dots)$, and Poincar\'e's inequality, it suffices to prove
\begin{equation}
\label{H1strong}
\lim_{j \to \infty} \int_\Omega \varepsilon(\phi_{k_j}) | \nabla \psi_{k_j} - \nabla \psi|^2 dx = 0.
\end{equation}
By \reff{L4L1} and Lemma~\ref{l:Lq}, we have $\phi_{k_j} \to \phi$ in $L^{7/2}(\Omega)$ and hence $(\phi_{k_j} - 1)^2 \to (\phi - 1)^2$ in $L^{7/4}(\Omega).$ Similarly, by the convergence $\psi_{k_j} \to \psi$ in $L^2(\Omega)$, the embedding $H^1(\Omega) \hookrightarrow L^{14/3}(\Omega)$, \reff{supsup}, and Lemma~\ref{l:Lq}, we have $\psi_{k_j} \to \psi$ in $L^{14/3}(\Omega)$ and hence $B'(\psi_{k_j}) \to B'(\psi)$ in $L^{14/3}(\Omega).$ Consequently, by H\"older's inequality,
\begin{equation*}
\lim_{j\to \infty} \int_\Omega (\phi_{k_j} - 1 )^2 B'(\psi_{k_j}) ( \psi_{k_j} - \psi_\infty ) \, dx = \int_\Omega (\phi - 1 )^2 B'(\psi) ( \psi - \psi_\infty) \, dx.
\end{equation*}
Setting $\eta = \psi_{k_j} - \psi_\infty \in H_0^1(\Omega)$ in \reff{PBk} and \reff{PBphi}, we then obtain
\begin{align}
\label{psikpsik}
& \lim_{j\to \infty} \int_\Omega \varepsilon(\phi_{k_j}) |\nabla \psi_{k_j} |^2 dx \nonumber \\
& \qquad = \lim_{j \to \infty} \int_\Omega \left[ \varepsilon(\phi_{k_j}) \nabla \psi_{k_j} \cdot \nabla \psi_\infty + \varepsilon(\phi_{k_j}) \nabla \psi_{k_j} \cdot \nabla (\psi_{k_j} - \psi_\infty) \right] dx \nonumber \\
& \qquad = \lim_{j \to \infty} \int_\Omega \left[ \varepsilon(\phi_{k_j}) \nabla \psi_{k_j} \cdot \nabla \psi_\infty + \rho ( \psi_{k_j}- \psi_\infty ) - (\phi_{k_j} -1)^2 B'(\psi_{k_j}) ( \psi_{k_j} - \psi_\infty ) \right] dx \nonumber \\
& \qquad = \int_\Omega \left[ \varepsilon(\phi) \nabla \psi \cdot \nabla \psi_\infty +\rho ( \psi - \psi_\infty ) - (\phi-1)^2 B'(\psi) ( \psi - \psi_\infty ) \right] dx \nonumber \\
& \qquad = \int_\Omega \left[ \varepsilon(\phi) \nabla \psi \cdot \nabla \psi_\infty + \varepsilon(\phi) \nabla \psi \cdot \nabla (\psi - \psi_\infty) \right] dx \nonumber \\
&\qquad = \int_\Omega \varepsilon(\phi) |\nabla \psi |^2 dx.
\end{align}
Since $\phi_{k_j} \to \phi$ a.e.\ in $\Omega,$ the Lebesgue Dominated Convergence Theorem implies that
\begin{equation}
\label{vekpsi2}
\lim_{j \to \infty} \int_\Omega \varepsilon(\phi_{k_j}) | \nabla \psi|^2 dx = \int_\Omega \varepsilon(\phi) | \nabla \psi|^2 dx.
\end{equation}
It now follows from \reff{psikpsik}, \reff{vekpsi2}, and the fact that $\varepsilon(\phi_{k_j}) \to \varepsilon(\phi)$ in $L^2(\Omega)$ and $\psi_{k_j} \rightharpoonup \psi$ in $H^1(\Omega)$ that
\begin{align*}
& \lim_{j \to \infty} \int_\Omega \varepsilon(\phi_{k_j}) | \nabla \psi_{k_j} - \nabla \psi|^2 dx \\
&\quad = \lim_{j \to \infty} \int_\Omega \left[ \varepsilon(\phi_{k_j}) | \nabla \psi_{k_j} |^2 - 2 \varepsilon(\phi_{k_j}) \nabla \psi_{k_j} \cdot \nabla \psi + \varepsilon(\phi_{k_j}) | \nabla \psi |^2 \right] dx \\
&\quad = \int_\Omega \left[ \varepsilon(\phi) | \nabla \psi |^2 - 2 \varepsilon(\phi) \nabla \psi \cdot \nabla \psi + \varepsilon(\phi) | \nabla \psi |^2 \right] dx \\
& \quad = 0,
\end{align*}
leading to \reff{H1strong}.

We finally prove the energy convergence $E_{\phi_k}[\psi_k] \to E_\phi [\psi].$ Since $\phi_k \to \phi$ in $L^1(\Omega)$ and $\psi_k \to \psi$ in $H^1(\Omega)$, any subsequence of $\{ \phi_k \}$ and the corresponding subsequence of $\{ \psi_k \}$ have further subsequences $\{ \phi_{k_j} \}$ and $\{ \psi_{k_j} \}$, respectively, such that $\phi_{k_j} \to \phi$ a.e.\ in $\Omega$, and $\psi_{k_j} \to \psi$ in $H^1(\Omega)$ and a.e.\ in $\Omega.$ By \reff{L3over2} and the analogue of \reff{BprimeL3} with $B$ in place of $B'$ and $\psi$ in place of $\hat{\psi}$, we have
\begin{equation}
\label{fB}
\lim_{j \to \infty} \int_\Omega \left[ -\rho \psi_{k_j} + (\phi_{k_j} -1)^2 B(\psi_{k_j}) \right] dx = \int_\Omega \left[ -\rho \psi + (\phi-1)^2 B(\psi) \right] dx.
\end{equation}
This and \reff{psikpsik} imply that $E_{\phi_{k_j}} [\psi_{k_j}] \to E_{\phi}[\psi]$. Hence $E_{\phi_k}[\psi_k] \to E_\phi [\psi].$
\end{proof}

We now state and prove the last result in this section: the convergence to the sharp-interface limit of the phase-field electrostatic boundary forces, in terms of the weak convergence of the corresponding stress fields; cf.~Lemma~\ref{l:Stress}. We recall that $f_{0,{\rm ele}}[\partial G]$ is defined in \reff{f0_ele}.

\begin{theorem}[Convergence of dielectric boundary force]
\label{th:f_ele-conv}
Let $\phi_k \in L^4(\Omega)$ $(k = 1, 2, \dots)$ and $\phi \in L^1(\Omega)$ be such that
\begin{equation}
\label{threephik}
\sup_{k \ge 1} \| \phi_k \|_{L^4(\Omega)} < \infty \quad \mbox{and} \quad \phi_k \to \phi \quad \mbox{a.e.\ in } \Omega.
\end{equation}
We have for any $V \in C_c^1(\Omega, \mathbb{R}^3)$ that
\begin{align}\label{Fgeneral}
\lim_{k\to \infty} \int_\Omega \bigl[ T_{\rm ele}(\phi_k) : \nabla V -\rho \nabla \psi_{\phi_k} \cdot V \bigr] dx = \int_\Omega \bigl[ T_{\rm ele}(\phi) : \nabla V -\rho \nabla \psi_{\phi} \cdot V \bigr] dx.
\end{align}
If, in addition, $\phi = \chi_G$ for some open subset $G$ of $\Omega$ with a $C^2$ boundary $\partial G$ and the closure $\overline{G} \subset \Omega$, then this limit is
\begin{equation}
\label{Fspecial}
\int_\Omega \bigl[ T_{\rm ele}(\chi_G ) : \nabla V -\rho \nabla \psi_{\chi_G} \cdot V \bigr] dx
= - \int_{\partial G} f_{0, {\rm ele}}[\partial G] \cdot V \, dS.
\end{equation}
\end{theorem}

\begin{proof}
We first note that, by Lemma~\ref{l:Lq}, $\phi \in L^4(\Omega)$ and $\phi_k \to \phi$ in $L^q(\Omega)$ for any $q\in [1, 4).$ Let us denote $\psi_k = \psi_{\phi_k}$ $(k \ge 1)$ and $\psi = \psi_\phi.$ Since $\varepsilon$ is a bounded function and $\psi_k \to \psi$ in $H^1(\Omega)$ by Theorem~\ref{t:PBenergy}, we have
\begin{align*}
&\lim_{k \to \infty} \int_\Omega \varepsilon(\phi_k) \left[ ( \nabla \psi_k - \nabla \psi) \otimes ( \nabla \psi_k - \nabla \psi) \right. \\
&\qquad \left. + \nabla \psi \otimes ( \nabla \psi_k - \nabla \psi) + ( \nabla \psi_k - \nabla \psi) \otimes \nabla \psi \right] : \nabla V \, dx = 0.
\end{align*}
Since $\phi_k \to \phi$ a.e.\ in $\Omega$, the Lebesgue Dominated Convergence Theorem implies that
\[
\lim_{k \to \infty} \int_\Omega \varepsilon(\phi_k) \nabla \psi \otimes \nabla \psi : \nabla V \, dx
= \int_\Omega \varepsilon(\phi) \nabla \psi \otimes \nabla \psi : \nabla V \, dx.
\]
Therefore,
\begin{align}
\label{q1}
&\lim_{k\to \infty} \int_\Omega \varepsilon(\phi_k) \nabla \psi_k \otimes \nabla \psi_k : \nabla V \, dx \nonumber \\
& \qquad = \lim_{k \to \infty} \int_\Omega \varepsilon(\phi_k) \left[ ( \nabla \psi_k - \nabla \psi) \otimes ( \nabla \psi_k - \nabla \psi)
+ \nabla \psi \otimes ( \nabla \psi_k - \nabla \psi) \right. \nonumber \\
&\qquad \qquad \left. + ( \nabla \psi_k - \nabla \psi) \otimes \nabla \psi + \nabla \psi \otimes \nabla \psi \right] : \nabla V \, dx \nonumber \\
&\qquad = \int_\Omega \varepsilon(\phi) \nabla \psi \otimes \nabla \psi : \nabla V \, dx.
\end{align}
Similarly,
\begin{equation}
\label{q2}
\lim_{k\to \infty} \int_\Omega \varepsilon(\phi_k) |\nabla \psi_k|^2 \nabla \cdot V \, dx
= \int_\Omega \varepsilon(\phi) |\nabla \psi|^2 \nabla \cdot V \, dx.
\end{equation}
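Both \reff{q1} and \reff{q2} rest on the same elementary estimates, which we record explicitly: by the boundedness of $\varepsilon$ and the Cauchy--Schwarz inequality,
\begin{align*}
\left| \int_\Omega \varepsilon(\phi_k) ( \nabla \psi_k - \nabla \psi) \otimes ( \nabla \psi_k - \nabla \psi) : \nabla V \, dx \right|
&\le \Bigl( \sup_{s \in \mathbb{R}} |\varepsilon(s)| \Bigr) \| \nabla V \|_{L^\infty(\Omega)}\, \| \nabla \psi_k - \nabla \psi \|_{L^2(\Omega)}^2, \\
\left| \int_\Omega \varepsilon(\phi_k) \nabla \psi \otimes ( \nabla \psi_k - \nabla \psi) : \nabla V \, dx \right|
&\le \Bigl( \sup_{s \in \mathbb{R}} |\varepsilon(s)| \Bigr) \| \nabla V \|_{L^\infty(\Omega)}\, \| \nabla \psi \|_{L^2(\Omega)}\, \| \nabla \psi_k - \nabla \psi \|_{L^2(\Omega)},
\end{align*}
and both right-hand sides tend to $0$ as $k \to \infty$ because $\psi_k \to \psi$ in $H^1(\Omega)$.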
As in the proof of Theorem~\ref{t:PBenergy}, we have again by the convergence $\psi_k \to \psi$ in $H^1(\Omega)$ that
\begin{align}
\label{q3}
& \lim_{k \to \infty} \int_\Omega \left[ (\phi_k-1)^2 B(\psi_k) \nabla \cdot V +\rho \nabla \psi_k \cdot V \right] \, dx
= \int_\Omega \left[ (\phi-1)^2 B(\psi) \nabla \cdot V + \rho \nabla \psi \cdot V \right] \, dx.
\end{align}
It now follows from the definition of $T_{\rm ele}$ (cf.\ \reff{stressphielec}) and \reff{q1}--\reff{q3} that
\begin{align*}
&\lim_{k\to \infty} \int_\Omega \bigl[ T_{\rm ele}(\phi_k) : \nabla V - \rho \nabla \psi_{\phi_k} \cdot V \bigr] dx \\
&\qquad = \lim_{k\to \infty} \int_\Omega \biggl\{ \varepsilon(\phi_k) \nabla \psi_k \otimes \nabla \psi_k : \nabla V
- \left[ \frac12 \varepsilon(\phi_k) | \nabla \psi_k |^2 + (\phi_k-1)^2 B(\psi_k) \right] \nabla \cdot V \\
&\qquad \qquad \quad - \rho \nabla \psi_k \cdot V \biggr\} dx \\
&\qquad = \int_\Omega \biggl\{ \varepsilon(\phi) \nabla \psi \otimes \nabla \psi : \nabla V
- \left[ \frac12 \varepsilon(\phi) | \nabla \psi |^2 + (\phi-1)^2 B(\psi) \right] \nabla \cdot V - \rho \nabla \psi \cdot V \biggr\} dx \\
&\qquad = \int_\Omega \bigl[ T_{\rm ele}(\phi) : \nabla V - \rho \nabla \psi \cdot V \bigr] dx.
\end{align*}
This is exactly \reff{Fgeneral}, since $\psi = \psi_\phi.$

We now prove \reff{Fspecial}. Denote again $\psi = \psi_\phi = \psi_{\chi_G} \in {\mathcal A}.$ Denote also by $V_i$ and $\nu_i$ $(i = 1, 2, 3)$ the components of $V$ and $\nu$, respectively. Notice that the unit normal $\nu$ points from $G$ to $G^c=\Omega \setminus G$.
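The ``conventional summation notation'' in the computation below means that repeated indices are summed over $1, 2, 3$; since all tensors appearing there are symmetric, the order of indices is immaterial, and
\[
T : \nabla V = T_{ij}\, \partial_j V_i, \qquad \nabla \cdot V = \partial_j V_j,
\qquad (\nabla \psi \otimes \nabla \psi)_{ij} = \partial_i \psi\, \partial_j \psi .
\]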
Using the conventional summation notation, we have by integration by parts that
\begin{align}
\label{DBFlong}
&\int_\Omega \left[ T_{\rm ele}(\chi_G) : \nabla V -\rho \nabla \psi_{\chi_G} \cdot V \right] dx \nonumber \\
& \qquad = \int_\Omega \left\{ \varepsilon(\chi_G) \nabla \psi \otimes \nabla \psi : \nabla V
- \left[ \frac{\varepsilon(\chi_G)}{2} | \nabla \psi |^2 + \chi_{G^c} B(\psi) \right] \nabla \cdot V
-\rho \nabla \psi \cdot V \right\} dx \nonumber \\
& \qquad = \int_G \left( \varepsilon_{\rm p} \partial_i \psi \partial_j \psi \partial_j V_i
- \frac{\varepsilon_{\rm p}}{2} \partial_i \psi \partial_i \psi \partial_j V_j -\rho \nabla \psi \cdot V \right) dx \nonumber \\
&\qquad \qquad + \int_{G^c} \left[ \varepsilon_{\rm w} \partial_i \psi \partial_j \psi \partial_j V_i
- \frac{\varepsilon_{\rm w}}{2} \partial_i \psi \partial_i \psi \partial_j V_j - B(\psi) \partial_j V_j
-\rho \nabla \psi \cdot V \right] dx \nonumber \\
& \qquad = \int_G \left( -\varepsilon_{\rm p} \partial_{ij} \psi \partial_j \psi V_i
-\varepsilon_{\rm p} \partial_i \psi \partial_{jj} \psi V_i + \varepsilon_{\rm p} \partial_{ij} \psi \partial_i \psi V_j
-\rho \nabla \psi \cdot V \right) \, dx \nonumber \\
&\qquad \qquad + \int_{\partial G} \left( \varepsilon_{\rm p} \partial_i \psi|_G \partial_j \psi|_G V_i \nu_j
- \frac{\varepsilon_{\rm p}}{2} \partial_i \psi|_G \partial_i \psi|_G V_j \nu_j \right) dS \nonumber \\
&\qquad \qquad + \int_{G^c} \left[ - \varepsilon_{\rm w} \partial_{ij} \psi \partial_j \psi V_i
- \varepsilon_{\rm w} \partial_i \psi \partial_{jj} \psi V_i + \varepsilon_{\rm w} \partial_{ij} \psi \partial_i \psi V_j
+B'(\psi) \partial_j \psi V_j -\rho \nabla \psi \cdot V \right] \, dx \nonumber \\
&\qquad \qquad + \int_{\partial G} \left[ - \varepsilon_{\rm w} \partial_i \psi|_{G^c} \partial_j \psi|_{G^c} V_i \nu_j
+ \frac{\varepsilon_{\rm w}}{2} \partial_i \psi|_{G^c} \partial_i \psi|_{G^c} V_j \nu_j + B(\psi) V_j \nu_j \right] dS \nonumber \\
&\qquad = \int_G ( - \varepsilon_{\rm p} \Delta \psi -\rho ) \nabla \psi \cdot V \, dx
+ \int_{G^c} \left[ -\varepsilon_{\rm w} \Delta \psi + B'(\psi) -\rho \right] \nabla \psi \cdot V \, dx \nonumber \\
&\qquad \qquad + \int_{\partial G} \biggl\{ \varepsilon_{\rm p} ( \nabla \psi \cdot \nu ) \nabla \psi|_{G} \cdot V
- \varepsilon_{\rm w} ( \nabla \psi \cdot \nu ) \nabla \psi|_{G^c} \cdot V
\nonumber \\
&\qquad \qquad \qquad + \left[ \frac{\varepsilon_{\rm w}}{2} | \nabla \psi|_{G^c} |^2
- \frac{\varepsilon_{\rm p}}{2} | \nabla \psi|_{G} |^2 + B(\psi) \right] V \cdot \nu \biggr\} dS \nonumber \\
&\qquad = \int_{\partial G} \biggl\{ \varepsilon(\chi_G) (\nabla \psi \cdot \nu ) (\nabla \psi|_{G} - \nabla \psi|_{G^c} ) \cdot V \nonumber \\
&\qquad \qquad + \left[ \frac{\varepsilon_{\rm w}}{2} | \nabla \psi|_{G^c} |^2
- \frac{\varepsilon_{\rm p}}{2} | \nabla \psi|_{G} |^2 + B(\psi) \right] V \cdot \nu \biggr\} dS,
\end{align}
where in the last step we used \reff{psiGp}--\reff{psiGgrad}. The gradient $\nabla \psi$ restricted onto $\partial G$ from either $G$ or $G^c$ has the decomposition
\[
\nabla \psi = (\nabla \psi \cdot \nu ) \nu + (I - \nu \otimes \nu) \nabla \psi \qquad \mbox{on } \partial G.
\]
Since $\psi$ is continuous across $\partial G$ (cf.\ \reff{psiGcont}), the tangential derivatives of $\psi$, and hence $(I-\nu \otimes \nu) \nabla \psi,$ are continuous across the interface $\partial G$:
\[
(I - \nu\otimes\nu) \nabla \psi|_G = (I - \nu\otimes\nu) \nabla \psi|_{G^c} \qquad \mbox{on } \partial G.
\]
Thus
\[
\nabla \psi|_{G} - \nabla \psi|_{G^c} = (( \nabla \psi|_{G} - \nabla \psi|_{G^c} ) \cdot \nu ) \nu \qquad \mbox{on } \partial G.
\]
Moreover, restricted onto $\partial G$ from either $G$ or $G^c$,
\[
|\nabla \psi|^2 = | (\nabla \psi \cdot \nu ) \nu + (I - \nu\otimes\nu) \nabla \psi|^2
= | \nabla \psi \cdot \nu |^2 + |(I - \nu\otimes\nu) \nabla \psi|^2.
\]
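For the algebra in the next display it is convenient to note that, by the interface conditions \reff{psiGp}--\reff{psiGgrad} already invoked in the last step of \reff{DBFlong}, the normal component $\varepsilon(\chi_G) \nabla \psi \cdot \nu$ takes the same value on both sides of $\partial G$. Writing $a = \nabla \psi|_{G} \cdot \nu$ and $b = \nabla \psi|_{G^c} \cdot \nu$, so that $\varepsilon_{\rm p} a = \varepsilon_{\rm w} b = \varepsilon(\chi_G) \nabla \psi \cdot \nu$, we have the elementary identity
\[
\varepsilon_{\rm p} a^2 - \varepsilon_{\rm w} b^2 - \frac{\varepsilon_{\rm p}}{2} a^2 + \frac{\varepsilon_{\rm w}}{2} b^2
= \frac{\varepsilon_{\rm p}}{2} a^2 - \frac{\varepsilon_{\rm w}}{2} b^2
= \frac12 \left( \frac{1}{\varepsilon_{\rm p}} - \frac{1}{\varepsilon_{\rm w}} \right) | \varepsilon(\chi_G) \nabla \psi \cdot \nu |^2,
\]
which is what drives the computation below.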
Therefore,
\begin{align*}
& \varepsilon(\chi_G) ( \nabla \psi \cdot \nu ) (\nabla \psi|_{G} - \nabla \psi|_{G^c} ) \cdot V
+ \left[ \frac{\varepsilon_{\rm w}}{2} | \nabla \psi|_{G^c} |^2 - \frac{\varepsilon_{\rm p}}{2} | \nabla \psi|_{G} |^2 + B(\psi) \right] V \cdot \nu \\
&\qquad = \left[ \varepsilon_{\rm p} | \nabla \psi|_{G } \cdot \nu |^2 - \varepsilon_{\rm w} | \nabla \psi|_{G^{c}} \cdot \nu |^2
+ \frac{\varepsilon_{\rm w}}{2} | \nabla \psi|_{G^c} |^2 - \frac{\varepsilon_{\rm p}}{2} | \nabla \psi|_{G} |^2 + B(\psi) \right] V \cdot \nu \\
&\qquad = \left[ \frac{\varepsilon_{\rm p}}{2} | \nabla \psi|_{G } \cdot \nu |^2 - \frac{\varepsilon_{\rm w}}{2} | \nabla \psi|_{G^c} \cdot \nu |^2
+ \frac12 ( \varepsilon_{\rm w} - \varepsilon_{\rm p} ) | (I - \nu\otimes\nu) \nabla \psi |^2 + B(\psi) \right] V \cdot \nu \\
&\qquad = \left[ \frac12 \left( \frac{1}{\varepsilon_{\rm p}} - \frac{1}{\varepsilon_{\rm w}} \right) | \varepsilon(\chi_G) \nabla \psi \cdot \nu |^2
+ \frac12 ( \varepsilon_{\rm w} - \varepsilon_{\rm p} ) | (I - \nu\otimes\nu) \nabla \psi |^2 + B(\psi) \right] V \cdot \nu \\
&\qquad = - f_{0, {\rm ele}} [\partial G] \cdot V.
\end{align*}
With our notation $\psi = \psi_{\chi_G},$ this and \reff{DBFlong} imply \reff{Fspecial}.
\end{proof}
\section{Free-Energy Convergence}
\label{s:FreeEnergyConvergence}

In this section, we first prove some lemmas. We then prove Theorem~\ref{t:EnergyConvergence} on the $\Gamma$-convergence of free-energy functionals and its Corollary~\ref{c:existenceF0}. Finally, we prove Theorem~\ref{t:individual} on the equivalence of the convergence of the total free energy and that of each individual part of the free energy.

The first lemma is on the existence of a phase-field minimizer for the functional $F_\xi$ (cf.\ \reff{newFxiphi}) for each $\xi \in (0, \xi_0].$ This result will be used in proving Corollary~\ref{c:existenceF0}.

\begin{lemma}
\label{l:minimizerFxi}
Let $\xi \in (0, \xi_0].$ There exists $\phi_\xi \in H^1(\Omega)$ such that
\[
F_\xi [\phi_\xi ] = \min_{\phi \in H^1(\Omega)} F_\xi [\phi ] = \min_{\phi \in L^1(\Omega)} F_\xi [\phi ],
\]
which is finite.
\end{lemma}

\begin{proof}
Let $\phi \in H^1(\Omega).$ We have by our assumptions on the functions $U$ and $\varepsilon$, the fact that
\[
W(s)-s^4 = 18s^2(s-1)^2 - s^4 \to +\infty \quad \mbox{as } |s| \to \infty,
\]
the inequality
\[
\min_{u\in {\mathcal A}} E_\phi [u] \le E_\phi [\psi_\infty]
= \int_\Omega \left[ \frac{\varepsilon (\phi) }{2} |\nabla \psi_\infty |^2 - \rho \psi_\infty + (\phi-1)^2 B( \psi_\infty ) \right] dx,
\]
and H\"older's inequality that
\begin{align}
\label{boundxi}
F_\xi [\phi ] & \ge \int_\Omega \left[ P_0\phi^2 + \frac{\gamma_0 \xi}{2} |\nabla \phi|^2 \right] dx
+ \frac{\gamma_0}{ \xi} \|\phi \|_{L^4(\Omega)}^4 + \frac{\gamma_0}{ \xi} \int_\Omega \left[ W(\phi ) - \phi^4 \right] dx \nonumber \\
&\qquad + \rho_0 \int_{\{ x \in \Omega: U(x) \le 0 \}} (\phi -1)^2 U\, dx - E_\phi [\psi_\infty] \nonumber \\
&\ge C_1 \left( \| \phi \|_{H^1(\Omega)}^2 + \|\phi \|^4_{L^4(\Omega)} \right)
- 2 \left( \rho_0 |U_{\rm min}| + \| B(\psi_\infty) \|_{L^\infty(\Omega)} \right) \int_{\Omega} \phi^2 \, dx - C_2 \nonumber \\
&\ge C_3 \left( \| \phi \|_{H^1(\Omega)}^2 + \|\phi \|^4_{L^4(\Omega)} \right) - C_4,
\end{align}
where all $C_i $ $(i = 1, \dots, 4)$ are positive constants independent of $\phi \in H^1(\Omega).$

Let $\alpha = \inf_{\phi \in H^1(\Omega)} F_\xi[\phi ].$ By \reff{boundxi}, $\alpha > -\infty.$ Setting $\phi (x) = 1$ for all $x \in \Omega$, we have $\alpha \le F_\xi [\phi ] < \infty.$ So, $\alpha $ is finite. Let $\phi_k \in H^1(\Omega)$ $(k = 1, 2, \dots)$ be such that $F_\xi[\phi_k] \to \alpha.$ By \reff{boundxi}, $\{ \phi_k \}$ is bounded in $H^1(\Omega)$. Hence, it has a subsequence, not relabeled, such that $\phi_k \to \phi_\xi $ weakly in $H^1(\Omega)$, strongly in $L^2(\Omega)$, and a.e.\ in $\Omega$ for some $\phi_\xi \in H^1(\Omega).$ Since $\phi_k \to \phi_\xi $ in $L^2(\Omega)$ and $U$ is bounded below,
\begin{align}
\label{L2phik}
& \lim_{k\to \infty } \left[ P_0\int_\Omega \phi_k^2 \, dx + \rho_0 \int_{\{ x\in\Omega: U(x) \le 0 \}} (\phi_k-1)^2 U \, dx \right] \nonumber \\
& \quad = P_0 \int_\Omega \phi_\xi^2 \, dx + \rho_0 \int_{\{ x\in\Omega: U(x) \le 0 \}} (\phi_\xi -1)^2 U \, dx.
\end{align}
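For the second term in \reff{L2phik}, note that $|U| \le |U_{\rm min}|$ on the set $\{ x \in \Omega: U(x) \le 0 \}$, while
\[
\int_\Omega \bigl| (\phi_k-1)^2 - (\phi_\xi -1)^2 \bigr| \, dx
\le \| \phi_k - \phi_\xi \|_{L^2(\Omega)}\, \| \phi_k + \phi_\xi - 2 \|_{L^2(\Omega)} \to 0
\quad \mbox{as } k \to \infty,
\]
since $\{ \phi_k \}$ is bounded in $L^2(\Omega)$; this justifies passing to the limit there.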
Since $\phi_k \to \phi_\xi $ weakly in $H^1(\Omega)$,
\begin{equation}
\label{H1weak}
\liminf_{k\to \infty} \gamma_0 \int_\Omega \frac{\xi }{2} |\nabla \phi_k |^2 dx
\ge \gamma_0 \int_\Omega \frac{\xi }{2} |\nabla \phi_{\xi} |^2 dx.
\end{equation}
Since $\phi_k \to \phi_\xi$ a.e.\ in $\Omega$, Fatou's Lemma implies that
\begin{align}
\label{Fatouxi}
& \liminf_{k\to \infty} \left[ \gamma_0 \int_\Omega \frac{1}{\xi} W(\phi_k) \, dx
+ \rho_0 \int_{\{ x \in \Omega:U(x) > 0 \}} (\phi_k-1)^2 U\, dx \right] \nonumber \\
&\qquad \ge \gamma_0 \int_\Omega \frac{1}{\xi} W(\phi_\xi) \, dx
+ \rho_0 \int_{\{ x \in \Omega:U(x) > 0 \}} (\phi_\xi -1)^2 U\, dx.
\end{align}
By the Sobolev embedding $H^1(\Omega) \hookrightarrow L^4(\Omega),$ $\sup_{k \ge 1} \| \phi_k \|_{L^4(\Omega)} < \infty.$ Hence it follows from Theorem~\ref{t:PBenergy} that
\begin{equation}
\label{elexi}
\lim_{k \to \infty} \min_{u \in {\mathcal A}} E_{\phi_k}[u] = \min_{u \in {\mathcal A}} E_{\phi_\xi}[u].
\end{equation}
Combining \reff{L2phik}--\reff{elexi}, we obtain
\[
\alpha = \liminf_{k\to \infty} F_\xi[\phi_k]\ge F_\xi[\phi_\xi] \ge \alpha.
\]
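Here the first inequality is obtained by treating the terms of $F_{\xi}[\phi_k]$ separately, using \reff{L2phik}, \reff{H1weak}, \reff{Fatouxi}, and \reff{elexi} together with the superadditivity of the limit inferior,
\[
\liminf_{k\to\infty} ( a_k + b_k ) \ge \liminf_{k\to\infty} a_k + \liminf_{k\to\infty} b_k ,
\]
which is valid whenever the right-hand side is well defined.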
Hence $F_\xi[\phi_\xi] = \min_{\phi \in H^1(\Omega)} F_\xi [\phi ]$. But $F_\xi [\phi ] = +\infty$ if $\phi \in L^1(\Omega) \setminus H^1(\Omega).$ Hence $ F_\xi[\phi_\xi ] = \min_{\phi \in L^1(\Omega)} F_\xi [\phi]. $
\end{proof}

Next, we establish a lower bound for the functional $F_\xi = F_\xi [\phi]$ that holds for all $\phi$ and $\xi.$

\begin{lemma}
\label{l:LowerBound}
There exists a constant $C$ such that for any $\phi \in H^1(\Omega)$ and any $\xi \in (0, \xi_0]$
\begin{equation}
\label{FxiphiLowerBound}
F_\xi [\phi] \ge \frac{\gamma_0 }{2} \left[ \xi \| \nabla \phi \|_{L^2(\Omega)}^2
+ \frac{1}{\xi} \| W(\phi) \|_{L^1(\Omega)}\right] + 9 \gamma_0 \| \phi \|_{L^4(\Omega)}^4
+ \rho_0 \int_\Omega (\phi-1)^2 | U | \, dx +C.
\end{equation}
\end{lemma}

\begin{proof}
Fix $\phi \in H^1(\Omega)$ and $\xi \in (0, \xi_0]$. Recall from \reff{Ephiu} that
\[
E_\phi [ \psi_\infty ] = \int_\Omega \left[ \frac{\varepsilon (\phi) }{2} |\nabla \psi_\infty |^2
- \rho \psi_\infty + (\phi-1)^2 B( \psi_\infty ) \right] dx.
\]
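As in the derivation of \reff{boundxi}, the minimal electrostatic energy will enter the estimate below only through the simple bound
\[
\min_{u \in {\mathcal A}} E_\phi[u] \le E_\phi[\psi_\infty].
\]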
We have then by the definition of $F_\xi$ (cf.\ \reff{newFxiphi}) that
\begin{align}
\label{proofLowerBound}
0 &\le \frac{\gamma_0 }{2} \left[ \xi \| \nabla \phi \|_{L^2(\Omega)}^2 + \frac{1}{\xi} \| W(\phi) \|_{L^1(\Omega)}\right]
+ 9 \gamma_0 \| \phi \|_{L^4(\Omega)}^4 + \rho_0 \int_\Omega (\phi-1)^2 | U | \, dx \nonumber \\
& =F_\xi [\phi] - P_0 \|\phi\|^2_{L^2(\Omega)}-\frac{\gamma_0}{2 \xi} \|W(\phi)\|_{L^1(\Omega)}
+ 9 \gamma_0 \| \phi \|_{L^4(\Omega)}^4 \nonumber \\
& \qquad + \rho_0 \int_\Omega (\phi-1)^2 ( |U| - U) \, dx + \min_{u \in {\mathcal A}} E_\phi[u] \nonumber \\
& \le F_\xi [\phi] - \frac{\gamma_0}{2 \xi_0} \|W(\phi)\|_{L^1(\Omega)} + 9 \gamma_0 \| \phi \|_{L^4(\Omega)}^4
+ 2 \rho_0 \int_{\{ x \in \Omega: U(x) \le 0\}} (\phi-1)^2 |U| \, dx + E_\phi[\psi_\infty] \nonumber \\
& \le F_\xi [\phi] - \frac{\gamma_0}{2 \xi_0} \|W(\phi)\|_{L^1(\Omega)} + 9 \gamma_0 \| \phi \|_{L^4(\Omega)}^4
+ 2 \rho_0 | U_{\rm min} | \int_{\Omega} (\phi-1)^2 \, dx \nonumber \\
& \qquad +\frac{1}{2} \max (\varepsilon_{\rm p}, \varepsilon_{\rm w}) \| \nabla \psi_\infty \|^2_{L^2(\Omega)}
+ \| \rho\|_{L^2(\Omega)} \| \psi_\infty \|_{L^2(\Omega)}
+ \| B(\psi_\infty)\|_{L^\infty(\Omega)} \int_\Omega (\phi-1)^2 dx \nonumber \\
& = F_\xi [\phi] - \int_\Omega g(\phi) \, dx
+\frac{1}{2} \max (\varepsilon_{\rm p}, \varepsilon_{\rm w}) \| \nabla \psi_\infty \|^2_{L^2(\Omega)}
+ \| \rho \|_{L^2(\Omega)} \| \psi_\infty \|_{L^2(\Omega)},
\end{align}
where $g:\mathbb{R}\to\mathbb{R}$ is given by
\[
g(s) = \frac{\gamma_0}{2 \xi_0} W(s) - 9 \gamma_0 s^4
- \left[ 2 \rho_0 | U_{\rm min} | + \|B(\psi_\infty)\|_{L^\infty(\Omega)} \right] (s-1)^2.
\]
Note that $\lim_{|s|\to \infty} g(s) = +\infty$, since $0 < \xi_0 < 1$ and $W(s) = 18s^2 (s-1)^2$.
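Indeed, expanding $W(s) = 18 s^4 - 36 s^3 + 18 s^2$ shows that the coefficient of $s^4$ in $g$ is
\[
\frac{\gamma_0}{2 \xi_0} \cdot 18 - 9 \gamma_0 = 9 \gamma_0 \left( \frac{1}{\xi_0} - 1 \right) > 0,
\]
while the remaining terms of $g$ are of degree at most $3$ in $s$.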
Therefore, $g$ is bounded below. Setting
\[
C = |\Omega| \min_{s \in \mathbb{R}} g(s)
-\frac{1}{2} \max (\varepsilon_{\rm p}, \varepsilon_{\rm w}) \| \nabla \psi_\infty \|^2_{L^2(\Omega)}
- \| \rho\|_{L^2(\Omega)} \| \psi_\infty \|_{L^2(\Omega)},
\]
we then obtain the desired estimate \reff{FxiphiLowerBound} from \reff{proofLowerBound}.
\end{proof}
The following lemma, stated for $\mathbb{R}^n$ with a general $n \ge 2$, is a refinement of a standard result; it is used in the proofs of Theorem~\ref{t:EnergyConvergence} and Theorem~\ref{th:CH-force-conv}:

\begin{lemma}
\label{l:etak}
Let $\Omega$ be a nonempty, bounded, and open subset of $\mathbb{R}^n$ with $n \ge 2.$ Let $G$ be a measurable subset of $\Omega$ with $P_\Omega(G) < \infty.$ Assume that $\xi_k \searrow 0$ and $\phi_k \in H^1(\Omega)$ $(k = 1, 2, \dots)$ satisfy $\phi_k \to \chi_G$ a.e.\ in $\Omega$ and
\begin{equation}
\label{supsupsup}
\sup_{k \ge 1} \int_\Omega \left[ \frac{\xi_k}{2} |\nabla \phi_k|^2 + \frac{1}{\xi_k} W(\phi_k) \right] dx < \infty.
\end{equation}
Define
\[
\eta_k(x) = \int_0^{\phi_k(x)} \sqrt{2 W(t) } \, dt \qquad \forall x \in \Omega, \, k = 1, 2, \dots
\]
Then
\begin{align}
\label{etakbound}
& \sup_{k \ge 1} \left[ \| \eta_k \|_{L^{4/3}(\Omega)} + \| \eta_k \|_{W^{1,1}(\Omega)} \right] < \infty, \\
\label{etakconv}
& \eta_k \to \chi_G \ \mbox{a.e.\ in } \Omega \ \mbox{and} \ \mbox{in } L^q(\Omega) \ \mbox{for any } q \in [1, 4/3), \\
\label{POGetak}
& P_\Omega(G) \le \liminf_{k \to \infty} \int_\Omega | \nabla \eta_k | \, dx
\le \liminf_{k\to \infty} \int_\Omega \left[ \frac{\xi_k}{2} |\nabla \phi_k|^2 + \frac{1}{\xi_k} W(\phi_k) \right] dx.
\end{align}
If, in addition, $\overline{G} \subset \Omega,$ then
\begin{equation}
\label{etakweakconv}
\lim_{k\to \infty} \int_\Omega \nabla \eta_k \cdot g \, dx
= -\int_{\partial^* G} g\cdot \nu \, d{\mathcal H}^{n-1} \qquad \forall g \in C_c(\Omega, \mathbb{R}^n).
\end{equation}
\end{lemma}

\begin{proof}
Since $W$ is a quartic potential, we have $\sqrt{2 W(t)} \le C (1+ t^2) $ for all $ t \in \mathbb{R}. $ Here and below, $C$ denotes a generic, positive constant. Therefore,
\[
|\eta_k| \le C( |\phi_k| + |\phi_k|^3) \qquad \mbox{a.e.\ in } \Omega, \, k = 1, 2, \dots
\]
By \reff{supsupsup}, $\sup_{k \ge 1} \| \phi_k \|_{L^4(\Omega)} < \infty.$ This implies that
\begin{equation}
\label{onesup}
\sup_{k \ge 1 } \|\eta_k \|_{L^{4/3}(\Omega)} < \infty.
\end{equation}
Note for each $k \ge 1$ that $\nabla \eta_k = \sqrt{2 W(\phi_k)}\, \nabla \phi_k$ a.e.\ in $\Omega.$ Hence,
\begin{equation*}
\int_\Omega|\nabla \eta_k| \;dx= \int_\Omega \left|\sqrt{2 W(\phi_k)}\, \nabla \phi_k\right|\;dx
\le \int_\Omega\left[ \frac{\xi_k}{2}|\nabla\phi_k|^2 + \frac{1}{\xi_k}W(\phi_k)\right] dx.
\end{equation*}
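The pointwise bound used in the last display is just Young's inequality: for a.e.\ $x \in \Omega$,
\[
\sqrt{2 W(\phi_k)}\, |\nabla \phi_k|
= \frac{\sqrt{2 W(\phi_k)}}{\sqrt{\xi_k}} \cdot \sqrt{\xi_k}\, |\nabla \phi_k|
\le \frac{1}{\xi_k} W(\phi_k) + \frac{\xi_k}{2} |\nabla \phi_k|^2 .
\]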
Together with \reff{supsupsup} and \reff{onesup}, this bound on $\int_\Omega |\nabla \eta_k| \, dx$ implies that
\begin{equation}
\label{twosup}
\sup_{k \ge 1 } \|\eta_k \|_{W^{1,1}(\Omega)} < \infty.
\end{equation}
Now \reff{etakbound} follows from \reff{onesup} and \reff{twosup}.

Since $\phi_k \to \chi_G$ a.e.\ in $\Omega$ and the integral of $\sqrt{2 W(s)}$ over $[0,1]$ is $1$ (indeed, $\int_0^1 \sqrt{2 W(s)}\, ds = 6 \int_0^1 s(1-s)\, ds = 1$), we have $\eta_k \to \chi_G$ a.e.\ in $\Omega$. Lemma~\ref{l:Lq} and \reff{onesup} imply that $\eta_k \to \chi_G$ in $L^q(\Omega)$ for any $q \in [1, 4/3).$ Hence \reff{etakconv} is proved.

By the fact that $W^{1,1}(\Omega) \hookrightarrow BV(\Omega)$ and \reff{etakbound}, we have $\sup_{k\ge 1}\|\eta_k\|_{BV(\Omega)} <\infty.$ Consequently, by \reff{etakconv} \cite{Giusti84,Ziemer_Book89,EvansGariepy_Book92},
\begin{align*}
P_\Omega(G) &\le \liminf_{k\to \infty} \int_\Omega |\nabla \eta_k | \, dx \\
& = \liminf_{k\to \infty} \int_\Omega \sqrt{2 W(\phi_k)}\, |\nabla \phi_k| \, dx \\
& \le \liminf_{k\to\infty} \int_\Omega \left[ \frac{\xi_k}{2} |\nabla \phi_k|^2 + \frac{1}{\xi_k} W(\phi_k) \right] dx.
\end{align*}
This is \reff{POGetak}.

Finally, if $g \in C_c^1(\Omega, \mathbb{R}^n)$, then it follows from \reff{etakconv} and \reff{perimetermeasure} that
\begin{align*}
\lim_{k\to \infty} \int_\Omega \nabla \eta_k \cdot g \, dx
= - \lim_{k \to \infty} \int_\Omega \eta_k \nabla \cdot g \, dx
= -\int_G \nabla \cdot g \, dx = -\int_{\partial^* G} g\cdot \nu \, d{\mathcal H}^{n-1}.
\end{align*}
Since $\sup_{k \ge 1} \| \eta_k \|_{W^{1,1}(\Omega)} < \infty$ by \reff{etakbound} and the perimeter measure $\| \partial G\|={\mathcal H}^{n-1}\mres(\partial^*G\cap \Omega )$ is a Radon measure on $\Omega$, the equation in \reff{etakweakconv} for any function $g \in C_c(\Omega, \mathbb{R}^n)$ follows from the fact that such a function can be approximated uniformly on compact subsets of $\Omega$ by functions in $C_c^1(\Omega, \mathbb{R}^n)$.
\end{proof}

We denote $B(\sigma) = \cup_{i=1}^N B(x_i, \sigma)$ for any $\sigma > 0.$ The following is the last lemma we need to prove our $\Gamma$-convergence result:

\begin{lemma}
\label{l:setapprox}
Let $G$ be a measurable subset of $\Omega$ such that $P_\Omega(G) < \infty,$ $G \supseteq B(\sigma)$ for some $\sigma > 0$, and $|G| < |\Omega|$.
Then there exist bounded open sets $D_k \subseteq \mathbb{R}^3$ $(k = 1, 2, \dots)$ that satisfy the following properties:
\begin{compactenum}
\item[\rm (1)] For each $k \ge 1$, $D_k \cap \Omega \supseteq B(\sigma/2); $
\item[\rm (2)] For each $k \ge 1$, $\partial D_k$ is a nonempty compact hypersurface of class $C^\infty$ and $\partial D_k \cap \Omega$ is of class $C^2;$
\item[\rm (3)] For each $k \ge 1$, $ {\mathcal H}^2(\partial D_k \cap \partial \Omega) = 0; $
\item[\rm (4)] $ | (D_k \cap \Omega ) \Delta G | \to 0$ as $k \to \infty;$
\item[\rm (5)] $P_\Omega(D_k) = P_\Omega( D_k \cap \Omega) \to P_\Omega(G)$ as $k \to \infty. $
\end{compactenum}
\end{lemma}

This lemma is similar to Lemma~1 in \cite{Modica_ARMA87} and Lemma~1 in \cite{Sternberg_ARMA88}. Here we assume $G \supseteq B(\sigma).$ Moreover, part (1) above replaces the volume constraint $| D_k \cap \Omega | = |G|$ in \cite{Modica_ARMA87,Sternberg_ARMA88}. An outline of the proof of this lemma is given in the proof of Lemma~2.2 in \cite{LiZhao_SIAP13}. For completeness, here we provide the main steps of the proof, pointing out how property (1) is satisfied.

\begin{proof}[Proof of Lemma~\ref{l:setapprox}]
Since $P_\Omega (G) < \infty$, there exists $u \in \mbox{BV}\, (\mathbb{R}^3)\cap L^\infty(\mathbb{R}^3)$ such that $u = \chi_G $ in $\Omega$ and
\begin{equation}
\label{u}
\int_{\partial\Omega}|\nabla u|\, d{\mathcal H}^2 = 0;
\end{equation}
cf.\ Sections 2.8 and 2.16 in \cite{Giusti84}. Since $\Omega$ is bounded, by using mollifiers, we can further modify $u$ so that it is compactly supported. Notice that $u = 1$ on $B(\sigma)$. By using mollifiers again, we can construct $u_k \in C^\infty(\mathbb{R}^3)$ $(k = 1, 2, \dots)$ such that $\mbox{supp}\,(u_k) \subseteq B(0,L)$ $(k=1,2,\dots)$ for some $L > 0$ sufficiently large, $u_k = 1$ in $B(\sigma/2)$ $(k = 1, 2, \dots)$, $u_k \to u $ in $L^1(\Omega)$, and, using \reff{u},
\[
\lim_{k\to \infty} \int_\Omega | \nabla u_k | \, dx = | \nabla u |_{BV(\Omega)} = P_\Omega(G);
\]
cf.\ Sections 2.8 and 2.16 in \cite{Giusti84}. For any $t \in \mathbb{R}$, we define $D_k (t) = \{ x \in \mathbb{R}^3: u_k(x) > t \}$ $(k = 1, 2, \dots)$. Following Sections 1.24 and 1.26 in \cite{Giusti84}, and the proof of Lemma~1 in \cite{Modica_ARMA87} and Lemma~1 in \cite{Sternberg_ARMA88} (using the co-area formula and Sard's Theorem), there exist $t_0 \in (0,1)$ and a subsequence of $\{ D_k(t_0)\}$, not relabeled, that satisfy (2)--(5) in the lemma with $D_k = D_k(t_0)$ $(k = 1, 2, \dots)$. Clearly, for each $k \ge 1$, $D_k$ is an open set with $D_k\subseteq B(0,L).$ Moreover,
\[
D_k \supseteq \{ x\in \mathbb{R}^3: u_k (x) = 1 \} \supseteq B(\sigma/2), \qquad k = 1, 2, \dots
\]
This, together with the fact that $B(\sigma) \subseteq G \subseteq \Omega,$ implies part (1).
\end{proof}

We are now ready to prove Theorem~\ref{t:EnergyConvergence}.

\begin{proof}[Proof of Theorem~\ref{t:EnergyConvergence}]
Fix $\xi_k \searrow 0.$

(1) The liminf condition. Assume that $\phi_k \to \phi$ in $L^1(\Omega).$
If $ \liminf_{k\to\infty}F_{\xi_k}[\phi_k]=+\infty, $ then \qref{liminf-ineq} is true. Otherwise, we may assume, without loss of generality, that
\[
\lim_{k\to\infty}F_{\xi_k}[\phi_k] = \liminf_{k\to\infty}F_{\xi_k}[\phi_k] < \infty
\]
and that there exists a constant $C > 0$ such that $ F_{\xi_k}[\phi_k ]\leq C$ for all $k \ge 1.$ By the definition of the functional $F_\xi$ (cf.\ \reff{newFxiphi}), this implies that $\phi_k \in H^1(\Omega)$ for each $k \ge 1.$ Hence, since $\{ F_{\xi_k}[\phi_k]\}$ is bounded, it follows from Lemma~\ref{l:LowerBound} that
\[
\sup_{k \ge 1} \int_\Omega \left[ \frac{\xi_k}{2} |\nabla\phi_k|^2 + \frac1{\xi_k}W(\phi_k) \right] dx < \infty.
\]
Since $W(s) = 18s^2 (s-1)^2$ has exactly two minimum points $0$ and $1$, by a usual argument \cite{Modica_ARMA87}, there exists a subsequence of $\{ \phi_{k} \}$, not relabeled, that converges strongly in $L^1(\Omega)$ and a.e.\ in $\Omega$ to $\chi_G$ for some measurable subset $G\subseteq \Omega$ of finite perimeter in $\Omega$. Since $\phi_k \to \phi$ in $L^1(\Omega)$, we have $\phi = \chi_G$ a.e.\ in $\Omega$. Since $\{ F_{\xi_k}[\phi_k]\}$ is bounded, $\{ \| \phi_k \|_{L^4(\Omega)} \}$ is bounded by Lemma~\ref{l:LowerBound}. Hence, it follows from Lemma~\ref{l:Lq} that $\phi_k \to \chi_G$ in $L^q(\Omega)$ for any $q \in [1, 4).$

Since $\phi_k \to \chi_G $ in $L^2(\Omega)$,
\begin{align}
\label{liminf-term1}
|G| = \int_\Omega \chi_G^2 \, dx = \lim_{k\to\infty} \int_\Omega \phi_k^2\;dx.
\end{align} Lemma~\ref{l:etak} implies that \begin{eqnarray}gin{align} \langlebel{liminf-term2} P_\Omegaega(G) \leq \liminf_{k\to\infty} \int_\Omegaega \left[ \frac{\mbox{\mbox{\normalsize\boldmath$n$}ormalsize\boldmath$x$}i_k}{2}|\mbox{\normalsize\boldmath$n$}abla\phi_k|^2 +\frac1{\mbox{\mbox{\normalsize\boldmath$n$}ormalsize\boldmath$x$}i_k} W(\phi_k)\right] dx. \end{align} By Fatou's Lemma, the convergence $\phi_k \to \chi_G $ a.e.\ in $\Omegaega$, the convergence $\phi_k \to \chi_G $ in $L^2(\Omegaega)$, and the fact that $U$ is bounded below, we obtain \begin{eqnarray}gin{align} \langlebel{liminf-term3} \int_{\Omegaega\setminus G} U \;dx & = \int_{\{x\in \Omegaega\setminus G: U(x) > 0\}} (\chi_G - 1)^2 U\, dx + \int_{\{x\in \Omegaega\setminus G: U(x) \le 0\}} (\chi_G - 1)^2 U\, dx \mbox{\normalsize\boldmath$n$}onumber \\ & \le \liminf_{k\to \infty} \int_{\{x\in \Omegaega\setminus G: U(x) > 0\}} (\phi_k - 1)^2 U\, dx + \lim_{k\to \infty} \int_{\{x\in \Omegaega\setminus G: U(x) \le 0\}} (\phi_k - 1)^2 U\, dx \mbox{\normalsize\boldmath$n$}onumber \\ & = \liminf_{k\to \infty} \int_{\Omegaega\setminus G} (\phi_k - 1)^2 U\, dx. \end{align} Since $\{ \| \phi_k \|_{L^4(\Omegaega)} \}$ is bounded by Lemma~\ref{l:LowerBound} and $\phi_k \to \chi_G$ in $L^1(\Omegaega),$ Theorem~\ref{t:PBenergy} implies that \begin{eqnarray}gin{equation} \langlebel{liminf-elec} \lim_{k\to \infty} \min_{u \in {\mathcal A} } E_{\phi_k}[u] = \min_{u \in {\mathcal A} } E_{\chi_G }[u]. \end{equation} The liminf inequality \reff{liminf-ineq} now follows from \reff{liminf-term1}--\reff{liminf-elec}. \begin{eqnarray}gin{comment} Next we will show that $\int_\Omegaega (1-\phi)^2\;dx = \lim_{n\to\infty}\int_\Omegaega(1-\phi_n)^2\;dx$. Since $W(\phi_n)=18\phi_n^2(1-\phi_n)^2$ and \qref{bound0}, we have for all $n$, \begin{eqnarray}gin{align} \langlebel{bound-phi_n} \int_\Omegaega \phi_n^4 \;dx \leq C\int_\Omegaega(W(\phi_n) + 1)\;dx \leq C (C\mbox{\mbox{\normalsize\boldmath$n$}ormalsize\boldmath$x$}i_n +|\Omegaega|) \leq C<\infty. \end{align} Since $\phi_n\to\phi$ strongly in $L^1(\Omegaega)$, for any $\deltalta>0$ there exists $N>0$ such that the set \begin{eqnarray}gin{align} \langlebel{def-omega-delta} \omegaega_n^\deltalta:=\{ x\in\Omegaega: |\phi_n(x)-\phi(x) | >\deltalta\} \end{align} has measure smaller than $\deltalta$ for all $n>N$. Hence for all $n>N$, \begin{eqnarray}gin{align} \int_{\omegaega_n^\deltalta} | (1-\phi_n)^2- (1-\phi)^2 |dx &\leq C \left(\int_{\omegaega_n^\deltalta} (\phi_n^4 + \phi^4 +1)\;dx\right)^{1/2}|\omegaega_n^\deltalta|^{1/2} \leq C\deltalta^{1/2}, \langlebel{liminf-conv-est1} \end{align} \begin{eqnarray}gin{align} \int_{\Omegaega\setminus\omegaega_n^\deltalta} | (1-\phi_n)^2- (1-\phi)^2 |dx &= \int_{\Omegaega\setminus\omegaega_n^\deltalta} |\phi_n-\phi|\cdot |\phi_n+\phi-2|\;dx \mbox{\normalsize\boldmath$n$}n\\ &\leq \deltalta \int_{\Omegaega\setminus\omegaega_n^\deltalta} |\phi_n+\phi-2|\;dx \mbox{\normalsize\boldmath$n$}n\\ &\leq C \deltalta \left(\int_\Omegaega (\phi_n^4 +\phi^4+1)\;dx\right)^{1/4} |\Omegaega|^{3/4} \mbox{\normalsize\boldmath$n$}n\\ &\leq C\deltalta. \langlebel{liminf-conv-est2} \end{align} Combining \qref{liminf-conv-est1} and \qref{liminf-conv-est2} and by the arbitrariness of $\deltalta$, we see that \begin{eqnarray}gin{align} \int_\Omegaega (1-\phi)^2 \;dx = \lim_{n\to\infty} \int_\Omegaega (1-\phi_n)^2\;dx. 
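We record in passing an elementary normalization that is used implicitly throughout: for the double well $W(s) = 18 s^2(1-s)^2$,
\[
\int_0^1 \sqrt{2\,W(s)}\, ds = \int_0^1 6\, s\,(1-s)\, ds = 1,
\]
which is why the lower bound \reff{liminf-term2} and the matching upper bound \reff{limsup-term2} below involve the perimeter $P_\Omega(G)$ with no extra multiplicative constant.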
(2) The recovery sequence. Let $\phi \in L^1(\Omega)$. If $F_0[\phi]=+\infty$, then we can take $\phi_k=\phi$ for all $k\ge 1$ to obtain \reff{limsup-ineq}. Assume $F_0[\phi]<\infty.$ We then have $\phi = \chi_G\in BV(\Omega)$ for some measurable subset $G \subseteq \Omega$ of finite perimeter in $\Omega.$ We divide the rest of the proof into two steps.

{\it Step 1.} We first consider the case that $G = D\cap \Omega$ for some bounded open set $D \subset \mathbb{R}^3$ such that the boundary $\partial D$ is a nonempty compact hypersurface of class $C^\infty$, $\partial D\cap \Omega $ is $C^2$, and ${\mathcal H}^2(\partial D\cap \partial \Omega) = 0$.
It follows from a standard argument \cite{Sternberg_ARMA88, Modica_ARMA87, LiZhao_SIAP13} that, for $\xi_k\searrow 0$, there exist $\phi_k\in H^1(\Omega)$ $(k=1,2,\dots)$ satisfying
\begin{align}
\label{phi_n1}
& 0\leq \phi_k \leq \chi_G \quad \mbox{in } \Omega,
\\
\label{phi_n2}
& \phi_k = 1 \quad \mbox{in } G_{k}:=\left\{ x\in G: \mbox{dist}(x,\partial G)\geq \sqrt{\xi_k} \right\},
\\
\label{phi_n3}
&\phi_k = 0 \quad\mbox{in } \Omega \setminus G,
\\
\label{phi_n4}
& \phi_k\to \chi_G \quad \mbox{strongly in } L^1(\Omega) \mbox{ and a.e.\ in }\Omega,
\\
\label{limsup-term2}
&\limsup_{k\to\infty} \int_\Omega\left[ \frac{\xi_k}{2} |\nabla\phi_k|^2 +\frac1{\xi_k}W(\phi_k)\right] dx \leq P_\Omega(G).
\end{align}
By \reff{phi_n1}, \reff{phi_n4}, and Lemma~\ref{l:Lq}, we have $\phi_k \to \chi_G$ in $L^q(\Omega)$ for any $q > 1$. Hence
\begin{align}
\label{limsup-term1}
\lim_{k\to\infty} \int_\Omega \phi_k^2\;dx & = \int_\Omega \chi_G^2\;dx = |G|.
\end{align}
Since $F_0[\chi_G]< \infty$, by \reff{def-F} with $G$ replacing $A$, the integral of $U$ over $\Omega \setminus G $ is finite. Since $G = D\cap \Omega$ is open and $\partial D \cap \Omega $ is $C^2$, it follows from our assumptions on $U$ that all points $x_i \in \Omega $ $ (1 \le i \le N)$ must be interior points of $G.$ Consequently, there exist $r_0 > 0$ and $N_0 \ge 1$ such that $B(r_0):= \cup_{i=1}^N B(x_i, r_0) \subseteq G_{k} \subseteq G$ for all $k \ge N_0.$ Hence, by \reff{phi_n2}, $\phi_k = 1$ on $B(r_0)$ for all $k \ge N_0.$ Note that $U$ is bounded on $\Omega\setminus B(r_0).$ Therefore, by \reff{phi_n2} and the convergence $\phi_k \to \chi_G$ in $L^2(\Omega)$,
\begin{align}
\label{limsup-term3}
\lim_{k\to\infty} \int_\Omega (\phi_k-1)^2 U\;dx &= \lim_{k\to\infty} \int_{\Omega\setminus B(r_0)} (\phi_k-1)^2 U\;dx \nonumber \\
& = \int_{\Omega\setminus B(r_0)} (\chi_G -1)^2 U\;dx \nonumber \\
& = \int_{\Omega \setminus G} U\;dx.
\end{align}
By Theorem~\ref{t:PBenergy},
\begin{equation}
\label{limsup-term4}
\lim_{k\to \infty} \min_{u \in {\mathcal A} } E_{\phi_k} [u] = \min_{u \in {\mathcal A} } E_{\chi_G}[u].
\end{equation}
Combining \qref{limsup-term2}--\qref{limsup-term4}, we obtain \qref{limsup-ineq}.
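The construction behind \reff{phi_n1}--\reff{limsup-term2} is standard and only the properties listed above are used here; still, it may help to recall, purely as an illustration (the truncation details are as in \cite{Modica_ARMA87,Sternberg_ARMA88,LiZhao_SIAP13}), the one-dimensional profile on which it is modeled. In the transition layer $\{x \in G: 0 < \mbox{dist}(x,\partial G) < \sqrt{\xi_k}\}$ one takes $\phi_k(x)$ close to $q(\mbox{dist}(x,\partial G)/\xi_k)$, where $q$ is the increasing solution of
\[
q' = \sqrt{2\,W(q)} = 6\, q\,(1-q), \qquad q(0) = \tfrac12, \qquad \mbox{i.e.} \quad q(t) = \frac{1}{1+e^{-6t}}.
\]
Since the layer width $\sqrt{\xi_k}$ is much larger than the intrinsic transition scale $\xi_k$, truncating $q$ so that \reff{phi_n2} and \reff{phi_n3} hold exactly changes the energy only by a quantity that vanishes as $k \to \infty$, and integrating $\sqrt{2W(q)}\, q'$ across the layer contributes, per unit area of $\partial G$, the factor $\int_0^1 \sqrt{2W(s)}\, ds = 1$; this is consistent with \reff{limsup-term2}.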
{\it Step 2.} We now assume that $G\subseteq \Omega$ is an arbitrary measurable subset of finite perimeter in $\Omega$. Since $F_0[\chi_G] $ is finite, the integral of $U$ over $\Omega \setminus G$ is finite. This implies that $| G | > 0.$ If $|G| = |\Omega|$, then $P_\Omega(G) = 0,$ and we can choose $\phi_k = \chi_G$ to get the limsup inequality \reff{limsup-ineq}. We assume now $0 < |G| < |\Omega|.$

Choose $\sigma_k \searrow 0$ such that the closure of $ B(\sigma_k) := \cup_{i=1}^N B(x_i,\sigma_k) $ is included in $\Omega,$ $U \ge 0$ on $B(\sigma_k)$, and $0 < |G \cup B(\sigma_k)| < |\Omega|$ for each $k \ge 1.$ Denote $\widehat{G}_k = G \cup B(\sigma_k) $ for $k \ge 1$. Then $G \subseteq \widehat{G}_{k+1} \subseteq \widehat{G}_k$ for all $k \ge 1$ and $\chi_{\widehat{G}_k} \to \chi_G$ in $L^1(\Omega)$. We claim that
\begin{equation}
\label{GkGclaim}
\limsup_{k\to \infty} F_0[\chi_{\widehat{G}_k}] \le F_0[\chi_G].
\end{equation}
Clearly,
\begin{equation}
\label{GkG}
|\widehat{G}_k | = |G| + |B (\sigma_k) \setminus G| \to |G| \quad \mbox{as } k \to \infty.
\end{equation}
Moreover \cite{Giusti84},
\begin{align}
\label{PGkPG}
\limsup_{k \to \infty} P_{\Omega} ( \widehat{G}_k) &= \limsup_{k \to \infty} P_{\Omega} ( G \cup B(\sigma_k)) \nonumber \\
& \le \limsup_{k \to \infty} \left[ P_{\Omega}(G) + P_{\Omega} (B(\sigma_k)) \right] \nonumber \\
& = P_{\Omega}(G) + \lim_{k\to \infty} P_{\Omega} (B(\sigma_k)) \nonumber \\
& = P_{\Omega}(G).
\end{align}
Since $\Omega \setminus \widehat{G}_k \subseteq \Omega \setminus \widehat{G}_{k+1}$, we have by the Lebesgue Monotone Convergence Theorem that
\begin{align*}
\lim_{k\to \infty} \int_{\Omega \setminus \widehat{G}_k} \chi_{\{ x \in \Omega: U(x) > 0 \}} U \, dx & = \lim_{k\to \infty} \int_\Omega \chi_{\Omega \setminus \widehat{G}_k} \chi_{\{ x \in \Omega: U(x) > 0 \}} U \, dx \\
& = \int_\Omega \chi_{\Omega \setminus G} \chi_{\{ x \in \Omega: U(x) > 0 \}} U \, dx \\
& = \int_{\Omega \setminus G} \chi_{\{ x \in \Omega: U(x) > 0 \}} U \, dx.
\end{align*}
Since $U$ is bounded below and $| \Omega \setminus \widehat{G}_k | \to | \Omega \setminus G|,$
\[
\lim_{k\to \infty} \int_{\Omega \setminus \widehat{G}_k} \chi_{\{ x \in \Omega: U(x) \le 0 \}} U \, dx = \int_{\Omega \setminus G} \chi_{\{ x \in \Omega: U(x) \le 0 \}} U \, dx.
\]
Combining the above two equations, we get
\begin{equation}
\label{GkGU}
\lim_{k \to \infty} \int_{\Omega \setminus \widehat{G}_k } U\, dx = \int_{\Omega \setminus G} U\, dx.
\end{equation}
By Theorem~\ref{t:PBenergy},
\begin{equation}
\label{GkGelec}
\lim_{k \to \infty} \min_{u \in {\mathcal A} } E_{\chi_{\widehat{G}_k}}[u] = \min_{u \in {\mathcal A} } E_{\chi_G}[u].
\end{equation}
Now, \reff{GkGclaim} follows from \reff{GkG}--\reff{GkGelec}.

Fix an arbitrary $k \ge 1$. It follows from Lemma~\ref{l:setapprox} that there exist open sets $D_{k,j} \subseteq \mathbb{R}^3 $ $(j = 1, 2, \dots)$ such that, for each $j \ge 1$ and $ G_{k,j} := D_{k,j} \cap \Omega,$ $G_{k,j} \supseteq B(\sigma_k/2),$ $\partial D_{k,j} $ is $C^\infty$ and $\partial D_{k,j} \cap \Omega $ is $C^2,$ and ${\mathcal H}^2(\partial D_{k,j} \cap \partial \Omega) = 0,$ and that $|G_{k,j} \Delta \widehat{G}_k | \to 0$, which is equivalent to $\chi_{G_{k,j}} \to \chi_{\widehat{G}_k}$ in $L^1(\Omega)$, and $P_\Omega(G_{k,j})\to P_\Omega (\widehat{G}_k)$ as $j \to \infty.$ Clearly, $|G_{k,j}| \to |\widehat{G}_k|$ as $j \to \infty.$ Since each $G_{k,j}\supseteq B(\sigma_k/2)$ and $\chi_{G_{k,j}} \to \chi_{\widehat{G}_k}$ in $L^1(\Omega)$,
\[
\lim_{j\to \infty} \int_{\Omega\setminus G_{k,j}} U\, dx = \int_{\Omega\setminus \widehat{G}_{k}} U\, dx.
\]
By Theorem~\ref{t:PBenergy}, $\min_{u \in {\mathcal A} } E_{\chi_{G_{k,j}}}[u] \to \min_{u \in {\mathcal A} } E_{\chi_{\widehat{G}_k}} [u]$ as $j \to \infty.$ Therefore,
\[
\lim_{j\to \infty} F_0[\chi_{G_{k,j}}] = F_0[\chi_{\widehat{G}_k}], \quad k = 1, 2, \dots
\]
By induction, we can choose $j_1 < j_2 < \cdots$ with $j_k \to \infty$ such that, with the notation $H_k = G_{k, j_k}$ for all $k \ge 1$,
\[
\| \chi_{ H_k} - \chi_{\widehat{G}_k} \|_{L^1(\Omega)} < \frac{1}{k} \quad \mbox{and} \quad |F_0[\chi_{H_k} ] - F_0[\chi_{\widehat{G}_k}] | < \frac{1}{k}, \quad k = 1, 2, \dots
\]
These, together with the fact that $\chi_{\widehat{G}_k} \to \chi_G$ in $L^1(\Omega)$ and \reff{GkGclaim}, imply that
\begin{equation}
\label{Hk}
\lim_{k\to \infty} \| \chi_{ H_k} - \chi_{G} \|_{L^1(\Omega)} = 0 \quad \mbox{and} \quad \limsup_{k\to \infty} F_0[\chi_{H_{k} }] \le F_0[\chi_G].
\end{equation}
By Step 1, we can find for each $k \ge 1$ a recovery sequence $ \{ \phi_{k,l} \}_{l=1}^\infty$ for $\chi_{H_k} $ such that all $\phi_{k,l} \in H^1(\Omega)$ $(l = 1, 2, \dots)$,
\begin{equation}
\label{phikl}
\lim_{l\to \infty} \| \phi_{k,l} - \chi_{H_k}\|_{L^1(\Omega)} = 0 \quad \mbox{and} \quad \limsup_{l\to \infty} F_{\xi_l} [\phi_{k,l} ] \le F_0[\chi_{H_k} ], \quad k = 1, 2, \dots
\end{equation}
By \reff{Hk} and \reff{phikl}, and induction, we can choose $l_1 < l_2 < \cdots $ with $l_k \to \infty$ such that $\phi_{k,l_k} \to \chi_{G}$ in $L^1(\Omega)$ and
\[
\limsup_{k\to \infty} F_{\xi_{l_k} } [\phi_{k,l_k} ] \le F_0[\chi_G ].
\]
The proof is complete.
\end{proof}

\begin{proof}[Proof of Corollary~\ref{c:existenceF0}]
Let $\xi_k \searrow 0$. For each $k \ge 1$, let $\phi_k \in H^1(\Omega)$ be such that $F_{\xi_k}[\phi_k] = \min_{\phi \in L^1(\Omega)}F_{\xi_k}[\phi]$; cf.\ Lemma~\ref{l:minimizerFxi}. By Lemma~\ref{l:LowerBound} and a comparison of $F_{\xi_k}[\phi_k]$ with the free energy of the constant function $\phi = 1$ (whose value does not depend on $k$, since the terms involving $\xi_k$ vanish for a constant function), the sequence $\{ F_{\xi_k}[\phi_k]\}$ is bounded. Hence the corresponding sequence of the van der Waals--Cahn--Hilliard functionals of $\phi_k$ is also bounded. This and a usual argument \cite{Modica_ARMA87,Sternberg_ARMA88} imply that there exists a subsequence of $\{ \phi_k \}$, not relabeled, such that $\phi_k \to \chi_G$ in $L^1(\Omega)$ for some measurable subset $G$ of $\Omega$. Theorem~\ref{t:EnergyConvergence} then implies that $\chi_G$ minimizes $F_0$.
\end{proof}

We need the following elementary result in the proof of Theorem~\ref{t:individual}:

\begin{lemma}
\label{l:AkBk}
Let $a_k$ and $b_k$ $(k = 1, 2, \dots)$ and $a$ and $b$ be nonnegative numbers such that
\[
\lim_{k\to \infty} (a_k+b_k) = a + b, \quad \liminf_{k\to \infty} a_k \ge a, \quad \mbox{and} \quad \liminf_{k\to \infty} b_k \ge b.
\]
Then
\[
\lim_{k\to \infty} a_k = a \quad \mbox{and} \quad \lim_{k\to \infty} b_k = b.
\]
\end{lemma}

\begin{proof}
Since $a_k \ge 0$ and $b_k \ge 0$ $(k = 1, 2, \dots)$ and $\{ a_k + b_k \}$ converges, both $\{ a_k \}$ and $\{ b_k \} $ are bounded. Let $\{ a_{k_j} \}$ be any subsequence of $\{ a_k \}.$ Let $\{ a_{k_{j_i}}\}$ be a further subsequence such that
\begin{equation}
\label{kji}
\lim_{i \to \infty} a_{k_{j_i}} = \liminf_{j \to \infty} a_{k_j}.
\end{equation}
We then have
\[
a + b = \liminf_{j\to \infty} (a_{k_j} + b_{k_j}) \ge \liminf_{j \to \infty} a_{k_j} + \liminf_{j \to \infty} b_{k_j} \ge \liminf_{k \to \infty} a_{k} + \liminf_{k \to \infty} b_{k} \ge a + b,
\]
leading to
\[
0 \ge \left( \liminf_{j \to \infty} a_{k_j} - a \right) + \left( \liminf_{j \to \infty} b_{k_j} - b \right) \ge 0.
\]
Each term in the sum is nonnegative and hence equals $0$. Thus $ \liminf_{j \to \infty} a_{k_j} = a. $ This and \reff{kji} imply that $a_{k_{j_i}} \to a$ as $i \to \infty$. Since every subsequence of the bounded sequence $\{ a_k \}$ thus has a further subsequence converging to $a$, we conclude that $a_k \to a$ as $k\to \infty$. Similarly, $b_k \to b$ as $k\to \infty$.
\end{proof}

We are now ready to prove Theorem~\ref{t:individual}.

\begin{proof}[Proof of Theorem~\ref{t:individual}]
Since $\{ F_{\xi_k}[\phi_k]\}$ converges, it is bounded. Lemma~\ref{l:LowerBound} then implies that $\sup_{k \ge 1}\|\phi_k \|_{L^4(\Omega)} < \infty.$ Since $\phi_k \to \chi_G$ a.e.\ in $\Omega$, Lemma~\ref{l:Lq} implies that $\phi_k \to \chi_G$ in $L^q(\Omega)$ for any $q\in [1, 4).$ Hence, \reff{volume} follows. Moreover, Theorem~\ref{t:PBenergy} implies \reff{Ele}. By our assumptions on $U$ and the Lebesgue Dominated Convergence Theorem,
\begin{equation}
\label{Une0}
\lim_{k \to \infty} \int_{\{ x\in \Omega: U(x) \le 0 \}} (\phi_k - 1)^2 U \, dx = \int_{\{ x\in \Omega: U(x) \le 0 \}} \chi_{\Omega \setminus G} U \, dx.
\end{equation}
Since $F_{\xi_k}[\phi_k] \to F_0[\chi_G]$ with $F_0[\chi_G]$ finite, it follows from \reff{volume}, \reff{Ele}, and \reff{Une0} that
\begin{align}
\label{ABge0}
&\lim_{k\to \infty} \left\{ \gamma_0 \int_\Omega \left[ \frac{\xi_k}{2} |\nabla \phi_k|^2 + \frac{1}{\xi_k} W(\phi_k) \right] dx + \rho_0 \int_{\{ x\in \Omega: U(x) > 0 \}} (\phi_k-1)^2 U\, dx \right\} \nonumber \\
&\qquad = \lim_{k \to \infty} \left\{ F_{\xi_k}[\phi_k] - P_0 \int_\Omega \phi_k^2\, dx - \rho_0 \int_{\{ x\in \Omega: U(x) \le 0 \}} (\phi_k-1)^2 U\, dx + \min_{u \in {\mathcal A} } E_{\phi_k}[u] \right\} \nonumber \\
&\qquad = F_{0} [\chi_G] - P_0 |G| - \rho_0 \int_{\{ x\in \Omega: U(x) \le 0 \}} \chi_{\Omega\setminus G} U\, dx + \min_{u \in {\mathcal A} } E_{\chi_G} [u] \nonumber \\
&\qquad = \gamma_0 P_\Omega(G) + \rho_0 \int_{\{ x\in \Omega: U(x) > 0 \}} \chi_{\Omega \setminus G } U\, dx.
\end{align}
By Lemma~\ref{l:etak}, we have
\begin{equation}
\label{Age0}
\liminf_{k\to \infty} \int_\Omega \left[ \frac{\xi_k}{2} |\nabla \phi_k|^2 + \frac{1}{\xi_k} W(\phi_k) \right] dx \ge P_\Omega(G).
\end{equation}
Fatou's Lemma implies that
\begin{equation}
\label{Bge0}
\liminf_{k \to \infty} \int_{\{ x\in \Omega: U(x) > 0 \}} (\phi_k - 1)^2 U \, dx \ge \int_{\{ x\in \Omega: U(x) > 0 \}} \chi_{\Omega\setminus G} U \, dx.
\end{equation}
By \reff{ABge0}--\reff{Bge0} and Lemma~\ref{l:AkBk}, the inequalities \reff{Age0} and \reff{Bge0} become equalities. Therefore \reff{surface} holds, and then \reff{LJ} holds as well. Finally, since $F_0[\chi_G]$, $|G|$, $P_\Omega(G)$, and $F_{\rm ele}[G]$ are all finite, the right-hand side of \reff{LJ} is also finite.
\end{proof}
\section{Force Convergence}
\label{s:ForceConvergenceSolvation}

We first prove Theorem~\ref{t:ForceConvSolvation}. We then focus on the proof of Theorem~\ref{th:CH-force-conv}, which is stated for a general space dimension $n \ge 2.$

\begin{proof}[Proof of Theorem~\ref{t:ForceConvSolvation}]
Since $F_{\xi_k}[\phi_k]\to F_0[\chi_G]$, Lemma~\ref{l:LowerBound} implies that $ \{ \| \phi_k \|_{L^4(\Omega)} \}$ is bounded. Since $\phi_k \to \chi_G$ a.e.\ in $\Omega$, Lemma~\ref{l:Lq} then implies that $\phi_k \to \chi_G$ in $L^q(\Omega)$ for any $q \in [1, 4).$ This implies \reff{f_vol-conv}; it also implies \reff{f_vdW-conv}, as both $U$ and $\nabla U$ are continuous on $\mbox{supp}\,(V).$ The second equation \reff{CH-force-conv} is part of Theorem~\ref{th:CH-force-conv}. Finally, the equation \reff{f_ele-conv} is part of Theorem~\ref{th:f_ele-conv}.
\end{proof}

To prove Theorem~\ref{th:CH-force-conv}, we need the following lemma, which states that the convergence of the phase-field surface energies to their sharp-interface limit implies the asymptotic equi-partition of energy. Indeed, we prove that
\begin{equation*}
\frac{\xi_k}{2} |\nabla \phi_k|^2 - \frac{1}{\xi_k} W(\phi_k) \to 0 \quad\mbox{strongly in }L^1(\Omega) \mbox{ as }k\to \infty.
\end{equation*}
This is stronger than the weak convergence (to zero) of the discrepancy measures
\[
\left[ \frac{\xi_k}{2} |\nabla \phi_k|^2 - \frac{1}{\xi_k} W(\phi_k)\right] dx \quad (k = 1, 2, \dots)
\]
considered in \cite{RogerSchatzle06,Ilmanen1993}:

\begin{lemma}[Asymptotic equi-partition of energy]
\label{l:equienergy}
Let $\xi_k \searrow 0,$ $\phi_k\in H^1(\Omega)$ $(k = 1, 2, \dots),$ and $G\subseteq \Omega$ be measurable with $P_\Omega(G) < \infty.$ Assume that $\phi_k \to \chi_G$ a.e.\ in $\Omega.$ Assume also that
\begin{equation}
\label{POG}
\lim_{k\to \infty} \int_\Omega \left[ \frac{\xi_k}{2} |\nabla \phi_k|^2 + \frac{1}{\xi_k} W(\phi_k) \right] dx = P_\Omega(G).
\end{equation}
Then we have
\begin{equation}
\label{equienergy-0}
\lim_{k \to \infty} \int_\Omega \left| \sqrt{ \frac{\xi_k}{2} } |\nabla \phi_k| - \sqrt{ \frac{W(\phi_k)}{\xi_k}} \right|^2 dx = 0
\end{equation}
and
\begin{equation}
\label{equienergy}
\lim_{k\to \infty} \int_\Omega \left| \frac{\xi_k}{2} |\nabla \phi_k|^2 - \frac{1}{\xi_k} W(\phi_k) \right| dx = 0.
\end{equation}
\end{lemma}
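Before giving the proof, we note a simple illustration, which is not used below. For the one-dimensional profile $\phi(x) = \frac12\bigl(1+\tanh(3x/\xi)\bigr)$ one has $\phi(1-\phi) = \frac14\,{\rm sech}^2(3x/\xi)$, so that
\[
\frac{\xi}{2}\,|\phi'(x)|^2 = \frac{9}{8\,\xi}\,{\rm sech}^4\!\Bigl(\frac{3x}{\xi}\Bigr) = \frac{1}{\xi}\, W(\phi(x)) \qquad \mbox{for all } x \in \mathbb{R};
\]
the two terms of the van der Waals--Cahn--Hilliard energy density coincide pointwise for this profile. The lemma asserts that any sequence satisfying the energy convergence \reff{POG} exhibits this equi-partition asymptotically, in the $L^1(\Omega)$ sense of \reff{equienergy-0} and \reff{equienergy}.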
\begin{proof}
Define $\eta_k = \eta_k(x)$ as in Lemma~\ref{l:etak}. We have by Lemma~\ref{l:etak} and \reff{POG} that
\begin{align*}
0 & \le \limsup_{k \to \infty} \int_\Omega \left| \sqrt{ \frac{\xi_k}{2} } |\nabla \phi_k| - \sqrt{ \frac{W(\phi_k)}{\xi_k}} \right|^2 dx \\
& = \limsup_{k \to \infty} \int_\Omega \left[ \frac{\xi_k}{2} |\nabla \phi_k|^2 + \frac{1}{\xi_k} W(\phi_k) - \sqrt{ 2 W(\phi_k) } |\nabla \phi_k | \right] dx \\
& = \lim_{k \to \infty} \int_\Omega \left[ \frac{\xi_k}{2} |\nabla \phi_k|^2 + \frac{1}{\xi_k} W(\phi_k) \right] dx - \liminf_{k \to \infty} \int_\Omega \sqrt{ 2 W(\phi_k ) } |\nabla \phi_k |\, dx \\
& = P_\Omega(G) - \liminf_{k \to \infty} \int_\Omega |\nabla \eta_k |\, dx \\
& \le 0.
\end{align*}
This proves \reff{equienergy-0}. By \reff{POG} and \reff{equienergy-0}, we have
\begin{align*}
& \int_\Omega \left| \frac{\xi_k}{2} |\nabla \phi_k|^2 - \frac{1}{\xi_k} W(\phi_k) \right| dx \\
&\qquad = \int_\Omega \left| \sqrt{ \frac{\xi_k}{2} } |\nabla \phi_k| - \sqrt{ \frac{W(\phi_k)}{\xi_k}} \right| \, \left| \sqrt{ \frac{\xi_k}{2} } |\nabla \phi_k| + \sqrt{ \frac{W(\phi_k)}{\xi_k}} \right|\, dx \\
&\qquad \le \left( \int_{\Omega} \left| \sqrt{ \frac{\xi_k}{2} } |\nabla \phi_k| - \sqrt{ \frac{W(\phi_k)}{\xi_k}} \right|^2 dx \right)^{1/2} \left( 2 \int_\Omega \left[ \frac{\xi_k}{2} |\nabla \phi_k|^2 + \frac{1}{\xi_k} W(\phi_k) \right] dx \right)^{1/2} \\
&\qquad \to 0 \qquad \mbox{as } k \to \infty,
\end{align*}
implying \reff{equienergy}.
\end{proof}

We are now ready to prove Theorem~\ref{th:CH-force-conv}.

\begin{proof}[Proof of Theorem~\ref{th:CH-force-conv}]
Suppose \reff{CH-force-conv1} is true for any $\Psi \in C_c(\Omega; \mathbb{R}^{n \times n}).$ Let $V \in C_c^1(\Omega, \mathbb{R}^n).$ Under the additional assumptions on $\phi_k$ $(k \ge 1)$ and $G$, we have by \reff{weak-f_sur} in Lemma~\ref{l:Stress}, \reff{CH-force-conv1} with $\Psi = \nabla V$, and \reff{weak-f0_sur} in Lemma~\ref{l:sharpboundaryforce} that
\begin{align*}
& \lim_{k\to\infty}\int_\Omega\left[ -\xi_k\Delta\phi_k + \frac1{\xi_k} W'(\phi_k)\right] \nabla\phi_k \cdot V\;dx \\
& \qquad = - \lim_{k\to\infty} \int_\Omega T_{\xi_k}(\phi_k) : \nabla V\, dx \\
&\qquad = - \int_{\partial G} (I - \nu \otimes \nu ): \nabla V \, d {\mathcal H}^{n-1} \\
&\qquad = - (n-1)\int_{\partial G} H\nu\cdot V \, d S,
\end{align*}
proving \reff{CH-force-conv2}.

We now prove \reff{CH-force-conv1}. We claim that it suffices to prove that
\begin{align}
\label{tensor-conv2}
\lim_{k\to \infty} \int_\Omega \xi_k\nabla\phi_k\otimes\nabla\phi_k:\Psi\;dx = \int_{\partial^*G}\nu\otimes\nu:\Psi\;d{\cal H}^{n-1} \qquad \forall \Psi\in C_c(\Omega;\mathbb{R}^{n\times n}).
\end{align}
In fact, suppose \reff{tensor-conv2} is proved. Notice that for any $a \in \mathbb{R}^n$, $|a|^2 = a\otimes a : I$. Let $\Psi \in C_c(\Omega; \mathbb{R}^{n\times n})$. Then $(I:\Psi) I\in C_c(\Omega; \mathbb{R}^{n\times n})$. Hence, it follows from Lemma~\ref{l:equienergy} and \reff{tensor-conv2}, with $(I:\Psi)I $ replacing $\Psi$, that
\begin{align*}
&\lim_{k\to\infty}\int_\Omega \left[ \frac{\xi_k}{2}|\nabla\phi_k|^2 + \frac1{\xi_k}W(\phi_k)\right] I : \Psi\, dx \\
& \qquad = \lim_{k\to\infty}\int_\Omega \xi_k|\nabla\phi_k|^2 I: \Psi\, dx \\
& \qquad = \lim_{k\to\infty}\int_\Omega \xi_k \nabla \phi_k \otimes \nabla \phi_k : (I: \Psi) I \, dx \\
&\qquad = \int_{\partial^*G}\nu\otimes\nu : (I:\Psi) I \, d{\cal H}^{n-1} \\
&\qquad = \int_{\partial^*G}I:\Psi \, d{\cal H}^{n-1}.
\end{align*}
This, together with \reff{tensor-conv2}, implies \reff{CH-force-conv1}.

It remains to prove \reff{tensor-conv2}.
Fix $\Psi \in C_c(\Omega; \mathbb{R}^{n \times n})$ and let $\sigma > 0.$ Recall that the reduced boundary $\partial^*G$ admits the decomposition \cite{Giusti84,Ziemer_Book89,EvansGariepy_Book92}
\[
\partial^*G = \biggl( \bigcup_{j=1}^\infty K_j \biggr) \cup Q,
\]
where the $K_j$ $(j = 1, 2, \dots)$ are disjoint compact sets, each a subset of a $C^1$-hypersurface $S_j \subset \Omega$, and $Q\subset \partial G$ with $\| \partial G\|(Q) = 0.$ For $x \in K_j$, the vector $\nu(x)$ is the normal to $S_j$ at $x$. Moreover,
\begin{equation}
\label{GK}
\sum_{j=1}^\infty {\mathcal H}^{n-1}(K_j) = {\mathcal H}^{n-1}(\partial^* G) = \|\partial G \|(\Omega) = P_\Omega(G) < \infty.
\end{equation}
Let $J $ be large enough so that
\begin{equation}
\label{Jsigma}
\sum_{j=J+1}^\infty {\mathcal H}^{n-1}(K_j) < \sigma.
\end{equation}
Since the sets $K_j$ $(j = 1, \dots, J)$ are disjoint and compact, there exist disjoint open sets $U_j \subset \overline{U}_j \subset \Omega$ such that $K_j \subset U_j$ $(j = 1, \dots, J).$ For each $j$ $(1 \le j \le J)$, we define $d_j: U_j \to \mathbb{R}$ to be the signed distance to $S_j$, with the sign chosen so that $\nu (x) = \nabla d_j(x)$ if $x \in K_j$, and we extend $d_j $ to $\Omega$ by setting $d_j = 0$ on $\Omega \setminus U_j.$ We also choose $\zeta_j \in C_c^1(\Omega)$ such that $0 \le \zeta_j \le 1$ on $\Omega$, $\zeta_j = 1$ in a neighborhood of $K_j$, $\mbox{supp}\, (\zeta_j) \subset U_j$, and $ \zeta_j \nabla d_j \in C_c(\Omega, \mathbb{R}^n)$. Define $\nu_J: \Omega \to \mathbb{R}^n$ by
\[
\nu_J = \sum_{j=1}^J \zeta_j \nabla d_j.
\]
Note that $\nu_J \in C_c (\Omega, \mathbb{R}^n)$, $|\nu_J| \le 1$ on $\Omega$, and $\nu_J = \nu$ on each $K_j$ $(1 \le j \le J).$

We rewrite $ \xi_k\nabla\phi_k\otimes\nabla\phi_k $ as
\begin{align}
\label{decomposition}
\xi_k\nabla\phi_k\otimes\nabla\phi_k = & \left( \sqrt{\xi_k}\nabla\phi_k +\sqrt{\xi_k} | \nabla \phi_k | \nu_J \right)\otimes\sqrt{\xi_k}\nabla\phi_k \nonumber\\
&\quad + \left( \sqrt{\frac{2W(\phi_k)}{\xi_k}} - \sqrt{\xi_k}|\nabla\phi_k|\right) \nu_J\otimes\sqrt{\xi_k}\nabla\phi_k \nonumber\\
&\quad - \nu_J\otimes\sqrt{2W(\phi_k)}\nabla\phi_k.
\end{align}
We claim:
\begin{enumerate}
\item[(1)] $\displaystyle\limsup_{k\to\infty}\int_\Omega\left|\sqrt{\xi_k}\nabla\phi_k + \sqrt{\xi_k} | \nabla \phi_k | \nu_J \right|^2 dx \le 4\sigma;$
\item[(2)] $\displaystyle \sup_{k\ge1} \left\| \sqrt{\xi_k}\nabla\phi_k \right\|_{L^2(\Omega)} < \infty; $
\item[(3)] $\displaystyle\lim_{k\to\infty}\int_\Omega \left[ \sqrt{\xi_k}|\nabla\phi_k| -\sqrt{\frac{2W(\phi_k)}{\xi_k}}\,\right]^2dx =0;$
\item[(4)] $\displaystyle \lim_{k\to \infty} \int_\Omega \nu_J\otimes \sqrt{2W(\phi_k)}\nabla\phi_k:\Psi\;dx = - \int_{\partial^*G} \nu_J \otimes \nu: \Psi\; d{\mathcal H}^{n-1}. $
\end{enumerate}
If all these claims are true, then it follows from \reff{decomposition} and \reff{Jsigma} that
\begin{align*}
&\limsup_{k\to\infty} \left| \int_\Omega \xi_k\nabla\phi_k\otimes\nabla\phi_k :\Psi\;dx - \int_{\partial^*G}\nu\otimes\nu:\Psi\;d{\cal H}^{n-1} \right| \\
&\qquad \le \limsup_{k\to\infty} \int_\Omega \left| \left( \sqrt{\xi_k}\nabla\phi_k + \sqrt{\xi_k} | \nabla \phi_k | \nu_J \right)\otimes\sqrt{\xi_k} \nabla\phi_k:\Psi \right| \, dx \\
&\qquad \quad + \limsup_{k\to\infty} \int_\Omega\left|\left( \sqrt{\frac{2W(\phi_k)}{\xi_k}} - \sqrt{\xi_k}|\nabla\phi_k|\right)\nu_J\otimes\sqrt{\xi_k} \nabla\phi_k : \Psi \right| \;dx \\
&\qquad\quad + \left| \lim_{k\to\infty}\int_\Omega\nu_J\otimes \sqrt{2W(\phi_k)}\nabla\phi_k:\Psi\;dx +\int_{\partial^*G}\nu\otimes\nu:\Psi\;d{\cal H}^{n-1} \right| \\
& \qquad \le \limsup_{k\to \infty } \left[ \int_\Omega \left| \sqrt{\xi_k}\nabla\phi_k + \sqrt{\xi_k} | \nabla \phi_k | \nu_J \right|^2 dx \right]^{1/2} \left(\sup_{k\ge 1}\left\| \sqrt{\xi_k}\nabla\phi_k \right\|_{L^2(\Omega)}\right) \|\Psi\|_{L^\infty(\Omega)} \\
& \qquad \quad + \limsup_{k\to\infty}\left[ \int_\Omega \left(\sqrt{\xi_k}|\nabla\phi_k| -\sqrt{\frac{2W(\phi_k)}{\xi_k}}\right)^2dx\right]^{1/2} \left( \sup_{k\ge1}\|\sqrt{\xi_k}\nabla\phi_k\|_{L^2(\Omega)} \right) \| \Psi \|_{L^\infty(\Omega)} \\
& \qquad \quad + \left|\int_{\partial^*G} ( \nu_J -\nu)\otimes\nu : \Psi \;d{\cal H}^{n-1} \right| \\
& \qquad \le \sqrt{4\sigma}\left(\sup_{k\ge 1}\left\| \sqrt{\xi_k}\nabla\phi_k \right\|_{L^2(\Omega)}\right) \|\Psi\|_{L^\infty(\Omega)} + 2 \|\Psi\|_{L^\infty(\Omega)} \sum_{j=J+1}^\infty {\cal H}^{n-1}(K_j) \\
& \qquad \le \sqrt{4\sigma}\left(\sup_{k\ge 1}\left\| \sqrt{\xi_k}\nabla\phi_k \right\|_{L^2(\Omega)}\right) \|\Psi\|_{L^\infty(\Omega)} + 2\sigma \|\Psi\|_{L^\infty(\Omega)}.
\end{align*}
Since $\sigma > 0$ is arbitrary, this proves \qref{tensor-conv2}.

We now prove the claims. Claim (2) follows from the assumption \reff{important} of the energy convergence and the assumption that $P_\Omega(G) < \infty.$ Claim (3) is \qref{equienergy-0} in Lemma~\ref{l:equienergy}. Claim (4) follows from \reff{etakweakconv} in Lemma~\ref{l:etak}, which implies that for any $j \in \{ 1, \dots, n \}$
\[
\lim_{k\to \infty} \int_\Omega \partial_{x_j} \eta_k\, h \, dx = -\int_{\partial^* G} \nu_j h \, d{\mathcal H}^{n-1} \qquad \forall h \in C_c(\Omega),
\]
where $\nabla \eta_k = \sqrt{2 W(\phi_k)}\, \nabla \phi_k$.

Proof of Claim (1).
Noting that $|\mbox{\normalsize\boldmath$n$}u_J| \le 1$, we have for each $k \ge 1$ that \begin{eqnarray}gin{align} \langlebel{Claim1} &\frac12 \int_\Omegaega\left|\sqrt{\mbox{\mbox{\normalsize\boldmath$n$}ormalsize\boldmath$x$}i_k}\mbox{\normalsize\boldmath$n$}abla\phi_k + \sqrt{\mbox{\mbox{\normalsize\boldmath$n$}ormalsize\boldmath$x$}i_k} | \mbox{\normalsize\boldmath$n$}abla \phi_k | \mbox{\normalsize\boldmath$n$}u_J \right|^2 dx \mbox{\normalsize\boldmath$n$}n\\ &\qquad = \frac12 \int_\Omegaega \left(\mbox{\mbox{\normalsize\boldmath$n$}ormalsize\boldmath$x$}i_k |\mbox{\normalsize\boldmath$n$}abla\phi_k|^2 + \mbox{\mbox{\normalsize\boldmath$n$}ormalsize\boldmath$x$}i_k |\mbox{\normalsize\boldmath$n$}abla\phi_k|^2 | \mbox{\normalsize\boldmath$n$}u_J|^2 +2\mbox{\mbox{\normalsize\boldmath$n$}ormalsize\boldmath$x$}i_k|\mbox{\normalsize\boldmath$n$}abla\phi_k|\mbox{\normalsize\boldmath$n$}abla\phi_k \cdot \mbox{\normalsize\boldmath$n$}u_J\right)dx \mbox{\normalsize\boldmath$n$}n\\ &\qquad \le \int_\Omegaega\left(\mbox{\mbox{\normalsize\boldmath$n$}ormalsize\boldmath$x$}i_k |\mbox{\normalsize\boldmath$n$}abla\phi_k|^2 + \mbox{\mbox{\normalsize\boldmath$n$}ormalsize\boldmath$x$}i_k|\mbox{\normalsize\boldmath$n$}abla\phi_k|\mbox{\normalsize\boldmath$n$}abla\phi_k\cdot \mbox{\normalsize\boldmath$n$}u_J \right)dx \mbox{\normalsize\boldmath$n$}n\\ &\qquad = \int_\Omegaega \left[ \frac{\mbox{\mbox{\normalsize\boldmath$n$}ormalsize\boldmath$x$}i_k}{2}|\mbox{\normalsize\boldmath$n$}abla\phi_k|^2 + \frac{W(\phi_k)}{\mbox{\mbox{\normalsize\boldmath$n$}ormalsize\boldmath$x$}i_k}\right] dx + \int_\Omegaega \left[ \frac{\mbox{\mbox{\normalsize\boldmath$n$}ormalsize\boldmath$x$}i_k}{2}|\mbox{\normalsize\boldmath$n$}abla\phi_k|^2 - \frac{W(\phi_k)}{\mbox{\mbox{\normalsize\boldmath$n$}ormalsize\boldmath$x$}i_k}\right] dx \mbox{\normalsize\boldmath$n$}n\\ &\qquad \quad + \int_\Omegaega \left[ \sqrt{\mbox{\mbox{\normalsize\boldmath$n$}ormalsize\boldmath$x$}i_k}|\mbox{\normalsize\boldmath$n$}abla\phi_k|-\sqrt{\frac{2W(\phi_k)}{\mbox{\mbox{\normalsize\boldmath$n$}ormalsize\boldmath$x$}i_k}}\right] \sqrt{\mbox{\mbox{\normalsize\boldmath$n$}ormalsize\boldmath$x$}i_k}\mbox{\normalsize\boldmath$n$}abla\phi_k \cdot\mbox{\normalsize\boldmath$n$}u_J\;dx \mbox{\normalsize\boldmath$n$}n\\ &\qquad \quad + \int_\Omegaega \sqrt{2W(\phi_k)}\mbox{\normalsize\boldmath$n$}abla\phi_k\cdot\mbox{\normalsize\boldmath$n$}u_J\;dx \mbox{\normalsize\boldmath$n$}n \\ &\qquad =: I_1(k) +I_2 (k) +I_3(k) + I_4(k). \end{align} By \qref{POG}, \[ \lim_{k\to \infty} I_1(k) = P_\Omegaega(G). \] By Lemma~\ref{l:equienergy} on the asymptotic equi-partition of energy, \[ \lim_{k\to \infty} I_2(k) = 0. \] By Claim (2) and Claim (3), \[ \lim_{k\to \infty} I_3(k) = 0. \] By \reff{etakweakconv} in Lemma~\ref{l:etak}, \begin{eqnarray}gin{align*} \lim_{k\to\infty} I_4 &= - \int_{\partialrtial^* G} \mbox{\normalsize\boldmath$n$}u\cdot \mbox{\normalsize\boldmath$n$}u_J\;d{\cal H}^{n-1} \\ &= - \sum_{j=1}^J{\cal H}^{n-1}(K_j) - \sum_{j=J+1}^\infty\int_{K_j}\mbox{\normalsize\boldmath$n$}u\cdot\mbox{\normalsize\boldmath$n$}u_J\;d{\cal H}^{n-1}. 
\end{align*}
Therefore, continuing from \reff{Claim1}, we have by \reff{GK}, \reff{Jsigma}, and the fact that $|\nu\cdot \nu_J|\le 1$ that
\begin{align*}
&\limsup_{k\to \infty} \frac12 \int_\Omega\left|\sqrt{\xi_k}\nabla\phi_k + \sqrt{\xi_k} | \nabla \phi_k | \nu_J \right|^2 dx \\
&\qquad \le P_\Omega(G) - \sum_{j=1}^J{\cal H}^{n-1}(K_j) - \sum_{j=J+1}^\infty\int_{K_j}\nu\cdot \nu_J\;d{\cal H}^{n-1} \\
&\qquad = \sum_{j=J+1}^\infty {\cal H}^{n-1}(K_j) -\sum_{j=J+1}^\infty\int_{K_j} \nu\cdot\nu_J\,d{\cal H}^{n-1} \\
&\qquad \le 2\sum_{j=J+1}^\infty {\cal H}^{n-1}(K_j) \\
&\qquad \le 2\sigma,
\end{align*}
proving Claim (1). The proof is complete.
\end{proof}
\noindent{\bf Acknowledgments.} This work was supported by the US National Science Foundation (NSF) through the grants DMS-1411438 (S.D.), DMS-1319731 (B.L.), and DMS-1454939 (J.L.).
\end{document}
\begin{document} \title{Collective strong coupling between ion Coulomb crystals and an optical cavity field:\\ Theory and experiment} \author{M. Albert} \altaffiliation{Current address: Max-Planck-Institut für Quantenoptik, Hans-Kopfermann-Str. 1, 85748 Garching, Germany and Albert-Ludwigs-Universität Freiburg, Physikalisches Institut, Hermann-Herder-Str. 3, 79104 Freiburg, Germany} \affiliation{QUANTOP, Danish National Research Foundation Center for Quantum Optics, Department of Physics and Astronomy, University of Aarhus, DK-8000 \AA rhus C., Denmark} \author{J. P. Marler} \altaffiliation{Current address: Department of Physics and Astronomy, Northwestern University, 2145 Sheridan Road, Evanston IL 60208, USA} \affiliation{QUANTOP, Danish National Research Foundation Center for Quantum Optics, Department of Physics and Astronomy, University of Aarhus, DK-8000 \AA rhus C., Denmark} \author{P. F. Herskind} \altaffiliation{Current address: Research Laboratory of Electronics, Massachusetts Institute of Technology, Cambridge, MA 02139, USA} \affiliation{QUANTOP, Danish National Research Foundation Center for Quantum Optics, Department of Physics and Astronomy, University of Aarhus, DK-8000 \AA rhus C., Denmark} \author{A. Dantan} \affiliation{QUANTOP, Danish National Research Foundation Center for Quantum Optics, Department of Physics and Astronomy, University of Aarhus, DK-8000 \AA rhus C., Denmark} \author{M. Drewsen}\email{[email protected]} \affiliation{QUANTOP, Danish National Research Foundation Center for Quantum Optics, Department of Physics and Astronomy, University of Aarhus, DK-8000 \AA rhus C., Denmark} \begin{abstract} A detailed description and theoretical analysis of experiments achieving coherent coupling between an ion Coulomb crystal and an optical cavity field are presented. The various methods used to measure the coherent coupling rate between large ion Coulomb crystals in a linear quadrupole radiofrequency ion trap and a single-field mode of a moderately high-finesse cavity are described in detail. Theoretical models based on a semiclassical approach are applied in assessment of the experimental results of [P.~F.~Herskind \textit{et al.}, Nature Phys. {\bf 5}, 494 (2009)] and of complementary new measurements. Generally, a very good agreement between theory and experiments is obtained. \end{abstract} \pacs{42.50.Pq,37.30.+i,42.50.Ct} \date{\today} \maketitle \section{Introduction} Cavity Quantum Electrodynamics (CQED) constitutes a fundamental platform for studying the quantum dynamics of matter systems interacting with electromagnetic fields~\cite{Berman1994,Haroche2006}. For a single two-level quantum system interacting with a single mode of the electromagnetic field of a resonator, a particularly interesting regime of CQED is reached when the rate, $g$, at which single excitations are coherently exchanged between the two-level system and the cavity field mode exceeds both the decay rate of the two-level system, $\gamma$, and the rate, $\kappa$, at which the cavity field decays~\cite{Rempe1994Cavity}. This so-called strong coupling regime was investigated first with single atoms in microwave and optical cavities~\cite{Brune1996,Thompson1992Observation} and recently with quantum dots~\cite{Badolato2005Deterministic,khitrova2006} and superconducting Josephson junctions~\cite{wallraff2004,Chiorescu2004Coherent}. 
In the optical domain, the use of ultrahigh-finesse cavities with a very small modevolume allows for reaching the confinement of the light field required to achieve strong coupling with single neutral atoms~\cite{Hood1998,Rempe1994Cavity,Maunz2005}. With charged particles, however, the insertion of dielectric mirrors in the trapping region makes it extremely challenging to obtain sufficiently small cavity modevolumes, due to the associated perturbation of the trapping potentials and charging effects~\cite{Harlander2010Trapped-ion,Herskind2011AMicrofabricated}. Although the strong coupling regime has not yet been reached with ions, single ions in optical cavities have been successfully used for, e.g., probing the spatial structure of cavity fields~\cite{Guthohrlein2001AsingleIon}, enhanced spectroscopy~\cite{Kreuter2004}, the generation of single photons~\cite{Keller2004Continuous,Barros2009Deterministic}, the investigation of cavity sideband cooling~\cite{Leibrandt2009Cavity}, or the demonstration of a single ion laser~\cite{Dubin2010Quantum}. For an ensemble of $N$ identical two-level systems simultaneously interacting with a single mode of the electromagnetic field, the coherent coupling rate is enhanced by a factor $\sqrt{N}$~\cite{Haroche2006}. This leads to another interesting regime of CQED, the so-called \textit{collective} strong coupling regime~\cite{Haroche2006}, where the \textit{collective} coupling rate $g_{\mathrm N}=g\sqrt{N}$ is larger than both $\kappa$ and $\gamma$. This regime, first explored with Rydberg atoms in microwave cavities~\cite{Kaluzny1983}, has been realized in the optical domain with atomic beams~\cite{Thompson1992Observation}, atoms in magneto-optical traps~\cite{Lambrecht1996,Nagorny2003Collective,Chan2003Observation,Kruse2004Observation,Chen2011Conditional}, Bose-Einstein condensates~\cite{Brennecke2007Cavity,Colombe2007Strong}, and, recently, with ion Coulomb crystals~\cite{Herskind2009Realization}. This cavity-enhanced collective interaction with an ensemble has many applications within quantum optics and quantum information processing~\cite{Kimble2008}, including the establishment of strong nonlinearities~\cite{Lambrecht1995Optical,Joshi2003Optical}, QND measurements~\cite{Grangier1991Observation,Roch1997Quantum,Mielke1998Nonclassical}, the production~\cite{Black2005On-Demand,Thompson2006AHigh-Brightness} and storage~\cite{Simon2007,Tanji2009Heralded} of single-photons, the generation of squeezed and entangled states of light~\cite{Lambrecht1996,Josse2003Polarization,Josse2004Continuous} and atoms \cite{Leroux2010Implementation,Chen2011Conditional} , the observation of cavity optomechanical effects~\cite{Nagorny2003Collective,Kruse2004Observation,Klinner2006Normal,Slama2007Superradiant,Murch2008Observation,Brennecke2008}, cavity cooling~\cite{Chan2003Observation,Black2003Observation}, and the investigation of quantum phase transitions~\cite{Baumann2010Dicke}. This paper provides a detailed description and a theoretical analysis of experiments achieving \textit{collective} strong coupling with ions~\cite{Herskind2009Realization}. The various methods used to measure the coherent coupling rate between large ion Coulomb crystals in a linear quadrupole radiofrequency ion trap and a single field mode of a moderately high-finesse cavity ($\mathcal{F}\sim3000$) are described in detail. 
Theoretical models based on a semiclassical approach are applied in assessment of the experimental results of Ref.~\cite{Herskind2009Realization} as well as of complementary new measurements. Generally, a very good agreement between the theoretical predictions and the experimental results is obtained. As also emphasized in Ref.~\cite{Herskind2009Realization}, the realization of collective strong coupling with ion crystals is important for ion-based CQED~\cite{Lange2009CavityQED} and enables, e.g., for the realization of quantum information processing devices, such as high-efficiency, long-lived quantum memories~\cite{Lukin2000,Simon2007} and repeaters~\cite{Duan2001}. In addition to the well-established attractive properties of cold, trapped ions for quantum information processing~\cite{Leibfried2003Quantum,Blatt2008Entangled}, ion Coulomb crystals benefit from unique properties which can be exploited for CQED purposes. First, their uniform density under linear quadrupole trapping conditions~\cite{Drewsen1998,Hornekaer2002Formation,Hornekaer2001} makes it possible to couple the same ensemble equally to different transverse cavity modes~\cite{Dantan2009Large} and opens for the realization of multimode quantum light-matter interfaces~\cite{Lvovsky2009Optical}, where the spatial degrees of freedom of light can be exploited in addition to the traditional polarization and frequency encodings~\cite{Vasilyev2008Quantum,Tordrup2008Holographic,Wesenberg2011Dynamics}. Second, their cold, solid-like nature combined with their strong optical response to radiation pressure forces and their tunable mode spectrum~\cite{Dubin1991Theory,Dubin1996Normal,Dantan2010Non-invasive} make ion Coulomb crystals a unique medium to investigate cavity optomechanical effects~\cite{Kippenberg2008}. Ion Coulomb crystals could, for instance, be used as a model system to study the back action of the cavity light field on the collective motion of mesoscopic objects at the quantum limit, as was recently demonstrated with ultracold atoms~\cite{Murch2008Observation,Brennecke2008,Baumann2010Dicke}. In addition, novel classical and quantum phase transitions could be investigated using cold ion Coulomb crystals in optical cavities~\cite{Garcia-Mata2007Frenkel-Kontorova,Retzker2008Double,Fishman2008Structural,Harkonen2009Dicke}. The paper is organized as follows: Sec.~\ref{sec:theory} presents the theoretical basis for the CQED interaction of ion Coulomb crystals and an optical cavity field. The cavity field reflectivity spectra, and the effective number of ions interacting with the cavity field are derived and the effect of temperature on the collective coupling rate is discussed. In Sec.~\ref{sec:experimentalSetup} the experimental setup and the measurement procedures are described. Section~\ref{sec:experimentalResults} presents various collective coupling rate measurements and compares them to the theoretical expectations. Section~\ref{sec:coherencetime} shows measurements of the coherence time of collective coherences between Zeeman sublevels. A conclusion is given in Sec.~\ref{sec:conclusion}. \section{CQED interaction: theoretical basis} \label{sec:theory} \subsection{Hamiltonian and evolution equations} \begin{figure}\label{fig:InOutputFieldCavity} \end{figure} We consider the interaction of $N_{\mathrm{tot}}$ two-level ions in a Coulomb crystal with a single mode of the electromagnetic field of an optical cavity (denoted by $\mathrm{nm}$), as depicted in Fig.~\ref{fig:InOutputFieldCavity}. 
The single-ended linear cavity is formed by two mirrors M$_1$ (partial transmitter, PT) and M$_2$ (high reflector, HR) with intensity transmission coefficients $T_1$ and $T_2$ ($T_1\gg T_2$). The absorption loss coefficient per round-trip is $\mathcal{L}$ and the empty cavity field round-trip time is $\tau=2l/c$, where $l$ is the cavity length and $c$ the speed of light. The intracavity, input and reflected fields are denoted by $a$, $a_{\mathrm{in}}$, and $a_{\mathrm{r}}$, respectively. The interaction of an ensemble of $N$ identical two-level ions with a single mode of the cavity field can be described by a Jaynes-Cummings Hamiltonian of the form~\cite{Haroche2006,Breuer2007TheTheory} \begin{equation} \label{eq:Jaynes-CummingsHamiltonian} H=H_{\mathrm{at}}+H_{\mathrm{l}}+H_{\mathrm{al}} \end{equation} where, in the frame rotating at the laser frequency $\omega_{\mathrm{l}}$, the atom and light Hamiltonians are given by $H_{\mathrm{at}}=\hbar \Delta \sum_{j=1}^{N_{\mathrm{tot}}} \hat \pi_j^{(e)}$ and $H_{\mathrm{l}}=\hbar \Delta_{\mathrm{c}} \hat a^\dagger \hat a$. The atomic and cavity detunings are denoted by $\Delta=\omega_{\mathrm{at}}-\omega_{\mathrm{l}}$ and $\Delta_{\mathrm{c}}=\omega_{\mathrm{c}}-\omega_{\mathrm{l}}$, where $\omega_{\mathrm{at}}$ and $\omega_{\mathrm{c}}$ are the atomic and cavity resonance frequencies, respectively. $\hat \pi_j^{(e)}$ is the excited state population operator of the $j$-th ion and $\hat{a}$, $\hat{a}^{\dagger}$ are the intracavity field annihilation and creation operators. In the rotating wave approximation the interaction Hamiltonian reads \begin{equation} \label{eq:interactionHamiltonian} H_{\mathrm{al}}=-\hbar g\sum_{j=1}^{N_{\mathrm{tot}}} \Psi_{\mathrm{nm}}(\bm r_j) (\hat\sigma_j^\dagger \hat a +\hat\sigma_j \hat a^\dagger). \end{equation} where $\hat \sigma_j^\dagger$ and $\hat \sigma_j$ are the atomic rising and lowering operators, defined in the frame rotating at the laser frequency. The single-ion coupling rate $g$ is defined as $g=\mu_{\mathrm{ge}}E_{0}/\hbar$, where $\mu_{\mathrm{ge}}$ is the dipole element of the transition considered and $E_0$ the maximum electric field amplitude. The field distribution $E_0 \Psi_{\mathrm{nm}}(\bm r_j)$ is assumed to be that of a single-cavity Hermite-Gauss mode~\cite{Kogelnik1966Laser}. In the following, we will restrict ourselves to the fundamental $\mathrm{TEM}_{00}$ mode of the cavity and refer to Ref.~\cite{Dantan2009Large} for the coupling of ion Coulomb crystals to higher-order cavity transverse modes. The coupled atom-cavity system is subject to decoherence, mainly through the spontaneous decay of the ions from the excited state and through the decay of the cavity field due to the finite reflectivity of the cavity mirrors and due to intracavity losses. These dissipative processes are characterized by the atomic dipole decay rate, $\gamma$, and by the total cavity field decay rate, $\kappa$, respectively. The cavity field decay rate is given by $\kappa=\kappa_1+\kappa_2+\kappa_{\mathcal{L}}$, and includes the decay rates through the PT and HR mirrors ($\kappa_1=T_1/2\tau$ and $\kappa_2=T_2/2\tau$) and the decay rate due to absorption losses ($\kappa_{\mathcal{L}}=\mathcal{L}/2\tau$). 
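For concreteness, the total cavity field decay rate defined above can be estimated directly from the mirror transmissions and the round-trip loss. The following minimal Python sketch, which assumes the mirror and cavity parameters quoted in Sec.~\ref{sec:experimentalSetup} ($T_1=1500$~ppm, $T_2=5$~ppm, $\mathcal{L}\simeq650$~ppm, $l=11.8$~mm), gives values consistent with the measured decay rate and finesse.
\begin{verbatim}
# Minimal sketch: total cavity field decay rate and finesse from the mirror
# parameters quoted in Sec. III (illustrative only).
import math

c   = 2.998e8      # speed of light (m/s)
l   = 11.8e-3      # cavity length (m)
T1  = 1500e-6      # intensity transmission of the PT mirror
T2  = 5e-6         # intensity transmission of the HR mirror
Lrt = 650e-6       # round-trip absorption loss

tau    = 2 * l / c                 # cavity round-trip time
kappa1 = T1 / (2 * tau)            # decay rate through the PT mirror (rad/s)
kappa2 = T2 / (2 * tau)            # decay rate through the HR mirror
kappaL = Lrt / (2 * tau)           # decay rate due to absorption losses
kappa  = kappa1 + kappa2 + kappaL  # total field decay rate

fsr     = c / (2 * l)              # free spectral range (Hz)
finesse = math.pi * fsr / kappa    # FWHM of a resonance is kappa/pi (Hz)

print(f"kappa/2pi = {kappa / (2 * math.pi) / 1e6:.2f} MHz")   # ~2.2 (measured: 2.1(1))
print(f"FSR = {fsr / 1e9:.1f} GHz, finesse = {finesse:.0f}")  # ~12.7 GHz, ~2900
\end{verbatim}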
We derive standard semiclassical equations of motion for the mean values of the observables via $\langle \dot{\hat{a}}\rangle=\frac {i}{\hbar}\langle[H,\hat{a}]\rangle$ and by phenomenologically adding the relevant dissipative processes \cite{Haroche2006,Breuer2007TheTheory,Scully1997,Tavis1968Exact,Thompson1992Observation,Raizen1989Normal-mode}. In the low saturation regime, most of the atoms remain in the ground state, $\langle \hat{\pi}_j^{(e)}\rangle\ll 1$, and the dynamical equations for the mean values of the observables read \begin{eqnarray} \label{eq:opticalBlochEquationsLowSaturation1} \dot \sigma_j &=& -(\gamma+i \Delta) \sigma_j + i g \Psi_{00}(\bm r_j) a, \\ \dot a &=& -(\kappa+i \Delta_{\mathrm{c}}) a + i \sum_{j=1}^{N_{\mathrm{tot}}} g \Psi_{00}(\bm r_j) \sigma_j +\sqrt{2\kappa_1/\tau} a_{\mathrm{in}},\label{eq:opticalBlochEquationsLowSaturation2} \end{eqnarray} where $o=\langle \hat o\rangle$ is the mean value of observable $\hat o$. \subsection{Steady-state reflectivity spectrum and effective number of ions} \begin{figure} \caption{(Color online) (a) Calculated probe reflectivity spectrum as a function of cavity detuning $\Delta_{\mathrm c}$, for an empty cavity and for a cavity containing a crystal with an effective number of ions $N=500$. (b) Effective cavity decay rate $\kappa^\prime$ and shift of the cavity resonance $\Delta^\prime_{\mathrm{c}}-\Delta_{\mathrm{c}}$ as a function of the probe detuning $\Delta$.\label{fig:theoryCoupledAtomCavitySystem}} \end{figure} In steady-state, the mean value of the intracavity field amplitude is given by \begin{eqnarray} \label{eq:steadyStateSolution} a=\frac {\sqrt{2 \kappa_1/\tau}a_{\mathrm{in}}}{\kappa^\prime+i\Delta_{\mathrm{c}}^\prime}, \end{eqnarray} where an effective cavity decay rate and an effective cavity detuning are introduced: \begin{eqnarray} \label{eq:kappaPrime} \kappa^\prime&=&\kappa+g^2 N\frac{\gamma}{\gamma^2+\Delta^2},\\ \Delta_{\mathrm{c}}^\prime&=&\Delta_{\mathrm{c}} - g^2 N\frac{\Delta}{\gamma^2+\Delta^2}.\label{eq:delta_cprime} \end{eqnarray} In these expressions, $N$ is the effective number of ions interacting with the intracavity field, which is calculated by summing over all ions and weighting the contribution of each ion by the field mode function under consideration evaluated at the ion's position: \begin{equation} \label{eq:effectiveNumberOfIons} N= \sum_{j=1}^{N_\mathrm{tot}} \Psi_{\mathrm{00}}^2(\bm r_j). \end{equation} Here, \begin{align}\nonumber\Psi_{00}^2(\bm r)=&\left(\frac{w_0}{w(z)}\right)^2\exp\left(-\frac{2r^2}{w(z)^2}\right)\\ &\times\sin^2\left[kz-\arctan(z/z_0)+kr^2/2R(z)\right],\end{align} is the mode function of the cavity fundamental TEM$_{00}$ Gaussian mode with waist $w_0$ at the center of the mode and \hbox{$w(z)=w_0\sqrt{1+z^2/z_0^2}$}, \hbox{$R(z)=z+z_0^2/z$}, \hbox{$z_0=\pi w_0^2/\lambda$}, and \hbox{$k=2\pi /\lambda$}. Large ion Coulomb crystals in a linear radiofrequency trap are to an excellent approximation spheroids with half-length $L$ and radius $R$ (see Fig.~\ref{fig:crystalAbsorptionAndPhaseShift}), where the density of ions, $\rho$, is constant throughout the crystal~\cite{Drewsen1998,Hornekaer2002Formation}. It is then convenient to adopt a continuous medium description, in which Eq.~(\ref{eq:effectiveNumberOfIons}) becomes an integral over the crystal volume $V$: \begin{equation} N=\rho\int_V d\bm r\,\Psi_{00}^2(\bm r). \end{equation} In our experiment, the crystal radius and half-length, $R$ and $L$, are typically much smaller than $z_0$ and the axial mode function can be approximated by $\sin^2(kz)$.
Moreover, for randomly distributed ions along the cavity axis $z$, one can average over the cavity standing-wave longitudinal structure, which gives an effective number of ions equal to \begin{equation} \label{eq:effectiveNumberOfIonsAxiallyAveraged} N=\frac{\rho}{2}\int_V d\bm r \exp[-2r^2/w(z)^2]. \end{equation} This expression can be evaluated knowing the crystal dimensions, its density, and the cavity mode geometry. For typical crystals with large radial extension as compared to the cavity waist $R\gg w_0$ and length smaller than the Rayleigh range $L\ll z_{0}$, this expression reduces to \begin{equation}N\simeq\rho\frac{\pi w_0^2}{4}L,\end{equation} which is simply the product of the ion density by the volume of the cavity mode in the crystal. Using the input-output relation $a_{\mathrm{r}}=\sqrt{2\kappa_1 \tau}a-a_{\mathrm{in}}$, one finds that the steady-state probe reflectivity spectrum of the cavity is also Lorentzian-shaped in presence of the ions, the bare cavity decay rate, and detuning $\kappa$ and $\Delta_{\mathrm{c}}$ being replaced by their effective counterparts $\kappa'$ and $\Delta_{\mathrm{c}}'$ of Eqs.~(\ref{eq:kappaPrime},\ref{eq:delta_cprime}): \begin{equation} \label{eq:reflectivity} \mathcal{R}\equiv\left|\frac{a_r}{a_{\mathrm{in}}}\right|^2= \left|\frac{2\kappa_1-\kappa'-i\Delta_{\mathrm{c}}'}{\kappa'+i\Delta_{\mathrm{c}}'}\right|^2. \end{equation} The broadening and shift of the cavity resonance then represent the change in absorption and dispersion experienced by the cavity field interacting with $N$ ions. In Fig. \ref{fig:theoryCoupledAtomCavitySystem} (a) the expected cavity reflectivity spectrum is shown for both an empty cavity and a cavity containing a crystal with an effective number of ions $N=500$ and for parameters corresponding to those used in the experiments presented in Secs. \ref{sec:experimentalSetup} and \ref{sec:experimentalResults}. In Fig. \ref{fig:theoryCoupledAtomCavitySystem} (b) the effective cavity decay rate, $\kappa^\prime$, and the shift of the cavity resonance induced by the interaction with the ions, $\Delta^\prime_{\mathrm{c}}-\Delta_{\mathrm{c}}$, are shown as a function of the probe detuning, $\Delta$, for the same parameters. \subsection{Effect of the motion of the ions} \label{sec:EffectOfTheMotionOfTheIons} The interaction Hamiltonian in Eq.~\eqref{eq:interactionHamiltonian} is only valid for atoms at rest. If an ion is moving along the axis of the cavity, the standing-wave structure of the cavity field and the Doppler shifts due to the finite velocity of the ion have to be taken into account. For an ion moving along the standing wave field with a velocity $v_j$, it is convenient to define atomic dipole operators, $\sigma_{j\pm}=\frac{1}{2}\sigma_j \exp{(\pm i k z_j)}$, arising from the interaction with the two counterpropagating components of the standing-wave cavity field. In the low saturation limit and taking into account the opposite Doppler-shifts, the evolution equations \eqref{eq:opticalBlochEquationsLowSaturation1},\eqref{eq:opticalBlochEquationsLowSaturation2} become \begin{widetext} \begin{eqnarray} \label{eq:opticalBlochEquationsIncludingMotion} \dot \sigma_{j\pm} &=& -\left[\gamma+i (\Delta\pm kv_j)\right]\sigma_{j\pm}+i (g/2) \Psi_{\mathrm{nm}}(\bm r_j) a\\ \dot a &=& -(\kappa+i \Delta_{\mathrm{c}}) a + i (g/2) \sum_{j=1}^{N_{tot}} \Psi_{\mathrm{nm}}(\bm r_j) \left(\sigma_{j+}+\sigma_{j-}\right)+\sqrt{2\kappa_1/\tau} a_{\mathrm{in}}. 
\end{eqnarray} \end{widetext} When the typical timescale of the motion is slow as compared to the timescales for the coupled dynamics of the atomic dipole and cavity field, the steady-state mean value of the intracavity field can be found by averaging the contributions of the individual dipole mean values given by Eq.~\eqref{eq:opticalBlochEquationsIncludingMotion} over the distribution of the mean velocities, $f(v)$. For a distribution $f(v)$ with an average velocity $v_{\mathrm{D}}$ a conservative estimate for this to be valid is that the mean Doppler-shift is smaller than both effective rates of the coupled system on resonance ($\Delta_{\mathrm{c}}=\Delta=0$), $kv_{\mathrm{D}}\ll\min[\kappa+g^2N/\gamma,\gamma+g^2N/\kappa]$. Under these conditions, the expression for the intracavity field mean value is then of the same form as in the zero-velocity case (Eq.~\eqref{eq:steadyStateSolution}). The effective cavity field decay rate and detuning of Eqs.~\eqref{eq:kappaPrime} and\eqref{eq:delta_cprime} are modified according to \begin{eqnarray} \label{eq:kappaMotion} \kappa^\prime&=&\kappa+g^2 N \int {\mathrm d} v f(v)\gamma \xi(v) \\ \label{eq:DeltaCMotion}\Delta_{\mathrm{c}}^\prime&=&\Delta_{\mathrm{c}}-g^2 N \int {\mathrm d} v f(v)(\Delta-kv) \xi(v) . \end{eqnarray} where \begin{equation}\xi(v)=\frac{\gamma ^2 + \Delta ^2 +(kv)^2}{(\gamma^2 + \Delta ^2)^2 +2(\gamma^2 - \Delta ^2)(kv)^2 + (kv)^4}.\end{equation} In the case of a thermal Maxwell-Boltzmann distribution with temperature $T$, one has \hbox{$ f(v) = \sqrt{\frac m {2 \pi k_{\mathrm{B}}T}}\exp\left(-\frac {m v^2}{2k_{\mathrm{B}}T}\right)$}, where $k_{\mathrm{B}}$ is the Boltzmann constant and $m$ the mass of the ion. At low temperatures, i.e., when the width of the thermal distribution is small as compared to the atomic natural linewidth, the effective cavity field decay rate and detuning given by Eqs.~\eqref{eq:kappaMotion} and \eqref{eq:DeltaCMotion} are well-approximated by \begin{eqnarray} \label{eq:kappaPrimeTemperature} \kappa^\prime&=&\kappa+g^2 N\frac{\gamma^\prime}{{\gamma^\prime}^2+\Delta^2},\\ \Delta_{\mathrm{c}}^\prime&=&\Delta_{\mathrm{c}} - g^2 N\frac{\Delta}{{\gamma^\prime}^2+\Delta^2}.\label{eq:delta_cprime_Temperature} \end{eqnarray} These equations are of the same form as Eqs.~\eqref{eq:kappaPrime} and \eqref{eq:delta_cprime}, replacing the natural dipole decay rate by an effective dipole decay rate , \begin{equation} \label{eq:effectiveGamma} \gamma^\prime\simeq \gamma(1+kv_{\mathrm{D}}/\sqrt{2}), \end{equation} where $v_{\mathrm{D}}=\sqrt{k_BT/m}$ is the mean Doppler velocity. \section{Experimental setup} \label{sec:experimentalSetup} \begin{figure} \caption{(Color online) Schematic experimental setup. The abbreviations are: polarizing beam splitter (PBS), single mode fiber (SMF), acousto-optical modulator (AOM), dichroic mirror (DM), Pound-Drever-Hall lock (PDHL), avalanche photodiode (APD), second harmonic generation (SHG). The photoionization laser is not shown.} \label{fig:ExperimentalSetup} \end{figure} \subsection{Cavity trap} The ion trap used is a segmented linear quadrupole radiofrequency trap that consists of four cylindrical electrode rods (for details see \cite{Herskind2008Loading}). The electrode radius is $2.60~\mathrm{mm}$ and the distance from the trap center to the electrodes is $r_0 = 2.35~\mathrm{mm}$. 
Each electrode rod is divided into three parts, where the length of the center electrode is $z_{\mathrm{C}}=5.0~\mathrm{mm}$, and the length of the end electrodes is $z_{\mathrm{E}}=5.9~\mathrm{mm}$. Radial confinement is achieved by a radiofrequency field (RF) applied to the entire rods at a frequency of $2\pi \times 4~\mathrm{MHz}$ and a $\pi$ phase difference between neighboring rods. The axial trapping potential is created by static voltages (DC) applied to the outer parts of the rods. An optical cavity is incorporated into the trap with its axis parallel to the symmetry axis of the ion trap (see Fig. \ref{fig:ExperimentalSetup}). The cavity mirrors have a diameter of $1.2~\mathrm{mm}$ and a radius of curvature of $10~\mathrm{mm}$. The rear faces of both mirrors are anti-reflection coated at a wavelength of $866~\mathrm{nm}$ corresponding to the $3\mathrm{d}^2\mathrm{D}_{3/2}\leftrightarrow 4\mathrm{p}^2\mathrm{P}_{1/2}$ transition in $^{40}\mathrm{Ca}^+$, while the front face of one mirror is partially transmitting (PT) and that of the other highly reflecting (HR) at this wavelength. Their intensity transmission coefficients are $1500$ and $5~\mathrm{ppm}$, respectively. The intracavity losses due to contamination of the mirrors during the initial bake out amount to $\sim 650$ ppm. The PT mirror is mounted on a plate that can be translated using piezoelectric actuators to allow for scanning or actively stabilizing the cavity length. The cavity has a close to confocal geometry with a length of $11.8~\mathrm{mm}$, corresponding to a free spectral range of $12.7~\mathrm{GHz}$ and a waist of the fundamental $\mathrm{TEM_{00}}$ mode of $w_0=37~\mathrm{\mu m}$. With a measured cavity field decay rate of $\kappa=2\pi \times (2.1\pm0.1)~\mathrm{MHz}$, the finesse is found to be $\mathcal{F}=3000\pm200$ at a wavelength of $866~\mathrm{nm}$~\cite{Herskind2008Loading}. $^{40}\mathrm{Ca}^+$ ions are loaded into the trap by \textit{in situ} photoionization of atoms from a beam of atomic calcium in a two-photon resonant photoionization process~\cite{Kjaergaard2000Isotope,Mortensen2004Isotope,Herskind2008Loading}. The ions are cooled to a crystalline state through Doppler-laser cooling using a combination of two counterpropagating laser beams, resonant with the $4\mathrm{s}^2\mathrm{S}_{1/2}\leftrightarrow 4\mathrm{p}^2\mathrm{P}_{1/2}$ transition at 397 nm along the trap axis, and a repumping laser applied along the $x$ axis and resonant with the $3\mathrm{d}^2\mathrm{D}_{3/2}\leftrightarrow 4\mathrm{p}^2\mathrm{P}_{1/2}$ transition at 866 nm to prevent shelving to the metastable $\mathrm{D}_{3/2}$ state. Three sets of Helmholtz coils are used to compensate for residual magnetic fields and to produce bias magnetic fields. For the measurements of the collective coupling rate between the ion Coulomb crystals and the cavity light field, the transverse magnetic fields along $x$ and $y$ are nulled and a magnetic field of $B_{\mathrm{z}}\sim 2.5~\mathrm{G}$ along the $z$-axis is used. \subsection{Detection} A grating stabilized diode laser at $866~\mathrm{nm}$ provides the light for probing the coupling of the ion Coulomb crystals with the standing wave field inside the optical cavity. It is injected into the cavity through the PT mirror. Additionally, a second grating stabilized diode laser with a wavelength of $894~\mathrm{nm}$ serves as an off-resonant reference laser and is simultaneously coupled to the cavity through the PT mirror and used to monitor the cavity resonance.
Both lasers are frequency stabilized to the same temperature stabilized reference cavity and have linewidths of $\sim 100~\mathrm{kHz}$. The reflectivity of the 866-nm cavity field is measured using an avalanche photo diode (APD). The light sent to the APD is spectrally filtered by a diffraction grating ($1800~\mathrm{lines/mm}$) and coupled to a single mode fiber. Taking into account the efficiency of the APD at $866~\mathrm{nm}$, the fiber incoupling and the optical losses, the total detection efficiency amounts to $\approx 16\%$. A similar detection system is used to measure the transmission of the 894-nm reference laser. Depending on the experiment, the reference laser serves two different purposes. In a first configuration, the length of the cavity is scanned at a rate of $30~\mathrm{Hz}$ over the atomic resonance. In this configuration, the frequency of the reference laser is tuned such that it is resonant at the same time as the probe laser in the cavity scan. This allows for monitoring slow drifts and acoustic vibrations. The signal of the weak probe laser is then averaged over typically 100 scans in which the stronger reference laser is used to keep track of the current position of the cavity resonance. In a second configuration, the cavity resonance is locked on the atomic resonance by stabilizing the length of the cavity to the frequency of the reference laser in a Pound-Drever-Hall locking scheme~\cite{Drever1983Laser}. During the measurement, imperfections in the stabilization are compensated for by monitoring the transmission of the 894-nm reference laser. The data is then postselected by only keeping data points for which the transmitted reference signal was above a certain threshold. \subsection{Experimental sequence} \label{sec:experimentalSequence} \begin{figure*} \caption{(color online) (a) Experimental sequence used to measure the collective coupling rate. (b) Energy levels of $^{40}\mathrm{Ca}^+$.\label{fig:experimentalSequence}} \end{figure*} In both configurations, the cavity reflection spectrum is measured at a rate of $50~\mathrm{kHz}$ using a $20~\mathrm{\mu s}$ sequence of Doppler cooling, optical pumping and probing, as indicated in Fig.~\ref{fig:experimentalSequence}. First, the ions are Doppler-laser cooled for $5~\mathrm{\mu s}$ by driving the $4\mathrm{s}^2\mathrm{S}_{1/2}\leftrightarrow 4\mathrm{p}^2\mathrm{P}_{1/2}$ transition using laser cooling beams at 397~nm (LC), and at the same time repumping on the $3\mathrm{d}^2\mathrm{D}_{3/2}\leftrightarrow 4\mathrm{p}^2\mathrm{P}_{1/2}$ transition with a laser at 866~nm (RP). Next, the ions are optically pumped to the ${m_{\mathrm{J}}}=+3/2$ magnetic substate of the $3\mathrm{d}^2\mathrm{D}_{3/2}$ level by applying the optical pumping laser (OP) in combination with the laser cooling beams (LC) for a period of $12~\mathrm{\mu s}$. The optical pumping laser is resonant with the $3\mathrm{d}^2\mathrm{D}_{3/2}\leftrightarrow 4\mathrm{p}^2\mathrm{P}_{1/2}$ transition and has a polarization consisting only of $\sigma^+$- and $\pi$-polarized components. It is sent to the trap under an angle of $45^\circ$ with respect to the quantization axis. By probing the populations of the different Zeeman sublevels, the efficiency of the optical pumping was measured to be $\eta = 97^{+3}_{-5}\%$~\cite{phdPeterHerskind}.
Finally, the cavity reflection signal is probed by injecting a $1.4~\mathrm{\mu s}$ $\sigma^-$-polarized probe pulse, resonant with the $3\mathrm{d}^2\mathrm{D}_{3/2}\leftrightarrow 4\mathrm{p}^2\mathrm{P}_{1/2}$ transition, into the $\mathrm{TEM}_{00}$ mode of the optical cavity. Its intensity is set such that the mean intracavity photon number is less than one at any time. With a delay of $0.1~\mathrm{\mu s}$ relative to the probe laser, the APD is turned on. The delay ensures that the field has built up inside the cavity and that the system has reached a quasi-steady-state. The length of the probing period was chosen in order to minimize the total sequence length as well as to avoid depopulation due to saturation of the transition~\cite{phdPeterHerskind}. \subsection{Effective number of ions}\label{sec:effectivenumberofions} \begin{figure}\label{fig:crystalAbsorptionAndPhaseShift} \end{figure} As mentioned above, the effective number of ions interacting with the cavity field depends on the ion crystal density and the overlap between the crystal and the cavity mode volume, where the density of the ion Coulomb crystals depends on the amplitude of the RF voltage~\cite{Hornekaer2001}: \begin{equation} \label{eq:ionDensity} \rho=\frac {\epsilon_0 U_{\mathrm{RF}}^2}{M r_0^4 \Omega_{\mathrm{RF}}^2}. \end{equation} Here, $M$ denotes the ion mass. The precise calibration of the RF voltage on the trap electrodes can be performed, e.g., on the basis of a zero-temperature charged liquid model~\cite{Turner1987Collective,Hornekaer2001,Herskind2009Positioning} or the measurement of the Wigner-Seitz radius~\cite{Herskind2009Positioning}. For the trap used in these experiments, $\rho=(6.01\pm0.08)\times 10^3 ~U_{\mathrm{RF}}^2~\mathrm{V^{-2}cm^{-3}}$. The crystal volume is found by taking fluorescence images of the crystal during Doppler-laser cooling, as shown in Fig.~\ref{fig:crystalAbsorptionAndPhaseShift}, from which the crystal half-length $L$ and radius $R$ can be extracted. Taking a possible offset between the cavity axis and the crystal revolution axis into account, the effective number of ions [see Eq.~\eqref{eq:effectiveNumberOfIonsAxiallyAveraged}] is then numerically calculated using the formula \begin{equation} N=\eta \frac{\rho}{2} \int_V \mathrm{d}x\, \mathrm{d}y\, \mathrm{d}z\;\exp \left\{ -2[(x-x_0)^2+(y-y_0)^2]/w_0^2\right\}, \end{equation} where the parameter $\eta$ accounts for a finite efficiency of the optical pumping preparation and $x_0$ and $y_0$ denote the radial offsets. These offsets can in principle be canceled to within a micron~\cite{Herskind2009Positioning}, but in the experiments reported here, they were measured to be $x_0=3.9~\mathrm{\mu m},~y_0=15.7~\mathrm{\mu m}$~\cite{Dantan2009Large}. The uncertainty in the effective number of ions comes from the uncertainty $\delta \rho$ in the density determination, due to the RF voltage calibration, the uncertainty in the crystal volume $\delta V$, due to the imaging resolution $\delta x$, and the uncertainty of the optical pumping efficiency $\delta \eta$. The relative uncertainty in the effective number of ions, $N=\eta \rho V$, can then be expressed as~\cite{phdPeterHerskind} \begin{equation} \label{eq:errorN} \frac{\delta N}{N}=\sqrt{\left(\frac{\delta\rho}{\rho}\right)^2+\left(\frac{\delta V}{V}\right)^2+\left(\frac {\delta \eta}{\eta}\right)^2}, \end{equation} where $\delta V/V=\delta x\sqrt{16L^2+R^2}/(2RL)$.
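A minimal numerical sketch of this procedure is given below; it assumes the crystal parameters quoted in Sec.~\ref{sec:CollectiveStrongCoupling} ($\rho=5.4\times10^{8}~\mathrm{cm^{-3}}$, $L=511~\mathrm{\mu m}$, $R=75~\mathrm{\mu m}$, $w_0=37~\mathrm{\mu m}$, $\eta=0.97$), neglects the small radial offsets, and treats the crystal as a uniformly filled spheroid.
\begin{verbatim}
# Minimal sketch: effective number of ions from the axially averaged overlap
# integral, for a uniformly filled spheroidal crystal (offsets neglected).
import math

rho   = 5.4e8 * 1e6   # ion density (m^-3)
Lhalf = 511e-6        # crystal half-length (m)
R     = 75e-6         # crystal radius (m)
w0    = 37e-6         # cavity waist (m)
eta   = 0.97          # optical pumping efficiency

def n_eff(nz=2000):
    # N = eta*(rho/2) * Int dz [ (pi*w0^2/2) * (1 - exp(-2*a(z)^2/w0^2)) ],
    # where a(z) = R*sqrt(1 - z^2/Lhalf^2) is the spheroid radius at height z.
    dz, total = 2 * Lhalf / nz, 0.0
    for i in range(nz):
        z = -Lhalf + (i + 0.5) * dz
        a = R * math.sqrt(max(0.0, 1.0 - (z / Lhalf) ** 2))
        total += (math.pi * w0 ** 2 / 2) * (1 - math.exp(-2 * a ** 2 / w0 ** 2)) * dz
    return eta * rho / 2 * total

# relative uncertainty: delta_rho/rho ~ 1.3%, delta_x ~ 1 um, delta_eta/eta ~ 4%
dV_V = 1e-6 * math.sqrt(16 * Lhalf ** 2 + R ** 2) / (2 * R * Lhalf)
dN_N = math.sqrt(0.013 ** 2 + dV_V ** 2 + 0.04 ** 2)
print(f"N ~ {n_eff():.0f} (quoted: 520 +24/-32), dN/N ~ {100 * dN_N:.0f}%")
\end{verbatim}
With these inputs the sketch returns $N\approx530$ and $\delta N/N\approx5\%$, in line with the values quoted in the next section.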
For the typically few-mm-long prolate crystals used in these experiments and an imaging resolution of $\delta x\sim 1~\mathrm{\mu m}$, Eq.~\eqref{eq:errorN} results in a relative uncertainty of 5-7\% in the effective number of ions. \section{Collective coupling measurements} \label{sec:experimentalResults} To achieve collective strong coupling on the chosen $3\mathrm{d}^2\mathrm{D}_{3/2},\;m_{\mathrm{J}}=+3/2\leftrightarrow 4\mathrm{p}^2\mathrm{P}_{1/2},\;m_{\mathrm{J}}=+1/2$ transition, the collective coupling rate $g\sqrt{N}$ has to be larger than the cavity field decay rate $\kappa=2\pi \times 2.1$ MHz and the optical dipole decay rate $\gamma=2\pi\times11.2$ MHz. With the known dipole element of the transition and the cavity geometry, the single-ion coupling rate at an antinode at the center of the cavity fundamental mode is expected to be $g=2\pi\times(0.53\pm0.01)$~MHz. One thus expects to be able to operate in the collective strong coupling regime as soon as $N\gtrsim 500$. \subsection{Atomic absorption and dispersion} \label{sec:CollectiveStrongCoupling} To investigate the coherent coupling of the ions with the cavity field in the collective strong coupling regime, we first perform measurements of the atomic absorption and dispersion of a given crystal with $N\sim 500$ by scanning the cavity length around atomic resonance and recording the probe reflectivity spectrum. The crystal used in these experiments is similar to the one shown in Fig.~\ref{fig:crystalAbsorptionAndPhaseShift}. With a density of $\rho=(5.4\pm0.1)\times10^8~\mathrm{cm}^{-3}$, a half-length $L=(511\pm1)~\mathrm{\mu m}$ and a radius $R=(75\pm1)~\mathrm{\mu m}$, the total number of ions in the crystal is $N_{\mathrm{tot}}=6500\pm200$, and the effective number of ions interacting with the cavity mode is $N=520^{+24}_{-32}$. \begin{figure} \caption{(Color online) Typical probe reflectivity for various values of the atomic detuning $\Delta$. The probe detuning in (a) is $\Delta\approx 2\pi\times54.3~\mathrm{MHz}$.\label{fig:Abs_scans_all}} \end{figure} \begin{figure} \caption{(Color online) (a) Effective cavity half width at half maximum $\kappa^\prime$ and (b) effective cavity detuning $\Delta_{\mathrm{c}}^\prime$ as a function of the probe detuning $\Delta$.\label{fig:absorptionWidthAndPhaseShift}} \end{figure} The broadening and the shift of the cavity resonance are then measured as a function of the detuning of the probe laser, $\Delta$. This is accomplished by scanning the cavity length over a range corresponding to $\sim1.3~\mathrm{GHz}$ at a repetition rate of $30~\mathrm{Hz}$, for a fixed value of $\Delta$. The width of the reflection dip for a given detuning $\Delta$ is found by averaging over 100 cavity scans, where the reference laser is overlapped with the probe laser on the cavity scan and used to compensate for any drift of the cavity. In Fig.~\ref{fig:Abs_scans_all}, cavity reflection scans are plotted for various detunings. Each data point corresponds to the average of 100 20-$\mathrm{\mu s}$ measurement sequences as shown in Fig.~\ref{fig:experimentalSequence}. As expected from Eq.~\eqref{eq:kappaPrimeTemperature}, the broadening of the intracavity field absorption reflects the two-level atomic medium absorption. Each set of data is, according to Eq.~\eqref{eq:reflectivity}, fitted to a Lorentzian from which the cavity half width at half maximum (HWHM) $\kappa'$ is deduced. Figure \ref{fig:absorptionWidthAndPhaseShift}(a) shows the modified cavity HWHM, $\kappa^\prime$, as a function of detuning of the probe laser, $\Delta$. Each point is the average of five measurements; the solid line is a fit according to Eq.~\eqref{eq:kappaPrimeTemperature}.
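In practice, extracting $g_{\mathrm{N}}$, $\gamma^\prime$, and $\kappa$ from the measured broadening amounts to a least-squares fit of Eq.~\eqref{eq:kappaPrimeTemperature}; a minimal sketch (with placeholder data arrays standing in for the measured HWHMs) is shown below.
\begin{verbatim}
# Minimal sketch of the fit of kappa'(Delta) = kappa + gN^2*gamma'/(gamma'^2 + Delta^2);
# all rates in units of 2*pi*MHz.  'delta' and 'hwhm' are placeholders for the
# measured probe detunings and Lorentzian half-widths.
import numpy as np
from scipy.optimize import curve_fit

def kappa_prime(delta, g_n, gamma_p, kappa):
    return kappa + g_n ** 2 * gamma_p / (gamma_p ** 2 + delta ** 2)

delta = np.linspace(-60.0, 60.0, 25)          # probe detunings (2*pi*MHz)
hwhm  = kappa_prime(delta, 12.2, 11.9, 2.1)   # replace with measured HWHMs
hwhm += 0.2 * np.random.default_rng(0).normal(size=delta.size)  # placeholder noise

(g_n, gamma_p, kappa), _ = curve_fit(kappa_prime, delta, hwhm, p0=[10.0, 11.0, 2.0])
print(f"gN = 2pi x {g_n:.1f} MHz, gamma' = 2pi x {gamma_p:.1f} MHz, "
      f"kappa = 2pi x {kappa:.1f} MHz")
\end{verbatim}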
From the fit we deduce a collective coupling rate of $g_{\mathrm{N}}=2\pi\times(12.2\pm0.2)~\mathrm{MHz}$, in good agreement with the theoretical expectation of $g_{\mathrm{N,~theory}}=2\pi\times(12.1^{+0.4}_{-0.5})~\mathrm{MHz}$, calculated for $N=520^{+24}_{-32}$ ions interacting with the cavity mode~\cite{Herskind2009Realization}. Furthermore, the effective dipole decay rate $\gamma'$ is left as a fit parameter to account for nonzero temperature effects, as discussed in Sec.~\ref{sec:EffectOfTheMotionOfTheIons}. The fit yields $\gamma^\prime=2\pi\times(11.9\pm0.4)~\mathrm{MHz}$, which would correspond to a temperature of $T=24^{+20}_{-14}~\mathrm{mK}$, and a natural half-width of the cavity of $\kappa=2\pi\times(2.2\pm0.1)~\mathrm{MHz}$, in good agreement with the value deduced from an independent measurement of the free spectral range (FSR) and the finesse of the cavity, $\kappa=2\pi\times(2.1\pm0.1)~\mathrm{MHz}$~\cite{Herskind2008Loading}. For the measurement of the effective cavity detuning, $\Delta_{\mathrm{c}}'$, the position of the 894-nm reference laser in the cavity scan is fixed to the bare cavity resonance. The frequency shift is then measured by comparing the position of the probe and the reference signal resonances in the cavity scan. The effective cavity detuning as a function of probe detuning is shown in Fig.~\ref{fig:absorptionWidthAndPhaseShift}(b). One observes the typical dispersive frequency-shift of two-level atoms probed in the low saturation regime. The data is fitted to the theoretical model according to Eq.~\eqref{eq:delta_cprime_Temperature}, to find a collective coupling rate $g_{\mathrm{N}}=2\pi\times(12.0\pm0.3)~\mathrm{MHz}$ and an effective dipole decay rate $\gamma^\prime=2\pi\times(12.7\pm0.8)~\mathrm{MHz}$. Both values are consistent with the previous measurement and the theoretical expectations. As in the previous measurement, the 894-nm reference laser is used to compensate for systematic drifts and acoustic vibrations. However, since this compensation method relies on the temporal correlations of the drifts in both signals, and thereby on their relative positions in the cavity scan, the compensation becomes less effective at large detunings. This is reflected in the larger spread and the larger error bars at larger detunings, which renders this method slightly less precise than the absorption measurement for evaluating the collective coupling rate. \subsection{Vacuum Rabi splitting} \label{sec:VaccumRabiSplitting} \begin{figure} \caption{(color online). Probe reflectivity signal as a function of $\Delta=\Delta_{\mathrm{c}}$, with the cavity locked on atomic resonance, for an empty cavity (blue triangles) and with an ion Coulomb crystal coupled to the cavity field (red circles).\label{fig:rabisplitting}} \end{figure} A third complementary method to measure the collective coupling rate is based on locking the cavity on atomic resonance, $\omega_{\mathrm{c}}=\omega_{\mathrm{at}}$. The response of the coupled atom-cavity system is then probed as a function of probe detuning $\Delta$, which is then equal to the cavity detuning $\Delta_{\mathrm{c}}$. The result of this measurement is shown in Fig.~\ref{fig:rabisplitting}. The blue triangles are obtained with an empty cavity, while the red circles were taken with the same ion Coulomb crystal as used in the previous experiments. Each data point is deduced from $2\times10^4$ experimental sequences (see Fig.~\ref{fig:experimentalSequence}).
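The shape of such spectra follows directly from Eq.~\eqref{eq:reflectivity} evaluated at $\Delta_{\mathrm{c}}=\Delta$. The short sketch below uses the rates quoted above ($g_{\mathrm{N}}=2\pi\times12.2$~MHz, $\gamma^\prime=2\pi\times11.9$~MHz, $\kappa=2\pi\times2.1$~MHz) and the estimate $\kappa_1\simeq0.7\kappa$ obtained from the quoted mirror transmissions.
\begin{verbatim}
# Minimal sketch: probe reflectivity with the cavity locked on the atomic
# resonance (Delta_c = Delta).  All rates in units of 2*pi*MHz; kappa1 is
# estimated from the mirror transmissions as T1/(T1 + T2 + L) ~ 0.7 of kappa.
import numpy as np

kappa, kappa1, gamma_p, g_n = 2.1, 0.7 * 2.1, 11.9, 12.2

def reflectivity(delta, g_n):
    kp  = kappa + g_n ** 2 * gamma_p / (gamma_p ** 2 + delta ** 2)  # effective decay rate
    dcp = delta - g_n ** 2 * delta / (gamma_p ** 2 + delta ** 2)    # effective detuning
    return np.abs((2 * kappa1 - kp - 1j * dcp) / (kp + 1j * dcp)) ** 2

delta = np.linspace(-40.0, 40.0, 401)
r_empty   = reflectivity(delta, 0.0)   # bare cavity: single narrow dip at Delta = 0
r_coupled = reflectivity(delta, g_n)   # coupled system: broadened, split response
print(r_empty.min(), r_coupled[np.abs(delta).argmin()])
\end{verbatim}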
The results are fitted using the theoretical expectations of Eq.~\eqref{eq:reflectivity} and Eqs.~\eqref{eq:kappaPrimeTemperature} and \eqref{eq:delta_cprime_Temperature} (solid lines in Fig.~\ref{fig:rabisplitting}) and yield $g_{\mathrm{N}}=2\pi\times(12.2\pm0.2)~\mathrm{MHz}$, a value that is in good agreement with the previous measurements. To facilitate the convergence of the more complex fitting function, the value of $\gamma^\prime$ in Eqs.~\eqref{eq:kappaPrimeTemperature} and \eqref{eq:delta_cprime_Temperature} was set to the one found in the previous absorption measurement.\\ From these three independent measurements of the collective coupling rate $g_{\mathrm{N}}$ and using the effective number of ions $N=520^{+24}_{-32}$, one deduces a single ion coupling rate of $g_{\mathrm{exp}}=2\pi\times (0.53\pm0.02)~\mathrm{MHz}$, which is in excellent agreement with the expected value of $g_{\mathrm{theory}}=2\pi\times(0.53\pm0.01)~\mathrm{MHz}$. \subsection{Scaling with the number of interacting ions} \label{sec:CouplingVsN} \begin{figure} \caption{(Color online) Cooperativity as a function of the effective number of ions. The solid line is a linear fit to the data and yields a scaling parameter of $\frac C N=(5.1^{+0.4}_{-0.2})\times10^{-3}$. The black dashed line indicates the limit above which collective strong coupling is achieved.\label{fig:CvsN}} \end{figure} To further check the agreement between the theoretical predictions and the experimental data, we investigated the dependence of the collective coupling rate on the effective number of ions. An attractive feature of ion Coulomb crystals is that the number of ions effectively interacting with a single mode of the optical cavity can be precisely controlled by the trapping potentials. While the density $\rho$ only depends on the amplitude of the RF voltage [see Eq.~\eqref{eq:ionDensity}], the aspect ratio of the crystal depends on the relative trap depths of the axial and radial confinement potentials, which can be independently controlled by the DC voltages on the endcap electrodes. This allows for controlling the number of effectively interacting ions down to the few-ion level. By analogy with the case of a single two-level system interacting with a single field mode of an optical cavity, the cooperativity parameter $C$ is defined here as (half) the ratio of the square of the effective coupling rate $g_\mathrm{N}$ to the cavity field decay rate $\kappa$ times the effective dipole decay rate $\gamma^\prime$ (taking into account the effect of the motion of the ions): $C=g_{\mathrm{N}}^2/(2\kappa\gamma^\prime)$. As can be seen from Eq.~\eqref{eq:kappaPrimeTemperature}, this parameter can be experimentally obtained by measuring, for a probe field tuned to atomic resonance ($\Delta=0$), the effective cavity field decay rate $\kappa^\prime(\Delta=0)=\kappa+\frac {g_{\mathrm{N}}^2}{\gamma^\prime}=\kappa~(1+2C)$. In Fig.~\ref{fig:CvsN}, the dependence of the cooperativity parameter, $C$, is plotted as a function of the effective number of ions interacting with the TEM$_{00}$ mode, where the effective number of ions was varied by using several crystals with different aspect ratios and densities. The effective number of ions in each crystal was deduced by applying the method described in Sec.~\ref{sec:effectivenumberofions}. The data points were obtained using $\sigma^-$-circularly polarized probe light, hence probing the population in the $m_{\mathrm{J}}=+3/2$ and $m_{\mathrm{J}}=+1/2$ substates, and show the expected linear dependence on the effective number of ions.
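As a simple consistency check, the cooperativity expected for the $N\simeq520$ crystal of Sec.~\ref{sec:CollectiveStrongCoupling} can be evaluated directly from the measured rates; the sketch below gives $C\approx3$, i.e., a ratio $C/N$ of the same order as the fitted scaling parameter.
\begin{verbatim}
# Minimal sketch: cooperativity C = gN^2/(2*kappa*gamma') for the N ~ 520
# crystal, using the measured rates (all in units of 2*pi*MHz).
g_n, kappa, gamma_p, n_eff = 12.2, 2.1, 11.9, 520

C = g_n ** 2 / (2 * kappa * gamma_p)
print(f"C = {C:.1f}, C/N = {C / n_eff:.1e}")  # ~3.0 and ~5.7e-3
# For comparison, the linear fit to the cooperativity data yields C/N ~ 5.1e-3.
\end{verbatim}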
From a linear fit (solid line) we deduce a scaling parameter $\frac C N=(5.1^{+0.4}_{-0.2})\times10^{-3}$. The limit where {\it collective} strong coupling is achieved ($g_{\mathrm N}>\kappa,\gamma$) is indicated by the black dashed line and is reached for $\approx 500$ interacting ions.\\ The largest coupling observed in these experiments was measured for a crystal with a length of $\sim 3~\mathrm{mm}$ and a density of $\sim 6\times 10^8~\mathrm{cm^{-3}}$ and amounted to $C=7.9\pm0.3$, corresponding to an effective number of ions of $N=1523^{+69}_{-93}$. This value exceeds previously measured cooperativities with ions in optical cavities by roughly one order of magnitude~\cite{Guthohrlein2001AsingleIon,Keller2004Continuous,Kreuter2004}. \\ \begin{figure} \caption{(color online). Vacuum Rabi splitting spectra ($\Delta=\Delta_{\mathrm{c}}$) measured for crystals with different effective numbers of ions.\label{fig:rabiFamily}} \end{figure} \begin{figure} \caption{(color online). Collective coupling rate $g_{\mathrm N}$ as a function of the effective number of ions.\label{fig:g_vs_N_exp}} \end{figure} Similarly, vacuum Rabi splitting spectra, such as the one presented in Fig.~\ref{fig:rabisplitting}, were measured for several crystals and aspect ratios. The result of such measurements is shown in Fig.~\ref{fig:rabiFamily}, showing clearly the increase in the separation between the coupled crystal+cavity normal modes as the number of ions is increased. The collective coupling rate $g_{\mathrm N}$, derived from fits to the theoretical expression Eq. \eqref{eq:reflectivity}, is plotted for different effective numbers of ions in Fig.~\ref{fig:g_vs_N_exp}. Taking the finite optical pumping efficiency into account and fitting the curve with the expected square-root dependence, we deduce a single ion coupling rate of $g=2\pi \times (0.53\pm0.01)~\mathrm{MHz}$, in good agreement with the previous measurements and the theoretical expectation. \section{Coherence time of collective Zeeman substate coherences} \label{sec:coherencetime} To evaluate the prospect for realizing coherent manipulations, we measured the decay time of the collective coherences between the Zeeman substates of the $3\mathrm{d}^2\mathrm{D}_{3/2}$ level. These coherences were established by the Larmor precession of the magnetic spin induced by an additional $B$-field transverse to the quantization axis. In the presence of this orthogonal $B$-field, the populations of the various substates undergo coherent oscillations, which are measured at different times in their free evolution by directly probing the coherent coupling between the cavity field and the ions. In order to be able to resolve the coherent population oscillations in time using the previous technique (probing time $\sim 1~\mathrm{\mu s}$), the amplitude of the longitudinal $B$-field was lowered to obtain oscillation periods in the $\sim 10~\mathrm{\mu s}$ range, and the optical pumping preparation was modified so as to minimize the effect of the transverse $B$-field. The reduced $B$-field along the quantization axis could in principle make the sample more sensitive to $B$-field fluctuations. Since these fluctuations might be one of the factors eventually limiting the achievable coherence time, we expect the coherence time measured by this method to be a lower bound as compared to the previous configuration with a larger longitudinal $B$-field.
\subsection{Experimental sequence and theoretical expectations}
\begin{figure*}
\caption{(Color online) (a) Experimental sequence used to measure the coherence time of collective Zeeman substate coherences in the $3\mathrm{d}^2\mathrm{D}_{3/2}$ level. (b) Corresponding level scheme.}
\label{fig:coherenceTime_SequenceAndLevelScheme}
\end{figure*}
The coherence time measurements required the experimental configuration and the measurement sequence to be slightly modified as compared to the collective coupling rate measurements described in Sec.~\ref{sec:experimentalSequence}. The Larmor precession is induced by an additional $B$-field component along the transverse $x$ direction, while the longitudinal magnetic field component $B_{\mathrm{z}}$ is lowered to optimize the contrast of the coherent population oscillations. The optical pumping light propagates along the $x$ axis and is $\pi$-polarized, hence transferring most of the ions symmetrically into the two outermost magnetic substates of the $3\mathrm{d}^2\mathrm{D}_{3/2}$ level, $m_{\mathrm{J}}=\pm 3/2$. The experimental sequence used to measure the coherence time is shown in Fig.~\ref{fig:coherenceTime_SequenceAndLevelScheme}. The ions are Doppler-laser cooled during the first $5~\mathrm{\mu s}$, followed by a $12~\mathrm{\mu s}$ optical pumping period. After the optical pumping, all lasers are turned off for a time $\tau$, allowing for the free evolution of the system. Finally, a weak $\sigma^-$-circularly polarized probe pulse is injected into the cavity, addressing the ions in the $m_{\mathrm{J}}=+1/2$ and $m_{\mathrm{J}}=+3/2$ substates. The steady-state cavity reflection is measured by collecting the reflected photons with the APD for $0.5~\mathrm{\mu s}$. The additional delay between the optical pumping preparation and the probing obviously lowers the repetition rate of the sequence significantly, especially for long delay times, and the number of data points obtained for each sweep of the cavity decreases accordingly. To compensate for this, the data points at longer delays had to be averaged over more cavity scans, which substantially increased the acquisition time and eventually limited these measurements to delays of $\sim 120~\mathrm{\mu s}$.
Based on a simple four-level model, the changes induced by the free Larmor precession in the populations of the Zeeman substates, $\ket{m_{\mathrm{J}}=\pm \nicefrac 1 2,~\pm \nicefrac 3 2}$, of the $3\mathrm{d}^2\mathrm{D}_{3/2}$ level can be calculated. For a homogeneous $B$-field with components $B_{\mathrm{x}}$ and $B_{\mathrm{z}}$, the Hamiltonian of the four-level system can be expressed in terms of the collective population operators,
\begin{equation}
\label{eq:larmorCollectivePopulationOperator}
\hat \Pi_{{m_{\mathrm{J}}}}=\sum_{j=1}^{N_{\mathrm{tot}}}\ket {m_{\mathrm{J}}}^{(j)}\bra{m_{\mathrm{J}}}^{(j)},
\end{equation}
and the collective spin operators
\begin{equation}
\label{eq:larmorCollectiveSpinOperator}
\hat \sigma_{{m_{\mathrm{J}}, m_{\mathrm{J}}^\prime}}=\sum_{j=1}^{N_{\mathrm{tot}}}\ket {m_{\mathrm{J}}}^{(j)}\bra{m_{\mathrm{J}}^\prime}^{(j)},\quad m_{\mathrm{J}}\ne m_{\mathrm{J}}^\prime.
\end{equation}
Here, $\ket{m_{\mathrm{J}}}^{(j)}$ and $\ket{m_{\mathrm{J}}^\prime}^{(j)}$ are the state kets of the $j$th ion with magnetic quantum number $m_{\mathrm{J}}$ and $m_{\mathrm{J}}^\prime$, respectively. The sum extends over the total number of ions.
In this notation, the Hamiltonian of the free evolution of a spin $J=\nicefrac 3 2$ system can be written as
\begin{eqnarray}
\nonumber
H_{\mathrm{B}}&=&\hbar \omega_{\mathrm{z}}\sum_{m_{\mathrm{J}}}m_{\mathrm{J}} \hat\Pi_{{m_{\mathrm{J}}}} + \frac { \hbar \omega_{\mathrm{x}}} 2 \sum_{m_{\mathrm{J}}}\sum_{m_{\mathrm{J}}^\prime} \hat\sigma_{{m_{\mathrm{J}},m_{\mathrm{J}}^\prime}}\times\\
&& \label{eq:HamiltonianLarmor} \left[\sqrt{\frac {15} 4 -m_{\mathrm{J}}(m_{\mathrm{J}}-1)} \delta_{{m_{\mathrm{J}},m_{\mathrm{J}}^\prime+1}} +\right. \\\nonumber
&& \left. \sqrt{\frac {15} 4 -m_{\mathrm{J}}(m_{\mathrm{J}}+1)} \delta_{{m_{\mathrm{J}},m_{\mathrm{J}}^\prime-1}} \right],
\end{eqnarray}
where the sums extend over the four Zeeman substates. Here, $\delta_{{m_{\mathrm{J}},m_{\mathrm{J}}^\prime+1}}$ is the Kronecker delta, and the Larmor frequencies $\omega_{\mathrm{z}}$ and $\omega_{\mathrm{x}}$ corresponding to the $z$ and $x$ components of the magnetic field are given by the product of the magnetic field amplitude and the gyromagnetic ratio $\gamma_{\mathrm{GM}}$:
\begin{equation}
\label{eq:BField}
\omega_{\mathrm{z}}=\gamma_{\mathrm{GM}} B_{\mathrm{z}},\quad \omega_{\mathrm{x}}=\gamma_{\mathrm{GM}} B_{\mathrm{x}}.
\end{equation}
For a $\sigma^-$-circularly polarized probe, the measured collective coupling to the cavity light will depend on the collective populations in the $m_{\mathrm{J}}=\nicefrac {+1} 2$ and $m_{\mathrm{J}}=\nicefrac {+3} 2$ substates. For a nonvanishing population in the $m_{\mathrm{J}}=\nicefrac {+1} 2$ state, the measured effective cavity decay rate, which was defined for a two-level system in Eq.~\eqref{eq:kappaPrime}, contains both contributions and is hence modified to
\begin{equation}
\label{eq:kappaPrime_Larmor}
\kappa^\prime(\tau)=\kappa+g_{\nicefrac 1 2}^2 N_{\nicefrac 1 2}(\tau)\frac{\gamma}{\gamma^2+\Delta_{\nicefrac 1 2}^2}+ g_{\nicefrac 3 2}^2 N_{\nicefrac 3 2}(\tau)\frac{\gamma}{\gamma^2+\Delta_{\nicefrac 3 2}^2},
\end{equation}
where $g_{m_{\mathrm{J}}}$, $N_{m_{\mathrm{J}}}$, and $\Delta_{m_{\mathrm{J}}}=\omega_{m_{\mathrm{J}}}-\omega_{\mathrm{l}}$ denote the single-ion coupling rate, the effective number of ions, and the atomic detunings of the relevant Zeeman substates $m_{\mathrm{J}}=\nicefrac {+1} 2,~\nicefrac {+3} 2$, respectively, and $\omega_{m_{\mathrm{J}}}$ is the frequency of the $3\mathrm d^2 \mathrm D_{3/2},~m_{\mathrm{J}}\leftrightarrow 4\mathrm p^2\mathrm P_{1/2},m_{\mathrm{J}}-1$ transition. Due to the induced Larmor precession, the effective number of ions in the individual Zeeman substates will be time-dependent. For a system initially prepared in a superposition state $\psi_0$, the population in a particular Zeeman substate at a certain time $\tau$ can be calculated from the projection of the time-evolved state, $\psi(\tau)=U(\tau) \psi_0$, onto this state. Here, $U(\tau)=\exp(-\nicefrac{i}{\hbar}\, H_{\mathrm{B}} \tau)$ denotes the time evolution operator. Straightforward but lengthy calculations show that the populations in the $\nicefrac {+1} 2$ and $\nicefrac {+3} 2$ Zeeman substates after a time $\tau$ are of the form $A\cos(\omega_{\mathrm{L}}\tau)+B\cos(2\omega_{\mathrm{L}}\tau)+C$, where $A$, $B$, and $C$ are constants depending on the efficiency of the optical pumping (i.e., the initial populations and coherences in the different Zeeman sublevels) and on the magnetic field amplitudes $B_{\mathrm{z}}$ and $B_{\mathrm{x}}$ (via $\omega_{\mathrm{z}}$ and $\omega_{\mathrm{x}}$). One thus obtains $N_{1/2}(\tau)$ and $N_{3/2}(\tau)$ using Eq. (8). It follows from
Eq.~\eqref{eq:HamiltonianLarmor} and $\kappa'(\tau)=\kappa(1+2C(\tau))$ that the measured cooperativity at time $\tau$ can be put in the form
\begin{equation}
\label{eq:cooperativity_Larmor}
C(\tau)=a \cos(\omega_{\mathrm{L}}\tau)+b\cos(2\omega_{\mathrm{L}}\tau)+c,
\end{equation}
where the Larmor frequency
\begin{equation}
\label{eq:LarmorFrequency}
\omega_{\mathrm{L}}=\sqrt{\omega_{\mathrm{z}}^2+\omega_{\mathrm{x}}^2}
\end{equation}
has been introduced. The parameters $a$, $b$, $c$ are constants depending on the efficiency of the optical pumping preparation and on the magnetic field amplitudes $B_{\mathrm{z}}$ and $B_{\mathrm{x}}$.
\subsection{Experimental results}
\begin{figure}
\caption{(Color online) Calibration of the Larmor frequency for different currents through the $B_{\mathrm{x}}$ coils.}
\label{fig:BFieldCalibration}
\end{figure}
\begin{figure}
\caption{(Color online) Larmor frequency as a function of the current through the $B_{\mathrm{x}}$ coils.}
\label{fig:BFieldCalibration2}
\end{figure}
The amplitudes of the magnetic fields, $B_{\mathrm{x}}$ and $B_{\mathrm{z}}$, at the position of the ion crystal were calibrated by measuring the dependence of the Larmor frequency $\omega_{\mathrm{L}}$ on the current used to drive the transverse magnetic field coils [see Eqs.~\eqref{eq:BField} and \eqref{eq:LarmorFrequency}]. The coupling obtained as a function of $\tau$ is shown for different currents $I_{\mathrm{x}}$ in Fig.~\ref{fig:BFieldCalibration}. The curves are fitted according to Eq.~\eqref{eq:cooperativity_Larmor}, yielding the individual Larmor frequencies. These frequencies are shown as a function of the current through the $B_{\mathrm{x}}$ coils in Fig.~\ref{fig:BFieldCalibration2}. Using the gyromagnetic ratio $\gamma_{\mathrm{GM}}=\frac {\mu_{\mathrm{B}} \mathfrak{g}_{3/2}} {\hbar}$ ($\mu_{\mathrm{B}}$ is the Bohr magneton, $\mathfrak{g}_{3/2}$ the Land\'e factor of the $3\mathrm{d}^2\mathrm{D}_{3/2}$ level), we deduce the magnetic fields along the two axes $B_{\mathrm{z}}=(0.134\pm0.002)~\mathrm{G}$ and $B_{\mathrm{x}}=(4.91\pm0.09)~\frac{\mathrm{G}}{\mathrm{A}}\times I_{\mathrm{x}}$.\\
\begin{figure}
\caption{(Color online) (a) Normalized cooperativity parameter as a function of delay $\tau$. Due to the presence of a non-zero $B$-field component orthogonal to the quantization axis, the cooperativity exhibits coherent oscillations [see Eq.~\eqref{eq:cooperativity_Larmor}]. (b) Same measurement with only the bias field along the quantization axis present ($B_{\mathrm{x}}=0$, $B_{\mathrm{z}}=0.15~\mathrm{G}$).}
\label{fig:coherenceConstantCoupling}
\label{fig:coherenceTime}
\end{figure}
To achieve a large contrast, the measurement was carried out with moderate $B$-field values $B_{\mathrm{x}}=B_{\mathrm{z}}=0.15~\mathrm{G}$ and the variation of the cooperativity was measured for $120~\mathrm{\mu s}$. To compensate for slow drifts during the measurement, each data point was normalized to the mean cooperativity, $\bar C$, averaged over one oscillation period. The normalized cooperativity is shown in Fig.~\ref{fig:coherenceTime}(a), together with a fit of the form of Eq.~\eqref{eq:cooperativity_Larmor}, where decoherence processes are taken into account by multiplying the oscillating terms with an exponential decay term $\exp(-\nicefrac {\tau} {\tau_{\mathrm{e}}})$, which would be expected, e.g., for a homogeneous broadening of the energy levels. From this fit, we deduce a coherence time of $\tau_{\mathrm{e}}=1.7^{+100}_{-0.8}~\mathrm{ms}$.
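The functional form of Eq.~\eqref{eq:cooperativity_Larmor} can also be checked numerically. For a single spin $J=\nicefrac 3 2$, the Hamiltonian \eqref{eq:HamiltonianLarmor} is simply $\hbar(\omega_{\mathrm{z}}J_{\mathrm{z}}+\omega_{\mathrm{x}}J_{\mathrm{x}})$, and evolving a symmetric initial population distribution indeed produces substate populations containing only the frequencies $\omega_{\mathrm{L}}$ and $2\omega_{\mathrm{L}}$. The following Python sketch illustrates this; the field values and the idealized initial state (all population shared equally between $m_{\mathrm{J}}=\pm 3/2$, with no coherences) are assumptions chosen for illustration only:
\begin{verbatim}
import numpy as np

# Spin-3/2 operators in the basis {m_J = +3/2, +1/2, -1/2, -3/2}
m = np.array([1.5, 0.5, -0.5, -1.5])
Jz = np.diag(m)
Jp = np.zeros((4, 4))
for i in range(1, 4):                       # <m+1|J+|m> = sqrt(J(J+1) - m(m+1))
    Jp[i - 1, i] = np.sqrt(15 / 4 - m[i] * (m[i] + 1))
Jx = (Jp + Jp.T) / 2

w_z = 2 * np.pi * 0.10e6                    # illustrative Larmor frequencies (rad/s)
w_x = 2 * np.pi * 0.10e6
H = w_z * Jz + w_x * Jx                     # hbar = 1
w_L = np.hypot(w_z, w_x)

rho0 = np.diag([0.5, 0.0, 0.0, 0.5])        # idealized pumping into m_J = +/- 3/2

vals, vecs = np.linalg.eigh(H)
taus = np.linspace(0.0, 120e-6, 1201)
pops = np.empty((len(taus), 2))
for k, tau in enumerate(taus):
    U = vecs @ np.diag(np.exp(-1j * vals * tau)) @ vecs.conj().T
    rho = U @ rho0 @ U.conj().T
    pops[k] = rho[0, 0].real, rho[1, 1].real   # populations of m_J = +3/2 and +1/2

# A Fourier transform of either population shows spectral weight only at w_L and
# 2*w_L (plus a constant), i.e. the form a*cos(w_L*tau) + b*cos(2*w_L*tau) + c.
\end{verbatim}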
This value is comparable to previously measured coherence times for single ions in linear Paul traps in equivalent magnetic-field-sensitive states \cite{Schmidt-Kaler2003} and might be further improved by active control of stray magnetic fields or by using state configurations that are less sensitive to magnetic fields. For inhomogeneous broadening, due to a magnetic field gradient across the crystal, the decoherence process would be better described by a Gaussian decay~\cite{Chaneliere2005}. Fitting the data assuming a Gaussian decay $\exp(- \nicefrac {\tau^2}{\tau_{\mathrm{g}}^2})$ in Eq.~\eqref{eq:cooperativity_Larmor} yields a coherence time of $\tau_{\mathrm{g}}=0.5_{-0.2}^{+0.6}~\mathrm{ms}$. Due to the limitation of our measurement to time delays of $\tau \lesssim 120~\mathrm{\mu s}$, it is at present not possible to distinguish between the two decay mechanisms. For comparison, the cooperativity as a function of probe delay, $C(\tau)$, was measured with only the bias field along the quantization axis present ($B_{\mathrm{x}}=0,~B_{\mathrm{z}}=0.15~\mathrm{G}$), as shown in Fig.~\ref{fig:coherenceConstantCoupling}(b). Here, the values are normalized to the mean cooperativity $\langle C \rangle$ averaged over all points. Within the error bars, the deduced cooperativities agree with a constant value of $\langle C \rangle=1.43\pm 0.02$ (solid line).
\section{Conclusion}\label{sec:conclusion}
To conclude, we have presented a detailed theoretical and experimental analysis of the experiments of~\cite{Herskind2009Realization}, which demonstrated the possibility of using large ion Coulomb crystals positioned in a moderately high-finesse optical cavity to enter the collective strong-coupling regime of CQED. The excellent agreement between the experimental results, including those of Ref.~\cite{Herskind2009Realization}, and the theoretical predictions makes ion Coulomb crystals promising candidates for the realization of quantum information processing devices such as quantum memories and repeaters~\cite{Duan2001,Kimble2008}. Using, for instance, cavity EIT-based protocols~\cite{Fleischhauer2000,Lukin2000,Dantan2008c,Gorshkov2007,Albert2011Cavity}, the obtained coupling strengths and coherence times could open the way to the realization of both high-efficiency \textit{and} long-lifetime quantum memories~\cite{Lvovsky2009Optical}. Moreover, the favorable properties of ion Coulomb crystals also allow for the manipulation of complex multimode photonic information~\cite{Lvovsky2009Optical} by exploiting the spatial~\cite{Dantan2009Large} or motional~\cite{Dantan2010Non-invasive} degrees of freedom of the crystal. Ion Coulomb crystals in optical cavities also have great potential for the investigation of cavity optomechanical phenomena~\cite{Kippenberg2008} and the observation of novel phase transitions~\cite{Garcia-Mata2007Frenkel-Kontorova,Retzker2008Double,Fishman2008Structural,Harkonen2009Dicke,Baumann2010Dicke} with cold, solid-like objects. We acknowledge financial support from the Carlsberg Foundation, the Danish Natural Science Research Council through the ESF EuroQUAM project CMMC, and the EU Commission through the FP7 ITN project CCQED and STREP project PICC.\\
\end{document}
\begin{document}
\title[Lagrangian structure of relativistic Vlasov systems]{On the Lagrangian structure of transport equations: relativistic Vlasov systems}
\author[H. Borrin]{Henrique Borrin}
\address{Departamento de Matem\'{a}tica Pura e Aplicada, Universidade Federal do Rio Grande do Sul, Porto Alegre - RS, Brazil.}
\email{[email protected]}
\author[D. Marcon]{Diego Marcon}
\address{Departamento de Matem\'{a}tica Pura e Aplicada, Universidade Federal do Rio Grande do Sul, Porto Alegre - RS, Brazil.}
\email{[email protected]}
\begin{abstract}
We study the Lagrangian structure of relativistic Vlasov systems, such as the relativistic Vlasov-Poisson system and the relativistic quasi-electrostatic limit of the Vlasov-Maxwell equations. We show that renormalized solutions of these systems are Lagrangian and that these notions of solution, in fact, coincide. As a consequence, finite-energy solutions are shown to be transported by a global flow. Moreover, we extend the notion of generalized solution to ``effective'' densities and we prove its existence. Finally, under a higher integrability assumption on the initial condition, we show that solutions have every energy bounded, even in the gravitational case. These results extend to our setting those obtained by Ambrosio, Colombo, and Figalli \cite{vlasovpoisson} for the Vlasov-Poisson system; here, we analyse relativistic systems and we consider the contribution of the magnetic force in the evolution equation.
\noindent \textbf{Keywords:} Relativistic Vlasov equation, transport equations, Lagrangian flows, renormalized solutions.
\noindent \textbf{2020 AMS Subject Classifications:} 35F25, 35Q83, 34A12, 37C10.
\end{abstract}
\maketitle
\section{Introduction}
\subsection{Overview}
In this paper, we are interested in the Lagrangian structure of relativistic Vlasov systems. These systems describe the evolution of a nonnegative distribution function $f:(0,\infty)\times \mathbb{R}^3\times \mathbb{R}^3\longrightarrow [0,\infty)$ under the action of a self-consistent acceleration:
\begin{equation}\label{prin}
\begin{cases}
\partial_t f_t +\hat{v}\cdot \nabla_x f_t+( E_t+\hat{v}\times B_t)\cdot \nabla_v f_t=0 & \text{ in }\quad (0,\infty)\times \mathbb{R}^3\times \mathbb{R}^3;\\[5pt]
\rho_t(x)=\int_{\mathbb{R}^3} f_t(x,v)\,\mathrm{d} v,\quad J_t(x)=\int_{\mathbb{R}^3}\hat{v} f_t(x,v)\,\mathrm{d} v& \text{ in }\quad (0,\infty)\times \mathbb{R}^3;\\[5pt]
E_t(x)=\sigma_E\int_{\mathbb{R}^3}\rho_t(y)K(x-y)\,\mathrm{d} y& \text{ in }\quad (0,\infty)\times \mathbb{R}^3;\\[5pt]
B_t(x)=\sigma_B\int_{\mathbb{R}^3}J_t(y)\times K(x-y)\,\mathrm{d} y& \text{ in }\quad (0,\infty)\times \mathbb{R}^3.
\end{cases}
\end{equation}
Here, $f_t(x,v)$ denotes the distribution of particles with position $x$ and velocity $v$ at time $t$, $\hat{v}\coloneqq (1+|v|^2)^{-1/2}v$ is the velocity of the particles (we assume the speed of light is $c=1$), $\sigma_E \in \{0,\pm 1\}$, $\sigma_B \in \{0,1\}$, and $K: \mathbb{R}^{3} \longrightarrow \mathbb{R}^{3}$ is given by $K(x)=(4\pi)^{-1}x/|x|^3$. Such systems are very important in mathematical physics and appear in a variety of physical models. Typically, $\rho_{t}$ and $J_{t}$ represent the density of particles and the relativistic particle current density, and $E_{t}$ and $B_{t}$ the electric and magnetic fields, respectively.
We postpone a derivation of \eqref{prin} and a more complete description of its significance to \Cref{deriv-model}, but summarize what \eqref{prin} models depending on the values of $\sigma_E$ and $\sigma_B$:
\begin{itemize}
\item \textbf{Relativistic Vlasov-Poisson equations:} charged particles under a self-consistent electric field, or particles under self-consistent electric and gravitational fields with particle charge $q> q_c$, if $\sigma_E=1$, $\sigma_B=0$; motion of galaxy clusters under a gravitational field, or particles under self-consistent electric and gravitational fields with particle charge $q<q_c$, if $\sigma_E=-1$, $\sigma_B=0$ (see, for instance, \cite[Chapter 5]{smoothed} and references therein);
\item \textbf{Relativistic Vlasov-Biot-Savart equations\footnote{This terminology, albeit not standard, is in analogy to the Vlasov-Poisson system, since the magnetic field obeys the Biot-Savart law.}:} charged particles under a self-consistent magnetic field, or particles under self-consistent quasi-electrostatic (QES) electromagnetic and gravitational fields with particle charge $q=q_c$, if $\sigma_E= 0$ and $\sigma_B=1$;
\item \textbf{QES relativistic Vlasov-Maxwell equations:} charged particles under a self-consistent QES electromagnetic field, or particles under self-consistent QES electromagnetic and gravitational fields with particle charge $q>q_c$, if $\sigma_E=\sigma_B=1$;
\item \textbf{Relativistic gravitational Vlasov-Biot-Savart equations:} charged particles under self-consistent magnetic and gravitational fields, or particles under self-consistent quasi-magnetostatic (QMS) electromagnetic and gravitational fields with particle charge $q<q_c$, if $\sigma_E=-1$ and $\sigma_B=1$.
\end{itemize}
Note that we allow $\sigma_B=\sigma_E=0$, in which case \eqref{prin} reduces to the linear transport equation, but its theory is classical and we shall not consider it. Moreover, the fact that the critical charge evolution system coincides with the Vlasov-Biot-Savart system suggests that the displacement current $\partial_t E_t$ behaves like a lower order term; see \eqref{QES} in \Cref{deriv-model}. This is well known in Electrodynamics \cite{jackson}; Maxwell predicted it theoretically as a correction to Amp\`{e}re's law. Nonetheless, we show that it behaves like a lower order term in the magnetic potential energy; see \Cref{magneticpositive} and \Cref{remarkpartialE}.

Concerning the existence of classical solutions of \eqref{prin}, we refer to \cite{15,36,66}, where the existence of local solutions for the relativistic Vlasov-Poisson system is established. As mentioned in \cite[Chapter 5, Section 1.5]{smoothed}, very little is known regarding the existence of global solutions for general initial data. However, existence results can be found, for instance, for spherically and axially symmetric initial data; see \cite{29,34}. The aforementioned results require higher integrability assumptions and moment conditions on the initial data. To be more physically relevant, one would like to avoid such hypotheses, even though classical solutions may then fail to exist. We thus consider renormalized and generalized solutions, which allow us to establish a Lagrangian structure for the system, global existence results, and (under suitable energy bounds) a global-in-time maximal regular flow, as we explain in the next section.
\subsection{Main results}
For our purposes, a crucial observation is that \eqref{prin} can be written as
\begin{equation}\label{transport}
\partial_t f_t + \textbf{b}_t\cdot\nabla_{x,v}f_t=0,
\end{equation}
where, for each fixed $t>0$, the vector field $\textbf{b}_t: \mathbb{R}^6 \longrightarrow \mathbb{R}^6$ is given by $\textbf{b}_t(x,v)=(\hat{v},E_t+\hat{v}\times B_t)$. Moreover, the vector field is divergence-free, since
\[
\nabla_{x,v}\cdot \textbf{b}_t = \nabla_v\cdot(\hat{v}\times B_t)=(\nabla_v \times\hat{v})\cdot B_t - \hat{v}\cdot (\nabla_v \times B_t)=0,
\]
where both terms vanish because $\hat{v}=\nabla_v\sqrt{1+|v|^2}$ is a gradient field and $B_t$ does not depend on $v$. By the transport nature of \eqref{transport}, it is expected that solutions have a Lagrangian structure, meaning that the initial condition $f_{0}$ is transported to $f_{t}$ by an associated flow. In the weak regularity regime, however, the existence of such a flow is not guaranteed by the classical Cauchy-Lipschitz theory. Indeed, since $K$ is locally integrable, we have $E_t,\, B_t \in L^1_{\operatorname{loc}}(\mathbb{R}^3;\mathbb{R}^3)$ whenever $f_t\in L^1(\mathbb{R}^6)$, so that $\textbf{b}_t$ is only in $L^1_{\operatorname{loc}}(\mathbb{R}^6;\mathbb{R}^6)$. Since $\textbf{b}_t$ is divergence-free, \eqref{transport} can be rewritten as
\[
\partial_t f_t + \nabla_{x,v}\cdot(\textbf{b}_tf_t)=0.
\]
The latter can be interpreted in the distributional sense provided $\textbf{b}_tf_t$ is locally integrable, which, however, does not follow only from the assumption $f_t\in L^1(\mathbb{R}^6)$. To treat this problem, we introduce a function $\beta \in C^1(\mathbb{R})\cap L^\infty (\mathbb{R})$ and observe that, by the chain rule and the divergence-free property of $\textbf{b}_t$,
\begin{equation}\label{betatransport}
\partial_t \beta(f_t)+\nabla_{x,v}\cdot(\textbf{b}_t\beta(f_t))=0
\end{equation}
whenever $f_t$ is a smooth solution of \eqref{transport}. Since $\beta$ is bounded, $\textbf{b}_t\beta(f_t)\in L^1_{\operatorname{loc}}$, which leads to the concept of a renormalized solution; see \cite{DiPerna-Lions-1988}.
\begin{definition}[Renormalized solution]\label{renormalized}
For a Borel vector field $\textbf{b}\in L_{\operatorname{loc}}^1( [0,T] \times \mathbb{R}^6;\mathbb{R}^6)$, we say a Borel function $f\in L_{\operatorname{loc}}^1([0,T]\times \mathbb{R}^6)$ is a renormalized solution of \eqref{transport} starting from $f_0$ if \eqref{betatransport} holds in the sense of distributions, that is,
\begin{equation}\label{prinbeta}
\int_{\mathbb{R}^6}\phi_0(x,v)\beta(f_0(x,v))\,\mathrm{d} x\,\mathrm{d} v +\int_0^T \!\! \int_{\mathbb{R}^6} \Big[\partial_t \phi_t(x,v)+\nabla_{x,v}\phi_t(x,v)\cdot\textbf{b}_t(x,v)\Big] \beta(f_t(x,v))\,\mathrm{d} x\,\mathrm{d} v\,\mathrm{d} t=0
\end{equation}
for all $\phi\in C_c^1([0,T)\times\mathbb{R}^6)$ and $\beta\in C^1(\mathbb{R})\cap L^\infty(\mathbb{R})$. Moreover, $f\in L^\infty((0,T);L^1(\mathbb{R}^6))$ is called a renormalized solution of \eqref{prin} starting from $f_0$ if, by setting
\[
\rho_t(x) \coloneqq\int_{\mathbb{R}^3} f_t(x,v)\,\mathrm{d} v,\quad E_t(x)\coloneqq\sigma_E\int_{\mathbb{R}^3}\rho_t(y)\, K(x-y)\,\mathrm{d} y,
\]
\begin{equation}\label{jBbt}
J_t(x) \coloneqq\int_{\mathbb{R}^3}\hat{v} f_t(x,v)\,\mathrm{d} v, \quad B_t(x)\coloneqq\sigma_B\int_{\mathbb{R}^3}J_t(y)\times K(x-y)\,\mathrm{d} y, \quad \text{and}
\end{equation}
\[
\textbf{b}_t(x,v) \coloneqq (\hat{v}, E_t(x)+\hat{v}\times B_t(x)),
\]
we have that $f_t$ satisfies \eqref{prinbeta}, for every $\phi\in C_c^1([0,T)\times\mathbb{R}^6)$ and every $\beta\in C^1(\mathbb{R})\cap L^\infty(\mathbb{R})$, with $\textbf{b}_t$ as in \eqref{jBbt}.
\end{definition}
Observe that the integrability assumption $f_t\in L^1(\mathbb{R}^6)$ is used so that $\rho_t,\, J_t$, $E_t$, and $B_t$ are well defined. From now on, we refer to $E_t$ and $B_t$ as the electric and the magnetic fields, respectively, even though $E_t$ may represent a gravitational field as well; see \Cref{deriv-model}.

Our first main result shows that distributional or renormalized solutions of \eqref{prin} are in fact Lagrangian solutions. This gives a characterization of solutions of \eqref{prin}, since Lagrangian solutions are generally stronger than renormalized or distributional solutions.
\begin{theorem}\label{existencesolution}
Let $T>0$ and $f$ be a nonnegative function. Assume $f\in L^\infty([0,T);L^1(\mathbb{R}^6))$ is weakly continuous in the sense that
\[
t\longmapsto \int_{\mathbb{R}^6}f_t\,\varphi\,\mathrm{d} x\,\mathrm{d} v \ \text{ is continuous for any } \varphi\in C_c(\mathbb{R}^6).
\]
Assume further that:
\begin{enumerate}[$(i)$]
\item\label{thmitem1} either $f \in L^\infty((0,T);L^\infty(\mathbb{R}^6))$ and $f_t$ is a distributional solution of \eqref{prin} starting from $f_0$; or
\item\label{thmitem2} $f_t$ is a renormalized solution of \eqref{prin} starting from $f_0$.
\end{enumerate}
Then, $f_t$ is a Lagrangian solution transported by the Maximal Regular Flow $\boldsymbol{X}(t,x)$ associated to $\textbf{b}_t(x,v)=(\hat{v},E_t(x)+\hat{v}\times B_t(x))$ (see \Cref{lagrangian}), starting at time $0$. In particular, $f_t$ is renormalized.
\end{theorem}
Next, in \Cref{generalized}, we introduce the concept of generalized solutions, which allows the electromagnetic field to be generated by effective densities $\rho^{\operatorname{eff}}$ and $J^{\operatorname{eff}}$. This may be interpreted as particles vanishing from the phase space but still contributing to the electromagnetic field in the physical space. In fact, generalized solutions are renormalized if the number of particles is conserved in time, as follows from \Cref{jeffequalj}. This indicates that, should renormalized solutions fail to exist, there must be a loss of mass/charge as $\hat{v}$ approaches the speed of light. Our second main theorem provides, under minimal assumptions on the initial datum, the global existence of generalized solutions.
\begin{theorem}[Existence of generalized solutions]\label{existencegeneral}
Let $f_0\in L^1(\mathbb{R}^6)$ be a nonnegative function. Then there exists a generalized solution $(f_t,\,\rho^{\operatorname{eff}}_t,\,J^{\operatorname{eff}}_t)$ of \eqref{prin} starting from $f_0$ (see \Cref{generalized}). Moreover, the map
\[
t \in [0,\infty) \longmapsto f_t\in L^1_{\operatorname{loc}}(\mathbb{R}^6)
\]
is continuous and the solution $f_t$ is transported by the Maximal Regular Flow associated to the field $\boldsymbol{b}^{\operatorname{eff}}_t(x,v)=(\hat{v},E_t^{\operatorname{eff}}+\hat{v}\times B^{\operatorname{eff}}_t)$.
\end{theorem}
In view of \Cref{existencegeneral}, if we assume higher integrability on the initial datum and bounded initial energy, we can prove the existence of a global Lagrangian solution. Moreover, we show strong continuity of the densities and fields and that each energy remains bounded at later times. Furthermore, we emphasize that our result holds even in the gravitational case $\sigma_{E} = -1$.
\begin{theorem}[Existence of global Lagrangian solution]\label{finalthm}
Let $f_0$ be a nonnegative function with every energy bounded (see \Cref{boundedenergy}).
Then there exists a global Lagrangian (hence renormalized) solution $f_t\in C([0,\infty);L^1(\mathbb{R}^6))$ of \eqref{prin} with initial datum $f_0$, and the flow is globally defined on $[0,\infty)$ for $f_0$-almost every $(x,v)\in\mathbb{R}^6$, with $f_t$ being the image of $f_0$ through the incompressible flow. Moreover, the following properties hold:
\begin{enumerate}[(i)]
\item the densities $\rho_t,\,J_t$ and the fields $E_t,\, B_t$ are strongly continuous in $L^1_{\operatorname{loc}}(\mathbb{R}^3)$;
\item for every $t\geq 0$, $f_t$ has every energy bounded independently of time.
\end{enumerate}
\end{theorem}
\subsection{Structure of the paper}
The paper is organized as follows. As previously mentioned, a discussion of the physical interpretation of \eqref{prin} is presented in \Cref{deriv-model}. In \Cref{sec:Lagrangian}, we prove \Cref{existencesolution}. More explicitly, we rely on the machinery for nonsmooth vector fields developed in \cite{existenceflow} to prove the equivalence of renormalized and Lagrangian solutions. Moreover, in \Cref{maincorollary}, we show that if the electromagnetic and relativistic energies are integrable in $[0,T]$, then the associated flow is globally defined in time. In \Cref{sec:generalized}, we extend the notion of generalized solutions from \cite[Definition 2.6]{vlasovpoisson} to our setting (see \Cref{generalized}) in order to allow an ``effective'' particle current density (along with the corresponding ``effective'' particle density), and we prove the existence of a Lagrangian solution with the ``effective'' acceleration (\Cref{existencegeneral}). Finally, in \Cref{sec:finite-energy}, we prove \Cref{finalthm} under the condition that each energy is bounded (see \Cref{boundedenergy}), obtaining a globally defined flow and a solution of \eqref{prin} for the whole range of $\sigma_E$ and $\sigma_B$.
\subsection*{Acknowledgements}
Henrique is partially supported by CAPES through a Master's scholarship. Diego is partially supported by CNPq-Brazil through grant 311354/2019-0.
\section{Lagrangian solution and associated flow}\label{sec:Lagrangian}
In this section, we prove \Cref{existencesolution}, which says that Lagrangian and renormalized solutions of \eqref{prin} are equivalent. For this, we use the machinery developed in \cite[Sections 4 and 5]{vlasovpoisson} combined with a version of \cite[Theorem 4.4]{vlasovpoisson} that we show holds for our vector field $\textbf{b}$ as well. From now on, we denote by $\mathcal{M}$ the space of measures with finite total mass, by $\mathcal{M}_+$ the space of nonnegative measures with finite total mass, by $\operatorname{AC}(I;\mathbb{R}^6)$ the space of absolutely continuous curves on the interval $I$ with values in $\mathbb{R}^6$, and by $\mathcal{L}^6$ the Lebesgue measure in $\mathbb{R}^6$. We begin with the preliminary definitions of regular and maximal regular flows:
\begin{definition}[Regular flow]\label{rf}
\textnormal{Fix $\tau_1<\tau_2$ and $B \subseteq \mathbb{R}^6$ a Borel set. For a Borel vector field $\textbf{b}:(\tau_1,\tau_2)\times\mathbb{R}^6\longrightarrow \mathbb{R}^6$, we say that $\textbf{X}:[\tau_1,\tau_2]\times B\longrightarrow \mathbb{R}^6$ is a regular flow of the vector field $\textbf{b}$ when}
\begin{enumerate}[$(i)$]
\item \textnormal{for a.e. $x\in B$, we have that $\textbf{X}(\cdot,x)\in \operatorname{AC}([\tau_1,\tau_2];\mathbb{R}^6)$ and that it solves the equation $\dot{x}(t)=\textbf{b}_t(x(t))$ a.e.
in $(\tau_1,\tau_2)$ with initial condition $\textbf{X}(\tau_1,x)=x$;}
\item \textnormal{there exists $C > 0$ such that $\textbf{X}(t,\cdot)_\#(\mathcal{L}^6\lefthalfcup B)\leq C\mathcal{L}^6$ for all $t\in [\tau_1,\tau_2]$. Note that $C$ can depend on the particular flow $\textbf{X}$.}
\end{enumerate}
\end{definition}
\begin{definition}[Maximal regular flow]\label{mrf}
\textnormal{For every $s\in(0,T)$, a Borel map $\textbf{X}(\cdot,s,\cdot)$ is said to be a maximal regular flow (starting at $s$) if there exist two Borel maps $T^+_{s,\textbf{X}}:\mathbb{R}^6\longrightarrow (s,T]$, $T^-_{s,\textbf{X}}:\mathbb{R}^6\longrightarrow [0,s)$ such that $\textbf{X}(\cdot,s,x)$ is defined in $(T^-_{s,\textbf{X}},T^+_{s,\textbf{X}})$ and}
\begin{enumerate}[$(i)$]
\item \textnormal{for a.e. $x\in\mathbb{R}^6$, we have that $\textbf{X}(\cdot,s,x)\in \operatorname{AC}((T^-_{s,\textbf{X}},T^+_{s,\textbf{X}});\mathbb{R}^6)$ and that it solves the equation $\dot{x}(t)=\textbf{b}_t(x(t))$ a.e. in $(T^-_{s,\textbf{X}},T^+_{s,\textbf{X}})$ with $\textbf{X}(s,s,x)=x$;}
\item \textnormal{there exists a constant $C > 0$ such that $\textbf{X}(t,s,\cdot)_\#(\mathcal{L}^6\lefthalfcup \{T^-_{s,\textbf{X}}<t<T^+_{s,\textbf{X}}\})\leq C\mathcal{L}^6$ for all $t\in [0,T]$. As before, this constant $C$ can depend on $\textbf{X}$ and $s$;}
\item \textnormal{for a.e. $x\in\mathbb{R}^6$, either $T^+_{s,\textbf{X}}=T$ and $\textbf{X}(\cdot,s,x)\in C([s,T];\mathbb{R}^6)$, or $\lim_{t\uparrow T^+_{s,\textbf{X}}}|\textbf{X}(t,s,x)|=\infty$. Analogously, either $T^-_{s,\textbf{X}}=0$ and $\textbf{X}(\cdot,s,x)\in C([0,s];\mathbb{R}^6)$, or $\lim_{t\downarrow T^-_{s,\textbf{X}}}|\textbf{X}(t,s,x)|=\infty$.}
\end{enumerate}
\end{definition}
The following lemma (compare with \cite[Theorem 4.4]{vlasovpoisson}), combined with the facts that $\textbf{b}_t$ is divergence-free in the sense of distributions a.e. in time and that $\boldsymbol{b}\in L^\infty((0,T);L^1_{\operatorname{loc}}(\mathbb{R}^6;\mathbb{R}^6))$, provides a sufficient condition for the existence and uniqueness of a maximal regular flow for the continuity equation.
\lemma\label{A2}
Let $\textbf{b}:(0,T)\times \mathbb{R}^6\longrightarrow \mathbb{R}^6 $ be given by $\textbf{b}_t(x,v)=(\textbf{b}_{1t}(v),\textbf{b}_{2t}(x,v))$, where
\[
\textbf{b}_{1}\in L^\infty((0,T);W^{1,\infty}_{\operatorname{loc}}(\mathbb{R}^3;\mathbb{R}^3)),
\]
\[
\textbf{b}_{2t}(x,v)=K\ast \rho_t(x)+\textbf{b}_{1t}(v)\times\int_{\mathbb{R}^3}K(y-x)\times\mathrm{d} J_t(y)\eqqcolon K\ast \rho_t(x)+\textbf{b}_{1t}(v)\times \tilde{\textbf{b}}_{2t}(x),
\]
with $\rho\in L^\infty((0,T); \mathcal{M}_+(\mathbb{R}^3))$ and $|J| \in L^\infty((0,T); \mathcal{M}_{+}(\mathbb{R}^3))$. Then, $\textbf{b}$ satisfies the following: for any nonnegative $\bar{\rho}\in L^\infty(\mathbb{R}^6)$ with compact support and any closed interval $[a,b]\subset [0,T]$, both continuity equations
\[
\frac{\mathrm{d}}{\mathrm{d} t}\tilde{\rho}_t\pm\nabla_{x,v}\cdot(\textbf{b}_t\tilde{\rho}_t)=0 \quad \text{in}\, (a,b)\times \mathbb{R}^6
\]
have at most one solution in the class of all nonnegative weakly* continuous functions $[a,b]\ni t\longmapsto \tilde{\rho}_t$ with $\tilde{\rho}_a=\bar{\rho}$ and $\cup_{t\in[a,b]}\operatorname{supp} \tilde{\rho}_t \Subset \mathbb{R}^6$.
\proof
We proceed as in \cite{vlasovpoisson}, giving the proof for autonomous vector fields (in particular, the densities $\rho,\, J$ are independent of time); the computations generalize to the time-dependent case.
We denote by $\mathcal{P}(X)$ the set of probability measures on $X$, and by $e_t: C([0,T];\mathbb{R}^6)\longrightarrow \mathbb{R}^6$ the evaluation map at time $t$, which means $e_t(\eta)\coloneqq \eta(t)$. Thanks to \cite[Theorem 5.1]{vlasovpoisson}, any two bounded compactly supported nonnegative distributional solutions with the same initial datum $\bar{\rho}$ can be represented by $\boldsymbol{\eta}_1,\,\boldsymbol{\eta}_2\in \mathcal{P}(C([0,T];B_R\times B_R))$. Setting $\boldsymbol{\eta}=(\boldsymbol{\eta}_1+\boldsymbol{\eta}_2)/2$, if we can prove that the disintegration $\boldsymbol{\eta}_x$ of $\boldsymbol{\eta}$ with respect to $e_0$ is a Dirac delta for $\bar{\rho}$-a.e. $x$, we deduce that $\boldsymbol{\eta}_x=(\boldsymbol{\eta}_1)_x=(\boldsymbol{\eta}_2)_x$ for $\bar{\rho}$-a.e. $x$, which gives $\boldsymbol{\eta}_1=\boldsymbol{\eta}_2$. Hence, it is enough to show that, given $\boldsymbol{\eta}\in \mathcal{P}(C([0,T];B_R\times B_R))$ concentrated on integral curves of $\textbf{b}$ such that $(e_t)_\# \boldsymbol{\eta}\leq C_0 \mathcal{L}^{6}$ for all $t\in[0,T]$, $\boldsymbol{\eta}_x$ is a Dirac delta for $(e_0)_\#\boldsymbol{\eta}$-a.e. $x$. For this purpose, consider the function
\[
\Phi_{\delta,\zeta}(t)\coloneqq \iiint \log\left(1+\frac{|\gamma^1(t)-\eta^1(t)|}{\zeta\delta}+\frac{|\gamma^2(t)-\eta^2(t)|}{\delta}\right)\mathrm{d}\mu(x,\eta,\gamma),
\]
where $\delta,\, \zeta \in(0,1)$ are small constants to be chosen later, $t\in[0,T]$, $\bar{\rho}\coloneqq (e_0)_\#\boldsymbol{\eta}$, $\mathrm{d}\mu(x,\eta,\gamma)\coloneqq \mathrm{d}\boldsymbol{\eta}_x(\gamma)\mathrm{d}\boldsymbol{\eta}_x(\eta)\mathrm{d}\bar{\rho}(x)$, with the notation $\eta(t)=(\eta^1(t),\eta^2(t))\in\mathbb{R}^3\times \mathbb{R}^3$. Note that $\mu\in \mathcal{P}(\mathbb{R}^6\times C([0,T];\mathbb{R}^6)^2)$ and $\Phi_{\delta,\zeta}(0)=0$. We assume by contradiction that $\boldsymbol{\eta}_x$ is not a Dirac delta for $\bar{\rho}$-a.e. $x$, which means that there exists a constant $a>0$ such that
\[
\iiint\left(\int_0^T\min\{|\gamma(t)-\eta(t)|,1\}\,\mathrm{d} t\right)\mathrm{d}\mu(x,\eta,\gamma)\geq a.
\]
Using Fubini's Theorem, the fact that the integrand is bounded by $1$ and that $\mu$ has mass $1$, we have that there exists a time $t_0\in(0,T]$ such that
\[
A\coloneqq \left\{(x,\eta,\gamma): \min\{|\gamma(t_0)-\eta(t_0)|,1\}\geq \frac{a}{2T}\right\}
\]
has $\mu$-measure at least $a/(2T)$. Without loss of generality, by assuming $a\leq 2T$, this implies that $|\gamma(t_0)-\eta(t_0)|\geq a/(2T)$ for all $(x,\eta,\gamma)\in A$, hence
\begin{equation}\label{Phibounded}
\Phi_{\delta,\zeta}(t_0)\geq \frac{a}{2T}\log\left(1+\frac{a}{2\delta T}\right).
\end{equation}
Computing the time derivative of $\Phi_{\delta,\zeta}$, we have that
\begin{equation}\label{derivativePhi}
\begin{split}
\frac{\mathrm{d}\Phi_{\delta,\zeta}}{\mathrm{d} t}(t)&\leq \iiint\bigg(\frac{|\textbf{b}_1(\gamma^2(t))-\textbf{b}_1(\eta^2(t))|}{\zeta(\delta+|\gamma^2(t)-\eta^2(t)|)} +\frac{\zeta|\textbf{b}_1(\gamma^2(t))\times(\tilde{\textbf{b}}_2(\gamma^1(t))-\tilde{\textbf{b}}_2(\eta^1(t)))|}{\zeta\delta+|\gamma^1(t)-\eta^1(t)|} \\
&+\frac{\zeta|(\textbf{b}_1(\gamma^2(t))-\textbf{b}_1(\eta^2(t)))\times \tilde{\textbf{b}}_2(\eta^1(t))|}{\zeta\delta+|\gamma^2(t)-\eta^2(t)|}+\frac{\zeta|K\ast\rho(\gamma^1(t))-K\ast\rho(\eta^1(t))|}{\zeta\delta+|\gamma^1(t)-\eta^1(t)|}\bigg)\mathrm{d}\mu(x,\eta,\gamma).
\end{split}
\end{equation}
By our assumption on $\textbf{b}_1$, the first summand is easily estimated using the Lipschitz regularity of $\textbf{b}_1$ in $B_R$:
\begin{equation}\label{firstterm}
\iiint\frac{|\textbf{b}_1(\gamma^2(t))-\textbf{b}_1(\eta^2(t))|}{\zeta(\delta+|\gamma^2(t)-\eta^2(t)|)}\mathrm{d}\mu(x,\eta,\gamma)\leq \frac{\|\nabla\textbf{b}_1\|_{L^\infty(B_R)}}{\zeta}.
\end{equation}
Analogously, the third summand is estimated using the boundedness of $\tilde{\textbf{b}}_2$ and the Lipschitz regularity of $\textbf{b}_1$ in $B_R$:
\begin{equation}\label{thirdterm}
\iiint \frac{\zeta|(\textbf{b}_1(\gamma^2(t))-\textbf{b}_1(\eta^2(t)))\times \tilde{\textbf{b}}_2(\eta^1(t))|}{\zeta\delta+|\gamma^2(t)-\eta^2(t)|} \mathrm{d}\mu(x,\eta,\gamma) \leq \zeta\|\nabla\textbf{b}_1\|_{L^\infty(B_R)}\|\tilde{\textbf{b}}_2\|_{L^1(B_R)}.
\end{equation}
For the second term, we have
\[\begin{split}
\iiint \frac{\zeta|\textbf{b}_1(\gamma^2(t))\times(\tilde{\textbf{b}}_2(\gamma^1(t))-\tilde{\textbf{b}}_2(\eta^1(t)))|}{\zeta\delta+|\gamma^1(t)-\eta^1(t)|}&\mathrm{d}\mu(x,\eta,\gamma)\\
\leq C\|\textbf{b}_1\|_{L^\infty(B_R)}&\iiint \frac{\zeta|K\ast\tilde{\rho}(\gamma^1(t))-K\ast \tilde{\rho}(\eta^1(t))|}{\zeta\delta+|\gamma^1(t)-\eta^1(t)|} \mathrm{d}\mu(x,\eta,\gamma),
\end{split}\]
where $\tilde{\rho}(y)\coloneqq \sup_{i}|J_i|(y)$. Since $J_i\in L^\infty((0,\infty); \mathcal{M}(\mathbb{R}^3))$, its total variation is well defined and has finite mass, thus
\[
\tilde{\rho}\in L^\infty((0,\infty); \mathcal{M}_+(\mathbb{R}^3)).
\]
By \cite[Theorem 4.4, estimate (4.13)]{vlasovpoisson}, we have that
\begin{equation}\label{secondterm}
\iiint \frac{\zeta|K\ast\varrho(\gamma^1(t))-K\ast \varrho(\eta^1(t))|}{\zeta\delta+|\gamma^1(t)-\eta^1(t)|} \mathrm{d}\mu(x,\eta,\gamma) \leq C\zeta\left(1+\log\left(\frac{C}{\zeta\delta}\right)\right)
\end{equation}
for any $\varrho\in L^\infty((0,\infty); \mathcal{M}_+(\mathbb{R}^3))$, where $C$ depends only on $\varrho(\mathbb{R}^3)$ and $R$. Hence, the second and fourth terms can be estimated by \eqref{secondterm}, applied with $\varrho=\tilde{\rho}$ and $\varrho=\rho$, respectively. Then, using \eqref{firstterm}, \eqref{thirdterm}, and \eqref{secondterm}, one can integrate \eqref{derivativePhi} with respect to time in $[0,t_0]$ to obtain
\[
\Phi_{\delta,\zeta}(t_0)\leq Ct_0\left(\frac{1}{\zeta}+\zeta+\zeta\log\left(\frac{C}{\zeta}\right)+\zeta\log\left(\frac{1}{\delta}\right)\right),
\]
where $C$ is a constant depending only on $R$, $\rho(\mathbb{R}^3)$, $\tilde{\rho}(\mathbb{R}^3)$, $\|\tilde{\textbf{b}}_2\|_{L^1(B_R)}$, and $\|\textbf{b}_1\|_{W^{1,\infty}(B_R)}$. Choosing first $\zeta>0$ small enough in order to have $Ct_0\zeta< a/(2T)$ and then letting $\delta \longrightarrow 0$, we find a contradiction with \eqref{Phibounded}, concluding the proof.
\endproof
As mentioned before, by \cite[Theorems 5.7, 6.1, 7.1]{existenceflow}, we obtain existence, uniqueness, and a semigroup property for the maximal regular flow (for a concise statement, see \cite[Theorem 4.3]{vlasovpoisson}). We now define generalized flows (analogous to \Cref{rf}) and Lagrangian solutions. For this, we set $\bar{\mathbb{R}}^6=\mathbb{R}^6\cup\{\infty\}$ and, given an open set $A\subset [0,\infty)$, we denote by $\operatorname{AC}_{\operatorname{loc}}(A;\mathbb{R}^6)$ the set of continuous curves $\gamma:A\longrightarrow \mathbb{R}^6$ that are absolutely continuous when restricted to any closed interval in $A$.
\begin{definition}[Generalized flow]
For a Borel vector field $\boldsymbol{b}:(0,T)\times\mathbb{R}^6\longrightarrow \mathbb{R}^6$, the measure $\boldsymbol{\eta}\in\mathcal{M}_+(C([0,T];\bar{\mathbb{R}}^6))$ is said to be a generalized flow of $\boldsymbol{b}$ if $\boldsymbol{\eta}$ is concentrated on the (Borel) set
\[
\Gamma\coloneqq \{\eta\in C([0,T];\bar{\mathbb{R}}^6): \eta\in \operatorname{AC}_{\operatorname{loc}}(\{\eta\neq\infty\};\mathbb{R}^6) \text{ and } \dot{\eta}(t)=\boldsymbol{b}_t(\eta(t)) \text{ for a.e. } t\in \{\eta(t)\neq\infty\}\}.
\]
The generalized flow is regular if there exists $C\geq 0$ such that
\[
(e_t)_\#\boldsymbol{\eta}\lefthalfcup\mathbb{R}^6\leq C \mathcal{L}^6 \quad \forall t\in[0,T].
\]
\end{definition}
\begin{definition}[Transported measures and Lagrangian solutions]\label{lagrangian}
Let $\boldsymbol{b}:(0,T)\times\mathbb{R}^6\longrightarrow \mathbb{R}^6$ be a Borel vector field having a maximal regular flow $\boldsymbol{X}$, and $\boldsymbol{\eta}\in\mathcal{M}_+(C([0,T];\bar{\mathbb{R}}^6))$ with $(e_t)_\#\boldsymbol{\eta}\ll \mathcal{L}^6$ for all $t\in[0,T]$. We say that $\boldsymbol{\eta}$ is transported by $\boldsymbol{X}$ if, for all $s\in[0,T]$, $\boldsymbol{\eta}$ is concentrated on
\[
\{\eta\in C([0,T];\bar{\mathbb{R}}^6):\eta(s)=\infty \text{ or } \eta(\cdot)=\boldsymbol{X}(\cdot,s,\eta(s)) \text{ in } (T^-_{s,\textbf{X}}(\eta(s)),T^+_{s,\textbf{X}}(\eta(s)))\}.
\]
Moreover, let $\rho\in L^\infty((0,T);L^1_{\operatorname{loc}}(\mathbb{R}^6))$ be a nonnegative distributional solution of the continuity equation, weakly continuous on $[0,T]$ in duality with $C_c(\mathbb{R}^6)$. We say that $\rho_t$ is a Lagrangian solution if there exists $\boldsymbol{\eta}\in\mathcal{M}_+(C([0,T];\bar{\mathbb{R}}^6))$ transported by $\boldsymbol{X}$ with $(e_t)_\#\boldsymbol{\eta}=\rho_t\mathcal{L}^6$ for every $t\in[0,T]$.
\end{definition}
By \cite[Theorem 4.7]{vlasovpoisson}, we have that, for $\boldsymbol{b}$ as in \Cref{A2}, regular generalized flows are transported by the maximal regular flow $\boldsymbol{X}$ of $\boldsymbol{b}$. We are now ready to prove \Cref{existencesolution}.
\proof[Proof of \Cref{existencesolution}]
Notice that the vector field $\boldsymbol{b}$ satisfies $\boldsymbol{b}\in L^\infty((0,T);L^1_{\operatorname{loc}}(\mathbb{R}^6;\mathbb{R}^6))$, is divergence-free, and has the uniqueness property for bounded, compactly supported, nonnegative distributional solutions of the continuity equation (see \Cref{A2}). Therefore, by \cite[Theorem 5.1]{vlasovpoisson}, we deduce that: if \eqref{thmitem1} holds, then $f_t$ is a Lagrangian solution; if \eqref{thmitem2} holds, then $\beta(f_t)$ is a Lagrangian solution, where $\beta(s)\coloneqq \arctan(s)$. In particular, by \cite[Theorem 4.10]{vlasovpoisson} we have that $f_t$ is a renormalized solution.
\endproof
We have a direct corollary that provides conditions to obtain a globally defined flow, that is, to avoid a finite-time blow-up.
\corollary\label{maincorollary}
Fix $T>0$ and let $f\in L^\infty((0,T);L^1(\mathbb{R}^6))$ be a nonnegative renormalized solution of \eqref{prin} (as in \Cref{renormalized}). Assume that
\begin{equation}\label{finiteenergy}
\int_0^T\int_{\mathbb{R}^6}\sqrt{1+|v|^2}f_t(x,v)\,\mathrm{d} x\,\mathrm{d} v\,\mathrm{d} t + \int_0^T \int_{\mathbb{R}^3} \tfrac{1}{2} |E_t|^2+ \tfrac{1}{2} |B_t|^2\,\mathrm{d} x\,\mathrm{d} t<\infty,
\end{equation}
that is, the relativistic and the electromagnetic energies of \eqref{prin} are integrable in time.
Then
\begin{enumerate}[(i)]
\item\label{corolitem1} The maximal regular flow $\boldsymbol{X}(t,\cdot)$ associated to $\boldsymbol{b}_t=(\hat{v},E_t+\hat{v}\times B_t)$ and starting from $0$ is globally defined on $[0,T]$ for $f_0$-a.e. $(x,v)$;
\item\label{corolitem2} $f_t$ is the image of $f_0$ through this flow, that is, $f_t=\boldsymbol{X}(t,\cdot)_\#f_0=f_0\circ \boldsymbol{X}^{-1}(t,\cdot)$ for all $t\in[0,T]$:
\[
\int_{\mathbb{R}^6}\phi(x,v)f_t(x,v)\,\mathrm{d} x\,\mathrm{d} v=\int_{\mathbb{R}^6}\phi\left(\boldsymbol{X}(t,x,v)\right)f_0(x,v)\,\mathrm{d} x\,\mathrm{d} v
\]
for all $ \phi \geq 0, \, t\in[0,T]$;
\item\label{corolitem3} the map
\[
[0,T]\ni t \longmapsto \int_{\mathbb{R}^6}\psi(f_t(x,v))\,\mathrm{d} x\,\mathrm{d} v
\]
is constant in time for all Borel $\psi: [0,\infty)\longrightarrow [0,\infty)$.
\end{enumerate}
\proof
Thanks to \Cref{existencesolution}, the solution is transported by the maximal regular flow associated to $\boldsymbol{b}_t=(\hat{v},E_t+\hat{v}\times B_t)$. Moreover, since $f_t$ is renormalized, $g_t\coloneqq \frac{2}{\pi}\arctan f_t : (0,T)\times \mathbb{R}^6\longrightarrow [0,1]$ is a solution of the continuity equation with vector field $\boldsymbol{b}$. Since $g_t^2\leq g_t\leq f_t$ and $|\hat{v}|< 1$, we have
\[\begin{split}
I&\coloneqq\int_0^T\int_{\mathbb{R}^6}\frac{|\boldsymbol{b}_t(x,v)|g_t(x,v)}{(1+(|x|^2+|v|^2)^{1/2})\log(2+(|x|^2+|v|^2)^{1/2})}\,\mathrm{d} x\,\mathrm{d} v\,\mathrm{d} t\\
&\leq C\int_0^T\int_{\mathbb{R}^6} f_t\, \mathrm{d} x\,\mathrm{d} v\,\mathrm{d} t +\int_0^T\int_{\mathbb{R}^6} \frac{(|E_t|+|B_t|)g_t}{(1+|v|)\log(2+|v|)}\,\mathrm{d} x\,\mathrm{d} v\,\mathrm{d} t\\
&\leq \left(\int_{\mathbb{R}^3}\frac{1}{(1+|v|)^3\log^2(2+|v|)}\mathrm{d} v\right)\left(\int_0^T\int_{\mathbb{R}^3}|E_t|^2+|B_t|^2\,\mathrm{d} x\,\mathrm{d} t\right)+C\int_0^T\int_{\mathbb{R}^6}(1+|v|)f_t\,\mathrm{d} x\,\mathrm{d} v\,\mathrm{d} t.
\end{split}\]
By \eqref{finiteenergy} and $(1+|v|)\leq \sqrt{2(1+|v|^2)}$, we conclude that $I$ is bounded. Now, by the no blow-up criterion in \cite[Proposition 4.11]{vlasovpoisson} we obtain that the maximal regular flow $\boldsymbol{X}$ of $\boldsymbol{b}$ is globally defined on $[0,T]$, and \eqref{corolitem1} follows. Moreover, the trajectories $\boldsymbol{X}(\cdot,x,v)$ belong to $\operatorname{AC}([0,T];\mathbb{R}^6)$ for $g_0$-a.e. $(x,v)\in \mathbb{R}^6$, and $g_t=\boldsymbol{X}(t,\cdot)_\#g_0=g_0\circ \boldsymbol{X}^{-1}(t,\cdot)$. Since $f_t=\tan \left(\frac{\pi}{2}g_t\right)$ and the map $[0,1) \ni s\longmapsto \tan\left(\frac{\pi}{2}s\right)\in[0,\infty)$ is a diffeomorphism, we obtain that $f_t=\boldsymbol{X}(t,\cdot)_\#f_0=f_0\circ \boldsymbol{X}^{-1}(t,\cdot)$, and \eqref{corolitem2} follows. In particular, for all Borel functions $\psi:[0,\infty)\longrightarrow [0,\infty)$ we have
\[
\int_{\mathbb{R}^6}\psi(f_t)\,\mathrm{d} x\,\mathrm{d} v=\int_{\mathbb{R}^6}\psi(f_0)\circ \boldsymbol{X}^{-1}(t,\cdot) \,\mathrm{d} x\,\mathrm{d} v=\int_{\mathbb{R}^6}\psi(f_0)\,\mathrm{d} x\,\mathrm{d} v,
\]
where the second equality follows by the incompressibility of the flow, which gives \eqref{corolitem3}.
\endproof
\begin{remark}
\textnormal{As in \cite[Remark 2.4]{vlasovpoisson}, given $0\leq s\leq t \leq T$, it is possible to reconstruct $f_t$ from $f_s$ by using the flow, that is, $f_t=\boldsymbol{X}(t,s,\cdot)_\#(f_s).$}
\end{remark}
\section{Existence of generalized solutions}\label{sec:generalized}
We now introduce the concept of a generalized solution, which allows the electromagnetic field to be generated by effective densities $\rho^{\operatorname{eff}}$ and $J^{\operatorname{eff}}$. We may interpret it as particles vanishing from the phase space but still contributing to the electromagnetic field in the physical space. Thus, it is natural to assume that $\rho^{\operatorname{eff}}_t$ may be larger than $\rho_t$, but that its total mass is bounded by that of the initial particle density $\rho_0$. Moreover, we assume that the particle current density $J^{\operatorname{eff}}_t$ is relativistic and compatible with $\rho^{\operatorname{eff}}_t$, that is, $|J^{\operatorname{eff}}_t|<\rho^{\operatorname{eff}}_t$ and the continuity equation is satisfied (see \eqref{conditionj1}, \eqref{conditionj2}, and \eqref{conditionj3} below).
\begin{definition}[Generalized solution]\label{generalized}
Given $\bar{f}\in L^1(\mathbb{R}^6)$, let $f\in L^\infty((0,\infty);L^1(\mathbb{R}^6))$ be a nonnegative function, $\rho^{\operatorname{eff}}_t\in L^\infty((0,\infty); \mathcal{M}_+(\mathbb{R}^3))$, and $(J_t^{\operatorname{eff}})_i\in L^\infty((0,\infty); \mathcal{M}(\mathbb{R}^3))$ for each component $i\in\{1,\,2,\,3\}$. We say that the triplet $(f_t,\, \rho_t^{\operatorname{eff}},\, J_t^{\operatorname{eff}})$ is a (global in time) generalized solution of \eqref{prin} starting from $\bar{f}$ if, setting
\begin{equation}\label{defeff}
\begin{split}
\rho_t(x)&\coloneqq\int_{\mathbb{R}^3} f_t(x,v)\,\mathrm{d} v,\quad E^{\operatorname{eff}}_t(x)\coloneqq\sigma_E\int_{\mathbb{R}^3} K(x-y)\,\mathrm{d} \rho^{\operatorname{eff}}_t(y),\\
J_t(x)&\coloneqq\int_{\mathbb{R}^3}\hat{v} f_t(x,v)\,\mathrm{d} v, \quad B^{\operatorname{eff}}_t(x)\coloneqq\sigma_B\int_{\mathbb{R}^3} K(y-x)\times\mathrm{d} J_t^{\operatorname{eff}}(y), \quad \text{and}\\
\textbf{b}^{\operatorname{eff}}_t(x,v)&\coloneqq (\hat{v}, E^{\operatorname{eff}}_t(x)+\hat{v}\times B^{\operatorname{eff}}_t(x)),
\end{split}
\end{equation}
the following hold: $f_t$ is a renormalized solution of the continuity equation with vector field $\boldsymbol{b}^{\operatorname{eff}}_t$ starting from $\bar{f}$,
\begin{subequations}
\begin{equation}\label{conditionj1}
\rho_t\leq \rho^{\operatorname{eff}}_t,\quad |J^{\operatorname{eff}}_t|< \rho^{\operatorname{eff}}_t\quad\text{ as measures for a.e. } t\in(0,\infty),
\end{equation}
\begin{equation}\label{conditionj2}
\rho^{\operatorname{eff}}_t(\mathbb{R}^3)\leq \| \bar{f}\|_{L^1(\mathbb{R}^{6})} \quad \text{ for a.e. } t\in(0,\infty), \text{ and}
\end{equation}
\begin{equation}\label{conditionj3}
\partial_t\rho^{\operatorname{eff}}_t+\nabla\cdot J^{\operatorname{eff}}_t=0\quad \text{with initial condition } \bar{\rho}=\int_{\mathbb{R}^3}\bar{f}\,\mathrm{d} v\text{, i.e.,}
\end{equation}
\[
\int_{\mathbb{R}^3}\phi_0\,\mathrm{d} \bar{\rho}+\int_0^\infty\int_{\mathbb{R}^3}(\partial_t\phi_t\,\mathrm{d} \rho^{\operatorname{eff}}_t+\nabla\phi_t\cdot\mathrm{d} J^{\operatorname{eff}}_t)\,\mathrm{d} t=0 \quad \forall \, \phi\in C^1_c([0,\infty)\times\mathbb{R}^3).
\]
\end{subequations}
\end{definition}
Notice that, by the Radon-Nikodym Theorem combined with \eqref{conditionj1}, there exists a vector field $V^{\operatorname{eff}}\in L^\infty((0,\infty);L^1(\rho^{\operatorname{eff}};\mathbb{R}^3))$ such that $\mathrm{d} J^{\operatorname{eff}}_t= V^{\operatorname{eff}}_t\,\mathrm{d}\rho^{\operatorname{eff}}_t$ and $|V^{\operatorname{eff}}_t(x)|< 1$ for a.e. $(t,x)\in(0,\infty)\times\mathbb{R}^3$. This is analogous to the continuity equation associated to \eqref{prin} with initial condition $\rho_0$, which is obtained by integrating \eqref{prin} with respect to $v$ over the whole domain $\mathbb{R}^3$:
\begin{equation}\label{continuityeq}
\int_{\mathbb{R}^3}\phi_0\,\mathrm{d} \rho_0+\int_0^\infty\int_{\mathbb{R}^3}(\partial_t\phi_t+\nabla\phi_t\cdot V_t)\,\mathrm{d} \rho_t\,\mathrm{d} t=0 \quad \forall \phi\in C^1_c([0,\infty)\times\mathbb{R}^3),
\end{equation}
where $V\coloneqq J/\rho\in L^\infty((0,\infty);L^1(\rho;\mathbb{R}^3))$ satisfies $\mathrm{d} J_t=V_t\,\mathrm{d} \rho_t$ and $|V_t(x)|< 1$ for a.e. $(t,x)\in(0,\infty)\times \mathbb{R}^3$. To see that \Cref{generalized} is in fact a generalization of \Cref{renormalized}, we remark that $\| \rho_t\|_{L^1(\mathbb{R}^{3})}=\| f_t\|_{L^1(\mathbb{R}^{6})}$, hence it follows from \eqref{conditionj1} and \eqref{conditionj2} that, if the number of particles is conserved a.e. in time, i.e., if $\|f_t\|_{L^1(\mathbb{R}^6)}=\|f_0\|_{L^1(\mathbb{R}^6)}$ for a.e. $t$, then $\rho^{\operatorname{eff}}_t=\rho_t$. Moreover, by \eqref{conditionj3} and \eqref{continuityeq}, we have that $\rho_t$ satisfies the continuity equation with both velocities $V_t$ and $V^{\operatorname{eff}}_t$ and with initial condition $\rho_0$. The following lemma gives that $V=V^{\operatorname{eff}}$, whence $J=J^{\operatorname{eff}}$.
\lemma\label{jeffequalj}
Assume that $\rho_t$ satisfies the continuity equation, with the same initial condition, for both vector fields $V$ and $V^{\operatorname{eff}}$, and that the integrability condition
\[
\int_0^T\int_{\mathbb{R}^3}\frac{|\boldsymbol{b}_t(x)|}{1+|x|}\,\mathrm{d}\rho_t(x)\,\mathrm{d} t<\infty
\]
holds for $\boldsymbol{b}=V$ and $\boldsymbol{b}=V^{\operatorname{eff}}$. Then $V=V^{\operatorname{eff}}$.
\proof
Consider the (convex) class $\mathcal{L}_{\boldsymbol{b}}$ of measure-valued solutions $\mu_t\in\mathcal{M}_+(\mathbb{R}^3)$ of the continuity equation with vector field $\boldsymbol{b}_t$ satisfying the monotonicity property
\[
0\leq \mu^\prime_t\leq \mu_t \quad \Longrightarrow \quad \mu^\prime_t\in \mathcal{L}_{\boldsymbol{b}}
\]
whenever $\mu^\prime_t$ still solves the continuity equation with vector field $\boldsymbol{b}_t$, and the integrability condition
\[
\int_0^T\int_{\mathbb{R}^3}\frac{|\boldsymbol{b}_t(x)|}{1+|x|}\,\mathrm{d}\mu_t(x)\,\mathrm{d} t<\infty.
\]
Notice that $\rho_t\in \mathcal{L}_{V}\cap \mathcal{L}_{V^{\operatorname{eff}}}$ for all $T>0$, hence by \cite{dipernalions}, we have
\begin{equation}\label{equalityvandveff}
\rho_t=\boldsymbol{X}(t,\cdot)_\#\rho_0=\boldsymbol{X}^{\operatorname{eff}}(t,\cdot)_\#\rho_0 \quad \forall \, t\in[0,T],
\end{equation}
where $\boldsymbol{X}$ and $\boldsymbol{X}^{\operatorname{eff}}$ are $\mathcal{L}_{V}$ and $\mathcal{L}_{V^{\operatorname{eff}}}$ Lagrangian flows, respectively, that is, $\boldsymbol{X}(t,\cdot)$ and $\boldsymbol{X}^{\operatorname{eff}}(t,\cdot)$ are the (unique) maps, defined $\rho_0$-a.e. and absolutely continuous in $t\in[0,T]$, such that
\[\begin{split}
\dot{\boldsymbol{X}}(t,\cdot)&=V_t(\boldsymbol{X}(t,\cdot)),\quad \dot{\boldsymbol{X}}^{\operatorname{eff}}(t,\cdot)=V^{\operatorname{eff}}_t(\boldsymbol{X}^{\operatorname{eff}}(t,\cdot)),\\
\boldsymbol{X}(0,\cdot)&=\boldsymbol{X}^{\operatorname{eff}}(0,\cdot)=\operatorname{Id}
\end{split}\]
$\rho_0$-almost everywhere. By \eqref{equalityvandveff} and the uniqueness of $\boldsymbol{X}$ and $\boldsymbol{X}^{\operatorname{eff}}$, we conclude that $V_t=V^{\operatorname{eff}}_t$.
\endproof
It follows that, if the number of particles is conserved in time, then generalized solutions are renormalized ones. This observation indicates that a generalized solution which is not renormalized must lose mass/charge as the velocity approaches the speed of light.

Next, our goal is to prove the global existence of generalized solutions $f_t$ for any nonnegative $f_0\in L^1(\mathbb{R}^6)$ (\Cref{existencegeneral}). In order to do so, we need to establish the existence of a (unique) distributional solution with smooth kernel and initial data. More precisely, we show that by smoothing the kernel $K$ and taking a nonnegative initial condition in $C_c^\infty(\mathbb{R}^6)$, we obtain a classical solution of \eqref{prin}. To avoid any confusion with the notation of \Cref{existencegeneral} and \Cref{finalthm}, we denote the smoothed kernel by $\pazocal{K}\coloneqq \eta\ast K$ and the initial condition by $g$.
\proposition\label{existencesmooth}
Let $g\in C_c^\infty(\mathbb{R}^6)$ be a nonnegative function. Then, there exists a unique nonnegative Lagrangian solution $f\in C^\infty([0,\infty)\times \mathbb{R}^6)$ of the smoothed system \eqref{prin}:
\begin{equation}\label{prinsmooth}
\begin{cases}
\partial_t f_t +\hat{v}\cdot \nabla_x f_t+( E_t+\hat{v}\times B_t)\cdot \nabla_v f_t=0 & \text{ in }\quad (0,\infty)\times \mathbb{R}^3\times \mathbb{R}^3;\\
\rho_t(x)=\int_{\mathbb{R}^3} f_t(x,v)\,\mathrm{d} v,\quad J_t(x)=\int_{\mathbb{R}^3}\hat{v} f_t(x,v)\,\mathrm{d} v& \text{ in }\quad (0,\infty)\times \mathbb{R}^3;\\
E_t(x)=\sigma_E\int_{\mathbb{R}^3}\rho_t(y)\pazocal{K}(x-y)\,\mathrm{d} y& \text{ in }\quad (0,\infty)\times \mathbb{R}^3;\\
B_t(x)=\sigma_B\int_{\mathbb{R}^3}J_t(y)\times\pazocal{K}(x-y)\,\mathrm{d} y& \text{ in }\quad (0,\infty)\times \mathbb{R}^3;\\
f_0(x,v)=g(x,v)& \text{ in }\quad \mathbb{R}^3\times \mathbb{R}^3.
\end{cases}
\end{equation}
\proof
In this proof, we adapt ideas and techniques from \cite[Chapter 5]{smoothed}. We construct by induction a sequence of smooth functions $f^n_t$ with initial condition $g$ which converges to a solution of \eqref{prinsmooth}.
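Before carrying out the construction, let us note, as a purely illustrative aside, that the iteration below has a direct numerical counterpart: freeze the fields generated by the previous iterate, integrate the characteristics, and repeat. The following Python sketch implements this idea under several assumptions that are not part of the proof, namely a particle discretization of $g$, a Plummer-type regularization of $K$ in place of the mollification $\pazocal{K}=\eta\ast K$, and an explicit Euler time stepping:
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(0)

sigma_E, sigma_B = 1.0, 1.0        # QES relativistic Vlasov-Maxwell case
eps = 0.3                          # regularization length (assumption)
Npart, Nsteps, dt = 400, 40, 0.02

def vhat(v):                       # relativistic velocity, c = 1
    return v / np.sqrt(1.0 + np.sum(v**2, axis=-1, keepdims=True))

def K_reg(x):                      # Plummer-type regularization of x/(4*pi*|x|^3)
    r2 = np.sum(x**2, axis=-1, keepdims=True) + eps**2
    return x / (4.0 * np.pi * r2**1.5)

# Particle discretization of a compactly supported nonnegative initial datum g
X0 = rng.normal(0.0, 1.0, (Npart, 3))
V0 = rng.normal(0.0, 0.5, (Npart, 3))
w = 1.0 / Npart                    # equal particle weights

def fields(Xs, Vs, x):
    """E and B at the points x, generated by the particle cloud (Xs, Vs)."""
    d = x[:, None, :] - Xs[None, :, :]
    Kd = K_reg(d)
    E = sigma_E * w * Kd.sum(axis=1)
    B = sigma_B * w * np.cross(vhat(Vs)[None, :, :], Kd).sum(axis=1)
    return E, B

def picard_step(prev):
    """One iterate: integrate the characteristics in the field of the previous iterate."""
    X, V = X0.copy(), V0.copy()
    traj = [(X.copy(), V.copy())]
    for k in range(Nsteps):
        Xp, Vp = prev[k]                       # previous iterate at time t_k
        E, B = fields(Xp, Vp, X)
        v = vhat(V)
        X = X + dt * v
        V = V + dt * (E + np.cross(v, B))
        traj.append((X.copy(), V.copy()))
    return traj

# Iterate 1: free streaming (zero field), as in the first step of the proof
traj = [(X0 + k * dt * vhat(V0), V0.copy()) for k in range(Nsteps + 1)]
for n in range(5):                             # successive iterates of the scheme
    traj = picard_step(traj)
\end{verbatim}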
For $n=1$, let $f^1$ be a solution of the linear transport equation \[\begin{cases} \partial_t f_t^1(x,v)+\nabla_x\cdot(\hat{v} f_t^1)(x,v)=0,\\ f^1_0(x,v)=g(x,v) \end{cases}\] which gives that \[ f^1_t(x,v)=g(x-t\hat{v},v)\in C_c^\infty([0,\infty)\times\mathbb{R}^6). \] Moreover, we have that $f^1$ is a Lagrangian solution, since there exists a unique solution $\boldsymbol{Z}^0(t,\cdot)\coloneqq (\boldsymbol{X}^0,\boldsymbol{V}^0)(t,\cdot)$ of \[\begin{cases} \dot{\boldsymbol{Z}}(t,\cdot)=\boldsymbol{b}^0_t(\boldsymbol{Z}(t,\cdot));\\ \boldsymbol{Z}(0,\cdot)= \operatorname{Id}, \end{cases}\] where $\boldsymbol{b}^0_t(x,v)\coloneqq (\hat{v},0)$. Hence, \[ f_t^1= g\circ \boldsymbol{Z}^0(t),\quad \|f_t^1\|_{L^1(\mathbb{R}^6)}=\|g\|_{L^1(\mathbb{R}^6)},\quad\text{and}\quad \|f_t^1\|_{L^\infty(\mathbb{R}^6)}=\|g\|_{L^\infty(\mathbb{R}^6)}. \] Now, for $n\geq 2$, assume that there exists a smooth Lagrangian function \[ f^n\in L^\infty([0,\infty)\times\mathbb{R}^6)\cap\, L^\infty([0,\infty);L^1(\mathbb{R}^6)) \] which satisfies \begin{equation}\label{approx} \begin{cases} \partial_t f^n_t(x,v)+\nabla_{x,v}\cdot(\boldsymbol{b}^{n-1} f_t^n)(x,v)=0,\\ f^n_0(x,v)=g(x,v), \end{cases} \end{equation} where \[ \boldsymbol{b}^n_t(x,v)=(\hat{v},\,E^n_t+\hat{v}\times B^n_t)(x,v), \] and define $f^{n+1}$ as a solution of \eqref{approx} with vector field $\boldsymbol{b}^n_t$. Notice that $\boldsymbol{b}^n_t$ is divergence-free, and since $f^n$ and $\pazocal{K}$ are smooth, we obtain that $\boldsymbol{b}^n_t$ is also smooth. Moreover, we have $\boldsymbol{b}^n\in L^\infty([0,\infty);W^{k,\infty}(\mathbb{R}^6;\mathbb{R}^6))$ for all $k\in \mathbb{N}$, since by Young's inequality (recall that $|J^n|<\rho^n$ a.e.) \begin{equation}\label{boundbn} \begin{split} \|D^k_{x,v}\boldsymbol{b}^n_t\|_{L^\infty([0,\infty);L^\infty(\mathbb{R}^6;\mathbb{R}^6))}\leq C\Big(1&+\|K\|_{L^1(B_1;\mathbb{R}^3)}\|D^k \eta\|_{L^\infty(\mathbb{R}^3)}\|\rho^n\|_{L^\infty([0,\infty);L^1(\mathbb{R}^3))}\\ &+\|K\|_{L^\infty(\mathbb{R}^3\setminus B_1;\mathbb{R}^3)}\|D^k \eta\|_{L^1(\mathbb{R}^3)}\|\rho^n\|_{L^\infty([0,\infty);L^1(\mathbb{R}^3))}\Big). \end{split} \end{equation} Thus, we have for all $t\geq 0$ a smooth incompressible flow $\boldsymbol{Z}^n(t)=(\boldsymbol{X}^n,\boldsymbol{V}^n)(t)$ which satisfies \begin{equation}\label{characteristic} \begin{cases} \dot{\boldsymbol{Z}}(t,\cdot)=\boldsymbol{b}^n_t(\boldsymbol{Z}(t,\cdot));\\ \boldsymbol{Z}(0,\cdot)= \operatorname{Id}, \end{cases} \end{equation} and the following properties hold: \begin{equation}\label{propertiesapprox} f_t^{n+1}= g\circ \boldsymbol{Z}^n(t),\quad \|f_t^{n+1}\|_{L^1(\mathbb{R}^6)}=\|g\|_{L^1(\mathbb{R}^6)},\quad\text{and}\quad \|f_t^{n+1}\|_{L^\infty(\mathbb{R}^6)}=\|g\|_{L^\infty(\mathbb{R}^6)}. \end{equation} Now, we want to exploit the fact that (recall that $g\in C^\infty_c$) \begin{equation}\label{newidea} |f^{n+1}_t-f^n_t|\leq C|\boldsymbol{Z}^n(t)-\boldsymbol{Z}^{n-1}(t)| \end{equation} to show that $f^n$ is a Cauchy sequence in $C([0,T]\times\mathbb{R}^6)$. 
For this purpose, notice that (we omit the $t$ and $(x,v)$ arguments for a cleaner presentation) \[\begin{split} |\boldsymbol{X}^n(s)-\boldsymbol{X}^{n-1}(s)|&\leq \int_s^t |\boldsymbol{V}^n(\tau)-\boldsymbol{V}^{n-1}(\tau)|+|\boldsymbol{V}^n(\tau)|\left|\frac{1}{\sqrt{1+|\boldsymbol{V}^n(\tau)|^2}}-\frac{1}{\sqrt{1+|\boldsymbol{V}^{n-1}(\tau)|^2}}\right|\,\mathrm{d} \tau\\ &\leq \int_s^t |\boldsymbol{V}^n(\tau)-\boldsymbol{V}^{n-1}(\tau)|+\left|\sqrt{1+|\boldsymbol{V}^n(\tau)|^2}-\sqrt{1+|\boldsymbol{V}^{n-1}(\tau)|^2}\right|\,\mathrm{d} \tau. \end{split}\] Thus, by mean value theorem, we conclude that \[ |\boldsymbol{X}^n(s)-\boldsymbol{X}^{n-1}(s)|\leq 2\int_s^t |\boldsymbol{V}^n(\tau)-\boldsymbol{V}^{n-1}(\tau)|\,\mathrm{d} \tau. \] Moreover, define $E^n$ and $B^n$ as in \eqref{prinsmooth} with densities $\rho^n$ and $J^n$, respectively. Now, by the same procedure as before combined with the uniform boundedness of $B^n$ (by \eqref{boundbn} and \eqref{propertiesapprox}), we have \[\begin{split} |\boldsymbol{V}^n(s)-\boldsymbol{V}^{n-1}(s)|\leq C\int_s^t |E^n_\tau(\boldsymbol{X}^n(\tau))-E^{n-1}_\tau(\boldsymbol{X}^{n-1}(\tau))|&+|B^n_\tau(\boldsymbol{X}^n(\tau))-B^{n-1}_\tau(\boldsymbol{X}^{n-1}(\tau))|\\ &+|\boldsymbol{V}^n(\tau)-\boldsymbol{V}^{n-1}(\tau)|\,\mathrm{d} \tau. \end{split}\] By \eqref{boundbn} and \eqref{propertiesapprox}, $E^n$ and $B^n$ are uniformly bounded with respect to $n$ and $t$, thus \[\begin{split} |E^n_\tau(\boldsymbol{X}^n(\tau))-E^{n-1}_\tau(\boldsymbol{X}^{n-1}(\tau))|&\leq |(E^n_\tau-E^{n-1}_\tau)(\boldsymbol{X}^n(\tau))|+|E^{n-1}_\tau(\boldsymbol{X}^n(\tau))-E^{n-1}_\tau(\boldsymbol{X}^{n-1}(\tau))|\\ &\leq \|E^n_\tau-E^{n-1}_\tau\|_{L^\infty(\mathbb{R}^3)}+C|\boldsymbol{X}^n(\tau)-\boldsymbol{X}^{n-1}(\tau)|, \end{split}\] and, analogously, \[ |B^n_\tau(\boldsymbol{X}^n(\tau))-B^{n-1}_\tau(\boldsymbol{X}^{n-1}(\tau))|\leq \|B^n_\tau-B^{n-1}_\tau\|_{L^\infty(\mathbb{R}^3)}+C|\boldsymbol{X}^n(\tau)-\boldsymbol{X}^{n-1}(\tau)|. \] Hence, we obtain that \[ |\boldsymbol{Z}^n(s)-\boldsymbol{Z}^{n-1}(s)|\leq C\int_s^t \|E^n_\tau-E^{n-1}_\tau\|_{L^\infty(\mathbb{R}^3)}+ \|B^n_\tau-B^{n-1}_\tau\|_{L^\infty(\mathbb{R}^3)}+|\boldsymbol{Z}^n(\tau)-\boldsymbol{Z}^{n-1}(\tau)|\,\mathrm{d} \tau. \] Thus, by Gronwall's inequality, we conclude that \[ |\boldsymbol{Z}^n(t)-\boldsymbol{Z}^{n-1}(t)|\leq C\int_0^t \|E^n_\tau-E^{n-1}_\tau\|_{L^\infty(\mathbb{R}^3)}+ \|B^n_\tau-B^{n-1}_\tau\|_{L^\infty(\mathbb{R}^3)} \,\mathrm{d} \tau. \] Now, by \eqref{propertiesapprox}, we have that $f^n\in C^\infty_c$, which combined with \eqref{newidea} and Young's inequality gives that \begin{equation}\label{replace} \begin{split} \|f^{n+1}_t-f^n_t\|_{L^\infty(\mathbb{R}^6)}&\leq C\int_0^t \|\rho^n_\tau-\rho^{n-1}_\tau\|_{L^\infty(\mathbb{R}^3)}+ \|J^n_\tau-J^{n-1}_\tau\|_{L^\infty(\mathbb{R}^3;\mathbb{R}^3)}\,\mathrm{d} \tau\\ &\leq C\int_0^t \|f^n_\tau-f^{n-1}_\tau\|_{L^\infty(\mathbb{R}^6)}\,\mathrm{d} \tau. \end{split} \end{equation} Therefore, by induction, we have that for all $T>0$, \[ \|f^{n+1}_t-f^n_t\|_{L^\infty(\mathbb{R}^6)}\leq C\frac{T^n}{n!}, \quad t\in[0,T], \] and we conclude that $f^n$ converges uniformly to a function $f\in C([0,\infty)\times\mathbb{R}^6)$. 
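For completeness, here is a short sketch of the induction behind the factorial bound. Writing $a_n(t)\coloneqq\|f^{n+1}_t-f^n_t\|_{L^\infty(\mathbb{R}^6)}$, the estimate \eqref{replace} reads $a_n(t)\leq C\int_0^t a_{n-1}(\tau)\,\mathrm{d} \tau$, so if $a_1\leq M$ on $[0,T]$, then by induction
\[
a_n(t)\leq M\,\frac{(Ct)^{n-1}}{(n-1)!}\leq M\,\frac{(CT)^{n-1}}{(n-1)!},\qquad t\in[0,T],
\]
which is summable in $n$; this is the bound abbreviated above (with the constants absorbed into $C$), and it yields the claimed uniform convergence on $[0,T]\times\mathbb{R}^6$.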
Moreover, by \eqref{propertiesapprox}, we have that $f_t=g\circ \boldsymbol{Z}(t)$, and $f\in L^\infty([0,\infty);L^1(\mathbb{R}^6))\cap L^\infty([0,\infty)\times\mathbb{R}^6)$, where \[ \boldsymbol{Z}(t,\cdot)\coloneqq \lim_{n\rightarrow \infty}\boldsymbol{Z}^n(t,\cdot). \] Notice that $f_t$ has compact support (since $g\in C^\infty_c$), thus $\rho^n$ and $J^n$ converge to $\rho$ and $J$ in $C([0,\infty)\times\mathbb{R}^3)$, respectively. Therefore, $E^n$ and $B^n$ converge to $E$ and $B$, thus $\boldsymbol{b}^n$ converges to $\boldsymbol{b}$ in $C([0,\infty)\times\mathbb{R}^6)$. By the same computation as \eqref{boundbn}, we have in fact that $\boldsymbol{b}\in C([0,\infty);W^{k,\infty}(\mathbb{R}^6))$ for all $k\in\mathbb{N}$, and we conclude, by passing to the limit in \eqref{characteristic}, that $\boldsymbol{Z}\in C^1([0,\infty); C^\infty(\mathbb{R}^6))$, and we have $f\in C^1([0,\infty); C^\infty(\mathbb{R}^6))$. By iteration, we conclude that $f$ is a smooth nonnegative Lagrangian solution of \eqref{prinsmooth}, where $\boldsymbol{Z}\in C^\infty([0,\infty); C^\infty(\mathbb{R}^6))$ solves \begin{equation}\label{charac} \begin{cases} \dot{\boldsymbol{Z}}(t,\cdot)=\boldsymbol{b}_t(\boldsymbol{Z}(t,\cdot));\\ \boldsymbol{Z}(0,\cdot)= \operatorname{Id}. \end{cases} \end{equation} In particular, we have that $f\in C_c^\infty([0,\infty)\times \mathbb{R}^6)$. To prove the uniqueness, assume that there exist two Lagrangian solutions $f,\,\tilde{f}$ of \eqref{prinsmooth}. Thus, \[ f_t\coloneqq g\circ\boldsymbol{Z}(t),\quad \tilde{f}_t\coloneqq g\circ\boldsymbol{\tilde{Z}}(t), \] where $\boldsymbol{Z}$ and $\boldsymbol{\tilde{Z}}$ solve \eqref{charac} with the vector fields built from $f$ and $\tilde{f}$, respectively. Thus, we may repeat the proof of \eqref{replace} for $f_t-\tilde{f}_t$ to obtain \[ \|f_t-\tilde{f}_t\|_{L^\infty(\mathbb{R}^6)}\leq C\int_0^t \|f_\tau-\tilde{f}_\tau\|_{L^\infty(\mathbb{R}^6)}\,\mathrm{d} \tau, \] and we conclude by Gronwall's inequality that $f\equiv\tilde{f}$. \endproof We are now able to prove our second main result. \proof[Proof of \Cref{existencegeneral}] Our proof follows the same general structure as the proof of \cite[Theorem 2.7]{vlasovpoisson}: we begin by approximating $f$ as an $L^1$ limit of $f^n$ (Steps 1 and 2), which was already shown in \cite{vlasovpoisson}; then, we approximate $(\rho^{\operatorname{eff}}_t,\,J^{\operatorname{eff}}_t)$ and show that the electromagnetic field of the approximation converges to the effective field $(E^{\operatorname{eff}}_t,\,B^{\operatorname{eff}}_t)$ (Steps 3 and 4); finally, in Step 5, we combine stability results for the continuity equation obtained in \cite[Section 5]{vlasovpoisson} to take limits in the approximated system and conclude that the limiting solution is transported by the limit of the incompressible flow. \textbf{Step 1: Approximating solutions.} Consider $K^n\coloneqq K\ast \eta^n$, where $\eta^n(x)\coloneqq n^3\eta(nx)$, and $\eta$ is a standard convolution kernel in $\mathbb{R}^3$. Let $f^n_0\in C^\infty_c(\mathbb{R}^6)$ be a sequence such that \begin{equation}\label{convergencel1f0} f_0^n\longrightarrow f_0 \text{ in } L^1(\mathbb{R}^6).
\end{equation} Moreover, denote by $f^n_t$ the smooth solution of \eqref{prin} with initial condition $f^n_0$ and kernel $K^n$ (see \Cref{existencesmooth}), with respective charge density, electric field, density current, and magnetic field given by \[\begin{split} \rho_t^n(x)&\coloneqq \int_{\mathbb{R}^3}f_t^n(x,v)\, \mathrm{d} v, \quad E^n_t(x)\coloneqq \sigma_E\int_{\mathbb{R}^3} \rho^n_t(y) K^n(x-y)\, \mathrm{d} y,\\ J^n_t (x)&\coloneqq \int_{\mathbb{R}^3} \hat{v} \,f_t^n(x,v)\, \mathrm{d} v,\quad \text{and} \quad B^n_t(x)\coloneqq \sigma_B\int_{\mathbb{R}^3} J^n_t(y)\times K^n(x-y)\, \mathrm{d} y. \end{split}\] Since $K^n$ is smooth and vanishes at infinity, we have $E^n,\,B^n\in L^\infty([0,\infty);W^{1,\infty}(\mathbb{R}^3;\mathbb{R}^3))$ (though without a uniform bound with respect to $n$). Hence, $\boldsymbol{b}_t^n\coloneqq (\hat{v},E^n_t+\hat{v}\times B^n_t)$ is a Lipschitz divergence-free vector field, and its flow $\boldsymbol{X}^n(t):\mathbb{R}^6\longrightarrow \mathbb{R}^6$ is well defined and incompressible, hence, by the classical theory for the transport equation, for all $t\in [0,\infty)$, \begin{equation}\label{boundfn} f_t^n= f_0^n\circ \boldsymbol{X}^n(t)^{-1}\quad\text{ and }\quad \|J_t^n\|_{L^1(\mathbb{R}^3,\mathbb{R}^3)}\leq\||\hat{v}|f_t^n\|_{L^1(\mathbb{R}^6)}<\|\rho_t^n\|_{L^1(\mathbb{R}^3)}=\|f_t^n\|_{L^1(\mathbb{R}^6)}=\|f_0^n\|_{L^1(\mathbb{R}^6)}. \end{equation} Assuming without loss of generality that $\mathcal{L}^{6}(\{f_0=k\})=0$ for every $k\in \mathbb{N}$ (otherwise, replace the levels $k$ by $k+\tau$ for some $\tau \in (0,1)$ such that $\mathcal{L}^{6}(\{f_0=k+\tau\})=0$ for every $k$), we deduce that for all $k$ \begin{equation}\label{convergencefnk} f_0^{n,k}\coloneqq \boldsymbol{1}_{\{k\leq f_0^n< k+1\}} f_0^n\longrightarrow f_0^k\coloneqq \boldsymbol{1}_{\{k\leq f_0< k+1\}} f_0 \quad \text{in } L^1(\mathbb{R}^6). \end{equation} Thus, by defining $f_t^{n,k}\coloneqq \boldsymbol{1}_{\{k\leq f_t^n< k+1\}} f_t^n$, we have that $f_t^{n,k}$ is a distributional solution of the continuity equation (with vector field $\boldsymbol{b}_t^n$) with initial datum $f_0^{n,k}$. Moreover, we have \begin{equation}\label{boundjnk} f_t^{n,k}= \boldsymbol{1}_{\{k\leq f_0^n\circ \boldsymbol{X}^n(t)^{-1}< k+1\}} f_0^n\circ \boldsymbol{X}^n(t)^{-1}, \quad \|f_t^{n,k}\|_{L^1(\mathbb{R}^6)}=\|f_0^{n,k}\|_{L^1(\mathbb{R}^6)} \quad \forall \, t\in[0,\infty). \end{equation} \textbf{Step 2: Limit in phase space.} By construction, $(f^{n,k})_{n\in\mathbb{N}}$ is a nonnegative uniformly bounded sequence. Hence, there exists $f^k\in L^\infty((0,\infty)\times\mathbb{R}^6)$ such that \begin{equation}\label{weakstarfnk} f^{n,k}\relbar\joinrel\rightharpoonup f^k \quad \text{weakly* in } L^\infty((0,\infty)\times\mathbb{R}^6) \quad\text{as } n\longrightarrow \infty \quad \forall\, k\in\mathbb{N}.
\end{equation} Moreover, for any $K\subset\joinrel\subset \mathbb{R}^6$ and any bounded function $\phi: (0,\infty)\longrightarrow (0,\infty)$ with compact support, testing the weak* convergence \eqref{weakstarfnk} against the function $\phi(t)\boldsymbol{1}_{K}(x,v)\operatorname{sign}(f_t^k)(x,v)$ and combining it with Fatou's lemma, the convergence \eqref{convergencefnk}, and \eqref{boundjnk}, we obtain \[ \int_0^\infty\phi(t)\|f^{k}_t\|_{L^1(K)}\mathrm{d} t\leq \left(\int_0^\infty\phi(t)\,\mathrm{d} t\right)\liminf_{n\rightarrow \infty}\|f^{n,k}_0\|_{L^1(\mathbb{R}^6)}=\left(\int_0^\infty\phi(t)\,\mathrm{d} t\right)\|f^{k}_0\|_{L^1(\mathbb{R}^6)}. \] Since $\phi$ was arbitrary, and by taking the supremum over all compact subsets $K\subset \mathbb{R}^6$, we obtain \begin{equation}\label{boundfk} \|f_t^k\|_{L^1(\mathbb{R}^6)}\leq\|f_0^k\|_{L^1(\mathbb{R}^6)} \quad \text{for a.e. } t\in(0,\infty), \end{equation} so, in particular, $f^k\in L^\infty((0,\infty);L^1(\mathbb{R}^6))$. Moreover, by defining $f=\sum_{k=0}^\infty f^k$, we have \begin{equation}\label{regularityf} \|f_t\|_{L^1(\mathbb{R}^6)}\leq\|f_0\|_{L^1(\mathbb{R}^6)} \quad \text{for a.e. } t\in[0,\infty). \end{equation} Noticing that $f^n=\sum_{k=0}^\infty f^{n,k}$, fixing $\varphi\in L^\infty((0,T)\times\mathbb{R}^6)$ and using \eqref{boundjnk} and \eqref{boundfk}, we have for all $k_0\geq 1$, \[\begin{split} \left|\int_0^T\int_{\mathbb{R}^6}\varphi(f^n-f)\,\mathrm{d} x\,\mathrm{d} v\, \mathrm{d} t\right|&\leq \left|\sum_{k=0}^{k_0-1}\int_0^T\int_{\mathbb{R}^6}\varphi(f^{n,k}-f^k)\,\mathrm{d} x\,\mathrm{d} v\, \mathrm{d} t\right|\\ &+T\|\varphi\|_{L^\infty((0,T)\times\mathbb{R}^6)}\sum_{k=k_0}^\infty\int_{\mathbb{R}^6}(|f^{n,k}_0|+|f^k_0|)\,\mathrm{d} x\,\mathrm{d} v. \end{split}\] Now, by the convergence \eqref{weakstarfnk} the first term vanishes as $n\longrightarrow \infty$. Thus, by convergences \eqref{convergencel1f0} and \eqref{convergencefnk}, we have \[\begin{split} \limsup_{n\rightarrow \infty}\left|\int_0^T\int_{\mathbb{R}^6}\varphi(f^n-f)\,\mathrm{d} x\,\mathrm{d} v\, \mathrm{d} t\right|\leq 2T\|\varphi\|_{L^\infty((0,T)\times\mathbb{R}^6)}\|f_0\boldsymbol{1}_{\{f_0\geq k_0\}}\|_{L^1(\mathbb{R}^6)}. \end{split}\] Letting $k_0\longrightarrow \infty$ and since $\varphi\in L^\infty$ was arbitrary, we conclude \begin{equation}\label{weakfn} f^n\relbar\joinrel\rightharpoonup f \quad \text{ weakly in } L^1((0,T)\times\mathbb{R}^6). \end{equation} \textbf{Step 3: Limit in physical densities.} Since $(\rho^n)_{n\in \mathbb{N}}$ and $(J^n_i)_{n\in \mathbb{N}}$ are bounded sequences in $L^\infty((0,\infty);\mathcal{M}_+(\mathbb{R}^3))$ and $L^\infty((0,\infty);\mathcal{M}(\mathbb{R}^3))$, respectively, for each component $i\in\{1,\,2,\,3\}$ (see \eqref{boundfn}), and $L^\infty((0,\infty);\mathcal{M}(\mathbb{R}^3))= [L^1((0,\infty);C_0(\mathbb{R}^3))]^*$, there exist $\rho^{\operatorname{eff}}\in L^\infty((0,\infty);\mathcal{M}_+(\mathbb{R}^3))$ and $J_i^{\operatorname{eff}}\in L^\infty((0,\infty);\mathcal{M}(\mathbb{R}^3))$ such that \begin{equation}\label{convergencerhoj} \begin{split} \rho^n&\relbar\joinrel\rightharpoonup \rho^{\operatorname{eff}}\quad \text{weakly* in } L^\infty((0,\infty);\mathcal{M}_+(\mathbb{R}^3));\\ J^n_i&\relbar\joinrel\rightharpoonup J^{\operatorname{eff}}_i\quad \text{weakly* in } L^\infty((0,\infty);\mathcal{M}(\mathbb{R}^3)), \end{split} \end{equation} for each component $i\in\{1,\,2,\, 3\}$.
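For the reader's convenience, the uniform bound behind this weak* compactness (a direct consequence of \eqref{boundfn} and the stated duality; the constant is not optimal) reads, for $\varphi\in L^1((0,\infty);C_0(\mathbb{R}^3))$,
\[
\left|\int_0^\infty\int_{\mathbb{R}^3}\varphi_t(x)\,J^{n}_{i,t}(x)\,\mathrm{d} x\,\mathrm{d} t\right|
\leq\int_0^\infty\|\varphi_t\|_{C_0(\mathbb{R}^3)}\,\|J^n_t\|_{L^1(\mathbb{R}^3,\mathbb{R}^3)}\,\mathrm{d} t
\leq\|f^n_0\|_{L^1(\mathbb{R}^6)}\,\|\varphi\|_{L^1((0,\infty);C_0(\mathbb{R}^3))},
\]
and similarly for $\rho^n$; the extraction of the weak* limits is then an instance of the Banach--Alaoglu theorem.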
Hence, by the lower semicontinuity of the norm under weak* convergence, we have \begin{equation}\label{boundednessrhoeff} \operatorname{ess}\,\operatorname{sup}_{t\in(0,\infty)}|\rho^{\operatorname{eff}}_t|(\mathbb{R}^3)\leq \lim_{n\rightarrow \infty}\left(\sup_{t\in(0,\infty)}\|\rho^n_t\|_{L^1(\mathbb{R}^3)}\right)=\lim_{n\rightarrow \infty}\|\rho^n_0\|_{L^1(\mathbb{R}^3)}=\|f_0\|_{L^1(\mathbb{R}^6)}. \end{equation} Now, fixing a nonnegative function $\varphi\in C_c((0,\infty)\times\mathbb{R}^3)$, by \eqref{weakfn} and \eqref{convergencerhoj}, we obtain that \[\begin{split} \int_0^\infty\int_{\mathbb{R}^3}\varphi_t(x)\,\mathrm{d} \rho_t^{\operatorname{eff}}(x)\,\mathrm{d} t &\geq\lim_{R\rightarrow\infty}\liminf_{n\rightarrow \infty}\int_0^\infty\int_{\mathbb{R}^3\times B_R} f_t^n(x,v)\varphi_t(x)\,\mathrm{d} v\,\mathrm{d} x\,\mathrm{d} t\\ &=\int_0^\infty\int_{\mathbb{R}^6} f_t(x,v)\varphi_t(x)\,\mathrm{d} v\,\mathrm{d} x\,\mathrm{d} t=\int_0^\infty\int_{\mathbb{R}^3}\varphi_t(x)\,\mathrm{d} \rho_t(x)\,\mathrm{d} t. \end{split}\] Moreover, by recalling that $|\hat{v}|<1$, we have \[\begin{split} \int_0^\infty\int_{\mathbb{R}^3}\varphi_t(x)\,\mathrm{d} \rho_t^{\operatorname{eff}}(x)\,\mathrm{d} t &=\lim_{n\rightarrow \infty}\int_0^\infty\int_{\mathbb{R}^6} f_t^n(x,v)\varphi_t(x)\,\mathrm{d} v\,\mathrm{d} x\,\mathrm{d} t\\ &>\lim_{n\rightarrow \infty}\int_0^\infty\int_{\mathbb{R}^6} |\hat{v}|\,f^n_t(x,v)\varphi_t(x)\,\mathrm{d} v\,\mathrm{d} x\,\mathrm{d} t\\ &\geq\int_0^\infty\int_{\mathbb{R}^3}\varphi_t(x)\,\mathrm{d} |J^{\operatorname{eff}}_t|(x)\,\mathrm{d} t. \end{split}\] Thus, \begin{equation}\label{boundednessrhoandj} \rho_t\leq \rho^{\operatorname{eff}}_t,\quad |J^{\operatorname{eff}}_t|< \rho^{\operatorname{eff}}_t\quad\text{ as measures for a.e. } t\in(0,\infty). \end{equation} Finally, by the same argument to show \eqref{continuityeq}, we notice that \[ \int_{\mathbb{R}^3}\phi_0\,\mathrm{d} \rho^n_0+\int_0^\infty\int_{\mathbb{R}^3}(\partial_t\phi_t\,\mathrm{d} \rho^n_t+\nabla\phi_t\cdot\,\mathrm{d} J^n_t)\,\mathrm{d} t=0 \quad \forall \phi\in C^1_c([0,\infty)\times\mathbb{R}^3). \] Hence, by \eqref{convergencel1f0} and \eqref{convergencerhoj}, we conclude by taking the limit $n\longrightarrow \infty$ that \[ \int_{\mathbb{R}^3}\phi_0\,\mathrm{d} \rho_0+\int_0^\infty\int_{\mathbb{R}^3}(\partial_t\phi_t\,\mathrm{d} \rho^{\operatorname{eff}}_t+\nabla\phi_t\cdot\mathrm{d} J^{\operatorname{eff}}_t)\,\mathrm{d} t=0 \quad \forall \, \phi\in C^1_c([0,\infty)\times\mathbb{R}^3), \] i.e., \begin{equation}\label{continuityverified} \partial_t\rho^{\operatorname{eff}}_t+\nabla\cdot J^{\operatorname{eff}}_t=0 \quad \text{as measures with initial condition } \rho_0. \end{equation} \textbf{Step 4: Limit of vector fields.} Using the definition \eqref{defeff}, we claim that \begin{equation}\label{convergenceb} \boldsymbol{b}^n\relbar\joinrel\rightharpoonup \boldsymbol{b}^{\operatorname{eff}} \quad \text{weakly in } L^1_{\operatorname{loc}}((0,\infty)\times\mathbb{R}^6;\mathbb{R}^6) \end{equation} and that, for every ball $B_R\subset \mathbb{R}^3$, \begin{equation}\label{unifomconvergenceb} [E^n+\hat{v}\times B^n](x+h)\longrightarrow [E^n+\hat{v}\times B^n](x)\text{ as } |h|\rightarrow 0 \text{ in } L^1_{\operatorname{loc}}((0,\infty);L^1(B_R)), \text{ uniformly in } n. \end{equation} For this purpose, we first prove that the sequence $(\boldsymbol{b}^n)_{n\in\mathbb{N}}$ is bounded in $L^p_{\operatorname{loc}}((0,\infty)\times\mathbb{R}^6;\mathbb{R}^6)$ for every $p\in[1,3/2)$. 
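The restriction $p<3/2$ comes from the local integrability of the kernel: in the model case where $|K(x)|\lesssim |x|^{-2}$ (as for the Coulomb kernel $\pm\nabla H$, up to constants; this is the only property of $K$ used here), a computation in polar coordinates gives
\[
\int_{B_1}|K(x)|^{p}\,\mathrm{d} x\;\lesssim\;4\pi\int_0^1 r^{-2p}\,r^{2}\,\mathrm{d} r\;=\;4\pi\int_0^1 r^{2-2p}\,\mathrm{d} r\;<\;\infty
\quad\Longleftrightarrow\quad p<\tfrac{3}{2},
\]
which is why $K$ is split below into its singular part on $B_1$ and its bounded part on $\mathbb{R}^3\setminus B_1$.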
Indeed, by using Young's inequality, for every $t\geq 0$, $n\in\mathbb{N}$, and $r>0$, \[ \|B^n_t\|_{L^p(B_r;\mathbb{R}^3)}+\|E^n_t\|_{L^p(B_r;\mathbb{R}^3)}\leq \|(|J_t^n|\ast \eta^n)\ast |K|\|_{L^p(B_r;\mathbb{R}^3)}+\|(\rho_t^n\ast \eta^n)\ast K\|_{L^p(B_r;\mathbb{R}^3)}. \] The first term can be bounded by \[\begin{split} &\|(|J_t^n|\ast \eta^n)\ast (|K| \boldsymbol{1}_{B_1})\|_{L^p(B_r;\mathbb{R}^3)}+\|(|J_t^n|\ast \eta^n)\ast (|K| \boldsymbol{1}_{\mathbb{R}^3\setminus B_1})\|_{L^p(B_r;\mathbb{R}^3)}\\ &\leq \||J_t^n|\|_{L^1(\mathbb{R}^3)}\|\eta^n\|_{L^1(\mathbb{R}^3)}\|K\|_{L^p(B_1;\mathbb{R}^3)}+\mathcal{L}^3(B_r)^{1/p}\||J_t^n|\|_{L^1(\mathbb{R}^3)}\|\eta^n\|_{L^1(\mathbb{R}^3)}\|K\|_{L^\infty(\mathbb{R}^3\setminus B_1;\mathbb{R}^3)}. \end{split}\] Likewise, the second term can be bounded by \[ \|\rho_t^n\|_{L^1(\mathbb{R}^3)}\|\eta^n\|_{L^1(\mathbb{R}^3)}\|K\|_{L^p(B_1;\mathbb{R}^3)}+\mathcal{L}^3(B_r)^{1/p}\|\rho_t^n\|_{L^1(\mathbb{R}^3)}\|\eta^n\|_{L^1(\mathbb{R}^3)}\|K\|_{L^\infty(\mathbb{R}^3\setminus B_1;\mathbb{R}^3)}. \] Thus, up to subsequences, the sequence $(\boldsymbol{b}^n)_{n\in\mathbb{N}}$ converges weakly in $L^p_{\operatorname{loc}}$. We now claim that for every $\varphi\in C_c((0,\infty)\times\mathbb{R}^3)$, \[ \lim_{n\rightarrow \infty}\int_0^\infty\int_{\mathbb{R}^3}(E^n_t+\hat{v}\times B^n_t)\,\varphi_t\,\mathrm{d} x\,\mathrm{d} t=\int_0^\infty\int_{\mathbb{R}^3}(E^{\operatorname{eff}}_t+\hat{v}\times B^{\operatorname{eff}}_t)\,\varphi_t\,\mathrm{d} x\,\mathrm{d} t. \] Indeed, denoting by $T_\varphi$ the supremum of the time support of $\varphi$, we have \[\begin{split} &\left|\int_0^\infty\int_{\mathbb{R}^3}(E^n_t+\hat{v}\times B^n_t)\,\varphi_t\,\mathrm{d} x\,\mathrm{d} t-\int_0^\infty\int_{\mathbb{R}^3}(E^{\operatorname{eff}}_t+\hat{v}\times B^{\operatorname{eff}}_t)\,\varphi_t\,\mathrm{d} x\,\mathrm{d} t\right|\\ &\leq \left|\int_0^\infty\int_{\mathbb{R}^3}(\rho^n_t-\rho^{\operatorname{eff}}_t)\varphi_t\ast K\,\mathrm{d} x\,\mathrm{d} t\right|+\left|\int_0^\infty\int_{\mathbb{R}^3}\rho^n_t(\varphi_t\ast K-\varphi_t\ast K\ast \eta^n)\,\mathrm{d} x\,\mathrm{d} t\right|\\ &+\left|\int_0^\infty\int_{\mathbb{R}^3}(J^n_t-J^{\operatorname{eff}}_t)\times\varphi_t\ast K\,\mathrm{d} x\,\mathrm{d} t\right|+\left|\int_0^\infty\int_{\mathbb{R}^3}J^n_t\times(\varphi_t\ast K-\varphi_t\ast K\ast \eta^n)\,\mathrm{d} x\,\mathrm{d} t\right|\\ &\leq \left|\int_0^\infty\int_{\mathbb{R}^3}(\rho^n_t-\rho^{\operatorname{eff}}_t)\varphi_t\ast K\,\mathrm{d} x\,\mathrm{d} t\right|+\left|\int_0^\infty\int_{\mathbb{R}^3}(J^n_t-J^{\operatorname{eff}}_t)\times\varphi_t\ast K\,\mathrm{d} x\,\mathrm{d} t\right|\\ &+T_\varphi(\|\rho^n\|_{L^\infty((0,\infty);L^1(\mathbb{R}^3))}+\|J^n\|_{L^\infty((0,\infty);L^1(\mathbb{R}^3;\mathbb{R}^3))})\|\varphi\ast K-\varphi\ast K\ast \eta^n\|_{L^\infty((0,\infty)\times\mathbb{R}^3;\mathbb{R}^3)}. \end{split}\] By the weak convergence \eqref{convergencerhoj} and the fact that $\varphi\ast K$ is a bounded continuous function, the first and second terms vanish as $n\longrightarrow \infty$. Moreover, the last term also vanishes, since the first factor is bounded by $C\|f_0\|_{L^1(\mathbb{R}^6)}$, where $C>0$ is a universal constant, and $\varphi\ast K\ast \eta^n$ converges uniformly to $\varphi\ast K$ on $(0,\infty)\times \mathbb{R}^3$. Thus, we have proven \eqref{convergenceb}. We now prove \eqref{unifomconvergenceb}.
For this purpose, we combine the fact that $K\in W^{\alpha,p}(\mathbb{R}^3;\mathbb{R}^3)$ for every $\alpha<1$ and $p<3/(2+\alpha)$, and Young's inequality to obtain \[ \|E_t^n+\hat{v}\times B^n_t\|_{W^{\alpha,p}(B_R;\mathbb{R}^3)}\leq C(R)\|(\rho_t^n+|J^n_t|)\ast\eta^n\|_{L^1(\mathbb{R}^3;\mathbb{R}^3)}. \] Combining $\|\eta^n\|_{L^1(\mathbb{R}^3)}=1$ with \eqref{boundfn}, we can bound the right-hand side independently of $n$ and $t$, which, combined with the embedding of fractional Sobolev spaces into Nikol'skii spaces \cite{nikowsky}, gives \[ \|\boldsymbol{b}^n_t(\cdot+h)-\boldsymbol{b}^n_t(\cdot)\|_{L^p(\mathbb{R}^3;\mathbb{R}^3)}\leq C\left(p,\alpha,R,\|\boldsymbol{b}^n_t\|_{W^{\alpha,p}(B_{2R};\mathbb{R}^3)}\right)|h|^\alpha \quad \forall |h|\leq R, \] and \eqref{unifomconvergenceb} follows. \textbf{Step 5: Conclusion.} By \eqref{convergenceb} and \eqref{unifomconvergenceb}, we can apply the stability result from \cite{dipernalions} to deduce that $f^k$ is a weakly continuous distributional solution of the continuity equation with vector field $\boldsymbol{b}^{\operatorname{eff}}$ and starting from $f_0^k$ for every $k\in\mathbb{N}$. We now exploit the linearity of the continuity equation to show that $F^m\coloneqq \sum_{k=0}^mf^k$ is also a bounded distributional solution for every $m\in \mathbb{N}$. Using the same arguments as in the proof of \Cref{existencesolution}, we obtain that $F^m$ is a renormalized solution for every $m\in\mathbb{N}$. Since $F^m\longrightarrow f$ strongly in $L^1_{\operatorname{loc}}((0,\infty)\times\mathbb{R}^6)$ as $m\longrightarrow\infty$, we obtain that $f$ is a renormalized solution of the continuity equation with vector field $\boldsymbol{b}^{\operatorname{eff}}$ and starting from $f_0$, which, combined with \eqref{regularityf}, \eqref{boundednessrhoeff}, \eqref{boundednessrhoandj}, and \eqref{continuityverified}, proves that the triple $(f_t,\rho^{\operatorname{eff}}_t,J^{\operatorname{eff}}_t)$ is a generalized solution starting from $f_0$ according to \Cref{generalized}. To show that $f$ is transported by the maximal regular flow associated to $\boldsymbol{b}^{\operatorname{eff}}$, we simply use that each $f^k$ is transported (once again with the same argument as in \Cref{existencesolution}) combined with the definition of $f$ and \eqref{regularityf}. Finally, by \cite[Theorem 4.10]{vlasovpoisson}, we conclude that the map \[ [0,\infty)\ni t \longmapsto f_t\in L^1_{\operatorname{loc}}(\mathbb{R}^6)\quad \text{is continuous}.\qedhere\] \endproof \section{Finite energy solutions}\label{sec:finite-energy} Up to now, we have established the existence of a generalized solution (see \Cref{existencegeneral}) and that renormalized and generalized solutions coincide when the mass/charge is conserved in time. In this section, we investigate whether the existence of renormalized solutions can be shown under the more natural condition that the initial total energy is bounded, that is, \begin{equation}\label{itotalenergy} \mathcal{E}_0\coloneqq\int_{\mathbb{R}^6}\sqrt{1+|v|^2}f_0(x,v)\,\mathrm{d} x\,\mathrm{d} v+\frac{\sigma_E}{2}\int_{\mathbb{R}^3}(H\ast \rho_0) \rho_0 \,\mathrm{d} x+\frac{\sigma_B}{2}\int_{\mathbb{R}^3}(H\ast J_0) \cdot J_0 \,\mathrm{d} x<\infty, \end{equation} where $H(x)\coloneqq(4\pi|x|)^{-1}$, the first term is the relativistic (initial) energy, and the second and third terms are the electric and magnetic potential (initial) energies, respectively.
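The formal computations in the next paragraphs repeatedly use two elementary identities, recorded here for convenience:
\[
\nabla_v\sqrt{1+|v|^2}=\frac{v}{\sqrt{1+|v|^2}}=\hat{v}
\qquad\text{and}\qquad
\hat{v}\cdot(\hat{v}\times B_t)=0 ,
\]
the first explaining why $\hat{v}$ is the natural velocity for the relativistic energy, and the second why the magnetic force does not contribute to the energy balance.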
For this purpose, we recall that multiplying the first equation of \eqref{prin} by $\sqrt{1+|v|^2}$ and integrating with respect to $(x,v)$ over the whole domain $\mathbb{R}^6$ (formally) gives that the relativistic energy satisfies \[ \frac{\mathrm{d}}{\mathrm{d} t}\int_{\mathbb{R}^6}\sqrt{1+|v|^2}f_t(x,v)\,\mathrm{d} x\,\mathrm{d} v=\int_{\mathbb{R}^6}\hat{v}\cdot(E_t+\hat{v}\times B_t) f_t(x,v)\,\mathrm{d} x\,\mathrm{d} v=\int_{\mathbb{R}^3}E_t\cdot J_t \,\mathrm{d} x. \] Now, Poynting's theorem gives that the relativistic Vlasov-Maxwell system has its total energy (formally) conserved, i.e., \[ \begin{split} \int_{\mathbb{R}^6}\sqrt{1+|v|^2}f_t(x,v)\,\mathrm{d} x\,\mathrm{d} v+\frac{1}{2}\int_{\mathbb{R}^3}|E_t|^2+|B_t|^2\,\mathrm{d} x=&\int_{\mathbb{R}^6}\sqrt{1+|v|^2}f_0(x,v)\,\mathrm{d} x\,\mathrm{d} v\\ &+\frac{1}{2}\int_{\mathbb{R}^3}|E_0|^2+|B_0|^2\,\mathrm{d} x, \end{split}\] while for the system \eqref{prin} we obtain a similar expression (see \eqref{conservation2} below): \begin{equation}\label{conservation1} \begin{split} \int_{\mathbb{R}^6}\sqrt{1+|v|^2}f_t(x,v)\,\mathrm{d} x\,\mathrm{d} v+\frac{\sigma_E}{2}\int_{\mathbb{R}^3}(H\ast \rho_t) \rho_t \,\mathrm{d} x=&\int_{\mathbb{R}^6}\sqrt{1+|v|^2}f_0(x,v)\,\mathrm{d} x\,\mathrm{d} v\\ &+\frac{\sigma_E}{2}\int_{\mathbb{R}^3}(H\ast \rho_0) \rho_0 \,\mathrm{d} x. \end{split} \end{equation} Notice that the magnetic potential energy does not appear in the conservation above. On the other hand, one can (formally) integrate by parts the electric and magnetic energy to obtain the relations \begin{equation}\label{conservation2} \begin{split} \int_{\mathbb{R}^3}|E_t|^2\,\mathrm{d} x&=\int_{\mathbb{R}^3}(H\ast \rho_t) \rho_t \,\mathrm{d} x;\\ \int_{\mathbb{R}^3}|B_t|^2\,\mathrm{d} x&=\int_{\mathbb{R}^3}(H\ast J_t) \cdot J_t \,\mathrm{d} x-\int_{\mathbb{R}^3} \left(\nabla\cdot (H\ast J_t)\right)^2\,\mathrm{d} x, \end{split} \end{equation} where $H(x)\coloneqq (4\pi|x|)^{-1}$. We can interpret $H\ast \rho_t$ and $H\ast J_t$ as the electric potential and magnetic vector potential, respectively (see \cite{jackson}). Notice that, on the one hand, the electric potential energy is fully converted into the electric energy. On the other hand, the magnetic potential energy is converted into the magnetic energy and the displacement current $\partial_t E_t$, since \begin{equation}\label{partialE} -\int_{\mathbb{R}^3} \left(\nabla\cdot (H\ast J_t)\right)^2\,\mathrm{d} x=\int_{\mathbb{R}^3} \nabla\cdot (H\ast J_t)\,\partial_t(H\ast \rho_t)\,\mathrm{d} x=\int_{\mathbb{R}^3} (H\ast J_t)\cdot\partial_tE_t\,\mathrm{d} x. \end{equation} Moreover, we obtain (formally) that the magnetic potential energy is nonnegative for a.e. $t\in[0,\infty)$. Hence, by \eqref{conservation1} and \eqref{conservation2}, we do not expect the initial energy $\mathcal{E}_0$ to bound the total energy of the system given by \[ \mathcal{E}_t\coloneqq\int_{\mathbb{R}^6}\sqrt{1+|v|^2}f_t(x,v)\,\mathrm{d} x\,\mathrm{d} v+\frac{\sigma_E}{2}\int_{\mathbb{R}^3}(H\ast \rho_t) \rho_t \,\mathrm{d} x+\frac{\sigma_B}{2}\int_{\mathbb{R}^3}(H\ast J_t) \cdot J_t \,\mathrm{d} x.
\] Nonetheless, we shall exploit a semicontinuity argument to show an inequality analogous to \eqref{conservation1} (see the proof of \Cref{finalthm}): \begin{equation}\label{protobound} \begin{split} \int_{\mathbb{R}^6}\sqrt{1+|v|^2}f_t(x,v)\,\mathrm{d} x\,\mathrm{d} v+\frac{\sigma_E}{2}\int_{\mathbb{R}^3}(H\ast \rho_t) \rho_t \,\mathrm{d} x \leq & \int_{\mathbb{R}^6}\sqrt{1+|v|^2}f_0(x,v)\,\mathrm{d} x\,\mathrm{d} v\\ &+\frac{\sigma_E}{2}\int_{\mathbb{R}^3}(H\ast \rho_0) \rho_0 \,\mathrm{d} x. \end{split} \end{equation} \begin{remark}\textnormal{Although the formal argument that leads to \eqref{conservation2} suggests the magnetic potential energy is nonnegative, we rigorously justify it in the proof of \Cref{magneticpositive}. Hence, \eqref{itotalenergy} implies that the right-hand side of \eqref{protobound} is bounded.} \end{remark} \begin{remark}\label{remarkpartialE} \textnormal{By \eqref{conservation2} and \eqref{partialE}, we (formally) have} \begin{equation}\label{lowerorder} \int_{\mathbb{R}^3}|B_t|^2\,\mathrm{d} x=\int_{\mathbb{R}^3} A_t \cdot (J_t+\partial_t E_t) \,\mathrm{d} x, \end{equation} \textnormal{where $A_t\coloneqq H\ast J_t$ is the magnetic vector potential. Since we can interpret $\partial_t E_t$ as a density current, one might define the magnetic vector potential as $H\ast (J_t +\partial_t E_t)$, and therefore \eqref{partialE} does not provide a relation between magnetic energy and magnetic potential energy. We claim that \eqref{lowerorder} still holds if $A_t=H\ast (J_t +\partial_t E_t)$; thus, we may interpret $\partial_t E_t$ as a lower order term. Indeed, define a magnetic field with density current $\tilde{J}_t\coloneqq J_t+\partial_t E_t$, that is, $\tilde{B}=\nabla\times(H\ast \tilde{J}_t)$, and a calculation analogous to \eqref{conservation2} gives that \begin{equation}\label{lowerorder1} \int_{\mathbb{R}^3}|\tilde{B}_t|^2\,\mathrm{d} x=\int_{\mathbb{R}^3}(H\ast \tilde{J}_t) \cdot \tilde{J}_t \,\mathrm{d} x-\int_{\mathbb{R}^3} \left(\nabla\cdot (H\ast \tilde{J}_t)\right)^2\,\mathrm{d} x. \end{equation} Notice that $\nabla\cdot (H\ast \tilde{J}_t)=H\ast(\nabla\cdot J_t+\partial_t \rho_t)=0$, hence the last term vanishes. Moreover, since $E_t$ is irrotational, $B_t=\tilde{B}_t$; thus, combining \eqref{lowerorder} and \eqref{lowerorder1}, we conclude that \[ \int_{\mathbb{R}^3}(H\ast \partial_tE_t) \cdot (J_t+\partial_t E_t) \,\mathrm{d} x=0. \] Therefore, had we defined the magnetic vector potential as $H\ast \tilde{J}_t$, \eqref{lowerorder} would be unaltered.} \end{remark} Notice that if $\sigma_E=1$, a bound such as \eqref{protobound} gives that each energy term of $\mathcal{E}_t$ is bounded, since $|J|<\rho$ a.e. in space-time. However, it does not provide, in general, control of the relativistic energy and of the electric and magnetic potential energies if $\sigma_E=-1$ or $\sigma_E=0$. If we also assume a higher integrability of $f_0$ and a suitable smallness condition on its norm, the next lemma can be used to bound each energy. \lemma\label{interpolation} Let $f\in L^1(\mathbb{R}^6)\cap L^q(\mathbb{R}^6)$ be a nonnegative function for some $q\geq 1$, and assume that $\sqrt{1+|v|^2}f\in L^1(\mathbb{R}^6)$. Set $p\coloneqq \frac{4q-3}{3q-2}$. Then $\rho= \int_{\mathbb{R}^3}f(\cdot,v)\,\mathrm{d} v\in L^p(\mathbb{R}^3)$ and there exists a constant $C>0$, depending only on $q$, such that \[ \|\rho\|_{L^p(\mathbb{R}^3)}\leq C\|\sqrt{1+|v|^2}f\|^{\theta}_{L^1(\mathbb{R}^6)}\|f\|^{1-\theta}_{L^q(\mathbb{R}^6)}, \] where $\theta\coloneqq \frac{3(q-1)}{4q-3}$.
\proof We begin by choosing $R>0$ and splitting the integral defining $\rho$ over the sets $\{|v|<R\}$ and $\{|v|\geq R\}$. Hence, for each $x\in\mathbb{R}^3$, \[ \rho(x)\leq C R^{3(q-1)/q}\|f(x,\cdot)\|_{L^q(\mathbb{R}^3)}+R^{-1}\|\sqrt{1+|v|^2}f(x,\cdot)\|_{L^1(\mathbb{R}^3)}. \] By minimizing the right-hand side with respect to $R$, we have \[ \rho(x)\leq C\|\sqrt{1+|v|^2}f(x,\cdot)\|^{3(q-1)/(4q-3)}_{L^1(\mathbb{R}^3)}\|f(x,\cdot)\|^{q/(4q-3)}_{L^q(\mathbb{R}^3)}. \] Taking the $L^p$-norm of $\rho$ and using H\"{o}lder's inequality, the result follows. \endproof As anticipated, if $f_0$ satisfies \begin{equation}\label{f0bound} f_0 \in \begin{cases} L^{1}(\mathbb{R}^6) & \text{if } \sigma_E=1;\\ L^{1}(\mathbb{R}^6)\cap L^{3/2}(\mathbb{R}^6) & \text{if } \sigma_E=0;\\ L^{1}(\mathbb{R}^6)\cap L^{3/2}(\mathbb{R}^6) \text{ and } \|f_0\|_{L^{3/2}(\mathbb{R}^6)}\leq \epsilon & \text{if } \sigma_E=-1 \end{cases} \end{equation} for some suitable $\epsilon>0$, the previous lemma allows us to bound the relativistic energy and the electric and magnetic potential energies. Indeed, by Calder\'{o}n-Zygmund estimates and the Sobolev embedding, we have that \begin{equation}\label{calderonsobolev} \|H\ast \rho_t\|_{L^6(\mathbb{R}^3)}\leq C\|D^2(H\ast \rho_t)\|_{L^{6/5}(\mathbb{R}^3)}\leq C\|\rho_t\|_{L^{6/5}(\mathbb{R}^3)} \end{equation} for some universal constant $C>0$. Combining \eqref{calderonsobolev} with H\"{o}lder's inequality and \Cref{interpolation} with $p=6/5$ and $q=3/2$ gives \begin{equation}\label{potentialenergy} \begin{split} \int_{\mathbb{R}^3}(H\ast \rho_t) \rho_t \,\mathrm{d} x\leq \|H\ast \rho_t\|_{L^6(\mathbb{R}^3)}\|\rho_t\|_{L^{6/5}(\mathbb{R}^3)}&\leq C\|\rho_t\|^2_{L^{6/5}(\mathbb{R}^3)}\\ &\leq C\|\sqrt{1+|v|^2}f_t\|_{L^1(\mathbb{R}^6)}\|f_t\|_{L^{3/2}(\mathbb{R}^6)}. \end{split} \end{equation} Notice that $\|f\|_{L^{\infty}([0,\infty);L^{3/2}(\mathbb{R}^6))}\leq \|f_0\|_{L^{3/2}(\mathbb{R}^6)}$ when the solution is built by approximation (see \eqref{regularityf}). Hence, if \eqref{protobound} holds, we already have a bound of the relativistic energy in the pure magnetic case $\sigma_E=0$, and by the previous bound, we obtain the following boundedness of the magnetic and electric potential energies (recall that $|J|<\rho$ a.e. in space-time): \[ \int_{\mathbb{R}^3}(H\ast J_t)\cdot J_t \,\mathrm{d} x\leq \int_{\mathbb{R}^3}(H\ast \rho_t) \rho_t \,\mathrm{d} x\leq C\|f_0\|_{L^{3/2}(\mathbb{R}^6)}\int_{\mathbb{R}^6}\sqrt{1+|v|^2}f_0(x,v)\,\mathrm{d} x\,\mathrm{d} v. \] Now, in the attractive (gravitational) case $\sigma_E=-1$, we obtain by \eqref{protobound} and \eqref{potentialenergy} that \[\begin{split} \left(1-C\|f\|_{L^{\infty}([0,\infty);L^{3/2}(\mathbb{R}^6))}\right)\int_{\mathbb{R}^6}\sqrt{1+|v|^2}f_t(x,v)\,\mathrm{d} x\,\mathrm{d} v&\leq \int_{\mathbb{R}^6}\sqrt{1+|v|^2}f_0(x,v)\,\mathrm{d} x\,\mathrm{d} v\\ &-\frac{1}{2}\int_{\mathbb{R}^3}(H\ast \rho_0) \rho_0 \,\mathrm{d} x. \end{split}\] Assuming that $f$ is built by approximation as before and that $\|f_0\|_{L^{3/2}(\mathbb{R}^6)}< 1/C\eqqcolon\epsilon$, we have a bound of the relativistic energy; therefore, by \eqref{potentialenergy}, the electric and magnetic potential energies are bounded as well. This motivates the following: \begin{definition}\label{boundedenergy} We say that $f_0$ has every energy bounded if \eqref{itotalenergy} and \eqref{f0bound} hold. Moreover, if $f_t$ also satisfies \eqref{protobound} for almost every $t\in[0,\infty)$, then we say that $f_t$ has every energy bounded.
\end{definition} \begin{remark} \textnormal{Notice that we need stronger assumptions on the initial data compared to the nonrelativistic Vlasov-Poisson case for $\sigma_E=-1$, where it is only needed that $f_0\in L^{9/7}(\mathbb{R}^6)$, with no smallness assumption (see \cite{thesis}). This is due to the fact that the classical kinetic energy grows as $|v|^2$, whereas the relativistic energy grows only as $|v|$.} \end{remark} We now prove that if $f_0$ has every energy bounded, then we have a smooth sequence $(f_0^n)_{n\in\mathbb{N}}$ and a mollified sequence of kernels $(H\ast \eta^{k_n})_{n\in\mathbb{N}}$ such that the corresponding initial energies converge to that of $f_0$. We denote by $L_c^\infty$ the space of bounded measurable functions with compact support. \lemma\label{approxinitial} Let $\eta^k(x)\coloneqq k^3\eta(kx)$, where $\eta$ is a standard convolution kernel in $\mathbb{R}^3$. Let $f_0$ be a nonnegative function with every energy bounded. Then there exists a sequence $(f_0^n)_{n\in\mathbb{N}}\subset C_c^\infty(\mathbb{R}^6)$ and a sequence $(k_n)_{n\in\mathbb{N}}$ such that $k_n\longrightarrow \infty$ and, by setting $\rho_0^n=\int_{\mathbb{R}^3}f^n_0(\cdot,v)\,\mathrm{d} v$ and $J_0^n=\int_{\mathbb{R}^3}\hat{v} f^n_0(\cdot,v)\,\mathrm{d} v$, \[\begin{split} \lim_{n\rightarrow \infty}&\left(\int_{\mathbb{R}^6}\sqrt{1+|v|^2}f^n_0(x,v)\,\mathrm{d} x\,\mathrm{d} v+\frac{\sigma_E}{2}\int_{\mathbb{R}^3}(H\ast\eta^{k_n}\ast \rho^n_0) \rho^n_0 \,\mathrm{d} x+\frac{\sigma_B}{2}\int_{\mathbb{R}^3}(H\ast\eta^{k_n}\ast J^n_0) \cdot J^n_0 \,\mathrm{d} x\right)\\ &=\int_{\mathbb{R}^6}\sqrt{1+|v|^2}f_0(x,v)\,\mathrm{d} x\,\mathrm{d} v+\frac{\sigma_E}{2}\int_{\mathbb{R}^3}(H\ast \rho_0) \rho_0 \,\mathrm{d} x+\frac{\sigma_B}{2}\int_{\mathbb{R}^3}(H\ast J_0) \cdot J_0 \,\mathrm{d} x. \end{split}\] \proof We split the proof into three steps: in Step 1, we assume that $f_0\in L_c^\infty(\mathbb{R}^6)$ and approximate it by a sequence of smooth functions with compact support; in Step 2, we obtain the desired limit without the mollification of $H$; in Step 3, we introduce the mollification $\eta^k\ast H$ of the kernel, and conclude that the limit holds along a suitable sequence $(k_n)_{n\in\mathbb{N}}$. \textbf{Step 1: $\boldsymbol{f_0\in L_c^\infty(\mathbb{R}^6)}$.} Consider smooth functions $f_0^n$ which converge pointwise to $f_0$ and satisfy $\|f_0^n\|_{L^\infty(\mathbb{R}^6)}\leq \|f_0\|_{L^\infty(\mathbb{R}^6)}$ and $\operatorname{supp} f_0^n\subset B_R$ for all $n$, for some $R>0$. Thus, $\|J_0^n\|_{L^\infty(\mathbb{R}^3,\mathbb{R}^3)}<\|\rho_0^n\|_{L^\infty(\mathbb{R}^3)}\leq \|\rho_0\|_{L^\infty(\mathbb{R}^3)}$, and $\operatorname{supp} |J_0^n|\subset\operatorname{supp}\rho_0^n \subseteq B_R$. Moreover, $|H\ast J_0^n|< H\ast \rho_0^n<\infty$ and $H\ast \rho_0^n\longrightarrow H\ast \rho_0$ and $H\ast J_0^n\longrightarrow H\ast J_0$ in $L^p_{\operatorname{loc}}$ for every $p$, and we conclude by dominated convergence that \begin{equation}\label{approxinitialenergy} \begin{split} \lim_{n\rightarrow \infty}&\left(\int_{\mathbb{R}^6}\sqrt{1+|v|^2}f^n_0(x,v)\,\mathrm{d} x\,\mathrm{d} v+\frac{\sigma_E}{2}\int_{\mathbb{R}^3}(H\ast \rho^n_0) \rho^n_0 \,\mathrm{d} x+\frac{\sigma_B}{2}\int_{\mathbb{R}^3}(H\ast J^n_0) \cdot J^n_0 \,\mathrm{d} x\right)\\ &=\int_{\mathbb{R}^6}\sqrt{1+|v|^2}f_0(x,v)\,\mathrm{d} x\,\mathrm{d} v+\frac{\sigma_E}{2}\int_{\mathbb{R}^3}(H\ast \rho_0) \rho_0 \,\mathrm{d} x+\frac{\sigma_B}{2}\int_{\mathbb{R}^3}(H\ast J_0) \cdot J_0 \,\mathrm{d} x.
\end{split} \end{equation} \textbf{Step 2: $\boldsymbol{f_0\in L^1(\mathbb{R}^6)}$ without mollification of $\boldsymbol{H}$.} By Step 1, it is enough to approximate $f_0$ by $(f_0^n)_{n\in\mathbb{N}}\subset L_c^\infty(\mathbb{R}^6)$ with converging energies to obtain \eqref{approxinitialenergy}. For this purpose, define \[ f_0^n(x,v)\coloneqq\min\{n,\boldsymbol{1}_{B_n}(x,v)f_0(x,v)\}, \quad (x,v)\in\mathbb{R}^6. \] Since $H\geq 0$, the first two integrands on the left-hand side of \eqref{approxinitialenergy} converges monotonically, and we conclude by monotone convergence. Since $|(H\ast J_0^n)\cdot J_0^n|< (H\ast\rho_0) \rho_0$ a.e., and $(H\ast\rho_0)\rho_0$ is integrable (since $f_0$ has every energy bounded), we conclude that the last integral on the left-hand side converges by the dominated convergence. \textbf{Step 3: Approximation of the kernel.} Given $(f_0^n)_{n\in\mathbb{N}}\in C^\infty_c(\mathbb{R}^6)$ provided by the previous two steps, we have \[\begin{split} \lim_{k\rightarrow \infty}&\left(\int_{\mathbb{R}^3}(H\ast\eta^k\ast \rho^n_0) \rho^n_0 \,\mathrm{d} x+\int_{\mathbb{R}^3}(H\ast\eta^k\ast J^n_0) \cdot J^n_0 \,\mathrm{d} x\right)\\ &=\int_{\mathbb{R}^3}(H\ast \rho^n_0) \rho^n_0 \,\mathrm{d} x+\int_{\mathbb{R}^3}(H\ast J^n_0) \cdot J^n_0 \,\mathrm{d} x \end{split}\] for every fixed $n$. Hence, there exists $k_n$ sufficiently large such that \[\begin{split} \left|\int_{\mathbb{R}^3}(H\ast\eta^{k_n}\ast \rho^n_0) \rho^n_0 \,\mathrm{d} x+ \int_{\mathbb{R}^3}(H\ast\eta^{k_n}\ast J^n_0) \cdot J^n_0 \,\mathrm{d} x-\int_{\mathbb{R}^3}(H\ast \rho^n_0) \rho^n_0 \,\mathrm{d} x-\int_{\mathbb{R}^3}(H\ast J^n_0) \cdot J^n_0 \,\mathrm{d} x\right|\\ \leq \frac{1}{n}, \end{split}\] and the lemma follows. \endproof In what follows, we need the following result from \cite[Lemma 3.3]{vlasovpoisson} that we state for convenience of the reader. \begin{lemma}\label{notproven} Let $T>0$ and $\phi\in C_c((0,T))$ be a nonnegative function. Then, for every sequence $(\rho^n)_{n\in\mathbb{N}}\subset C([0,T];\mathcal{M}_+(\mathbb{R}^3))$ such that \[ \sup_{n\in\mathbb{N}}\sup_{t\in[0,T]}\rho^n_t(\mathbb{R}^3)<\infty \] and \begin{equation}\label{rhonconvergence} \lim_{n\rightarrow \infty}\sup_{t\in[0,T]}\left|\int_{\mathbb{R}^3}\varphi\,\mathrm{d} (\rho^n_t-\rho_t)\right|=0 \quad \text{for every } \varphi\in C^\infty_c(\mathbb{R}^3). \end{equation} we have \begin{equation}\label{liminf} \int_0^T\phi(t)\int_{\mathbb{R}^3}H\ast \rho_t(x)\,\mathrm{d}\rho_t(x)\,\mathrm{d} t\leq \liminf_{n\rightarrow \infty}\int_0^T\phi(t)\int_{\mathbb{R}^3}H\ast \eta^{n}\ast \rho^n_t(x)\,\mathrm{d}\rho^n_t(x)\,\mathrm{d} t, \end{equation} \end{lemma} Although the previous lemma is enough for $\sigma_E\in\{0,1\}$, we need a slight higher integrability assumption in the gravitational case $\sigma_E=-1$. This is due to the fact that we obtain \eqref{protobound} by a lower semicontinuity argument, and \eqref{liminf} is not sufficient if the electric potential energy is nonpositive. Nonetheless, if $\rho \in L^{6/5}$, we obtain \eqref{liminf} with a limit and an equality, and we prove it in the next lemma. \lemma\label{proven} Let $\rho^n,\,\rho\in L^\infty([0,T];L^1(\mathbb{R}^3)\cap L^{6/5}(\mathbb{R}^3))$ in the same setting as \Cref{notproven}. Moreover, assume that \begin{equation}\label{vanishing} \sup_{n\in\mathbb{N}}\sup_{t\in[0,T]}\|\rho^n_t\|_{L^{6/5}(\mathbb{R}^3)}<\infty. 
\end{equation} Then \begin{equation}\label{eqproven} \lim_{n\rightarrow \infty}\int_0^T\phi(t)\int_{\mathbb{R}^3}H\ast \eta^{n}\ast \rho^n_t(x)\,\mathrm{d}\rho^n_t(x)\,\mathrm{d} t=\int_0^T\phi(t)\int_{\mathbb{R}^3}H\ast \rho_t(x)\,\mathrm{d}\rho_t(x)\,\mathrm{d} t. \end{equation} \proof Notice that \[\begin{split} \int_{\mathbb{R}^3}H\ast \eta^n \ast\rho^n_t(x)\rho^n_t(x)-H\ast \rho_t(x)\rho_t(x)\,\mathrm{d} x&=\int_{\mathbb{R}^3}H\ast \eta^n \ast(\rho^n_t(x)-\rho_t(x))\rho^n_t(x)\,\mathrm{d} x\\ &+\int_{\mathbb{R}^3}H\ast(\eta^n\ast\rho_t(x)-\rho_t(x))\rho^n_t(x)\,\mathrm{d} x\\ &+\int_{\mathbb{R}^3}H\ast\rho_t(x)(\rho^n_t(x)-\rho_t(x))\,\mathrm{d} x\eqqcolon I_1+I_2+I_3. \end{split}\] Now, by \eqref{calderonsobolev} and H\"{o}lder's inequality, we obtain that \[ |I_2|\leq C\|\eta^n \ast\rho_t-\rho_t\|_{L^{6/5}(\mathbb{R}^3)}\sup_{n\in\mathbb{N}}\|\rho^n_t\|_{L^{6/5}(\mathbb{R}^3)}. \] Letting $n\longrightarrow \infty$, we obtain that $I_2$ vanishes. We now define $\zeta_k\in C_c^\infty(\mathbb{R}^3)$ as a cutoff function in the annular set $B_{k}\setminus B_{1/k}$, namely, \[ \begin{cases} \zeta_k=1 & \text{ in }\quad B_{k}\setminus B_{1/k};\\ \zeta_k=0 & \text{ in }\quad B^c_{k+1}\cup B_{1/(k+1)};\\ 0\leq\zeta_k\leq 1& \text{ in }\quad \mathbb{R}^3. \end{cases} \] We bound $I_3$ by \[ |I_3|\leq \left|\int_{\mathbb{R}^3}H\ast\rho_t(x)(\rho^n_t(x)-\rho_t(x))\zeta_k(x)\,\mathrm{d} x\right|+\left|\int_{\mathbb{R}^3}H\ast\rho_t(x)(\rho^n_t(x)-\rho_t(x))(1-\zeta_k(x))\,\mathrm{d} x\right|. \] We first take the limit $n\longrightarrow \infty$ and then $k\longrightarrow \infty$, in order to be able to use \eqref{rhonconvergence}. Now, by \eqref{calderonsobolev}, we obtain \[\begin{split} \left|\int_{\mathbb{R}^3}H\ast\rho_t(x)(\rho^n_t(x)-\rho_t(x))(1-\zeta_k(x))\,\mathrm{d} x\right|&\leq \int_{B_{1/k}\cup B_k^c}H\ast\rho_t(x)\,|\rho^n_t(x)-\rho_t(x)|\,\mathrm{d} x\\ &\leq C\|\rho_t\|_{L^{6/5}(\mathbb{R}^3)}\sup_{n\in\mathbb{N}}\|\rho^n_t-\rho_t\|_{L^{6/5}(B_{1/k}\cup B_k^c)}. \end{split}\] Defining the measures $\mathrm{d} \mu^n_t\coloneqq |\rho^n_t-\rho_t|^{6/5}\,\mathrm{d} x$ and $\mu\coloneqq \sup_{n\in\mathbb{N}}\mu^n$, \eqref{vanishing} and the continuity from above of the measure $\mu_t$ along the decreasing sets $B_{1/k}\cup B_k^c$ give that \[ \lim_{k\rightarrow \infty}\sup_{n\in\mathbb{N}}\int_{B_{1/k}\cup B_k^c}|\rho^n_t-\rho_t|^{6/5}\,\mathrm{d} x=\lim_{k\rightarrow \infty}\mu_t(B_{1/k}\cup B_k^c)=\mu_t\left(\cap^\infty_{k=1} B_{1/k}\right)+\mu_t\left(\cap^\infty_{k=1} B^c_k\right)=0, \] and we conclude that the second term vanishes as $k\longrightarrow \infty$. Now, we bound the first term by \[ \|H\ast \rho_t\|_{L^\infty(B_{k+1}\setminus B_{1/(k+1)})}\left|\int_{\mathbb{R}^3}\zeta_k(\rho^n_t(x)-\rho_t(x))\,\mathrm{d} x\right|. \] By Young's inequality, we have \[ \|H\ast \rho_t\|_{L^\infty(B_{k+1}\setminus B_{1/(k+1)})}\leq \|H\|_{L^\infty(B_{k+1}\setminus B_{1/(k+1)})}\|\rho_t\|_{L^1(\mathbb{R}^3)}<\infty. \] Hence, by \eqref{rhonconvergence}, $I_3$ vanishes as $n\longrightarrow\infty$ and $k\longrightarrow\infty$.
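For completeness, the convergence $\eta^n\ast\rho_t\to\rho_t$ in $L^{6/5}(\mathbb{R}^3)$ used for $I_2$ above is the standard approximate-identity estimate; here we assume, as is customary for a standard convolution kernel, that $\operatorname{supp}\eta\subset B_1$, so that $\operatorname{supp}\eta^n\subset B_{1/n}$. By Minkowski's integral inequality,
\[
\|\eta^n\ast\rho_t-\rho_t\|_{L^{6/5}(\mathbb{R}^3)}\leq\int_{\mathbb{R}^3}\eta^n(h)\,\|\rho_t(\cdot-h)-\rho_t\|_{L^{6/5}(\mathbb{R}^3)}\,\mathrm{d} h\leq\sup_{|h|\leq 1/n}\|\rho_t(\cdot-h)-\rho_t\|_{L^{6/5}(\mathbb{R}^3)}\xrightarrow[n\rightarrow\infty]{}0,
\]
by the continuity of translations in $L^{6/5}(\mathbb{R}^3)$.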
Analogously, we have \[\begin{split} |I_1|&=\left|\int_{\mathbb{R}^3}H\ast\eta^n \ast\rho^n_t(x)(\rho^n_t(x)-\rho_t(x))\,\mathrm{d} x\right|\\ &\leq\|H\ast \rho^n_t\|_{L^\infty(B_{k+1}\setminus B_{1/(k+1)})}\left|\int_{\mathbb{R}^3}\zeta_k(\rho^n_t(x)-\rho_t(x))\,\mathrm{d} x\right|\\ &+ C\sup_{n\in\mathbb{N}}\|\rho^n_t\|_{L^{6/5}(\mathbb{R}^3)}\sup_{n\in\mathbb{N}}\|\rho^n_t-\rho_t\|_{L^{6/5}(B_{1/k}\cup B_k^c)}, \end{split}\] and by the same argument as before, $I_1$ vanishes as $n\longrightarrow\infty$ and $k\longrightarrow\infty$, and the lemma follows. \endproof We now want to rigorously justify \eqref{conservation2} for $|J| < \rho \in L^{1}(\mathbb{R}^3)$. Actually, the same argument yields the result for $|J| < \rho \in \mathcal{M}_{+}(\mathbb{R}^{3})$. The following lemma gives \eqref{conservation2} with an inequality; in particular, the magnetic potential energy is nonnegative. \lemma\label{magneticpositive} For every nonnegative $\rho\in L^1(\mathbb{R}^3)$ and every $J$ with $|J|<\rho$, \begin{equation}\label{conservation2proof} \begin{split} \int_{\mathbb{R}^3}|\nabla(H\ast \rho)|^2\,\mathrm{d} x&\leq\int_{\mathbb{R}^3}(H\ast \rho) \rho \,\mathrm{d} x;\\ \int_{\mathbb{R}^3}|\nabla\times(H\ast J)|^2\,\mathrm{d} x&\leq\int_{\mathbb{R}^3}(H\ast J) \cdot J \,\mathrm{d} x-\int_{\mathbb{R}^3} \left(\nabla\cdot (H\ast J)\right)^2\,\mathrm{d} x. \end{split} \end{equation} In particular, we obtain that the magnetic potential energy is nonnegative. \proof We split the proof similarly to \Cref{approxinitial}: \textbf{Step 1: $\boldsymbol{J_i,\,\rho \in L^\infty_c(\mathbb{R}^3)}$.} Consider first $\rho,\, J$ smooth compactly supported functions, and perform an integration by parts to obtain \[\begin{split} \int_{B_R}|\nabla(H\ast \rho)|^2\,\mathrm{d} x&=\int_{B_R}(H\ast \rho) \rho \,\mathrm{d} x+\int_{\partial B_R}H\ast \rho\, \nabla(H\ast \rho)\cdot \nu_{B_R}\, \mathrm{d} \mathcal{H}^{2};\\ \int_{B_R}|\nabla\times(H\ast J)|^2\,\mathrm{d} x&=\int_{B_R}(H\ast J) \cdot J \,\mathrm{d} x-\int_{\mathbb{R}^3} \left(\nabla\cdot (H\ast J)\right)^2\,\mathrm{d} x\\ &-\int_{\partial B_R}[(H\ast J) \times (\nabla\times(H\ast J))]\cdot \nu_{B_R}\, \mathrm{d} \mathcal{H}^{2}\\ &+\int_{\partial B_R}\nabla\cdot(H\ast J)H\ast J\cdot \nu_{B_R}\, \mathrm{d} \mathcal{H}^{2}. \end{split}\] The same identity holds for $J_i,\,\rho \in L^\infty_c(\mathbb{R}^3)$ by approximation for each component $i\in\{1,\,2,\, 3\}$. Since $H\ast \mu$ and $\nabla (H\ast \mu)$ decay as $R^{-1}$ and $R^{-2}$ when evaluated at $\partial B_R$ for all $\mu\in L^\infty_c(\mathbb{R}^3)$, the boundary terms vanish as $R\longrightarrow \infty$, and we obtain that \eqref{conservation2proof} holds with an equality. \textbf{Step 2: $\boldsymbol{J_i,\,\rho \in L^1(\mathbb{R}^3)}$.} We consider the truncations \[\rho^n\coloneqq \min\{n,\boldsymbol{1}_{B_n}(x)\rho\}, \quad J_i^n\coloneqq \min\{n,\boldsymbol{1}_{B_n}(x)J_i\}.\] Since $H\geq 0$, by monotone convergence and Step 1 we obtain that \[ \int_{\mathbb{R}^3}(H\ast \rho) \rho \,\mathrm{d} x=\lim_{n\rightarrow\infty}\int_{\mathbb{R}^3}(H\ast \rho^n) \rho^n \,\mathrm{d} x= \lim_{n\rightarrow\infty} \int_{\mathbb{R}^3}|\nabla(H\ast \rho^n)|^2\,\mathrm{d} x.
\] Moreover, since $|J|<\rho$, by dominated convergence and Step 1 we obtain that \[\begin{split} \int_{\mathbb{R}^3}(H\ast J) \cdot J \,\mathrm{d} x&=\lim_{n\rightarrow\infty}\int_{\mathbb{R}^3}(H\ast J^n) \cdot J^n \,\mathrm{d} x\\ &=\lim_{n\rightarrow\infty}\int_{\mathbb{R}^3}|\nabla\times(H\ast J^n)|^2\,\mathrm{d} x+\lim_{n\rightarrow\infty}\int_{\mathbb{R}^3} \left(\nabla\cdot (H\ast J^n)\right)^2\,\mathrm{d} x. \end{split}\] Assuming without loss of generality that $(H\ast \rho) \rho\in L^1(\mathbb{R}^3)$, we get bounded sequences $(\nabla(H\ast \rho^n))_{n\in\mathbb{N}}$, $(\nabla\cdot(H\ast J^n))_{n\in\mathbb{N}}$, and $(\nabla\times(H\ast J^n))_{n\in\mathbb{N}}$ in $L^2$. Since each sequence converges in the sense of distributions to $\nabla(H\ast \rho)$, $\nabla\cdot(H\ast J)$, and $\nabla\times(H\ast J)$, respectively, the lower semicontinuity of the $L^2$-norm with respect to weak convergence allows us to conclude \eqref{conservation2proof}. \endproof Finally, we prove our third main result. \proof[Proof of \Cref{finalthm}] The proof of existence of renormalized solutions begins similarly to the proof of \Cref{existencegeneral}: let $(f^n_0)_{n\in\mathbb{N}}\subset C_c^\infty(\mathbb{R}^6)$ and $(k_n)_{n\in\mathbb{N}}$ be given by \Cref{approxinitial}. By Steps 1-3 in the proof of \Cref{existencegeneral} we get a sequence of smooth functions $f^n$ satisfying \eqref{prin} with initial condition $f^n_0$ and kernel $K^{k_n}$ (see \Cref{existencesmooth}) such that \begin{equation}\label{convergences} \begin{split} f^n\relbar\joinrel\rightharpoonup f \quad& \text{weakly in } L^1([0,T]\times\mathbb{R}^6) \text{ for any } T>0;\\ \rho^n\relbar\joinrel\rightharpoonup \rho^{\operatorname{eff}} \quad& \text{weakly* in } L^\infty((0,\infty);\mathcal{M}_+(\mathbb{R}^3));\\ |J^{\operatorname{eff}}|<\rho^{\operatorname{eff}} \quad& \text{as measures};\\ \partial_t \rho^{\operatorname{eff}}+\nabla\cdot J^{\operatorname{eff}}=0 \quad& \text{ as measures with initial condition } \rho_0, \end{split} \end{equation} where $\rho^n_t(x)\coloneqq \int_{\mathbb{R}^3}f^n_t(x,v)\,\mathrm{d} v$. Analogously to \eqref{regularityf}, we have that for $\sigma_E\in\{-1,\, 0\}$, \begin{equation}\label{ftregularity} \|f^n_t\|_{L^{3/2}(\mathbb{R}^6)}\leq\|f_0\|_{L^{3/2}(\mathbb{R}^6)},\quad \|f_t\|_{L^{3/2}(\mathbb{R}^6)}\leq\|f_0\|_{L^{3/2}(\mathbb{R}^6)} \quad \text{for a.e. } t\in[0,\infty). \end{equation} Moreover, since \eqref{conservation1} holds for classical solutions and $f_0$ has every energy bounded, we obtain that \begin{equation}\label{boundnenergy} \sup_{n\in\mathbb{N}}\sup_{t\in[0,\infty)}\int_{\mathbb{R}^6}\sqrt{1+|v|^2}f_t^n\,\mathrm{d} x\,\mathrm{d} v\leq C, \end{equation} and by the lower semicontinuity of the relativistic energy we deduce that, for every $T>0$, \begin{equation}\label{boundenergyT} \int_0^T\int_{\mathbb{R}^6}\sqrt{1+|v|^2}f_t\,\mathrm{d} x\,\mathrm{d} v\,\mathrm{d} t\leq \liminf_{n\rightarrow \infty}\int_0^T\int_{\mathbb{R}^6}\sqrt{1+|v|^2}f_t^n\,\mathrm{d} x\,\mathrm{d} v\,\mathrm{d} t\leq C T. \end{equation} We now claim that $\rho^{\operatorname{eff}}=\rho$ and, consequently, $J=J^{\operatorname{eff}}$, where $|J|<\rho \in L^{\infty}((0,T);L^1(\mathbb{R}^3))$ as in \eqref{prin}.
For this, fix $\varphi\in C_c((0,\infty)\times\mathbb{R}^3)$, and consider $\zeta_k:\mathbb{R}^3\longrightarrow [0,1]$ a nonnegative function which equals $1$ inside $B_k$ and $0$ in $B_{k+1}^c$, and compute \[\begin{split} \int_0^\infty\int_{\mathbb{R}^3}(\rho_t^n-\rho_t)\varphi_t\,\mathrm{d} x\,\mathrm{d} t&=\int_0^\infty\int_{\mathbb{R}^6}(f_t^n(x,v)-f_t(x,v))\varphi_t(x)\zeta_k(v)\,\mathrm{d} v\,\mathrm{d} x\,\mathrm{d} t\\ &+\int_0^\infty\int_{\mathbb{R}^6} f_t^n(x,v)\varphi_t(x)(1-\zeta_k(v))\,\mathrm{d} v\,\mathrm{d} x\,\mathrm{d} t\\ &+\int_0^\infty\int_{\mathbb{R}^6} f_t(x,v)\varphi_t(x)(\zeta_k(v)-1)\,\mathrm{d} v\,\mathrm{d} x\,\mathrm{d} t. \end{split}\] By the weak convergence in $L^1$ in \eqref{convergences}, the first term vanishes as $n\longrightarrow \infty$. The second and third terms can be estimated using \eqref{boundnenergy} and \eqref{boundenergyT}: \[\begin{split} &\left|\int_0^\infty\int_{\mathbb{R}^6} f_t^n(x,v)\varphi_t(x)(1-\zeta_k(v))\,\mathrm{d} v\,\mathrm{d} x\,\mathrm{d} t +\int_0^\infty\int_{\mathbb{R}^6} f_t(x,v)\varphi_t(x)(\zeta_k(v)-1)\,\mathrm{d} v\,\mathrm{d} x\,\mathrm{d} t\right|\\ &\leq\frac{\|\varphi\|_{L^\infty((0,\infty)\times\mathbb{R}^3)}}{k}\int_0^{T_\varphi}\int_{\mathbb{R}^6}\sqrt{1+|v|^2}f^n_t\,\mathrm{d} x\,\mathrm{d} v\,\mathrm{d} t+\frac{\|\varphi\|_{L^\infty((0,\infty)\times\mathbb{R}^3)}}{k}\int_0^{T_\varphi}\int_{\mathbb{R}^6}\sqrt{1+|v|^2}f_t\,\mathrm{d} x\,\mathrm{d} v\,\mathrm{d} t\\ &\leq \frac{C {T_\varphi}\|\varphi\|_{L^\infty((0,\infty)\times\mathbb{R}^3)}}{k}, \end{split}\] where $T_\varphi$ is the supremum of the time support of $\varphi$. Letting $k\longrightarrow \infty$, we conclude that $\rho^n$ converges to $\rho$ weakly* in $L^\infty((0,\infty);\mathcal{M}_+(\mathbb{R}^3))$, which combined with \eqref{convergences} gives that $\rho=\rho^{\operatorname{eff}}$. Hence, by \eqref{convergences} and \Cref{jeffequalj}, we conclude that $J=J^{\operatorname{eff}}$, and, proceeding as in Steps 4 and 5 of the proof of \Cref{existencegeneral}, we obtain a global Lagrangian (hence renormalized) solution $f_t\in C([0,\infty);L^1_{\operatorname{loc}}(\mathbb{R}^6))$ of \eqref{prin} with initial datum $f_0$. We now prove the remaining properties by a lower semicontinuity argument on the energies of $f^n$. \textbf{Step 1: Bound on the total energy for $\boldsymbol{\mathcal{L}^1}$-almost every time.} Using the weak convergence of $f^n$ (see \eqref{convergences}) with test function $\phi(t)\sqrt{1+|v|^2}\chi_r(x,v)$, where $\phi\in C_c^{\infty}((0,\infty))$ and $\chi_r\in C_c^\infty(\mathbb{R}^6)$ are nonnegative functions, with $\chi_r$ being a cutoff between $B_r$ and $B_{r+1}$, we obtain \[ \int_0^\infty\int_{\mathbb{R}^6} f_t(x,v)\sqrt{1+|v|^2}\phi(t)\chi_r(x,v)\,\mathrm{d} v\,\mathrm{d} x\,\mathrm{d} t\leq\liminf_{n\rightarrow \infty} \int_0^\infty \phi(t)\int_{\mathbb{R}^6} \sqrt{1+|v|^2}f^n_t(x,v)\,\mathrm{d} v\,\mathrm{d} x\,\mathrm{d} t. \] Taking the supremum with respect to $r$, we deduce that \begin{equation}\label{liminfrelativistic} \int_0^\infty \phi(t)\int_{\mathbb{R}^6}\sqrt{1+|v|^2} f_t(x,v)\,\mathrm{d} v\,\mathrm{d} x\,\mathrm{d} t\leq\liminf_{n\rightarrow \infty} \int_0^\infty \phi(t)\int_{\mathbb{R}^6} \sqrt{1+|v|^2}f^n_t(x,v)\,\mathrm{d} v\,\mathrm{d} x\,\mathrm{d} t. \end{equation} Since $\phi$ is arbitrary, we have that $\sqrt{1+|v|^2}f_t\in L^1_{\operatorname{loc}}(\mathbb{R}^6)$ for almost every $t$. Moreover, since we can decompose the density current as $J=V\rho$ (see remark after \Cref{generalized}), where $|V|<1$ a.e.
in spacetime, we have that \[ \sup_{t\in[0,\infty)}\int_{\mathbb{R}^3}|V_t(x)|\,\mathrm{d}\rho_t(x)<\infty, \] hence by \cite[Theorem 8.1.2]{gradientflows}, we have that $\rho_t$ has a weakly* continuous representative. Furthermore, since $\rho^n$ satisfies a similar continuity equation, by the proof of \cite[Theorem 8.1.2]{gradientflows}, we have that \[ \left|\int_{\mathbb{R}^3}(\rho_t^n-\rho_s^n)\varphi\,\mathrm{d} x\right|\leq \|\varphi\|_{C^1(\mathbb{R}^3)}\int_s^t\int_{\mathbb{R}^3}|V^n_r|\rho_r^n\,\mathrm{d} x\,\mathrm{d} r\leq C|t-s| \] for all $\varphi\in C^\infty_c(\mathbb{R}^3)$, which gives that the map $t\longmapsto\int_{\mathbb{R}^3}\varphi\,\mathrm{d} \rho^n_t$ is equicontinuous. By the weak* convergence of $\rho^n$ to $\rho$ in $L^\infty((0,\infty);\mathcal{M}_+(\mathbb{R}^3))$, we have a uniform boundedness, thus Arzel\`{a}-Ascoli theorem implies that \begin{equation}\label{rhonweakconvergence} \lim_{n\rightarrow \infty}\sup_{t\in[0,T]}\left|\int_{\mathbb{R}^3}\varphi\,\mathrm{d} (\rho^n_t-\rho_t)\right|=0 \quad \text{for every } \varphi\in C^\infty_c(\mathbb{R}^3). \end{equation} Combining the above with the fact that $\rho^n_t$ is uniformly bounded with respect to $n$ and $t$, by \Cref{notproven} we obtain \begin{equation}\label{liminfelectric} \int_0^\infty\phi(t)\int_{\mathbb{R}^3}H\ast \rho_t(x)\,\mathrm{d}\rho_t(x)\,\mathrm{d} t\leq \liminf_{n\rightarrow \infty}\int_0^\infty\phi(t)\int_{\mathbb{R}^3}H\ast \eta^{k_n}\ast \rho^n_t(x)\,\mathrm{d}\rho^n_t(x)\,\mathrm{d} t. \end{equation} Combining \eqref{liminfrelativistic}, \eqref{liminfelectric}, and \eqref{conservation1}, we conclude that for $\sigma_E\in\{0,\,1\}$ \[\begin{split} &\int_0^\infty\phi(t)\left(\int_{\mathbb{R}^6}\sqrt{1+|v|^2} f_t(x,v)\,\mathrm{d} v\,\mathrm{d} x+\frac{\sigma_E}{2}\int_{\mathbb{R}^3}H\ast \rho_t(x)\rho_t(x)\,\mathrm{d} x\right)\,\mathrm{d} t\\ &\leq\liminf_{n\rightarrow \infty}\int_0^\infty\phi(t)\left(\int_{\mathbb{R}^6}\sqrt{1+|v|^2} f^n_0(x,v)\,\mathrm{d} v\,\mathrm{d} x+\frac{\sigma_E}{2}\int_{\mathbb{R}^3}H\ast \eta^{k_n}\ast\rho^n_0(x)\rho^n_0(x)\,\mathrm{d} x\right)\,\mathrm{d} t\\ &=\left(\int_0^\infty\phi(t)\,\mathrm{d} t\right)\left(\int_{\mathbb{R}^6}\sqrt{1+|v|^2} f_0(x,v)\,\mathrm{d} v\,\mathrm{d} x+\frac{\sigma_E}{2}\int_{\mathbb{R}^3}H\ast \rho_0(x)\rho_0(x)\,\mathrm{d} x\right). \end{split}\] The case $\sigma_E=-1$ is subtler: by \eqref{liminfrelativistic} and \eqref{conservation1} we have that \[\begin{split} &\int_0^\infty\phi(t)\left(\int_{\mathbb{R}^6}\sqrt{1+|v|^2} f_t(x,v)\,\mathrm{d} v\,\mathrm{d} x-\frac{1}{2}\int_{\mathbb{R}^3}H\ast \rho_t(x)\rho_t(x)\,\mathrm{d} x\right)\,\mathrm{d} t\\ &\leq \liminf_{n\rightarrow \infty} \int_0^\infty \phi(t)\left(\int_{\mathbb{R}^6} \sqrt{1+|v|^2}f^n_t(x,v)\,\mathrm{d} v\,\mathrm{d} x-\frac{1}{2}\int_{\mathbb{R}^3}H\ast \rho_t(x)\rho_t(x)\,\mathrm{d} x\right)\,\mathrm{d} t\\ &\leq \left(\int_0^\infty\phi(t)\,\mathrm{d} t\right)\left(\int_{\mathbb{R}^6}\sqrt{1+|v|^2} f_0(x,v)\,\mathrm{d} v\,\mathrm{d} x-\frac{1}{2}\int_{\mathbb{R}^3}H\ast \rho_0(x)\rho_0(x)\,\mathrm{d} x\right)\\ &+\frac{1}{2}\limsup_{n\rightarrow\infty}\int_0^\infty \phi(t)\left(\int_{\mathbb{R}^3}H\ast \eta^{k_n} \ast\rho^n_t(x)\rho^n_t(x)-H\ast \rho_t(x)\rho_t(x)\,\mathrm{d} x\right)\,\mathrm{d} t \end{split}\] Notice that by \eqref{boundenergyT}, \eqref{ftregularity} and \eqref{calderonsobolev}, we have for every $T>0$, \[ \sup_{t\in[0,T]}\|\rho_t\|_{L^{6/5}(\mathbb{R}^3)}+\sup_{n\in\mathbb{N}}\sup_{t\in[0,T]}\|\rho^n_t\|_{L^{6/5}(\mathbb{R}^3)}<\infty. 
\] Thus, by \Cref{proven}, we obtain that the last term equals $0$. Since $\phi$ was arbitrary and since $f_0$ has every energy bounded, we conclude that $f_t$ has every energy bounded for $\mathcal{L}^1$-almost every $t\in (0,\infty)$. \textbf{Step 2: Bound on the total energy for every time.} Notice that the relativistic energy and the electric potential energy are lower semicontinuous with respect to the strong $L^1_{\operatorname{loc}}$ and the weak* $\mathcal{M}_+$ convergences, respectively. Hence, by the continuity of $t\longmapsto f_t\in L^1(\mathbb{R}^6)$ and of $t\longmapsto \rho_t\in \mathcal{M}_+(\mathbb{R}^3)$ with respect to the $L^1_{\operatorname{loc}}$ and weak* $\mathcal{M}_+$ topologies, respectively, combined with Step 1, for any $t_n\longrightarrow \bar{t}\in [0,\infty)$ such that \eqref{protobound} holds for all $t_n$, we may pass to the limit and obtain \eqref{protobound} for $t=\bar{t}$. \textbf{Step 3: Strong $\boldsymbol{L^1_{\operatorname{loc}}}$-continuity of $\boldsymbol{\rho,\,J,\, E,\, B}$.} Given $t\in[0,\infty)$, let $t_n\longrightarrow t$. Fix $r>0$; then, for any $R>0$, \[ \int_{B_r}\int_{\mathbb{R}^3}|f_{t_n}-f_t|\,\mathrm{d} v\, \mathrm{d} x\leq \int_{B_r}\int_{B_R}|f_{t_n}-f_t|\,\mathrm{d} v\, \mathrm{d} x+ R^{-1}\int_{B_r}\int_{\mathbb{R}^3}\sqrt{1+|v|^2}(f_{t_n}+f_t)\,\mathrm{d} v\, \mathrm{d} x. \] By the uniform boundedness of the relativistic energy with respect to time and the $L^1_{\operatorname{loc}}$-continuity of $f_t$, taking the limit first in $n$ and then in $R$, we conclude that $\rho_{t_n}\longrightarrow \rho_t$ in $L^1_{\operatorname{loc}}$. Moreover, since $|\hat{v}|<1$, we have \[ \int_{B_r}|J_{t_n}-J_t|\, \mathrm{d} x< \int_{B_r}\int_{\mathbb{R}^3}|f_{t_n}-f_t|\,\mathrm{d} v\, \mathrm{d} x\longrightarrow 0, \] thus $J_{t_n}\longrightarrow J_t$ in $L^1_{\operatorname{loc}}$. Finally, since $K\in L^1_{\operatorname{loc}}$ and $|J|(\mathbb{R}^3)<\rho(\mathbb{R}^3)<\infty$, we conclude that $E_t,\, B_t$ are also strongly continuous in $L^1_{\operatorname{loc}}(\mathbb{R}^3)$. \textbf{Step 4: Globally defined flow.} We can combine the fact that $f_t$ has every energy bounded and \Cref{magneticpositive} to obtain that $E_t,\, B_t\in L^\infty([0,\infty);L^2(\mathbb{R}^3))$, thus by \Cref{maincorollary} we conclude that the trajectories of the maximal regular flow starting at any given $t$ do not blow up for $f_t$-almost every $(x,v)\in\mathbb{R}^6$. \textbf{Step 5: Strong $\boldsymbol{L^1}$-continuity of $\boldsymbol{f}$.} By \Cref{existencesolution} and the $L^1_{\operatorname{loc}}$-continuity of $f_t$, we deduce that finite energy solutions conserve mass, i.e., $\rho_t(\mathbb{R}^3)=\rho_0(\mathbb{R}^3)$ for every $t\in [0,\infty)$. In particular, solutions are strongly continuous in $L^1(\mathbb{R}^6)$ and not only in $L^1_{\operatorname{loc}}(\mathbb{R}^6)$ (see \cite[Theorem 4.10]{vlasovpoisson}). \endproof \appendix \section{Derivation}\label{deriv-model} The relativistic Vlasov equation describes the evolution of a function $f:(0,\infty)\times \mathbb{R}^3\times \mathbb{R}^3\longrightarrow [0,\infty)$ under the action of a self-consistent acceleration $A:(0,\infty)\times \mathbb{R}^3\times \mathbb{R}^3\longrightarrow \mathbb{R}^3$: \begin{equation}\label{vlasov} \partial_t f_t(x,v) +\hat{v}\cdot \nabla_x f_t(x,v)+A_t(x,v)\cdot \nabla_v f_t(x,v)=0 \quad \text{ in }\quad (0,\infty)\times \mathbb{R}^3\times \mathbb{R}^3.
\end{equation} In this paper, we consider the acceleration given by \[ A_t(x,v)=g_t(x)+ \frac{q}{m} (E_t(x)+\hat{v}\times B_t(x)), \] where $g_t$, $E_t$, and $B_t$ are the Newtonian gravitational, electric, and magnetic fields, respectively, and $q$ and $m$ are the particle charge and mass. Newtonian gravity implies that $g_t=Gm\nabla(-\Delta)^{-1}\rho_t$, where $G$ is the gravitational constant and $\rho_t$ the density of particles. We study the case in which the electromagnetic field satisfies one of the quasi-static limits of Maxwell's equations (see, for instance, \cite{manfredi} and references therein): \begin{equation}\label{QES} \nabla\cdot E_t=\frac{q}{\epsilon_0}\rho_t,\quad \nabla \cdot B_t=0,\quad \nabla\times E_t=0,\quad \nabla\times B_t= \frac{q}{\epsilon_0}J_t+\partial_t E_t, \end{equation} or \begin{equation}\label{QMS} \nabla\cdot E_t=\frac{q}{\epsilon_0}\rho_t,\quad \nabla \cdot B_t=0,\quad \nabla\times E_t=-\partial_t B_t, \quad \nabla\times B_t=\frac{q}{\epsilon_0} J_t, \end{equation} where $J_t$ is the relativistic particle current density. Equations \eqref{QES} and \eqref{QMS} are known as the quasi-electrostatic (QES) and quasi-magnetostatic (QMS) limits, respectively. The solution of \eqref{QES} can be written as \[ E_t= -\frac{q}{\epsilon_0}\nabla(-\Delta)^{-1}\rho_t \qquad \text{and} \qquad B_t=\frac{q}{\epsilon_0}\nabla\times(-\Delta)^{-1}J_t, \] while the solution of \eqref{QMS} is \[ E_t= -\frac{q}{\epsilon_0}\nabla(-\Delta)^{-1}\rho_t-\frac{q}{\epsilon_0}\partial_t (-\Delta)^{-1}J_t \qquad \text{and} \qquad B_t=\frac{q}{\epsilon_0}\nabla\times(-\Delta)^{-1}J_t. \] Notice that the leading term in the QES limit is the electric field, whereas in the QMS limit it is the magnetic field. Hence, in the QES case, we can write $A_t$ in terms of $\rho_t$ and $J_t$ only: \[ A_t(x,v)=\left(\frac{q^2}{4\pi \,\epsilon_0 m}-Gm\right)\int_{\mathbb{R}^3}\rho_t(y)\frac{x-y}{|x-y|^3}\,\mathrm{d} y + \frac{q^2}{4\pi\, \epsilon_0 m} \, \hat{v} \times \int_{\mathbb{R}^3}J_t(y)\times \frac{x-y}{|x-y|^3}\,\mathrm{d} y, \] where $\epsilon_0$ is the electric permittivity. Next, define the critical charge $q_c$ as \[ q_c\coloneqq \pm \sqrt{4\pi \epsilon_0 G}\,m. \] If $q>q_c$, the electric force is stronger than the gravitational one and, up to rescaling $\rho_t$ and $J_t$, we may write the acceleration as \[ A_t(x,v)= \int_{\mathbb{R}^3}\rho_t(y)K(x-y)\,\mathrm{d} y+\hat{v}\times\int_{\mathbb{R}^3}J_t(y)\times K(x-y)\,\mathrm{d} y. \] Analogously, if $q<q_c$, we can write \[ A_t(x,v)= -\int_{\mathbb{R}^3}\rho_t(y)K(x-y)\,\mathrm{d} y+\hat{v}\times\int_{\mathbb{R}^3}J_t(y)\times K(x-y)\,\mathrm{d} y. \] In both cases, if we drop the magnetic field (since it is a lower order term), we obtain the relativistic Vlasov-Poisson system. Moreover, notice that in the critical case $q=q_c$, only the magnetic force acts in the evolution equation \eqref{vlasov}, which is exactly the same as considering only the leading term in the QMS limit, that is, the relativistic Vlasov-Biot-Savart system. \end{document}
\begin{document} \title[Diophantine stability for elliptic curves on average]{Diophantine stability for elliptic curves on average} \author[A.~Ray]{Anwesh Ray} \address[Ray]{Centre de recherches mathématiques, Université de Montréal, Pavillon André-Aisenstadt, 2920 Chemin de la tour, Montréal (Québec) H3T 1J4, Canada} \email{[email protected]} \author[T.~Weston]{Tom Weston} \address[Weston]{Department of Mathematics, University of Massachusetts, Amherst, MA, USA.} \email{[email protected]} \keywords{} \subjclass[2020]{} \maketitle \begin{abstract} Let $K$ be a number field and $\ell\geq 5$ be a prime number. Mazur and Rubin introduced the notion of \emph{diophantine stability} for a variety $X_{/K}$ at a prime $\ell$. Under the hypothesis that all elliptic curves $E_{/\mathbb{Q}}$ have finite Tate-Shafarevich group, we show that there is a positive density set of elliptic curves $E_{/\mathbb{Q}}$ of rank $1$, such that $E_{/K}$ is diophantine stable at $\ell$. This result has implications for Hilbert's tenth problem for number rings. \end{abstract} \section{Introduction} \par Let $V$ be a variety over a number field $K$ and let $\ell$ be a prime number. Mazur and Rubin introduced the notion of diophantine stability. Given a number field extension $L/K$, the variety $V_{/K}$ is \emph{diophantine stable} in $L$ if $V(L)=V(K)$. For a prime $\ell$ it is said that $V$ is \emph{$\ell$-diophantine stable} over $K$ if for every $n\in \mathbb{Z}_{\geq 1}$ and finite set of primes $\Sigma$ of $K$, there are infinitely many cyclic extensions $L/K$ of degree $\ell^n$, such that \begin{enumerate} \item all primes $v\in \Sigma$ are completely split in $L$, \item $V(L)=V(K)$. \end{enumerate} \begin{theorem}[Mazur-Rubin \cite{mazur2018diophantine}, Theorem 1.2]Let $A_{/K}$ be a simple abelian variety for which all geometric endomorphisms are defined over $K$. Then, there is a set of prime numbers $S$ of positive density such that $A$ is $\ell$-diophantine stable for all $\ell\in S$. \end{theorem} When specialized to elliptic curves, this result has significant consequences for Hilbert's tenth problem for number rings. \begin{corollary}[Mazur-Rubin \cite{mazur2018diophantine}, Corollary 1.6] For every prime $\ell$, there are uncountably many pairwise non-isomorphic totally real fields $L$ of algebraic numbers in $\mathbb{Q}_\ell$ over which the following two statements both hold: \begin{enumerate} \item There is a diophantine definition of $\mathbb{Z}$ in the ring of integers $\mathcal{O}_L$ of $L$. In particular, Hilbert's Tenth Problem has a negative answer for $\mathcal{O}_L$; i.e., there does not exist an algorithm to determine whether a polynomial (in many variables) with coefficients in $\mathcal{O}_L$ has a solution in $\mathcal{O}_L$. \item There exists a first-order definition of the ring $\mathbb{Z}$ in $L$. The first-order theory for such fields $L$ is undecidable. \end{enumerate} \end{corollary} \subsection{Main results} We study statistical questions from a different perspective. Any elliptic curve $E_{/\mathbb{Q}}$ is isomorphic to a unique curve of the form \[E=E_{A,B}:y^2=x^3+Ax+B,\] where $(A,B)\in \mathbb{Z}^2$ is such that for all primes $p$, either $p^4\nmid A$ or $p^6\nmid B$.
Such a Weierstrass equation is minimal, and the (naive) height of $E$ is defined as follows: \[H(E)=H(E_{A, B}):=\op{max}\{|A|^3, B^2\}.\] Let $\mathcal{C}$ be the set of all isomorphism classes of elliptic curves over $\mathbb{Q}$ and $\mathcal{C}(X)$ those with height $\leq X^6$. As is well known, \[\#\mathcal{C}(X)=C_1 X^5+O(X^3),\] where $C_1=\frac{4}{\zeta(10)}$, cf. \cite[Lemma 4.3]{brumer1992average}. Let $\mathcal{S}$ be a subset of isomorphism classes of elliptic curves defined over $\mathbb{Q}$. Set $\mathcal{S}(X):=\mathcal{S}\cap \mathcal{C}(X)=\{E\in \mathcal{S}\mid H(E)\leq X^6\}$. The density of $\mathcal{S}$ is given by \[\delta(\mathcal{S}):=\lim_{X\rightarrow \infty} \frac{\# \mathcal{S}(X)}{\# \mathcal{C}(X)},\] provided the limit exists. \par In any case, we can define the upper and lower densities by \[\overline{\delta}(\mathcal{S}):=\limsup_{X\rightarrow \infty} \frac{\# \mathcal{S}(X)}{\# \mathcal{C}(X)}\] and \[\underline{\delta}(\mathcal{S}):=\liminf_{X\rightarrow \infty} \frac{\# \mathcal{S}(X)}{\# \mathcal{C}(X)}\] respectively. We prove the following result. \begin{lthm}[Theorem \ref{our main result}]\label{main thm 1} Let $\ell\geq 5$ be a prime number and let $K$ be a number field. Then, the set of elliptic curves $E_{/\mathbb{Q}}$ that are $\ell$-diophantine stable over $K$ has density $1$. \end{lthm} In the above result, we make no assumption on the finiteness of the Tate-Shafarevich group. We recall a result of Bhargava and Shankar, which will be shown to have consequences for Hilbert's tenth problem over number rings. \begin{theorem}[Bhargava-Shankar \cite{bhargava2015ternary}, Theorem 5]\label{BS thm} Assume that for all elliptic curves $E_{/\mathbb{Q}}$, the Tate-Shafarevich group $\Sh(E/\mathbb{Q})$ is finite. Then, the upper density of elliptic curves $E_{/\mathbb{Q}}$ with rank $1$ is positive. \end{theorem} \begin{lthm}[Theorem \ref{main thm 2.6}]\label{main thm pos density} Let $\ell\geq 5$ be a prime number and let $K$ be a number field. Assume that for all elliptic curves $E_{/\mathbb{Q}}$, the Tate-Shafarevich group $\Sh(E/\mathbb{Q})$ is finite. Then, the set of elliptic curves $E_{/\mathbb{Q}}$ for which \begin{enumerate} \item $E_{/K}$ is $\ell$-diophantine stable, \item $\op{rank}E(\mathbb{Q})=1$ \end{enumerate} has positive upper density. \end{lthm} \begin{lthm}\label{main thm 2} Let $\ell\geq 5$ be a prime number, let $K$ be a number field, $n\in \mathbb{Z}_{\geq 1}$ and $\Sigma$ be a finite set of primes of $K$. Assume that the following conditions are satisfied. \begin{enumerate} \item For all elliptic curves $E_{/\mathbb{Q}}$, the Tate-Shafarevich group $\Sh(E/\mathbb{Q})$ is finite, \item $\mathbb{Z}$ is a diophantine subset of $\mathcal{O}_K$, and consequently, Hilbert's tenth problem has a negative answer for $\mathcal{O}_K$. \end{enumerate}Then, there are infinitely many degree $\ell^n$ cyclic extensions $L/K$ in which the primes of $\Sigma$ are completely split, such that $\mathbb{Z}$ is diophantine in $\mathcal{O}_L$ and Hilbert's tenth problem has a negative answer for $\mathcal{O}_L$. \end{lthm} The key point is that the above result holds for all primes $\ell\geq 5$. It follows from results of Shlapentokh and Mazur-Rubin that the above assertion holds for a density $1$ set of primes $\ell$. \subsection{Organization} Including the introduction, the article consists of four sections.
In section \ref{s 2}, we recall a criterion of Mazur and Rubin for diophantine stability, cf. Theorem \ref{mazur rubin criterion}. For elliptic curves $E_{/\mathbb{Q}}$, this criterion can be verified if the field cut out by the residual representation satisfies some additional conditions, cf. Proposition \ref{T K l prop}. In section \ref{s 3}, it is shown that the conditions of Theorem \ref{mazur rubin criterion} are satisfied for a set of elliptic curves of density $1$. Theorems \ref{main thm 1} and \ref{main thm pos density} are proven at the end of this section. In section \ref{s 4}, we discuss Hilbert's tenth problem and prove Theorem \ref{main thm 2}. \subsection*{Acknowledgment} The first named author's research is supported by the CRM Simons postdoctoral fellowship. We thank Lea Beneish, Barry Mazur and Ravi Ramakrishna for helpful suggestions. \section{Diophantine stability}\label{s 2} \subsection{Diophantine stability for elliptic curves} \par We review the notion of diophantine stability for an elliptic curve. Throughout, we shall fix a number field $K$ and a prime number $\ell\geq 5$. For $n\in \mathbb{Z}_{\geq 1}$, we say that an extension $L$ of $K$ is a $\mathbb{Z}/\ell^n\mathbb{Z}$-extension if it is a Galois extension of $K$ such that $\op{Gal}(L/K)$ is isomorphic to $\mathbb{Z}/\ell^n \mathbb{Z}$. We recall the notion of diophantine stability, due to Mazur and Rubin \cite{mazur2018diophantine}. \begin{definition}Let $L/K$ be a field extension and let $E_{/K}$ be an elliptic curve. Then, $E_{/K}$ is said to be diophantine stable in $L$ if $E(L)=E(K)$. It is said that $E_{/K}$ is diophantine stable at $\ell$ if for all $n\in \mathbb{Z}_{\geq 1}$ and every finite set of primes $\Sigma$ of $K$, there are infinitely many $\mathbb{Z}/\ell^n \mathbb{Z}$-extensions $L/K$ such that \begin{enumerate} \item $E(L)=E(K)$, \item all primes in $\Sigma$ are completely split in $L$. \end{enumerate} Given an elliptic curve $E_{/\mathbb{Q}}$, we say that $(E,K, \ell)$ satisfies (DS) if $E_{/K}$ is diophantine stable at $\ell$. \end{definition} Given a number field $K$, set $\op{G}_K$ to denote the absolute Galois group $\op{Gal}(\bar{K}/K)$. Let $E_{/\mathbb{Q}}$ be an elliptic curve and denote by $E[\ell]$ the $\ell$-torsion group \[E[\ell]:=\op{ker}\left(E(\bar{\mathbb{Q}})\xrightarrow{\ell} E(\bar{\mathbb{Q}})\right),\] the kernel of multiplication by $\ell$. Set $\bar{\rho}_E=\bar{\rho}_{E, \ell}:\op{G}_{\mathbb{Q}}\rightarrow \op{GL}_2(\mathbb{F}_\ell)$ to denote the Galois representation on $E[\ell]$. Set $\mathbb{Q}(E[\ell])$ to be the field cut out by $E[\ell]$, i.e., $\mathbb{Q}(E[\ell]):=\bar{\mathbb{Q}}^{\op{ker}\bar{\rho}_{E, \ell}}$. \par We recall a criterion for diophantine stability, specialized to rational elliptic curves without complex multiplication. \begin{theorem}[Mazur-Rubin]\label{mazur rubin criterion} Let $K$ be a number field, $E_{/\mathbb{Q}}$ an elliptic curve without complex multiplication, and $\ell\geq 3$ a prime number. Assume that the following conditions hold: \begin{enumerate} \item $E[\ell]$ is an irreducible $\op{G}_K$-module, \item $H^1(K(E[\ell])/K, E[\ell])=0$, \item there is no abelian extension of degree $\ell$ of $K(\mu_\ell)$ contained in $K(E[\ell])$, \item there is $\tau_0\in \op{G}_{K(\mu_\ell)}$ such that $E[\ell]/(\tau_0-1)E[\ell]=0$, \item there is $\tau_1\in \op{G}_{K(\mu_\ell)}$ such that $\op{dim}_{\mathbb{F}_\ell}\left(E[\ell]/(\tau_1-1)E[\ell]\right)=1$. \end{enumerate} Then, $E_{/K}$ is diophantine stable at $\ell$.
\end{theorem} \begin{proof} The result follows from \cite[Theorem 9.21]{mazur2018diophantine}. \end{proof} In the next section, we prove the following result. \begin{theorem}\label{our main result} Let $\ell\geq 5$ be a prime number and let $K$ be a number field. Then, the set of elliptic curves $E_{/\mathbb{Q}}$ for which the conditions of Theorem \ref{mazur rubin criterion} are satisfied has density $1$. \end{theorem} \begin{definition}\label{s k l def} Let $\mathcal{S}_{K, \ell}$ be the set of elliptic curves $E_{/\mathbb{Q}}$ such that \begin{enumerate} \item $\op{rank}E(\mathbb{Q})=1$, \item $(E,K,\ell)$ satisfies (DS). \end{enumerate} \end{definition} \begin{theorem}\label{main thm 2.6} Let $\ell\geq 5$ be a prime number and $K$ be any number field. Assume that for all elliptic curves $E_{/\mathbb{Q}}$, the Tate-Shafarevich group $\Sh(E/\mathbb{Q})$ is finite. Then, $\mathcal{S}_{K, \ell}$ has positive upper density. \end{theorem} \begin{proof} This is a direct consequence of Theorem \ref{our main result} and Theorem \ref{BS thm}. \end{proof} \subsection{A criterion for diophantine stability in terms of the residual image} In the remainder of this section, we shall introduce some further notation and establish a criterion for the conditions of Theorem \ref{mazur rubin criterion} to be satisfied. We fix $(K, \ell)$, where $K$ is a number field and $\ell\geq 5$ is a prime number. We let $E_{/\mathbb{Q}}$ be an elliptic curve and recall that \[\bar{\rho}_E:\op{G}_{\mathbb{Q}}\rightarrow \op{GL}_2(\mathbb{F}_\ell)\] denotes the representation on $E[\ell]$. Set $\mathbf{1}$ to denote the identity element of $\op{GL}_2(\mathbb{F}_\ell)$ and $\langle -\mathbf{1}\rangle$ the subgroup of order $2$ generated by $-\mathbf{1}$. We set $\op{GL}_2'(\mathbb{F}_\ell):=\frac{\op{GL}_2(\mathbb{F}_\ell)}{\langle -\mathbf{1}\rangle}$ and denote by \[\bar{\rho}_E':\op{G}_{\mathbb{Q}}\rightarrow \op{GL}_2'(\mathbb{F}_\ell)\] the homomorphism which is the composite of $\bar{\rho}_E$ with the natural quotient map. The determinant character $\op{det}\bar{\rho}_E$ is the mod-$\ell$ cyclotomic character $\chi_\ell$, and thus the kernel of $\op{det}\bar{\rho}_E$ is $\op{G}_{\mathbb{Q}(\mu_\ell)}$. Note that $\langle -\mathbf{1}\rangle$ lies in the kernel of the determinant map, and hence, $\op{det}:\op{GL}_2(\mathbb{F}_\ell)\rightarrow \mathbb{F}_\ell^\times$ factors through $\op{GL}_2'(\mathbb{F}_\ell)$. Note that the restriction of $\bar{\rho}_E$ and $\bar{\rho}_E'$ to $\op{G}_{\mathbb{Q}(\mu_\ell)}$ gives rise to homomorphisms $\bar{\rho}_{E}:\op{G}_{\mathbb{Q}(\mu_\ell)}\rightarrow \op{SL}_2(\mathbb{F}_\ell)$ and $\bar{\rho}_{E}':\op{G}_{\mathbb{Q}(\mu_\ell)}\rightarrow \op{PSL}_2(\mathbb{F}_\ell)$ respectively. Galois, in a letter to Chevalier, showed that the group $\op{PSL}_2(\mathbb{F}_\ell)$ is simple when $\ell\geq 5$ \cite[p.\ 412]{galois1846works}. This property will prove to be very useful in establishing our results. \par Given a number field $F$, a finite group $\mathcal{G}$, and a homomorphism $\varrho: \op{G}_F\rightarrow \mathcal{G}$, we let $F(\varrho)$ denote the field $\bar{F}^{\op{ker}\varrho}$. We identify the Galois group $\op{Gal}(F(\varrho)/F)$ with the image of $\varrho$. We refer to $F(\varrho)$ as the extension of $F$ cut out by $\varrho$. In particular, when $\varrho$ is surjective, $F(\varrho)$ is a $\mathcal{G}$-extension of $F$. With respect to the above notation, we set $F(\bar{\rho}_E)$ (resp.
$F(\bar{\rho}_E')$) to be the extension of $F$ cut out by $\bar{\rho}_E$ (resp. $\bar{\rho}_E'$). We shall also write $F(E[\ell])$ to denote the field $F(\bar{\rho}_E)$. Let $G_E$ (resp. $G_E'$) denote the Galois group $\op{Gal}(\mathbb{Q}(\bar{\rho}_E)/\mathbb{Q})$ (resp. $\op{Gal}(\mathbb{Q}(\bar{\rho}_E')/\mathbb{Q})$). Set $D:=\op{Gal}(\mathbb{Q}(\bar{\rho}_E)/\mathbb{Q}(\bar{\rho}_E'))$ and identify $G_E'$ with $G_E/D$. The residual representation $\bar{\rho}_E$ induces an injection \[\bar{\rho}_E:G_E\hookrightarrow \op{GL}_2(\mathbb{F}_\ell),\] and $D$ lies in the kernel of the determinant character \begin{equation}\label{det character}\op{det}\bar{\rho}_E: G_E\hookrightarrow \op{GL}_2(\mathbb{F}_\ell)\rightarrow \mathbb{F}_\ell^\times.\end{equation} The character $G_E\xrightarrow{\op{det}\bar{\rho}_E} \mathbb{F}_\ell^\times$ \eqref{det character} thus factors through the quotient $G_E'$. Let $H_E$ (resp. $\bar{H}_E$) denote the kernel of $G_E\xrightarrow{\op{det}\bar{\rho}_E}\mathbb{F}_\ell^\times$ (resp. $G_E'\xrightarrow{\op{det}\bar{\rho}_E}\mathbb{F}_\ell^\times$). Note that $D$ is contained in $H_E$ and $\bar{H}_E=H_E/D$. We find that $H_E=\op{Gal}(\mathbb{Q}(\bar{\rho}_E)/\mathbb{Q}(\mu_\ell))$, while $\mathbb{Q}(\mu_\ell)$ is contained $\mathbb{Q}(\bar{\rho}_E')$ and $\bar{H}_E=\op{Gal}(\mathbb{Q}(\bar{\rho}_E')/\mathbb{Q}(\mu_\ell))$. Set $\tilde{K}$ to be the Galois closure of $K$ over $\mathbb{Q}$. \begin{definition}\label{def T K L} Let $\mathcal{T}_{K, \ell}$ be the set of elliptic curves $E_{/\mathbb{Q}}$ such that the following conditions are satisfied \begin{enumerate} \item $\bar{\rho}_E'$ is surjective, \item $\tilde{K}(\mu_\ell)$ does not contain $\mathbb{Q}(\bar{\rho}_E')$. \end{enumerate} \end{definition} Note that if $\bar{\rho}'_E$ is surjective, then the image of $\bar{\rho}_E$ is not solvable. In particular, $E$ does not have complex multiplication. \begin{lemma}\label{lemma 3.6} For $E\in \mathcal{T}_{K, \ell}$, \[\op{Gal}(K(\bar{\rho}_E')/K(\mu_\ell))\simeq \op{PSL}_2(\mathbb{F}_\ell).\] \end{lemma} \begin{proof} Note that since $\bar{\rho}_E'$ is surjective, we find that $\op{Gal}(\mathbb{Q}(\bar{\rho}_E')/\mathbb{Q}(\mu_\ell))\simeq \op{PSL}_2(\mathbb{F}_\ell)$. On the other hand, since $\op{PSL}_2(\mathbb{F}_\ell)$ is simple, and $\tilde{K}(\mu_\ell)$ does not contain $\mathbb{Q}(\bar{\rho}_E')$, it follows that $\tilde{K}(\mu_\ell)\cap \mathbb{Q}(\bar{\rho}_E')=\mathbb{Q}(\mu_\ell)$. This implies that $K(\mu_\ell)\cap \mathbb{Q}(\bar{\rho}_E')=\mathbb{Q}(\mu_\ell)$ and therefore we conclude that \[\op{Gal}(K(\bar{\rho}_E')/K(\mu_\ell))\simeq \op{Gal}(\mathbb{Q}(\bar{\rho}_E')/\mathbb{Q}(\mu_\ell))\simeq \op{PSL}_2(\mathbb{F}_\ell).\] \end{proof} \begin{proposition}\label{T K l prop} For every elliptic curve $E\in \mathcal{T}_{K,\ell}$, $(E,K, \ell)$ satisfies (DS). \end{proposition} \begin{proof} It suffices to show that the five conditions of Theorem \ref{mazur rubin criterion} are satisfied. Note that Lemma \ref{lemma 3.6} asserts that $\op{Gal}(K(\bar{\rho}_E')/K(\mu_\ell))\simeq \op{PSL}_2(\mathbb{F}_\ell)$. \begin{enumerate} \item Since $\op{Gal}(K(\bar{\rho}_E')/K(\mu_\ell))\simeq \op{PSL}_2(\mathbb{F}_\ell)$, $E[\ell]$ is irreducible as a $\op{G}_K$-module. \item This part follows from \cite{lawson2017vanishing} or \cite[Lemma 2.2]{prasad2021relating}. We summarize the argument here. Let $G\subseteq \op{GL}_2(\mathbb{F}_\ell)$ denote the image of $\bar{\rho}_{E|\op{G}_K}$. 
Note that since $\op{Gal}(K(\bar{\rho}_E')/K(\mu_\ell))\simeq \op{PSL}_2(\mathbb{F}_\ell)$, $G$ contains an element of order $\ell$. Clearly, $G$ is not contained in a Borel subgroup. It follows from \cite[Proposition 3.1]{sutherland2016computing} that $G$ contains $\op{SL}_2(\mathbb{F}_\ell)$. In particular, $G$ contains the negative identity $-\mathbf{1}$. Identify $G$ with $\op{Gal}(K(E[\ell])/K)$ and let $\Delta$ be the subgroup of $G$ generated by $-\mathbf{1}$. Inflation-restriction yields an exact sequence \[0\rightarrow H^1(G/\Delta, E[\ell]^\Delta)\rightarrow H^1(G, E[\ell])\rightarrow H^1(\Delta, E[\ell]).\] Since $E[\ell]^\Delta=0$, it follows that $H^1(G/\Delta, E[\ell]^\Delta)=0$. Since $\Delta$ has order $2$, its order is coprime to that of $E[\ell]$ and we find that $H^1(\Delta, E[\ell])=0$. Therefore, we conclude from the above exact sequence that $H^1(G, E[\ell])=0$. \item Since $\op{PSL}_2(\mathbb{F}_\ell)$ is simple, $\op{Gal}(K(\bar{\rho}_E')/K(\mu_\ell))\simeq \op{PSL}_2(\mathbb{F}_\ell)$ and the degree $[K(E[\ell]):K(\bar{\rho}_E')]$ is prime to $\ell$, it follows that there is no abelian extension of degree $\ell$ of $K(\mu_\ell)$ contained in $K(E[\ell])$. \item Since $\bar{\rho}_E'(\op{G}_{K(\mu_\ell)})$ is isomorphic to $\op{PSL}_2(\mathbb{F}_\ell)$, it is clear that there is a diagonal element $\mtx{a}{}{}{a^{-1}}\in \bar{\rho}_E(\op{G}_{K(\mu_\ell)})$ such that $a\neq \pm 1$. Let $\tau_0\in \op{G}_{K(\mu_\ell)}$ be chosen so that $\bar{\rho}_E(\tau_0)=\mtx{a}{}{}{a^{-1}}$. Then, it follows that $E[\ell]/(\tau_0-1)E[\ell]=0$. \item It is easy to see that $\bar{\rho}_E(\op{G}_{K(\mu_\ell)})$ contains a unipotent element $\mtx{1}{b}{}{1}$ with $b\neq 0$. Let $\tau_1\in \op{G}_{K(\mu_\ell)}$ be such that $\bar{\rho}_E(\tau_1)=\mtx{1}{b}{}{1}$; we then find that \[\op{dim}_{\mathbb{F}_\ell}\left(E[\ell]/(\tau_1-1)E[\ell]\right)=1.\] \end{enumerate} \end{proof} \section{A refinement of Duke's theorem}\label{s 3} \par Recall from Definition \ref{def T K L} that $\mathcal{T}_{K, \ell}$ is the set of elliptic curves $E_{/\mathbb{Q}}$ such that the following conditions are satisfied \begin{enumerate} \item $\bar{\rho}_E'$ is surjective, \item $\tilde{K}(\mu_\ell)$ does not contain $\mathbb{Q}(\bar{\rho}_E')$. \end{enumerate} According to Proposition \ref{T K l prop}, for every elliptic curve $E\in \mathcal{T}_{K, \ell}$, $(E,K,\ell)$ satisfies (DS). We prove that the set of elliptic curves $\mathcal{T}_{K, \ell}$ has density $1$. Let $\mathcal{T}_\ell'$ be the set of elliptic curves $E_{/\mathbb{Q}}$ for which $\bar{\rho}_E'$ is surjective. We shall assume throughout this section that $\ell\geq 5$. Note that the elliptic curves $E\in \mathcal{T}_\ell'$ do not have complex multiplication. Duke \cite{duke1997elliptic} showed that the set of elliptic curves $E_{/\mathbb{Q}}$ for which $\bar{\rho}_E:\op{G}_{\mathbb{Q}}\rightarrow \op{GL}_2(\mathbb{F}_\ell)$ is surjective has density $1$. In particular, it follows from Duke's result that the set $\mathcal{T}_\ell'$ has density $1$. The result proven in \emph{loc. cit.} is much stronger. A prime $\ell$ is an \emph{exceptional prime} of an elliptic curve $E_{/\mathbb{Q}}$ if the residual representation at $\ell$ is not surjective. It is proven there that the set of elliptic curves $E_{/\mathbb{Q}}$ with no exceptional primes has density $1$.
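For instance, exceptional primes do occur for individual curves: the elliptic curve $y^2+y=x^3-x^2$ of conductor $11$ has a rational point of order $5$, so its mod-$5$ residual representation is reducible and $\ell=5$ is an exceptional prime for this curve.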
An elliptic curve $E_{/\mathbb{Q}}$ is said to be a \emph{Serre curve} if the image of its associated adelic Galois representation has index $2$ in $\op{GL}_2(\widehat{\mathbb{Z}})$. Jones \cite{jones2010almost} proves that the set of Serre curves has density $1$. \par Let $A_{/\mathbb{Q}}$ be an elliptic curve and let $\mathcal{T}_A$ be the set of elliptic curves $E_{/\mathbb{Q}}$ such that $\bar{\rho}_E'\simeq \bar{\rho}_A'$. Given an elliptic curve $E$, we write $N_E$ for its conductor. At a prime $p$, let $\op{G}_p$ denote the absolute Galois group $\op{Gal}(\bar{\mathbb{Q}}_p/\mathbb{Q}_p)$ and $\sigma_p\in \op{G}_p$ be a lift of the Frobenius element. For $p\nmid N_E\ell$, \[\op{trace}\bar{\rho}_E(\sigma_p) \equiv a_p(E)\pmod{\ell};\] set $t_p(E)\in \mathbb{F}_\ell$ to denote $\op{trace}\bar{\rho}_E(\sigma_p)$. For $E\in \mathcal{T}_A$, we find that \begin{equation}\label{t_p(A)=pm t_p(E)}t_p(E)=\pm t_p(A)\end{equation} for all primes $p\nmid N_EN_A\ell$. \par Let $E_1$ and $E_2$ be elliptic curves over $\mathbb{Q}$ and let $(t_1, t_2, d)\in \mathbb{F}_\ell^3$ be such that $d\neq 0$. Let $\pi(X, d, \ell)$ be the number of prime numbers $p\leq X$ such that $p\equiv d\pmod{\ell}$, and set $\pi_{E_1, E_2}(X, t_1, t_2, d, \ell)$ to be the number of prime numbers $p\leq X$ such that \begin{enumerate} \item $p\nmid N_{E_1}N_{E_2}$, \item $p\equiv d\pmod{\ell}$, \item $t_p(E_i)=t_i$ for $i=1,2$. \end{enumerate} For $t\in \mathbb{F}_\ell$, set $\chi_{t,d,\ell}$ to denote the Kronecker symbol $\left(\frac{t^2-4d}{\ell}\right)$, set \[\delta(t,d, \ell) :=\left(\frac{\ell+\chi_{t,d,\ell}}{\ell^2-1}\right),\] and set \begin{equation}\label{def of delta}\delta=\delta(t_1, t_2, d, \ell):=\delta(t_1,d, \ell)\delta(t_2,d, \ell).\end{equation} For an integer $n$, set $H(n)$ to denote the \emph{Hurwitz class number}. We refer to \cite[p. 816, l. 24]{duke1997elliptic} or \cite[p.293]{cox2022primes} for the definition. Let $E=E_{r,s}$ be the elliptic curve with Weierstrass equation $y^2=x^3+rx+s$, and assume that this equation is minimal. Let $\Delta_E$ be the discriminant of $E$. The Frobenius trace $a_p(E)$ depends only on $(r,s)$, and \[a_p(E)=a_{r,s}(p):=-\sum_{x \in \mathbb{F}_p}\left(\frac{x^3+rx+s}{p}\right).\] Let $p\geq 5$ be a prime number. We recall a well known formula of Deuring for the number of elliptic curves over $\mathbb{F}_p$ which have a preassigned number of points. \begin{theorem}[Deuring]\label{deuring thm} Let $p\geq 5$ be a prime and $N=p+1-a$ be an integer such that $|a|<2\sqrt{p}$. Then the number of Weierstrass equations for elliptic curves $E_{/\mathbb{F}_p}$ for which $\#E(\mathbb{F}_p)=N$ is $\left(\frac{p-1}{2}\right)H(4p-a^2)$. In other words, \[\# \left\{(r,s)\in (\mathbb{Z}/p\mathbb{Z})^2\mid 4r^3+27s^2\neq 0\text{ and }a_{r,s}(p)=a\right\}=\left(\frac{p-1}{2}\right)H(4p-a^2).\] \end{theorem} \begin{proof} We refer to \cite[Theorem 14.18]{cox2022primes} for a proof of the result. \end{proof} Recall from the previous section that $\mathcal{C}$ is the set of all isomorphism classes of elliptic curves over $\mathbb{Q}$ and $\mathcal{C}(X)$ those with height $\leq X^6$. Denote by $\mathcal{C}(X)^2$ the Cartesian product $\mathcal{C}(X)\times \mathcal{C}(X)$ consisting of all pairs of elliptic curves $(E_1, E_2)$ defined over $\mathbb{Q}$. Let $f(X)$ and $g(X)$ be functions of $X$ taking on positive values.
We write $f(X)\ll g(X)$, or equivalently, $f(X)=O(g(X))$, to mean that there exists a constant $C>0$, independent of $X$, such that $f(X)\leq C g(X)$. \begin{proposition}\label{main prop 3.1} Let $(t_1, t_2, d)\in \mathbb{F}_\ell^3$ be such that $d\neq 0$. Then, with respect to the notation above, \[\frac{1}{\# \mathcal{C}(X)^2}\sum_{(E_1, E_2)\in \mathcal{C}(X)^2}\left(\pi_{E_1, E_2}(X, t_1, t_2, d, \ell)-\delta\pi(X, d, \ell)\right)^2\ll X.\] \end{proposition} \begin{proof} The proof of this result follows from a similar argument to that of \cite[Theorem 2]{duke1997elliptic}. For each prime $p$, let $\Omega(p)$ be a prescribed subset of $(\mathbb{Z}/p\mathbb{Z})^n$. For $m\in \mathbb{Z}^n$, set \[P(X;m):=\#\{p\leq X\mid m\!\!\mod{p}\in \Omega(p)\},\] \[P(X)=\sum_{p\leq X} \#\Omega(p) p^{-n}.\] Let $\mathcal{B}$ be a box in $\mathbb{R}^n$ whose sides are parallel to the coordinate planes, with minimum width $W(\mathcal{B})$ and volume $V(\mathcal{B})$. Then, the large sieve inequality states that if $W(\mathcal{B})\geq X^2$, then \begin{equation}\label{large sieve ineq}\sum_{\mathcal{B}\cap \mathbb{Z}^n} (P(X;m)-P(X))^2\ll V(\mathcal{B})P(X).\end{equation} We refer to the proof of \cite[Lemma 1]{duke1997elliptic} for the standard references on this theme. \par Taking $n=4$, let us define $\Omega(p)$ to be empty if $p=2,3$ or $p\not \equiv d\mod{\ell}$. For $p>3$ such that $p\equiv d\mod{\ell}$, set \[\Omega(p):=\{(r_1, s_1, r_2, s_2)\in (\mathbb{Z}/p\mathbb{Z})^4\mid 4r_i^3+27s_i^2\neq 0\text{ and } a_{r_i, s_i}(p)\equiv t_i\mod{\ell}\text{ for }i=1,2\}.\] For primes $p>3$, with $p\equiv d\mod{\ell}$, it follows from Theorem \ref{deuring thm} (or \cite[Lemma 2]{duke1997elliptic}) that \[\begin{split} &\# \{(r_1, s_1, r_2, s_2)\in (\mathbb{Z}/p\mathbb{Z})^4\mid 4r_i^3+27s_i^2\neq 0, a_{r_i, s_i}(p)=a_i\text{ for }i=1,2\} \\ =& \left(\frac{p-1}{2}\right)^2 H(4p-a_1^2)H(4p-a_2^2).\end{split}\] We find that \begin{equation}\label{H estimate delta}\begin{split}\#\Omega(p)=& \left(\frac{p-1}{2}\right)^2\prod_{i=1}^2\left(\sum_{a_i\equiv t_i\mod{\ell}}H(4p-a_i^2)\right)\\ =& \delta p^4+O(\ell p^{7/2}), \end{split}\end{equation} where the second equality follows from \cite[Lemma 3]{duke1997elliptic}, which states that \[\left(\sum_{a_i\equiv t_i\mod{\ell}}H(4p-a_i^2)\right)=2\delta(t_i, d, \ell)p+O(\ell p^{1/2}).\] Recall that $P(X)=\sum_{p\leq X} \#\Omega(p) p^{-n}$, and therefore, from the estimate \eqref{H estimate delta}, we find that \begin{equation}\label{est 1}\begin{split} P(X)= & \sum_{p\equiv d \mod{\ell}; p\leq X} \delta + O\left(\ell \sum_{p\equiv d \mod{\ell}; p\leq X} p^{-1/2}\right)\\ = & \delta \pi(X, d, \ell)+O(X^{1/2}). \end{split}\end{equation} The second estimate is the same as that of \cite[p.817, l.\ 4]{duke1997elliptic}.
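For completeness, we record the elementary estimate behind the error term in \eqref{est 1} (recall that $\ell$ is fixed): \[\ell \sum_{p\equiv d \mod{\ell};\, p\leq X} p^{-1/2}\leq \ell\sum_{n\leq X} n^{-1/2}\leq \ell\left(1+\int_1^X t^{-1/2}dt\right)\leq 2\ell X^{1/2}=O(X^{1/2}),\] while the main term is $\sum_{p\equiv d \mod{\ell};\, p\leq X} \delta=\delta \pi(X, d, \ell)$.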
Let $(E_1, E_2)\in \mathcal{C}(X)\times \mathcal{C}(X)$, with minimal Weierstrass equation \[E_i: y^2=x^3+r_i x +s_i.\] Then, it is easy to see that \begin{equation}\label{est 2}P(X; (r_1, s_1, r_2, s_2))=\pi_{E_1, E_2} (X, t_1, t_2, d, \ell)+O(\op{log} X).\end{equation} We take \[\mathcal{B}:=\{(r_1, s_1, r_2, s_2)\in \mathbb{R}^4\mid |r_i|\leq X^2\text{ and } |s_i|\leq X^3\},\] and from \eqref{large sieve ineq}, \eqref{est 1} and \eqref{est 2}, we deduce that \[\begin{split}& \frac{1}{\# \mathcal{C}(X)^2}\sum_{E_1\in \mathcal{C}(X)}\sum_{E_2\in \mathcal{C}(X)}\left(\pi_{E_1, E_2}(X, t_1, t_2, d, \ell)-\delta\pi(X, d, \ell)\right)^2 \\ \ll & \frac{1}{X^{10}}\sum_{\mathcal{B}\cap \mathbb{Z}^n} (P(X;m)-P(X))^2 \ll P(X) \ll X.\end{split}\] This proves the result. \end{proof} \begin{proposition}\label{T_A density 0} Let $A_{/\mathbb{Q}}$ be an elliptic curve. Then \[\frac{\#\mathcal{T}_A(X)}{\# \mathcal{C}(X)}=O\left(\frac{\op{log}(X)}{\sqrt{X}}\right).\] In particular, the set $\mathcal{T}_A$ has density $0$ in $\mathcal{C}$. \end{proposition} \begin{proof} We choose a triple $w=(a,b, d)\in \mathbb{F}_\ell^3$ such that $a\neq \pm b$ and $d\neq 0$. For any pair $(E_1,E_2)\in \mathcal{T}_A\times \mathcal{T}_A$, we find that \eqref{t_p(A)=pm t_p(E)} implies that the relation $t_p(E_1)=\pm t_p(E_2)$ holds. In particular, $(t_p(E_1), t_p(E_2))\neq (a,b)$. Therefore, for $(E_1, E_2)\in \mathcal{T}_A\times \mathcal{T}_A$, \[\pi_{E_1, E_2}(X, a, b, d,\ell)=0.\] \par Invoking Proposition \ref{main prop 3.1}, we find that \[\frac{1}{\# \mathcal{C}(X)^2}\sum_{E_1\in \mathcal{C}(X)}\sum_{E_2\in \mathcal{C}(X)}\left(\pi_{E_1, E_2}(X, a, b, d,\ell)-\delta\pi(X, d, \ell)\right)^2\leq CX.\] In particular, this implies that \[\frac{1}{\# \mathcal{C}(X)^2}\sum_{E_1\in \mathcal{T}_A(X)}\sum_{E_2\in \mathcal{T}_A(X)}\left(\pi_{E_1, E_2}(X, a, b, d,\ell)-\delta\pi(X, d, \ell)\right)^2\leq CX.\] Since $\pi_{E_1, E_2}(X, a, b, d,\ell)=0$ for $(E_1, E_2)\in \mathcal{T}_A\times \mathcal{T}_A$, we find that \[\left(\frac{\#\mathcal{T}_A(X)}{\# \mathcal{C}(X)}\right)^2\leq \frac{C}{\delta^2}\frac{X}{\pi(X, d, \ell)^2}=O\left(\frac{\op{log}(X)^2}{X}\right),\] and thus, \[\frac{\#\mathcal{T}_A(X)}{\# \mathcal{C}(X)}=O\left(\frac{\op{log}(X)}{\sqrt{X}}\right).\] Since $\lim_{X\rightarrow \infty}\frac{\op{log}(X)}{\sqrt{X}}=0$, the result follows. \end{proof} \begin{proof}[Proof of Theorem \ref{main thm 1}/ Theorem \ref{our main result}] According to Proposition \ref{T K l prop}, for every elliptic curve $E\in \mathcal{T}_{K, \ell}$, the triple $(E, K, \ell)$ satisfies (DS) and the conditions of Theorem \ref{mazur rubin criterion} are satisfied. Thus, it suffices to show that $\mathcal{T}_{K, \ell}$ has density $1$. Let $L$ denote $\tilde{K}(\mu_\ell)$ and $\mathcal{D}_{K, \ell}$ be the set of elliptic curves $E_{/\mathbb{Q}}$ such that \begin{enumerate} \item $\bar{\rho}_E'$ is surjective, \item $\tilde{K}(\mu_\ell)$ contains $\mathbb{Q}(\bar{\rho}_E')$. \end{enumerate} Note that $\mathcal{T}_{K, \ell}\cup \mathcal{D}_{K, \ell}$ consists of all elliptic curves $E_{/\mathbb{Q}}$ for which $\bar{\rho}_E'$ is surjective. By the main result of \cite{duke1997elliptic} discussed at the start of this section, their union has density $1$. Therefore, it suffices to show that $\mathcal{D}_{K, \ell}$ has density $0$.
Let $F$ be a subfield of $L$ and $\mathcal{D}_F$ be the set of elliptic curves $E_{/\mathbb{Q}}$ such that \begin{enumerate} \item $\bar{\rho}_E'$ is surjective, \item $\mathbb{Q}(\bar{\rho}_E')=F$. \end{enumerate} There are finitely many number fields $F$ contained in $L$. Therefore, it suffices to show that $\mathcal{D}_F$ has density $0$. Without loss of generality, we may assume that $\mathcal{D}_F$ is nonempty. Suppose that $A_1, A_2\in \mathcal{D}_F$. Then, since \[\mathbb{Q}(\bar{\rho}_{A_1}')\simeq \mathbb{Q}(\bar{\rho}_{A_2}'),\] it follows that $\bar{\rho}_{A_1}'\simeq \eta\circ\bar{\rho}_{A_2}'$, where $\eta\in \op{Aut}\left(\op{GL}_2'(\mathbb{F}_\ell)\right)$. The group $\op{Aut}\left(\op{GL}_2'(\mathbb{F}_\ell)\right)$ is finite. Therefore, we find that $\mathcal{D}_F$ is a finite union $\bigcup_{i=1}^n \mathcal{T}_{A_i}$. Proposition \ref{T_A density 0} asserts that $\mathcal{T}_{A}$ has density $0$ for any elliptic curve $A_{/\mathbb{Q}}$. Being a finite union of density $0$ sets, $\mathcal{D}_F$ has density $0$ as well. Since $\mathcal{D}_{K, \ell}$ is a finite union of sets of the form $\mathcal{D}_{F}$, it follows that $\mathcal{D}_{K, \ell}$ has density $0$, and therefore, $\mathcal{T}_{K,\ell}$ has density $1$. This proves the result. \end{proof} \begin{proof}[Proof of Theorem \ref{main thm pos density}] The result is an immediate consequence of Theorem \ref{main thm 2.6}. \end{proof} \section{Hilbert's tenth problem}\label{s 4} \par In this section, we prove Theorem \ref{main thm 2}. Let us first recall the notion of an integrally diophantine extension of number fields, in the sense of \cite[Section 1.2]{shlapentokh2007hilbert}. Let $A$ be a commutative ring with identity, and $A^n$ be the free $A$-module of rank $n$, consisting of tuples $a=(a_1, \dots, a_n)$ with entries in $A$. Let $m$ and $n$ be positive integers and let $a=(a_1, \dots, a_n)\in A^n$ and $b=(b_1, \dots, b_m)\in A^m$. Denote by $(a,b)\in A^{n+m}$ the tuple $(a_1, \dots, a_n, b_1, \dots, b_m)$. Given a finite set of polynomials $F_1,\dots, F_k$, we set \[\mathcal{F}(a; F_1, \dots, F_k):=\{b\in A^m\mid F_i(a,b)=0\text{ for all }i=1,\dots, k\}.\] \begin{definition}\label{diophantine subset}A subset $S$ of $A^n$ is a \emph{diophantine subset} of $A^n$ if for some $m\geq 1$, there are polynomials $F_1, \dots, F_k\in A[x_1, \dots, x_n, y_1, \dots, y_m]$ such that $S$ consists of all $a\in A^n$ for which the set $\mathcal{F}(a; F_1, \dots, F_k)$ is nonempty. \end{definition} \begin{definition} An extension of number fields $L/K$ is said to be \emph{integrally diophantine} if $\mathcal{O}_K$ is a diophantine subset of $\mathcal{O}_L$. \end{definition} Let $L', L$ and $K$ be number fields such that \[L'\supseteq L\supseteq K.\] Suppose that $L'/L$ and $L/K$ are integrally diophantine extensions. Then, it is a well known fact that $L'/K$ is an integrally diophantine extension. Indeed, this is a special case of \cite[Theorem 2.1.15]{shlapentokh2007hilbert}. \par We recall a conjecture of Denef and Lipshitz \cite{denef1978diophantine}. \begin{conjecture}[Denef-Lipshitz]\label{denef lipchitz conjecture} For any number field $L$, $L/\mathbb{Q}$ is an integrally diophantine extension. \end{conjecture} The celebrated result of Matiyasevich proves that Hilbert's tenth problem has a negative answer over $\mathbb{Z}$. If $L$ is a number field for which Conjecture \ref{denef lipchitz conjecture} holds, then Hilbert's tenth problem has a negative answer for $\mathcal{O}_L$: a diophantine definition of $\mathbb{Z}$ in $\mathcal{O}_L$ allows one to translate any diophantine problem over $\mathbb{Z}$ into one over $\mathcal{O}_L$, so an algorithm deciding solvability over $\mathcal{O}_L$ would yield one over $\mathbb{Z}$.
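Such translations rest on explicit diophantine definitions. As a classical illustration of Definition \ref{diophantine subset} (with $A=\mathbb{Z}$, $n=1$ and $m=4$), the set of non-negative integers is a diophantine subset of $\mathbb{Z}$: by Lagrange's four-square theorem, $a\geq 0$ if and only if $\mathcal{F}(a; F_1)\neq \emptyset$ for the single polynomial $F_1(x,y_1,\dots,y_4)=x-y_1^2-y_2^2-y_3^2-y_4^2$.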
New cases of Conjecture \ref{denef lipchitz conjecture} are established via the following criterion of Shlapentokh. \begin{theorem}[Shlapentokh]\label{shlap 1} Let $L/K$ be an extension of number fields and suppose that there exists an elliptic curve $E_{/K}$ such that $\op{rank}E(L)=\op{rank}E(K)>0$. Then, $L/K$ is integrally diophantine. In particular, if Hilbert's tenth problem has a negative answer for $\mathcal{O}_K$, then Hilbert's tenth problem has a negative answer for $\mathcal{O}_L$. \end{theorem} \begin{proof}[Proof of Theorem \ref{main thm 2}] By Theorem \ref{main thm 2.6}, there exists an elliptic curve $E_{/\mathbb{Q}}$ with $\op{rank}E(\mathbb{Q})=1$ such that $E_{/K}$ is diophantine stable at $\ell$. In particular, $\op{rank}E(K)\geq \op{rank}E(\mathbb{Q})>0$, and there are infinitely many $\mathbb{Z}/\ell^n\mathbb{Z}$-extensions $L/K$ in which all primes of $\Sigma$ are completely split and for which $E(L)=E(K)$, so that $\op{rank}E(L)=\op{rank}E(K)>0$. For each such $L$, Theorem \ref{shlap 1} shows that $L/K$ is integrally diophantine. Since $\mathbb{Z}$ is diophantine in $\mathcal{O}_K$ by assumption (2) and $\mathcal{O}_K$ is diophantine in $\mathcal{O}_L$, it follows that $\mathbb{Z}$ is diophantine in $\mathcal{O}_L$, and hence Hilbert's tenth problem has a negative answer for $\mathcal{O}_L$. \end{proof} \end{document}
\begin{document} \title[Subadditivity of Kodaira dimensions]{Subadditivity of Kodaira dimensions for fibrations of three-folds in positive characteristics} \address{Lei Zhang\\Key Laboratory of Wu Wen-Tsun Mathematics, Chinese Academy of Sciences\\ School of Mathematical Science\\University of Science and Technology of China\\Hefei 230026, P.R.China.} \email{[email protected], [email protected]} \author{Lei Zhang} \maketitle \begin{abstract} In this paper, we will prove subadditivity of Kodaira dimensions for a fibration with possibly singular geometric generic fiber, under certain nefness and relative semi-ampleness conditions. As an application, for a fibration $f: X \to Y$ of a smooth projective threefold over an algebraically closed field of characteristic $p>5$, under the assumption that $Y$ is of general type and non-uniruled, we prove subadditivity of Kodaira dimensions when general fibers are smooth or when $K_{X/Y}$ is relatively big over $Y$. \emph{Keywords}: Kodaira dimension; positive characteristic; weak positivity; minimal model.\\ \emph{MSC}: 14E05; 14E30. \end{abstract} \section{Introduction} Let $X$ be a projective variety over a field $k$, $D$ a $\mathbb{Q}$-Cartier divisor on $X$. The \emph{$D$-dimension} $\kappa(X,D)$ is defined as \[\kappa(X,D) =\left\{ \begin{array}{llr} -\infty, \text{ ~~if for every integer } m >0, |mD| = \emptyset;\\ \max \{\dim_k \Phi_{|mD|}(X)| m \in \mathbb{Z}~\text{and}~m>0 \}, \text{ otherwise.} \end{array}\right. \] If $X$ has a smooth projective birational model $\tilde{X}$, the \emph{Kodaira dimension} $\kappa(X)$ of $X$ is defined as $\kappa(\tilde{X}, K_{\tilde{X}})$ where $K_{\tilde{X}}$ denotes the canonical divisor. Kodaira dimension is one of the most important birational invariants in classification theory. Let $f: X \rightarrow Y$ be a morphism between two schemes. For $y \in Y$, let $X_y$ denote the fiber of $f$ over $y$; and for a divisor $D$ (resp. a sheaf $\mathcal{F}$) on $X$, let $D_y$ (resp. $\mathcal{F}_y$) denote the restriction of $D$ (resp. $\mathcal{F}$) to the fiber $X_y$. Throughout this paper, since $Y$ frequently appears as an integral scheme, we use the special notation $\eta$ and $\bar{\eta}$ for the generic and geometric generic points of $Y$ respectively. We say $f$ is a \emph{fibration} if $f$ is a projective morphism such that $f_*\mathcal{O}_X = \mathcal{O}_Y$. For fibrations over $\mathbb{C}$, the following problem is of great importance in birational geometry. \begin{Conjecture}[Iitaka conjecture] Let $f:X\rightarrow Y$ be a fibration between two smooth projective varieties over $\mathbb{C}$, with $\dim X = n$ and $\dim Y = m$. Then $$C_{n,m}: \kappa(X)\geq \kappa(Y) + \kappa(X_{\bar{\eta}}).$$ \end{Conjecture} This conjecture has been studied by Kawamata (\cite{Ka0}, \cite{Ka1}, \cite{Ka2}), Koll\'ar (\cite{Ko}), Viehweg (\cite{Vie1}, \cite{Vie2}), Birkar (\cite{Bir09}), Chen and Hacon (\cite{CH}), Cao and P\v{a}un (\cite{CP0}), etc. In positive characteristics, analogously it is conjectured that \begin{Conjecture}[Weak Subadditivity]\label{wc} Let $f:X\rightarrow Y$ be a fibration between smooth projective varieties over an algebraically closed field $k$ of positive characteristic, with $\dim X = n$ and $\dim Y = m$. Assume that the geometric generic fibre $X_{\bar{\eta}}$ is integral and has a smooth projective birational model $\tilde{X}_{\bar{\eta}}$.
Then $$WC_{n,m}: \kappa(X)\geq \kappa(Y) + \kappa(\tilde{X}_{\bar{\eta}}).$$ \end{Conjecture} \begin{Remark} The condition that $X_{\bar{\eta}}$ is integral is equivalent to $X_{\bar{\eta}}$ being reduced, and also to $f$ being separable, by \cite[Sec. 3.2.2]{Liu}. If $\dim Y = 1$, then the fibration $f$ is separable by \cite[Lemma 7.2]{ba01}; thus $X_{\bar{\eta}}$ is integral. The reason why we assume the existence of smooth birational models is to guarantee that $WC_{n,m}$ makes sense, because the geometric generic fibre $X_{\bar{\eta}}$ is not necessarily smooth (in contrast with the situation over $\mathbb{C}$). In positive characteristics, smooth resolution of singularities has been proved only in dimension $\leq 3$ (\cite{CP1} and \cite{CP2}). Here we mention that Luo proposed a new definition \cite[Def. 5.1]{Luo87} of the Kodaira dimension of a variety $X$ via its function field $K(X)$, without involving smooth resolutions. This definition coincides with the traditional one when $X$ is a smooth projective variety. For more discussions please refer to \cite[Appendix B]{Pa2}. \end{Remark} Notice that if both $X$ and $Y$ are smooth, then the dualizing sheaf of $X_{\bar{\eta}}$ is invertible; thus the canonical divisor $K_{X_{\bar{\eta}}}$ is Cartier. It is reasonable to ask whether the following is true. \begin{Conjecture}\label{sc} Let $f:X\rightarrow Y$ be a fibration between smooth projective varieties over an algebraically closed field $k$ of positive characteristic, with $\dim X = n$ and $\dim Y = m$. Then $$C_{n,m}: \kappa(X)\geq \kappa(Y) + \kappa(X_{\bar{\eta}}, K_{X_{\bar{\eta}}}).$$ \end{Conjecture} It is known that $C_{n,m}$ implies $WC_{n,m}$ by \cite[Corollary 2.5]{CZ}, and we call the inequality $WC_{n,m}$ weak subadditivity. Up to some Frobenius base changes and a smooth resolution, proving $WC_{n,m}$ is equivalent to proving $C_{n,m}$ for another fibration with smooth geometric generic fiber (\cite[proof of Corollary 1.3]{BCZ}). It is easier to treat a fibration with smooth geometric generic fiber, because then one can take advantage of moduli theory and positivity results proved recently by Patakfalvi \cite{Pa} and Ejiri \cite{Ej}. Using these technical results, the following have been proved: \begin{itemize} \item[(i)]{$WC_{n, n-1}$ by Chen and Zhang (\cite{CZ});} \item[(ii)]{$WC_{3,1}$ by Birkar, Chen and Zhang over $\bar{\mathbb{F}}_p, p >5$ (\cite{BCZ});} \item[(iii)]{$WC_{3,1}$ in the situation where $\tilde{X}_{\bar{\eta}}$ is of general type and $\mathrm{char}~k >5$, by Ejiri (\cite{Ej}).} \end{itemize} When the geometric generic fiber is singular, the only known result is $C_{2,1}$, which follows from Bombieri-Mumford's classification of surfaces (cf. \cite{CZ}). Recently, Patakfalvi proved $C_{n, m}$ in the situation where $f$ is separable, $\dim_{k(\bar{\eta})} S^0(X_{\bar{\eta}}, K_{X_{\bar{\eta}}})>0$ and $K_Y$ is big (\cite{Pa2}). This paper aims to treat fibrations with possibly singular geometric generic fibers. Our main result is the following theorem. \begin{Theorem}\label{mthk} Let $f:X\rightarrow Y$ be a separable fibration between two normal projective varieties over an algebraically closed field $k$ with $\mathrm{char}~k = p>0$. Assume either that $Y$ is smooth or that $f$ is flat. Let $D$ be a Cartier divisor on $X$.
If there exist an effective $\mathbb{Q}$-Weil divisor $\Delta$ on $X$ and a big $\mathbb{Q}$-Cartier divisor $A$ on $Y$ such that (1) \small{$K_X+ \Delta$} is $\mathbb{Q}$-Cartier and the Cartier index \small{$\mathrm{ind}((K_X+ \Delta)_{\eta})$} is indivisible by $p$; (2) $D - K_{X/Y} - \Delta - f^*A$ is nef and $f$-semi-ample; (3) $\dim_{k(\bar{\eta})} S^0_{\Delta_{\bar{\eta}}}(X_{\bar{\eta}}, D_{\bar{\eta}}) > 0$, then $$\kappa(X, D) \geq \dim Y + \kappa(X_{\bar{\eta}}, D_{\bar{\eta}}).$$ In particular, if $D$ is nef and $f$-big, and conditions (1) and (2') $D - K_{X/Y} - \Delta - f^*A$ is nef\\ hold, then $D$ is big. \end{Theorem} \begin{Remark} If setting $\Delta = 0, D = K_X$ and $A = K_Y$, by Theorem \ref{mthk} we get the main result of \cite{Pa2} mentioned before. The condition (3) above holds if $D_{\bar{\eta}}$ is sufficiently big (Proposition \ref{F-non-vanishing}). In the application to the study of Kodaira dimension, if $K_X$ is not relatively big, a strategy is to consider the relative Iitaka fibration, but then some kind of canonical bundle formula is needed (Section \ref{can-bdl-formula}). \end{Remark} \begin{Remark} For a separable fibration $f:X\rightarrow Y$, there always exists a projective birational morphism $Y'\rightarrow Y$ such that the main component $X'$ of $X\times_Y Y'$ is flat over $Y'$ $($\cite[Lemma 3.4]{AO}$)$. So it is convenient to pass to a flat fibration. The advantage of flat fibrations lies in that the relative canonical sheaves are compatible with base changes $($cf. Proposition \ref{compds}$)$. \end{Remark} As an easy consequence we get \begin{Corollary}\label{app-to-3dim} Let $f:X\rightarrow Y$ be a separable fibration from a normal projective 3-fold to a normal surface or a curve over an algebraically closed field $k$ with $\mathrm{char}~k = p>0$. Let $\Delta$ be an effective divisor on $X$ such that $K_X + \Delta$ is $\mathbb{Q}$-Cartier, nef and $f$-big. Let $\mu: Z \to Y$ be a smooth resolution. If $K_Z$ is big, then $K_X + \Delta$ is big. \end{Corollary} Combining the recent results of minimal model theory in dimension 3 (cf. \cite{HX}, \cite{Bir13}), we can prove \begin{Corollary}\label{app-to-3dim-special} Let $(X, \Delta)$ be a projective klt pair of dimension 3, and let $f: X \rightarrow Y$ be a separable fibration to a smooth projective curve or a surface, over an algebraically closed field $k$ with $\mathrm{char}~k = p >5$. Assume that $K_Y$ is big and $Y$ is non-uniruled. Then $$\kappa(X, K_X + \Delta) \geq \kappa(Y) + \kappa(X_{\bar{\eta}}, K_{X_{\bar{\eta}}} + \Delta_{\bar{\eta}})$$ if one of the following holds (1) $K_{X/Y} + \Delta$ is $f$-big; (2) $\Delta = 0$ and the geometric generic fiber $X_{\bar{\eta}}$ is smooth. \end{Corollary} \begin{Remark} (1) In Corollary \ref{app-to-3dim}, as $K_X +\Delta$ is assumed to be nef, the pair $(X,\Delta)$ is not necessarily assumed to be klt. This result holds in arbitrary dimensions if granted smooth resolution of singularities. (2) When $Y$ is of general type and non-uniruled, Corollary \ref{app-to-3dim-special} implies $WC_{3,n}$ by \cite[Corollary 2.5]{CZ}, and $C_{3,n}$ if in addition $K_{X/Y}$ is $f$-big. Shortly after this paper was written, Ejiri and the author prove $WC_{3,n}$ completely in \cite{EZ16}, they treat the case $g(Y) =1$ by a very clever use of trace maps and a deep result of vector bundles on curves. (3) Varieties of maximal Albanese dimension are non-uniruled. 
The results in this paper can be applied to study abundance for 3-folds with $\dim \mathrm{Pic}^0(X) >0$, which is finally proved in a later paper \cite{Zh17}. \end{Remark} \textbf{Strategy of the proof:} Let's explain our idea to study subadditivity of Kodaira dimensions. Recall that by the standard approach proposed by Viehweg in \cite{Vie}, granted the bigness of $K_Y$, we only need to prove the weak positivity of $f_*\omega_{X/Y}^l$. Unfortunately, in positive characteristics, if fibers have bad singularities, then the sheaf $f_*\omega_{X/Y}^l$ is not necessarily weakly positive (see Raynaud's example \ref{ray-example} below). To overcome this difficulty, stimulated by \cite{PSZ} and \cite{Pa2}, we prove a positivity result (Theorem \ref{mthp} below) without singularity conditions, but at the cost of assuming other conditions like nefness and relative semi-ampleness. These conditions are closely related to minimal model theory. For a fibration of a 3-fold, by passing to a minimal model, we can prove that the sheaf $F_Y^{g*}f_*(\omega_{X/Y}^l \otimes f^*\omega_Y^{l-1})$ contains a non-zero weakly positive subsheaf under certain situations (say, when $\omega_{X/Y}$ is $f$-big), which plays a similar role as the sheaf $f_*\omega_{X/Y}^l$. The positivity result mentioned above is stated as follows. \begin{Theorem}\label{mthp} Let $f:X\rightarrow Y$ be a separable surjective projective morphism between two normal projective varieties over an algebraically closed field $k$ with $\mathrm{char}~k = p > 0$. Assume that $Y$ is Gorenstein. Let $\Delta$ be an effective $\mathbb{Q}$-Weil divisor on $X$ such that $K_{X/Y}+ \Delta$ is $\mathbb{Q}$-Cartier and $p \nmid \mathrm{ind}((K_{X/Y} + \Delta)_\eta)$. If $D$ is a Cartier divisor on $X$ such that $D - K_{X/Y} - \Delta$ is nef and $f$-semi-ample, then for sufficiently divisible $g$, the sheaf $F_Y^{g*}f_*\mathcal{O}_X(D)$ contains a weakly positive subsheaf $S_{\Delta}^{g}f_*\mathcal{O}_X(D)$ of rank $\dim_{k(\bar{\eta})} S_{\Delta_{\bar{\eta}}}^0(X_{\bar{\eta}}, D_{\bar{\eta}})$. Moreover if $Y$ is smooth, then $t(Y, S_{\Delta}^{g}f_*\mathcal{O}_X(D), H) \geq 0$ for an ample divisor $H$ on $Y$. \end{Theorem} \begin{Remark}\label{rmk-of-positivity} (1) Please refer to Sec. \ref{Ftrm} and \ref{wp} for the definitions of $S_{\Delta}^{g}f_*\mathcal{O}_X(D)$, $S_{\Delta_{\bar{\eta}}}^0(X_{\bar{\eta}}, D_{\bar{\eta}})$ and $t(Y, S_{\Delta}^{g}f_*\mathcal{O}_X(D), H)$. The invariant $t(Y, \mathcal{F}, H)$ for a coherent sheaf $\mathcal{F}$ was introduced by Ejiri in \cite{Ej} to measure the positivity of $\mathcal{F}$. For example, the condition $t(Y, \mathcal{F}, H) \geq 0$ implies the weak positivity of $\mathcal{F}$, and they are equivalent when $Y$ is a curve. In positive characteristic, to construct global sections, we will use the condition $t(Y, \mathcal{F}, H) \geq 0$ instead of weak positivity (Theorem \ref{F-p-subadd-of-kod-dim}). (2) In \cite[Theorem D and Theorem E]{PSZ}, the authors got similar results under stronger conditions that $f$ is flat, relatively $G_1$ and $S_2$, $p \nmid \mathrm{ind}(K_{X/Y}+ \Delta)$ and $D - K_{X/Y} - \Delta$ is nef and $f$-ample. And in \cite[Sec. 6]{Pa2}, Patakfalvi proved the weak positivity of $S^{g}f_*\omega_{X/Y}$ under some mild assumptions. The idea of the proof is to consider the trace maps of relative Frobenius iterations, similarly as in \cite{PSZ} and \cite{Ej}. 
\end{Remark} Applying the theorem above to log minimal models, we immediately get \begin{Corollary} Let $f:X\rightarrow Y$ be a separable surjective projective morphism between two normal projective varieties over an algebraically closed field $k$ with $\mathrm{char}~k =p > 0$. Let $\Delta$ be an effective $\mathbb{Q}$-Weil divisor on $X$ such that $K_X + \Delta$ is $\mathbb{Q}$-Cartier and $p \nmid \mathrm{ind} (K_X + \Delta)_\eta$. Assume that $K_X + \Delta$ is nef and $f$-semi-ample and $Y$ is Gorenstein. Then for a positive integer $l$ such that $l(K_{X} + \Delta)$ is Cartier and for sufficiently divisible $g$, the sheaf $F_Y^{g*}(\mathcal{O}_X(l(K_{X/Y} + \Delta)) \otimes f^*\omega_Y^{l-1})$ contains a weakly positive subsheaf of rank $\dim_{k(\bar{\eta})} S_{\Delta_{\bar{\eta}}}^0(X_{\bar{\eta}}, l(K_{X/Y} + \Delta)_{\bar{\eta}})$. \end{Corollary} Let's recall Raynaud's example, which gives a minimal surface $S$ of general type over a curve $C$ such that for $l \geq 2$, $f_*\omega_{S/C}^l$ is not nef while $(f_*\omega_{S/C}^l) \otimes \omega_C^{l-1}$ is nef. \begin{Example}[{\cite{Ra}, \cite[Theorem 3.6]{Xie}}]\label{ray-example} Let $k$ be an algebraically closed field with $\mathrm{char}~k = p\geq 3$. We can find a \emph{Tango curve} $C$ of \emph{integral type} over $k$ of genus $g\geq 2$ (cf. \cite[Ex. 2.4, Def. 2.6]{Xie}), namely, $C$ has a divisor $L=[\frac{df}{p}]$ for some $f \in K(C)$ such that $\deg L >0$ and $L':=\frac{L}{2}$ is integral. By abusing notation, we also use $L,L'$ to denote the corresponding line bundles. Notice that $L$ is a sub-line bundle of $\mathcal{B}^1$ \emph{($=d \mathcal{O}_C \subset \Omega_C^1$)}, which implies $0< h^0(\mathcal{B}^1(-L)) \leq h^0((F_{C*}\Omega_C^1)\otimes L^{-1}) = h^0(\Omega_C^1\otimes L^{-p})$, hence $\deg(K_C-pL) \geq 0$. We have a non-trivial extension $$0 \rightarrow \mathcal{O}_C \rightarrow \mathcal{E} \rightarrow L \rightarrow 0$$ such that $\mathrm{Sym}^p\mathcal{E} \otimes L^{-p}$ has a non-zero section. Let $X = \mathbb{P}_C(\mathcal{E}^*) = \mathrm{Proj}_{\mathcal{O}_C} \oplus_l \mathrm{Sym}^l\mathcal{E}$, $g: X \rightarrow C$ the natural projection, $E$ the natural section such that $E \sim \mathcal{O}_X(1)$ and $C'$ a smooth curve on $X$ such that $C' \sim pE - pg^*L$. Then $E$ and $C'$ are disjoint from each other. Let $$M \sim \frac{p+1}{2}E - p g^*L'.$$ Denote by $\pi: S \rightarrow X$ the smooth double cover induced by the relation $2M \sim E + C'$, and by $f: S \rightarrow C$ the natural fibration. Then we have that $$\pi_*\omega_{S/C}^l \cong \mathcal{O}_X(l(K_{X/C} + M)) \oplus \mathcal{O}_X(lK_{X/C} + (l-1)M),$$ thus by $K_{X/C} \sim -2E + g^*\det \mathcal{E} \sim -2E + g^*L$, \begin{align*} f_*\omega_{S/C}^l \cong &g_*(\mathcal{O}_X(l(K_{X/C} + M)) \oplus \mathcal{O}_X(lK_{X/C} + (l-1)M)) \\ \cong &g_*(\mathcal{O}_X(\frac{l(p-3)}{2}E + g^*(2-p)lL')) \\ &\oplus g_*(\mathcal{O}_X(\frac{lp - p - 3l - 1}{2}E + g^*((2-p)l + p)L')) \\ \cong &(\mathrm{Sym}^{\frac{l(p-3)}{2}}\mathcal{E}\otimes L'^{(2-p)l}) \oplus (\mathrm{Sym}^{\frac{lp - p - 3l - 1}{2}}\mathcal{E}\otimes L'^{(2-p)l + p}). \end{align*} By easy calculations, one can verify that for every positive integer $l$, the sheaf $f_*\omega_{S/C}^l$ is not nef; instead its dual $(f_*\omega_{S/C}^l)^*$ is nef. Moreover, the sheaf $(f_*\omega_{S/C}^l) \otimes \omega_C^{l-1}$ is nef for $l \geq 2$, since $\deg \omega_C \geq 2p\deg L'$. \end{Example} \begin{Acknowledgments} The author expresses his gratitude to Dr. Sho Ejiri for many useful discussions, and to Prof.
Zsolt Patakfalvi and Chenyang Xu for some useful communications. He is very grateful to the anonymous referees for pointing out many inaccuracies and giving valuable suggestions to improve this paper. The author is supported by grant NSFC (No. 11771260 and No. 11531009). \end{Acknowledgments} \section{Preliminaries}\label{tools} \subsection{Almost Cartier divisors} Because the fibrations we treat may have non-normal geometric generic fibers, their Frobenius base changes are not necessarily normal. In the following we will often work on non-normal varieties satisfying Serre's condition $S_2$. On $S_2$ varieties, we will work with almost Cartier divisors. Let us recall the definition and some basic results on almost Cartier divisors. Please refer to \cite[Sec. 2.1]{MS} and \cite[p.171-172]{Ko92} for more details. \begin{Definition} Let $X$ be a reduced Noetherian $S_2$ scheme over a field $k$ of finite type and of pure dimension. An \emph{almost Cartier divisor} $($\emph{AC divisor} for short$)$ on $X$ is a reflexive coherent $\mathcal{O}_X$-submodule of the sheaf of total quotient rings $K(X)$ which is invertible in codimension one. \end{Definition} We record the following remarks. (1) Recall that if $X$ is normal and $D$ is a Weil divisor on $X$, the sheaf $\mathcal{O}_X(D)$ is defined via $$\mathcal{O}_X(D)_x:=\{f \in K(X)| ((f) + D)|_U \geq 0 ~\mathrm{for~some~open~set}~U~\mathrm{containing}~x\},$$ hence it defines an AC divisor. In the normal setting, we have a natural one-to-one correspondence between the set of Weil divisors and the set of AC divisors. (2) Giving an AC divisor is equivalent to giving a Cartier divisor $D_0$ on an open subset $X_0$ which is the complement of a closed subscheme $S$ with $\mathrm{codim}_X S \geq 2$. Indeed, denoting by $i: X_0 \hookrightarrow X$ the natural inclusion, the sheaf $i_*\mathcal{O}_{X_0}(D_0)$ is an AC divisor (\cite[p.172]{Ko92}). (3) Denote by $\mathrm{WSh}(X)$ the set of AC divisors, which is an additive group. For $D \in \mathrm{WSh}(X)$, to unify the notation, in the following we write $\mathcal{O}_X(D)$ for the coherent sheaf defining $D$; and we say that $D$ is effective if $\mathcal{O}_X \subseteq \mathcal{O}_X(D)$. For two AC divisors $D_1$ and $D_2$ on $X$, we write $D_1 \geq D_2$ if $E= D_1 - D_2$ is effective. An element of $\mathrm{WSh}(X) \otimes \mathbb{Q}$ is called a $\mathbb{Q}$-AC divisor. (4) We can define the linear and $\mathbb{Q}$-linear equivalences in $\mathrm{WSh}(X)$ and $\mathrm{WSh}(X) \otimes \mathbb{Q}$, which are denoted by $\sim$ and $\sim_{\mathbb{Q}}$ respectively. For a morphism $g: Z \to X$ of reduced Noetherian $S_2$ schemes, we can always define the pullback of $\mathbb{Q}$-Cartier $\mathbb{Q}$-divisors, and if $g$ is equi-dimensional, we can also define the pullback of $\mathbb{Q}$-AC divisors. \subsection{Relative canonical sheaves} Let $X$ be a reduced, $G_1$ and $S_2$ projective scheme of pure dimension and of finite type over a field $k$. The \emph{canonical sheaf} $\omega_X$ of $X$ is defined as its dualizing sheaf (\cite[V.8-10, VI.2]{Ha}, \cite[Sec. 5.5]{KM98}). Then over the Gorenstein open set $i: X_0 \hookrightarrow X$ such that $\mathrm{codim}_X (X \setminus X_0) \geq 2$, the restriction $\omega_{X}|_{X_0}$ is a line bundle, and the \emph{canonical divisor} (class) $K_X$ is the equivalence class of AC divisors such that $\mathcal{O}_X(K_{X})$ is isomorphic to $\omega_{X}$.
Let $f: X \to Y$ be a projective morphism between reduced, $G_1$ and $S_2$ Noetherian schemes of pure dimension and of finite type over a field $k$. If either $Y$ is Gorenstein or $f$ is flat, then similarly to the above we can define the \emph{relative canonical sheaf} $\omega_{X/Y}$ and the \emph{relative canonical divisor} $K_{X/Y}$. It is known that relative canonical sheaves are compatible with flat base changes (cf. \cite[Chap. III Sec. 8]{Ha}). To treat non-flat base changes, we have the following result, which is similar to \cite[Theorem 2.4]{CZ}. \begin{Proposition}\label{compds} Let $f: X \rightarrow Y$ be a separable flat projective morphism between two normal varieties. Let $\Delta$ be an effective $\mathbb{Q}$-Weil divisor on $X$ such that $K_{X/Y} + \Delta$ is $\mathbb{Q}$-Cartier. Let $\pi: Y' \rightarrow Y$ be a generically finite surjective morphism from a smooth variety, $\bar{X}'= X\times_Y Y'$ and $\sigma: X' \rightarrow \bar{X}'$ the normalization morphism, which fit into the following commutative diagram \begin{center} \xymatrix@C=2cm{ &X' \ar@/^2pc/[rr]|{\sigma'}\ar[r]^>>>>>>>>>{\sigma} \ar[rd]^{f'} &\bar{X}' = X \times_Y Y' \ar[r]^<<<<<<<<<{\pi'}\ar[d]^{\bar{f}'} &X\ar[d]^f \\ & &Y'\ar[r]^{\pi} &Y } \end{center} where $\pi'$ and $\bar{f}'$ denote the natural projections, and $f'=\bar{f}'\circ \sigma$. Then there exist an effective $\sigma'$-exceptional Cartier divisor $E'$ coming from the pullback of a divisor on $Y'$, and an effective divisor $\Delta'$ on $X'$ such that $$K_{X'/Y'} + \Delta' = \sigma'^*(K_{X/Y} + \Delta) + E'.$$ \end{Proposition} \begin{proof} Denote by $Y_0$ the smooth locus of $Y$, and let $Y'_0 = \pi^{-1}Y_0$, $X_0 = X\times_Y Y_0$, $\bar{X}'_0 = \bar{X}'\times_{Y'}Y'_0$ and $X'_0 = X'\times_{Y'}Y'_0$. By arguing in codimension one, we assume $X_0$ is Gorenstein, hence $f|_{X_0}$ is a flat Gorenstein morphism by \cite[p.298 (Ex. 9.7)]{Ha}. Then by remarks of \cite[p.388]{Ha}, we have $$K_{\bar{X}'_0/Y'_0} \sim_{\mathbb{Q}} \pi'^*K_{X/Y}|_{\bar{X}'_0}.$$ Since $X'_0 \rightarrow \bar{X}'_0$ is the normalization, by results of \cite[Sec. 2]{Re}, there exists an effective divisor $C'$ such that $$\sigma'^*K_{X/Y}|_{X'_0} \sim_{\mathbb{Q}} \sigma^*K_{\bar{X}'_0/Y'_0} = K_{X'_0/Y'_0} + C'.$$ Since $K_{X/Y} + \Delta$ is assumed to be $\mathbb{Q}$-Cartier, its pull-back makes sense. In turn we can get an effective divisor $\Delta'_0$ on $X'_0$ such that $$K_{X'_0/Y'_0} + \Delta'_0 \sim_{\mathbb{Q}} \sigma'^*(K_{X/Y} + \Delta)|_{X'_0}.$$ Let $D'$ be the closure of $\Delta'_0$ in $X'$, which is a $\mathbb{Q}$-Weil divisor. Let $B'= \sigma'^*(K_{X/Y} + \Delta) - (K_{X'/Y'} + D')$. If $B'= 0$, then we are done. Otherwise, since $f'$ is equi-dimensional, the support of $B'$ is mapped via $f'$ to a codimension one cycle contained in $Y' \setminus Y'_0$. Since $Y'$ is smooth, we can find an effective $\pi$-exceptional Cartier divisor $E$ on $Y'$ such that $D''=f'^*E + B' \geq 0$. Then we are done by setting $E'= f'^*E$, which is $\sigma'$-exceptional, and $\Delta'= D' + D''$. \end{proof} \subsection{Trace maps of Frobenius iterations} \label{Ftrm} Throughout this subsection, let $k$ be an algebraically closed field of characteristic $p > 0$. Let $f: X \rightarrow Y$ be a morphism of schemes over $k$.
We will use the following notation: (1) $F_X^e: X \rightarrow X$ for the $e^{\mathrm{th}}$ absolute Frobenius iteration, and sometimes, to avoid confusions, we use $X^e$ for the source scheme in the morphism $F_X^e: X \rightarrow X$; (2) $X_{Y^e}$ for the fiber product $X\times_Y Y^e$ of morphisms $f: X \rightarrow Y$ and $F_Y^e: Y^e \rightarrow Y$, $f_e: X_{Y^e} \rightarrow Y$ and $\pi_Y^e: X_{Y^e} \rightarrow X$ for the natural projections; (3) $F_{X/Y}^e: X \rightarrow X_{Y^e}$ for the $e^{\mathrm{th}}$ relative Frobenius iteration over $Y$. We will discuss the trace maps of (relative) Frobenius iterations in different settings. Please refer to \cite{Pa}, \cite{Pa2}, \cite{PSZ} and \cite{Ej} for more details and related results. \subsubsection{Trace maps of absolute Frobenius iterations}\label{tmaF} \begin{Notation}\label{1} Let $X$ be a reduced, $G_1$ and $S_2$ projective scheme over $k$ of finite type and of pure dimension. Denote by $X_0$ a Gorenstein open subset of $X$ such that $\mathrm{codim}_X (X\backslash X_0) >1$. Let $\Delta$ be an effective $\mathbb{Q}$-AC divisor such that $K_X + \Delta$ is $\mathbb{Q}$-Cartier. Assume the Cartier index $\mathrm{ind} (K_X + \Delta)$ is indivisible by $p$. Then there exists a positive integer $g$ such that $(1-p^{eg})(K_X + \Delta)$ is Cartier for every positive integer $e$, in particular $(p^{eg}-1)\Delta|_{X_0}$ is an effective Cartier divisor. Let $D$ be a Cartier divisor on $X$. \end{Notation} \begin{Remark} To give a divisor $\Delta$ as above is equivalent to give a sub-line bundle $\mathcal{L}_g$ of $\mathcal{O}_X ((1-p^{g})K_X)$ for some $g$. Indeed, from $\Delta$ we can immediately get the sub-line bundle $\mathcal{O}_X ((1-p^{g})(K_X + \Delta))$ of $\mathcal{O}_X ((1-p^{g})K_X)$; conversely, given a sub-line bundle $\mathcal{L}_g \subseteq \mathcal{O}_X ((1-p^{g})K_X)$, assuming this inclusion is induced by an effective AC divisor $B$, we get the $\mathbb{Q}$-AC divisor $\Delta = \frac{B}{p^{g} - 1}$ which satisfies the assumptions in Notation \ref{1}. \end{Remark} Since $X$ is $G_1$ and $S_2$, the composite map of the natural inclusion $$F^{eg}_{X*} \mathcal{O}_X ((1-p^{eg})(K_X + \Delta))|_{X_0} \hookrightarrow F^{eg}_{X_0*} \mathcal{O}_{X_0} ((1-p^{eg})K_{X_0})$$ and the trace map $Tr_{X_0}^{eg}: F^{eg}_{X_0*} \mathcal{O}_{X_0} ((1-p^{eg})K_{X_0}) \cong F^{eg}_{X_0*} \omega_{X_0}^{1-p^{eg}}\rightarrow \mathcal{O}_{X_0}$ extends to a map on $X$: $$Tr_{X,\Delta}^{eg}: F^{eg}_{X*} \mathcal{O}_X ((1-p^{eg})(K_X + \Delta)) \rightarrow \mathcal{O}_X.$$ Twisting the trace map $Tr_{X,\Delta}^{eg}$ above by $\mathcal{O}_X(D)$ induces a map \begin{align*} Tr_{X,\Delta}^{eg}(D): &F^{eg}_{X*} \mathcal{O}_X ((1-p^{eg})(K_X + \Delta)) \otimes \mathcal{O}_X(D) \\ &\cong F^{eg}_{X*} \mathcal{O}_X ((1-p^{eg})(K_X + \Delta)+ p^{eg}D) \rightarrow \mathcal{O}_X(D), \end{align*} then taking global sections gives $$H^0(Tr_{X,\Delta}^{eg}(D)): H^0(X, F^{eg}_{X*} \mathcal{O}_X ((1-p^{eg})(K_X + \Delta)+ p^{eg}D)) \rightarrow H^0(X, D).$$ Let $$S_{\Delta}^{eg}(X, D) = \mathrm{Im} H^0(Tr_{X,\Delta}^{eg}(D)) ~ \mathrm{and} ~S_{\Delta}^{0}(X, D) = \cap_{e\geq 0} S_{\Delta}^{eg}(X, D).$$ If $\Delta = 0$, we usually use the notation $S^{0}(X, D)$ instead of $S_{0}^{0}(X, D)$. 
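Let us make explicit the isomorphism used above when twisting by $\mathcal{O}_X(D)$ (a standard manipulation, recorded here only for the reader's convenience): it is the projection formula for the finite morphism $F_X^{eg}$, combined with $F_X^{eg*}\mathcal{O}_X(D) \cong \mathcal{O}_X(p^{eg}D)$ for the Cartier divisor $D$, namely $$F^{eg}_{X*} \mathcal{O}_X ((1-p^{eg})(K_X + \Delta)) \otimes \mathcal{O}_X(D) \cong F^{eg}_{X*} \big(\mathcal{O}_X ((1-p^{eg})(K_X + \Delta)) \otimes F_X^{eg*}\mathcal{O}_X(D)\big) \cong F^{eg}_{X*} \mathcal{O}_X ((1-p^{eg})(K_X + \Delta)+ p^{eg}D).$$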
For $e' > e$, the map $Tr_{X,\Delta}^{e'g}(D)$ factors as \begin{align*} Tr_{X,\Delta}^{e'g}(D):~ &F^{eg}_{X*} F^{(e'-e)g}_{X*}\mathcal{O}_X ((1-p^{e'g})(K_X + \Delta)+ p^{e'g}D) \\ &\xrightarrow{F^{eg}_{X*}Tr_{X,\Delta}^{(e'-e)g}((1-p^{eg})(K_X + \Delta)+ p^{eg}D)} F^{eg}_{X*} \mathcal{O}_X ((1-p^{eg})(K_X + \Delta)+ p^{eg}D) \\ &\xrightarrow{Tr_{X,\Delta}^{eg}(D)} \mathcal{O}_X(D). \end{align*} So there is a natural inclusion $S_{\Delta}^{e'g}(X, D) \subseteq S_{\Delta}^{eg}(X, D)$, thus for sufficiently large $e$, $S_{\Delta}^{eg}(X, D)= S_{\Delta}^{0}(X, D)$. \begin{Proposition}\label{F-non-vanishing} Let the notation be as in Notation \ref{1}. Then (1) There exists an ideal $\sigma(X, \Delta)$, namely, the non-$F$-pure ideal of $(X, \Delta)$, such that for sufficiently divisible $e$, $$\mathrm{Im} Tr_{X,\Delta}^{eg} = \sigma(X, \Delta) = Tr_{X,\Delta}^{eg} (F^{eg}_{X*} (\sigma(X, \Delta)\cdot \mathcal{O}_X ((1-p^{eg})(K_X + \Delta)))).$$ (2) If $D$ is ample, then for sufficiently large $l$ $$S_{\Delta}^{0}(X, lD) = H^0(X, \sigma(X, \Delta) \cdot \mathcal{O}_X(lD)).$$ (3) Assume $X$ is integral and $D$ is big, and let $E$ be another Cartier divisor on $X$. Then for sufficiently large $l$, $S_{\Delta}^{0}(X, lD + E) \neq 0$. (4) Assume that $X$ is integral. Let $\sigma: X' \rightarrow X$ be the normalization morphism. Let $K_{X}$ be a fixed canonical divisor of $X$, and let $B$ denote the divisor arising from the conductor of the normalization, which gives the canonical divisor $K_{X'} = \sigma^*K_X -B$ $($\cite[2.6]{Re}$)$, and let $\Delta' = \sigma^* \Delta + B$ $($namely, $K_{X'} + \Delta'= \sigma^*(K_X + \Delta)$$)$. Then $$\dim S_{\Delta}^{0}(X, D) \leq \dim S_{\Delta'}^{0}(X', \sigma^*D).$$ \end{Proposition} \begin{proof} For (1), please refer to \cite[Lemma 13.1]{Ga}. (2) Fix a sufficiently divisible $g$ such that for every integer $e >0$ the trace map below is surjective $$Tr_{X,\Delta}^{eg}:~F^{eg}_{X*} (\sigma(X, \Delta)\cdot \mathcal{O}_X ((1-p^{eg})(K_X + \Delta))) \rightarrow \sigma(X, \Delta),$$ and denote its kernel by $\mathcal{B}^{eg}$. Then we have the following exact sequence {\small \begin{align*} 0 \to F^{(e-1)g}_{X*} (\mathcal{B}^{g} \otimes &\mathcal{O}_X ((1-p^{(e-1)g})(K_X + \Delta))) \to F^{eg}_{X*} (\sigma(X, \Delta)\cdot \mathcal{O}_X ((1-p^{eg})(K_X + \Delta))) \\ &\to F^{(e-1)g}_{X*} (\sigma(X, \Delta)\cdot \mathcal{O}_X ((1-p^{(e-1)g})(K_X + \Delta))) \to 0. \end{align*}} Mimicking the proof of \cite[Lemma 2.20]{Pa}, we can obtain another exact sequence \begin{align*} 0\to F^{(e - 1)g}_{X*}(\mathcal{B}^{g} \otimes \mathcal{O}_X ((1-p^{(e - 1)g})(K_X + \Delta))) \to \mathcal{B}^{eg} \to \mathcal{B}^{(e - 1)g} \to 0. \end{align*} We can find an integer $l_0$ such that $l_0D - (K_X + \Delta)$ is ample, and applying Fujita vanishing (\cite{Keeler}), in turn we can find $l_1>l_0$ such that for every integer $l > l_1$ and $e \geq 1$ \begin{align*} &H^1(X, F^{(e - 1)g}_{X*}(\mathcal{B}^{g}\otimes \mathcal{O}_X ((1-p^{(e - 1)g})(K_X + \Delta))) \otimes \mathcal{O}_X(lD)) \\ &\cong H^1(X, \mathcal{B}^{g}\otimes \mathcal{O}_X (lD + (p^{(e - 1)g} -1)(lD - (K_X + \Delta)))) = 0. \end{align*} By induction on $e$ we can show $H^1(X, \mathcal{B}^{eg} \otimes \mathcal{O}_X(lD)) = 0$ for every $e>0$, which implies \begin{align*} H^0(Tr_{X,\Delta}^{eg}(lD)):~&H^0(X, F^{eg}_{X*} (\sigma(X, \Delta)\cdot \mathcal{O}_X ((1-p^{eg})(K_X + \Delta)))\otimes \mathcal{O}_X(lD))\\ &\rightarrow H^0(X, \sigma(X, \Delta)\cdot \mathcal{O}_X(lD)) \end{align*} is surjective.
(3) By the result (2) we can take a sufficiently ample divisor $H$ such that $S_{\Delta}^{0}(X, H) \neq 0$. Since $D$ is big, for sufficiently large integer $l$ $$H^0(X, \mathcal{O}_X(lD + E - H)) \neq 0.$$ Take a nonzero section $s_F \in H^0(X, \mathcal{O}_X(lD + E - H))$ which defines a divisor $F \in |lD + E - H|$. We have the natural maps \begin{align*} &H^0(X, F^{g}_{X*} \mathcal{O}_X ((1-p^{g})(K_X + \Delta))\otimes \mathcal{O}_X(H))\\ &\xrightarrow{\otimes s_F} H^0(X, F^{g}_{X*} \mathcal{O}_X ((1-p^{g})(K_X + \Delta))\otimes \mathcal{O}_X(H + F))\\ &\xrightarrow{H^0(Tr_{X,\Delta}^{g}(H + F))} H^0(X, \mathcal{O}_X(H + F)) = H^0(X, \mathcal{O}_X(lD + E)). \end{align*} Then we get an injection $S_{\Delta}^{0}(X, H)\xrightarrow{\otimes s_F} S_{\Delta}^{0}(X, lD + E)$, which finishes the proof of (3). We are left to prove (4). To ease the notation, we will use $\mathcal{O}_X^{\frac{1}{p^g}}$ to denote the structure sheaf of $X^g$; and for an AC divisor $D$ on $X^g$, the pushforward $F_{X*}^{g}\mathcal{O}_{X^g}(D)$ is granted an $\mathcal{O}_X^{\frac{1}{p^g}}$-module structure and will be denoted by $\mathcal{O}_{X}(D)^{\frac{1}{p^g}}$. By duality theory (\cite[Sec. III.6]{Ha}), we have an $\mathcal{O}_{X} ^{\frac{1}{p^g}}$-linear isomorphism $$\mathcal{O}_X((1-p^g)K_X)^{\frac{1}{p^g}} ~\cong ~\mathcal{H}om_{\mathcal{O}_{X}}(\mathcal{O}_{X}^{\frac{1}{p^g}}, \mathcal{O}_{X}),$$ and the trace map $Tr_{X, \Delta}^g$ is give by the composition \begin{align*} Tr_{X, \Delta}^g: \mathcal{O}_X((1-p^g)(K_X + \Delta))^{\frac{1}{p^g}} ~\cong ~&\mathcal{H}om_{\mathcal{O}_{X}}(\mathcal{O}_{X}((p^g -1)\Delta)^{\frac{1}{p^g}}, \mathcal{O}_{X}) \\ & \subseteq ~\mathcal{H}om_{\mathcal{O}_{X}}(\mathcal{O}_{X}^{\frac{1}{p^g}}, \mathcal{O}_{X}) \xrightarrow{\mathrm{ev}(1)} \mathcal{O}_{X} \end{align*} where $\mathrm{ev}(1)$ denotes the evaluation map at $1$. Let $K = K(X') = K(X)$. Regard {\small $\mathcal{H}om_{\mathcal{O}_{X}}(\mathcal{O}_{X}((p^g -1)\Delta)^{\frac{1}{p^g}}, \mathcal{O}_{X})\otimes_{\mathcal{O}_X^{\frac{1}{p^g}}} \mathcal{O}_{X'}^{\frac{1}{p^g}}$} and $\mathcal{H}om_{\mathcal{O}_{X'}}(\mathcal{O}_{X'}((p^g -1)\Delta')^{\frac{1}{p^g}}, \mathcal{O}_{X'})$ as two sub-sheaves of the constant sheaf $\mathrm{Hom}_K(K^{\frac{1}{p^g}}, K) \cong K^{\frac{1}{p^g}}$ on $X'^g$, then both are line bundles linearly equivalent to the divisor $\sigma^*(1-p^g)(K_{X} + \Delta) = (1-p^g)(K_{X'} + \Delta')$. We claim that {\small $$\mathcal{H}om_{\mathcal{O}_{X}}(\mathcal{O}_{X}((p^g -1)\Delta)^{\frac{1}{p^g}}, \mathcal{O}_{X})\otimes_{\mathcal{O}_X^{\frac{1}{p^g}}} \mathcal{O}_{X'}^{\frac{1}{p^g}} = \mathcal{H}om_{\mathcal{O}_{X'}}(\mathcal{O}_{X'}((p^g -1)\Delta')^{\frac{1}{p^g}}, \mathcal{O}_{X'}).$$} It suffices to verify this claim in codimension one. So we may assume both $X$ and $X'$ are Gorenstein. Take an open affine subset $U =\mathrm{Spec} R \subseteq X$. Let $R^N$ denote the normalization of $R$. By abusing notation we still use $\Delta$ to denote the restriction $\Delta|_U$. Since $\mathcal{H}om_{\mathcal{O}_{X}}(\mathcal{O}_{X}((p^g -1)\Delta)^{\frac{1}{p^g}}, \mathcal{O}_{X})(U) = \mathrm{Hom}_{R}(R((p^g-1)\Delta)^{\frac{1}{p^g}}, R)$ is a free $R^{\frac{1}{p^g}}$-module of rank one, we can take a generator $\phi$, which corresponds to the divisor $\Delta_{\phi} = \Delta$ via the correspondence of \cite[Theorem 2.4]{MS}. By \cite[Lemma 3.1]{MS} (or \cite[Prop. 
7.10, 7.11]{Sch10}), $\phi$ extends to an element $\sigma^*\phi \in \mathrm{Hom}_{R^N}((R^N)^{\frac{1}{p^g}}, R^N)$, which corresponds to the divisor $\Delta_{\sigma^*\phi} =\sigma^*\Delta_{\phi} + B = \Delta'$ by \cite[Lemma 3.1]{MS}\footnote{Remark that in the statement of \cite[Lemma 3.1]{MS}, $R$ is required to be semi-normal, but this condition is not used in the proof.}. This means $\sigma^*\phi$ is a generator of the $(R^N)^{\frac{1}{p^g}}$-module $\mathrm{Hom}_{R^N}(R^N((p^g-1)\Delta')^{\frac{1}{p^g}}, R^N)$. Then we can conclude the claim. Applying the claim above, we can extend $Tr_{X, \Delta}^g$ to an $\mathcal{O}_{X'}$-linear map {\small \begin{align*} \sigma^*Tr_{X, \Delta}^g: ~&\mathcal{O}_{X'}((1-p^g)(K_{X'} + \Delta'))^{\frac{1}{p^g}} \\ &\cong~\sigma^*\mathcal{O}_X((1-p^g)(K_X + \Delta))^{\frac{1}{p^g}}\\ ~&\cong~ \mathcal{H}om_{\mathcal{O}_{X}}(\mathcal{O}_{X}((p^g -1)\Delta)^{\frac{1}{p^g}}, \mathcal{O}_{X})\otimes_{\mathcal{O}_X^{\frac{1}{p^g}}} \mathcal{O}_{X'}^{\frac{1}{p^g}} \\ &=~ \mathcal{H}om_{\mathcal{O}_{X'}}(\mathcal{O}_{X'}((p^g -1)\Delta')^{\frac{1}{p^g}}, \mathcal{O}_{X'}) \subseteq \mathcal{H}om_{\mathcal{O}_{X'}}(\mathcal{O}_{X'}^{\frac{1}{p^g}}, \mathcal{O}_{X'}) \xrightarrow{\mathrm{ev}(1)} \mathcal{O}_{X'}. \end{align*}} By the above construction, since $X'$ is integral and projective, we see that $\sigma^*Tr_{X, \Delta}^g$ coincides with $Tr_{X', \Delta'}^g$ up to some multiplication by a nonzero number of $k$. From this we can verify the following commutative diagram $$\xymatrix@C=1cm{ &H^0(\mathcal{O}_X((1-p^g)(K_X + \Delta))^{\frac{1}{p^g}}\otimes_{\mathcal{O}_{X}}\mathcal{O}_{X}(D))\ar[r]^<<<<<<<<<{Tr_{X, \Delta}^g}\ar[d]^{\sigma^*} &H^0(\mathcal{O}_{X}(D))\ar[d]^{\sigma^*}\\ &H^0(\mathcal{O}_{X'}((1-p^g)(K_{X'} + \Delta'))^{\frac{1}{p^g}}\otimes_{\mathcal{O}_{X'}}\mathcal{O}_{X'}(\sigma^*D ))\ar[r]^<<<<{Tr_{X', \Delta'}^g} &H^0(\mathcal{O}_{X'}(\sigma^*D)) }$$ In turn we get an injection $\sigma^*: S_{\Delta}^{0}(X, D) \hookrightarrow S_{\Delta'}^{0}(X', \sigma^*D)$, which implies that $\dim S_{\Delta}^{0}(X, D) \leq \dim S_{\Delta'}^{0}(X', \sigma^*D)$. \end{proof} \subsubsection{Trace maps of relative Frobenius iterations I}\label{tmrFI} \begin{Notation}\label{2} Let $f: X \rightarrow Y$ be a separable surjective projective morphism between two schemes over $k$ of finite type and of pure dimension. Assume that $X$ is reduced, $G_1$ and $S_2$ and that $Y$ is integral and regular. Let $\Delta$, $g$ and $D$ be assumed as in Notation \ref{1}. \end{Notation} By assumption $F_Y^{eg}$ is a flat morphism, so $X_{Y^{eg}}$ also satisfies $G_1$ and $S_2$, and $K_{X_{Y^{eg}}/Y^{eg}} = \pi_Y^{eg*}K_{X/Y}$. By easy calculation we have that $$K_{X^{eg}/X_{Y^{eg}}} = (1-p^{eg})K_{X^{eg}/Y^{eg}} ~\mathrm{and}~ F_{X/Y}^{eg*}\pi_Y^{eg*}D = p^{eg}D.$$ Similarly as in \ref{tmaF}, we get the trace map $$ Tr_{X/Y,\Delta}^{eg}(D): F_{X/Y*}^{eg}\mathcal{O}_X ((1-p^{eg})(K_{X/Y} + \Delta)+ p^{eg}D) \rightarrow \mathcal{O}_{X_{Y^{eg}}}(\pi_Y^{eg*}D). $$ Applying $f_{eg*}$ to the above map, we get \begin{align*} f_*Tr_{X/Y,\Delta}^{eg}(D): f_*\mathcal{O}_X &((1-p^{eg})(K_{X/Y} + \Delta)+ p^{eg}D) \\ &\twoheadrightarrow S_{\Delta}^{eg}f_*\mathcal{O}_X(D) \hookrightarrow f_{eg*}\mathcal{O}_{X_{Y^{eg}}}(\pi_Y^{eg*}D) \cong F_Y^{eg*}f_*\mathcal{O}_X(D). \end{align*} where $S_{\Delta}^{eg}f_*\mathcal{O}_X(D)$, introduced in \cite[Def. 6.4]{PSZ} with slightly different notation, denotes the image of $f_*Tr_{X/Y,\Delta}^{eg}(D)$. 
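Let us also record, for the reader's convenience, why the last identification above holds (it is standard and used implicitly): since $Y$ is regular, the Frobenius $F_Y^{eg}$ is flat by Kunz's theorem, so flat base change along the Cartesian square defining $X_{Y^{eg}}$ gives $$f_{eg*}\,\mathcal{O}_{X_{Y^{eg}}}(\pi_Y^{eg*}D) \cong f_{eg*}\,\pi_Y^{eg*}\mathcal{O}_X(D) \cong F_Y^{eg*}f_*\mathcal{O}_X(D).$$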
If $\Delta = 0$, we use the notation $S^{eg}f_*\mathcal{O}_X(D)$ instead of $S_{0}^{eg}f_*\mathcal{O}_X(D)$. For $e'>e$, according to the following commutative diagram $$\xymatrix@=1.5cm{&X\ar[dr]^{F_X^{(e'-e)g}}\ar[d]|{F_{X/Y}^{(e'-e)g}}\ar@/^2.8pc/[rrdd]|{F_X^{e'g}}\ar@/_2.3pc/[ddd]_{f} & &\\ &X_{Y^{(e'-e)g}}\ar[r]^{\pi_Y^{(e'-e)g}}\ar[dd] &X\ar[dr]^{F_X^{eg}}\ar[d]_{F_{X/Y}^{eg}} &\\ & &X_{Y^{eg}}\ar[r]^{\pi_Y^{eg}}\ar[d]^{f_{eg}} &X\ar[d]^f \\ &Y\ar[r]^{F_Y^{(e'-e)g}} &Y\ar[r]^{F_Y^{eg}} &Y }$$ the trace map $f_*Tr_{X/Y,\Delta}^{e'g}(D)$ factors as \begin{align*} f_*\mathcal{O}_X ((1-p^{e'g})(K_{X/Y} + \Delta)+ p^{e'g}&D) \\ \xrightarrow{f_*Tr_{X/Y,\Delta}^{(e'-e)g}((1-p^{eg})(K_{X/Y} + \Delta)+ p^{eg}D)}&F_Y^{(e'-e)g*}f_*\mathcal{O}_X ((1-p^{eg})(K_{X/Y} + \Delta)+ p^{eg}D)\\ &\xrightarrow{F_Y^{(e'-e)g*}f_*Tr_{X/Y,\Delta}^{eg}(D)} F_Y^{(e'-e)g*}S_{\Delta}^{eg}f_*\mathcal{O}_X(D). \end{align*} Then we conclude a natural inclusion $$S_{\Delta}^{e'g}f_*\mathcal{O}_X(D) \hookrightarrow F_Y^{(e'-e)g*}S_{\Delta}^{eg}f_*\mathcal{O}_X(D).$$ \begin{Proposition}\label{stable-dim} Let the notation be as in Notation \ref{2}. Then for every positive integer $e$, $$\dim_{k(\bar{\eta})}S^{eg}_{\Delta_{\bar{\eta}}}(X_{\bar{\eta}}, D_{\bar{\eta}}) = \mathrm{rank} S_{\Delta}^{eg}f_*\mathcal{O}_X(D).$$ As a consequence, for sufficiently large $e$, $\mathrm{rank}S_{\Delta}^{eg}f_*\mathcal{O}_X(D)= \dim_{k(\bar{\eta})} S^{0}_{\Delta_{\bar{\eta}}}(X_{\bar{\eta}}, D_{\bar{\eta}})$. \end{Proposition} \begin{proof} Consider the following commutative diagram $$\xymatrix@C=2.5cm{&X_{\bar{\eta}}\ar[d]\ar[r]^<<<<<<<<<<<<<<<<{F_{X_{\bar{\eta}}/\bar{\eta}}^{eg}}\ar@/^2pc/[rr]|{F_{X_{\bar{\eta}}}^{eg}} &X_{\bar{\eta}^{eg}} \ar[d] \ar[r]^>>>>>>>>>>>>>>{\pi_{\bar{\eta}}^{eg}} & X_{\bar{\eta}}\ar[d]\\ &\bar{\eta}\ar@{=}[r]&\bar{\eta} \cong \bar{\eta}^{eg}\ar[r]^{F_{\bar{\eta}}^{eg}} &\bar{\eta} }.$$ Let $D_{\bar{\eta}^{eg}} = \pi_{\bar{\eta}}^{eg*}D$. Then the trace map w.r.t. the map $\pi_{\bar{\eta}}^{eg}$ $$Tr_{\pi_{\bar{\eta}}^{eg}}: H^0(X_{\bar{\eta}^{eg}}, D_{\bar{\eta}^{eg}}) \rightarrow H^0(X_{\bar{\eta}}, D_{\bar{\eta}})$$ is an isomorphism since $k(\bar{\eta})$ is algebraically closed. By $F_{X_{\bar{\eta}}}^{eg} = \pi_{\bar{\eta}}^{eg} \circ F_{X_{\bar{\eta}}/\bar{\eta}}^{eg}$, the trace map $$H^0(Tr_{X_{\bar{\eta}},\Delta_{\bar{\eta}}}^{eg}(D_{\bar{\eta}})): H^0(X_{\bar{\eta}}, F^{eg}_{X_{\bar{\eta}}*} \mathcal{O}_{X_{\bar{\eta}}} ((1-p^{eg})(K_{X_{\bar{\eta}}} + \Delta_{\bar{\eta}})+ p^{eg}D_{\bar{\eta}})) \rightarrow H^0(X_{\bar{\eta}}, D_{\bar{\eta}})$$ factors as \begin{align*} H^0(X_{\bar{\eta}}, &F^{eg}_{X_{\bar{\eta}}*} \mathcal{O}_{X_{\bar{\eta}}} ((1-p^{eg})(K_{X_{\bar{\eta}}} + \Delta_{\bar{\eta}})+ p^{eg}D_{\bar{\eta}}))\\ &\xrightarrow{H^0(Tr_{X_{\bar{\eta}}/\bar{\eta},\Delta_{\bar{\eta}}}^{eg}(D_{\bar{\eta}^{eg}}))} H^0(X_{\bar{\eta}^{eg}}, D_{\bar{\eta}^{eg}}) \xrightarrow{Tr_{\pi_{\bar{\eta}}^{eg}}}H^0(X_{\bar{\eta}}, D_{\bar{\eta}}). \end{align*} It follows that $\dim_{k(\bar{\eta}^{eg})} \mathrm{Im} H^0(Tr_{X_{\bar{\eta}}/\bar{\eta},\Delta_{\bar{\eta}}}^{eg}(D_{\bar{\eta}^{eg}})) = \dim_{k(\bar{\eta})}S^{eg}_{\Delta_{\bar{\eta}}}(X_{\bar{\eta}}, D_{\bar{\eta}})$. 
On the other hand, since the morphism $i: \bar{\eta} \rightarrow Y$ is flat, the trace map $Tr_{X_{\bar{\eta}}/\bar{\eta},\Delta_{\bar{\eta}}}^{eg}(D_{\bar{\eta}^{eg}})$ coincides with the pull-back map via $i^*$ of \begin{align*} Tr_{X/Y,\Delta}^{eg}(D): f_*\mathcal{O}_X ((1-p^{eg})(K_{X/Y} + &\Delta)+ p^{eg}D) \\ &\twoheadrightarrow S_{\Delta}^{eg}f_*\mathcal{O}_X(D) \hookrightarrow F_Y^{eg*}f_*\mathcal{O}_X(D). \end{align*} Then we conclude the proof by $$\mathrm{rank} S_{\Delta}^{eg}f_*\mathcal{O}_X(D) = \dim_{k(\bar{\eta}^{eg})} \mathrm{Im} H^0(Tr_{X_{\bar{\eta}}/\bar{\eta},\Delta_{\bar{\eta}}}^{eg}(D_{\bar{\eta}^{eg}})) = \dim_{k(\bar{\eta})}S^{eg}_{\Delta_{\bar{\eta}}}(X_{\bar{\eta}}, D_{\bar{\eta}}).$$ \end{proof} \subsubsection{Trace maps of relative Frobenius iterations II: in the normal setting}\label{tmrFII} \begin{Notation}\label{3} Let $f: X \rightarrow Y$ be a separable surjective projective morphism between two schemes over $k$ of finite type and of pure dimension. Assume that $X$ is normal and $Y$ is integral and regular. Let $D$ be a Weil divisor on $X$ and $\Delta$ an effective $\mathbb{Q}$-Weil divisor on $X$. Assume that $K_X + \Delta$ is $\mathbb{Q}$-Cartier. It is known that $X_{Y^{e}}$ is $G_1$ and $S_2$, $\pi_Y^{e*}D$ is an AC divisor on $X_{Y^{e}}$, and the sheaf $\mathcal{O}_{X_{Y^{e}}}(\pi_Y^{e*}D)$ is isomorphic to $\pi_Y^{e*}\mathcal{O}_{X}(D)$. \end{Notation} Replacing $(p^{eg} - 1)\Delta$ with $[(p^{eg} - 1)\Delta]$, analogously to Sec. \ref{tmrFI}, we get the trace maps $$ Tr_{X/Y,\Delta}^{eg}(D): F_{X/Y*}^{eg}\mathcal{O}_X ((1-p^{eg})K_{X/Y} - [(p^{eg} - 1)\Delta]+ p^{eg}D) \rightarrow \mathcal{O}_{X_{Y^{eg}}}(\pi_Y^{eg*}D) $$ and \begin{align*} f_*Tr_{X/Y,\Delta}^{eg}(D): f_*\mathcal{O}_X &((1-p^{eg})K_{X/Y} - [(p^{eg} - 1)\Delta] + p^{eg}D) \\ &\twoheadrightarrow S_{\Delta}^{eg}f_*\mathcal{O}_X(D) \hookrightarrow f_{eg*}\mathcal{O}_{X_{Y^{eg}}}(\pi_Y^{eg*}D) \cong F_Y^{eg*}f_*\mathcal{O}_X(D). \end{align*} where $S_{\Delta}^{eg}f_*\mathcal{O}_X(D)$ denotes the image of $f_*Tr_{X/Y,\Delta}^{eg}(D)$. Note that for $e'> e$ the divisor below is effective $$[(p^{e'g} - 1)\Delta] - p^{(e'-e)g}[(p^{eg} - 1)\Delta].$$ We deduce the following two inclusions $$S_{\Delta}^{e'g}f_*\mathcal{O}_X(D) \hookrightarrow F_Y^{(e'-e)g*}S_{\Delta}^{eg}f_*\mathcal{O}_X(D)\hookrightarrow F_Y^{e'g*}f_*\mathcal{O}_X(D)$$ by the factorization \begin{align*} f_*Tr_{X/Y,\Delta}^{e'g}(D): &f_*\mathcal{O}_X ((1-p^{e'g})K_{X/Y} - [(p^{e'g} - 1)\Delta]+ p^{e'g}D) \\ & \hookrightarrow f_*\mathcal{O}_X ((1-p^{e'g})K_{X/Y} - p^{(e'-e)g}[(p^{eg} - 1)\Delta] + p^{e'g}D) \\ &\xrightarrow{f_*Tr_{X/Y,0}^{(e'-e)g}((1-p^{eg})K_{X/Y} - [(p^{eg} - 1)\Delta]+ p^{eg}D)} \\ &F_Y^{(e'-e)g*}f_*\mathcal{O}_X ((1-p^{eg})K_{X/Y} - [(p^{eg} - 1)\Delta]+ p^{eg}D)\\ &\xrightarrow{F_Y^{(e'-e)g*}f_*Tr_{X/Y,\Delta}^{eg}(D)} F_Y^{(e'-e)g*}S_{\Delta}^{eg}f_*\mathcal{O}_X(D) \hookrightarrow F_Y^{e'g*}f_*\mathcal{O}_X(D). \end{align*} \begin{Proposition}\label{stable-dim-II} Let the notation be as in Notation \ref{3}. Assume moreover that $D$ is Cartier and $p \nmid \mathrm{ind} ((K_X + \Delta)_{\eta})$. Let $g$ be a positive integer such that $(1-p^{g})(K_X + \Delta)_{\eta}$ is Cartier. 
Then for every positive integer $e$, $$\dim_{k(\bar{\eta})}S^{eg}_{\Delta_{\bar{\eta}}}(X_{\bar{\eta}}, D_{\bar{\eta}}) = \mathrm{rank} S_{\Delta}^{eg}f_*\mathcal{O}_X(D).$$ As a consequence, for sufficiently large $e$, $\mathrm{rank} S_{\Delta}^{eg}f_*\mathcal{O}_X(D) = \dim_{k(\bar{\eta})} S^{0}_{\Delta_{\bar{\eta}}}(X_{\bar{\eta}}, D_{\bar{\eta}})$. \end{Proposition} \begin{proof} Shrinking $Y$ and $X$, we can assume that $(1-p^{g})(K_X + \Delta)$ is Cartier. Then we are done by applying Proposition \ref{stable-dim}. \end{proof} \subsection{Weak positivity}\label{wp} \begin{Definition} A torsion free coherent sheaf $\mathcal{F}$ on a normal quasi-projective variety $Y$ is said to be \emph{generically globally generated} if for a general closed point $y \in Y$ the homomorphism $H^0(Y, \mathcal{F}) \rightarrow \mathcal{F}\otimes k(y)$ is surjective; and is said to be \emph{weakly positive}, if for every ample line bundle $H$ on $Y$ and positive integer $m$, there exists a sufficiently large integer $n$ such that, $S^n(H\otimes S^m(\mathcal{F})^{**})$ is generically globally generated, where for a coherent sheaf $\mathcal{G}$, $\mathcal{G}^{**}:=\mathcal{H}om(\mathcal{H}om(\mathcal{G}, \mathcal{O}_Y), \mathcal{O}_Y)$ denotes the double dual. \end{Definition} Recall an invariant introduced by Ejiri in \cite[Sec. 4]{Ej} to measure the positivity of a sheaf. \begin{Definition} Let $Y$ be a quasi-projective variety, $\mathcal{F}$ a torsion free coherent sheaf and $H$ an ample $\mathbb{Q}$-Cartier $\mathbb{Q}$-divisor on $Y$. Let \begin{align*} t(Y,\mathcal{F}, H)= \sup\{a \in \mathbb{Q}|\mathrm{the~sheaf~}&(F_{Y}^{e*}\mathcal{F})\otimes\mathcal{O}_{Y}([ -p^eaH]) \\ ~&\mathrm{is~ generically~globally~generated~ for ~some ~} e>0\}. \end{align*} \end{Definition} Recall the following result due to Ejiri \cite[Proposition 4.7]{Ej}. \begin{Lemma}\label{cri-for-positivity} Let $Y$ be a normal quasi-projective variety, $H$ an ample $\mathbb{Q}$-Cartier $\mathbb{Q}$-divisor on $Y$, $Y_0 \subseteq Y$ an open set such that $codim_Y(Y\setminus Y_0) \geq 2$, $\mathcal{F}$ a torsion free coherent sheaf on $Y$. If $t(Y_0, \mathcal{F}|_{Y_0}, H) \geq 0$, then $\mathcal{F}$ is weakly positive. \end{Lemma} \begin{Remark}\label{t-non-ngtv} The condition $t(Y, \mathcal{F}, H) \geq 0$ is equivalent to that, there exist a sequence of positive integers $\{n_e|e = 1,2,3,\cdots \}$ such that $\frac{n_e}{p^e}\rightarrow 0$, $n_eH$ is Cartier, and the sheaf $(F_{Y}^{e*}\mathcal{F})\otimes\mathcal{O}_{Y}(n_eH)$ is generically globally generated. It is easy to show that if $t(Y, \mathcal{F}, H) \geq 0$ then $t(Y, \mathcal{F}, H') \geq 0$ for every ample divisor $H'$. So if this happens we may simply denote $t(Y, \mathcal{F}) \geq 0$. \end{Remark} \subsection{Surjection of restriction maps} Recall a Keeler's result. \begin{Lemma}[{\cite[Theorem 1.5]{Keeler}}](\textbf{\emph{Relative Fujita Vanishing}}) Let $f: X \rightarrow Y$ be a projective morphism over a Noetherian scheme, $H$ an $f$-ample line bundle and $\mathcal{F}$ a coherent sheaf on $X$. Then there exists a positive integer $N$ such that, for every $n >N$ and every nef line bundle $L$ $$R^if_*(\mathcal{F}\otimes H^n \otimes L) = 0, \mathrm{~if~} i>0.$$ \end{Lemma} \begin{Lemma}\label{restr} Let $f: X \rightarrow Y$ be a surjective projective morphism between two projective varieties. Let $H$ be an ample line bundle and $\mathcal{F}$ a coherent sheaf on $X$. 
Then there exist a positive integer $N$ and a non-empty Zariski open set $Y_0 \subseteq Y$ such that, for every $n>N$, every nef line bundle $L$ on $X$ and every closed point $y\in Y_0$ the restriction map $$r_y^{n, L}: H^0(X, \mathcal{F} \otimes H^n \otimes L) \rightarrow H^0(X_y, \mathcal{F} \otimes H^n \otimes L\otimes \mathcal{O}_{X_y})$$ is surjective. In particular for two nef Cartier divisors $A_1 ,A_2$ on $X$, if $A_1 + A_2$ is ample, then there exists an integer $M$ such that, for integers $m, n>M$ and a closed point $y\in Y_0$ the restriction map below is surjective $$r_y^{m,n}: H^0(X, \mathcal{F} \otimes \mathcal{O}_X(mA_1 + nA_2)) \rightarrow H^0(X_y, \mathcal{F} \otimes \mathcal{O}_X(mA_1 + nA_2) \otimes \mathcal{O}_{X_y}).$$ \end{Lemma} \begin{proof} Consider the following commutative diagram \begin{center}\xymatrix@C=2.5cm{&X_{\Delta} \ar[d]^{f_{\Delta}}\ar[r]^{j} &X \times Y \ar[d]^{f \times id_Y}\ar[r]^{p_1} &X \ar[d]^f \\ &\Delta \ar@{^(->}[r]^{i} &Y \times Y \ar[r]^{q_1} &Y } \end{center} where $i:\Delta \hookrightarrow Y \times Y$ denotes the diagonal embedding of $Y$, $X_{\Delta} = (X \times Y) \times_{Y\times Y}\Delta$, $p_i, q_i, i = 1,2$ denote the projection from $X \times Y, Y\times Y$ to the $i^{\mathrm{th}}$ factors respectively. Denote by $\mathcal{K}$ the kernel of the restriction homomorphism $p_1^* \mathcal{F} \rightarrow p_1^*\mathcal{F}\otimes \mathcal{O}_{X_{\Delta}}$. Applying relative Fujita vanishing above, since $p_1^*H$ is $p_2$-ample, we can find a positive integer $N$ such that, for every $n>N$, $i>0$ and every nef line bundle $L$ on $X$ $$R^ip_{2*} (\mathcal{K} \otimes p_1^*(H^n \otimes L)) = 0~\mathrm{and}~R^if_*(\mathcal{F} \otimes H^n \otimes L) = 0~~~~~~~(\clubsuit).$$ Let $n>N$. Tensoring the exact sequence $$0 \rightarrow \mathcal{K} \rightarrow p_1^*\mathcal{F} \rightarrow p_1^*\mathcal{F} \otimes \mathcal{O}_{X_{\Delta}} \rightarrow 0$$ by the line bundle $p_1^*(H^n \otimes L)$ yields the exact sequence $$0 \rightarrow \mathcal{K} \otimes p_1^*(H^n \otimes L) \rightarrow p_1^*(\mathcal{F}\otimes H^n \otimes L) \rightarrow p_1^*(\mathcal{F}\otimes H^n \otimes L) \otimes\mathcal{O}_{X_{\Delta}} \rightarrow 0.$$ Applying the derived functor $Rp_{2*}$ to the exact sequence above, by vanishing $\clubsuit$ we get a surjection $$\alpha^{n, L}: p_{2*} p_1^*(\mathcal{F}\otimes H^n \otimes L) \twoheadrightarrow p_{2*} (p_1^*(\mathcal{F}\otimes H^n \otimes L) \otimes \mathcal{O}_{X_{\Delta}}).$$ Identifying $X_{\Delta}$ with $X$ via the isomorphism $p_1 \circ j$, and $p_2|_{X_{\Delta}}: X_{\Delta} \rightarrow Y$ with $f$, we can identify $p_{2*} (p_1^*(\mathcal{F}\otimes H^n \otimes L) \otimes\mathcal{O}_{X_{\Delta}})$ with $f_*(\mathcal{F}\otimes H^n \otimes L)$. Then by $$p_{2*} p_1^*(\mathcal{F}\otimes H^n \otimes L) \cong H^0(X, \mathcal{F}\otimes H^n \otimes L) \otimes \mathcal{O}_Y \cong H^0(Y, f_*(\mathcal{F}\otimes H^n \otimes L)) \otimes \mathcal{O}_Y,$$ we can identify the map $\alpha^{n,L}$ with the natural map $$\beta^{n, L}: H^0(Y, f_*(\mathcal{F}\otimes H^n \otimes L)) \otimes \mathcal{O}_Y \twoheadrightarrow f_*(\mathcal{F}\otimes H^n \otimes L).$$ There exists a non-empty open set $Y_0 \subseteq Y$ such that $\mathcal{F}$ is flat over $Y_0$. 
Since $R^1f_*(\mathcal{F} \otimes H^n \otimes L) = 0$ by $\clubsuit$, applying \cite[Theorem 12.11]{Har} we have that for every closed point $y \in Y_0$, $$f_*(\mathcal{F}\otimes H^n \otimes L) \otimes k(y)\cong H^0(X_y, \mathcal{F}\otimes H^n \otimes L \otimes \mathcal{O}_{X_y}).$$ Since $\beta^{n, L}$ is a surjection, we conclude that the restriction map $$r_y^{n, L}: H^0(X, \mathcal{F} \otimes H^n \otimes L) \rightarrow H^0(X_y, \mathcal{F} \otimes H^n \otimes L\otimes \mathcal{O}_{X_y})$$ is a surjection. The remaining assertion is an easy consequence of the first one. \end{proof} \subsection{Minimal models of $3$-folds} We collect some results on minimal model theory for $3$-folds, which will be used in this paper. First recall a result of Kawamata adapted to char $p>0$; please refer to \cite[Lemma 5.6]{BW} for a proof. \begin{Lemma}\label{l-linear-pullback} Let $f: X \rightarrow Z$ be a fibration between normal quasi-projective varieties over an algebraically closed field $k$ with $\mathrm{char}~k = p >0$. Let $L$ be a nef $\mathbb{Q}$-Cartier $\mathbb{Q}$-divisor on $X$ such that $L|_F\sim_{\mathbb{Q}} 0$ where $F$ is the generic fibre of $f$. Assume $\dim Z\le 3$. Then there exist a commutative diagram $$ \xymatrix{ X'\ar[r]^\phi\ar[d]^{f'} & X\ar[d]^f\\ Z'\ar[r]^\psi & Z } $$ with $\phi,\psi$ projective birational, and an $\mathbb{R}$-Cartier divisor $D$ on $Z'$ such that $\phi^* L\sim_{\mathbb{Q}} f'^*D$. \end{Lemma} \begin{Theorem}\label{rel-mmp} Let $(X,\Delta)$ be a projective $\mathbb{Q}$-factorial klt pair of dimension 3 and $f: X\rightarrow Y$ a fibration over an algebraically closed field $k$ with $\mathrm{char}~k = p >5$. (1) If $K_X+\Delta$ is pseudo-effective over $Y$, then $(X,\Delta)$ has a log minimal model over $Y$. (2) If $K_X+\Delta$ is not pseudo-effective over $Y$, then $(X,\Delta)$ has a Mori fibre space over $Y$. (3) Assume that $K_X+\Delta$ is nef over $Y$. \begin{itemize} \item[(3.1)] If $K_X+\Delta$ or $\Delta$ is big over $Y$, then $K_X+\Delta$ is semi-ample over $Y$. \item[(3.2)] If $Y$ is a smooth curve and $\kappa(X_{\eta}, (K_X+\Delta)_{\eta}) \geq 0$, then $(K_X+\Delta)_{\eta}$ is semi-ample on $X_{\eta}$. \item[(3.3)] If $Y$ is a smooth curve and $\kappa(X_{\eta}, (K_X+\Delta)_{\eta}) = 0$ or $2$, then $K_X+\Delta$ is semi-ample over $Y$. \item[(3.4)] If $Y$ is a smooth curve with $g(Y) \geq 1$ and $\kappa(X_{\eta}, (K_X+\Delta)_{\eta}) \geq 0$, then $K_X+\Delta$ is nef. \end{itemize} (4) If $Y$ is a non-uniruled surface and $K_X+\Delta$ is pseudo-effective over $Y$, then $K_X + \Delta$ is pseudo-effective, and there exists a map to a minimal model $\sigma: X \dashrightarrow \bar{X}$ such that the restriction $\sigma|_{X_{\eta}}$ is an isomorphism from $X_{\eta}$ to its image. \end{Theorem} \begin{proof} For (1) refer to \cite{HX} and \cite{Bir13}. For (2) refer to \cite{BW}. For (3.1) refer to \cite{Bir13}, \cite{Xu} and \cite{BW}. For (3.2) and (3.3) refer to \cite[Theorem 1.5 and 1.6 and the remark below 1.6]{BCZ} or \cite[Theorem 1.1]{Ta15}. Assertion (3.4) follows from the cone theorem \cite[Theorem 1.1]{BW}. Indeed, otherwise we can find an extremal ray $R$ generated by a rational curve $\Gamma$, so $\Gamma$ is contained in a fiber of $f$ since $g(Y) >0$; this contradicts that $K_X + \Delta$ is $f$-nef. For (4), first $K_X + \Delta$ is obviously pseudo-effective because otherwise, by (2), $X$ would be covered by rational curves horizontal with respect to $f$, which contradicts that $Y$ is non-uniruled.
The exceptional locus of a flip contraction is of dimension one, so it does not intersect $X_{\eta}$; neither does that of an extremal divisorial contraction, because it is uniruled (cf. the $2^{\mathrm{nd}}$ paragraph of the proof of \cite[Lemma 3.2]{BW}), hence does not dominate over $Y$. Running an LMMP for $K_X +\Delta$, by induction we get a map $\sigma: X \dashrightarrow \bar{X}$ as required. \end{proof} \subsection{Covering Theorem} The result below is \cite[Theorem 10.5]{Iit} when $X$ and $Y$ are both smooth, and the proof also applies when they are normal. \begin{Theorem}\label{cth} Let $f: X \rightarrow Y$ be a proper surjective morphism between normal projective varieties. If $D$ is a Cartier divisor on $Y$ and $E$ an effective $f$-exceptional Cartier divisor on $X$. Then $$\kappa(X,f^*D + E) = \kappa(Y,D).$$ \end{Theorem} \subsection{Easy subadditivity of Kodaira dimensions} The following result is known to experts, please refer to \cite[Lemma 2.22]{BCZ} or \cite[Lemma 4.2]{Pa2} for a proof. \begin{Lemma}\label{l-adtv-of-kdim} Let $f: X\rightarrow Y$ be a fibration between normal projective varieties. Let $D$ be an effective $\mathbb{Q}$-Cartier divisor on $X$ and $H$ a big $\mathbb{Q}$-Cartier divisor on $Y$. Then $$\kappa(D + f^*H) \geq \kappa(X_{\eta}, D|_{X_{\eta}}) + \dim Y.$$ \end{Lemma} \section{Proof of Theorem \ref{mthp}}\label{pf-positivity} Let $Y_0$ be a smooth open subset of $Y$ such that $\mathrm{codim}_Y(Y\setminus Y_0) \geq 2$. By Proposition \ref{stable-dim-II} we can assume $g$ is divisible enough that for every positive integer $e$, the sheaf $S_{\Delta}^{eg}f_*\mathcal{O}_X(D)$ has the stable rank $\dim_{k(\bar{\eta})} S_{\Delta_{\bar{\eta}}}^0(X_{\bar{\eta}}, D_{\bar{\eta}})$. Then for every integer $e >0$, the composite homomorphism below is generically surjective \begin{align*} \alpha^{eg}: f_*\mathcal{O}_X ((1-p^{eg})(K_{X/Y})- &[(p^{eg}-1)\Delta] + p^{eg}D)|_{Y_0} \\ &\twoheadrightarrow (S_{\Delta}^{eg}f_*\mathcal{O}_X(D))|_{Y_0} \hookrightarrow (F_Y^{(e-1)g*}S_{\Delta}^{g}f_*\mathcal{O}_X(D))|_{Y_0}, \end{align*} because the two sheaves $S_{\Delta}^{eg}f_*\mathcal{O}_X(D)|_{Y_0}$ and $(F_Y^{(e-1)g*}S_{\Delta}^{g}f_*\mathcal{O}_X(D))|_{Y_0}$ have the same rank. Let $H$ be an ample Cartier divisor on $Y$. Tensoring the map $\alpha^{eg}$ with $\mathcal{O}_Y(eH)$, we get a generically surjective homomorphism \begin{align*} \beta^{eg}: f_*\mathcal{O}_X ((1-p^{eg})(K_{X/Y})- &[(p^{eg}-1)\Delta] + p^{eg}D+ ef^*H)|_{Y_0} \\ &\rightarrow F_Y^{(e-1)g*}S_{\Delta}^{g}f_*\mathcal{O}_X(D)\otimes \mathcal{O}_Y(eH)|_{Y_0}. \end{align*} \begin{Claim} There is an integer $e_0$ such that, for every integer $e > e_0$ the sheaf $f_*\mathcal{O}_X ((1-p^{eg})K_{X/Y}- [(p^{eg}-1)\Delta] + p^{eg}D + ef^*H)$ is generically globally generated. \end{Claim} \begin{proof}[Proof of the claim] Since $D -K_{X/Y} - \Delta$ is $f$-semi-ample, we have two morphisms $h: X \rightarrow Z$ and $g: Z \rightarrow Y$, such that $D -K_{X/Y} - \Delta \sim_{\mathbb{Q}} h^*A'$ where $A'$ is a $g$-ample $\mathbb{Q}$-Cartier divisor on $Z$, which is also nef by the assumption. Take an integer $d >0$ such that $A = dA' \sim d(D -K_{X/Y} - \Delta)$ is Cartier. Write that $p^{eg} -1 = q_e d + r_e $ where $q_e$ and $r_e$ are integers such that $0 \leq r_e <d$. 
Then by $f_* = g_*\circ h_*$, we have \begin{align*} &f_*\mathcal{O}_X ((1-p^{eg})K_{X/Y}- [(p^{eg}-1)\Delta]+ p^{eg}D + ef^*H)\\ &\cong f_*\mathcal{O}_X (q_e d (D - K_{X/Y} - \Delta) + ef^*H + (r_e+1)D - r_eK_{X/Y} - [r_e\Delta])\\ & \cong g_* h_* \mathcal{O}_X (h^*q_eA + eh^*g^*H + (r_e+1)D - r_eK_{X/Y} - [r_e\Delta])\\ &\cong g_*(\mathcal{O}_Z(q_eA + eg^*H)\otimes h_*\mathcal{O}_X ((r_e+1)D - r_eK_{X/Y} - [ r_e\Delta])) \end{align*} where the last ``$\cong$'' is from using projection formula. Note that the set $$\{h_*\mathcal{O}_X ((r_e+1)D - r_eK_{X/Y} - [r_e\Delta])| e = 0,1,2,\cdots \}$$ contains finitely many coherent sheaves. Since $A + g^*H$ is ample, and both $A$ and $g^*H$ are nef, by Lemma \ref{restr} there exist a positive integer $e_0$ and a non-empty Zariski open subset $Y'_0 \subseteq Y$ such that for every $e>e_0$ and $y \in Y'_0$, the restriction map \begin{align*} &H^0(Y, f_*\mathcal{O}_X ((1-p^{eg})(K_{X/Y})- [(p^{eg}-1)\Delta]+ p^{eg}D + ef^*H)) \\ &\cong H^0(Z, \mathcal{O}_Z(q_eA + eg^*H)\otimes h_*\mathcal{O}_X ((r_e+1)D - r_eK_{X/Y} - [ r_e\Delta])) \\ &\xrightarrow{} H^0(Z_y, \mathcal{O}_Z(q_eA + eg^*H)\otimes h_*\mathcal{O}_X ((r_e+1)D - r_eK_{X/Y} - [ r_e\Delta]) \otimes\mathcal{O}_{Z_y}) \\ &\cong g_*(\mathcal{O}_Z(q_eA + eg^*H)\otimes h_*\mathcal{O}_X ((r_e+1)D - r_eK_{X/Y} - [ r_e\Delta]))\otimes k(y) \\ & \cong f_*\mathcal{O}_X ((1-p^{eg})(K_{X/Y})- [(p^{eg}-1)\Delta] + p^{eg}D + ef^*H)\otimes k(y) \end{align*} is surjective. \end{proof} The claim above implies that the image of $\beta^{eg}$ is generically globally generated, hence so is the sheaf $F_Y^{(e-1)g*}S_{\Delta}^{g}f_*\mathcal{O}_X(D)\otimes \mathcal{O}_Y(eH)|_{Y_0}$. Therefore, by Remark \ref{t-non-ngtv} we have $t(Y_0, S_{\Delta}^{g}f_*\mathcal{O}_X(D)|_{Y_0}, H) \geq 0$, which implies that $S_{\Delta}^{g}f_*\mathcal{O}_X(D)$ is weakly positive by Lemma \ref{cri-for-positivity}. If $Y$ is smooth, then setting $Y_0 = Y$, by the argument above we show that $t(Y, S_{\Delta}^{g}f_*\mathcal{O}_X(D), H) \geq 0$. \section{Subadditivity of Kodaira dimensions}\label{pf-of-subadd} In this section, we will prove Theorem \ref{mthk}. Let's begin with a theorem with similar spirit of \cite[Lemma 4.4]{Pa2}. \begin{Theorem}\label{F-p-subadd-of-kod-dim} Let $f: X \rightarrow Y$ be a separable fibration between normal projective varieties over an algebraically closed field $k$ with $\mathrm{char}~k = p>0$, and let $D$ be a Cartier divisor on $X$. Assume that for some positive integer $e$, the sheaf $F_Y^{e*} f_*\mathcal{O}_X(D)$ contains a non-zero subsheaf $\mathcal{F}$ such that $t(Y, \mathcal{F}) \geq 0$. Then for any big $\mathbb{Q}$-Cartier divisor $H$ on $Y$, we have $$\kappa(D + H) \geq \kappa(X_{\eta}, D|_{X_{\eta}}) + \dim Y.$$ \end{Theorem} \begin{proof} Let $A$ be an ample $\mathbb{Q}$-Cartier divisor on $Y$ such that $H \geq 2A$. By Remark \ref{t-non-ngtv}, we can find positive integers $g, n_g \ll p^g$ such that, the sheaf $F_{Y}^{g*}\mathcal{F} \otimes \mathcal{O}_Y(n_gA)$ is generically globally generated. Consider the following commutative diagram \\ \[\xymatrix@C=2cm{&X'\ar@/^1.5pc/[rr]|{\sigma}\ar[r]^{\sigma'}\ar[dr]^{f'} &X_{Y^{e+g}}\ar[r]^{\pi_{Y}^{e+g}}\ar[d]^{f_{e+g}} &X\ar[d]^f\\ & &Y^{e+g}\ar[r]^{F_{Y}^{e+ g}} &Y\\ } \] where $X'$ denotes the normalization of $X_{Y^{e+g}}$ and $\sigma, \sigma', f'$ denote the natural morphisms. 
By the commutative diagram above, there are natural inclusions $$F_{Y}^{(e+ g)*}f_*\mathcal{O}_X(D) \hookrightarrow f_{(e+g)*}\pi_{Y}^{e+g*} \mathcal{O}_X(D) \hookrightarrow f'_*\mathcal{O}_{X'}(\sigma'^* \pi_{Y}^{(e+g)*}D) = f'_*\mathcal{O}_{X'}(\sigma^*D).$$ Therefore, the sheaf $f'_*\mathcal{O}_{X'}(\sigma^*D)\otimes \mathcal{O}_Y(n_gA)$ contains a generically globally generated subsheaf $F_{Y}^{g*}\mathcal{F} \otimes \mathcal{O}_Y(n_gA)$. We can find an effective divisor $D'$ on $X'$ such that $$D' \sim \sigma^*D + n_gf'^*A.$$ By $F_{Y}^{(e+g)*}H = p^{e+g}H > 2n_g A$, we complete the proof by \begin{align*} \kappa(X, D+ f^*H) &= \kappa(X', \sigma^*D + f'^*F_{Y}^{(e+g)*}H) \cdots \text{by Theorem \ref{cth}} \\ &\geq \kappa(X', \sigma^*D + 2n_g f'^*A) \\ & = \kappa(X', D' + n_g f'^*A) \\ & \geq \kappa(X'_{\eta^{e+g}}, D'|_{X'_{\eta^{e+g}}}) + \dim Y \cdots \text{by Lemma \ref{l-adtv-of-kdim}}\\ & \geq \kappa(X_{\eta}, D|_{X_{\eta}}) + \dim Y \cdots \text{since $D'|_{X'_{\eta^{e+g}}} = \sigma^*(D|_{X_{\eta}})$}. \end{align*} \end{proof} Before proving Theorem \ref{mthk}, let's explain the strategy. If the base $Y$ is smooth and $A$ is Cartier (good situation), first by Theorem \ref{mthp} we can show that some Frobenius pullback of $f_*\mathcal{O}_X(D-f^*A)$ contains a nonzero subsheaf $\mathcal{F}$ with $t(Y, \mathcal{F}) \geq 0$, then by $D = (D- f^*A) + f^*A$, applying Theorem \ref{F-p-subadd-of-kod-dim} we finish the proof. To reduce to a fibration with the base being smooth, we will do a smooth alteration base change (\cite{J}), namely, a proper, surjective and generically finite morphism; and to reduce to the situation $A$ being Cartier, we will do some Frobenius base changes and replace the pullback of $A$ with a big Cartier divisor. \begin{proof}[Proof of Theorem \ref{mthk}] We break the proof into three steps following the above strategy. \textbf{Step 1:} We reduce to the good situation. Consider the following commutative diagram \begin{equation*} \begin{gathered} \xymatrix@C=1.5cm{&X_2\ar@/^1.6pc/[rrr]|{\sigma}\ar[rd]^{f_2}\ar[r]^>>>>>>>{\sigma_2} &\bar{X}_2=X\times_Y Y_2\ar[rr]^{\sigma_1}\ar[d]^{\bar{f}_2} & &X\ar[d]^{f}\\ & &Y_2 = Y_1^{g_0}\ar[r]^{F_{Y_1}^{g_0}} &Y_1\ar[r]^{\mu} &Y\\ } \end{gathered} \end{equation*} where \begin{itemize} \item if $Y$ is smooth then we set $Y_1 = Y$ and $\mu = \mathrm{id}_Y$; otherwise, $f: X \to Y$ is flat by the assumption, we set $\mu: Y_1\rightarrow Y$ to be a smooth alteration (\cite{J}), thus by the construction the base change $\bar{X}_2=X\times_Y Y_2$ is always integral; \item $\sigma_2: X_2 \rightarrow \bar{X}_2$ is the normalization morphism; \item $\sigma, \sigma_1, f_2$ and $\bar{f}_2$ denote the natural morphisms. \end{itemize} We can assume $g_0$ is big enough such that, the geometric generic fiber $(X_2)_{\bar{\eta}}$ is normal, and that the integral part $A_2 = [p^{g_0}\mu^*A]$ is big. Then $B = p^{g_0}\mu^*A - A_2$ is effective, and on $Y_2$ we have $F_{Y_1}^{g_0*}\mu^*A = A_2 + B$. 
We claim that there exist an effective $\mathbb{Q}$-Weil divisor $\Delta'$ and an effective $\sigma$-exceptional Cartier divisor $E_2$ on $X_2$ such that $$K_{X_2/Y_2} + \Delta' = \sigma^*(K_{X/Y} + \Delta) + E_2~\mathrm{and}~E_2|_{(X_2)_{\bar{\eta}}} = 0.$$ Indeed, if $Y$ is smooth then the base change $Y_2 \to Y$ is flat and thus $K_{\bar{X}_2/Y_2} = \sigma_1^*K_{X/Y}$; in this case we can set $E_2 = 0$ and construct $\Delta'$ by applying Proposition \ref{F-non-vanishing} (4); otherwise, since $f: X\to Y$ is flat, we can apply Proposition \ref{compds} to get the divisors $\Delta'$ and $E_2$ on $X_2$ as required. Let $$\Delta_2 = \Delta' + f_2^*B ~\mathrm{and}~ D_2 = \sigma^*D + E_2.$$ Immediately it follows that $K_{X_2/Y_2} + \Delta_2 = \sigma^*(K_{X/Y} + \Delta) + E_2 + f_2^*B$ and $$(K_{X_2/Y_2} + \Delta_2)_{\bar{\eta}} = (\sigma^*(K_{X/Y} + \Delta))_{\bar{\eta}}~\mathrm{and}~(D_2)_{\bar{\eta}} = (\sigma^*D)_{\bar{\eta}}.$$ Therefore, (i) the divisor $(D_2 - f_2^*A_2) - K_{X_2/Y_2} - \Delta_2 \sim_{\mathbb{Q}} \sigma^*(D- (K_{X/Y} + \Delta) - f^*A)$ is nef and $f_2$-semi-ample; (ii) $p\nmid \mathrm{ind}(K_{X_2/Y_2} + \Delta_2)_{\bar{\eta}}$ by the assumption (1) in the theorem; and (iii) applying Proposition \ref{F-non-vanishing} (4) shows $S^0_{(\Delta_2)_{\bar{\eta}}}((X_2)_{\bar{\eta}}, (D_2)_{\bar{\eta}}) \neq 0$ by the assumption (3). \textbf{Step 2:} Applying Theorem \ref{mthp} to the pair $(X_2, \Delta_2)$, we can show that for sufficiently divisible $e$, the sheaf $F_{Y_2}^{e*}f_{2*}\mathcal{O}_{X_2}(D_2- f_2^*A_2)$ contains a nonzero subsheaf $S^e_{\Delta_2}f_{2*}\mathcal{O}_{X_2}(D_2- f_2^*A_2)$ satisfying $t(Y, S^e_{\Delta_2}f_{2*}\mathcal{O}_{X_2}(D_2- f_2^*A_2)) \geq 0$. Then we conclude that \begin{align*} \kappa(X, D) & = \kappa(X_2, D_2) ~~~~\text{$\cdot\cdot\cdot$ by Theorem \ref{cth}, since $D_2 = \sigma^*D + E_2$}\\ & = \kappa(X_2, (D_2 - f_2^*A_2) + f_2^*A_2 ) \\ & \geq \dim Y + \kappa((X_2)_{\eta_2}, (D_2)_{\eta_2}) ~~~~\text{$\cdot\cdot\cdot$ by Theorem \ref{F-p-subadd-of-kod-dim}}\\ & \geq \dim Y + \kappa(X_{\bar{\eta}}, D_{\bar{\eta}}) ~~~~\text{$\cdot\cdot\cdot$ since $(D_2)_{\bar{\eta}} = (\sigma^*D)_{\bar{\eta}}$.} \end{align*} \textbf{Step 3:} We are left to prove that $D$ is big under the conditions (1) and (2'), together with the assumption that $D$ is nef and $f$-big. Take an ample divisor $A_1$ on $Y$. Then $D+ f^*A_1$ is big. We can write that $$D+ f^*A_1 \sim_{\mathbb{Q}} H_1 + B_1$$ where $H_1$ is ample and $B_1$ is an effective $\mathbb{Q}$-Cartier divisor with $p \nshortmid \mathrm{ind}(B_1)$. Take a rational number $\delta > 0$ small enough such that (i) $A'= A - \delta A_1$ is big on $Y$; and (ii) $p \nshortmid \mathrm{ind}(\delta B_1)$.\\ Let $\Delta' = \Delta + \delta B_1$. Then for a sufficiently divisible integer $m >0$, since $D$ is nef and $f$-big we have (a) $mD - (K_{X/Y} + \Delta') - f^*A' = (m - 1- \delta)D + \delta(D + f^*A_1 - B_1) + (D - (K_{X/Y} + \Delta) - f^*A)$ is ample by the condition (2'); and (b) $S^0_{\Delta'_{\bar{\eta}}}(X_{\bar{\eta}}, mD_{\bar{\eta}}) \neq 0$ by Proposition \ref{F-non-vanishing} (3). Finally, applying the result of \textbf{Step 2} to the pair $(X, \Delta')$, we show that $mD$ is big. \end{proof} \section{Application to three-folds}\label{app-to-3fold} In this section we will focus on three-dimensional varieties in characteristic $p>5$. Taking advantage of the minimal model program and resolution of singularities, both Corollary \ref{app-to-3dim} and \ref{app-to-3dim-special} follow easily from Theorem \ref{mthk}.
\subsection{Proof of Corollary \ref{app-to-3dim}} We first pass to a fibration over $Z$. Take a smooth resolution $\sigma: W \to X$, and assume the morphism $W \to Y$ lifts to a fibration $g: W \to Z$, which fits into the following commutative diagram $$\xymatrix{&W\ar[d]^{g}\ar[r]^{\sigma} &X \ar[d]^{f}\\ & Z \ar[r]^{\mu} &Y\\ }$$ Then $\sigma^*(K_X + \Delta)$ is nef and $g$-big, thus $n\sigma^*(K_X + \Delta) + K_W$ is $g$-big for sufficiently large $n$, and $$(n\sigma^*(K_X + \Delta) + K_W) - K_{W/Z} - g^*K_Z = n\sigma^*(K_X + \Delta)$$ is nef. By Proposition \ref{F-non-vanishing} (3), for sufficiently divisible $n>0$ $$S^{0}(W_{\bar{\eta}}, (n\sigma^*(K_X + \Delta) + K_W)_{\bar{\eta}}) \neq 0.$$ Since $Z$ is smooth and $K_Z$ is big, applying Theorem \ref{mthk} shows that $n\sigma^*(K_X + \Delta) + K_W$ is big. There exist an effective divisor $D$ and an effective $\sigma$-exceptional divisor $E$ on $W$ such that $K_W \sim_{\mathbb{Q}} \sigma^*(K_X + \Delta) - D + E$. Applying Theorem \ref{cth}, we can show $$\kappa(X, K_X + \Delta) = \kappa(W, (n+1)\sigma^*(K_X + \Delta) + E) \geq \kappa(W, n\sigma^*(K_X + \Delta) + K_W) = 3.$$ \subsection{Proof of Corollary \ref{app-to-3dim-special}}\label{pf-3dim} Before giving the proof, let us note some easy facts. Let $\rho: X' \to X$ be a smooth log resolution of $(X,\Delta)$. We can write that $\small{K_{X'} + \rho_*^{-1}\Delta + \sum_ia_iE_i = \rho^*(K_X + \Delta) + \sum_j b_jF_j}$ where $E_i, F_j$ are distinct reduced and irreducible exceptional components and $0< a_i <1, b_j \geq 0$. Let $\Delta' = \rho_*^{-1}\Delta + \sum_ia_iE_i$. Then $(X', \Delta')$ is klt, and by Theorem \ref{cth} we conclude $$\kappa(X', K_{X'} + \Delta') = \kappa(X, K_X + \Delta)~\mathrm{and}~\kappa(X', K_{X'}) \leq \kappa(X, K_X).$$ Moreover we have $\kappa(X'_{\bar{\eta}}, (K_{X'} + \Delta')_{\bar{\eta}}) \geq \kappa(X_{\bar{\eta}}, (K_X + \Delta)_{\bar{\eta}})$, and in case (2) $\kappa(X'_{\bar{\eta}}, K_{X'_{\bar{\eta}}}) =\kappa(X_{\bar{\eta}}, K_{X_{\bar{\eta}}})$ since $X_{\bar{\eta}}$ is assumed smooth. So to prove the inequality of this corollary, we are allowed to replace $(X,\Delta)$ with $(X', \Delta')$ in case (1) and replace $X$ with $X'$ in case (2). Let $\sigma: (X, \Delta) \dashrightarrow (\bar{X}, \bar{\Delta})$ be the map to a minimal model of $(X, \Delta)$. If necessary, by replacing $(X, \Delta)$ with a smooth log resolution as above, we can assume $\sigma$ is a morphism. Since $Y$ is non-uniruled, applying Theorem \ref{rel-mmp} (3.4) and (4), we have the following commutative diagram $$\xymatrix{&(X,\Delta)\ar[d]^{f}\ar[r]^{\sigma} &(\bar{X}, \bar{\Delta}) \ar@{-->}[dl]^{\bar{f}}\\ & Y &\\ }$$ Here, if $\dim Y =1$ then $\bar{f}: \bar{X} \dashrightarrow Y$ is a morphism, and if $\dim Y =2$ then there exists a nonempty open subset $U \subseteq Y$ such that $\bar{X}_U \cong X_U$, thus the restriction $\bar{f}: \bar{X}_U \to U$ is a morphism. In case (1), by the construction, applying Theorem \ref{rel-mmp} (4) shows that $\sigma^*(K_{\bar{X}} + \bar{\Delta})$ is nef and $f$-big. For sufficiently divisible $n>0$ the divisor $$(n\sigma^*(K_{\bar{X}} + \bar{\Delta}) + K_X) - K_{X/Y} - f^*K_Y = n\sigma^*(K_{\bar{X}} + \bar{\Delta})$$ is nef and $f$-big, and by Proposition \ref{F-non-vanishing} (3) $$S^{0}(X_{\bar{\eta}}, (n\sigma^*(K_{\bar{X}} + \bar{\Delta}) + K_X)_{\bar{\eta}}) \neq 0.$$ Since $K_Y$ is big, applying Theorem \ref{mthk} shows that $n\sigma^*(K_{\bar{X}} + \bar{\Delta}) + K_X$ is big.
Then arguing as in the last paragraph of the proof of Corollary \ref{app-to-3dim}, we can show that $K_X + \Delta$ is big. In case (2), it is assumed $\Delta = 0$. Then since $WC_{3,2}$ has been proved in \cite{CZ}, we can assume $Y$ is a curve with $g(Y) >1$, and have a fibration $\bar{f}: \bar{X} \rightarrow Y$. The case $\kappa(X_{\bar{\eta}}) = 2$ is included in Case (1). We only need to consider the cases $\kappa(X_{\bar{\eta}}) = 0$ or $1$. If $\kappa(X_{\bar{\eta}}) = 0$, then $K_{\bar{X}/Y}$ is relatively semi-ample over $Y$ by Theorem \ref{rel-mmp} (3.3). Notice that general fibers of $\bar{f}$ have canonical singularities, which are strongly $F$-regular by \cite{Har98} because $\mathrm{char}~k >5$. Applying \cite[Theorem 3.16]{Pa}, we have that $K_{\bar{X}/Y}$ is nef, so there exists a nef $\mathbb{Q}$-Cartier $\mathbb{Q}$-divisor $M$ on $Y$ such that $$K_{\bar{X}/Y} \sim_{\mathbb{Q}} \bar{f}^*M.$$ It is easy to conclude that $$\kappa(X) = \kappa(\bar{X}, K_{\bar{X}}) = \kappa(\bar{X}, K_{\bar{X}/Y} + \bar{f}^*K_Y) = \kappa(Y, K_Y + M) = 1 = \dim Y.$$ If $\kappa(X_{\bar{\eta}}) = 1$, then $X_{\bar{\eta}}$ is a smooth surface over $k(\bar{\eta})$, and general fibers of its Iitaka fibration are smooth elliptic curves. Considering the relative Iitaka fibration of $X$, if necessary, by blowing up $X$, we can assume $f:X\to Y$ factors through an elliptic fibration $h: X\to Z$ to a smooth surface $Z$, which fit into the following commutative diagram $$\xymatrix{&X \ar[r]^{\sigma}\ar[d]^h &\bar{X} \ar[d]^{\bar{f}}\\ &Z\ar[r]^{g} &Y\\ }$$ Applying Lemma \ref{l-linear-pullback}, if necessary, again by blowing up both $X$ and $Z$, we can also assume $\sigma^*K_{\bar{X}} \sim_{\mathbb{Q}} h^*H$ for some nef and $g$-big $\mathbb{Q}$-Cartier $\mathbb{Q}$-divisor $H$ on $Z$. By Proposition \ref{F-non-vanishing} (3), for sufficiently divisible $n >0$, $S^0(Z_{\bar{\eta}}, (nH +K_Z)_{\bar{\eta}}) \neq 0$. Combining that $(nH +K_Z) - K_{Z/Y} - g^*K_Y$ is nef and $g$-big and that $K_Y$ is big, by Theorem \ref{mthk} we can show $nH +K_Z$ is big. Since $\kappa(X, K_{X/Z}) \geq 0$ (\cite[3.2]{CZ}), there exists an effective $\mathbb{Q}$-divisor $E$ on $X$ such that $K_{X/Z} \sim_{\mathbb{Q}} E$. Applying Theorem \ref{cth} it follows that \begin{align*} \kappa(X, K_X) &= \kappa(X, (n+1)K_X) = \kappa(X, n\sigma^*K_{\bar{X}} + K_X)\\ &= \kappa(X, n\sigma^*K_{\bar{X}} + E + h^*K_Z) \geq \kappa(Z, nH + K_Z) = 2. \end{align*} This completes the proof. \subsection{Remarks on the proof}\label{can-bdl-formula} Our strategy to prove $C_{n,m}$ heavily relies on the non-vanishing of $S^{0}(X_{\bar{\eta}}, lK_{X_{\bar{\eta}}})$. However, this often fails, for example when $X_{\bar{\eta}}$ is a supersingular elliptic curve. We have known that $S^{0}(X_{\bar{\eta}}, lK_{X_{\bar{\eta}}}) \neq 0$ if $K_{X_{\bar{\eta}}}$ is big and $l\gg0$. To overcome this difficulty, an idea is to consider the relative Iitaka fibration $h: X \rightarrow Z$ as in the proof of Corollary \ref{app-to-3dim-special}, then reduce to studying $\kappa(Z, K_Z + \Delta_Z)$ where $K_Z + \Delta_Z$ is relatively big over $Y$. To carry out this idea, we only need to have ``Canonical bundle formula'': for a fibration $h:(X, \Delta) \rightarrow Z$ from a klt pair such that $K_{X} + \Delta$ is relatively $\mathbb{Q}$-trivial over $Z$, then there exists an effective divisor $\Delta_Z$ on $Z$ such that $K_{X} + \Delta \sim_{\mathbb{Q}} h^*(K_{Z}+ \Delta_Z)$. 
Over the complex numbers this is true, and $(Z, \Delta_Z)$ can even be assumed to be klt up to some birational modifications; more precisely, $\Delta_Z = B + M$ is the sum of the discriminant part and the moduli part (cf. \cite[Theorem 4.5]{FM} and \cite[Theorem 0.2]{Amb}). In positive characteristic, we have such a canonical bundle formula when the geometric generic fiber $(X_{\bar{\xi}}, \Delta_{\bar{\xi}})$ of $h$ is globally $F$-split (cf. \cite[Theorem 3.18]{Ej} or \cite[Theorem B]{DS}), or when $\Delta=0$ and $X_{\bar{\xi}}$ is a smooth elliptic curve. In general, the canonical bundle formula as above does not hold in positive characteristic; one can construct a counterexample from a ruled surface over a curve with a multiple section purely inseparable over the base. Recently Witaszek \cite{Wit17} proved a weaker canonical bundle formula for fibrations of relative dimension one and gave some interesting applications, but his formulation does not seem to fit the above strategy. \end{document}
\begin{document} \date{} \begin{abstract} In this paper, we study the fluctuations of observables of metric measure spaces which are random discrete approximations $\mathscr{X}_n$ of a fixed arbitrary (complete, separable) metric measure space $\mathscr{X}=(\mathcal{X},d,\mu)$. These observables $\Phi(\mathscr{X}_n)$ are polynomials in the sense of Greven--Pfaffelhuber--Winter, and we show that for a generic model space $\mathscr{X}$, they yield asymptotically normal random variables. However, if $\mathscr{X}$ is a compact homogeneous space, then the fluctuations of the observables are much smaller, and after an adequate rescaling, they converge towards probability distributions which are not Gaussian. Conversely, we prove that if all the fluctuations of the observables $\Phi(\mathscr{X}_n)$ are smaller than in the generic case, then the metric measure space $\mathscr{X}$ is compact homogeneous. The proofs of these results rely on the Gromov reconstruction principle, and on an adaptation of the method of cumulants and mod-Gaussian convergence developed by Féray--Méliot--Nikeghbali. As an application of our results, we construct a statistical test of the hypothesis of symmetry of a compact Riemannian manifold. \end{abstract} \keywords{Discrete approximation of metric spaces, Gromov--Prohorov topology, combinatorics of the cumulants of random variables.} \title{Fluctuations of the Gromov--Prohorov sample model} \tableofcontents \section{Introduction} Let $ \mathscr{X} = (\mathcal{X},d,\mu)$ be a metric space which we assume to be complete, separable and equipped with a probability measure $\mu$ over the Borel $\sigma$-algebra of $\mathcal{X}$, and let $(X_n)_{n \in \mathbb{N}}$ be a sequence of independent random variables with the same law $\mu$. We study here the approximation of $\mathscr{X} = (\mathcal{X},d,\mu)$ by the random discrete metric measure space \begin{align*} \mathscr{X}_n = \left( \mathcal{X}_n = \{X_1,\ldots,X_n\}, d, \frac{1}{n}\sum_{i=1}^n \delta_{X_i}\right) \end{align*} for the Gromov-weak topology; we call this discrete approximation the \emph{Gromov--Prohorov random sample model}. The Gromov-weak topology is based on the idea that a sequence of metric measure spaces converges if and only if all finite subspaces sampled from these spaces converge. This is formalized by using real-valued observables called \emph{polynomials} and introduced by Greven, Pfaffelhuber and Winter in \cite{greven2009convergence}: they are the functions $\Phi$ defined by $$\Phi((\mathcal{X},d,\mu)) = \int_{\mathcal{X}^p} \varphi((d(x_i,x_j))_{1\leq i<j\leq p})\,\mu(dx_1)\cdots \mu(dx_p),$$ \noindent where $\varphi : \mathbb{R}^{\binom{p}{2}} \to \mathbb{R}$ is an arbitrary continuous bounded function. By using the theorem of convergence of empirical measures (see \cite[Theorem 3]{Var58}), one readily proves the almost sure convergence of $\mathscr{X}_n$ toward $\mathscr{X}$ (see Theorem \ref{theo:as_convergence}). In this paper, we will study the fluctuations of the polynomials $\Phi(\mathscr{X}_n)$ with respect to their limits $\Phi(\mathscr{X})$. The evaluation of a polynomial $\Phi$ on the space $\mathscr{X}_n$ is a sum of dependent random variables \begin{align*} \Phi(\mathscr{X}_n) = \frac{1}{n^p} \,\sum_{\bar{\imath} \in [\![1,n ]\!]^p } \varphi(d(X_{\bar{\imath}})), \end{align*} where we abbreviate $\varphi(d(X_{\bar{\imath}})) := \varphi((d(X_{i_a},X_{i_b}))_{1\leq a < b \leq p})$ for a sequence of indices $\bar{\imath}=(i_1,\ldots,i_p)$.
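As a simple illustration (this example is ours and is only meant to fix ideas), take $p=2$ and $\varphi(t)=\min(t,1)$, which is continuous and bounded. Then $$\Phi(\mathscr{X}_n) = \frac{1}{n^2}\sum_{1\leq i,j\leq n} \min(d(X_i,X_j),1) \xrightarrow[n\to\infty]{\text{a.s.}} \Phi(\mathscr{X}) = \int_{\mathcal{X}^2} \min(d(x_1,x_2),1)\,\mu(dx_1)\,\mu(dx_2),$$ so $\Phi(\mathscr{X}_n)$ is the empirical mean (truncated) distance between two points of the sample, while its limit is the mean (truncated) distance between two independent points with law $\mu$; the almost sure convergence is a particular case of Theorem \ref{theo:as_convergence}.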
This dependency between the random variables is sparse: if we associate to these variables a graph describing the dependency between those variables, then when $n$ goes to infinity the maximal degree of a vertex of this graph becomes negligible against the number of vertices (variables). \PLM{This sparse dependency leads to central limit theorems, but the limiting distribution is not necessarily Gaussian, and it depends on the size of the variance of $\Phi(\mathscr{X}_n)$, for which there are two cases.} We shall see that the variance $\mathrm{var}(S_n(\mathrm{var}phi,\mathscr{X}))$ with $S_n(\mathrm{var}phi,\mathscr{X}) = n^p\, \Phi(\mathscr{X}_n) $ is a polynomial in the variable $n$ with coefficients depending on the function $\mathrm{var}phi$ and the space $ \mathscr{X}$; this variance is at most of order $n^{2p-1}$ and therefore, $\mathrm{var}(\Phi(\mathscr{X}_n))$ is of order at most $1/n$. \begin{itemize} \item In a first part, we study the case where the variance of $\Phi(\mathscr{X}_n)$ is of order exactly $1/n$. We call this setting the \emph{generic case}, and it corresponds to fluctuations which are asymptotically normal. We study the combinatorics of the cumulants of the variable $S_n(\mathrm{var}phi,\mathscr{X})$ by using the theory of dependency graphs and mod-Gaussian convergence developed recently by Féray, Méliot and Nikeghbali (see \cite{feray2016mod}); and we prove the mod-Gaussian convergence of the sequence $S_n(\mathrm{var}phi,\mathscr{X})$ adequately renormalized. This leads to a central limit theorem for the variables $$Y_n(\mathrm{var}phi,\mathscr{X}) = \frac{\Phi(\mathscr{X}_n) - \mathbb{E}[\Phi(\mathscr{X}_n)]}{\sqrt{\mathrm{var}(\Phi(\mathscr{X}_n))}};$$ the limiting distribution is the standard Gaussian distribution, and we also obtain the normality zone of this approximation, moderate deviation estimates and a Berry--Esseen inequality (Theorem \ref{theo:generic_case}). In \cite{2017arXiv171206841F}, similar techniques were used in the study of the fluctuations of observables of random graph, random permutation and random integer partition models parametrised respectively by the space of graphons, the space of permutons and the Thoma simplex. \item In a second part, we study the case where the variance of $\Phi(\mathscr{X}_n)$ is at most of order $1/n^2$ for any polynomial $\Phi$. We call this setting a \emph{globally singular point} $\mathscr{X}$ of the Gromov--Prohorov sample model. It corresponds to the following condition: for any $p \geq 1$ and any $\mathrm{var}phi \in \mathscr{C}_b(\mathbb{R}^{\binom{p}{2}})$, \begin{align*} \sum_{1 \leq i,j \leq p} \mathrm{cov}\left(\mathrm{var}phi(d(X_1,\dots,X_i,\dots,X_p)), \mathrm{var}phi(d(X_1',\dots,X_{j-1}',X_i,X_{j+1}',\dots,X_p'))\right) = 0, \end{align*} where $(X_n')_{n \in \mathbb{N}}$ is an independent copy of $(X_n)_{n \in \mathbb{N}}$, and where in each summand the second vector contains all the variables $X_1',\ldots,X_p'$, except $X_j'$ which is replaced by $X_i$. This identity is difficult to analyse: therefore, we shall study the simpler case where each of the covariances in the sum vanishes. In particular, \begin{align*} \mathrm{cov}\left(\mathrm{var}phi(d(X_1,X_2,\dots,X_p)), \mathrm{var}phi(d(X_1,X_2',\dots,X_p'))\right) = 0. \end{align*} It turns out that this second identity is equivalent to $\mathscr{X}$ being a compact homogeneous space (in the space of metric measure spaces); see Theorem \ref{theo:homogeneous}. 
\PLM{We are thus able to relate a probabilistic condition to a geometric condition on the space; this result is a bit surprising, and for instance it ensures that when approximating an ellipse and a circle by the Gromov--Prohorov sample model, the convergence is much faster for the circle and does not have the same kind of asymptotic fluctuations.} The proof of the equivalence relies notably on Gromov's reconstruction theorem \cite{gromov2007metric}. Now, in this situation, we cannot directly use the theory of mod-Gaussian convergence and dependency graphs in order to prove all the probabilistic results that we obtained in the generic case. However, by using the symmetry of the space, we are able to obtain for this singular case a better upper bound on the cumulants. It allows us to prove a central limit theorem for the random variables $Y_n(\mathrm{var}phi,\mathscr{X})$, but the limit is not necessarily the Gaussian distribution; see Theorem \ref{theo:singular_case}. \end{itemize} \noindent The reader might wonder why we consider that replacing the hypothesis ``the sums of covariances vanish'' by ``all the covariances vanish'' is a reasonable restriction in the study of the singular models. In fact, we believe that the two conditions are equivalent; we shall say a bit more about this at the beginning of Section \ref{sec:homogeneous_case}, and we plan to address this question in forthcoming works. The theoretical results of this article lead to a better understanding of the possible behaviors of random variables stemming from a \emph{mod-Gaussian moduli space}; this kind of classifying object for random models has been introduced in \cite{2017arXiv171206841F}. Let us restate the previous discussion with this viewpoint. To any point $\mathscr{X}$ of the space $\mathbb{M}$ of complete separable metric spaces endowed with a probability measure, one can associate a sequence of random models $(\mathscr{X}_n)_{n \in \mathbb{N}}$ which are discrete approximations of $\mathscr{X}$, and such that $\mathscr{X}_n \to_{\mathbb{P}} \mathscr{X}$ as $n$ goes to infinity. Moreover, for a generic point $\mathscr{X}$, an algebra of observables of spaces in $\mathbb{M}$ yields random variables $\Phi(\mathscr{X}_n)$ such that $n^{1/2}(\Phi(\mathscr{X}_n)-\Phi(\mathscr{X}))$ is always asymptotically normal. However, some special points $\mathscr{X} \in \mathbb{M}$ yield observables such that $n^{1/2}(\Phi(\mathscr{X}_n)-\Phi(\mathscr{X}))$ always goes to $0$ (in probability). The identification of these singular models $\mathscr{X}$ is then a natural question, and for those models, one can be interested in the asymptotics of $\Phi(\mathscr{X}_n)-\Phi(\mathscr{X})$ with a different rescaling (here, we shall look at $n\,(\Phi(\mathscr{X}_n)-\Phi(\mathscr{X}))$). The exact same approach has been used in \cite{2017arXiv171206841F} with the space of graphons for models of random graphs, the space of permutons for models of random permutations, and the Thoma simplex for models of random integer partitions. Until now, we believed that the singular points of a mod-Gaussian moduli space still yielded observables which were asymptotically normal, albeit with a different rescaling. Indeed, this is what happens for singular graphons (Erd\H{o}s--Rényi random graphs) and for singular models of random integer partitions (Plancherel and Schur--Weyl measures).
However this is not a general phenomenon: with the Gromov--Prohorov sample model, we encounter the first known example where singular points yield observables which \emph{are not} asymptotically normal after appropriate rescaling. An application of our identification of the singular points of the space $\mathbb{M}$ of measured metric spaces is a procedure of statistical testing of the hypothesis of symmetry of a manifold. Suppose given a compact Riemannian manifold $\mathcal{X}$, endowed with its geodesic distance $d$ and with the unique probability measure $\mu$ which is proportional to the volume form of the manifold. We want to know whether $\mathcal{X}$ is a compact homogeneous space (see Theorem \ref{theo:homogeneous} for precise definitions). For instance, assume that one is given a surface homeomorphic to the real sphere $\mathbb{S}^2$ and endowed with a Riemannian structure; one wants to decide whether this structure is the canonical structure of symmetric space $\mathbb{S}^2 = \mathrm{SO}(3)/\mathrm{SO}(2)$. One can observe the manifold $\mathcal{X}$ as follows: \begin{itemize} \item one can take independent random points $x_i$ on $\mathcal{X}$ according to $\mu$; \item one can measure the distances $d(x_i,x_j)$ between the observed points. \end{itemize} Fix a polynomial $\Phi$ as defined previously. If the triple $\mathscr{X} =(\mathcal{X},d,\mu)$ is truly a homogeneous space, then the fluctuations of $\Phi(\mathscr{X}_n)$ tend to be small, of order $n^{-1}$. Therefore, given a $2n$ sample of points $(x_{i},x_{i}')_{1\leq i \leq n}$ and a large threshold $t_\alpha$, if $\mathscr{X}_{n}$ (respectively, $\mathscr{X}_n'$) denotes the approximation of $\mathscr{X}$ constructed from the family of points $\{x_{1},x_{2},\ldots,x_{n}\}$ (respectively, $\{x_1',x_2',\ldots,x_n'\}$), then $$Z_{n} = n\,|\Phi(\mathscr{X}_n)-\Phi(\mathscr{X}_n')|$$ should be smaller than $t_\alpha$ with large probability $1-\alpha$ (for $n$ large). On the contrary, if $(\mathcal{X},d,\mu)$ is not homogeneous, then the fluctuations of $\Phi(\mathscr{X}_{n})$ are generically of order $\frac{1}{\sqrt{n}}$, so one expects $Z_{n}$ to be larger than $t_\alpha$ with large probability (again, for $n$ large). We make this argument precise at the end of our paper, by describing in details the procedure of statistical hypothesis testing for the symmetry of $\mathscr{X}$. \noindent \textbf{Outline of the article.} The paper is organized as follows. In Section \ref{sec:metric_measure_spaces}, we will recall some definitions and facts about metric measure spaces. Section \ref{sec:method_cumulants} introduces the method of cumulants, the theory of dependency graphs and all the probabilistic results that we can obtain from this method. In Section \ref{sec:generic_fluctuations}, we apply this theory to the generic case of the random sample model to get several probabilistic results about the model including a central limit theorem, the normality zone, moderate deviations and a Berry--Esseen bound for the random variables $Y_n(\mathrm{var}phi,\mathscr{X})$. Section \ref{sec:homogeneous_case} details the singular case, and we prove the equivalence between having a small variance for the model, and $\mathscr{X}$ being a compact homogeneous space. We obtain also in this case a finer bound on the cumulants, a non-Gaussian central limit theorem for the observables $\Phi(\mathscr{X}_n)$, and concentration inequalities for these random variables. 
In Section \ref{sec:circle}, we provide an explicit counterexample to the asymptotic normality of observables of the sample model of a homogeneous space. This section also enables us to explain in more detail the combinatorics of moments and cumulants of the polynomial observables, and how to compute them concretely. Finally, Section \ref{sec:statistics} is devoted to the description of the statistical test for symmetry that has been briefly presented above. \section{Metric measure spaces}\label{sec:metric_measure_spaces} In this section, we recall the theory of metric measure spaces and of the Gromov--Prohorov topology, following very closely \cite[Section 2]{greven2009convergence}. \subsection{Definitions} For any topological space $\mathcal{X}$, we denote $\mathscr{C}_b(\mathcal{X}) $ the set of continuous bounded functions $\mathcal{X} \to \mathbb{R}$; $\mathscr{C}(\mathcal{X}) $ the set of continuous functions $\mathcal{X} \to \mathbb{R}$; $\mathscr{B}(\mathcal{X})$ the set of Borel subsets of $\mathcal{X}$; and $\mathscr{M}^1(\mathcal{X})$ the set of Borel probability measures over $\mathcal{X}$. A measurable map $f : \mathcal{X} \to \mathcal{Y}$ between two topological spaces induces a map $f_* : \mathscr{M}^1(\mathcal{X}) \to \mathscr{M}^1(\mathcal{Y})$ (push-forward of measures): for any Borel subset $A \subset \mathcal{Y}$, $(f_*\mu)(A) = \mu(f^{-1}(A))$. \begin{defn} A {\normalfont metric measure space} is a complete and separable metric space $(\mathcal{X},d)$ which is endowed with a probability measure $\mu \in \mathscr{M}^1(\mathcal{X})$. We say that two metric measure spaces $\mathscr{X}ex$ and $(\mathcal{X}',d',\mu')$ are measure-preserving isometric if there exists an isometry $\psi$ between the supports of $\mu$ on $(\mathcal{X},d)$ and of $\mu'$ on $(\mathcal{X}',d')$, such that $\mu' = \psi_* \mu$. \end{defn} We denote $\mathbb{M}$ the space of metric measure spaces (in short, mm-spaces) modulo measure-preserving isometries. In the sequel, unless explicitly stated, given an mm-space $\mathscr{X}ex$, we will always suppose that the space $\mathcal{X}$ is exactly the support of the measure $\mu$. Let $\mathscr{X} = \mathscr{X}ex \in \mathbb{M}$, and let \begin{align*} \mathbb{R}^{\mathrm{met}} \vcentcolon= \{(d_{i,j})_{1 \leq i < j < \infty} \,\,|\,\, \forall 1 \leq i < j < k < \infty,\,\,d_{i,j} + d_{j,k} \geq d_{i,k} \} \end{align*} be the space of infinite pseudo-distance matrices. We introduce the maps: \begin{align*} \iota^\mathscr{X} \colon \mathcal{X}^\mathbb{N} &\to \mathbb{R}^{\mathrm{met}} \\ (x_n)_{n \in \mathbb{N}} &\mapsto (d(x_i,x_j))_{1 \leq i < j < \infty}, \end{align*} and \begin{align*} S \colon \mathcal{X} &\to (\mathcal{X}^\mathbb{N} \to \mathcal{X}^\mathbb{N}) \\ x &\mapsto \left(S^x \vcentcolon= (x_n)_{n \in \mathbb{N}} \mapsto(x,x_0,x_1,x_2,\ldots)\right). \end{align*} \begin{defn} We define the {\normalfont distance matrix distribution} of $\mathscr{X}$ by \begin{align*} \nu^\mathscr{X} \vcentcolon= (\iota^\mathscr{X})_* \mu^{\otimes \mathbb{N}}, \end{align*} and the {\normalfont pointed distance matrix distribution} by \begin{align*} \nu \colon \mathcal{X} &\to \mathscr{M}^1(\mathbb{R}^{\mathrm{met}}) \\ x &\mapsto \nu^x \vcentcolon= (\iota^\mathscr{X} \circ S^x)_* \mu^{\otimes \mathbb{N}}. \end{align*} \end{defn} The distance matrix distribution characterizes the metric measure space in $\mathbb{M}$. This means that if $\nu^{\mathscr{X}_1} = \nu^{\mathscr{X}_2}$, then $\mathscr{X}_1 $ is measure-preserving isometric to $\mathscr{X}_2$.
This follows from Gromov's reconstruction theorem for metric measure spaces \cite[Paragraph $3 \frac{1}{2}.5$]{gromov2007metric}. \subsection{Polynomials and the Gromov--Prohorov distance} We associate to any bounded continuous map $ \mathrm{var}phi \in \mathscr{C}_b(\mathbb{R}^{\binom{p}{2}})$ a map $\Phi = \Phi^{p,\mathrm{var}phi} \colon \mathbb{M} \to \mathbb{R}$ called a \emph{polynomial} on $\mathbb{M}$ and defined by \begin{align*} \Phi(\mathscr{X}=\mathscr{X}ex) = \int_{\mathbb{R}^{\mathrm{met}}} \mathrm{var}phi((d_{i,j})_{1 \leq i < j \leq p})\,\nu^\mathscr{X}((d_{i,j})_{1 \leq i < j \leq p}). \end{align*} We denote $\Pi$ the real algebra of polynomials on $\mathbb{M}$. Applying the definition of the distance-matrix distribution as a push-forward measure, we have \begin{align*} \Phi(\mathscr{X}ex) = \int_{\mathcal{X}^p} \mathrm{var}phi((d(x_i,x_j))_{1 \leq i < j \leq p})\,\mu^{\otimes p}(dx_1,\dots,dx_p). \end{align*} \begin{defn} The Gromov-weak topology is the initial topology on $\mathbb{M}$ associated to the family of polynomials $(\Phi^{p,\mathrm{var}phi})_{p,\mathrm{var}phi}$. In the sequel we endow $\mathbb{M}$ with this topology. \end{defn} The Gromov-weak topology can be metrized by the Gromov--Prohorov distance, where we optimally embed the two metric measure spaces into a common mm-space and then take the Prohorov distance between the image measures. Given two probability measures $\mu$ and $\nu$ on a metric space $(\mathcal{Z},d_{\mathcal{Z}})$, their Prohorov distance is \begin{align*} d_{\mathrm{Pr}}^{(\mathcal{Z},d_{\mathcal{Z}})}(\mu,\nu) = \inf \{\epsilon > 0 \, | \, \forall A \in \mathscr{B}(\mathcal{Z}), \mu(A) \leq \nu(A^\epsilon)+\epsilon, \nu(A) \leq \mu(A^\epsilon)+\epsilon \}, \end{align*} where $A^\epsilon=\{z \in \mathcal{Z}\,|\,d_{\mathcal{Z}}(z,A)<\epsilon\}$. It is well known to metrise the weak convergence of probability measures in $\mathscr{M}^1(\mathcal{Z})$ \cite[Theorem 6.8]{billing}. \begin{defn} The Gromov--Prohorov distance between two mm-spaces $\mathscr{X} = (\mathcal{X},d_\mathcal{X},\mu_\mathcal{X})$ and $\mathscr{Y} = (\mathcal{Y},d_\mathcal{Y},\mu_\mathcal{Y})$ in $\mathbb{M}$ is defined by \begin{align*} d_{\mathrm{GPr}}(\mathscr{X},\mathscr{Y}) = \inf_{(\psi_\mathcal{X},\psi_\mathcal{Y},\mathcal{Z})} d_{\mathrm{Pr}}^{(\mathcal{Z},d_{\mathcal{Z}})}((\psi_\mathcal{X})_* \mu_\mathcal{X}, (\psi_\mathcal{Y})_* \mu_\mathcal{Y}), \end{align*} where the infimum is taken over all pairs of isometric embeddings $\psi_{\mathcal{X}}$ and $\psi_{\mathcal{Y}}$ from $(\mathcal{X},d_\mathcal{X})$ and $(\mathcal{Y},d_\mathcal{Y})$ into some common metric space $(\mathcal{Z},d_{\mathcal{Z}})$. \end{defn} \begin{theo} Given a sequence of mm-spaces $(\mathscr{X}_n=(\mathcal{X}_n,d_n,\mu_n))_{n \in \mathbb{N}}$ and another mm-space $\mathscr{X}=(\mathcal{X},d,\mu)$ in $\mathbb{M}$, the following assertions are equivalent: \begin{enumerate} \item The sequence $(\mathscr{X}_n)_{n \in \mathbb{N}}$ converges to $\mathscr{X}$ with respect to the Gromov--Prohorov distance. \item The sequence of distance matrix distributions $(\nu^{\mathscr{X}_n})_{n \in \mathbb{N}}$ converges weakly to $\nu^\mathscr{X}$.
\item The sequence $(\mathscr{X}_n)_{n \in \mathbb{N}}$ converges to $\mathscr{X}$ with respect to the Gromov-weak topology: for any polynomial $\Phi^{p,\mathrm{var}phi}$ associated to a bounded and continuous function $\mathrm{var}phi \in \mathscr{C}_b(\mathbb{R}^{\binom{p}{2}})$, we have $\Phi^{p,\mathrm{var}phi}(\mathscr{X}_n) \to_{n \to \infty} \Phi^{p,\mathrm{var}phi}(\mathscr{X})$. \item For any $p \geq 2 $ and any compactly supported and continuous function $\mathrm{var}phi \in \mathscr{C}_c(\mathbb{R}^{\binom{p}{2}})$, we have $\Phi^{p,\mathrm{var}phi}(\mathscr{X}_n) \to_{n \to \infty} \Phi^{p,\mathrm{var}phi}(\mathscr{X})$. \end{enumerate} Furthermore, the metric space $(\mathbb{M}, d_{\mathrm{GPr}})$ is complete and separable, so the space $\mathbb{M}$ is Polish. \end{theo} \begin{proof} The equivalence of the first three points and the Polish character are respectively Theorem 5 and Theorem 1 in \cite{greven2009convergence}; we also refer to \cite{Lohr13} for further details on the Gromov--Prohorov metric. We obviously have (3) $\Rightarrow$ (4). Conversely, note that (4) amounts to the \emph{vague} convergence of the distance matrix distributions $\nu^{\mathscr{X}_n}$ towards $\nu^{\mathscr{X}}$. However, for probability measures, vague convergence and weak convergence are equivalent (the difference is that for vague convergence we can have a positive mass that escapes to infinity, but this does not happen if we specify the limit and if this limit is a probability measure); see \cite[Lemma 5.20]{Kallenberg02}. Therefore, (4) $\Rightarrow$ (2). \end{proof} \begin{remark}\label{remark:dense} As a consequence of the fourth item in the theorem above and of the Stone--Weierstrass theorem, in order to control the Gromov-weak topology, we can use a \emph{countable} family $H$ of polynomials $(\Phi^{p,\mathrm{var}phi})_{p,\mathrm{var}phi}$ associated to functions $\mathrm{var}phi \colon \mathbb{R}^{\binom{p}{2}} \to \mathbb{R}$ with compact support. \end{remark} \subsection{Almost sure convergence of the sample model}\label{sub:almost_sure_convergence} Let $ \mathscr{X} = \mathscr{X}ex$ in $\mathbb{M}$, and let $(X_n)_{n \in \mathbb{N}}$ be a sequence of independent random variables with the same law $\mu$. We define \begin{align*} \mathscr{X}_n = \left( \mathcal{X}_n = \{X_1,\dots,X_n\}, {d|}_{\mathcal{X}_n}, \mu_n = \frac{1}{n}\sum_{i=1}^n \delta_{X_i}\right). \end{align*} Then, taking $\Phi \in H$ (see Remark~\ref{remark:dense}), we have \begin{align*} \Phi(\mathscr{X}_n) &= \int_{\mathcal{X}^p} \mathrm{var}phi((d(x_i,x_j))_{1 \leq i < j \leq p})\,\mu_n^{\otimes p}(dx_1,\dots,dx_p)\\ &{\longrightarrow}_{n \to \infty} \int_{\mathcal{X}^p} \mathrm{var}phi((d(x_i,x_j))_{1 \leq i < j \leq p})\,\mu^{\otimes p}(dx_1,\dots,dx_p) = \Phi(\mathscr{X}). \end{align*} Indeed, $\mu_n$ converges almost surely to $\mu$ for the weak topology of probability measures (see for instance \cite{Var58}), so the same is true for $\mu^{\otimes p}_n$ toward $\mu^{\otimes p}$ (see \cite[Chapter 1, Example 3.2]{billing}).
This implies the following theorem: \begin{theo}\label{theo:as_convergence} We have the almost sure convergence $\mathscr{X}_n \underset{a.s.}{{\longrightarrow}} \mathscr{X}$ in the space $\mathbb{M}$ of mm-spaces: $$\mathbb{P}[\Phi(\mathscr{X}_n) \to_{n \to \infty} \Phi(\mathscr{X})\text{ for any polynomial }\Phi \in \Pi]=1$$ or equivalently, $$\mathbb{P}[d_{\mathrm{GPr}}(\mathscr{X}_n,\mathscr{X}) \to_{n \to \infty} 0] =1.$$ \end{theo} \PLM{We can also prove the theorem by using the Gromov--Prohorov distance; indeed, by choosing $\mathcal{Z}=\mathcal{X}$ as the common metric space in which one embeds $\mathcal{X}_n$ and $\mathcal{X}$, and the identity maps for the isometric embeddings, we see that $$d_{\mathrm{GPr}}(\mathscr{X}_n,\mathscr{X}) \leq d_{\mathrm{Pr}}(\mu_n,\mu),$$ and the convergence to $0$ of the right-hand side is the Glivenko--Cantelli convergence of empirical measures. Estimates on the speed of convergence of $\mathbb{E}[d_{\mathrm{Pr}}(\mu_n,\mu)]$ are given in \cite{Dud69}, but they depend strongly on the space $\mathscr{X}$: if $k$ denotes the entropic dimension of $\mathscr{X}=\mathscr{X}ex$, then in general one cannot prove a better bound than $\mathbb{E}[d_{\mathrm{Pr}}(\mu_n,\mu)] = O(n^{-\frac{1}{k+2+\epsilon}})$; see Theorem 4.1 in \cite{Dud69}. However, if instead of the Gromov--Prohorov distance one uses polynomial observables $\Phi$ in order to control the speed of convergence, then the results of this paper will prove that essentially there are only two possible speeds of convergence: \begin{itemize} \item in the generic case, $|\Phi(\mathscr{X}_n)-\Phi(\mathscr{X})| = O(n^{-\frac{1}{2}})$; more precisely, there exists a bilinear map $$\kappa^2 : \Pi^2 \to \Pi$$ such that, for any polynomial $\Phi=\Phi^{p,\mathrm{var}phi}$, we have the convergence in law $$n^{1/2}\left(\Phi(\mathscr{X}_n)-\Phi(\mathscr{X})\right)\rightharpoonup_{n \to \infty} \mathcal{N}(0,\kappa^2(\Phi,\Phi)(\mathscr{X}));$$ see Theorem \ref{theo:generic_case}. \item in the case of compact homogeneous spaces, $|\Phi(\mathscr{X}_n)-\Phi(\mathscr{X})| = O(n^{-1})$; more precisely, for any polynomial $\Phi=\Phi^{p,\mathrm{var}phi}$, there exists a random variable $Y(\mathrm{var}phi,\mathscr{X})$ which is determined by its moments (it has a convergent moment-generating function) such that we have the convergence in law $$n\left(\Phi(\mathscr{X}_n)-\Phi(\mathscr{X})\right)\rightharpoonup_{n \to \infty} Y(\mathrm{var}phi,\mathscr{X});$$ see Theorem \ref{theo:singular_case}. \end{itemize} } \section{The method of cumulants}\label{sec:method_cumulants} In this section, we recall the notion of (joint) cumulants of random variables and the results from \cite{feray2016mod,feray2017mod}, which relate the existence of a sparse dependency graph for a family of random variables to the size of the cumulants and to the fluctuations of their sum. \subsection{Joint cumulants} A \emph{set partition} of $[\![1,n ]\!]$ is a family of non-empty disjoint subsets of $[\![1,n ]\!]$ (the \emph{parts} of the partition), whose union is $[\![1,n ]\!]$. For instance, \begin{align*} \{\{1,4,8\},\{3,5,6\},\{2,7\},\{9\}\} \end{align*} is a set partition of $[\![1,9]\!]$. We denote $\mathfrak{Q}(n)$ the set of set partitions of $[\![1,n ]\!]$. It is endowed with the \textit{refinement} order: a set partition $\pi$ is \textit{finer} than another set partition $\pi'$ if every part of $\pi$ is included in a part of $\pi'$. Denote $\mu$ the Möbius function of the partially ordered set $(\mathfrak{Q}(n),\preceq)$ (see \cite{Rot64}).
One has \begin{align*} \mu(\pi) \vcentcolon = \mu(\pi, \{[\![1,n]\!]\}) = (-1)^{\ell(\pi)-1}\,(\ell(\pi)-1)!, \end{align*} where $\ell(\pi)$ is the number of parts of $\pi$; see \cite[Chapter 3, Equation (30) p.~128]{Stan97}. Given a probability space $(\Omega,\mathcal{F}, \mathbb{P})$ , we set $$\mathscr{A} = \bigcap\limits_{p \in \mathbb{N}^*} \mathscr{L}^p(\Omega, \mathcal{F}, \mathbb{P}),$$ which has a structure of real algebra. For any integer $r \geq 1$, we define a map $\kappa_r \colon \mathscr{A}^r \to \mathbb{R} $ by \begin{align*} \kappa_r(X_1,\dots,X_r) = \left[ t_1 \cdots t_r\right] \, \mathrm{log} \left( \mathbb{E}\left[\mathrm{e}^{t_1 X_1 + \cdots + t_r X_r} \right] \right) \quad \text{for} \enskip (X_i)_{i \in [\![1,r]\!]} \in \mathscr{A}^r, \end{align*} where $\left[t_1 \cdots t_r\right](F)$ is the coefficient of the monomial $\prod_{i=1}^r t_i$ in the series expansion of $F$. Here, $\mathrm{log} (\mathbb{E}[\mathrm{e}^{t_1 X_1 + \cdots + t_r X_r} ] )$ is considered as a formal power series whose coefficients are polynomials in the joint moments of the $X_i$'s; we do not ask \emph{a priori} for the convergence of the exponential generating function. We call the map $\kappa_r$ the $r$-th joint cumulant map, and we define the \emph{joint cumulant map} $$\kappa \colon \bigcup\limits_{r \in \mathbb{N}^*} \mathscr{A}^r \to \mathbb{R}$$ by $ {\kappa|}_{\mathscr{A}^r} = \kappa_r$ for any integer $r \geq 1$. For a specific sequence $(X_i)_{i \in [\![1,r]\!]} \in \mathscr{A}^r$, we call the quantity $\kappa_r((X_i)_{i \in [\![1,r]\!]} )$ the joint cumulant of $(X_i)_{i \in [\![1,r]\!]} \in \mathscr{A}^r$. This notion of joint cumulant was introduced by Leonov and Shiryaev in \cite{leonov1959method}, and it generalises the usual cumulants: for $X \in \mathscr{A}$, $$\kappa^{(r)}(X) \vcentcolon= \kappa_r(X,\dots,X)$$ is the usual $r$-th cumulant of $X$, that is $r!\,[t^r](\log \mathbb{E}[\mathrm{e}^{tX}])$. We summarise the properties of the map $\kappa$ in the following: \begin{prop}\label{prop:cumulants} \begin{enumerate} \item The map $\kappa$ is multilinear. \item The joint cumulants and the joint moments are related by the poset of set partitions, and the following formulas hold: \begin{align*} &\mathbb{E}\left[X_1 \cdots X_r\right] = \sum \limits_{\pi \in \mathfrak{Q}(r)} \prod \limits_{C \in \pi} \kappa\left( X_i \, ; \, i \in C\right); \\ &\kappa(X_1,\dots, X_r) = \sum \limits_{\pi \in \mathfrak{Q}(r)} \mu(\pi) \prod\limits_{C \in \pi} \mathbb{E}\left[\prod\limits_{i \in C} X_i\right]. \end{align*} \item If the variables $X_1, \dots,X_r$ can be split into two non-empty sets of variables which are independent of each other, then $\kappa(X_1,\dots,X_r)$ vanishes. \end{enumerate} \end{prop} For example, the joint cumulants of one or two variables are respectively the expectation and the covariance: \begin{align*} \kappa(X_1) = \mathbb{E}[X_1] \, ; \,\, \kappa(X_1,X_2) = \mathbb{E}[X_1 X_2] - \mathbb{E}[X_1]\mathbb{E}[X_2]. 
\end{align*} \PLM{For the convenience of the reader, we also recall the value of the third cumulant: $\kappa(X_1,X_2,X_3) = \mathbb{E}[X_1X_2X_3] - \mathbb{E}[X_1]\mathbb{E}[X_2X_3] - \mathbb{E}[X_2]\mathbb{E}[X_1X_3]- \mathbb{E}[X_3]\mathbb{E}[X_1X_2] +2\mathbb{E}[X_1]\mathbb{E}[X_2]\mathbb{E}[X_3]$.} \subsection{Dependency graphs and bounds on cumulants} \PLM{A real random variable $X$ is distributed according to the normal law $\mathcal{N}(m,\sigma^2)$ with mean $m$ and variance $\sigma^2$ if and only if $\kappa^{(1)}(X)=m$, $\kappa^{(2)}(X)=\sigma^2$ and $\kappa^{(r)}(X)=0$ for $r \geq 3$. More generally, a sequence of random variables $(X_n)_{n \in \mathbb{N}}$ converges in distribution towards a normal law $\mathcal{N}(m,\sigma^2)$ if the first two cumulants $\kappa^{(1,2)}(X_n)$ converge toward $m$ and $\sigma^2$ respectively, and if $\lim_{n \to \infty} \kappa^{(r)}(X_n)=0$ for $r \geq 3$; see for instance \cite[Theorem 1]{Jan88}. In the series of papers \cite{feray2016mod,feray2017mod,2017arXiv171206841F,BMN19}, a method of cumulants has been built in order to make this asymptotic normality result more precise, assuming that one has good upper bounds on the size of the cumulants of the random variables $X_n$. This method falls within the framework of \emph{mod-Gaussian convergence}, which is also constructed in the aforementioned papers. We recall below the main results from this theory; see \cite[Definition 2 and Theorem 3]{2017arXiv171206841F}. } \begin{defn} Let $(S_n)_{n \in \mathbb{N}}$ be a sequence of real-valued random variables. We fix $A \geq 0$, and we consider two positive sequences $(D_n)_{n \in \mathbb{N}}$ and $(N_n)_{n \in \mathbb{N}}$ such that $$\lim_{n \to \infty} \frac{D_n}{N_n} = 0\quad(\text{hypothesis of sparsity}).$$ The hypotheses of the method of cumulants with parameters $((D_n)_{n \in \mathbb{N}}, (N_n)_{n \in \mathbb{N}}, A)$ and with limits $(\sigma^2,L)$ for the sequence $(S_n)_{n \in \mathbb{N}}$ are the following two conditions: \begin{itemize} \item For any $r \geq 1$, we have: \begin{align*} |\kappa^{(r)}(S_n)| \leq N_n (2D_n)^{r-1} r^{r-2} A^r. \end{align*} \item There exist two real numbers $\sigma^2 \geq 0$ and $L$ such that: \begin{align*} &\frac{\kappa^{(2)}(S_n)}{N_n D_n} = (\sigma_n)^2 = \sigma^2 \left( 1 + o\left(\left(\frac{D_n}{N_n}\right)^{\!1/3}\right)\right); \\ &\frac{\kappa^{(3)}(S_n)}{N_n (D_n)^2} = L_n = L(1+o(1)) . \end{align*} \end{itemize} \end{defn} \noindent In particular, the first estimate in the second item states that the variance of $S_n$ is equivalent to $\sigma^2\,N_nD_n$. \begin{theo}\label{theo:esti_cumu} Let $(S_n)_{n \in \mathbb{N}}$ be a sequence of real-valued random variables that satisfies the hypotheses of the method of cumulants, with parameters $((D_n)_{n \in \mathbb{N}}, (N_n)_{n \in \mathbb{N}}, A)$ and with limits $(\sigma^2,L)$. Assuming that $\sigma^2 >0$, we set: \begin{align*} Y_n = \frac{S_n - \mathbb{E}[S_n]}{\sqrt{\mathrm{var}(S_n)}}. \end{align*} \begin{enumerate} \item Central limit theorem with an extended zone of normality: we have $Y_n {\rightharpoonup}_{n \to \infty}\, \mathcal{N}_{\mathbb{R}}(0,1)$, and more precisely, \begin{align*} \mathbb{P}\left[Y_n \geq y_n\right] = \mathbb{P}\left[\mathcal{N}_{\mathbb{R}}(0,1) \geq y_n\right](1+o(1)) \end{align*} for any sequence $(y_n)_{n \in \mathbb{N}}$ with $|y_n| \ll \left(\frac{N_n}{D_n}\right)^{1/6}$.
\item Berry--Esseen type bound: the Kolmogorov distance between $Y_n$ and the standard Gaussian distribution satisfies \begin{align*} d_{\mathrm{Kol}}(Y_n,\mathcal{N}(0,1)) \leq \frac{C\, A^3}{(\sigma_n)^3} \sqrt{\frac{D_n}{N_n}}, \end{align*} where $C=76.36$ is a universal constant. \item Moderate deviations: for any sequence $(y_n)_{n \in \mathbb{N}}$ with $1\ll y_n \ll \left(\frac{N_n}{D_n}\right)^{1/4}$, \begin{align*} \mathbb{P}\left[Y_n \geq y_n\right] = \frac{\mathrm{e}^{-\frac{(y_n)^2}{2}}}{y_n \sqrt{2 \pi}} \exp\left(\frac{L}{6 \sigma^3} \sqrt{\frac{D_n}{N_n}}(y_n)^3\right)(1+o(1)). \end{align*} \item Local limit theorem: for any $y \in \mathbb{R}$, any Jordan measurable set $B$ with positive Lebesgue measure $\mathrm{m}(B) > 0$, and any real exponent $\delta$ in $(0,\frac{1}{2})$, \begin{align*} \lim\limits_{n \to \infty} \left(\frac{N_n}{D_n}\right)^{\!\delta} \,\mathbb{P}\!\left[Y_n-y \in \left(\frac{D_n}{N_n}\right)^{\!\delta} B \right] = \frac{1}{\sqrt{2\pi}}\,\mathrm{e}^{-\frac{y^2}{2}} \,\mathrm{m}(B). \end{align*} \PLM{\item Concentration inequality: suppose that in addition to the hypotheses of the method of cumulants, we have almost surely $|S_n| \leq N_nA$. Then, for any $x\geq 0$ and any $n \in \mathbb{N}$, $$\mathbb{P}[|Y_n|\geq x]\leq 2\exp\left(-\frac{(\sigma_n)^2x^2}{9A^2}\right). $$} \end{enumerate} \end{theo} \noindent This list of results corresponds to Theorem 9.5.1 in \cite{feray2016mod} (CLT and moderate deviations), Corollary 30 in \cite{feray2017mod} (Kolmogorov distance), Proposition 4.9 in \cite{BMN19} (local limit theorem), and Proposition 6 in \cite{2017arXiv171206841F} (concentration inequality). We shall use the method of dependency graphs in order to verify the hypothesis of the previous theorem. Let $S = \sum_{v \in V} A_v$ be a finite sum or real-valued random variables. We say that a graph $G=(V,E)$ is a \emph{dependency graph} for the family of random variables $(A_v)_{v \in V}$ if, given two disjoint subsets $V_1,V_2 \subseteq V$, if there is no edge $e = (v,w) \in E$ such that $v \in V_1$ and $w \in V_2$, then the two vectors $(A_v)_{v \in V_1}$ and $(A_w)_{w \in V_2}$ are independent. \begin{theo}\label{theo:bounds_dependency} Let $S = \sum_{v \in V} A_v$ be a sum of random variables such that $(A_v)_{v \in V}$ admits a dependency graph $G=(V,E)$, with $$N = \mathrm{card}(V)\qquad;\qquad D = 1 + \max_{v \in V}(\mathrm{deg}(v)).$$ We also assume that $|A_v| \leq A$ almost surely for any $v$ in $V$. Then, for any $r \geq 1$, \begin{align}\label{bound:cumulant} |\kappa^{(r)}(S)| \leq N (2D)^{r-1} r^{r-2} A^r. \end{align} \end{theo} \PLM{We refer to \cite[Theorem 9.1.7]{feray2016mod} for a proof of this result; later, we shall recall some of its arguments and adapt them in order to obtain adequate bounds on the cumulants of polynomials of the Gromov--Prohorov sample model of a compact homogeneous space.} \section{Generic fluctuations of the sample model}\label{sec:generic_fluctuations} Throughout this section, $\mathscr{X} = \mathscr{X}ex \in \mathbb{M}$ is a fixed metric measure space and $\Phi^{p,\mathrm{var}phi} \in \Pi$ a fixed polynomial. As in Section \ref{sub:almost_sure_convergence}, we denote $\mathscr{X}_n$ the sample model of $\mathscr{X}$ with $n$ independent points $X_1,\ldots,X_n$, and we are going to study the convergence of $\Phi (\mathscr{X}_n)$ toward $\Phi(\mathscr{X})$. 
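In the analysis of the sample model below, the central quantities are the cumulants $\kappa^{(r)}(S_n(\mathrm{var}phi,\mathscr{X}))$ of a sum of dependent random variables. As a purely illustrative aside (an added sketch with hypothetical helper names, not part of the arguments of the paper), such cumulants can be estimated numerically from independent replicas of $S_n(\mathrm{var}phi,\mathscr{X})$ by plugging empirical moments into the moment--cumulant formula of Proposition \ref{prop:cumulants}, with the M\"obius weights $\mu(\pi)=(-1)^{\ell(\pi)-1}(\ell(\pi)-1)!$ recalled in Section \ref{sec:method_cumulants}.
\begin{verbatim}
from math import factorial
import numpy as np

def set_partitions(elements):
    # yield all set partitions of the list `elements`, each one as a list of blocks
    if len(elements) == 1:
        yield [elements]
        return
    first, rest = elements[0], elements[1:]
    for smaller in set_partitions(rest):
        for i, block in enumerate(smaller):
            yield smaller[:i] + [[first] + block] + smaller[i + 1:]
        yield [[first]] + smaller

def empirical_cumulant(samples, r):
    # estimate kappa^{(r)} by plugging the empirical moments m_k = mean(S^k)
    # into kappa^{(r)} = sum_{pi} mu(pi) * prod_{B in pi} m_{|B|}
    samples = np.asarray(samples, dtype=float)
    moments = {k: np.mean(samples ** k) for k in range(1, r + 1)}
    total = 0.0
    for pi in set_partitions(list(range(r))):
        l = len(pi)
        weight = (-1.0) ** (l - 1) * factorial(l - 1)
        total += weight * np.prod([moments[len(block)] for block in pi])
    return total

# sanity check on a Gaussian sample: cumulants of order >= 3 should be close to 0
rng = np.random.default_rng(0)
g = rng.normal(loc=1.0, scale=2.0, size=200_000)
print(empirical_cumulant(g, 1), empirical_cumulant(g, 2), empirical_cumulant(g, 3))
# approximately 1.0, 4.0 and 0.0
\end{verbatim}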
\subsection{Dependency graphs for the sample model} \noindent For any sequence $X \colon \mathbb{N} \to E$ with values in a set $E$ and for any map $f \colon S \to \mathbb{N}$, we denote by $X_f$ the map $X \circ f$. For example, if we take $f = I \in [\![1,n]\!]^5$ which is a 5-tuple, we have $X_I = (X_{I_1},X_{I_2},X_{I_3},X_{I_4},X_{I_5})$. For any finite or infinite sequence $I \colon S \to \mathbb{N}$, we write \begin{align*} d(X_I) = (d(X_{I_i},X_{I_j}))_{i \in S, j \in S}. \end{align*} We see a $p$-tuple $\bar{\imath}$ as a map $\bar{\imath} \colon [\![1,p ]\!] \to [\![1,n ]\!]$ and we denote by $\overline{\mathrm{Im}}(\bar{\imath})$ the multiset-image of this map, taking as a multiplicity function the map $m \colon \mathrm{Im}(\bar{\imath}) \to \mathbb{N}$ defined for any $x \in \mathrm{Im}(\bar{\imath})$ by $m(x) = \mathrm{Card}((\bar{\imath})^{-1}(x))$. We have \begin{align*} \forall n \geq 1, \, \Phi(\mathscr{X}_n) = \frac{1}{n^p} \,\sum_{\bar{\imath} \in [\![1,n ]\!]^p } \mathrm{var}phi(d(X_{\bar{\imath}})). \end{align*} \noindent We write $S_{n}(\mathrm{var}phi,\mathscr{X}) = n^p\,\Phi(\mathscr{X}_n) = \sum_{\bar{\imath} \in [\![1,n ]\!]^p } \mathrm{var}phi(d(X_{\bar{\imath}}))$, which is a sum of dependent random variables. We are going to use the method of cumulants in order to study the asymptotic probabilistic behavior of $S_{n}(\mathrm{var}phi,\mathscr{X})$. Placing ourselves in the framework of the previous section, we take $V = [\![1,n ]\!]^p$, $S = S_{n}(\mathrm{var}phi,\mathscr{X}) = \sum_{\bar{\imath} \in [\![1,n ]\!]^p } \mathrm{var}phi(d(X_{\bar{\imath}})) $, $A = \|\mathrm{var}phi\|_{\infty}$, and two vertices $\bar{\imath}$ and $\bar{\jmath}$ will be adjacent in the graph $G = (V,E)$ if and only if they have at least one index in common, \emph{i.e.} if and only if $$\mathrm{Card}\left(\overline{\mathrm{Im}}(\bar{\imath})\cap \overline{\mathrm{Im}}(\bar{\jmath})\right) \geq 1 .$$ \PLM{\begin{lemma} The condition written above defines a dependency graph for the family of random variables $(\mathrm{var}phi(d(X_{\bar{\imath}})))_{\bar{\imath} \in V}$. \end{lemma} \begin{proof} Suppose that $\{\bar{\imath}^{1},\ldots,\bar{\imath}^{r}\}$ and $\{\bar{\jmath}^{1},\ldots,\bar{\jmath}^{s}\}$ are two sets of $p$-tuples which are not connected to each other in $G$. Then, there is no index $i$ belonging to an intersection $\overline{\mathrm{Im}}(\bar{\imath}^{a})\cap \overline{\mathrm{Im}}(\bar{\jmath}^{b})$, so the two sets of variables $$\left\{X_i,\,\,i \in \bigcup_{a=1}^r\overline{\mathrm{Im}}(\bar{\imath}^{a}) \right\} \quad\text{and}\quad \left\{X_j,\,\,j \in \bigcup_{b=1}^s\overline{\mathrm{Im}}(\bar{\jmath}^{b}) \right\}$$ are disjoint. As the two vectors $(\mathrm{var}phi(d(X_{\bar{\imath}^{a}})))_{1\leq a \leq r}$ and $(\mathrm{var}phi(d(X_{\bar{\jmath}^{b}})))_{1\leq b \leq s}$ are measurable functions of these two sets, they are independent. \end{proof}} In the dependency graph $G$ constructed above, we have $N = n^p$ and $D \leq p^2 n^{p-1}$. Indeed, we can build a surjective map from $[\![1,p ]\!]^2 \times[\![1,n ]\!]^{p-1}$ to the set of adjacent vertices of a vertex $\bar{\imath} \in V $ taking \begin{align*} [\![1,p ]\!]^2 \times [\![1,n ]\!]^{p-1} &\to \{\text{adjacent vertices of }\bar{\imath} \} \\ (i,j,(y_k)_{k \neq j}) &\mapsto (y'_k)_{k \in [\![1,p ]\!]} \end{align*} with $y'_k = y_k$ if $k\neq j$ and $y'_j = \bar{\imath}(i)$.
Therefore, we have from Theorem~\ref{theo:bounds_dependency}: \begin{align*} \forall r \geq 1, |\kappa^{(r)}(S_n(\mathrm{var}phi,\mathscr{X}))| \leq n^p (2p^2 n^{p-1})^{r-1} r^{r-2} (\|\mathrm{var}phi\|_\infty)^r, \end{align*} which is an upper bound of order $n^{(p-1)r + 1}$. \subsection{Polynomiality of the cumulants} For any $r \geq 1$, we can write by multilinearity of the cumulant: \begin{align*} \kappa^{(r)}(S_n(\mathrm{var}phi,\mathscr{X})) = \sum_{(\bar{\imath}^1,\dots,\bar{\imath}^r) \in V^r} \kappa\left(\mathrm{var}phi(d(X_{\bar{\imath}^1})),\dots,\mathrm{var}phi(d(X_{\bar{\imath}^r}))\right). \end{align*} For any $I = (\bar{\imath}^1,\dots,\bar{\imath}^r) \in V^r$, we set $\mathrm{var}phi(d(X_I)) = (\mathrm{var}phi(d(X_{\bar{\imath}^1})),\dots,\mathrm{var}phi(d(X_{\bar{\imath}^r})))$, hence: \begin{align*} \kappa^{(r)}(S_n(\mathrm{var}phi,\mathscr{X})) = \sum_{I \in V^r} \kappa (\mathrm{var}phi(d(X_I))). \end{align*} We identify here $V^r = \left([\![1,n ]\!]^p \right)^r $ with the set $[\![1,n ]\!]^{pr}$ by preserving the \textit{lexicographic} order: \emph{i.e.} by using the bijection \begin{align*} b \colon &[\![1,r ]\!] \times [\![1,p ]\!] \to [\![1,rp]\!] \\ &(k,l) \mapsto (k-1)p + l. \end{align*} \begin{prop}\label{prop:polynomiality} For any integer $r \geq 1$, the map \begin{align*} \mathbb{N}^* &\to \mathbb{R} \\ n &\mapsto \kappa^{(r)}(S_n(\mathrm{var}phi,\mathscr{X})) \end{align*} is a polynomial in $\mathbb{R}[n]$ with degree not exceeding $(p-1)r+1$. \end{prop} \begin{proof} For $x= (x_1,\dots,x_{pr})$ in $[\![1,n ]\!]^{pr}$, we consider the equivalence relation $\pi_x$ over $[\![1,pr]\!]$ defined by $i \sim j$ if and only if $x_i = x_j$. We then denote $\mathrm{Sp}_n(x)$ the set-partition in $\mathfrak{Q}(pr)$ associated to the equivalence relation $\pi_x$. Given two families of indices $I=(x_1,\ldots,x_{pr})$ and $J=(y_1,\ldots,y_{pr})$ in $[\![1,n ]\!]^{pr}$, note that if $\mathrm{Sp}_n(I) = \mathrm{Sp}_n(J)$, then $\kappa (\mathrm{var}phi(d(X_I))) =\kappa (\mathrm{var}phi(d(X_J)))$. Indeed, if $\mathrm{Sp}_n(I) = \mathrm{Sp}_n(J)$, then one can find a bijection $\psi : [\![1,n]\!] \to [\![1,n]\!]$ such that $\psi(x_a)=y_a$ for any $a \in [\![1,pr]\!]$; the result follows since the $X_{i}$'s all have the same law. Given $\pi \in \mathfrak{Q}(pr)$, we denote: \begin{equation} \kappa(\pi,\mathrm{var}phi) = \kappa (\mathrm{var}phi(d(X_I))) \quad \text{for any $I \in [\![1,n ]\!]^{pr}$ such that $\mathrm{Sp}_n(I) = \pi$}. \label{eq:kappa_pi} \end{equation} Then, \begin{align*} \kappa^{(r)}(S_n(\mathrm{var}phi,\mathscr{X})) &= \sum_{I \in V^r} \,\kappa (\mathrm{var}phi(d(X_I))) \\ &= \sum_{\pi \in \mathfrak{Q}(pr)} \mathrm{Card}(\pi,n) \,\kappa(\pi,\mathrm{var}phi), \end{align*} where $\mathrm{Card}(\pi,n)$ denotes the number of families $I \in [\![1,n]\!]^{pr}$ such that $\mathrm{Sp}_n(I) = \pi$. We now remark that given $\pi \in \mathfrak{Q}(pr) $, $\mathrm{Sp}_{n}^{-1}(\pi)$ is in bijection with the set \begin{align*} \{(x_1,\dots,x_{\ell(\pi)}) \, ; \, x_i \in [\![1,n]\!] \text{ and for all } i \neq j \in [\![1,{\ell(\pi)} ]\!], x_i \neq x_j \} . \end{align*} The cardinality of this set is $n^{\downarrow {\ell(\pi)}} = n(n-1)\cdots (n-({\ell(\pi)}-1))$ (this is valid even if $n < \ell(\pi)$). Thus, for any $n \geq 1$, \begin{equation} \kappa^{(r)}(S_n(\mathrm{var}phi,\mathscr{X})) = \sum_{\pi \in \mathfrak{Q}(pr)}\kappa(\pi,\mathrm{var}phi) \,n^{\downarrow \ell(\pi)}.
\label{eq:expansion_cumulant} \end{equation} This proves the polynomiality, and since we know that the left-hand side is a $O(n^{(p-1)r+1})$, the degree of the polynomial is smaller than $(p-1)r+1$. \end{proof} In Equation \eqref{eq:expansion_cumulant}, we know that the terms with degree strictly larger than $(p-1)r+1$ cancel one another. Let us give a simpler explanation of this vanishing: \begin{prop}\label{prop:vanish generic} If $r\geq 2$ and $\ell(\pi) > (p-1)r + 1$, then $\kappa(\pi,\mathrm{var}phi) = 0$. \end{prop} \begin{proof} This is mostly a rewriting of the proof of the general upper bound on cumulants stated in Theorem \ref{theo:esti_cumu}. For the convenience of the reader, let us give a proof which is adapted to our situation; this will also enable us to introduce combinatorial objects which will play a major role in Section \ref{sec:homogeneous_case}. Given $\pi\in \mathfrak{Q}(pr)$, we construct a graph $G_\pi$ on the vertex set $V({G_\pi}) = [\![1,pr]\!]$ as follows. For any part $A$ of the set partition $\pi$, we associate a spanning tree $T_A$ of the set of vertices $A$, then we define $G_\pi$ as the disjoint union of those spanning trees. We have $\sum_{A \in \pi} (|E(T_A)|+1) = pr$. This implies $|E(G_\pi)|=\sum_{A \in \pi} |E(T_A)| \leq r-2$ by the assumption on $\ell(\pi)$. We now construct a multigraph $H_\pi$ with vertex set $V(H_\pi) = [\![1,r]\!]$, by contracting the vertices of the graph $G_\pi$ according to the map \begin{align*} (b^{-1})_1 : [\![1,rp]\!] &\to [\![1,r]\!]\\ (k-1)p+l &\mapsto k. \end{align*} The multigraph $H_\pi$ has the same number of edges as $G_\pi$, so $E(H_\pi)=E(G_\pi) \leq r-2$ and $H_\pi$ is not connected. As a consequence, if $[\![1,r]\!]=A \sqcup B$ are two non-connected components and $I=(\bar{\imath}^1,\ldots,\bar{\imath}^r)$ is a family of indices such that $\mathrm{Sp}_n(I)=\pi$, then the two families of indices $\bigcup_{a \in A} \bar{\imath}^a$ and $\bigcup_{b \in B} \bar{\imath}^b$ are disjoint. This implies that $\kappa(\pi,\mathrm{var}phi) = 0$, by using the third property in Proposition \ref{prop:cumulants}. \end{proof} \subsection{Limiting variance and asymptotics of the fluctuations} In order to apply Theorem \ref{theo:esti_cumu}, we also have to compute the limiting parameters $\sigma^2$ and $L$ involved in the method of cumulants. Identifying the leading terms in Equation \eqref{eq:expansion_cumulant}, we obtain: \begin{align*} \frac{\kappa^{(2)}(S_n(\mathrm{var}phi,\mathscr{X}))}{N_n D_n} = \frac{\kappa^{(2)}(S_n(\mathrm{var}phi,\mathscr{X}))}{p^2 n^{2p-1}} = \frac{1}{p^2} \sum_{\substack{\pi \in \mathfrak{Q}(2p) \\ \ell(\pi) = 2p-1}} \kappa(\pi,\mathrm{var}phi) + O\left(\frac{1}{n}\right). \end{align*} For $k,l \in [\![1,p]\!]$, we define the partition \begin{equation} \pi_{k,l} = \{k,l+p\} \cup \{ \{t\} \, ; \, t \in [\![1,2p]\!] \setminus \{k,l+p\} \}=\, \begin{tikzpicture}[scale=0.5, baseline=1mm] \foreach \x in {0,1,2,3,4,5} {\fill (\x,0) circle (3pt); \fill (\x,1) circle (3pt);} \draw (1,1) -- (3,0); \draw (1,1.5) node {\footnotesize $k$}; \draw (3,-0.5) node {\footnotesize $l$}; \end{tikzpicture} \,\,\,;\label{eq:pi_kl} \end{equation} the picture above of the set partition makes appear the integers in $[\![1,p]\!]$ on the top row, and the integers in $[\![p+1,2p]\!]$ on the bottom row. We then have: \begin{align*} \frac{\kappa^{(2)}(S_n(\mathrm{var}phi,\mathscr{X}))}{N_n D_n} = \frac{1}{p^2} \sum_{1 \leq k,l \leq p} \kappa(\pi_{k,l},\mathrm{var}phi) + O\left(\frac{1}{n}\right). 
\end{align*} Indeed, a set partition $\pi$ of $[\![1,2p]\!]$ with length $2p-1$ consists of a pair $\{k,l\}$ and of singletons, and if the pair $\{k,l\}$ is included in $[\![1,p]\!]$ or in $[\![p+1,2p]\!]$, then the graph $H_{\pi}$ introduced during the proof of Proposition \ref{prop:vanish generic} is not connected (it is the graph on $2$ vertices and without edge), so $\kappa(\pi,\mathrm{var}phi)=0$. Similarly, we compute the limiting third cumulant $L$: \begin{align*} \frac{\kappa^{(3)}(S_n(\mathrm{var}phi,\mathscr{X}))}{N_n (D_n)^2} =\frac{\kappa^{(3)}(S_n(\mathrm{var}phi,\mathscr{X}))}{p^4 n^{3p-2}} =\frac{1}{p^4} \sum_{\substack{\pi \in \mathfrak{Q}(3p) \\ \ell(\pi) = 3p-2}} \kappa(\pi,\mathrm{var}phi) + O\left(\frac{1}{n}\right). \end{align*} For $i,j,k,l \in [\![1,p]\!]$ with $j \neq k$, we define the partition: \begin{align} \pi_{i,j,k,l} &= \{i,j+p\} \cup \{k+p,l+2p\} \cup \{ \{t\} \, ; \, t \in [\![1,3p]\!] \setminus \{i,j+p,k+p,l+2p\} \} \nonumber\\ &=\,\, \begin{tikzpicture}[scale=0.5, baseline=4mm] \foreach \x in {0,1,2,3,4,5} {\fill (\x,0) circle (3pt); \fill (\x,1) circle (3pt); \fill (\x,2) circle (3pt);} \draw (1,1) -- (3,0); \draw (4,1) -- (5,2); \draw (0.7,1.2) node {\footnotesize $k$}; \draw (4.3,0.8) node {\footnotesize $j$}; \draw (5,2.5) node {\footnotesize $i$}; \draw (3,-0.5) node {\footnotesize $l$}; \end{tikzpicture}\,\,,\label{eq:pi_ijkl} \end{align} and if $j=k$: \begin{align*} \pi_{i,j,j,l} = \{i,j+p,l+2p\} \cup \{ \{t\} \, ; \, t \in [\![1,3p]\!] \setminus \{i,j+p,l+2p\} \} = \,\, \begin{tikzpicture}[scale=0.5, baseline=4mm] \foreach \x in {0,1,2,3,4,5} {\fill (\x,0) circle (3pt); \fill (\x,1) circle (3pt); \fill (\x,2) circle (3pt);} \draw (2,1) -- (3,0); \draw (2,1) -- (5,2); \draw (1.5,1) node {\footnotesize $j$}; \draw (5,2.5) node {\footnotesize $i$}; \draw (3,-0.5) node {\footnotesize $l$}; \end{tikzpicture}\,\,. \end{align*} \PLM{These are the only possible forms for a set partition of $[\![1,3p]\!]$ with length $3p-2$ and with the condition that $H_\pi$ is connected. For the $\pi_{i,j,k,l}$'s with $j \neq k$, we also need to take into account the set partitions where two elements of the top row or of the bottom row (instead of the middle row) are connected to elements of the other rows; this leads to a factor $3$ in the enumeration}. Thus, we have: \begin{align*} \frac{\kappa^{(3)}(S_n(\mathrm{var}phi,\mathscr{X}))}{N_n (D_n)^2} = \frac{1}{p^4} \sum_{1 \leq i,j,k,l \leq p} c_{i,j,k,l}\,\kappa(\pi_{i,j,k,l},\mathrm{var}phi) + O\!\left(\frac{1}{n}\right) \end{align*} with \begin{equation} c_{i,j,k,l} = \begin{cases} 3 &\text{if }j\neq k,\\ 1 & \text{if }j=k. \end{cases}\label{eq:coefficients_cijkl} \end{equation} \PLM{Similar formulas were obtained in \cite[Section 5]{2017arXiv171206841F} for the limiting behavior of the first cumulants of observables of random graphs associated to a graphon parameter. We have now established:} \begin{theo}[Fluctuations in the generic case]\label{theo:generic_case} Let $\mathscr{X} = \mathscr{X}ex \in \mathbb{M}$ a metric measure space and $\Phi=\Phi^{p,\mathrm{var}phi} \in \Pi$ a polynomial. 
\begin{enumerate} \item The random variable $S_n(\mathrm{var}phi,\mathscr{X}) = n^p\,\Phi(\mathscr{X}_n)$ satisfies the hypotheses of the method of cumulants \begin{itemize} \item with parameters $D_n = p^2 n^{p-1}$, $N_n = n^p$ and $A = \|\mathrm{var}phi\|_{\infty}$, \item and with limits $\sigma^2 = \frac{1}{p^2} \sum_{1 \leq k,l \leq p} \kappa(\pi_{k,l},\mathrm{var}phi)$ and $L = \frac{1}{p^4} \sum_{1 \leq i,j,k,l \leq p} c_{i,j,k,l}\,\kappa(\pi_{i,j,k,l},\mathrm{var}phi)$. \end{itemize} In the formul{\ae} for $\sigma^2$ and $L$, $\kappa(\pi,\mathrm{var}phi)$ with $\pi$ a set partition of $[\![1,pr]\!]$ is defined by Equation \eqref{eq:kappa_pi}, and the coefficients $c_{i,j,k,l}$ are given by Equation \eqref{eq:coefficients_cijkl}; the diagrams of the set partitions are drawn in Equations \eqref{eq:pi_kl} and \eqref{eq:pi_ijkl}. \item If $\sigma(\mathrm{var}phi,\mathscr{X}) > 0$, then the random variables \begin{align*} Y_n(\mathrm{var}phi,\mathscr{X}) = \frac{\Phi(\mathscr{X}_n) - \Phi(\mathscr{X})}{\sqrt{\mathrm{var}(\Phi(\mathscr{X}_n))}} \end{align*} satisfy all the limiting results from Theorem~\ref{theo:esti_cumu}. In particular, we have the convergence in law $Y_n(\mathrm{var}phi,\mathscr{X}) \rightharpoonup_{n \to \infty} \mathcal{N}(0,1)$, and $$d_{\mathrm{Kol}}\left(Y_n(\mathrm{var}phi,\mathscr{X}),\mathcal{N}(0,1)\right) = O\left(\left(\frac{\|\mathrm{var}phi\|_\infty}{\sigma}\right)^3\,p\,n^{-1/2}\right).$$ Under the assumption $\sigma(\mathrm{var}phi,\mathscr{X})>0$, the renormalisation $\sqrt{\mathrm{var}(\Phi(\mathscr{X}_n))}$ is of order $n^{-1/2}$, and more precisely, $\mathrm{var}(\Phi(\mathscr{X}_n))$ is a polynomial in $n^{-1}$ without constant term, and with leading term $p^2\,\sigma^2(\mathrm{var}phi,\mathscr{X})\,n^{-1}$. \end{enumerate} \end{theo} \PLM{With the terminology of \cite[Section 6, Definition 30]{2017arXiv171206841F}, the theorem above ensures that the pair $(\mathbb{M},\Pi)$ is a \emph{mod-Gaussian moduli space}: generically (as soon as $\sigma(\mathrm{var}phi,\mathscr{X})>0$), an observable of the Gromov--Prohorov sample model of an mm-space $\mathscr{X}$ has normal fluctuations of size $O(n^{-1/2})$, and the limiting variance $\sigma^2(\mathrm{var}phi,\mathscr{X})$ can be written as an observable $\kappa_2(\mathrm{var}phi,\mathrm{var}phi) \in \Pi$ evaluated on the mm-space $\mathscr{X}$. In this setting, a general problem is to identify the singular points of the space $\mathbb{M}$, that is to say the mm-spaces such that $\sigma^2(\mathrm{var}phi,\mathscr{X})=0$ for any function $\mathrm{var}phi \in \mathscr{C}_b(\mathbb{R}^{\binom{p}{2}})$, and thus such that the fluctuations of $\Phi^{p,\mathrm{var}phi}(\mathscr{X}_n)$ are of order smaller than $n^{-1/2}$. The next sections of this paper are devoted to this topic.} \section{Fluctuations in the homogeneous case}\label{sec:homogeneous_case} In this section, we place ourselves in the singular case of the Gromov--Prohorov sample model, where \begin{align} \forall p \geq 1,\,\,\forall \mathrm{var}phi \in \mathscr{C}_b\left(\mathbb{R}^{\binom{p}{2}}\right),\,\,\, \sigma^2 (\mathrm{var}phi,\mathscr{X}) = \frac{1}{p^2} \sum_{1 \leq k,l \leq p} \kappa(\pi_{k,l},\mathrm{var}phi) = 0.\label{eq:singular_case} \end{align} This implies that $\sqrt{n}\,(\Phi(\mathscr{X}_n)-\Phi(\mathscr{X}))$ converges in probability to $0$ for any observable $\Phi \in \Pi$.
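As a rough numerical illustration of this smaller order of fluctuations (an added sketch, not part of the proofs; the chordal distance, the observable $\mathrm{var}phi=\cos$ and the parametrisation measure chosen for the ellipse are arbitrary assumptions), one can compare the empirical variance of a degree-two observable for the unit circle, which is a compact homogeneous space, and for an ellipse, which is not. In view of Theorems \ref{theo:generic_case} and \ref{theo:singular_case}, the former should decay like $1/n^2$ and the latter like $1/n$.
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(1)

def circle(n):
    # uniform points on the unit circle (a compact homogeneous space)
    t = rng.uniform(0.0, 2.0 * np.pi, size=n)
    return np.stack([np.cos(t), np.sin(t)], axis=1)

def ellipse(n):
    # points on an ellipse, uniform in the parameter t (a non-homogeneous mm-space)
    t = rng.uniform(0.0, 2.0 * np.pi, size=n)
    return np.stack([2.0 * np.cos(t), np.sin(t)], axis=1)

def phi(points):
    # degree-2 observable with varphi = cos of the Euclidean (chord) distance
    d = np.linalg.norm(points[:, None, :] - points[None, :, :], axis=-1)
    return np.cos(d).mean()

def empirical_variance(sampler, n, n_rep=500):
    return np.var([phi(sampler(n)) for _ in range(n_rep)])

for n in (100, 200, 400):
    print(n, empirical_variance(circle, n), empirical_variance(ellipse, n))
# doubling n should divide the first column by ~4 (order 1/n^2)
# and the second column by ~2 (order 1/n)
\end{verbatim}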
A condition which implies \eqref{eq:singular_case} and which is much easier to check is: \begin{align}\label{eq:cov_vanish} \forall p \geq 1,\,\,\forall k,l \in [\![1,p]\!],\,\,\forall \mathrm{var}phi \in \mathscr{C}_b\left(\mathbb{R}^{\binom{p}{2}}\right), \,\,\,\,\kappa(\pi_{k,l},\mathrm{var}phi) = 0. \end{align} It is not known whether it is possible to have \eqref{eq:singular_case} without having \eqref{eq:cov_vanish}. We strongly believe that these two conditions are actually equivalent; let us detail a bit why this should be true. In Section \ref{sec:circle}, we shall introduce monomial observables of mm-spaces which are indexed by finite multigraphs; Equations \eqref{eq:singular_case} and \eqref{eq:cov_vanish} correspond to relations between the values of these observables on a mm-space. This viewpoint leads then to questions of graph theory, and a combinatorial study of these relations should allow one to understand whether Condition \eqref{eq:cov_vanish} is strictly stronger than, or equivalent to Condition \eqref{eq:singular_case}; we aim to address this problem in a forthcoming paper. Let us mention that a analogous problem occurs in the study of fluctuations of graphon models, where the Erd\H{o}s--Rényi random graphs are singular models but may not be the only singular points; see again \cite{2017arXiv171206841F}. In the remainder of the article, we assume that Condition \eqref{eq:cov_vanish} is satisfied, and we prove the following results: \begin{enumerate} \item This probabilistic condition is equivalent to a geometric property for the space $\mathscr{X}$, namely, $\mathscr{X}$ is a compact homogeneous space $G/K$ on which the compact group $G$ acts by isometry; see Theorem \ref{theo:homogeneous}. \item In this situation, for any observable $\Phi \in \Pi$, $n(\Phi(\mathscr{X}_n)-\Phi(\mathscr{X}))$ converges in distribution toward a law which is determined by its moments (Theorem \ref{theo:singular_case}). \item The limiting distribution is not necessarily Gaussian; we provide in Section \ref{sec:circle} an explicit example when $\mathscr{X}$ is the circle. \end{enumerate} Let us introduce a few more notations. Given $\mathscr{X}=\mathscr{X}ex \in \mathbb{M}$, we denote $\mathrm{Isomp}(\mathscr{X})$ the group of isometries $i : \mathcal{X} \to \mathcal{X}$ which are measure-preserving: $$d(\cdot,\cdot) = d(i(\cdot),i(\cdot))\quad \text{and}\quad i_*\mu=\mu.$$ The group $\mathrm{Isomp}(\mathscr{X})$ is endowed with the topology of uniform convergence on compact subsets, which is defined by the neighborhoods \begin{align} V(i,K,\epsilon) = \left\{ j \in \mathrm{Isomp}(\mathscr{X}) \, , \, \sup\limits_{x\in K} d(i(x),j(x)) < \epsilon \right\}\label{eq:neighborhood} \end{align} for $i \in \mathrm{Isomp}(\mathscr{X})$, $K$ compact subset of $\mathcal{X}$ and $\epsilon > 0$. The group action of $G=\mathrm{Isomp}(\mathscr{X})$ on $\mathcal{X}$ is the continuous map \begin{align*} \mathrm{Isomp}(\mathscr{X}) \times \mathcal{X} &\to \mathcal{X} \\ (g,x) &\mapsto g \cdot x = g(x). \end{align*} The orbit of $x \in \mathcal{X}$ is $O_x = \left\{y\in \mathcal{X}\mid\exists g \in G : y = g \cdot x \right\}$, and the stabilizer of $x$ is the subgroup of $G$ given by $\mathrm{St}_x = \{ g \in \mathrm{G} \mid g \cdot x = x\} $. For a subgroup $K$ of a group $G$, we denote by $G/K$ the space of left cosets of the group $G$ over $K$, and \begin{align*} \pi : G &\to G/K\\ g &\mapsto \bar{g}= gK \end{align*} the canonical projection map. 
The group action by left translations of $G$ on $G/K$ is $g \cdot \bar{g_1} = \overline{gg_1}$. For any $x \in \mathcal{X}$, we have the bijection \begin{align*} \left\{\begin{array}{ccc} G/\mathrm{St}_x & \to & O_x\\ \bar{g} & \mapsto & g \cdot x. \end{array}\right. \end{align*} Finally, we denote $\mathcal{X}_\mu^\mathbb{N}$ the space of $\mu$-equidistributed sequences: \begin{align*} \mathcal{X}_{\mu}^\mathbb{N} = \left\{(x_n)_{n \in \mathbb{N}} \mid \frac{1}{n}\sum_{i=1}^n \delta_{x_i} \rightharpoonup_{ n \to +\infty } \mu \right\}. \end{align*} \subsection{Equivalence between small variance and compact homogeneity} The following theorem characterizes the singular case~\eqref{eq:cov_vanish}, where the variance of $\Phi(\mathscr{X}_n)$ is at most of order $1 / n^2$ for any polynomial $\Phi$. \PLM{Let us restate our Condition \eqref{eq:cov_vanish} in simpler words. Given $1\leq k,l \leq p$, suppose that for any $\mathrm{var}phi \in \mathscr{C}_b(\mathbb{R}^{\binom{p}{2}})$, we have $$0=\kappa(\pi_{k,l},\mathrm{var}phi) = \mathrm{cov}\left(\mathrm{var}phi(d(X_1,\ldots,X_p)),\mathrm{var}phi(d(X_1',\ldots,\overset{(l)}{X_k},\ldots,X_p'))\right).$$ By polarisation, the covariance between any two bounded continuous functions $\psi_1$ and $\psi_2$ of the distances vanishes: $$\mathrm{cov}\left(\psi_1(d(X_1,\ldots,X_p)),\psi_2(d(X_1',\ldots,\overset{(l)}{X_k},\ldots,X_p'))\right)=0.$$ In particular, taking \begin{align*} \psi_1(d(x_1,\ldots,x_p))&=\mathrm{var}phi(d(x_k,x_1,x_2,\ldots,x_{k-1},x_{k+1},\ldots,x_p));\\ \psi_2(d(x_1,\ldots,x_p))&=\mathrm{var}phi(d(x_l,x_1,x_2,\ldots,x_{l-1},x_{l+1},\ldots,x_p)), \end{align*} we obtain \begin{equation} \mathrm{cov}\left(\mathrm{var}phi(d(X_1,\ldots,X_p)),\mathrm{var}phi(d(X_1,X_2',\ldots,X_p'))\right)=\kappa(\pi_{1,1},\mathrm{var}phi) =0.\label{eq:cov_vanish2} \end{equation} Thus, the vanishing of one kind of covariance $\kappa(\pi_{k,l},\mathrm{var}phi)$ is equivalent to the vanishing of all these covariances for $1 \leq k,l \leq p$, and in the sequel we shall work with the case $k=l=1$. } We recall that $\nu$ is the map that associates to any point $x \in \mathcal{X}$ its pointed distance matrix distribution $\nu^x$, that is, the law of the random distance matrix $d((x,X_1,X_2,\ldots))$. \begin{theo}\label{theo:homogeneous} The following assertions are equivalent: \begin{enumerate} \item For all $p \geq 1$ and $\mathrm{var}phi \in \mathscr{C}_b(\mathbb{R}^{\binom{p}{2}})$, $\mathrm{cov}(\mathrm{var}phi(d(X_1,\ldots,X_p)),\mathrm{var}phi(d(X_1,X_2',\ldots,X_p')))=0$. \item The map $\nu$ is constant. \item The action of $\mathrm{Isomp}(\mathscr{X})$ on $\mathcal{X}$ is transitive. \item There exist a compact topological group $G$ and a closed subgroup $K$ of $G$ such that $$\mathscr{X}ex = (G/K, d_{G/K}, \mu_{G/K}),$$ where $d_{G/K}$ is a distance invariant under the action of $G$ ($d_{G/K}(\overline{gg_1},\overline{gg_2}) = d_{G/K}(\overline{g_1},\overline{g_2})$), and $\mu_{G/K} = \pi_{*}(\mathrm{Haar}_G)$ is the push-forward of the Haar measure of $G$. \end{enumerate} \end{theo} \PLM{\begin{remark} In the fourth item of Theorem \ref{theo:homogeneous}, the identification of $\mathscr{X}$ as a compact homogeneous space has to be understood in the space $\mathbb{M}$, that is to say modulo measure-preserving isometries. In particular, one assumes that $\mathcal{X}$ is equal to the support of $\mu$. \end{remark}} \begin{proof} \implication{(1)}{(2)} Let $A$ be a closed subset of $\mathbb{R}^{\binom{p}{2}}$.
There exists a sequence $(\varphi_q)_{q \in \mathbb{N}}$ of non-negative continuous bounded functions converging pointwise to the indicator function $\mathbbm{1}_{A}$ of $A$: take $\varphi_q(x) = \max(0,1-q \,d(x,A))$. Taking the limit in Equation~\eqref{eq:cov_vanish2} as $q$ goes to infinity (by dominated convergence), we obtain \begin{align*} &\mathbb{E}\left[\mathbbm{1}_{A}(d(X_1,X_2,\dots,X_{p}))\mathbbm{1}_{A}(d(X_1,X_2',\dots,X_{p}'))\right] \\ &= \mathbb{E}\left[\mathbbm{1}_{A}(d(X_1,X_2,\dots,X_{p}))\right] \mathbb{E}\left[\mathbbm{1}_{A}(d(X_1,X_2',\dots,X_{p}'))\right] . \end{align*} If $U = \mathbb{R}^{\binom{p}{2}} \setminus A$, then $\mathbbm{1}_U = 1-\mathbbm{1}_A$, so the same identity holds when $A$ is an open subset. Let us define the map \begin{align*} \mathrm{Ed}_{A} \colon \mathcal{X} &\to \mathbb{R} \\ x &\mapsto \mathbb{E}[\mathbbm{1}_{A}(d(x,X_2,\dots,X_p))]. \end{align*} We have: \begin{align*} \int_{\mathcal{X}} \left(\mathrm{Ed}_{A}\right)^2(x) \, \mu(dx) &= \int_{\mathcal{X}} \mathbb{E}\left[\mathbbm{1}_{A}(d(x,X_2,\dots,X_{p}))\right]\mathbb{E}\left[\mathbbm{1}_{A}(d(x,X_2',\dots,X_{p}'))\right] \,\mu(dx) \\ &= \int_{\mathcal{X}} \mathbb{E}\left[\mathbbm{1}_{A}(d(x,X_2,\dots,X_{p}))\mathbbm{1}_{A}(d(x,X_2',\dots,X_{p}'))\right] \, \mu(dx) \\ &= \mathbb{E}\left[\int_\mathcal{X} \mathbbm{1}_{A}(d(x,X_2,\dots,X_{p}))\mathbbm{1}_{A}(d(x,X_2',\dots,X_{p}')) \, \mu(dx)\right] \\ &= \mathbb{E}\left[\mathbbm{1}_{A}(d(X_1,X_2,\dots,X_{p}))\mathbbm{1}_{A}(d(X_1,X_2',\dots,X_{p}'))\right] \\ &= \mathbb{E}\left[\mathbbm{1}_{A}(d(X_1,X_2,\dots,X_{p}))\right] \mathbb{E}\left[\mathbbm{1}_{A}(d(X_1,X_2',\dots,X_{p}'))\right] \\ &= \left(\mathbb{E}\left[\mathbbm{1}_{A}(d(X_1,X_2,\dots,X_{p}))\right]\right)^2 \\ &= \left(\int_{\mathcal{X}} \mathrm{Ed}_{A}(x) \, \mu(dx)\right)^2, \end{align*} so the variance $\mathrm{var}(\mathrm{Ed}_{A})$ under $\mu$ vanishes. We have thus shown that \begin{align*} \forall A \text{ open subset of } \mathbb{R}^{\binom{p}{2}}, \,\,\mu\text{-almost surely, }\mathrm{Ed}_{A} \text{ is constant } \left(= \int_{\mathcal{X}} \mathrm{Ed}_{A}(x) \, \mu(dx)\right). \end{align*} Fix a countable basis of open subsets $(A_i)_{i \in \mathbb{N}}$ of $\mathbb{R}^{\binom{p}{2}}$. For any $A_{i_1},\dots,A_{i_n}$, there exists a set $\mathcal{X}_{A_{i_1},\dots,A_{i_n}}$ of $\mu$-measure $1$ such that $\mathrm{Ed}_{A_{i_1} \cup \dots \cup A_{i_n}}$ is constant on that set. Hence, by taking the countable intersection of these sets, there exists a set $\mathcal{X}_0 \subseteq \mathcal{X}$ of $\mu$-measure $1$ such that all the maps $\mathrm{Ed}_{A_{i_1} \cup \dots \cup A_{i_n}}$ are simultaneously constant on $\mathcal{X}_0$. We can replace in the previous statement $\mathcal{X}_0$ by $\mathcal{X}$, because by dominated convergence, $\mathrm{Ed}_{A}$ is continuous over $\mathcal{X}$, and by assumption, $\mathcal{X}$ is the support of $\mu$, that is to say the smallest closed subset with $\mu$-measure $1$. Consider now an arbitrary open subset $A \subset \mathbb{R}^{\binom{p}{2}}$, and $x,y \in \mathcal{X}$. We can write $A$ as a union $\bigcup_{i \in I} A_i$, and for any finite subfamily $J \subset I$, we have by what precedes $$\mathrm{Ed}_{\bigcup_{i \in J}A_i} (x) = \mathrm{Ed}_{\bigcup_{i \in J}A_i} (y).$$ By letting $J$ grow to $I$, we conclude that $\mathrm{Ed}_A(x)=\mathrm{Ed}_A(y)$. The set of all $A \subset \mathbb{R}^{\binom{p}{2}}$ such that $\mathrm{Ed}_{A}$ is constant is a Dynkin system, so we get that for any Borel subset $A$ of $\mathbb{R}^{\binom{p}{2}}$, the map $\mathrm{Ed}_{A}$ is constant over $\mathcal{X}$.
This means that the law of $d(x,X_2,\ldots,X_p)$ does not depend on $x \in \mathcal{X}$. As this is true for any $p \geq 1$, and as the measurable structure of $\mathbb{R}^{\mathrm{met}}$ is defined by its finite projections, we conclude that $\nu^x$ does not depend on $x$. \noindent \implication{(2)}{(1)} Fix $x_0 \in \mathcal{X}$, and denote by $(X_n')_{n \in \mathbb{N}}$ an independent copy of $(X_n)_{n \in \mathbb{N}}$. We can write \begin{align*} &\mathbb{E}[\varphi(d(X_1,X_2,\dots,X_{p}))\,\varphi(d(X_1,X_2',\dots,X_{p}'))] \\ &= \mathbb{E}\left[\int_{\mathcal{X}} \varphi(d(x,X_2,\dots,X_{p}))\varphi(d(x,X_2',\dots,X_{p}'))\, \mu(dx)\right]\\ &= \mathbb{E}\left[ \varphi(d(x_0,X_2,\dots,X_{p}))\,\varphi(d(x_0,X_2',\dots,X_{p}'))\right] \\ &= \mathbb{E}\left[ \varphi(d(x_0,X_2,\dots,X_{p}))\right]\,\mathbb{E}\left[\varphi(d(x_0,X_2',\dots,X_{p}'))\right]\\ &= \mathbb{E}\left[\int_{\mathscr{X}} \varphi(d(x,X_2,\dots,X_{p}))\,\mu(dx)\right]\,\mathbb{E}\left[\int_{\mathscr{X}}\varphi(d(x,X_2',\dots,X_{p}'))\,\mu(dx)\right]\\ &= \mathbb{E}\left[ \varphi(d(X_1,X_2,\dots,X_{p}))\right]\,\mathbb{E}\left[\varphi(d(X_1,X_2',\dots,X_{p}'))\right] \end{align*} because from the second point, the integrals inside the expectations do not depend on $x$. \noindent \implication{(2)}{(3)} We adapt the arguments of \cite[Section $3\frac{1}{2}$]{gromov2007metric}. Let $x,y \in \mathcal{X}$; since the map $\nu$ is constant by hypothesis, we denote by $\nu^{eq} = \nu^x = \nu^y$ its common value. The law of large numbers gives us $\mu^{\otimes\mathbb{N}} ( \mathcal{X}_\mu^\mathbb{N} ) = 1.$ Then \begin{align*} \nu^{eq}\left((\iota^\mathscr{X} \circ S^x)(\mathcal{X}_\mu^\mathbb{N})\cap (\iota^\mathscr{X} \circ S^y)(\mathcal{X}_\mu^\mathbb{N}) \right) &= \nu^x\left((\iota^\mathscr{X} \circ S^x)\left(\mathcal{X}_\mu^\mathbb{N}\right)\right) + \nu^y\left((\iota^\mathscr{X} \circ S^y)\left(\mathcal{X}_\mu^\mathbb{N}\right)\right) \\ &\quad - \nu^{eq}\left((\iota^\mathscr{X} \circ S^x)(\mathcal{X}_\mu^\mathbb{N})\cup (\iota^\mathscr{X} \circ S^y)(\mathcal{X}_\mu^\mathbb{N}) \right) \\ &\geq \mu^{\otimes \mathbb{N}}(\mathcal{X}_\mu^\mathbb{N}) + \mu^{\otimes \mathbb{N}}(\mathcal{X}_\mu^\mathbb{N}) - 1 = 1. \end{align*} This implies the existence of two sequences $(x_n)_{n \in \mathbb{N}}$ and $(y_n)_{n \in \mathbb{N}}$ in $\mathcal{X}^\mathbb{N}$ such that \begin{itemize} \item $x_0 = x$ and $y_0 = y$; \item $(x_n)_{n \in \mathbb{N}}$ and $(y_n)_{n \in \mathbb{N}}$ are in $\mathcal{X}_\mu^\mathbb{N}$; \item $(d(x_i,x_j))_{i,j} = (d(y_i,y_j))_{i,j}$. \end{itemize} By the Portmanteau theorem \cite[Theorem 2.1]{billing}, a $\mu$-equidistributed sequence is dense in the support of $\mu$. Therefore, there exists a unique isometry $ i \colon \mathcal{X} \to \mathcal{X}$ such that for all $n \in \mathbb{N}$, $i(x_n) = y_n$. We have for any continuous bounded function $f : \mathcal{X}\to \mathbb{R}$: \begin{align*} \frac{1}{n}\sum_{j=1}^n \delta_{x_j}(f \circ i) = \frac{1}{n}\sum_{j=1}^n \delta_{i(x_j)}(f) = \frac{1}{n}\sum_{j=1}^n \delta_{y_j}(f) . \end{align*} By taking the limit of this identity as $n$ goes to infinity, we obtain $\mu(f \circ i) = \mu(f)$. This is true for any $f \in \mathscr{C}_b(\mathcal{X})$, so by \cite[Theorem 1.2]{billing}, $i_*\mu=\mu$. We have therefore constructed $i \in \mathrm{Isomp}(\mathscr{X})$ such that $i(x)=y$.
\noindent \implication{(3)}{(2)} Let $x,y \in \mathcal{X}$, by $3.$, there exists an isometry $i \colon \mathcal{X} \to \mathcal{X}$ with $i(x) = y$ and $i_* \mu = \mu$. We can define $i^\mathbb{N} \colon \mathcal{X}^\mathbb{N} \to \mathcal{X}^\mathbb{N}$ with $i^\mathbb{N}((x_n)_{n \in \mathbb{N}}) = (i(x_n))_{n \in \mathbb{N}}$. We get $(i^\mathbb{N})_* \mu^{\otimes \mathbb{N}} = \mu^{\otimes \mathbb{N}}$. Let $\mathrm{var}phi \colon \mathbb{R}^{\mathrm{met}} \to \mathbb{R}$ a bounded continuous function, we have with $x_0 = x$ and $y_0 = y$, \begin{align*} \int_{\mathbb{R}^{\mathrm{met}}} \mathrm{var}phi(z) \, \nu^x(z) &= \int_{\mathcal{X}^\mathbb{N}} \mathrm{var}phi(d((x_n)_{n \in \mathbb{N}})) \, \mu^{\otimes \mathbb{N}}((x_{n+1})_{n \in \mathbb{N}}) \\ &= \int_{\mathcal{X}^\mathbb{N}} \mathrm{var}phi(d((i(x_n))_{n \in \mathbb{N}}) \, \mu^{\otimes \mathbb{N}}((x_{n+1})_{n \in \mathbb{N}}) \\ &= \int_{\mathcal{X}^\mathbb{N}} \mathrm{var}phi(d((y_n)_{n \in \mathbb{N}}) \, (i^\mathbb{N})_* \mu^{\otimes \mathbb{N}}((y_{n+1})_{n \in \mathbb{N}}) \\ &= \int_{\mathcal{X}^\mathbb{N}} \mathrm{var}phi(d((y_n)_{n \in \mathbb{N}}) \, \mu^{\otimes \mathbb{N}}((y_{n+1})_{n \in \mathbb{N}}) \\ &=\int_{\mathbb{R}^{\mathrm{met}}} \mathrm{var}phi(z) \, \nu^y(z), \end{align*} so $\nu^x=\nu^y$. \noindent \implication{(4)}{(3)} The action of $G$ on $G/K$ gives rise to translations $(\tau_g)_{g \in G}$ with $\tau_g(\overline{g_1}) = \overline{gg_1}$; they form a subgroup of $\mathrm{Isomp}(G/K)$. For $\overline{g_1}, \overline{g_2} \in G/K$, the translation $\tau_{g_2 g_1^{-1}}$ sends $\overline{g_1}$ to $\overline{g_2}$, so $\mathrm{Isomp}(G/K)$ is transitive on $G/K$. \noindent \implication{(3)}{(4)} Let $(x_n)_{n \in \mathbb{N}}$ a dense sequence in $\mathscr{X}$ and \begin{align*} D_{\mathscr{X}, \epsilon} = \left\{ I \subseteq \mathcal{P}(\mathbb{N}) \mid \text{the union }\cup_{n\in I}B(x_n,\epsilon) \text{ is disjoint}\right\}; \end{align*} this is a poset for the inclusion order, and it is stable by increasing union. We build by induction a maximal element of this set. We set $A_0 = B(x_0,\epsilon)$ and $I_0 = \{0\}$, and then for any $n \in \mathbb{N}$: \begin{itemize} \item if $B(x_{n+1},\epsilon)\cap A_n = \emptyset$, then $A_{n+1} = A_n \sqcup B(x_{n+1},\epsilon)$ and $I_{n+1} = I_n \sqcup \{n+1\}$; \item otherwise, $A_{n+1} = A_n$ and $I_{n+1} = I_n$. \end{itemize} Consider $I_{\max} = \bigcup_{n \in \mathbb{N}} I_n$. \begin{enumerate} \item The set of indices $I_{\max}$ is a maximal element of $\left(D_{\mathscr{X}, \epsilon}, \subseteq\right)$: if $n \notin I_{\max}$, then $B(x_{n},\epsilon) \cap A_{n-1}$ is non-empty, and \emph{a fortiori} $$B(x_n,\epsilon)\cap \left(\bigsqcup_{i \in I_{\max}} B(x_i,\epsilon)\right)\neq \emptyset;$$ therefore, we cannot add $n$ to $I_{\max}$ and stay in $D_{\mathscr{X},\epsilon}$. \item We have $\mathcal{X} = \bigcup_{n \in I_{\max}} B(x_n, 3\epsilon)$. If $x \in \mathcal{X}$, since $(x_n)_{n \in \mathbb{N}}$ is dense in $\mathcal{X}$, there exists $n \in \mathbb{N}$ such that $x \in B(x_n,\epsilon)$. If $n \in I_{\max}$, then obviously $$x \in \bigsqcup_{n \in I_{\max}} B(x_n,\epsilon) \subset \bigcup_{n \in I_{\max}} B(x_n, 3\epsilon),$$ and if $n$ is not in $I_{\max}$, then there exists $n' \in I_{\max}$ such that $ y \in B(x_n,\epsilon)\cap B(x_{n'},\epsilon) \neq \emptyset$. Hence, we have \begin{align*} d(x,x_{n'}) \leq d(x,x_n) + d(x_n,y) +d(y,x_{n'}) \leq 3\epsilon. \end{align*} \item The set $I_{\max}$ is finite. 
Indeed, because the action of $\mathrm{Isomp}(\mathscr{X})$ over $\mathscr{X}$ is transitive, the following map is constant: \begin{align*} \mathcal{X} &\to \mathbb{R} \\ x &\mapsto \mu(B(x, \epsilon)) \end{align*} with common value denoted $\mu_{\epsilon}>0$. Consequently, \begin{align*} 1 \geq \mu\left(\bigsqcup_{n \in I_{\max}}B(x_n,\epsilon)\right) = \sum_{n \in I_{\max}} \mu(B(x_n,\epsilon)) = \mathrm{card}(I_{\max})\,\mu_{\epsilon} \end{align*} because $\mu$ is a probability measure. \end{enumerate} So, $I_{\max}$ is finite, and we have proved that $\mathcal{X}$ is a pre-compact space. Since $\mathcal{X}$ is complete, $\mathcal{X}$ is compact. \PLM{ The group of isometries $\mathrm{Isom}(\mathscr{X})$ endowed with the compact-open topology defined by the neighborhoods $V(i,K,\epsilon)$ from Equation \eqref{eq:neighborhood} is also a compact Hausdorff space: \begin{itemize} \item It is a general fact that given two compact metric spaces $\mathcal{X}$ and $\mathcal{Y}$, the space of continuous functions $\mathscr{C}(\mathcal{X},\mathcal{Y})$ endowed with the compact-open topology is metrised by $d(f,g) = \sup_{x \in \mathcal{X}} d(f(x),g(x))$; see \cite[Chapter XII, Section 8]{Dug66}. By restriction, the topology of $\mathrm{Isom}(\mathcal{X})$ is metrisable. \item The compactness of $\mathrm{Isom}(\mathscr{X})$ is then an immediate application of the Arzela--Ascoli theorem. \end{itemize} The subgroup of measure-preserving isometries $\mathrm{Isomp}(\mathscr{X})$ is a closed subgroup of $\mathrm{Isom}(\mathscr{X})$, hence also compact.} Since the action of $\mathrm{Isomp}(\mathscr{X})$ over $\mathcal{X}$ is transitive, we have $\mathrm{O}_x = \mathcal{X}$ for each $x \in \mathcal{X}$. Therefore, we have the following homeomorphism (see \cite[Theorem 2.3.2]{mneimne1986introduction}): \begin{align*} \psi : \mathrm{Isomp}(\mathscr{X})/\mathrm{St}_x & \to \mathcal{X}\\ \bar{g} & \mapsto g \cdot x. \end{align*} Denote $G = \mathrm{Isomp}(\mathscr{X})$ and $K=\mathrm{St}_x$, $x$ being an arbitrary reference point in the space $\mathcal{X}$. The homeomorphism $\psi$ allows one to transport the distance $d$ of $\mathcal{X}$ to a $G$-invariant distance $d_{G/K}(\cdot,\cdot)=d(\psi^{-1}(\cdot),\psi^{-1}(\cdot))$, and the measure $\mu$ to a $G$-invariant probability measure $\mu_{G/K}=(\psi^{-1})_*\mu$ on $G/K$. It remains to prove that $\mu_{G/K} = \pi_*(\mathrm{Haar}_G)$. Given a topological compact Hausdorff space $Z$, we recall the bijective correspondence (see \cite[Chapter IX]{Lang93}): \begin{alignat*}{2} \mathscr{M}^1(Z) &\to \{\phi \colon \mathscr{C}(Z,\mathbb{R}) \to \mathbb{R}, \, \mathbb{R}\text{-linear, continuous, positive and with }\phi(1) = 1 \} \\ \mu &\mapsto \begin{cases} \mathscr{C}(Z,\mathbb{R}) \!\!\!\!&\to \mathbb{R} \\ \qquad f &\mapsto \int_Z f(z) \mu(dz) . \end{cases} \end{alignat*} To any topological compact Hausdorff group $Z$, we associate the probability Haar measure $\mathrm{Haar}_Z$, and we define \begin{alignat*}{2} T \colon \mathscr{C}(G) &\to \mathscr{C}(G/K) \\ f &\mapsto Tf \colon\begin{cases} G/K \!\!\!\!&\to \mathbb{R} \\ \,\,\,\, gK \!\!\!\!&\mapsto \int_K f(gk) \, \mathrm{Haar}_K(dk) \end{cases} \end{alignat*} We denote by $\mathscr{C}(G)_+^*$ the space of positive continuous linear forms on the $\mathbb{R}$-vector space $\mathscr{C}(G)$. 
The transformation $T$ induces the contravariant transformation \begin{align*} T^* \colon \mathscr{C}(G/K)_+^* &\to \mathscr{C}(G)_+^* \\ \nu &\mapsto \nu \circ T, \end{align*} and any group action $G \times A \to A$ induces the group action \begin{align*} G \times \mathscr{C}(A) &\to \mathscr{C}(A) \\ (g,f) &\mapsto g \cdot f = \begin{cases} A \!\!\!\!&\to \mathbb{R} \\ x\!\!\!\! &\mapsto f(g^{-1} \cdot x). \end{cases} \end{align*} Consider the probability measure $\mu_{G/K} $ as an element of $\mathscr{C}(G/K)_+^*$; we have by definition that for any $g \in G$ and $p \in \mathscr{C}(G/K) $, $\mu(g \cdot p) = \mu(p)$. If $q \in \mathscr{C}(G)$, then we have \begin{align*} (\mu \circ T)( g \cdot q) = \mu (T(g \cdot q)) &= \mu \left( \begin{cases} G/K\!\!\!\! &\to \mathbb{R} \\ \,\,\,\,\overline{l} \!\!\!\!&\mapsto \int_K (g \cdot q) (lk) \, \mathrm{Haar}_K(dk) \end{cases}\right) \\ &= \mu \left( \begin{cases} G/K \!\!\!\! &\to \mathbb{R} \\ \,\,\,\,\overline{l} \!\!\!\!&\mapsto \int_K q (g^{-1}lk) \, \mathrm{Haar}_K(dk) \end{cases}\right) \\ &= \mu \left( g \cdot \begin{cases} G/K \!\!\!\!&\to \mathbb{R} \\ \,\,\,\, \overline{l} \!\!\!\!&\mapsto \int_K q (lk) \, \mathrm{Haar}_K(dk) \end{cases}\right) \\ &= (\mu \circ T)(q). \end{align*} so $\mu \circ T = T^*(\mu)$ is the unique $G$-invariant positive normalised continuous linear form on $\mathscr{C}(G)$. Hence $T^*(\mu) = \mathrm{Haar}_G$, and we finally need to show that $\pi_* \circ T^* = \mathrm{Id}_{\mathscr{C}(G/K)_+^*}$. However, for any $ \overline{g} \in G/K $ and $f \in \mathscr{C}(G/K)$, $T(f \circ \pi)(\overline{g}) = \int_K f \circ \pi (gk) \,\mathrm{Haar}_K(dk) = \int_K f(\overline{g})\, \mathrm{Haar}_K(dk) = f(\overline{g})$; \PLM{the result follows by functoriality.} \end{proof} \subsection{Study of the cumulants in the homogeneous case} \PLM{We now perform the asymptotic analysis of the fluctuations of the observables $\Phi(\mathscr{X}_n)$ when $\mathcal{X}=G/K$ is a compact homogeneous space. We start by proving an upper bound on the cumulants of $S_n(\mathrm{var}phi,\mathscr{X})$ which will be analogue to the one of the method of cumulants, but with different parameters $N_n$ and $D_n$, and with a non-Gaussian limiting distribution; see Theorem \ref{theo:bound_cumulant_homogeneous}.} Our arguments will involve spanning trees of graphs. We recall that a \emph{Cayley tree} of size $r$ is a labeled tree with vertex set $[\![1,r]\!]$; there are $r^{r-2}$ Cayley trees of size $r$. We start with the homogeneous analogue of Proposition \ref{prop:vanish generic}. \begin{prop}\label{prop:vanish generic homogeneous} If $\mathscr{X}$ is a compact homogeneous space, then for $r\geq2$, $\pi \in \mathfrak{Q}(pr)$ and $\mathrm{var}phi \in \mathscr{C}_b(\mathbb{R}^{\binom{p}{2}})$, if $\ell(\pi) > (p-1)r $, then $\kappa(\pi,\mathrm{var}phi) = 0$. \end{prop} \begin{proof} We consider the same trees $(T_A)_{A \in \pi}$, the same graph $G_\pi$ and the same multigraph $H_\pi$ as in the proof of Proposition~\ref{prop:vanish generic}. We have $\sum_{A \in \pi} (|E(T_A)|+1) = pr$. This implies $|E(H_\pi)|=|E(G_\pi)|=\sum_{A \in \pi} |E(T_A)| \leq r-1$ by the assumption on $\ell(\pi)$. If $H_\pi$ is not connected, then the same argument as in Proposition~\ref{prop:vanish generic} gives $\kappa(\pi,\mathrm{var}phi)=0$. Therefore, the only remaining case to treat is when $H_\pi$ is connected and has exactly $r-1$ edges; it is then a Cayley tree. 
Fix $I=(\bar{\imath}^1,\ldots,\bar{\imath}^r)$ such that $\mathrm{Sp}_n(I)=\pi$, and an index $k \in [\![1,r]\!]$ which is a leaf of the graph $H_\pi$. By definition of the multigraph $H_\pi$, the block of indices $\bar{\imath}^k$ shares exactly one index with all the other blocks $\bar{\imath}^{j \neq k}$: $$\mathrm{Card}\left(\overline{\mathrm{Im}}(\bar{\imath}^k)\cap \bigcup_{j\neq k} \overline{\mathrm{Im}}(\bar{\imath}^j)\right) =1. $$ To fix the ideas, let us assume that $k=1$ and that the shared index in $\bar{\imath}^1=(\bar{\imath}^1_1,\ldots,\bar{\imath}^1_p)$ is the first one. To compute the cumulant, we consider \begin{align*} \mathbb{E}\left[\mathrm{e}^{t_1 \mathrm{var}phi(d(X_{\bar{\imath}^1}))+\cdots+ t_r \mathrm{var}phi(d(X_{\bar{\imath}^r}))}\right] = \int_{\mathcal{X}}\left(\int_{\mathcal{X}^{(p-1)r}} \mathrm{e}^{t_1 \mathrm{var}phi(d(x_{\bar{\imath}^1}))+\cdots+ t_r \mathrm{var}phi(d(x_{\bar{\imath}^r}))}\, \mu^{\otimes (p-1)r}((x_i)_{i \neq \bar{\imath}^1_1})\right)\mu(dx_{\bar{\imath}^1_1}). \end{align*} Denote $F(x_{\bar{\imath}^1_1})$ the integral where one has integrated all the variables except $x_{\bar{\imath}^1_1}$. If $x_0$ is an arbitrary point in $\mathcal{X}$, then for any $x_{\bar{\imath}^1_1}$, we have an isometry $\psi \in\mathrm{Isomp}(\mathscr{X})$ such that $\psi(x_{\bar{\imath}^1_1}) = x_0$, because $\mathscr{X}$ is homogeneous. So, denoting $y_a = \psi(x_a)$, we get \begin{align*} F(x_{\bar{\imath}^1_1})&=\int_{\mathcal{X}^{(p-1)r}} \mathrm{e}^{t_1 \mathrm{var}phi(d(x_{\bar{\imath}^1}))+\cdots+ t_r \mathrm{var}phi(d(x_{\bar{\imath}^r}))}\, \mu^{\otimes (p-1)r}((x_i)_{i \neq \bar{\imath}^1_1}) \\ &= \int_{\mathcal{X}^{(p-1)r}} \mathrm{e}^{t_1 \mathrm{var}phi(d(y_{\bar{\imath}^1})))+\cdots+ t_r \mathrm{var}phi(d(y_{\bar{\imath}^r})))}\, \mu^{\otimes (p-1)r}((y_i)_{i \neq \bar{\imath}^1_1})\\ &= F(x_0) \end{align*} and the integral does not depend on $x_{\bar{\imath}^1_1}$. So, we have \begin{align*} \mathbb{E}\left[\mathrm{e}^{t_1 \mathrm{var}phi(d(X_{\bar{\imath}^1}))+\cdots+ t_r \mathrm{var}phi(d(X_{\bar{\imath}^r}))}\right] = F(x_0) &= \int_{\mathcal{X}} F(x_0)\,\mu(dx_0)\\ &= \int_{\mathcal{X}^{(p-1)r+1}} \mathrm{e}^{t_1 \mathrm{var}phi(d(x_{\bar{\jmath}^1}))+\cdots+ t_r \mathrm{var}phi(d(x_{\bar{\jmath}^r}))}\, \mu^{\otimes (p-1)r+1}((x_j)_{j \in \overline{\mathrm{Im}}(\bar{\jmath})}) \end{align*} where $J=(\bar{\jmath}^1,\ldots,\bar{\jmath}^r)$ is the same collection of indices as $I$, except that we have replaced $\bar{\imath}^1_1$ by a new index different from all the other indices. In this new collection, $\bar{\jmath}^1$ does not share any index with the other families $\bar{\jmath}^{k\geq 2}$, so $X_{\bar{\jmath}^1}$ is independent from the other variables, and $$\mathbb{E}\left[\mathrm{e}^{t_1 \mathrm{var}phi(d(X_{\bar{\imath}^1}))+\cdots+ t_r \mathrm{var}phi(d(X_{\bar{\imath}^r}))}\right]= \mathbb{E}\left[\mathrm{e}^{t_1 \mathrm{var}phi(d(X_{\bar{\imath}^1}))}\right] \mathbb{E}\left[\mathrm{e}^{t_2\mathrm{var}phi(d(X_{\bar{\imath}^2}))+\cdots+ t_r \mathrm{var}phi(d(X_{\bar{\imath}^r}))}\right].$$ Looking at the coefficient of $[t_1\cdots t_r]$ in the logarithm of the Laplace transform, we conclude that the joint cumulant vanishes. \end{proof} \PLM{\begin{remark} The proof of this proposition leads to a slightly stronger result: if $\pi \in \mathfrak{Q}(pr)$ is a set partition such that $H_\pi$ is disconnected or is a tree, or even is a connected graph with one vertex of valence $1$, then the corresponding cumulant vanishes. 
For instance, with $r=2$ and $p=6$, the following set partition \begin{center} \begin{tikzpicture}[scale=0.5] \foreach \x in {0,1,2,3,4,5} {\fill (\x,0) circle (3pt); \fill (\x,1) circle (3pt);} \fill[gray] (1,0) -- (0,0) -- (0,1) -- (1,0); \draw (1,0) -- (0,0) -- (0,1) -- (1,0); \end{tikzpicture} \end{center} \noindent which identifies one index of the first block of indices $\bar{\imath}^1$ with two distinct indices of the second block $\bar{\imath}^2$ satisfies $\ell(\pi) = 10 = (p-1)r$, but the corresponding graph $H_\pi$ is the unique Cayley tree on $2$ vertices, so $\kappa(\pi,\varphi)=0$ for any function $\varphi \in \mathscr{C}(\mathbb{R}^{\binom{p}{2}})$. The most general condition which leads to the vanishing of the joint cumulant $\kappa(\pi,\varphi)=0$ is the following: if there exists an integer $k \in [\![1,r]\!]$ such that, among the integers $(k-1)p+1,\ldots,kp$, the set partition $\pi \in \mathfrak{Q}(pr)$ contains $p-1$ singletons (the remaining integer of this block may be connected to arbitrarily many integers in the other blocks), then $\kappa(\pi,\varphi)=0$. Indeed, we can then use the same trick as above to replace in the computation of the joint Laplace transform the family $\bar{\imath}^k$ by a family of indices $\bar{\jmath}^k$ which are all distinct and which are not shared by the other families $\bar{\imath}^{a \neq k}$. We call such a set partition $\pi$ \emph{homogeneously vanishing}. \end{remark}} In the homogeneous case, the variance $ \mathrm{var}(S_{n}(\varphi,\mathscr{X}))$ is a polynomial function of degree at most $2(p-1)$. We have \begin{align*} \kappa^{(2)}(S_n(\varphi,\mathscr{X})) = \sum\limits_{\substack{\pi \in \mathfrak{Q}(2p) \\ \ell(\pi) \leq 2(p-1)}}\kappa(\pi,\varphi) \,n^{\downarrow \ell(\pi)}. \end{align*} By using the previous remark, we can identify the set partitions with $\ell(\pi)=2(p-1)$ and $\kappa(\pi,\varphi) \neq 0$. For $1 \leq k_1,l_1,k_2,l_2 \leq p$ with $k_1 \neq k_2, l_1 \neq l_2$, we define the set partition \begin{align*} \pi_{k_1,l_1,k_2,l_2} = \{\{k_1,l_1+p\},\{k_2,l_2+p\}\} \cup \{ \{t\} \, ; \, t \in [\![1,2p]\!] \setminus \{k_1,k_2,l_1+p,l_2+p\} \}. \end{align*} Then we have the following equality (the bracket denotes the extraction of the coefficient of $n^{2(p-1)}$ in the polynomial in the variable $n$): \begin{equation} \kappa^{(2)}(S_n(\varphi,\mathscr{X}))[n^{2(p-1)}] = \sum\limits_{\substack{1 \leq k_1,l_1,k_2,l_2 \leq p \\ k_1 <k_2,\,\, l_1 \neq l_2}} \kappa(\pi_{k_1,l_1,k_2,l_2},\varphi) \vcentcolon= \sigma_{\text{hom}}^2.\label{eq:sigma_hom} \end{equation} \begin{prop}\label{prop:limit_cumulant} Suppose that $\sigma_{\text{hom}}^2 > 0$.
If $Y_n(\mathrm{var}phi,\mathscr{X}) = \frac{\Phi(\mathscr{X}_n) - \Phi(\mathscr{X})}{\sqrt{\mathrm{var}(\Phi(\mathscr{X}_n))}} $, then we have convergence of all the cumulants of these variables: for any $r \geq 1$, there exists $a_r \in \mathbb{R}$ such that $$\kappa^{(r)}(Y_n(\mathrm{var}phi,\mathscr{X})) {\longrightarrow}_{n \to +\infty } a_r .$$ \end{prop} \begin{proof} For $r=1$, $\kappa^{(r)}(Y_n(\mathrm{var}phi,\mathscr{X})) = 0$ and for $ r\geq 2$ \begin{align*} \kappa^{(r)}(Y_n(\mathrm{var}phi,\mathscr{X})) &= \kappa^{(r)}\left(\frac{\Phi(\mathscr{X}_n) - \Phi(\mathscr{X})}{\sqrt{\mathrm{var}(\Phi(\mathscr{X}_n))}}\right) \\ &= \kappa^{(r)}\left(\frac{S_{n}(\mathrm{var}phi,\mathscr{X}) - \mathbb{E}[S_{n}(\mathrm{var}phi,\mathscr{X})]}{\sqrt{\mathrm{var}(S_{n}(\mathrm{var}phi,\mathscr{X}))}}\right) = \frac{\kappa^{(r)}(S_{n}(\mathrm{var}phi,\mathscr{X}))}{\left( \mathrm{var}(S_{n}(\mathrm{var}phi,\mathscr{X}))\right)^{r/2}} \,. \end{align*} We know that for each $r \geq 2$, $\kappa^{(r)}(S_{n}(\mathrm{var}phi,\mathscr{X}))$ is a polynomial function of degree less than $(p-1)r$, according to Propositions~\ref{prop:polynomiality} and \ref{prop:vanish generic homogeneous}. We can write $\kappa^{(r)}(S_{n}(\mathrm{var}phi,\mathscr{X})) = V(n) = \sum_{i=0}^{(p-1)r} v_i n^i$ and $\mathrm{var}(S_{n}(\mathrm{var}phi,\mathscr{X})) = \kappa^{(2)}(S_{n}(\mathrm{var}phi,\mathscr{X})) = W(n) = \sum_{i=0}^{2(p-1)} w_i n^i$; the assumption $\sigma_{\text{hom}}^2>0$ amounts to $w_{2(p-1)} > 0$. So we have \begin{align*} \lim\limits_{n \to +\infty} \kappa^{(r)}(Y_n(\mathrm{var}phi,\mathscr{X})) &= \lim\limits_{n \to +\infty} \frac{v_{(p-1)r}\, n^{(p-1)r}}{ \left(w_{2(p-1)}\, n^{2(p-1)}\right)^{r/2}} = \frac{v_{(p-1)r}}{(w_{2(p-1)})^{r/2}} = a_r.\qedhere \end{align*} \end{proof} \PLM{The following theorem ensures that the $a_r$'s are not too large, so that we can sum them and obtain the Laplace transform of a limiting distribution of $Y_n(\mathrm{var}phi,\mathscr{X})$.} \begin{theo}\label{theo:bound_cumulant_homogeneous} In the case where $\mathscr{X}$ is a compact homogeneous space, we have for any $\mathrm{var}phi \in \mathscr{C}(\mathbb{R}^{\binom{p}{2}})$ and any $r\geq 2$ the upper bound $$|\kappa^{(r)}(S_n(\mathrm{var}phi,\mathscr{X}))| \leq (Ap^2)^r \,(2r)^{r-1} \, n^{(p-1)r}$$ with $A=\|\mathrm{var}phi\|_\infty$. \end{theo} \begin{proof} We are going to adapt the proof of the upper bound~\eqref{bound:cumulant} which can be found in \cite[Chapter 9]{feray2016mod}. We expand by multilinearity the cumulant and we start by controlling each term of the following sum: \begin{align*} \kappa^{(r)}(S_n(\mathrm{var}phi,\mathscr{X})) = \sum_{(\bar{\imath}^1,\dots,\bar{\imath}^r) \in V^r} \kappa\left(\mathrm{var}phi(d(X_{\bar{\imath}^1})),\dots,\mathrm{var}phi(d(X_{\bar{\imath}^r}))\right), \end{align*} with $V=[\![1,n]\!]^p$. With $A = \| \mathrm{var}phi\|_{\infty} $, Equation (9.9) in \cite{feray2016mod} gives \begin{align*} |\kappa\left(\mathrm{var}phi(d(X_{\bar{\imath}^1})),\dots,\mathrm{var}phi(d(X_{\bar{\imath}^r}))\right)| \leq A^r 2^{r-1}\, \mathrm{ST}(H_\pi), \end{align*} where $\pi = \mathrm{Sp}_n(\bar{\imath}^1,\ldots,\bar{\imath}^r)$ and $\mathrm{ST}(H_\pi)$ is the number of spanning trees of the multigraph $H_\pi$. Now, we have identified in a previous remark the cumulants $\kappa(\pi,\mathrm{var}phi)$ which vanish in the homogeneous case, so we can add this condition to the upper bound. 
Thereby, we have \begin{align*} |\kappa(X_{\bar{\imath}^1},\dots,X_{\bar{\imath}^r})| \leq A^r 2^{r-1}\, \mathrm{ST}(H_\pi)\, \mathbbm{1}_{\mathrm{NHV}(\pi)}, \end{align*} where $\mathrm{NHV}(\pi)$ is the condition "$\pi$ is not homogeneously vanishing". Summing over $V^r$, we get by using the triangle inequality \begin{align*} |\kappa^{(r)}(S_{n}(\mathrm{var}phi,\mathscr{X}))| &\leq A^r 2^{r-1} \sum_{\bar{\imath}^1 \in V} \left[ \sum_{(\bar{\imath}^2,\dots,\bar{\imath}^r) \in V^{r-1}}\mathrm{ST}(H_{\mathrm{Sp}_n(I)}) \,\mathbbm{1}_{\mathrm{NHV}(\mathrm{Sp}_n(I))} \right] \\ &\leq A^r 2^{r-1} \sum_{\bar{\imath}^1 \in V} \left[\sum_{T\text{ Cayley tree of size }r} \sum_{(\bar{\imath}^2,\dots,\bar{\imath}^r) \in V^{r-1}}\mathbbm{1}_{T \subset H_{\mathrm{Sp}_n(I)}} \,\mathbbm{1}_{\mathrm{NHV}(\mathrm{Sp}_n(I))} \right]. \end{align*} Now, we can bound the expression in the bracket by adapting the Lemma 9.3.5 in \cite{feray2016mod} to the homogeneous case. Indeed, let us fix a Cayley tree $T$ of size $r$ and an element $\bar{\imath}^1 \in V$. The lists $(\bar{\imath}^2,\dots,\bar{\imath}^r)$ which have a non-zero contribution in the sum $$\sum_{(\bar{\imath}^2,\dots,\bar{\imath}^r) \in V^{r-1}}\mathbbm{1}_{T \subset H_{\mathrm{Sp}_n(I)}} \,\mathbbm{1}_{\mathrm{NHV}(\mathrm{Sp}_n(I))}$$ are constructed as follows. We fix a vertex $k\neq 1$ of degree one (a leaf) in $T$, and we shall choose $\bar{\imath}^k$ at the end. Before that: \begin{itemize} \item We start by choosing the $\bar{\imath}^j$'s with $j$ neighbour of $1$ in $T$ and $j \neq k$. For each such family, $\bar{\imath}^1$ and $\bar{\imath}^j$ share at least one index, so the number of possibilities for $\bar{\imath}^j$ is smaller than $D_n=p^2\,n^{p-1}$. \item We pursue the construction with the neighbours of the neighbours of $1$, and so on but leaving always on the side the vertex $k$. Each time, there are at most $p^2\,n^{p-1}$ possibilities for $\bar{\imath}^j$. Moreover, as $k$ is a leaf of $T$, our inductive construction enumerates all the vertices in $[\![1,r]\!]$ but $k$. \end{itemize} We therefore have less than $(p^2\,n^{p-1})^{r-2}$ possibilities for $(\bar{\imath}^2,\dots,\bar{\imath}^r)\setminus \{\bar{\imath}^k\}$. We finally choose $\bar{\imath}^k$, using now the fact that if the list $(\bar{\imath}^2,\dots,\bar{\imath}^r)$ yields a non-zero contribution, then $\pi$ is not homogeneously vanishing and $\bar{\imath}^k$ must share at least two \emph{distinct} indices with other families $\bar{\imath}^a$ and $\bar{\imath}^b$ (we may have $a=b$). Consequently, there are less than $$p^4\,(r-1)\,n^{p-2}$$ possible values for $\bar{\imath}^k \in V$: one family $\bar{\imath}^a$ is obtained by taking the unique neighbour $a$ of $k$ in $T$, there are $(r-1)$ possibilities for the other family $\bar{\imath}^b$, then $p^4$ possibilities for the choices of positions of indices that are shared, and $n^{p-2}$ possibilities for the other indices in the family $\bar{\imath}^k$. 
So, $$\sum_{(\bar{\imath}^2,\dots,\bar{\imath}^r) \in V^{r-1}}\mathbbm{1}_{T \subset H_{\mathrm{Sp}_n(I)}} \,\mathbbm{1}_{\mathrm{NHV}(\mathrm{Sp}_n(I))} \leq (p^2\,n^{p-1})^{r-2}\,p^4\,(r-1)\,n^{p-2}\leq p^{2r}\,r\,n^{pr-p-r}.$$ As there are $r^{r-2}$ Cayley trees of size $r$, and $n^p$ possibilities for $\bar{\imath}^1$, we finally get the upper bound \begin{equation*} |\kappa^{(r)}(S_{n}(\varphi,\mathscr{X}))| \leq A^r\, 2^{r-1} \,r^{r-1}\,p^{2r}\, n^{(p-1)r}.\qedhere \end{equation*} \end{proof} \subsection{Central limit theorem for the homogeneous case} We can finally prove the analogue of Theorem \ref{theo:generic_case} when $\mathscr{X}$ is a compact homogeneous space. \begin{theo}[Fluctuations in the homogeneous case]\label{theo:singular_case} Let $\mathscr{X}$ be a compact homogeneous space, $\varphi \in \mathscr{C}(\mathbb{R}^{\binom{p}{2}})$ and $\Phi=\Phi^{p,\varphi}$. Suppose that $\sigma_{\text{hom}}^2(\varphi,\mathscr{X}) = \lim_{n \to \infty}\frac{\mathrm{var}(S_n(\varphi,\mathscr{X}))}{n^{2(p-1)}} > 0$; a combinatorial expansion of $\sigma_{\text{hom}}^2(\varphi,\mathscr{X})$ is provided by Equation \eqref{eq:sigma_hom}. Then, the sequence $$Y_n(\varphi,\mathscr{X}) = \frac{\Phi(\mathscr{X}_n) - \Phi(\mathscr{X})}{\sqrt{\mathrm{var}(\Phi(\mathscr{X}_n))}}$$ converges in distribution toward a real-valued random variable $Y(\varphi,\mathscr{X})$ whose cumulants are given by $$\kappa^{(r)}(Y(\varphi,\mathscr{X})) = a_r = \frac{1}{(\sigma_{\text{hom}})^{r}}\,\sum_{\substack{\pi \in \mathfrak{Q}(pr) \\ \ell(\pi) = (p-1)r \\ \pi \text{ not homogeneously vanishing}}} \kappa(\pi,\varphi),$$ where $\kappa(\pi,\varphi)$ is defined by Equation \eqref{eq:kappa_pi}. The law of the limit $Y(\varphi,\mathscr{X})$ is determined by these cumulants $(a_r)_{r \geq 1}$. Under the assumption $\sigma_{\text{hom}}(\varphi,\mathscr{X})>0$, the renormalisation $\sqrt{\mathrm{var}(\Phi(\mathscr{X}_n))}$ is of order $n^{-1}$; more precisely, $\mathrm{var}(\Phi(\mathscr{X}_n))$ is a polynomial in $n^{-1}$ without constant term and without term of order $n^{-1}$, and its leading term is $\sigma_{\text{hom}}^2(\varphi,\mathscr{X})\,n^{-2}$. \end{theo} \begin{proof} Theorem \ref{theo:bound_cumulant_homogeneous} shows that for any $n \in \mathbb{N}$, the log-Laplace transform $\log \mathbb{E}[\mathrm{e}^{z Y_n}]$ is absolutely convergent on a fixed disc of radius $R>0$, with $R$ independent of $n$. Indeed, denoting $\mathrm{var}(S_n) = (\sigma_{n,\text{hom}})^2\,n^{2(p-1)}$, we obtain by using Stirling's estimate \begin{align*} \sum_{r=2}^\infty \frac{|\kappa^{(r)}(Y_n)|}{r!} |z|^r \leq \sum_{r=2}^\infty \frac{(Ap^2\mathrm{e})^r (2r)^{r-1}}{r^r}\left(\frac{|z|}{\sigma_{n,\text{hom}}}\right)^r \leq \sum_{r=2}^\infty \left(\frac{2|z|Ap^2\mathrm{e}}{\sigma_{n,\text{hom}}}\right)^r. \end{align*} Since $\sigma_{n,\text{hom}} \to \sigma_{\text{hom}} >0$, we see that for $n$ large enough, if $$|z| \leq R = \frac{\sigma_{\text{hom}}}{10Ap^2},$$ then $\log \mathbb{E}[\mathrm{e}^{z Y_n}]$ is convergent and uniformly bounded on this disc.
Taking the exponentials, the same is true for the Laplace transforms $\mathbb{E}[\mathrm{e}^{z Y_n}]$, and by Proposition \ref{prop:limit_cumulant}, these holomorphic functions converge uniformly on $D(0,R)$ towards $$\exp\left(\sum_{r=2}^\infty \frac{a_r}{r!}\,z^r\right) = \lim_{n \to \infty} \mathbb{E}[\mathrm{e}^{zY_n}].$$ By standard arguments (see \cite[p.~390]{Bil95}), this implies the convergence in law towards a random variable $Y$ whose moment-generating function $\mathbb{E}[\mathrm{e}^{zY}]$ is the left-hand side of the equation above. Since this Laplace transform is convergent on a disc with positive radius, $Y$ is determined by its moments. \end{proof} Let us compare Theorems \ref{theo:generic_case} and \ref{theo:singular_case}. In the generic case, the variance of $\Phi(\mathscr{X}_n)$ is expected to be of order $$O\left(\frac{n^{2p-1}}{(n^p)^2}\right) = O\left(\frac{1}{n}\right),$$ so the fluctuations of $\Phi(\mathscr{X}_n)$ are usually of order $O(n^{-1/2})$, and asymptotically (mod-)Gaussian. By \emph{usually} we mean that one specific observable $\varphi$ might satisfy ``by chance'' $\sigma(\varphi,\mathscr{X})=0$, but this is in general not the case; and by Theorem \ref{theo:homogeneous} the vanishing of all these limiting variances is almost equivalent to $\mathscr{X}$ being compact homogeneous (the \emph{almost} is related to the replacement of Condition \eqref{eq:singular_case} by the simpler Condition \eqref{eq:cov_vanish}; they might be equivalent). In the homogeneous case, the variance of $\Phi(\mathscr{X}_n)$ is expected to be of order $$O\left(\frac{n^{2p-2}}{(n^p)^2}\right) = O\left(\frac{1}{n^2}\right),$$ so the fluctuations of $\Phi(\mathscr{X}_n)$ are now of order $O(n^{-1})$. What remains to be seen is that our estimates on cumulants in the homogeneous case are in a sense optimal: we have the best possible upper bound for these cumulants, and in particular we can have $a_{r \geq 3} \neq 0$, whence a non-Gaussian limiting distribution. The last section of the paper is devoted to the analysis of one such example. \subsection{Concentration inequalities} Since the cumulant estimate from Theorem \ref{theo:bound_cumulant_homogeneous} holds for any $n$, we can use it in combination with Chernoff's inequality in order to obtain: \begin{prop}\label{prop:chernoff} Let $\mathscr{X}$ be a compact homogeneous space and $\varphi \in \mathscr{C}_b(\mathbb{R}^{\binom{p}{2}})$; we set $A =\|\varphi\|_\infty$ and we suppose that $\sigma_{\mathrm{hom}}^2(\varphi,\mathscr{X})>0$. We denote as above $\sigma_{n,\mathrm{hom}}^2(\varphi,\mathscr{X}) = \frac{\mathrm{var}(S_n(\varphi,\mathscr{X}))}{n^{2(p-1)}}$, and $$q_n = \frac{2Ap^2}{\sigma_{n,\mathrm{hom}}} \geq 1.$$ For any $x \geq 0$ and any $n$, $$\mathbb{P}\!\left[|Y_n(\varphi,\mathscr{X})|\geq \frac{q_n x}{\mathrm{e}}\right] \leq 2\,\exp\left(\frac{\log(1+x)-x}{\mathrm{e}^2}\right).$$ The same estimate holds with $Y_n(\varphi,\mathscr{X})$ replaced by its limit in distribution $Y(\varphi,\mathscr{X})$, and $q_n$ replaced by its limit $q$. \end{prop} \begin{proof} Note that the case $r=2$ of Theorem \ref{theo:bound_cumulant_homogeneous} yields $q_n = \frac{2Ap^2}{\sigma_{n,\mathrm{hom}}} \geq 1$ for any $n$.
By Chernoff's inequality and by using Stirling's estimate to get rid of the factorials, we obtain for any $t,x \geq 0$ \begin{align*} \mathbb{P}[Y_n(\varphi,\mathscr{X}) \geq x] &\leq \exp\left(-tx + \sum_{r=2}^\infty \frac{|\kappa^{(r)}(S_n(\varphi,\mathscr{X}))|}{r!}\left(\frac{t}{\sigma_{n,\mathrm{hom}}\,n^{(p-1)}}\right)^{\!r}\right) \\ &\leq \exp\left(-tx + \frac{1}{\mathrm{e}^2}\sum_{r=2}^\infty \frac{1}{r}\,z^r\right) = \exp\left(-tx -\frac{1}{\mathrm{e}^2} \log(1-z) - \frac{z}{\mathrm{e}^2}\right) \end{align*} where $$z = \frac{2Ap^2}{\sigma_{n,\mathrm{hom}}}\,\mathrm{e}\, t=q_n\,\mathrm{e} \,t$$ is assumed to be strictly smaller than $1$, so that the power series on the second line is convergent. The optimal value of $t$ in terms of $x$ is given by the equations $$x= \frac{q_n^2 t}{1-q_n \mathrm{e} t} \qquad;\qquad z=\frac{q_n\mathrm{e} x}{q_n^2+q_n\mathrm{e} x}\qquad;\qquad t = \frac{x}{q_n^2 + q_n\mathrm{e} x}.$$ This choice of $t$ yields $$\mathbb{P}[Y_n(\varphi,\mathscr{X}) \geq x] \leq \exp\left(\frac{1}{\mathrm{e}^2}\left(\log\left(1+\frac{\mathrm{e} x}{q_n}\right)-\frac{\mathrm{e} x}{q_n}\right)\right).$$ We obtain a two-sided upper bound on the tail of the distribution of $|Y_n(\varphi,\mathscr{X})|$ by replacing $Y_n(\varphi,\mathscr{X})$ by $-Y_n(\varphi,\mathscr{X})$, which satisfies the same hypotheses. Finally, the same estimate holds with $Y_n(\varphi,\mathscr{X})$ replaced by its limit in distribution $Y(\varphi,\mathscr{X})$, since we have convergence in law and in moments. \end{proof} \begin{remark} One can wonder whether there exists in the homogeneous case a Berry--Esseen upper bound similar to the one from Theorem \ref{theo:generic_case}. We believe that the approach from \cite{feray2017mod} cannot be used here, for two reasons: \begin{itemize} \item The concentration inequality stated above is the only thing about the limiting distribution of the variables $Y_n(\varphi,\mathscr{X})$ that we are able to prove with the techniques of this paper. In particular, we do not know whether this limiting distribution is discrete or absolutely continuous with respect to the Lebesgue measure. This prohibits the use of the inequality from \cite[Chapter XVI, Equation (3.13)]{Fel71}, which is the starting point of the Fourier approach to Berry--Esseen bounds. \item Besides, we do not have a large \emph{zone of control} on the Fourier transform of $Y_n(\varphi,\mathscr{X})$ as in \cite{feray2017mod}; the upper bound on the cumulants yields an upper bound on the Fourier transform $\mathbb{E}[\mathrm{e}^{\mathrm{i} \xi Y_n(\varphi,\mathscr{X})}]$ on a zone of size $O(1)$, but it seems difficult to extend it to a larger zone, which is a requirement in order to obtain a meaningful upper bound on the Kolmogorov distance $d_{\mathrm{Kol}}(Y_n(\varphi,\mathscr{X}),Y(\varphi,\mathscr{X}))$. \end{itemize} The study of the Cauchy transform of the variables $Y_n(\varphi,\mathscr{X})$ (instead of the Fourier and Laplace transforms) might lead to a solution of the first problem.
\end{remark} \section{Sample model for the circle and a non-Gaussian limit}\label{sec:circle} Throughout this section, $\Phi$ is the observable of metric measure spaces with degree $3$ associated to the continuous bounded function $$\mathrm{var}phi(d(x,y,z)) = \min(1,d(x,y))\times \min(1,d(y,z)).$$ In particular, if $\mathscr{X}=\mathscr{X}ex$ is a metric measure space with diameter smaller than $1$, then $$\Phi(\mathscr{X}) = \int_{\mathcal{X}^3} d(x,y)\, d(y,z)\,\mu^{\otimes 3}(dx\,dy\,dz).$$ Let us consider the metric measure space $\mathcal{X} = \mathbb{R}/\mathbb{Z}$. For $x \in \mathbb{R}$, we denote $\overline{x}$ the class of $x$ modulo $1$. The space $\mathcal{X}$ is endowed with the geodesic distance $$d(\overline{x},\overline{y}) = \inf_{k \in \mathbb{Z}} |x-y-k|$$ and with the projection $\mu$ of the Lebesgue measure, which is a probability measure. It is obviously a compact homogeneous space in the sense of Section \ref{sec:homogeneous_case}, and even a compact Lie group. Therefore, by Theorem \ref{theo:singular_case}, if $\mathscr{X}_n$ is the sample model of order $n$ associated to this space, then $$Y_n(\mathrm{var}phi,\mathscr{X}) =\frac{\Phi(\mathscr{X}_n)-\Phi(\mathscr{X})}{\sqrt{\mathrm{var}(\Phi(\mathscr{X}_n))}}$$ converges towards a limiting distribution, assuming that $$ n^2\,\mathrm{var}(\Phi(\mathscr{X}_n)) = \frac{\mathrm{var}(S_n(\mathrm{var}phi,\mathscr{X}))}{n^4}$$ admits a strictly positive limit $\sigma^2_{\mathrm{hom}}$. The objective of this section is to prove that this limiting distribution indeed exists and \emph{is not} the Gaussian distribution. To this purpose, we shall compute the three first cumulants of $S_n(\mathrm{var}phi,\mathscr{X})$, and prove in particular that $\kappa^{(3)}(Y_n(\mathrm{var}phi,\mathscr{X}))$ admits a non-zero limit. \begin{remark} The observable that we have chosen is not the simplest counterexample to the asymptotic normality: we could have considered the degree $2$ observable $\mathrm{var}phi'(d(x,y)) = \min(1,d(x,y))$. Our choice of the degree $3$ observable $\mathrm{var}phi$ enables us to explain how to compute the moments and cumulants of a general observable $\Phi(\mathscr{X}_n)$ (we believe that the explanations are a bit clearer with an example larger than the smallest possible one). \end{remark} \subsection{Graph expansion of the moments of monomial observables} Let us consider in full generality the \emph{monomial observables} $M_G$ attached to multigraphs. Let $G$ be a (unoriented) graph on $p$ vertices $1,2,\ldots,p$, possibly with loops and with multiple edges. We associate to $G=(V,E)$ and to a metric measure space $\mathscr{X}=(\mathcal{X},d,\mu)$ the function \begin{align*} F_G : \mathcal{X}^p &\to \mathbb{R}_+ \\ (x_1,\ldots,x_p) &\mapsto \prod_{\{i,j\}\,=\, e \in E} \min(1,d(x_i,x_j)). \end{align*} For instance, the function $\mathrm{var}phi$ introduced above is $\mathrm{var}phi(d(x_1,x_2,x_3)) = F_G(x_1,x_2,x_3)$ with $$G = \begin{tikzpicture}[scale=1,baseline=-1.5mm] \draw (0,0) -- (2,0) ; \foreach \x in {0,1,2} {\fill [color=white] (\x,0) circle (0.2); \draw (\x,0) circle (0.2);} \draw (0,0) node {$1$}; \draw (1,0) node {$2$}; \draw (2,0) node {$3$}; \end{tikzpicture}.$$ We denote $M_G(\mathscr{X}) = \int_{\mathcal{X}^{p}} F_G(x_1,\ldots,x_p)\,\mu^{\otimes p}(dx_1\cdots dx_p)$. This quantity is a polynomial observable of $\mathscr{X}$, and it only depends on the unlabeled graph underlying $G$. 
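For readers who wish to experiment numerically, here is a minimal \texttt{Python} sketch (it plays no role in the proofs) which estimates a monomial observable $M_G(\mathbb{T})$ on the circle by Monte Carlo, a multigraph $G$ being encoded as a list of edges $\{i,j\}$ on the vertices $0,\ldots,p-1$.
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(0)

def circle_dist(x, y):
    """Geodesic distance on the circle T = R/Z."""
    t = np.abs(x - y) % 1.0
    return np.minimum(t, 1.0 - t)

def M_G_circle(edges, p, n_samples=500_000):
    """Monte Carlo estimate of M_G(T) = E[F_G(U_1,...,U_p)],
    where the U_i are independent and uniform on T."""
    pts = rng.random((n_samples, p))          # one row per sample, one column per vertex
    vals = np.ones(n_samples)
    for (i, j) in edges:                      # F_G = product over the edges of min(1, d)
        vals *= np.minimum(1.0, circle_dist(pts[:, i], pts[:, j]))
    return vals.mean()

# The path 1 -- 2 -- 3 defining varphi, and the double edge on 2 vertices.
print(M_G_circle([(0, 1), (1, 2)], p=3))      # approximately 0.0625 = 1/16
print(M_G_circle([(0, 1), (0, 1)], p=2))      # approximately 0.0833 = 1/12
\end{verbatim}
The exact values $1/16$ and $1/12$ quoted in the comments are the ones obtained further below by the reduction rules for $M_G(\mathbb{T})$.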
The following proposition relates these observables and the moments of the random functions $M_{G}(\mathscr{X}_n)$. \begin{prop}\label{prop:moments_monomial_observables} Fix a multigraph $G$ on $p$ vertices, and a metric measure space $\mathscr{X}$, with sample model $\mathscr{X}_n$ for all order $n$. For any $r\geq 1$, we have: $$ \mathbb{E}[(M_G(\mathscr{X}_n))^r] = \frac{1}{n^{pr}} \sum_{\pi \in \mathfrak{Q}(pr)} n^{\downarrow \ell(\pi)} \,M_{G^r \downarrow \pi}(\mathscr{X}),$$ where $G^r$ denotes the disjoint union of $r$ copies of $G$, and $G^r \downarrow \pi$ is the contraction of this graph according to a set partition $\pi$. \end{prop} By contraction of a multigraph $H$ according to a set partition $\pi$ of its vertex set $V$, we mean the multigraph $H \downarrow \pi$ whose vertices are the parts of $\pi$, and where every edge $\{a,b\}$ of the original graph $H$ becomes an edge between the parts $\pi(a)$ and $\pi(b)$ containing respectively $a$ and $b$ (and a loop if $\pi(a)=\pi(b)$). \begin{proof} By definition, if $(X_n)_{n \geq 1}$ is a sequence of independent variables distributed according to $\mu$ and with sequence of empirical measures $(\mu_n)_{n \geq 1}$, then \begin{align*} M_G(\mathscr{X}_n) &= \int_{\mathcal{X}^p} F_G(x_1,\ldots,x_p)\,(\mu_n)^{\otimes p}(dx_1\cdots dx_p) = \frac{1}{n^p} \sum_{1\leq i_1,\ldots,i_p \leq n} F_G(X_{i_1},\ldots,X_{i_p}). \end{align*} We denote as usual $\bar{\imath}$ an arbitrary family of $p$ indices $i_1,\ldots,i_p$. Given $I=(\bar{\imath}^1,\ldots,\bar{\imath}^r)$, if $\pi=\mathrm{Sp}_n(I)$ is the set partition of $[\![ 1,pr ]\!] = [\![1,p]\!]^r$ whose parts correspond to the sets of equal indices in $I$, then we have $$\prod_{a=1}^r F_G(X_{i^a_1},\ldots,X_{i^a_p}) =_{(\mathrm{distribution})} F_{G^r \downarrow \pi}(X_1,\ldots,X_{\ell(\pi)}).$$ Indeed, if one chooses for every part $\pi_c$ of $\pi$ an index $i^{a_c}_{b_c}$ falling in this part, then one has the identity $$\prod_{a=1}^r F_G(X_{i^a_1},\ldots,X_{i^a_p}) = F_{G^r \downarrow \pi}\!\left(X_{i^{a_1}_{b_1}},\ldots, X_{i^{a_{\ell(\pi)}}_{b_{\ell(\pi)}}}\right),$$ and the variables $X_{i^{a_c}_{b_c}}$ are all distinct by definition of $\pi$; the identity in distribution follows by a relabeling of these variables. We therefore have: \begin{align*} \mathbb{E}[(M_G(\mathscr{X}_n))^r] &=\frac{1}{n^{pr}} \sum_{I=(\bar{\imath}^1,\ldots,\bar{\imath}^r) \in [\![ 1,n]\!]^{pr}} \mathbb{E}[F_{G^r \downarrow \mathrm{Sp}_n(I)}(X_1,\ldots,X_{\ell(\mathrm{Sp}_n(I))})] \\ &= \frac{1}{n^{pr}} \sum_{I=(\bar{\imath}^1,\ldots,\bar{\imath}^r) \in [\![ 1,n ]\!]^{pr}} M_{G^r \downarrow \mathrm{Sp}_n(I)}(\mathscr{X}), \end{align*} and the result follows by gathering the list of indices $I$ according to their set partitions $\mathrm{Sp}_n(I)$. \end{proof} \begin{exa} Let $G$ be the graph on $3$ vertices previously introduced, and $r=2$. Note that if a graph $H= G^2 \downarrow \pi$ contains a loop, then the corresponding monomial $F_H$ vanishes on $\mathcal{X}^{|V(H)|}$. There are $203$ set-partitions of size $6$, but only $67$ of them yield a graph $H= G^2 \downarrow \pi$ without loop. Gathering these graphs according to their isomorphism types, we obtain: \begin{align*} &n^6\,\mathbb{E}[(M_{\begin{tikzpicture}[scale=0.3] \draw (0,0) -- (2,0); \foreach \x in {(0,0),(1,0),(2,0)} \fill \x circle (5pt); \end{tikzpicture}}(\mathscr{X}_n))^2]\\ & = 8\left( n^{\downarrow 4}\,M_{\begin{tikzpicture}[scale=0.3] \draw (2,-0.5) -- (1,0) -- (2,0.5); \draw (1,0) .. controls (0.7,0.3) and (0.3,0.3) .. 
(0,0) ; \draw (1,0) .. controls (0.7,-0.3) and (0.3,-0.3) .. (0,0) ; \foreach \x in {(0,0),(1,0),(2,0.5),(2,-0.5)} \fill \x circle (5pt); \end{tikzpicture}}(\mathscr{X}) +n^{\downarrow 4}\, M_{\begin{tikzpicture}[scale=0.3] \draw (3,0) -- (1,0); \draw (1,0) .. controls (0.7,0.3) and (0.3,0.3) .. (0,0) ; \draw (1,0) .. controls (0.7,-0.3) and (0.3,-0.3) .. (0,0) ; \foreach \x in {(0,0),(1,0),(2,0),(3,0)} \fill \x circle (5pt); \end{tikzpicture}}(\mathscr{X}) +n^{\downarrow 4}\, M_{\begin{tikzpicture}[scale=0.3] \draw (0,0.5) -- (0,-0.5) -- (1,0) -- (0,0.5); \draw (1,0) -- (2,0); \foreach \x in {(0,0.5),(0,-0.5),(1,0),(2,0)} \fill \x circle (5pt); \end{tikzpicture}}(\mathscr{X}) +n^{\downarrow 3}\, M_{\begin{tikzpicture}[scale=0.3] \draw (2,0) -- (0,0); \draw (1,0) .. controls (0.7,0.3) and (0.3,0.3) .. (0,0) ; \draw (1,0) .. controls (0.7,-0.3) and (0.3,-0.3) .. (0,0) ; \foreach \x in {(0,0),(1,0),(2,0)} \fill \x circle (5pt); \end{tikzpicture}}(\mathscr{X}) +n^{\downarrow 3}\, M_{\begin{tikzpicture}[scale=0.3] \draw (0,-0.5) -- (1,0) -- (0,0.5); \draw (0,0.5) .. controls (0.3,0.2) and (0.3,-0.2) .. (0,-0.5); \draw (0,0.5) .. controls (-0.3,0.2) and (-0.3,-0.2) .. (0,-0.5); \foreach \x in {(0,0.5),(0,-0.5),(1,0)} \fill \x circle (5pt); \end{tikzpicture}}(\mathscr{X}) \right) \\ &\quad +6\,n^{\downarrow 3}\, M_{\begin{tikzpicture}[scale=0.3] \draw (2,0) .. controls (1.7,-0.3) and (1.3,-0.3) .. (1,0) .. controls (0.7,0.3) and (0.3,0.3) .. (0,0) ; \draw (2,0) .. controls (1.7,0.3) and (1.3,0.3) .. (1,0) .. controls (0.7,-0.3) and (0.3,-0.3) .. (0,0) ; \foreach \x in {(0,0),(1,0),(2,0)} \fill \x circle (5pt); \end{tikzpicture}}(\mathscr{X}) + 4\left( n^{\downarrow 5}\,M_{\begin{tikzpicture}[scale=0.3] \draw (0,0) -- (4,0); \foreach \x in {(0,0),(1,0),(2,0),(3,0),(4,0)} \fill \x circle (5pt); \end{tikzpicture}}(\mathscr{X}) +n^{\downarrow 5}\, M_{\begin{tikzpicture}[scale=0.3] \draw (0,0) -- (2,0) -- (3,0.5); \draw (2,0) -- (3,-0.5); \foreach \x in {(0,0),(1,0),(2,0),(3,0.5),(3,-0.5)} \fill \x circle (5pt); \end{tikzpicture}}(\mathscr{X}) +n^{\downarrow 4}\,M_{\begin{tikzpicture}[scale=0.3] \draw (3,0) -- (2,0) .. controls (1.7,-0.3) and (1.3,-0.3) .. (1,0) -- (0,0); \draw (2,0) .. controls (1.7,0.3) and (1.3,0.3) .. (1,0); \foreach \x in {(0,0),(1,0),(2,0),(3,0)} \fill \x circle (5pt); \end{tikzpicture}}(\mathscr{X}) \right)\\ &\quad + 2\left( n^{\downarrow 5}\,M_{\begin{tikzpicture}[scale=0.3] \draw (0,0.5) -- (2,0.5); \draw (1,-0.5) .. controls (0.7,-0.8) and (0.3,-0.8) .. (0,-0.5); \draw (1,-0.5) .. controls (0.7,-0.2) and (0.3,-0.2) .. (0,-0.5); \foreach \x in {(0,0.5),(1,0.5),(2,0.5),(0,-0.5),(1,-0.5)} \fill \x circle (5pt); \end{tikzpicture}}(\mathscr{X}) + n^{\downarrow 4}\,M_{\begin{tikzpicture}[scale=0.3] \draw (0,-0.5) rectangle (1,0.5); \foreach \x in {(0,0.5),(1,0.5),(1,-0.5),(0,-0.5)} \fill \x circle (5pt); \end{tikzpicture}}(\mathscr{X}) +n^{\downarrow 2}\, M_{\begin{tikzpicture}[scale=0.3] \draw (0,0) .. controls (0.5,0.2) and (1,0.2) .. (1.5,0); \draw (0,0) .. controls (0.5,-0.2) and (1,-0.2) .. (1.5,0); \draw (0,0) .. controls (0.5,0.6) and (1,0.6) .. (1.5,0); \draw (0,0) .. controls (0.5,-0.6) and (1,-0.6) .. 
(1.5,0); \foreach \x in {(0,0),(1.5,0)} \fill \x circle (5pt); \end{tikzpicture}}(\mathscr{X}) \right)\\ &\quad + n^{\downarrow 6}\,M_{\begin{tikzpicture}[scale=0.3] \draw (0,0.5) -- (2,0.5); \draw (0,-0.5) -- (2,-0.5); \foreach \x in {(0,0.5),(1,0.5),(2,0.5),(0,-0.5),(1,-0.5),(2,-0.5)} \fill \x circle (5pt); \end{tikzpicture}}(\mathscr{X}) + n^{\downarrow 5}\,M_{\begin{tikzpicture}[scale=0.3] \draw (0,0) -- (1.5,0); \draw (0.75,0.75) -- (0.75,-0.75); \foreach \x in {(0,0),(1.5,0),(0.75,0),(0.75,0.75),(0.75,-0.75)} \fill \x circle (5pt); \end{tikzpicture}}(\mathscr{X}) + n^{\downarrow 4}\,M_{\begin{tikzpicture}[scale=0.3] \draw (1,-0.5) .. controls (0.7,-0.8) and (0.3,-0.8) .. (0,-0.5); \draw (1,-0.5) .. controls (0.7,-0.2) and (0.3,-0.2) .. (0,-0.5); \draw (1,0.5) .. controls (0.7,0.8) and (0.3,0.8) .. (0,0.5); \draw (1,0.5) .. controls (0.7,0.2) and (0.3,0.2) .. (0,0.5); \foreach \x in {(0,0.5),(1,0.5),(0,-0.5),(1,-0.5)} \fill \x circle (5pt); \end{tikzpicture}}(\mathscr{X}). \end{align*} \end{exa} \subsection{The three first limiting cumulants} Proposition \ref{prop:moments_monomial_observables} shows that if one can compute $M_G(\mathscr{X})$ for any graph $G$, then one can also compute the moments and cumulants of $M_{G}(\mathscr{X}_n)$ for any $n$ and any graph $G$. However, even in the easy case where $\mathscr{X}$ is the circle, it can be difficult to find the value of the integral \begin{align*} M_G(\mathbb{T}) &= \int_{[0,1]^{p}} \left(\prod_{\{a,b\} \in E(G)} d(\overline{x}_a,\overline{x}_b)\right) dx_1\cdots dx_p \\ &= \int_{[0,1]^{p}} \left(\prod_{\{a,b\} \in E(G)} \min(|x_a-x_b|,|x_a-x_b+1|,|x_a-x_b-1|)\right) dx_1\cdots dx_p. \end{align*} In the following, we compute the three first moments of $\Phi(\mathscr{X}_n) = M_{\begin{tikzpicture}[scale=0.3] \draw (0,0) -- (2,0); \foreach \x in {(0,0),(1,0),(2,0)} \fill \x circle (5pt); \end{tikzpicture}}(\mathscr{X}_n)$, and we explain in the specific case where $\mathcal{X} = \mathbb{R}/\mathbb{Z} =\mathbb{T}$ how to make some \emph{reductions} of the graphs $G$ that appear in our computation. We have of course $M_{\begin{tikzpicture}[scale=0.3] \fill (0,0) circle (5pt); \end{tikzpicture}}(\mathbb{T})=1$. Let us explain how to compute $M_G(\mathbb{T})$ when one can reduce $G$ to the trivial graph $\bullet$ by recursively deleting in $G$ the vertices with one or two neighbors: \begin{itemize} \item reduction of the vertices with one neighbor. If in the graph $G$ there is one vertex $x$ only connected to another vertex $y$, then we can factor in the integral $M_G(\mathbb{T})$ the term $$\int_{\mathbb{T}} (d(x,y))^a \,dx,$$ where $a\geq 1$ is the number of edges between $x$ and $y$. The integral above is equal to $$2\,\int_0^{\frac{1}{2}} t^a\,dt = \frac{1}{2^{a}(a+1)}.$$ Therefore, $$M_{\begin{tikzpicture}[scale=0.3] \draw (0.5,0) -- (2.5,0); \draw (1.5,0.35) node {\tiny $a$}; \fill (2.5,0) circle (5pt); \draw (0,0) node {$G$}; \end{tikzpicture}}(\mathbb{T}) = \frac{1}{2^a(a+1)}\,M_G(\mathbb{T}).$$ More generally, because the circle $\mathbb{T}$ is a homogeneous space, if the graph $G$ is not biconnected and can be written either as the disjoint union of two graphs $G_1$ and $G_2$, or as the union of two graphs $G_1$ and $G_2$ that only spare one vertex, then we have $M_G(\mathbb{T}) = M_{G_1}(\mathbb{T})\,M_{G_2}(\mathbb{T})$. \item reduction of the vertices with two neighbors. 
Suppose now that there is one vertex $x$ only connected to two other vertices $y$ and $z$, with $a\geq 1$ edges between $x$ and $y$ and $b \geq 1$ edges between $x$ and $z$. Note that this does not mean that one can split $G$ as the union of two biconnected components meeting at $x$ (consider for instance the case where $y$ and $z$ are themselves connected by an edge). We have \begin{align*} \int_{\mathbb{T}} (d(x,y))^a\,(d(x,z))^b\, dx&= \int_0^{D} t^a\,(D-t)^b\,dt + \int_0^{\frac{1}{2}-D} t^a\,(D+t)^b\,dt \\ &\quad+ \int_0^{\frac{1}{2}-D} (D+t)^a\,t^b\,dt +\int_{\frac{1}{2}-D}^{\frac{1}{2}} t^a\,(1-D-t)^b\,dt \end{align*} with $D = d(y,z)$. These four terms are polynomials in $D$: \begin{align*} \int_0^{D} t^a\,(D-t)^b\,dt &= \frac{a!\,b!}{(a+b+1)!}\,D^{a+b+1} ;\\ \int_0^{\frac{1}{2}-D} t^a\,(D+t)^b\,dt &= \sum_{j=0}^{a+b+1} \left(\sum_{k=0}^{\min(b,j)} \binom{b}{k}\binom{a+b+1-k}{a+b+1-j}\,\frac{(-1)^{j-k}}{2^{a+b+1-j}(a+b+1-k)} \right)D^{j} ;\end{align*} \begin{align*} \int_0^{\frac{1}{2}-D} (D+t)^a\,t^b\,dt &= \sum_{j=0}^{a+b+1} \left(\sum_{k=0}^{\min(a,j)} \binom{a}{k}\binom{a+b+1-k}{a+b+1-j}\,\frac{(-1)^{j-k}}{2^{a+b+1-j}(a+b+1-k)} \right)D^{j} ;\\ \int_{\frac{1}{2}-D}^{\frac{1}{2}} t^a\,(1-D-t)^b\,dt &= \frac{1}{2^{a+b}} \sum_{j=0}^{a+b} \left(\sum_{\substack{0\leq k \leq j \\ k\text{ even}}}\sum_{l=0}^k \binom{a}{k-l}\binom{b}{l}\binom{a+b-k}{a+b-j}\frac{(-1)^{j+l}}{k+1}\right)\,D^{j+1}. \end{align*} Therefore, if a graph $G$ contains a vertex $x$ with $a$ incident edges $(x,y)$, $b$ incident edges $(x,z)$ and no other incident edges, then we have the reduction formula \begin{align*} M_G(\mathbb{T}) &= \frac{a!\,b!}{(a+b+1)!}\,M_{(G \setminus \{x\}) + (y,z)^{a+b+1}}(\mathbb{T}) \\ &\quad+ \sum_{\substack{0\leq k \leq j \leq a+b+1}} \binom{a}{k} \binom{a+b+1-k}{a+b+1-j}\,\frac{(-1)^{j+k}}{2^{a+b+1-j}(a+b+1-k)} \,M_{(G \setminus \{x\}) + (y,z)^j}(\mathbb{T})\\ &\quad+ \sum_{\substack{0\leq k \leq j \leq a+b+1}} \binom{b}{k}\binom{a+b+1-k}{a+b+1-j}\,\frac{(-1)^{j+k}}{2^{a+b+1-j}(a+b+1-k)} \,M_{(G \setminus \{x\}) + (y,z)^j}(\mathbb{T})\\ &\quad + \frac{1}{2^{a+b}} \sum_{\substack{0\leq l \leq k\leq j \leq a+b \\ k \text{ even}}} \binom{a}{k-l}\binom{b}{l}\binom{a+b-k}{a+b-j}\frac{(-1)^{j+l}}{k+1}\,M_{(G \setminus \{x\}) + (y,z)^{j+1}}(\mathbb{T}), \end{align*} where $(G \setminus \{x\}) + (y,z)^j$ is the graph obtained from $G$ by first removing the vertex $x$ and its adjacent edges, and then adding $j$ new edges between $y$ and $z$. \end{itemize} This is already sufficient in order to compute the two first moments of $\Phi(\mathscr{X}_n)$: \begin{align*} n^3\,\mathbb{E}[\Phi(\mathscr{X}_n)] &= n^{\downarrow 3}\,M_{\begin{tikzpicture}[scale=0.3] \draw (0,0) -- (2,0); \foreach \x in {(0,0),(1,0),(2,0)} \fill \x circle (5pt); \end{tikzpicture}}(\mathbb{T}) + n^{\downarrow 2}\,M_{\begin{tikzpicture}[scale=0.3] \draw (1,0) .. controls (0.7,0.3) and (0.3,0.3) .. (0,0) ; \draw (1,0) .. controls (0.7,-0.3) and (0.3,-0.3) .. (0,0) ; \foreach \x in {(0,0),(1,0)} \fill \x circle (5pt); \end{tikzpicture}}(\mathbb{T}) = \frac{n^{\downarrow 3}}{16} + \frac{n^{\downarrow 2}}{12} = \frac{n^3}{16} - \frac{5n^2}{48} + \frac{n}{24};\\ n^6\,\mathbb{E}[(\Phi(\mathscr{X}_n))^2] &= \frac{n^6}{256} - \frac{5n^5}{384} + \frac{611n^4}{26880} - \frac{67n^3}{2688} + \frac{5n^2}{336} + \frac{n}{280}. \end{align*} Indeed, all the loopless graphs $G^r \downarrow \pi$ with $r \in \{1,2\}$ are reducible by one of the previous arguments. 
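As a sanity check, the exact formula for the first moment can be compared with a direct simulation of the sample model. The following \texttt{Python} sketch (an illustration only, with our own choice of the sample size $n=5$) uses the identity $\Phi(\mathscr{X}_n) = n^{-3}\sum_{j=1}^n \big(\sum_{i=1}^n d(x_i,x_j)\big)^2$, valid on the circle since all geodesic distances are at most $\frac{1}{2}$.
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(1)

def circle_dist_matrix(x):
    """Matrix of geodesic distances between the points of x on T = R/Z."""
    t = np.abs(x[:, None] - x[None, :]) % 1.0
    return np.minimum(t, 1.0 - t)

def phi_sample(n):
    """Phi(X_n) = n^{-3} sum_{i,j,k} d(x_i,x_j) d(x_j,x_k) for n uniform points on T."""
    D = circle_dist_matrix(rng.random(n))
    s = D.sum(axis=0)                    # s_j = sum_i d(x_i, x_j)
    return (s ** 2).sum() / n ** 3       # sum_j s_j^2 = sum_{i,j,k} d(x_i,x_j) d(x_j,x_k)

n, trials = 5, 200_000
mc_mean = np.mean([phi_sample(n) for _ in range(trials)])
exact = 1 / 16 - 5 / (48 * n) + 1 / (24 * n ** 2)
print(mc_mean, exact)                    # both approximately 0.0433
\end{verbatim}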
We obtain in particular the value of the variance: $$n^2\,\mathrm{var}(\Phi(\mathscr{X}_n)) = \frac{269}{40320} - \frac{131}{8064n} + \frac{53}{4032 n^2} - \frac{1}{280n^3}.$$ In particular, $\sigma_{\mathrm{hom}}^2 = \frac{269}{40320}$ is strictly positive. For the third moment, there are $22147$ set partitions of size $9$, and $6097$ of them yield a contracted graph $G^3 \downarrow \pi$ which is without loop. These $6097$ graphs fall into $131$ isomorphism types, and only one isomorphism type is not reducible with the aforementioned techniques: $$H = G^3 \downarrow \pi = K_4 = \begin{tikzpicture}[baseline=3mm,scale=1] \draw (1,1) -- (0,0) -- (0,1) -- (1,0) -- (1,1) -- (0,1); \draw (0,0) -- (1,0); \foreach \x in {(0,0),(0,1),(1,0),(1,1)} \fill \x circle (2pt); \end{tikzpicture}\,.$$ Let us explain how to compute $M_H(\mathbb{T})$. We need to compute the integral $$I = \int_{\mathbb{T}}d(w,x)\,d(w,y)\,d(w,z)\,dw.$$ The Fourier expansion of the distance function $d(x,y)$ with $x,y \in \mathbb{R}/\mathbb{Z}$ is $$d(x,y) = \frac{1}{4} - \sum_{\substack{k \in \mathbb{Z} \\ k \text{ odd}}} \frac{1}{k^2\pi^2}\,\mathrm{e}^{2\mathrm{i} \pi k (x-y)}.$$ If $\widetilde{d}(x,y) = \frac{1}{4}-d(x,y)$, then \begin{align*} I &= - \int_{\mathbb{T}}\left(\widetilde{d}(w,x)-\frac{1}{4}\right) \left(\widetilde{d}(w,y)-\frac{1}{4}\right) \left(\widetilde{d}(w,z)-\frac{1}{4}\right) dw \\ &= - \int_{\mathbb{T}} \widetilde{d}(w,x)\,\widetilde{d}(w,y)\,\widetilde{d}(w,z)\,dw + \frac{1}{4} (F(x,y) + F(x,z) + F(y,z))+ \frac{1}{64} \end{align*} where \begin{align*} F(x,y) &= \int_\mathbb{T} \widetilde{d}(w,x)\,\widetilde{d}(w,y)\,dw = \int_\mathbb{T} d(w,x)\,d(w,y)\,dw -\frac{1}{16} \\ &= \frac{2(d(x,y))^3}{3} - \frac{(d(x,y))^2}{2} + \frac{1}{48}. \end{align*} Now, the key observation is that $\int_\mathbb{T} \widetilde{d}(w,x)\,\widetilde{d}(w,y)\,\widetilde{d}(w,z)\,dw=0$. Indeed, by using the Fourier expansions of the distance functions, setting $C_{k \text{ odd}}=\frac{1}{k^2\pi^2}$, we see that this integral equals $$\sum_{k+l+m=0} C_kC_lC_m\, \mathrm{e}^{-2\mathrm{i} \pi (kx + ly + mz)}.$$ the sum running over odd integers $k$, $l$ and $m$. But then it is not possible to have $k+l+m=0$, whence the vanishing of the integral. As a consequence, $$M_{K_4}(\mathbb{T}) = \frac{1}{2} \,M_{\begin{tikzpicture}[scale=0.3] \draw (0,0.5) -- (1.5,0) -- (0,-0.5); \draw (0,0.5) .. controls (0.2,0.2) and (0.2,-0.2) .. (0,-0.5); \draw (0,0.5) .. controls (-0.2,0.2) and (-0.2,-0.2) .. (0,-0.5); \draw (0,0.5) .. controls (0.6,0.2) and (0.6,-0.2) .. (0,-0.5); \draw (0,0.5) .. controls (-0.6,0.2) and (-0.6,-0.2) .. (0,-0.5); \foreach \x in {(0,0.5),(0,-0.5),(1.5,0)} \fill \x circle (5pt); \end{tikzpicture}}(\mathbb{T}) - \frac{3}{8}\,M_{\begin{tikzpicture}[scale=0.3] \draw (0,0.5) -- (1.5,0) -- (0,-0.5); \draw (0,0.5) .. controls (0.2,0.2) and (0.2,-0.2) .. (0,-0.5); \draw (0,0.5) .. controls (-0.2,0.2) and (-0.2,-0.2) .. (0,-0.5); \draw (0,0.5) .. controls (-0.6,0.2) and (-0.6,-0.2) .. (0,-0.5); \foreach \x in {(0,0.5),(0,-0.5),(1.5,0)} \fill \x circle (5pt); \end{tikzpicture}}(\mathbb{T}) + \frac{1}{32} M_{\begin{tikzpicture}[scale=0.3] \draw (0,0.5) -- (1.5,0) -- (0,-0.5) -- (0,0.5); \foreach \x in {(0,0.5),(0,-0.5),(1.5,0)} \fill \x circle (5pt); \end{tikzpicture}}(\mathbb{T})= \frac{11}{71680},$$ all the graphs on the right-hand side being reducible. 
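The value of $M_{K_4}(\mathbb{T})$ can also be checked numerically; the following short \texttt{Python} sketch (again a mere illustration) estimates it by Monte Carlo and should return a value close to $11/71680 \simeq 1.53 \cdot 10^{-4}$.
\begin{verbatim}
import numpy as np
from itertools import combinations

rng = np.random.default_rng(2)

def circle_dist(x, y):
    t = np.abs(x - y) % 1.0
    return np.minimum(t, 1.0 - t)

def M_K4_circle(n_samples=2_000_000):
    """Monte Carlo estimate of M_{K_4}(T): expectation of the product of the
    six pairwise geodesic distances of four independent uniform points on T."""
    pts = rng.random((n_samples, 4))
    vals = np.ones(n_samples)
    for i, j in combinations(range(4), 2):
        vals *= circle_dist(pts[:, i], pts[:, j])
    return vals.mean()

print(M_K4_circle(), 11 / 71680)   # both approximately 1.53e-4
\end{verbatim}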
By using a computer algebra system, we then obtain \begin{align*} n^9\,\mathbb{E}[(\Phi(\mathscr{X}_n))^3] &= \frac{n^9}{4096} - \frac{5n^8}{4096} + \frac{541n^7}{143360} - \frac{5713619n^6}{638668800}+ \frac{61771n^5}{3801600} \\ &\quad - \frac{132443n^4}{6386688} + \frac{6367n^3}{380160} - \frac{150193n^2}{19958400} + \frac{2353n}{1663200}. \end{align*} This gives the third cumulant: $$ n^3\,\kappa^{(3)}(\Phi(\mathscr{X}_n)) = -\frac{42209}{39916800} + O\!\left(\frac{1}{n}\right).$$ Since the right-hand side does not vanish, we conclude that $\lim_{n \to \infty} \kappa^{(3)}(Y_{n}(\varphi,\mathscr{X})) \neq 0 $; therefore, the limiting distribution from Theorem \ref{theo:homogeneous} is not the standard normal distribution, and we have proved: \begin{prop} If $\mathscr{X}$ is the circle $\mathbb{R}/\mathbb{Z}$ endowed with its geodesic distance and with the projection of the Lebesgue measure, and if $\Phi=M_{\begin{tikzpicture}[scale=0.3] \draw (0,0) -- (2,0); \foreach \x in {(0,0),(1,0),(2,0)} \fill \x circle (5pt); \end{tikzpicture}}$, then the Gromov--Prohorov sample model yields a sequence of random variables $$\frac{\Phi(\mathscr{X}_n)-\Phi(\mathscr{X})}{\sqrt{\mathrm{var}(\Phi(\mathscr{X}_n))}}$$ which converges in distribution towards a law which is centered, with variance $1$ and with third cumulant equal to $$-\frac{168836}{44385}\sqrt{\frac{70}{269}}\simeq -1.94044;$$ in particular, this distribution is not the Gaussian distribution. \end{prop} \section{A statistical test for the symmetry of a compact Riemannian manifold}\label{sec:statistics} In this section, as an application of our results and in particular of the concentration inequality of Proposition \ref{prop:chernoff}, we construct a statistical test for the symmetry of a compact manifold. \noindent \textbf{Model.} We consider a compact Riemannian manifold $\mathcal{X}$; the distance between points of $\mathcal{X}$ is the geodesic distance, and the compactness ensures that for any pair of points $(x,y)$ in $\mathcal{X}$, there exists a geodesic curve of minimal length connecting $x$ to $y$ (see for instance \cite[Section 1.5]{Jost11}; this is even true for complete Riemannian manifolds, see \cite[Chapter I, Theorem 10.4]{Hel78}). The space $\mathcal{X}$ is equipped with the probability measure $\mu$ proportional to the Lebesgue measure induced by the Riemannian volume form $\omega$ of the manifold. An isometry of $\mathcal{X}$ always preserves the Riemannian structure and therefore the probability measure $\mu$ (this result is due to Myers and Steenrod \cite{MS39}; see also \cite[Chapter I, Theorem 11.1]{Hel78}). In other words, $\mathrm{Isomp}(\mathscr{X})=\mathrm{Isom}(\mathscr{X})$. This group of isometries endowed with the compact-open topology is always a compact Lie group, such that the action $G \times \mathcal{X} \to \mathcal{X}$ is a smooth map; see \cite[Chapter II, Theorems 1.1 and 1.2]{Kob72}. Therefore, the following assertions are equivalent: \begin{enumerate} \item The Riemannian manifold $\mathcal{X}$ is a compact homogeneous space (in the sense of the fourth item of Theorem \ref{theo:homogeneous}). \item The group of isometries $G=\mathrm{Isom}(\mathscr{X})$ acts transitively on $\mathcal{X}$. \end{enumerate} Our objective is to construct a statistical test for these conditions.
\noindent \textbf{Hypotheses and statistics.} The hypotheses of our test are: \begin{align*} \text{null hypothesis }H_0&: \,\,\,\text{the compact manifold $\mathscr{X}$ is homogeneous};\\ \text{alternative hypothesis }H_1&:\,\,\,\text{the compact manifold $\mathscr{X}$ is not homogeneous}. \end{align*} The allowed observations of $\mathscr{X}$ are the following: \begin{itemize} \item we can take independent random points $x_1,x_2,\ldots,x_n$ on $\mathcal{X}$, all these points being chosen according to the Lebesgue measure $\mu$; \item and we can measure all their inter-distances $d(x_i,x_j)$, $1 \leq i,j \leq n$. \end{itemize} Let us fix a function $\varphi \in \mathcal{C}_b(\mathbb{R}^{\binom{p}{2}})$ corresponding to a polynomial observable $\Phi=\Phi^{p,\varphi}$ of mm-spaces. A convenient choice is $$p=2\quad;\quad\varphi(d) = \min(1,d),$$ but other observables might yield more powerful tests; we shall discuss this in a moment. By Theorem \ref{theo:singular_case}, the random variable $\Phi(\mathscr{X}_n) = \frac{1}{n^p} \sum_{i_1,\ldots,i_p = 1}^n \varphi(d(x_{i_1},\ldots,x_{i_p}))$ has fluctuations of order $\frac{1}{n}$ under the hypothesis $H_0$, so a convenient statistic for testing this hypothesis would be $n\,|\Phi(\mathscr{X}_n)-\Phi(\mathscr{X})|$. As we do not know the value of $\Phi(\mathscr{X})$, we shall proceed a bit differently. Consider an independent copy $\mathscr{X}_n'$ of the discrete approximation of our mm-space $\mathscr{X}$, constructed from random points $x_1',x_2',\ldots,x_n'$ which are again independent, independent from $x_1,\ldots,x_n$, and distributed on $\mathcal{X}$ according to the normalised Lebesgue measure $\mu$. By the triangle inequality, the statistic \begin{equation} Z_n = n\,|\Phi(\mathscr{X}_n)-\Phi(\mathscr{X}_n')| \label{eq:statistics} \end{equation} is smaller than the sum of two independent random variables distributed like $n\,|\Phi(\mathscr{X}_n)-\Phi(\mathscr{X})|$, so given a large threshold $t_\alpha$, we should again have $Z_n \leq t_\alpha$ with large probability under $H_0$. We therefore choose $Z_n$ as our test statistic. \noindent \textbf{Estimates of probabilities and choice of the threshold.} By Proposition \ref{prop:chernoff}, under the hypothesis of symmetry $H_0$, if $A$ is an upper bound on $\|\varphi\|_\infty$ ($A=1$ if we consider the test function $\varphi(d) = \min(1,d)$), then $$\mathbb{P}_{H_0}\!\left[n\,|\Phi(\mathscr{X}_n)-\Phi(\mathscr{X})| \geq \frac{2Ap^2}{\mathrm{e}}\,x\right] \leq 2\,\exp\!\left(\frac{\log(1+x)-x}{\mathrm{e}^2}\right).$$ Notice that this is a non-asymptotic estimate, valid for any $n \geq 1$. It implies: \begin{align*} &\mathbb{P}_{H_0}\!\left[Z_n \geq \frac{4Ap^2}{\mathrm{e}}\,x\right]\\ &= \mathbb{P}_{H_0}\!\left[n\,|\Phi(\mathscr{X}_n)-\Phi(\mathscr{X}_n')| \geq \frac{4Ap^2}{\mathrm{e}}\,x\right]\\ &\leq \mathbb{P}_{H_0}\!\left[n\,|\Phi(\mathscr{X}_n)-\Phi(\mathscr{X})| \geq \frac{2Ap^2}{\mathrm{e}}\,x\right] + \mathbb{P}_{H_0}\!\left[n\,|\Phi(\mathscr{X}_n')-\Phi(\mathscr{X})| \geq \frac{2Ap^2}{\mathrm{e}}\,x\right]\\ &\leq 4\,\exp\!\left(\frac{\log(1+x)-x}{\mathrm{e}^2}\right)=F(x). \end{align*} The upper bound $F(x)$ is a strictly decreasing function of $x$ with $\lim_{x \to \infty} F(x) = 0$. Therefore, for every significance level $\alpha \in (0,1)$, there exists a unique $x_\alpha \in \mathbb{R}_+$ with $F(x_\alpha)=\alpha$.
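To make the procedure concrete, the following Python sketch (purely illustrative; the function names and the bisection tolerance are our own choices) inverts the bound $F$ numerically to obtain $x_\alpha$, forms the threshold $t_\alpha = \frac{4Ap^2}{\mathrm{e}}\,x_\alpha$ introduced below, and evaluates the statistic $Z_n$ for the standard choice $p=2$, $\varphi(d)=\min(1,d)$:
\begin{verbatim}
import math
import random

def F(x):
    return 4.0 * math.exp((math.log(1.0 + x) - x) / math.e**2)

def x_alpha(alpha, lo=0.0, hi=1e6, tol=1e-10):
    """Invert the strictly decreasing bound F by bisection."""
    while hi - lo > tol:
        mid = 0.5 * (lo + hi)
        if F(mid) > alpha:
            lo = mid
        else:
            hi = mid
    return 0.5 * (lo + hi)

def phi_hat(points, dist, varphi=lambda t: min(1.0, t)):
    """Phi(X_n) for p = 2: average of varphi(d(x_i, x_j)) over all pairs."""
    n = len(points)
    return sum(varphi(dist(points[i], points[j]))
               for i in range(n) for j in range(n)) / n**2

def test_symmetry(sample1, sample2, dist, alpha=0.05, A=1.0, p=2):
    n = len(sample1)
    Z_n = n * abs(phi_hat(sample1, dist) - phi_hat(sample2, dist))
    t_alpha = 4.0 * A * p**2 / math.e * x_alpha(alpha)
    return Z_n, t_alpha, Z_n >= t_alpha   # True means: reject H_0

# example on the circle R/Z with uniform points (H_0 holds)
circ = lambda x, y: min(abs(x - y) % 1.0, 1.0 - abs(x - y) % 1.0)
s1 = [random.random() for _ in range(200)]
s2 = [random.random() for _ in range(200)]
print(test_symmetry(s1, s2, circ))
\end{verbatim}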
\begin{center} \begin{tikzpicture}[xscale=0.1] \draw [smooth, domain=0:100, samples=500, very thick] plot (\x,{4*exp((ln(1+\x)-\x)/exp(2))}); \draw [->] (-2,0) -- (105,0); \draw [->] (0,-0.2) -- (0,4.5); \foreach \x in {0,1,2,3,4} {\draw (-5,\x) node {$\x$}; \draw (-2,\x) -- (0,\x);} \foreach \x in {0,25,50,75,100} {\draw (\x,-0.5) node {$\x$}; \draw (\x,-0.2) -- (\x,0);} \draw (0,4.9) node {$F(x)$}; \draw (109,0) node {$x$}; \draw (-5,0.3) node {$\alpha$}; \draw (-2,0.3) -- (0,0.3); \draw [dashed] (0,0.3) -- (22,0.3) -- (22,0); \draw (22,0) -- (22,-0.2); \draw (20,-0.4) node {$x_\alpha$}; \end{tikzpicture} \end{center} We set \begin{equation} t_\alpha = \frac{4Ap^2}{\mathrm{e}}\,x_\alpha = \frac{4Ap^2}{\mathrm{e}}\,F^{-1}(\alpha).\label{eq:threshold} \end{equation} \begin{prop}\label{prop:test_symmetry} For any $n \geq 1$ and any significance level $\alpha \in (0,1)$, the statistic $Z_n$ given by Equation \eqref{eq:statistics} and the threshold $t_\alpha$ given by Equation \eqref{eq:threshold} yield a test for the hypothesis of symmetry with level smaller than $\alpha$: $$\mathbb{P}_{H_0}[Z_n \geq t_\alpha] \leq \alpha.$$ \end{prop} \noindent \textbf{Power of the test and sample size.} Of course, the proposition above is only useful if we can also estimate the probability $\mathbb{P}_{H_1}[Z_n < t_\alpha]$ of an error of the second kind in this testing procedure, and make it reasonably small. For this purpose, we need to be a bit more precise about the alternative hypothesis $H_1$ (if $\mathscr{X}$ is very close to being homogeneous, then the probability of an error of the second kind will be large). A typical example which is solvable is the following. Suppose that we observe a manifold $\mathscr{X}=(\mathcal{X},d,\mu)$ isometric to the circle $\mathbb{R}/\mathbb{Z}$, and where $\frac{d\mu}{dx}=f(x)$ is some unknown density function with $\int_0^1 f(x)\,dx=1$. In this case, the hypotheses of our test for symmetry can be taken as follows: \begin{align*} H_0&:\,\,\,f \text{ is constant (and the space is homogeneous)};\\ H_1(\varepsilon)&:\,\,\,f(x)\,dx \text{ is at total variation distance larger than $\varepsilon$ from }dx \end{align*} for some $\varepsilon>0$. More generally, we can take for alternative hypothesis: \begin{align*} H_1':\,\,\,&\text{$\mathscr{X}$ belongs to a specific class of non-homogeneous compact manifolds}\\[-1mm] &\text{for which $\sigma^2(\varphi,\mathscr{X})$ can be computed}. \end{align*} Then, we can follow the steps below in order to compute the power of our test: \begin{enumerate} \item Compute a lower bound $\sigma^2_0$ on $\sigma^2(\varphi,\mathscr{X})$ for $\mathscr{X}$ described by $H_1'$. We assume that this lower bound is strictly positive (under $H_0$, $\sigma^2(\varphi,\mathscr{X})=0$ for any continuous bounded function $\varphi$). \item By Theorem \ref{theo:generic_case}, $\sqrt{n}\,(\Phi(\mathscr{X}_n) - \Phi(\mathscr{X}))$ converges to a centered normal distribution with variance $p^2\,\sigma^2(\varphi,\mathscr{X})$, so $$\frac{Z_n}{\sqrt{n}} \rightharpoonup_{n \to \infty} |\mathcal{N}(0,2p^2\,\sigma^2(\varphi,\mathscr{X}))|.$$ Moreover, the Kolmogorov distance between these two distributions is a $$O\!\left(\frac{A^3p}{\sigma^3(\varphi,\mathscr{X})\,\sqrt{n}}\right),$$ with a universal constant $C$ in the $O(\cdot)$ (an explicit value of $C$ can be computed readily from \cite[Corollary 30]{feray2017mod}).
Therefore, \begin{align*} \mathbb{P}[Z_n < t_\alpha] &= \mathbb{P}\!\left[\frac{Z_n}{\sqrt{n}} \leq \frac{t_\alpha}{\sqrt{n}}\right] \\ &\leq \frac{CA^3p}{\sigma^3(\varphi,\mathscr{X})\,\sqrt{n}} + \mathbb{P}\left[|\mathcal{N}(0,2p^2\,\sigma^2(\varphi,\mathscr{X}))|\leq \frac{t_\alpha}{\sqrt{n}}\right]\\ &\leq \frac{CA^3p}{\sigma^3(\varphi,\mathscr{X})\,\sqrt{n}} + \frac{C'A p\,F^{-1}(\alpha) }{\sigma(\varphi,\mathscr{X})\,\sqrt{n}} \end{align*} with $C'=\frac{4}{\mathrm{e}\sqrt{\pi}}$. \item By combining the two items above, we obtain: $$\mathbb{P}_{H_1'}[Z_n < t_\alpha] \leq \left(\frac{A^3}{\sigma_0^3} + \frac{A}{\sigma_0}\,F^{-1}(\alpha)\right)\frac{Kp}{\sqrt{n}}$$ for some universal constant $K$. \end{enumerate} So, we conclude: \begin{prop} Fix a significance level $\alpha$ and a threshold $t_\alpha$ as in Proposition \ref{prop:test_symmetry}. There exists a universal constant $K$ such that the test for symmetry $H_0/H_1'$ has power larger than $1-\beta$, with $$\beta = \left(\frac{A^3}{\sigma_0^3} + \frac{A}{\sigma_0}\,F^{-1}(\alpha)\right)\frac{Kp}{\sqrt{n}},$$ and where $A=\|\varphi\|_\infty$ and $\sigma_0^2$ is a lower bound on $\sigma^2(\varphi,\mathscr{X})$ under the alternative hypothesis $H_1'$. \end{prop} \noindent Therefore, once the observable $\varphi$ and the significance level $\alpha$ of the test for symmetry have been chosen, if one has a non-zero lower bound on the variances $\sigma^2(\varphi,\mathscr{X})$ under $H_1'$, then one can find a sample size $n$ in order to obtain an error of the second kind as small as desired. \begin{remark} Suppose that $p=2$ and that $\varphi(d) = \min(A,d)$ (standard choice of observable), where $A$ is an \emph{a priori} upper bound on the diameter of the space $\mathscr{X}$ to which we want to apply the test for symmetry. Then, $$\sigma^2(\varphi,\mathscr{X}) = \int_{\mathscr{X}^3} d(x,y)d(y,z)\, \mu^{\otimes 3}(dx\,dy\,dz) - \left(\int_{\mathscr{X}^2}d(x,y)\,\mu^{\otimes 2}(dx\,dy)\right)^2.$$ \end{remark} \begin{remark} One might need to choose $\varphi$ and the observable $\Phi^{p,\varphi}$ so as to ensure that one has a non-zero lower bound $\sigma_0^2$ under $H_1'$. Indeed, for a given non-homogeneous space $\mathscr{X}$, certain functions $\varphi$ might give ``by chance'' a vanishing parameter $\sigma^2(\varphi,\mathscr{X})$. Consequently, one might have to take an observable other than the one previously presented as the standard choice. \end{remark} \end{document}
\begin{document} \title{New Directions in Quantum Music: concepts for a quantum keyboard and the sound of the Ising model} \begin{abstract} We explore ideas for generating sounds, and eventually music, with quantum devices in the NISQ era using quantum circuits. In particular, we first consider a concept for a ``qeyboard'', i.e.\ a quantum keyboard, where the real-time behaviour of expectation values of a time-evolving quantum circuit can be associated to sound features like intensity, frequency and tone. Then, we examine how these properties can be extracted from physical quantum systems, taking the Ising model as an example. This can be realized by measuring physical quantities of the quantum states of the system, e.g.\ the energies and the magnetization obtained via variational quantum simulation techniques. \blfootnote{Chapter submitted for publication in the book “Quantum Computer Music”, edited by E. R. Miranda (Springer, 2022).} \end{abstract} \section{Introduction} With the current acceleration in the development and improvement of quantum technologies, it is conceivable that we shall witness an increasing influence of quantum ideas in everyday life, music included. Public availability and easy access to Noisy Intermediate-Scale Quantum (NISQ)~\cite{Preskill2018} devices have allowed users from different backgrounds (e.g.\ composers, software developers, video game designers) to experiment with them. Using principles of Quantum Mechanics to generate or manipulate music has been gaining popularity in recent years~\cite{putz_quantum_2017,miranda_quantum_2020}. In this chapter, we present two ideas. First, the Qeyboard, an attempt to turn a Quantum Computer into an instrument, allows the performer to exploit quantum effects such as superposition and entanglement. This opens up new avenues for live performances and musical compositions. The second idea converts the algorithmic process of simulating a quantum system into music, making it possible to hear the properties of the quantum system as it evolves. Both of these ideas are simple enough to be practically realizable with the current technology and little effort; they also allow for straightforward extensions and generalizations for later stages of this quantum revolution. \section{Qeyboard: some concepts for a real-time quantum keyboard} The idea of a \emph{quantum instrument}, i.e.\ a device capable of producing sounds as end-products of quantum mechanical processes instead of classical mechanical ones, is starting to take shape in the quantum music community~\cite{putz_quantum_2017}. The ``true'' nature of the real world is quantum (at least, according to our most accurate description of nature), so any classical musical instrument is already quantum at the fundamental level, but the extent of quantum effects (such as superposition, interference, entanglement) is usually obscured by the macroscopic and incoherent nature of the phenomena involved in the sound production. In order to harness the full potential of quantum processes in the NISQ era, in the following discussion we will focus on the quantum circuit model as a convenient representation for abstract wavefunctions: a generic quantum state is prepared as a sequence of gates acting on qubits starting from an initial fiducial state (usually the state with all qubits set to $0$).
Having prepared a specific wavefunction, one can then measure its properties, which can in turn be related to sound features to be classically synthesized, according to the pipeline diagram depicted in Fig.~\ref{fig:digital_qinstrument_structure}. \begin{figure} \caption{Schematic pipeline diagram of a digital quantum instrument. The dashed box represents the part involving quantum processes: $\mathcal{U}(t)$.} \label{fig:digital_qinstrument_structure} \end{figure} In the following sections, we will describe some of the possible choices for each step of this pipeline, namely: \begin{enumerate} \item real-time \textbf{interface} between the classical input and the evolution of the circuit $\mathcal{U}(t)$ (and therefore $\ket{\psi(t)}$); \item set of \textbf{measurements} on the state $\ket{\psi(t)}$ at each frame; \item map between measurements (real or binary valued) and sound \textbf{features}; \item \textbf{synthesis} of the final sound from different sound sources. \end{enumerate} \subsection{Real-time interface for evolving a dynamical parameterized quantum circuit} The circuit structure, described abstractly in Fig.~\ref{fig:digital_qinstrument_structure} as a multi-qubit unitary operator $\mathcal{U}(t)$, is time dependent according to the time dependence of the input data. In complete generality, the circuit represented by $\mathcal{U}(t)$ can include time-varying parameterized gates, as well as evolving topologies, where gates are added or removed at any time and at any point of the circuit. Notice also that here we are not making any assumption on the continuity or differentiability of $\mathcal{U}(t)$ with respect to the time parameter $t$: this makes possible abrupt changes in the properties of the wavefunction, which in turn allows one to model all possible shapes of ADSR envelopes (attack, decay, sustain and release). From the point of view of a human user, a good level of real-time manipulation of the circuit can be realized by pressing combinations of keys mapped to a finite set of single- and two-qubit gates, which are then added to the right end of the circuit. These gates can be parameterless (like Pauli, Hadamard, CNOT or SWAP gates) or parameterized like generic rotations; in turn, the parameters can be changed in real time using a slider (e.g.\ operated via mouse). Another possibility, which requires some more work in terms of interface but enhances the expressiveness of the manipulation, is to manage the whole circuit with a touch monitor where gates can be added or removed at specific points or their parameters changed. This can be realized by simple gestures or even with multiple simultaneous actions. If one is interested instead in machine-driven execution, the circuit dynamics can be represented by a predetermined ``quantum music sheet'', or some other form of input, which drives the circuit changes in full generality and without the limitations of a human user (limited pace and number of simultaneous actions). \subsection{Measurements} Here we discuss the second step in the pipeline of Fig.~\ref{fig:digital_qinstrument_structure}, namely the association between properties of the wavefunction produced by measurements on the circuit $\mathcal{U}(t)$ and properties of the sound. Notice that, in practice, it is not possible to run quantum circuits and measurements continuously in $t$, so we assume a reasonable sampling rate at discrete times $t_i$ which still allows us to capture the main features of the circuit dynamics without loss of expressibility.
The results of measurements can finally be interpolated during post-processing. \subsubsection{Playing qubits as quantum strings} In this paradigm, which we named ``quantum strings with counts to intensity'', we associate an oscillator with a specific frequency to each qubit register, while the corresponding sound intensity is determined by the average count of measurements with outcome $1$. As a concrete example, we can consider a $q=8$ qubit system associated to the major scale in the octave $C4$ to $C5$, so that a circuit could be visually mapped to staff lines and spaces. At every time $t$, one can make $n_{\text{shots}}$ measurements in all the qubit registers, which produces in general different states in the computational basis, collected in a dictionary containing the $0$-$1$ bitstring representation of each state and the number of times it has been observed: $\{"00\dots 00":c_{00\dots 00}, "00\dots 01":c_{00\dots 01}, \dots \}$. The intensity associated to the $n$-th quantum string would then be determined by the ratio between the sum of counts of states with the $n$-th qubit register set to $1$ and the total number of shots: \begin{equation} \mathcal{I}_n = \sum\limits_{\vec{s}\in \mathbb{Z}_2^q \vert s_n=1}\!\! \frac{c_{\vec{s}}}{n_{\text{shots}}} \; \; \in [0,1]. \end{equation} In this way, in the absence of noise and with a trivial circuit $\mathcal{U}(t_i)=I$, only the state with all qubits set to zero would be observed for every shot ($c_{\vec{0}}=n_{\text{shots}}$), so the sound would be silence. A generic (non-diagonal) circuit $\mathcal{U}(t_i)$ would instead be associated to a generic distribution of count ratios ${c^i_{\vec{s}}}/{n_{\text{shots}}}$. Notice that the addition of noise and a finite number of shots would always introduce fluctuations in the intensity associated to each quantum string at neighbouring times $t_i$ and $t_{i+1}$, even if the circuit does not change. \subsubsection{Expectation values to continuous sound properties} In this paradigm, the whole wavefunction is associated to different properties of the final synthesized sound. Unlike the previous approach, we will not associate single qubit registers to specific and predetermined frequencies, but instead we will characterize the properties of the sound at each time $t_i$ in terms of expectation values of Hermitian operators (observables). This idea can be implemented in a completely general way, but we will give a simple concrete example. Let us consider a two-qubit system and two Hermitian operators \begin{equation}\label{eq:hermop_simplexample} H_{f} \equiv \frac{1}{2} (I-\sigma^X) \otimes I,\qquad H_{i} \equiv \frac{1}{2} I \otimes (I-\sigma^Z). \end{equation} At any time $t$, after applying the circuit $\mathcal{U}(t)$, one would then estimate the expectation values of the observables in Eq.~\eqref{eq:hermop_simplexample}: $\expval{H_f}(t)\equiv \langle \psi(t) \lvert H_f \rvert \psi(t) \rangle$ and $\expval{H_i}(t)\equiv \langle \psi(t) \lvert H_i \rvert \psi(t) \rangle$, each of which takes values in the range $[0,1]$. Fixing a conventional frequency range $[f_{0},f_{1}]$, the value $\expval{H_f}(t)$ can be associated to continuous values of frequency in that range by the linear relation $f(t) = f_0 + (f_1-f_0) \expval{H_f}(t)$, while the intensity would simply be $i(t) = \expval{H_i}(t)$. These time-dependent frequency and intensity properties of a single sound will then be synthesized, as discussed in the next section.
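As an illustration of this mapping, the following small NumPy sketch (independent of any particular quantum software stack; the frequency band $[220,880]$ Hz is an arbitrary choice of ours) evaluates the expectation values of the operators of Eq.~\eqref{eq:hermop_simplexample} on a statevector and converts them into a frequency and an intensity:
\begin{verbatim}
import numpy as np

I2 = np.eye(2)
X = np.array([[0, 1], [1, 0]], dtype=complex)
Z = np.array([[1, 0], [0, -1]], dtype=complex)

# Hermitian operators of Eq. (hermop_simplexample), on two qubits
H_f = np.kron(0.5 * (I2 - X), I2)
H_i = np.kron(I2, 0.5 * (I2 - Z))

def expval(op, psi):
    return float(np.real(np.conj(psi) @ op @ psi))

def sound_properties(psi, f0=220.0, f1=880.0):
    """Map <H_f> in [0,1] linearly to a frequency in [f0, f1]
    and use <H_i> directly as the intensity."""
    f = f0 + (f1 - f0) * expval(H_f, psi)
    i = expval(H_i, psi)
    return f, i

# |psi> = |0> (x) |1>: <H_f> = 1/2 (mid-range frequency), <H_i> = 1 (full intensity)
psi = np.kron(np.array([1, 0]), np.array([0, 1])).astype(complex)
print(sound_properties(psi))
\end{verbatim}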
In the specific example of Eq.~\eqref{eq:hermop_simplexample}, the two observables commute and it is intuitive how the sound can be manipulated by an appropriate rotation in the first or second qubit register. In general, one can associate pairs of Hermitian operators to the frequency and intensity properties for a certain number of sounds $N_S$; for example, one can build a set of observables by considering all the possible combinations of tensor products of $I$ and \begin{equation}\label{eq:projop} \Pi_j=\frac{1}{2}(I-\sigma^{j}), \end{equation} where $\sigma^{j}$ are the Pauli matrices and $\Pi_j$ is the projector onto the eigenstate of $\sigma^{j}$ with eigenvalue $-1$. This set of operators can in principle be used to make a tomography of the wavefunction $\psi(t)$ at any time $t$ in order to extract all possible information from it. This choice would give a wide range of expressivity, since the number of properties could scale as $4^{q}-1$, but we recommend that the user select just a few of them or a meaningful combination. We want to stress also that one could add more sophisticated sound properties, like tone, which would require a different preprocessing stage during synthesis. \subsection{Synthesis} In this section we will briefly describe how to process the collection of properties $\mathcal{P}(t_i) = {\{(f_s(t_i),i_s(t_i))\}}_{s=1}^{N_S}$ for each sound at any time $t_i$ collected during the measurement stage (previous section), and synthesize them in order to obtain a single waveform. This can be done using inharmonic additive synthesis~\cite{additivesynth}, since the properties of each sound source are generally time-dependent. First of all, an interpolation step must be performed in order to make the sound properties vary continuously with time, $\mathcal{P}(t)$. The interpolation can be linear or of higher order with some smoothing factor. The final waveform is then built as follows\footnote{The phase of an individual sound could be added as a further property, but this would not be perceptible to the listener's ear.} \begin{equation}\label{eq:synthesis} w(t) = \frac{1}{\mathcal{N}}\sum_{s=1}^{N_S} i_s(t) \sin\Big[2 \pi f_s(t) t \Big], \end{equation} where a global normalization $\mathcal{N}$ has been added to make the waveform vary between $-1$ and $1$, so as to avoid clipping effects. \begin{figure} \caption{Simple example of circuit synthesis of a single sound evolution as described in the text: measurements are taken every $0.1$ seconds, quantum noise is present and a smoothed interpolation is applied at the post-processing stage, with sampling rate $44100$ Hz.} \label{fig:sheet_class2} \end{figure} \begin{figure} \caption{Inharmonic synthesis of the waveform obtained by using Eq.~\eqref{eq:synthesis}.} \label{fig:sheet_class2_sgram} \end{figure} As a first example, we consider a three-qubit system, with the following observables associated to intensity and frequency (fixed for the whole run): \begin{align} H_i &= I \otimes I \otimes \Pi_Z \\ H_f &= \big( 2 \Pi_Z \otimes I + I \otimes \Pi_Z + I\otimes I \big) \otimes I, \end{align} where $\Pi_Z$ is the projection operator defined in Eq.~\eqref{eq:projop}. In this case, the intensity can be easily controlled by acting on the rightmost qubit register, for example by applying $\sigma^Y$ or $\sigma^X$, while the first two registers from the left are associated to a frequency, which can take values from $1$ to $4$.
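A possible realization of Eq.~\eqref{eq:synthesis}, with linear interpolation of the measured properties, is sketched below; the sampling rate and the normalization strategy are our own illustrative choices:
\begin{verbatim}
import numpy as np

def synthesize(times, freqs, intens, sr=44100):
    """Inharmonic additive synthesis of Eq. (synthesis).
    times:  measurement instants t_i (seconds)
    freqs:  array of shape (N_S, len(times)) with f_s(t_i)
    intens: array of shape (N_S, len(times)) with i_s(t_i)"""
    t = np.arange(times[0], times[-1], 1.0 / sr)
    w = np.zeros_like(t)
    for f_s, i_s in zip(freqs, intens):
        # linear interpolation of the discrete measurements
        f = np.interp(t, times, f_s)
        i = np.interp(t, times, i_s)
        w += i * np.sin(2 * np.pi * f * t)
    m = np.max(np.abs(w))
    return w / m if m > 0 else w   # global normalization, avoids clipping

# one sound whose frequency glides from 220 Hz to 440 Hz at full intensity
times = np.array([0.0, 1.0, 2.0])
wave = synthesize(times, freqs=[[220.0, 330.0, 440.0]], intens=[[1.0, 1.0, 1.0]])
\end{verbatim}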
\begin{figure} \caption{More complex synthesis for three sounds played simultaneously, with evolving frequencies and intensities, some smoothing applied, and synthesized with sampling rate $44100$ Hz.} \label{fig:sheet_test4} \end{figure} \begin{figure} \caption{Inharmonic synthesis of the waveform obtained by using Eq.~\eqref{eq:synthesis}.} \label{fig:sheet_test4_sgram} \end{figure} For example, applying the following circuit evolution: \begin{equation} \mathcal{U}(t) = \begin{cases} I\otimes I \otimes I, & \text{ if } t < 0.5 s \\ I \otimes I \otimes \sigma^X, & \text{ if } 0.5 s < t < 1.0 s \\ e^{-i\frac{\pi (t-1 s)}{6 s} \sigma^X \otimes \sigma^X } \otimes \sigma^X, & \text{ if } 1.0 s < t < 4.0 s \\ e^{-i\frac{\pi (7 s-t)}{6 s} \sigma^Y \otimes \sigma^Y } \otimes \sigma^X, & \text{ if } 4.0 s < t < 7.0 s \\ I \otimes I \otimes \sigma^X, & \text{ if } t > 7.0 s, \end{cases} \end{equation} the behaviour in Fig.~\ref{fig:sheet_class2} is produced, where measurements are interpolated and then synthesized as in Fig.~\ref{fig:sheet_class2_sgram}. Fig.~\ref{fig:sheet_test4} and~\ref{fig:sheet_test4_sgram} show another example of qeyboard dynamics, again for a three-qubit system, but with $6$ (not mutually commuting) Hermitian operators associated to intensities and frequencies for three sounds. The qeyboard-driven circuit evolution and the set of observables used in this case are more complicated, so we will not give the specific details of their generation here, but it should nevertheless be possible to appreciate the degree of customizability and expressivity which can be realized using this paradigm. \section{The sound of the Ising model} In this section we explore how to use physical systems to play quantum music. To this end we will employ the spectrum and other properties of a quantum system; here we consider the \textit{Ising model} as a convenient toy system. The energies of the spectrum will be used as frequencies, and other quantities such as the magnetization can be used for the intensities. These principles can actually be applied to many other physical systems, which would supply a very broad portfolio of sounds and eventually quantum music. The reason is that physical quantum systems can have very different properties, showing a variety of phases and corresponding phase transitions. The Ising model is a simple statistical-mechanical system that serves as a microscopic model for magnetism and exhibits a quantum phase transition from an unmagnetized to a magnetized phase. It consists of discrete two-valued variables that represent the two possible states ($+1$ or $-1$) of magnetic dipole moments, or ``spins''. These spins are defined on a lattice and they interact with their nearest neighbours. The Hamiltonian of the system has two terms, \begin{equation} H = - J\sum_{\langle i,j \rangle}\sigma^Z_i\sigma^Z_j -h \sum_{i}\sigma^X_i, \end{equation} where the first term describes the interaction between neighbouring spins: if $J>0$, neighbouring spins prefer to be aligned ($\uparrow\uparrow$ or $\downarrow\downarrow$), which denotes a \textit{ferromagnetic} behaviour. If $J<0$, the preferred combination is anti-aligned ($\uparrow\downarrow$), leading to an \textit{anti-ferromagnetic} behaviour. The second term represents the action of an external magnetic field with amplitude $h$, which gives an energy advantage to spins aligned with the magnetic field. If the value of $h$ is sufficiently large, i.e.
$h=O(1)$, the Ising model undergoes a phase transition where the fluctuations of the spins increase and interesting physics starts to happen. Here also the magnetization, defined in Sec.~\ref{magnetization}, decreases sharply as a function of the external magnetic field. It is the goal of this section to use the properties of the Ising system to also generate interesting quantum sounds and even quantum music. The idea is to use a variational approach, such as the Variational Quantum Deflation algorithm \cite{Higgott2019variationalquantum} (see Sec.~\ref{vqd}), to find pairs of eigenvalues and eigenvectors $(E_k,\ket{\psi_k})$, $k=0,1,\dots$, of the Ising Hamiltonian, and then convert the properties of the system into audible sounds. \subsubsection{Variational Quantum Algorithms}\label{vqd} The Variational Quantum Eigensolver \cite{Peruzzo2014} uses a variational technique to find the minimum eigenvalue of the Hamiltonian of a given system. An instance of VQE requires a trial state (ansatz) and a classical optimizer, as summarized in Fig.~\ref{fig:vqe_tab}. \begin{figure} \caption{Schematic procedure of the VQE algorithm. Source:~\cite{vqesource}.} \label{fig:vqe_tab} \end{figure} The ansatz is varied, via its set of parameters $\theta$, generating a state $\ket{\varPsi(\theta)}$ which allows one to measure the energy as the expectation value of the Hamiltonian, $\melem{\varPsi(\theta)}{H}{\varPsi(\theta)}$. The classical optimizer then gives back a new set of parameters for the next computation of the energy. This procedure is repeated until convergence to the true minimum of the expectation value is found. The VQE can also be generalized to compute excited states, for which the Variational Quantum Deflation (VQD) algorithm is used. The method has the following steps: \begin{enumerate} \item Apply the VQE method and obtain optimal parameters $\theta_0^*$ and an approximate ground state $\ket{\psi_0} \simeq \ket{\varPsi(\theta_0^*)}$. \item For the first excited state define a Hamiltonian: \begin{equation} H_1 = H + \beta \ket{\varPsi(\theta_0^*)} \bra{\varPsi(\theta_0^*)} \end{equation} where $\beta$ is a real-valued coefficient. \item Apply the VQE approach to find an approximate ground state of $H_1$. \item This procedure can be repeated for higher eigenstates. \end{enumerate} \subsection{How to play a quantum system} The aim of this section is to describe different possibilities for extracting sounds from a quantum physical system. By applying variational techniques we can get access to the observables of a quantum theory at the end of the minimization process and convert them to sounds. We can also measure the observables during the optimization itself and thus `play' quantum music while running the VQE or the VQD algorithms. Most of these techniques can be generalized to an arbitrary Hamiltonian, such as the one of Quantum Electrodynamics, or even more intricate systems from condensed matter or high energy physics. \subsubsection{Convert energy eigenvalues $E_k$ into frequencies} The first approach is to apply the VQD algorithm, compute the energy eigenvalues and convert them to audible frequencies. To this end a suitable interval of the energies is chosen for a given value of the coupling $h$. Using $h$ as a `time' variable, we can follow the behaviour of the corresponding frequencies and play them through an output device. Fig.~\ref{fig:eigsp} shows the dynamics of the whole energy spectrum (16 eigenvalues) and can be naturally interpreted as a spectrogram.
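For a small number of spins the same spectrum can be obtained by exact diagonalization, which gives a simple classical way to reproduce such a spectrogram; the sketch below (four spins on an open chain and an affine map of the energies to an audible band, both illustrative choices of ours) returns the $16$ frequencies for a given $h$:
\begin{verbatim}
import numpy as np

I2 = np.eye(2)
X = np.array([[0, 1], [1, 0]], dtype=float)
Z = np.array([[1, 0], [0, -1]], dtype=float)

def op_on(site, op, n):
    """Tensor product placing `op` on `site` of an n-qubit register."""
    mats = [op if k == site else I2 for k in range(n)]
    out = mats[0]
    for m in mats[1:]:
        out = np.kron(out, m)
    return out

def ising_hamiltonian(n=4, J=1.0, h=0.5):
    """Transverse-field Ising chain with open boundary conditions."""
    H = np.zeros((2**n, 2**n))
    for i in range(n - 1):
        H -= J * op_on(i, Z, n) @ op_on(i + 1, Z, n)
    for i in range(n):
        H -= h * op_on(i, X, n)
    return H

def spectrum_to_frequencies(h, n=4, f_min=200.0, f_max=2000.0):
    E = np.linalg.eigvalsh(ising_hamiltonian(n=n, h=h))   # 2**n energies
    return f_min + (f_max - f_min) * (E - E.min()) / (E.max() - E.min())

for h in (0.1, 1.0, 2.0):      # sweep h as the `time' variable
    print(h, spectrum_to_frequencies(h)[:4])
\end{verbatim}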
\begin{figure} \caption{The energy spectrum, interpreted as frequencies, with the external field amplitude $h$ as time, can be played as audible sounds.} \label{fig:eigsp} \end{figure} \subsubsection{Use the callback results} With this technique, the results for the ground state energy (or a generic $E_k$) are collected during the VQD minimization with the NFT optimizer~\cite{PhysRevResearch.2.043158}. The energies are now measured at each step of the optimization procedure and again converted into frequencies. As can be seen in Fig.~\ref{fig:eigs}, the highly oscillatory behaviour of the energy values can be translated into frequencies. These oscillations are typical of the NFT algorithm, but the detailed evolution depends on the physical quantum system under consideration. Playing the frequencies of this hybrid quantum/classical approach can lead to very interesting sounds. \begin{figure} \caption{Intermediate values during the optimization for the ground state ($E_0$) and first excited state ($E_1$) with the NFT optimizer.} \label{fig:eigs} \end{figure} \subsubsection{Exploring how the sound changes across the phase diagram} \label{magnetization} With this method we include the \textit{magnetization}, an observable measured in the ground state $\psi_0$ and defined as \begin{equation} M=\frac{1}{N}\sum_i \melem{\psi_0}{\sigma^Z_i}{\psi_0}. \label{eq:mag} \end{equation} In Fig.~\ref{fig:mh} we can see that for small $h$ the magnetization is equal to one; this corresponds to a \textit{ferromagnetic}\footnote{Materials with a strong magnetization in a magnetic field. They can turn into permanent magnets, i.e.\ have magnetization in the absence of an external field.} system. When $h$ increases, the magnetic term becomes more relevant and eventually the system reaches a \textit{paramagnetic}\footnote{Materials with a weak induced magnetization in a magnetic field, which disappears when the external field is removed.} behaviour, with $M\sim 0$. In particular, we can observe a quantum phase transition when $h\sim1$. \begin{figure} \caption{Phase transition of the Ising system. Varying the external magnetic field $h$, we go from a ferromagnetic phase to the paramagnetic phase by crossing a quantum phase transition. This can be clearly seen in the behaviour of the magnetization in Eq.~\protect\eqref{eq:mag}.} \label{fig:mh} \end{figure} The definition of the magnetization in Eq.~\eqref{eq:mag} can be generalized to higher excited states, \begin{equation} M_k=\frac{1}{N}\sum_i \melem{\psi_k}{\sigma^Z_i}{\psi_k} . \label{eq:magk} \end{equation} These eigenstate-dependent magnetizations can be related to the intensity of the sound of the corresponding frequency, defined by the $k$-th energy eigenvalue $E_k$. \section{Summary and outlook} In this chapter we have discussed two approaches to generate sounds through quantum devices based on the circuit model. The first idea is to use the quantum computer as an instrument. Here we use a real-time evolution and manipulate the quantum circuit through a (quantum-)keyboard. Measurements are performed during the time evolution in order to make it audible. The ideas that we described for a quantum instrument are open for customization by the user at different stages of the pipeline and allow for a high degree of flexibility. The second idea is to use quantum physical systems for generating sound and therefore to be able to actually listen to a true quantum system. Here we followed the approach of assigning the role of `time variable' to some parameter of the model under consideration.
In particular, in the case of the Ising model discussed here, the external magnetic field was chosen. Frequencies can then be computed from the energy eigenvalues and intensities from the magnetization measured in the corresponding eigenstates. In our opinion, using quantum systems to generate sound and eventually quantum music can lead to very interesting effects, since such quantum models often describe very complex phenomena and phase diagrams with intricate physical properties. In addition, they exhibit phase transitions where large fluctuations can occur with strong correlations. We believe that these characteristics of quantum systems can be harvested through the quantum-mechanical principles of superposition and entanglement, possibly even leading to new directions in music. Some resources (figures and sound files) for the examples that we discussed in the previous sections can be found in~\cite{qeybising}. Our first step towards generating quantum music presented in this chapter manipulates only frequencies and intensities, which can be obtained through measurements of specific Hamiltonians or observables such as the magnetization. Our approach can be generalized to also generate tones and even more complicated musical properties. \end{document}
\begin{document} \title{Approximate bi-criteria search by efficient representation of subsets of the Pareto-optimal frontier} \author{Boris Goldin, Oren Salzman\\ Technion---Israel Institute of Technology\\ [email protected], [email protected] } \maketitle \begin{abstract} We consider the bi-criteria shortest-path problem where we want to compute shortest paths on a graph that simultaneously balance two cost functions. While this problem has numerous applications, there is usually no path minimizing both cost functions simultaneously. Thus, we typically consider the set of paths where no path is strictly better than the others in both cost functions, a set called the Pareto-optimal frontier. Unfortunately, the size of this set may be exponential in the number of graph vertices and the general problem is NP-hard. While schemes to approximate this set exist, they may be slower than exact approaches when applied to relatively small instances, and running them on graphs with even a moderate number of nodes is often impractical. The crux of the problem lies in how to efficiently approximate the Pareto-optimal frontier. Our key insight is that the Pareto-optimal frontier can be approximated using \myemph{pairs} of paths. This simple observation allows us to run a best-first search while efficiently and effectively pruning away intermediate solutions in order to obtain an approximation of the Pareto frontier for any given approximation factor. We compared our approach with an adaptation of \algname{BOA$^*$}, the state-of-the-art algorithm for computing exact solutions to the bi-criteria shortest-path problem. Our experiments show that as the problem becomes harder, the speedup obtained becomes more pronounced. Specifically, on large roadmaps, when using an approximation factor of $10\%$ we obtain a speedup on the average running time of more than~$\times 19$. \end{abstract} \section{Introduction \& Related Work} \label{sec:intro} We consider the bi-criteria shortest-path problem, an extension of the classical (single-criteria) shortest-path problem where we are given a graph $G = (V,E)$ and each edge has two cost functions. Here, we are required to compute paths that balance the two cost functions. The well-studied problem~\cite{CP07} has numerous applications. For example, given a road network, the two cost functions can represent travel times and distances, and we may need to consider the set of paths that allow balancing these costs. Other applications include planning of power-transmission lines~\cite{bachmann2018multi} and planning how to transport hazardous material in order to balance between minimizing the travel distance and the risk of exposure for residents~\cite{bronfman2015maximin}. There usually is no path minimizing both cost functions simultaneously. Thus, we typically consider the set of paths where no path is strictly better than the others for both cost functions, a set called the \myemph{Pareto-optimal frontier}. Unfortunately, the problem is NP-hard~\cite{S87} as the size of the Pareto-optimal frontier may be exponential in $|V|$~\cite{Ehrgott05,breugem2017analysis} and even determining whether a path belongs to the Pareto-optimal frontier is NP-hard~\cite{PY00}. Existing methods either try to efficiently compute the Pareto-optimal frontier or to relax the problem and only compute an approximation of this set.
\paragraph{Efficient computation of the Pareto-optimal frontier.} To efficiently compute the Pareto-optimal frontier, adaptations of the celebrated \algname{A$^*$} algorithm~\cite{HNR68} were suggested. Stewart et al.~\cite{stewart1991multiobjective} introduced Multi-Objective A* (\algname{MOA$^*$}), which is a multiobjective extension of $\algname{A}^*$. The most notable difference between \algname{MOA$^*$} and $\algname{A}^*$ is in maintaining the Pareto-optimal frontier for intermediate vertices. This requires checking whether a path~$\pi$ is \myemph{dominated} by another path~$\tilde{\pi}$, namely, whether both of~$\tilde{\pi}$'s costs are smaller than~$\pi$'s costs. As these dominance checks are repeatedly performed, their time complexity plays a crucial role in the efficiency of such bi-criteria shortest-path algorithms. \algname{MOA$^*$} was later revised~\cite{de2005new,mandow2010multiobjective,pulido2015dimensionality}, with the most efficient variation, termed bi-Objective \algname{A$^*$} (\algname{BOA$^*$})~\cite{UYBZSK20}, allowing these operations to be computed in $O(1)$ time when a consistent heuristic is used.\footnote{A heuristic function is said to be consistent if its estimate is always less than or equal to the estimated distance from any neighbouring vertex to the goal, plus the cost of reaching that neighbour.} \paragraph{Approximating the Pareto-optimal frontier.} Initial methods for computing an approximation of the Pareto-optimal frontier were directed towards devising a Fully Polynomial Time Approximation Scheme\footnote{An FPTAS is an approximation scheme whose time complexity is polynomial in the input size and also polynomial in $1/\ensuremath{\varepsilon}\xspace$ where~$\ensuremath{\varepsilon}\xspace$ is the approximation factor.} (FPTAS)~\cite{V01}. Warburton~\cite{W87} proposed a method for finding an approximate Pareto-optimal solution to the problem for any degree of accuracy using scaling and rounding techniques. Perny and Spanjaard~\cite{perny2008near} presented another FPTAS given that a finite upper bound $L$ on the number of arcs of all solution-paths in the Pareto-frontier is known. This requirement was later relaxed~\cite{TZ09,breugem2017analysis} by partitioning the space of solutions into cells according to the approximation factor and, roughly speaking, taking only one solution in each grid cell. Unfortunately, the running times of FPTASs are typically polynomials of high degree, and hence they may be slower than exact approaches when applied to relatively-small instances, and running them on graphs with even a moderate number of nodes (e.g., $\approx 10,000$) is often impractical~\cite{breugem2017analysis}. A different approach to compute a subset of the Pareto-optimal frontier is to find all extreme supported non-dominated points (i.e., the extreme points on the convex hull of the Pareto-optimal set)~\cite{sedeno2015dijkstra}. Taking a different approach, Legriel et al.~\cite{LLCM10} suggest a method based on satisfiability/constraint solvers. Alternatively, a simple variation of \algname{MOA$^*$}, termed \algname{MOA$^*_\ensuremath{\varepsilon}\xspace$}, allows one to compute an approximation of the Pareto-optimal frontier by pruning intermediate paths that are approximately dominated by already-computed solutions~\cite{perny2008near}. However, as we will see, this prunes only a small subset of the paths that may be pruned. Finally, recent work~\cite{BC20} conducts a comprehensive computational study with an emphasis on multiple criteria.
Similar to the aforementioned FPTASs, their framework still partitions the space prior to running the algorithm. \paragraph{Key contribution.} To summarize, exact methods compute a solution set whose size is often exponential in the size of the input. While one would expect approximation algorithms to dramatically reduce computation times, in practice FPTASs are often slower than exact approaches because they partition the space of solutions into cells according to the approximation factor in advance. Alternative methods only prune paths that are approximately dominated by already-computed solutions. Our key insight is that we can efficiently partition the space of solutions into cells during the algorithm's execution (and not a-priori). This allows us to efficiently and effectively prune away intermediate solutions in order to obtain an approximation of the Pareto-optimal frontier for any given approximation factor $\ensuremath{\varepsilon}\xspace$ (this will be formalized in Sec.~\ref{sec:pdf}). This is achieved by running a best-first search on \myemph{path pairs} and not individual paths. Such path pairs represent a subset of the Pareto-optimal frontier such that any solution in this subset is approximately dominated by the two paths. Using concepts that draw inspiration from a recent search algorithm from the robotics literature~\cite{FuKSA19}, we propose Path-Pair \algname{A$^*$} (\algname{PP-A$^*$}). \algname{PP-A$^*$} dramatically reduces the computational complexity of the best-first search by merging path pairs while still ensuring that an approximation of the Pareto-optimal frontier is obtained for any desired approximation factor. For example, on a roadmap of roughly 1.5 million vertices, \algname{PP-A$^*$} approximates the Pareto-optimal frontier within a factor of $1\%$ in roughly 13 seconds on average on a commodity laptop. We compared our approach with an adaptation of \algname{BOA$^*$}~\cite{UYBZSK20}, the state-of-the-art algorithm for computing exact solutions to the bi-criteria shortest-path problem, which we term \algname{BOA$^*$}eps. \algname{BOA$^*$}eps computes near-optimal solutions by using the approach suggested in~\cite{perny2008near}. Our experiments show that as the problem becomes harder, the speedup that \algname{PP-A$^*$} may offer becomes more pronounced. Specifically, on the aforementioned roadmap and using an approximation factor of $10\%$, we obtain a speedup on the average running time of more than $\times 19$ and a maximal speedup of over~$\times 25$. \section{Problem Definition} \label{sec:pdf} Let $G = (V,E)$ be a graph, and let $c_1 : E \rightarrow \mathbb{R}$ and $c_2 : E \rightarrow \mathbb{R}$ be two cost functions defined over the graph edges. A path $\pi = v_1, \ldots, v_k$ is a sequence of vertices where consecutive vertices are connected by an edge. We extend the two cost functions to paths as follows: $$ c_1(\pi) = \sum_{i=1}^{k-1} c_1(v_i, v_{i+1}) \hspace{2mm} \text{ and } \hspace{2mm} c_2(\pi) = \sum_{i=1}^{k-1} c_2(v_i, v_{i+1}). $$ Unless stated otherwise, all paths start at the same specific vertex ${\ensuremath{v_{\rm{start}}}\xspace}$ and $\pi_u$ will denote a path to vertex $u$. \begin{defn}[Dominance] Let $\pi_u$ and $\tilde{\pi}_u$ be two paths to vertex $u$. We say that $\pi_u$ \myemph{weakly dominates}~$\tilde{\pi}_u$ if (i)~$c_1(\pi_u) \leq c_1(\tilde{\pi}_u)$ and (ii)~$c_2(\pi_u) \leq c_2(\tilde{\pi}_u)$.
We say that~$\pi_u$ \myemph{strictly dominates}~$\tilde{\pi}_u$ if (i)~$\pi_u$ {weakly dominates}~$\tilde{\pi}_u$ and (ii)~$c_1(\pi_u) < c_1(\tilde{\pi}_u)$ or~$c_2(\pi_u) < c_2(\tilde{\pi}_u)$. \end{defn} \begin{defn}[Approximate dominance] Let $\pi_u$ and $\tilde{\pi}_u$ be two paths to vertex $u$ and let $\ensuremath{\varepsilon}\xspace_1 \geq 0$ and $\ensuremath{\varepsilon}\xspace_2 \geq 0$ be two real values. We say that $\pi_u$ \myemph{$(\ensuremath{\varepsilon}\xspace_1,\ensuremath{\varepsilon}\xspace_2)$-dominates}~$\tilde{\pi}_u$ if (i)~$c_1(\pi_u) \leq (1 + \ensuremath{\varepsilon}\xspace_1) \cdot c_1(\tilde{\pi}_u)$ and (ii)~$c_2(\pi_u) \leq (1 + \ensuremath{\varepsilon}\xspace_2) \cdot c_2(\tilde{\pi}_u)$. When $\ensuremath{\varepsilon}\xspace_1 = \ensuremath{\varepsilon}\xspace_2$, we will sometimes say that $\pi_u$ \myemph{$(\ensuremath{\varepsilon}\xspace_1)$-dominates}~$\tilde{\pi}_u$ and call $\ensuremath{\varepsilon}\xspace_1$ the \myemph{approximation factor}. \end{defn} \begin{defn}[(approximate) Pareto-optimal frontier] The~\myemph{Pareto-optimal frontier} $\Pi_{u}$ of a vertex $u$ is a set of paths connecting ${\ensuremath{v_{\rm{start}}}\xspace}$ and~$u$ such that (i)~no path in $\Pi_{u}$ is strictly dominated by any other path from ${\ensuremath{v_{\rm{start}}}\xspace}$ to $u$ and (ii)~every path from ${\ensuremath{v_{\rm{start}}}\xspace}$ to $u$ is weakly dominated by a path in $\Pi_{u}$. Similarly, for $\ensuremath{\varepsilon}\xspace_1 \geq 0$ and $\ensuremath{\varepsilon}\xspace_2 \geq 0$ the \myemph{approximate Pareto-optimal frontier}\footnote{Our definition of an approximate Pareto-optimal frontier slightly differs from existing definitions~\cite{breugem2017analysis} which do not require that the approximate Pareto frontier is a subset of the Pareto-optimal frontier.} $\Pi_{u}(\ensuremath{\varepsilon}\xspace_1,\ensuremath{\varepsilon}\xspace_2) \subseteq \Pi_u$ is a subset of $u$'s Pareto frontier such that every path in $\Pi_{u}$ is $(\ensuremath{\varepsilon}\xspace_1,\ensuremath{\varepsilon}\xspace_2)$-dominated by a path in $\Pi_{u}(\ensuremath{\varepsilon}\xspace_1,\ensuremath{\varepsilon}\xspace_2)$. \end{defn} \noindent For brevity we will use the terms (approximate) Pareto frontier to refer to the (approximate) Pareto-optimal frontier. For a visualization of these notions, see Fig.~\ref{fig:dominance}. \begin{figure} \caption{(approximate) Dominance and (approximate) Pareto frontier. Given start and target vertices, we consider each path $\pi_u$ as a 2D point $(c_1(\pi_u), c_2(\pi_u))$ according to the two cost functions (points and squares). The sets of all possible paths dominated and approximately dominated by path $\pi_u$ are depicted in blue and green, respectively (for $\varepsilon_1 = \varepsilon_2 = 1$). The Pareto frontier $\Pi_u$ is the set of all black points that collectively dominate all other possible paths (squares in grey region). Finally, an approximate Pareto frontier $\Pi_u(1,1) = \{\pi_u, \tilde{\pi}_u\}$ is depicted.} \label{fig:dominance} \end{figure} We are now ready to formally define our search problems. \begin{prob}[Bi-criteria shortest path] \label{prob:1} Let $G$ be a graph, $c_1, c_2 : E \rightarrow \mathbb{R}$ two cost functions and ${\ensuremath{v_{\rm{start}}}\xspace}$ and~${\ensuremath{v_{\rm{goal}}}\xspace}$ be start and goal vertices, respectively. The \myemph{bi-criteria shortest path} problem calls for computing the Pareto frontier $\Pi_{{\ensuremath{v_{\rm{goal}}}\xspace}}$.
\end{prob} \begin{prob}[Bi-criteria approximate shortest path] \label{prob:2} Let $G$ be a graph, $c_1, c_2 : E \rightarrow \mathbb{R}$ two cost functions and ${\ensuremath{v_{\rm{start}}}\xspace}$ and~${\ensuremath{v_{\rm{goal}}}\xspace}$ be start and goal vertices, respectively. Given~$\ensuremath{\varepsilon}\xspace_1 \geq 0$ and $\ensuremath{\varepsilon}\xspace_2 \geq 0$, the \myemph{bi-criteria approximate shortest path} problem calls for computing an approximate Pareto frontier~$\Pi_{{\ensuremath{v_{\rm{goal}}}\xspace}}(\ensuremath{\varepsilon}\xspace_1, \ensuremath{\varepsilon}\xspace_2)$. \end{prob} \section{Algorithmic Background} \label{sec:background} In this section we describe two approaches to solve the bi-criteria shortest-path problem (Problem~\ref{prob:1}). At the risk of being tedious, we start with a brief review of best-first search algorithms, as both state-of-the-art bi-criteria shortest-path algorithms and ours rely heavily on this algorithmic framework. We note that the description of best-first search we present here can be optimized, but this version will allow us to better explain the more advanced algorithms. A best-first search algorithm (Alg.~\ref{alg:astar}) computes a shortest path from ${\ensuremath{v_{\rm{start}}}\xspace}$ to ${\ensuremath{v_{\rm{goal}}}\xspace}$ by maintaining a priority queue, called an OPEN list, that contains all the nodes that have not been expanded yet (line~\ref{alg:astar-line1}). Each node is associated with a path $\pi_u$ from ${\ensuremath{v_{\rm{start}}}\xspace}$ to some vertex $u\in V$ (by a slight abuse of notation we will use paths and nodes interchangeably, which will simplify the algorithms' descriptions in the next sections). This queue is ordered according to some cost function called the $f$-value of the node. For example, in Dijkstra and \algname{A$^*$}, this is the computed cost from~${\ensuremath{v_{\rm{start}}}\xspace}$ (also called its $g$-value) and the computed cost from~${\ensuremath{v_{\rm{start}}}\xspace}$ added to the heuristic estimate to reach~${\ensuremath{v_{\rm{goal}}}\xspace}$, respectively. At each iteration (lines~\ref{alg:astar-line3}-\ref{alg:astar-line13}), the algorithm extracts the most-promising node from OPEN (line~\ref{alg:astar-line3}) and checks if it has the potential to be a better solution than any found so far (line~\ref{alg:astar-line4}). If this is the case and we reached ${\ensuremath{v_{\rm{goal}}}\xspace}$, the solution set is updated (in single-criteria shortest path, once a solution is found, the search can be terminated). If not, we extend the path represented by this node to each of its neighbors (line~\ref{alg:astar-line10}). Again, we check if it has the potential to be a better solution than any found so far (line~\ref{alg:astar-line11}). If this is the case, it is added to the OPEN list. Different single-criteria search algorithms such as Dijkstra, \algname{A$^*$}, \algname{A$^*$}eps as well as bi-criteria search algorithms such as \algname{BOA$^*$} fall under this framework. They differ in how OPEN is ordered and how the different functions (highlighted in Alg.~\ref{alg:astar}) are implemented. \begin{algorithm}[t!]
\textbf{Input: ($G = (V,E), {\ensuremath{v_{\rm{start}}}\xspace}, {\ensuremath{v_{\rm{goal}}}\xspace}, \ldots$)} \begin{algorithmic}[1] \State {${\rm OPEN}\gets$ new node $\pi_{{\ensuremath{v_{\rm{start}}}\xspace}}$} \label{alg:astar-line1} \vspace{1mm} \While{${\rm{OPEN}} \neq \emptyset$} \State $\pi_u \gets$ \rm{OPEN{}.\func{extract\_min}}() \label{alg:astar-line3} \If {\func{is\_dominated}($\pi_u$) \label{alg:astar-line4}} \State {\textbf{continue}} \EndIf \vspace{1mm} \If {$u={\ensuremath{v_{\rm{goal}}}\xspace}$} \Comment{reached goal} \State {\func{merge\_to\_solutions}($\pi_u$, solutions)} \State {\textbf{continue}} \EndIf \vspace{1mm} \For{$e=(u,v) \in$ {neighbors}($u, G$)} \State $\pi_v \gets$ \func{extend}($\pi_u, e$) \label{alg:astar-line10} \If {\func{is\_dominated}($\pi_v $) \label{alg:astar-line11} } \State {\textbf{continue}} \EndIf \vspace{1mm} \State {\func{insert}($\pi_v , \rm{OPEN}$)} \label{alg:astar-line13} \EndFor \EndWhile \State{\textbf{return} all extreme paths in solutions} \end{algorithmic} \caption{Best First Search} \label{alg:astar} \end{algorithm} \paragraph*{Bi-Objective \algname{A$^*$} (\algname{BOA$^*$})} To efficiently solve Problem~\ref{prob:1}, bi-Objective \algname{A$^*$} (\algname{BOA$^*$}) runs a best-first search. The algorithm is endowed with two heuristic functions $h_1, h_2$ estimating the cost to reach ${\ensuremath{v_{\rm{goal}}}\xspace}$ from any vertex according to~$c_1$ and $c_2$, respectively. Here, we assume that these heuristic functions are admissible and consistent. This is key as the efficiency of \algname{BOA$^*$} relies on this assumption. Given a node $\pi_u$, we define $g_i(\pi_u)$ to be the computed distance according to $c_i$. It can be easily shown that in best-first search algorithms $g_i(\pi_u) = c_i(\pi_u)$. Additionally, we define $f_i(\pi_u) := g_i(\pi_u) + h_i(\pi_u)$. Although the cost and the $g$-value of a path can be used interchangeably, we will use the former to describe general properties of paths and the latter to describe algorithm operations. Nodes in OPEN are ordered lexicographically according to $(f_1, f_2)$, which concludes the description of how \func{extract\_min} and \func{insert} (lines~\ref{alg:astar-line3} and~\ref{alg:astar-line13}, respectively) are implemented. Domination checks, which are typically time-consuming in bi-criteria search algorithms, are implemented in $O(1)$ per node by maintaining for each vertex $u \in V$ the minimal cost to reach $u$ according to $c_2$ computed so far. This value is maintained in a map $g_2^{\rm min}: V \rightarrow \mathbb{R}$ which is initialized to~$\infty$ for each vertex. This allows us to implement the function \func{is\_dominated} for a node $\pi_u$ by testing if \begin{equation} \label{eq:d00} g_2(\pi_u) \geq g_2^{\rm min}(u) \text{ or } f_2(\pi_u) \geq g_2^{\rm min}({\ensuremath{v_{\rm{goal}}}\xspace}). \end{equation} The first test checks if the node is dominated by an already-extended node and replaces the CLOSED list typically used in \algname{A$^*$}-like algorithms. The second test checks if the node has the potential to reach the goal with a solution whose cost is not dominated by any existing solution. Finally, the function \func{merge\_to\_solutions} simply adds a newly-found solution to the solution set.
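To make the pruning mechanism concrete, here is a minimal Python sketch of such a best-first loop with the constant-time dominance tests of Eq.~\eqref{eq:d00}; the data structures and names are ours and only illustrate the idea, they do not reproduce the original implementation:
\begin{verbatim}
import heapq, itertools, math
from collections import defaultdict

def boa_star(succ, h1, h2, v_start, v_goal):
    """Minimal BOA*-style search; succ(u) yields triples (v, c1, c2)."""
    g2_min = defaultdict(lambda: math.inf)

    def is_dominated(u, g2, f2):
        # the two O(1) dominance tests of Eq. (eq:d00)
        return g2 >= g2_min[u] or f2 >= g2_min[v_goal]

    tie = itertools.count()
    start = (v_start, 0.0, 0.0, h1(v_start), h2(v_start))
    open_list = [((start[3], start[4]), next(tie), start)]   # (f1, f2) lexicographic
    solutions = []
    while open_list:
        _, _, (u, g1, g2, f1, f2) = heapq.heappop(open_list)
        if is_dominated(u, g2, f2):
            continue
        g2_min[u] = g2                      # replaces the CLOSED list
        if u == v_goal:
            solutions.append((g1, g2))      # a new Pareto-optimal solution
            continue
        for v, c1, c2 in succ(u):
            child = (v, g1 + c1, g2 + c2, g1 + c1 + h1(v), g2 + c2 + h2(v))
            if not is_dominated(v, child[2], child[4]):
                heapq.heappush(open_list,
                               ((child[3], child[4]), next(tie), child))
    return solutions
\end{verbatim}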
\paragraph{Computing the approximate Pareto frontier} Perny and Spanjaard~\cite{perny2008near} suggest computing an approximate Pareto frontier by endowing the algorithm with an approximation factor $\ensuremath{\varepsilon}\xspace$. When a node is popped from OPEN, we test if its $f$-value is $\ensuremath{\varepsilon}\xspace$-dominated by any solution that was already computed. While this algorithm was presented before \algname{BOA$^*$} and hence uses computationally-complex dominance checks, we can easily use this approach to adapt \algname{BOA$^*$} to compute an approximate Pareto frontier. This is done by replacing the dominance check in Eq.~\ref{eq:d00} with the test \begin{equation} \label{eq:d0} g_2(\pi_u) \geq g_2^{\rm min}(u) \text{ or } (1 + \ensuremath{\varepsilon}\xspace) \cdot f_2(\pi_u) \geq g_2^{\rm min}({\ensuremath{v_{\rm{goal}}}\xspace}). \end{equation} We call this algorithm \algname{BOA$^*$}eps. \section{Algorithmic Framework} \subsection{Preliminaries} Recall that (single-criteria) shortest-path algorithms such as \algname{A$^*$} find a solution by computing the shortest path to all nodes that have the potential to be on the shortest path to the goal (namely, those whose $f$-value is less than the current estimate of the cost to reach ${\ensuremath{v_{\rm{goal}}}\xspace}$). Similarly, bi-criteria search algorithms typically compute for each node the subset of the Pareto frontier that has the potential to be in~$\Pi_{{\ensuremath{v_{\rm{goal}}}\xspace}}$. Now, near-optimal (single-criteria) shortest-path algorithms such as \algname{A$^*$}eps~\cite{pearl1982studies} attempt to speed up this process by only \myemph{approximating} the shortest path to intermediate nodes. Similarly, we suggest constructing only an approximate Pareto frontier for intermediate nodes which, in turn, will allow us to dramatically reduce computation times. Looking at Fig.~\ref{fig:dominance}, one may suggest running an \algname{A$^*$}-like search in which, if a path $\pi_u$ on the Pareto frontier $\Pi_{u}$ of $u$ is approximately dominated by another path $\tilde{\pi}_u \in \Pi_{u}$, then $\pi_u$ is discarded. Unfortunately, this does not account for paths in~$\Pi_{u}$ that may have been approximately dominated by $\pi_u$ and hence discarded in previous iterations of the search. Existing methods use very conservative bounds to prune intermediate paths. For example, as stated in Sec.~\ref{sec:intro}, if a bound $L$ on the length of the longest path exists, we can use this strategy by replacing $(1 + \ensuremath{\varepsilon}\xspace)$ with $(1 + \ensuremath{\varepsilon}\xspace)^{1/L}$ to account for error propagation~\cite{perny2008near}. \begin{figure} \caption{The partial Pareto frontier ${\small \ensuremath{\rm{PPF}}\xspace}_{u}^{\pi_u^{\texttt{tl}}, \pi_u^{\texttt{br}}}$ defined by the two extreme paths $\pi_u^{\texttt{tl}}$ and $\pi_u^{\texttt{br}}$.} \label{fig:ppf} \end{figure} In contrast, we suggest a simple-yet-effective method to prune away approximately-dominated solutions using the notion of a partial Pareto frontier, which we now define. \begin{defn}[Partial Pareto frontier {\small \ensuremath{\rm{PPF}}\xspace}] Let $\pi_u^{\texttt{tl}}, \pi_u^{\texttt{br}} \in \Pi_{u}$ be two paths on the Pareto frontier of vertex $u$ such that $c_1(\pi_u^{\texttt{tl}}) < c_1 (\pi_u^{\texttt{br}})$ (here, \texttt{tl} and \texttt{br} are shorthand for ``top left'' and ``bottom right'', for reasons which will soon be clear).
Their \myemph{partial Pareto frontier} ${\small \ensuremath{\rm{PPF}}\xspace}_{u}^{\pi_u^{\texttt{tl}}, \pi_u^{\texttt{br}}} \subseteq \Pi_{u}$ is a subset of a Pareto frontier such that if $\pi_u \in \Pi_{u}$ and $c_1(\pi_u^{\texttt{tl}}) < c_1 ({\pi}_u) < c_1 (\pi_u^{\texttt{br}})$ then ${\pi}_u \in {\small \ensuremath{\rm{PPF}}\xspace}_{u}^{\pi_u^{\texttt{tl}}, \pi_u^{\texttt{br}}}$. The paths $\pi_u^{\texttt{tl}}, \pi_u^{\texttt{br}}$ are called the \myemph{extreme} paths of ${\small \ensuremath{\rm{PPF}}\xspace}_{u}^{\pi_u^{\texttt{tl}}, \pi_u^{\texttt{br}}}$. For a visualization, see Fig.~\ref{fig:ppf}. \end{defn} \begin{defn}[Bounded {\small \ensuremath{\rm{PPF}}\xspace}] A {partial Pareto frontier} ${\small \ensuremath{\rm{PPF}}\xspace}_{u}^{\pi_u^{\texttt{tl}}, \pi_u^{\texttt{br}}} \subseteq \Pi_{u}$ is \myemph{$(\ensuremath{\varepsilon}\xspace_1, \ensuremath{\varepsilon}\xspace_2)$-bounded} if $$ \ensuremath{\varepsilon}\xspace_1 \geq \frac{c_1(\pi_u^{\texttt{br}}) - c_1(\pi_u^{\texttt{tl}})}{c_1(\pi_u^{\texttt{tl}})} \text{ and } \ensuremath{\varepsilon}\xspace_2 \geq \frac{c_2(\pi_u^{\texttt{tl}}) - c_2(\pi_u^{\texttt{br}})}{c_2(\pi_u^{\texttt{br}})}. $$ \end{defn} \begin{lem} \label{lem:ppf-dom} If ${\small \ensuremath{\rm{PPF}}\xspace}_{u}^{\pi_u^{\texttt{tl}}, \pi_u^{\texttt{br}}}$ is an $(\ensuremath{\varepsilon}\xspace_1, \ensuremath{\varepsilon}\xspace_2)$-bounded partial Pareto frontier then any path in ${\small \ensuremath{\rm{PPF}}\xspace}_{u}^{\pi_u^{\texttt{tl}}, \pi_u^{\texttt{br}}}$ is $(\ensuremath{\varepsilon}\xspace_1, \ensuremath{\varepsilon}\xspace_2)$-dominated by both $\pi_u^{\texttt{tl}}$ and $\pi_u^{\texttt{br}}$. \end{lem} \begin{proof} Let $\pi_u \in {\small \ensuremath{\rm{PPF}}\xspace}_{u}^{\pi_u^{\texttt{tl}}, \pi_u^{\texttt{br}}}$. By definition, we have that $c_1(\pi_u^{\texttt{tl}}) < c_1 (\pi_u)$ and that $\ensuremath{\varepsilon}\xspace_1 \geq \frac{c_1(\pi_u^{\texttt{br}}) - c_1(\pi_u^{\texttt{tl}})}{c_1(\pi_u^{\texttt{tl}})}$. Thus, $$ c_1(\pi_u^{\texttt{br}}) \leq (1 + \ensuremath{\varepsilon}\xspace_1) \cdot c_1(\pi_u^{\texttt{tl}}) < (1 + \ensuremath{\varepsilon}\xspace_1) \cdot c_1(\pi_u). $$ As $c_2(\pi_u^{\texttt{br}}) < c_2(\pi_u)$, we have that $\pi_u^{\texttt{br}}$ approximately dominates $\pi_u$. Similarly, by definition, we have that $c_2 (\pi_u) > c_2 (\pi_u^{\texttt{br}})$ and that $\ensuremath{\varepsilon}\xspace_2 \geq \frac{c_2(\pi_u^{\texttt{tl}}) - c_2(\pi_u^{\texttt{br}})}{c_2(\pi_u^{\texttt{br}})}$. Thus, $$ c_2(\pi_u^{\texttt{tl}}) \leq (1 + \ensuremath{\varepsilon}\xspace_2) \cdot c_2(\pi_u^{\texttt{br}}) < (1 + \ensuremath{\varepsilon}\xspace_2) \cdot c_2(\pi_u). $$ As $c_1(\pi_u^{\texttt{tl}}) < c_1(\pi_u)$, we have that $\pi_u^{\texttt{tl}}$ approximately dominates $\pi_u$. \end{proof} \subsection{Algorithmic description} \begin{figure*} \caption{Operations on path pairs: extending a path pair by an edge \label{fig:extend} and merging two path pairs \label{fig:merge}.} \label{fig:operations} \end{figure*} In contrast to standard search algorithms, which incrementally construct shortest paths from ${\ensuremath{v_{\rm{start}}}\xspace}$ to the graph vertices, our algorithm will incrementally construct $(\ensuremath{\varepsilon}\xspace_1, \ensuremath{\varepsilon}\xspace_2)$-bounded partial Pareto frontiers. Lemma~\ref{lem:ppf-dom} suggests a method to efficiently represent and maintain these frontiers for any approximation factors $\ensuremath{\varepsilon}\xspace_1$ and $\ensuremath{\varepsilon}\xspace_2$.
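As a small illustration (with hypothetical costs, not taken from our benchmarks), suppose that $c_1(\pi_u^{\texttt{tl}}) = 10$, $c_2(\pi_u^{\texttt{tl}}) = 20$, $c_1(\pi_u^{\texttt{br}}) = 11$, and $c_2(\pi_u^{\texttt{br}}) = 18$. Then $$ \frac{c_1(\pi_u^{\texttt{br}}) - c_1(\pi_u^{\texttt{tl}})}{c_1(\pi_u^{\texttt{tl}})} = \frac{1}{10} = 0.1 \quad \text{and} \quad \frac{c_2(\pi_u^{\texttt{tl}}) - c_2(\pi_u^{\texttt{br}})}{c_2(\pi_u^{\texttt{br}})} = \frac{2}{18} \approx 0.11, $$ so the partial Pareto frontier bounded by these two paths is $(\ensuremath{\varepsilon}\xspace_1, \ensuremath{\varepsilon}\xspace_2)$-bounded whenever $\ensuremath{\varepsilon}\xspace_1 \geq 0.1$ and $\ensuremath{\varepsilon}\xspace_2 \geq 2/18$. By Lemma~\ref{lem:ppf-dom}, every path it contains is then approximately dominated by either extreme path, so it suffices to store only the two extreme paths.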
Specifically, for a vertex $u$, \algname{PP-A$^*$} will maintain \myemph{path pairs} corresponding to the extreme paths in partial Pareto frontiers. For each path pair $(\pi_u^{\texttt{tl}}, \pi_u^{\texttt{br}})$ we have that $c_1 (\pi_u^{\texttt{tl}}) \leq c_1(\pi_u^{\texttt{br}})$ and $c_2 (\pi_u^{\texttt{tl}}) \geq c_2(\pi_u^{\texttt{br}})$. Before we explain how path pairs will be used, let us define operations on path pairs: The first operation we consider is \myemph{extending} a path pair $(\pi_u^{\texttt{tl}}, \pi_u^{\texttt{br}})$ by an edge $e = (u, v)$, which simply corresponds to extending both $\pi_u^{\texttt{tl}}$ and $\pi_u^{\texttt{br}}$ by~$e$. The second operation we consider is \myemph{merging} two path pairs $(\pi_u^{\texttt{tl}}, \pi_u^{\texttt{br}})$ and $(\tilde{\pi}_u^{\texttt{tl}}, \tilde{\pi}_u^{\texttt{br}})$. This operation constructs a new path pair $(\hat{\pi}_u^{\texttt{tl}}, \hat{\pi}_u^{\texttt{br}})$ such that \begin{equation*} \hat{\pi}_u^{\texttt{tl}} = \begin{cases} \pi_u^{\texttt{tl}} &~\text{if } c_1(\pi_u^{\texttt{tl}}) \leq c_1(\tilde{\pi}_u^{\texttt{tl}}) \\ \tilde{\pi}_u^{\texttt{tl}} &~\text{if } c_1(\tilde{\pi}_u^{\texttt{tl}}) < c_1(\pi_u^{\texttt{tl}}), \end{cases} \end{equation*} and \begin{equation*} \hat{\pi}_u^{\texttt{br}} = \begin{cases} \pi_u^{\texttt{br}} &~\text{if } c_2(\pi_u^{\texttt{br}}) \leq c_2(\tilde{\pi}_u^{\texttt{br}}) \\ \tilde{\pi}_u^{\texttt{br}} &~\text{if } c_2(\tilde{\pi}_u^{\texttt{br}}) < c_2(\pi_u^{\texttt{br}}). \end{cases} \end{equation*} For a visualization, see Fig.~\ref{fig:operations}. We are finally ready to describe \algname{PP-A$^*$}, our algorithm for bi-criteria approximate shortest-path computation (Problem~\ref{prob:2}). We run a best-first search similar to Alg.~\ref{alg:astar}, but nodes are path pairs. We start with the trivial path pair $({\ensuremath{v_{\rm{start}}}\xspace}, {\ensuremath{v_{\rm{start}}}\xspace})$ and describe our algorithm by detailing the different functions highlighted in Alg.~\ref{alg:astar}. For each function, we describe what needs to be performed and how this can be efficiently implemented when consistent heuristics are used (see Sec.~\ref{sec:background}). Finally, the pseudocode of the algorithm is provided in Alg.~\ref{alg:is} with the efficient implementations provided in Alg.~\ref{alg:is_dominated}-\ref{alg:merge-IS}. \begin{algorithm}[t!]
\textbf{Input: ($G = (V,E), {\ensuremath{v_{\rm{start}}}\xspace}, {\ensuremath{v_{\rm{goal}}}\xspace}, c_1, c_2, h_1, h_2, \ensuremath{\varepsilon}\xspace_1, \ensuremath{\varepsilon}\xspace_2$)} \begin{algorithmic}[1] \State {solutions\_pp$\gets \emptyset$} \Comment{path pairs } \State {${\rm OPEN}\gets$ new path pair $({\ensuremath{v_{\rm{start}}}\xspace}, {\ensuremath{v_{\rm{start}}}\xspace})$} \vspace{1mm} \While{${\rm{OPEN}} \neq \emptyset$} \State $(\pi_u^{\texttt{tl}}, \pi_u^{\texttt{br}}) \gets$ \rm{OPEN{}.\func{extract\_min}}() \If {\func{is\_dominated\_\algname{PP-A$^*$}}($\pi_u^{\texttt{tl}}, \pi_u^{\texttt{br}}$)} \State {\textbf{continue}} \EndIf \vspace{1mm} \If {$u={\ensuremath{v_{\rm{goal}}}\xspace}$} \Comment{reached goal} \State {\func{merge\_to\_solutions\_\algname{PP-A$^*$}}($\pi_u^{\texttt{tl}}, \pi_u^{\texttt{br}}$, solutions\_pp)} \State {\textbf{continue}} \EndIf \vspace{1mm} \For{$e=(u,v) \in$ {neighbors}($u, G$)} \State $\left(\pi_v^{\texttt{tl}}, \pi_v^{\texttt{br}}\right) \gets$ \func{extend\_\algname{PP-A$^*$}}($(\pi_u^{\texttt{tl}}, \pi_u^{\texttt{br}}), e$) \If {\func{is\_dominated\_\algname{PP-A$^*$}}($\pi_v^{\texttt{tl}}, \pi_v^{\texttt{br}}$)} \State {\textbf{continue}} \EndIf \vspace{1mm} \State {\func{insert\_\algname{PP-A$^*$}}($(\pi_v^{\texttt{tl}}, \pi_v^{\texttt{br}}), \rm{OPEN}$)} \EndFor \EndWhile \State{solutions$\gets \emptyset$} \For{$(\pi_{\ensuremath{v_{\rm{goal}}}\xspace}^{\texttt{tl}}, \pi_{\ensuremath{v_{\rm{goal}}}\xspace}^{\texttt{br}}) \in$ solutions\_pp} \State solutions $\gets$ solutions $\cup\{ \pi_{\ensuremath{v_{\rm{goal}}}\xspace}^{\texttt{tl}} \}$ \EndFor \State{\textbf{return} solutions} \end{algorithmic} \caption{\algname{PP-A$^*$}} \label{alg:is} \end{algorithm} \paragraph{Ordering nodes in OPEN:} Recall that a node is a path pair $(\pi_u^{\texttt{tl}}, \pi_u^{\texttt{br}})$ and that each path $\pi$ has two $f$-values which correspond to the two cost functions and the two heuristic functions. Nodes are ordered lexicographically according to \begin{equation} \label{eq:lexi} \left( f_1(\pi_u^{\texttt{tl}}), f_2(\pi_u^{\texttt{br}}) \right). \end{equation} \paragraph{Domination checks:} Recall that there are two types of domination checks that we wish to perform: (i)~checking if a node is dominated by a node that was already expanded and (ii)~checking if a node has the potential to reach the goal with a solution whose cost is not dominated by any existing solution. In our setting, a path pair $\rm{PP}_u$ is dominated by another path pair $\tilde{\rm{PP}}_u$ if the partial Pareto frontier represented by $\rm{PP}_u$ is contained in the partial Pareto frontier represented by $\tilde{\rm{PP}}_u$ (see Fig.~\ref{fig:ppf-dominates}). We can efficiently test if $\rm{PP}_u = (\pi_u^{\texttt{tl}}, \pi_u^{\texttt{br}})$ is dominated by any path to $u$ found so far by checking if \begin{equation} \label{eq:d1} g_2({\pi}_u^{\texttt{br}}) \geq g_2^{\rm min}(u). \end{equation} This test is valid only because our heuristic functions are admissible and consistent and because of the way we order our OPEN list. \begin{figure} \caption{Testing dominance of partial Pareto frontiers using path pairs.
The partial Pareto frontier represented by the path pair $(\pi_u^{\texttt{tl}}, \pi_u^{\texttt{br}})$ is dominated when it is contained in the partial Pareto frontier represented by another path pair.} \label{fig:ppf-dominates} \end{figure} We now continue to describe how we test if a path pair has the potential to reach the goal with a solution whose cost is not dominated by any existing solution. Given a path pair $\rm{PP}_u = (\pi_u^{\texttt{tl}}, \pi_u^{\texttt{br}})$, a lower bound on the partial Pareto frontier at ${\ensuremath{v_{\rm{goal}}}\xspace}$ that can be attained via $\rm{PP}_u$ is obtained by adding the heuristic values to the costs of the two paths in $\rm{PP}_u$. Namely, we consider two paths $\pi_{{\ensuremath{v_{\rm{goal}}}\xspace}}^{\texttt{tl}}, \pi_{{\ensuremath{v_{\rm{goal}}}\xspace}}^{\texttt{br}}$ such that $ c_i(\pi_{{\ensuremath{v_{\rm{goal}}}\xspace}}^{\texttt{tl}}):= c_i(\pi_u^{\texttt{tl}}) + h_i(u) $ and $ c_i(\pi_{{\ensuremath{v_{\rm{goal}}}\xspace}}^{\texttt{br}}):= c_i(\pi_u^{\texttt{br}}) + h_i(u) $. Note that these paths may not be attainable and only serve as a lower bound on the partial Pareto frontier that can be obtained via~$\rm{PP}_u$. Now, if the partial Pareto frontier ${\small \ensuremath{\rm{PPF}}\xspace}_{{\ensuremath{v_{\rm{goal}}}\xspace}}^{\pi_{{\ensuremath{v_{\rm{goal}}}\xspace}}^{\texttt{tl}}, \pi_{{\ensuremath{v_{\rm{goal}}}\xspace}}^{\texttt{br}}}$ is contained in the union of the currently-computed partial Pareto frontiers at ${\ensuremath{v_{\rm{goal}}}\xspace}$, then $\rm{PP}_u$ is dominated. Similar to the previous dominance check, this can be efficiently implemented by testing if \begin{equation} \label{eq:d2} (1 + \ensuremath{\varepsilon}\xspace_2) \cdot f_2(\pi_u^{\texttt{br}}) \geq g_2^{\rm min}({\ensuremath{v_{\rm{goal}}}\xspace}). \end{equation} \paragraph{Inserting nodes in OPEN:} Recall that we want to use the notion of path pairs to represent a partial Pareto frontier. Key to the efficiency of our algorithm is to have every partial Pareto frontier as large as possible under the constraint that they are all $(\ensuremath{\varepsilon}\xspace_1, \ensuremath{\varepsilon}\xspace_2)$-bounded. Thus, when inserting a path pair $\rm{PP}_u$ into the OPEN list, we check if there exists a path pair $\rm{\tilde{PP}}_u$ such that $\rm{PP}_u$ and $\rm{\tilde{PP}}_u$ can be merged and the resultant path pair is still $(\ensuremath{\varepsilon}\xspace_1, \ensuremath{\varepsilon}\xspace_2)$-bounded. If this is the case, we remove $\rm{\tilde{PP}}_u$ and replace it with the merged path pair. \paragraph{Merging solutions:} Since we want to minimize the number of path pairs representing $\Pi_{\ensuremath{v_{\rm{goal}}}\xspace}(\ensuremath{\varepsilon}\xspace_1, \ensuremath{\varepsilon}\xspace_2)$, we suggest an optimization that operates similarly to node insertions. When a new path pair $\rm{PP}_{\ensuremath{v_{\rm{goal}}}\xspace}$ representing a partial Pareto frontier at ${\ensuremath{v_{\rm{goal}}}\xspace}$ is obtained, we test if there exists a path pair $\rm{\tilde{PP}}_{\ensuremath{v_{\rm{goal}}}\xspace}$ in the solution set such that $\rm{PP}_{\ensuremath{v_{\rm{goal}}}\xspace}$ and $\rm{\tilde{PP}}_{\ensuremath{v_{\rm{goal}}}\xspace}$ can be merged and the resultant path pair is still $(\ensuremath{\varepsilon}\xspace_1, \ensuremath{\varepsilon}\xspace_2)$-bounded. If this is the case, we remove $\rm{\tilde{PP}}_{\ensuremath{v_{\rm{goal}}}\xspace}$ and replace it with the merged path pair. \paragraph{Returning solutions:} Recall that our algorithm stores solutions as path pairs and not individual paths. To return an approximate Pareto frontier, we simply return one path in each path pair. Here, we arbitrarily choose to return $\pi_{\ensuremath{v_{\rm{goal}}}\xspace}^{\texttt{tl}}$ for each path pair $(\pi_{\ensuremath{v_{\rm{goal}}}\xspace}^{\texttt{tl}}, \pi_{\ensuremath{v_{\rm{goal}}}\xspace}^{\texttt{br}})$.
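To make these operations concrete, the following C++ sketch (ours and purely illustrative; the types \texttt{Path} and \texttt{PathPair} and the function names are hypothetical and do not necessarily match our actual implementation) shows the merge operation, the $(\ensuremath{\varepsilon}\xspace_1, \ensuremath{\varepsilon}\xspace_2)$-boundedness test, and the insertion logic of Alg.~\ref{alg:insertIS}. For simplicity, it scans a plain list of the path pairs stored for a single vertex and ignores how OPEN is ordered.
\begin{verbatim}
#include <vector>

// Only the two cost components are needed for the logic sketched here.
struct Path { double c1, c2; };

// A path pair: the two extreme paths of a partial Pareto frontier.
struct PathPair {
  Path tl;  // top-left extreme (smallest c1, largest c2)
  Path br;  // bottom-right extreme (largest c1, smallest c2)

  // (eps1, eps2)-boundedness of the PPF spanned by the two extremes.
  bool is_bounded(double eps1, double eps2) const {
    return br.c1 - tl.c1 <= eps1 * tl.c1 &&
           tl.c2 - br.c2 <= eps2 * br.c2;
  }
};

// Merging keeps the extreme with the smaller c1 as the new top-left and
// the extreme with the smaller c2 as the new bottom-right.
PathPair merge(const PathPair& a, const PathPair& b) {
  PathPair m;
  m.tl = (a.tl.c1 <= b.tl.c1) ? a.tl : b.tl;
  m.br = (a.br.c2 <= b.br.c2) ? a.br : b.br;
  return m;
}

// Insertion logic (cf. Alg. insert_PP-A*): try to merge the new pair with
// an existing pair at the same vertex; fall back to a plain insert if no
// merge stays (eps1, eps2)-bounded.
void insert_path_pair(std::vector<PathPair>& pairs_at_vertex, PathPair pp,
                      double eps1, double eps2) {
  for (auto& existing : pairs_at_vertex) {
    PathPair merged = merge(existing, pp);
    if (merged.is_bounded(eps1, eps2)) {
      existing = merged;  // replace the existing pair with the merged one
      return;
    }
  }
  pairs_at_vertex.push_back(pp);
}
\end{verbatim}
Merging in place keeps each stored path pair as large as possible while remaining $(\ensuremath{\varepsilon}\xspace_1, \ensuremath{\varepsilon}\xspace_2)$-bounded, which is what keeps the number of nodes in OPEN small.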
\begin{algorithm}[t!] \textbf{Input:} ($\rm{PP}_u = (\pi_u^{\texttt{tl}}, \pi_u^{\texttt{br}})$) \begin{algorithmic}[1] \If{$(1 + \ensuremath{\varepsilon}\xspace_2) \cdot f_2(\pi_u^{\texttt{br}}) \geq g_2^{\rm min} ({\ensuremath{v_{\rm{goal}}}\xspace})$} \State{\textbf{return} \texttt{true}} \Comment{dominated by solution} \EndIf \If{$g_2({\pi}_u^{\texttt{br}})\geq g_2^{\rm min}(u)$} \State{\textbf{return} \texttt{true}} \Comment{dominated by existing path pair} \EndIf \State{\textbf{return} \texttt{false}} \end{algorithmic} \caption{is\_dominated\_\algname{PP-A$^*$}} \label{alg:is_dominated} \end{algorithm} \begin{algorithm}[t!] \textbf{Input:} ($\rm{PP}_u = (\pi_u^{\texttt{tl}}, \pi_u^{\texttt{br}}) , e = (u,v)$) \begin{algorithmic}[1] \State{$\pi_v^{\texttt{tl}} \gets $\func{extend}($\pi_u^{\texttt{tl}}, e$)} \State{$\pi_v^{\texttt{br}} \gets $\func{extend}($\pi_u^{\texttt{br}}, e$)} \State{\textbf{return} $(\pi_v^{\texttt{tl}}, \pi_v^{\texttt{br}})$} \end{algorithmic} \caption{extend\_\algname{PP-A$^*$}} \label{alg:extendIS} \end{algorithm} \begin{algorithm}[t!] \textbf{Input:} ($\rm{PP}_v$, OPEN) \begin{algorithmic}[1] \For{\textbf{each} path pair $\rm{\tilde{PP}}_v \in$ OPEN} \State $\rm{PP}_v^{\rm merged} \gets $ merge($\rm{\tilde{PP}}_v, \rm{{PP}}_v $) \If {$\rm{PP}_v^{\rm merged}$.is\_bounded($\ensuremath{\varepsilon}\xspace_1, \ensuremath{\varepsilon}\xspace_2$)} \State OPEN.remove($\rm{\tilde{PP}}_v$) \Comment{remove existing path pair} \State OPEN.\func{insert}($\rm{PP}_v^{\rm merged}$) \State \textbf{return} \EndIf \EndFor \State OPEN.\func{insert}($\rm{PP}_v$) \State{\textbf{return}} \end{algorithmic} \caption{insert\_\algname{PP-A$^*$}} \label{alg:insertIS} \end{algorithm} \begin{algorithm}[t!] \textbf{Input:} ($\rm{PP}_{{\ensuremath{v_{\rm{goal}}}\xspace}}$, solutions\_pp) \begin{algorithmic}[1] \For{\textbf{each} path pair $\rm{\tilde{PP}}_{{\ensuremath{v_{\rm{goal}}}\xspace}} \in$ solutions\_pp} \State $\rm{PP}_{{\ensuremath{v_{\rm{goal}}}\xspace}}^{\rm merged} \gets $ merge($\rm{\tilde{PP}}_{{\ensuremath{v_{\rm{goal}}}\xspace}}, \rm{{PP}}_{{\ensuremath{v_{\rm{goal}}}\xspace}} $) \If {$\rm{PP}_{{\ensuremath{v_{\rm{goal}}}\xspace}}^{\rm merged}$.is\_bounded($\ensuremath{\varepsilon}\xspace_1, \ensuremath{\varepsilon}\xspace_2$)} \State solutions\_pp.remove($\rm{\tilde{PP}}_{{\ensuremath{v_{\rm{goal}}}\xspace}}$) \State solutions\_pp.insert($\rm{PP}_{{\ensuremath{v_{\rm{goal}}}\xspace}}^{\rm merged}$) \State \textbf{return} \EndIf \EndFor \State solutions\_pp.insert($\rm{PP}_{{\ensuremath{v_{\rm{goal}}}\xspace}}$) \State{\textbf{return}} \end{algorithmic} \caption{merge\_to\_solutions\_\algname{PP-A$^*$}} \label{alg:merge-IS} \end{algorithm} \subsection{Analysis} To show that \algname{PP-A$^*$} indeed computes an approximate Pareto frontier using the domination checks suggested in Eqs.~\ref{eq:d1} and~\ref{eq:d2}, it will be useful to introduce the notion of a path pair's \myemph{apex}, which represents a (possibly non-existent) path that dominates all paths represented by the path pair. \begin{defn} Let $\rm{PP}_u = (\pi_u^{\texttt{tl}}, \pi_u^{\texttt{br}})$ be a path pair. Its apex is the two-dimensional point $\ensuremath{\mathcal{A}}\xspace_u = ( c_1(\pi_u^{\texttt{tl}}), c_2(\pi_u^{\texttt{br}}) )$.
The $f$-value of the apex is $( c_1(\pi_u^{\texttt{tl}}) + h_1(u), c_2(\pi_u^{\texttt{br}}) + h_2(u) )$. \end{defn} If we (conceptually) replace each path pair used by \algname{PP-A$^*$} with its corresponding apex, we obtain an algorithm that is very similar to \algname{BOA$^*$}eps. Specifically, both algorithms (i)~order nodes / apexes in the OPEN list lexicographically, (ii)~update $g_2^{\rm {min}}(u)$ for each vertex $u$ when a node / apex is popped if $g_2 < g_2^{\rm {min}}(u)$, where~$g_2$ is the $c_2$-cost of the node / apex, and (iii)~prune a node / apex using the same condition (notice that Eq.~\ref{eq:d0} is identical to Eqs.~\ref{eq:d1} and~\ref{eq:d2}). The main difference is that \algname{PP-A$^*$} also merges path pairs. This turns the search tree into a directed acyclic graph (DAG). Now we can easily adapt several Lemmas described in~\cite{UYBZSK20}\footnote{When adapting the Lemmas used in~\cite{UYBZSK20}, we mention the corresponding Lemma number. Furthermore, to help a reader interested in comparing the Lemmas and the corresponding proofs, when possible, we use the same notation and even the same words. This was done after obtaining permission from the authors of~\cite{UYBZSK20}.}: \begin{lem}[Corresponding to Lemma~1 in~\cite{UYBZSK20}] \label{lem:1} After extending an apex, the new apex has $f_1$- and $f_2$-values that are no smaller than the $f_1$- and $f_2$-values, respectively, of the generating apex. \end{lem} The proof is identical to Lemma~1 in~\cite{UYBZSK20}. However, it is important to note that an apex can also be created by merging two existing apexes. Here, it is not clear which is the ``parent'' apex and the Lemma does not necessarily hold. However, we can state the following straightforward observation: \begin{observation} \label{obs:1} Let $\ensuremath{\mathcal{A}}\xspace_u^{\rm merged}$ be the apex resulting from a merge operation between $\ensuremath{\mathcal{A}}\xspace_u^1$ and $\ensuremath{\mathcal{A}}\xspace_u^2$. Then, the $f_1$- and $f_2$-values of $\ensuremath{\mathcal{A}}\xspace_u^{\rm merged}$ cannot be smaller than the $f_1$- and $f_2$-values of both of $\ensuremath{\mathcal{A}}\xspace_u^1$ and $\ensuremath{\mathcal{A}}\xspace_u^2$. Specifically, $$ f_1(\ensuremath{\mathcal{A}}\xspace_u^{\rm merged}) = \min (f_1(\ensuremath{\mathcal{A}}\xspace_u^1),f_1( \ensuremath{\mathcal{A}}\xspace_u^2)), $$ and $$ f_2(\ensuremath{\mathcal{A}}\xspace_u^{\rm merged}) = \min (f_2(\ensuremath{\mathcal{A}}\xspace_u^1), f_2(\ensuremath{\mathcal{A}}\xspace_u^2)). $$ \end{observation} \begin{lem}[Corresponding to Lemma~2 in~\cite{UYBZSK20}] \label{lem:2} The sequences of extracted and expanded apexes have monotonically non-decreasing $f_1$-values. \end{lem} \begin{proof} An apex extracted by \algname{PP-A$^*$} from the OPEN list has the smallest $f_1$-value among all apexes in the OPEN list. Since generated apexes that are added to the OPEN list have $f_1$-values that are no smaller than those of their expanded parent apexes (Lemma~\ref{lem:1}), and an apex resulting from a merge operation cannot have an $f_1$-value smaller than the $f_1$-values of both of the merged apexes (Obs.~\ref{obs:1}), the sequence of extracted apexes has monotonically non-decreasing $f_1$-values. \end{proof} \begin{lem}[Corresponding to Lemma~3 in~\cite{UYBZSK20}] \label{lem:3} The sequence of extracted apexes with the same state has strictly monotonically decreasing $f_2$-values. \end{lem} The proof is identical to Lemma~3 in~\cite{UYBZSK20}.
\begin{lem}[Corresponding to Lemma~6 in~\cite{UYBZSK20}] \label{lem:6} If apex $\ensuremath{\mathcal{A}}\xspace_u^1$ is weakly dominated by apex $\ensuremath{\mathcal{A}}\xspace_u^2$, then each apex at ${\ensuremath{v_{\rm{goal}}}\xspace}$ in the subtree of the search tree rooted at $\ensuremath{\mathcal{A}}\xspace_u^1$ (when no merge operations are performed in the subtree) is weakly dominated by an apex at ${\ensuremath{v_{\rm{goal}}}\xspace}$ in the DAG rooted at $\ensuremath{\mathcal{A}}\xspace_u^2$ (even when merge operations are performed). \end{lem} \begin{proof} Since apex $\ensuremath{\mathcal{A}}\xspace_u^1$ is weakly dominated by apex $\ensuremath{\mathcal{A}}\xspace_u^2$, $g_1(\ensuremath{\mathcal{A}}\xspace_u^2) \leq g_1(\ensuremath{\mathcal{A}}\xspace_u^1)$ and $g_2(\ensuremath{\mathcal{A}}\xspace_u^2) \leq g_2(\ensuremath{\mathcal{A}}\xspace_u^1)$ (here, given apex $\ensuremath{\mathcal{A}}\xspace = (p_1,p_2)$ we define $g_i(\ensuremath{\mathcal{A}}\xspace): = p_i$, corresponding to the $g$-value of nodes in standard \algname{A$^*$}-like algorithms). Assume that $\ensuremath{\mathcal{A}}\xspace_{{\ensuremath{v_{\rm{goal}}}\xspace}}^3$ is an apex at the goal in the subtree of the search tree rooted at~$\ensuremath{\mathcal{A}}\xspace_u^1$ (when no merge operations are performed). Let the sequence of vertices of the apexes along a branch of the search tree from the root apex to $\ensuremath{\mathcal{A}}\xspace_u^1$ be $u_1, \ldots, u_i$ (with $u_1 = {\ensuremath{v_{\rm{start}}}\xspace}$ and $u_i = u$). Similarly, let the sequence of vertices of the apexes along a branch of the DAG from the root apex to $\ensuremath{\mathcal{A}}\xspace_u^2$ be $u_1', \ldots, u_j'$ (with $u_1' = {\ensuremath{v_{\rm{start}}}\xspace}$ and $u_j' = u$). Finally, let the sequence of vertices of the apexes along a branch of the search tree from $\ensuremath{\mathcal{A}}\xspace_u^1$ to $\ensuremath{\mathcal{A}}\xspace_{{\ensuremath{v_{\rm{goal}}}\xspace}}^3$ be $\pi = u_i, \ldots, u_k$ (with $u_k = {\ensuremath{v_{\rm{goal}}}\xspace}$). Then, there is an apex $\ensuremath{\mathcal{A}}\xspace_{{\ensuremath{v_{\rm{goal}}}\xspace}}^4$ at the goal in the DAG rooted at apex $\ensuremath{\mathcal{A}}\xspace_u^2$ such that the sequence of vertices of the apexes along a branch of the DAG from the root apex to~$\ensuremath{\mathcal{A}}\xspace_{{\ensuremath{v_{\rm{goal}}}\xspace}}^4$ is $u_1', \ldots, u_j', u_{i+1}, \ldots ,u_k$. Since $g_1(\ensuremath{\mathcal{A}}\xspace_u^2) \leq g_1(\ensuremath{\mathcal{A}}\xspace_u^1)$, it follows that \begin{align*} g_1(\ensuremath{\mathcal{A}}\xspace_{{\ensuremath{v_{\rm{goal}}}\xspace}}^4) ~= &~ g_1(\ensuremath{\mathcal{A}}\xspace_u^2)+c_1(\pi) \\ ~\leq &~ g_1(\ensuremath{\mathcal{A}}\xspace_u^1) + c_1(\pi) \\ ~= &~ g_1(\ensuremath{\mathcal{A}}\xspace_{{\ensuremath{v_{\rm{goal}}}\xspace}}^3). \end{align*} Thus, $g_1(\ensuremath{\mathcal{A}}\xspace_{{\ensuremath{v_{\rm{goal}}}\xspace}}^4) \leq g_1(\ensuremath{\mathcal{A}}\xspace_{{\ensuremath{v_{\rm{goal}}}\xspace}}^3)$. Following similar lines yields that $g_2(\ensuremath{\mathcal{A}}\xspace_{{\ensuremath{v_{\rm{goal}}}\xspace}}^4) \leq g_2(\ensuremath{\mathcal{A}}\xspace_{{\ensuremath{v_{\rm{goal}}}\xspace}}^3)$. Thus, $\ensuremath{\mathcal{A}}\xspace_{{\ensuremath{v_{\rm{goal}}}\xspace}}^4$ weakly dominates $\ensuremath{\mathcal{A}}\xspace_{{\ensuremath{v_{\rm{goal}}}\xspace}}^3$. \end{proof} The following Lemma concludes the building blocks we will need to prove that \algname{PP-A$^*$} computes an approximate Pareto frontier.
It can also be easily adapted to show that \algname{BOA$^*$}eps computes an approximate Pareto frontier. \begin{lem}[Corresponding to Lemma~7 in~\cite{UYBZSK20}] \label{lem:7} When \algname{PP-A$^*$} prunes an apex $\ensuremath{\mathcal{A}}\xspace_u^1$ at state $u$ and this prevents it in the future from adding an apex $\ensuremath{\mathcal{A}}\xspace_{{\ensuremath{v_{\rm{goal}}}\xspace}}^2$ (at the goal state) to the solution set, then it can still add in the future an apex (with the goal state) that approximately dominates apex $\ensuremath{\mathcal{A}}\xspace_{{\ensuremath{v_{\rm{goal}}}\xspace}}^2$. \end{lem} \begin{proof} We prove the statement by induction on the number of pruned apexes so far, including apex $\ensuremath{\mathcal{A}}\xspace_u^1$. If the number of pruned apexes is zero, then the lemma trivially holds. Now assume that the number of pruned apexes is $n+1$ and the lemma holds for $n \geq 0$. We distinguish three cases: \begin{itemize} \item[\textbf{C1}] \label{case:1} \algname{PP-A$^*$} prunes apex $\ensuremath{\mathcal{A}}\xspace_u^1$ on line~5 because of the first pruning condition (Eq.~\ref{eq:d1}): Then, \algname{PP-A$^*$} has expanded an apex~$\ensuremath{\mathcal{A}}\xspace_u^4$ at state $u$ previously such that $g_2^{\rm {min}}(u) = g_2(\ensuremath{\mathcal{A}}\xspace_u^4)$, since otherwise $g_2^{\rm {min}}(u) = \infty$ and the pruning condition could not hold. Combining both (in)equalities yields $g_2(\ensuremath{\mathcal{A}}\xspace_u^1) \geq g_2(\ensuremath{\mathcal{A}}\xspace_u^4)$. Since $f_1(\ensuremath{\mathcal{A}}\xspace_u^1) \geq f_1(\ensuremath{\mathcal{A}}\xspace_u^4)$ (Lemma~\ref{lem:2}), \begin{align*} g_1(\ensuremath{\mathcal{A}}\xspace_{u}^1) + h_1(u) ~= &~ f_1(\ensuremath{\mathcal{A}}\xspace_u^1) \\ ~\geq & f_1(\ensuremath{\mathcal{A}}\xspace_u^4) \\ ~= &~ g_1(\ensuremath{\mathcal{A}}\xspace_{u}^4) + h_1(u). \end{align*} Thus $g_1(\ensuremath{\mathcal{A}}\xspace_{u}^1) \geq g_1(\ensuremath{\mathcal{A}}\xspace_{u}^4)$. Combining both inequalities yields that apex $\ensuremath{\mathcal{A}}\xspace_{u}^1$ is weakly dominated by apex $\ensuremath{\mathcal{A}}\xspace_{u}^4$ and thus each apex at ${\ensuremath{v_{\rm{goal}}}\xspace}$ in the subtree rooted at $\ensuremath{\mathcal{A}}\xspace_{u}^1$, including~$\ensuremath{\mathcal{A}}\xspace_{{\ensuremath{v_{\rm{goal}}}\xspace}}^2$, is weakly dominated (and hence approximately dominated) by an apex $\ensuremath{\mathcal{A}}\xspace_{{\ensuremath{v_{\rm{goal}}}\xspace}}^5$ in the subtree rooted at apex $\ensuremath{\mathcal{A}}\xspace_{u}^4$ (Lemma~\ref{lem:6}). In case \algname{PP-A$^*$} has pruned an apex that prevents it in the future from adding apex $\ensuremath{\mathcal{A}}\xspace_{{\ensuremath{v_{\rm{goal}}}\xspace}}^5$ to the solution set, then it can still add in the future an apex (at the goal state) that approximately dominates~$\ensuremath{\mathcal{A}}\xspace_{{\ensuremath{v_{\rm{goal}}}\xspace}}^5$ and thus also apex $\ensuremath{\mathcal{A}}\xspace_{{\ensuremath{v_{\rm{goal}}}\xspace}}^2$ (induction assumption). \item[\textbf{C2}] \label{case:2} \algname{PP-A$^*$} prunes apex $\ensuremath{\mathcal{A}}\xspace_u^1$ on line~5 because of the second pruning condition (Eq.~\ref{eq:d2}): Then, \algname{PP-A$^*$} has expanded an apex $\ensuremath{\mathcal{A}}\xspace_{{\ensuremath{v_{\rm{goal}}}\xspace}}^4$ with the goal state previously such that $g_2^{\rm {min}}({\ensuremath{v_{\rm{goal}}}\xspace}) = g_2(\ensuremath{\mathcal{A}}\xspace_{{\ensuremath{v_{\rm{goal}}}\xspace}}^4)$, since otherwise $g_2^{\rm {min}}({\ensuremath{v_{\rm{goal}}}\xspace}) = \infty$ and the pruning condition could not hold.
Combining both (in)equalities yields that $(1 + \ensuremath{\varepsilon}\xspace_2) \cdot f_2(\ensuremath{\mathcal{A}}\xspace_u^1) \geq g_2(\ensuremath{\mathcal{A}}\xspace_{{\ensuremath{v_{\rm{goal}}}\xspace}}^4)$. Since $\ensuremath{\mathcal{A}}\xspace_u^1$ is an ancestor of $\ensuremath{\mathcal{A}}\xspace_{{\ensuremath{v_{\rm{goal}}}\xspace}}^2$ in the search tree, $f_2(\ensuremath{\mathcal{A}}\xspace_{{\ensuremath{v_{\rm{goal}}}\xspace}}^2) \geq f_2(\ensuremath{\mathcal{A}}\xspace_u^1)$ (Lemma~\ref{lem:1}). Combining both inequalities yields $g_2(\ensuremath{\mathcal{A}}\xspace_{{\ensuremath{v_{\rm{goal}}}\xspace}}^2) = f_2(\ensuremath{\mathcal{A}}\xspace_{{\ensuremath{v_{\rm{goal}}}\xspace}}^2) \geq g_2(\ensuremath{\mathcal{A}}\xspace_{{\ensuremath{v_{\rm{goal}}}\xspace}}^4) / (1 + \ensuremath{\varepsilon}\xspace_2)$. Since $\ensuremath{\mathcal{A}}\xspace_{u}^1$ is an ancestor of $\ensuremath{\mathcal{A}}\xspace_{{\ensuremath{v_{\rm{goal}}}\xspace}}^2$ in the search tree, $g_1(\ensuremath{\mathcal{A}}\xspace_{{\ensuremath{v_{\rm{goal}}}\xspace}}^2) = f_1(\ensuremath{\mathcal{A}}\xspace_{{\ensuremath{v_{\rm{goal}}}\xspace}}^2) \geq f_1(\ensuremath{\mathcal{A}}\xspace_u^1)$ (Lemma~\ref{lem:1}). Since $f_1(\ensuremath{\mathcal{A}}\xspace_u^1) \geq f_1(\ensuremath{\mathcal{A}}\xspace_{{\ensuremath{v_{\rm{goal}}}\xspace}}^4)$ (Lemma~\ref{lem:2}), it follows that $g_1(\ensuremath{\mathcal{A}}\xspace_{{\ensuremath{v_{\rm{goal}}}\xspace}}^2) \geq f_1(\ensuremath{\mathcal{A}}\xspace_u^1) \geq f_1(\ensuremath{\mathcal{A}}\xspace_{{\ensuremath{v_{\rm{goal}}}\xspace}}^4) = g_1(\ensuremath{\mathcal{A}}\xspace_{{\ensuremath{v_{\rm{goal}}}\xspace}}^4)$. Combining $g_1(\ensuremath{\mathcal{A}}\xspace_{{\ensuremath{v_{\rm{goal}}}\xspace}}^2) \geq g_1(\ensuremath{\mathcal{A}}\xspace_{{\ensuremath{v_{\rm{goal}}}\xspace}}^4)$ and $g_2(\ensuremath{\mathcal{A}}\xspace_{{\ensuremath{v_{\rm{goal}}}\xspace}}^2) \geq g_2(\ensuremath{\mathcal{A}}\xspace_{{\ensuremath{v_{\rm{goal}}}\xspace}}^4) / (1 + \ensuremath{\varepsilon}\xspace_2)$ yields that $\ensuremath{\mathcal{A}}\xspace_{{\ensuremath{v_{\rm{goal}}}\xspace}}^2$ is approximately dominated by $\ensuremath{\mathcal{A}}\xspace_{{\ensuremath{v_{\rm{goal}}}\xspace}}^4$. In case \algname{PP-A$^*$} has pruned an apex that prevents it in the future from adding $\ensuremath{\mathcal{A}}\xspace_{{\ensuremath{v_{\rm{goal}}}\xspace}}^4$ to the solution set, then it can still add in the future an apex that approximately dominates $\ensuremath{\mathcal{A}}\xspace_{{\ensuremath{v_{\rm{goal}}}\xspace}}^4$ and thus also $\ensuremath{\mathcal{A}}\xspace_{{\ensuremath{v_{\rm{goal}}}\xspace}}^2$ (induction assumption). \item[\textbf{C3}] \algname{PP-A$^*$} prunes apex $\ensuremath{\mathcal{A}}\xspace_u^1$ on line~12 because of either the first or the second pruning condition (Eq.~\ref{eq:d1} or~\ref{eq:d2}): The proofs of Case~\textbf{C1} or Case~\textbf{C2}, respectively, apply unchanged except that $f_1(\ensuremath{\mathcal{A}}\xspace_u^1) \geq f_1(\ensuremath{\mathcal{A}}\xspace_{{\ensuremath{v_{\rm{goal}}}\xspace}}^4)$ now holds for a different reason. Let $\ensuremath{\mathcal{A}}\xspace_v^3$ be the apex that \algname{PP-A$^*$} expands when it executes Line 12.
Combining $f_1(\ensuremath{\mathcal{A}}\xspace_u^1) \geq f_1(\ensuremath{\mathcal{A}}\xspace_v^3)$ (Lemma~\ref{lem:1}) and $f_1(\ensuremath{\mathcal{A}}\xspace_v^3) \geq f_1(\ensuremath{\mathcal{A}}\xspace_{{\ensuremath{v_{\rm{goal}}}\xspace}}^4)$ (Lemma~\ref{lem:2}) yields $f_1(\ensuremath{\mathcal{A}}\xspace_u^1) \geq f_1(\ensuremath{\mathcal{A}}\xspace_{{\ensuremath{v_{\rm{goal}}}\xspace}}^4)$. \end{itemize} \end{proof} Lemma~\ref{lem:7} states that all solutions in the partial Pareto frontier captured by a path pair that was pruned by \algname{PP-A$^*$} are approximately dominated by an apex of a path pair that was used as a solution. Combining this with the fact that all path pairs are $(\ensuremath{\varepsilon}\xspace_1, \ensuremath{\varepsilon}\xspace_2)$-bounded by construction, we obtain the following Corollary. \begin{cor} Given a bi-criteria approximate shortest-path problem (Problem~\ref{prob:2}), \algname{PP-A$^*$} returns an approximate Pareto frontier. \end{cor}
\section{Evaluation} \paragraph{Experimental setup.} To evaluate our approach, we compare it to \algname{BOA$^*$}eps, as \algname{BOA$^*$} was recently shown to dramatically outperform other state-of-the-art algorithms for bi-criteria shortest path~\cite{UYBZSK20}. All experiments were run on a 1.8GHz Intel(R) Core(TM) i7-8565U CPU Windows 10 machine with 16GB of RAM. All algorithm implementations were in C++.\footnote{Our code is publicly available at \url{https://github.com/CRL-Technion/path-pair-graph-search}.} We use road maps from the 9th DIMACS Implementation Challenge: Shortest Path\footnote{\url{http://users.diag.uniroma1.it/challenge9/download.shtml}.}. The cost components represent travel distances ($c_1$) and times ($c_2$). The heuristic values are the exact travel distances and times to the goal state, computed with Dijkstra's algorithm. Since all algorithms use the same heuristic values, heuristic-computation times are omitted.
\begin{table}[t] \resizebox{\columnwidth}{!}{ \begin{tabular}{cllllllll} \rowcolor[HTML]{656565} \multicolumn{9}{c}{\cellcolor[HTML]{656565}\textbf{New York City (NY)}} \\ \rowcolor[HTML]{9B9B9B} \multicolumn{9}{c}{\cellcolor[HTML]{9B9B9B}\textbf{264,346 states, 730,100 edges}} \\ \rowcolor[HTML]{C0C0C0} \cellcolor[HTML]{C0C0C0} & \multicolumn{2}{c}{\cellcolor[HTML]{C0C0C0}\textbf{avg $n_{sol}$}} & \multicolumn{2}{c}{\cellcolor[HTML]{C0C0C0}\textbf{avg t}} & \multicolumn{2}{c}{\cellcolor[HTML]{C0C0C0}\textbf{min t}} & \multicolumn{2}{c}{\cellcolor[HTML]{C0C0C0}\textbf{max t}} \\ \rowcolor[HTML]{C0C0C0} \multirow{-2}{*}{\cellcolor[HTML]{C0C0C0}\textbf{$\varepsilon$}} & \multicolumn{1}{c}{\cellcolor[HTML]{C0C0C0}\textbf{\algname{PP-A$^*$}}} & \multicolumn{1}{c}{\cellcolor[HTML]{C0C0C0}\textbf{\algname{BOA$^*$}}} & \multicolumn{1}{c}{\cellcolor[HTML]{C0C0C0}\textbf{\algname{PP-A$^*$}}} & \multicolumn{1}{c}{\cellcolor[HTML]{C0C0C0}\textbf{\algname{BOA$^*$}}} & \multicolumn{1}{c}{\cellcolor[HTML]{C0C0C0}\textbf{\algname{PP-A$^*$}}} & \multicolumn{1}{c}{\cellcolor[HTML]{C0C0C0}\textbf{\algname{BOA$^*$}}} & \multicolumn{1}{c}{\cellcolor[HTML]{C0C0C0}\textbf{\algname{PP-A$^*$}}} & \multicolumn{1}{c}{\cellcolor[HTML]{C0C0C0}\textbf{\algname{BOA$^*$}}} \\ \textbf{0} & {\color[HTML]{000000} 158} & {\color[HTML]{000000} 158} & {\color[HTML]{000000} 1,047} & {\color[HTML]{000000} 405} & {\color[HTML]{000000} 2} & {\color[HTML]{000000} 0} & {\color[HTML]{000000} 13,563} & {\color[HTML]{000000} 5,038} \\ \textbf{0.01} & {\color[HTML]{000000} 19} & {\color[HTML]{000000} 20} & {\color[HTML]{000000} 291} & {\color[HTML]{000000} 353} & {\color[HTML]{000000} 3} & {\color[HTML]{000000} 0} & {\color[HTML]{000000} 3,662} & {\color[HTML]{000000} 4,577} \\ \textbf{0.025} & {\color[HTML]{000000} 10} & {\color[HTML]{000000} 10} & {\color[HTML]{000000} 168} & {\color[HTML]{000000} 295} & {\color[HTML]{000000} 2} & {\color[HTML]{000000} 0} & {\color[HTML]{000000} 2,207} & {\color[HTML]{000000} 4,101} \\ \textbf{0.05} & {\color[HTML]{000000} 6} & {\color[HTML]{000000} 6} & {\color[HTML]{000000} 111} & {\color[HTML]{000000} 240} & {\color[HTML]{000000} 3} & {\color[HTML]{000000} 0} & {\color[HTML]{000000} 1,523} & {\color[HTML]{000000} 3,538} \\ \textbf{0.1} & {\color[HTML]{000000} 4} & {\color[HTML]{000000} 4} & {\color[HTML]{000000} 69} & {\color[HTML]{000000} 174} & {\color[HTML]{000000} 2} & {\color[HTML]{000000} 0} & {\color[HTML]{000000} 932} & {\color[HTML]{000000} 2,694} \end{tabular} } \resizebox{\columnwidth}{!}{ \begin{tabular}{cllllllll} \rowcolor[HTML]{656565} \multicolumn{9}{c}{\cellcolor[HTML]{656565}\textbf{San Francisco Bay (BAY)}} \\ \rowcolor[HTML]{9B9B9B} \multicolumn{9}{c}{\cellcolor[HTML]{9B9B9B}\textbf{321,270 states, 794,830 edges}} \\ \rowcolor[HTML]{C0C0C0} \cellcolor[HTML]{C0C0C0} & \multicolumn{2}{c}{\cellcolor[HTML]{C0C0C0}\textbf{avg $n_{sol}$}} & \multicolumn{2}{c}{\cellcolor[HTML]{C0C0C0}\textbf{avg t}} & \multicolumn{2}{c}{\cellcolor[HTML]{C0C0C0}\textbf{min t}} & \multicolumn{2}{c}{\cellcolor[HTML]{C0C0C0}\textbf{max t}} \\ \rowcolor[HTML]{C0C0C0} \multirow{-2}{*}{\cellcolor[HTML]{C0C0C0}\textbf{$\varepsilon$}} & \multicolumn{1}{c}{\cellcolor[HTML]{C0C0C0}\textbf{\algname{PP-A$^*$}}} & \multicolumn{1}{c}{\cellcolor[HTML]{C0C0C0}\textbf{\algname{BOA$^*$}}} & \multicolumn{1}{c}{\cellcolor[HTML]{C0C0C0}\textbf{\algname{PP-A$^*$}}} & \multicolumn{1}{c}{\cellcolor[HTML]{C0C0C0}\textbf{\algname{BOA$^*$}}} & \multicolumn{1}{c}{\cellcolor[HTML]{C0C0C0}\textbf{\algname{PP-A$^*$}}} & 
\multicolumn{1}{c}{\cellcolor[HTML]{C0C0C0}\textbf{\algname{BOA$^*$}}} & \multicolumn{1}{c}{\cellcolor[HTML]{C0C0C0}\textbf{\algname{PP-A$^*$}}} & \multicolumn{1}{c}{\cellcolor[HTML]{C0C0C0}\textbf{\algname{BOA$^*$}}} \\ \textbf{0} & {\color[HTML]{000000} 117} & {\color[HTML]{000000} 117} & {\color[HTML]{000000} 1,213} & {\color[HTML]{000000} 423} & {\color[HTML]{000000} 3} & {\color[HTML]{000000} 0} & {\color[HTML]{000000} 21,751} & {\color[HTML]{000000} 7,584} \\ \textbf{0.01} & {\color[HTML]{000000} 16} & {\color[HTML]{000000} 17} & {\color[HTML]{000000} 222} & {\color[HTML]{000000} 369} & {\color[HTML]{000000} 4} & {\color[HTML]{000000} 0} & {\color[HTML]{000000} 2,927} & {\color[HTML]{000000} 6,805} \\ \textbf{0.025} & {\color[HTML]{000000} 9} & {\color[HTML]{000000} 9} & {\color[HTML]{000000} 127} & {\color[HTML]{000000} 321} & {\color[HTML]{000000} 3} & {\color[HTML]{000000} 0} & {\color[HTML]{000000} 1,530} & {\color[HTML]{000000} 5,614} \\ \textbf{0.05} & {\color[HTML]{000000} 5} & {\color[HTML]{000000} 6} & {\color[HTML]{000000} 85} & {\color[HTML]{000000} 272} & {\color[HTML]{000000} 3} & {\color[HTML]{000000} 0} & {\color[HTML]{000000} 1,109} & {\color[HTML]{000000} 4,570} \\ \textbf{0.1} & {\color[HTML]{000000} 3} & {\color[HTML]{000000} 4} & {\color[HTML]{000000} 54} & {\color[HTML]{000000} 199} & {\color[HTML]{000000} 3} & {\color[HTML]{000000} 0} & {\color[HTML]{000000} 576} & {\color[HTML]{000000} 3,056} \end{tabular} } \resizebox{\columnwidth}{!}{ \begin{tabular}{cllllllll} \rowcolor[HTML]{656565} \multicolumn{9}{c}{\cellcolor[HTML]{656565}\textbf{Colorado (COL)}} \\ \rowcolor[HTML]{9B9B9B} \multicolumn{9}{c}{\cellcolor[HTML]{9B9B9B}\textbf{435,666 states, 1,042,400 edges}} \\ \rowcolor[HTML]{C0C0C0} \cellcolor[HTML]{C0C0C0} & \multicolumn{2}{c}{\cellcolor[HTML]{C0C0C0}\textbf{avg $n_{sol}$}} & \multicolumn{2}{c}{\cellcolor[HTML]{C0C0C0}\textbf{avg t}} & \multicolumn{2}{c}{\cellcolor[HTML]{C0C0C0}\textbf{min t}} & \multicolumn{2}{c}{\cellcolor[HTML]{C0C0C0}\textbf{max t}} \\ \rowcolor[HTML]{C0C0C0} \multirow{-2}{*}{\cellcolor[HTML]{C0C0C0}\textbf{$\varepsilon$}} & \multicolumn{1}{c}{\cellcolor[HTML]{C0C0C0}\textbf{\algname{PP-A$^*$}}} & \multicolumn{1}{c}{\cellcolor[HTML]{C0C0C0}\textbf{\algname{BOA$^*$}}} & \multicolumn{1}{c}{\cellcolor[HTML]{C0C0C0}\textbf{\algname{PP-A$^*$}}} & \multicolumn{1}{c}{\cellcolor[HTML]{C0C0C0}\textbf{\algname{BOA$^*$}}} & \multicolumn{1}{c}{\cellcolor[HTML]{C0C0C0}\textbf{\algname{PP-A$^*$}}} & \multicolumn{1}{c}{\cellcolor[HTML]{C0C0C0}\textbf{\algname{BOA$^*$}}} & \multicolumn{1}{c}{\cellcolor[HTML]{C0C0C0}\textbf{\algname{PP-A$^*$}}} & \multicolumn{1}{c}{\cellcolor[HTML]{C0C0C0}\textbf{\algname{BOA$^*$}}} \\ \textbf{0} & {\color[HTML]{000000} 318} & {\color[HTML]{000000} 318} & {\color[HTML]{000000} 3,368} & {\color[HTML]{000000} 1,144} & {\color[HTML]{000000} 5} & {\color[HTML]{000000} 1} & {\color[HTML]{000000} 56,153} & {\color[HTML]{000000} 17,348} \\ \textbf{0.01} & {\color[HTML]{000000} 15} & {\color[HTML]{000000} 16} & {\color[HTML]{000000} 372} & {\color[HTML]{000000} 944} & {\color[HTML]{000000} 5} & {\color[HTML]{000000} 1} & {\color[HTML]{000000} 3,633} & {\color[HTML]{000000} 16,304} \\ \textbf{0.025} & {\color[HTML]{000000} 7} & {\color[HTML]{000000} 8} & {\color[HTML]{000000} 192} & {\color[HTML]{000000} 768} & {\color[HTML]{000000} 5} & {\color[HTML]{000000} 1} & {\color[HTML]{000000} 1,690} & {\color[HTML]{000000} 15,037} \\ \textbf{0.05} & {\color[HTML]{000000} 4} & {\color[HTML]{000000} 5} & {\color[HTML]{000000} 116} & 
{\color[HTML]{000000} 608} & {\color[HTML]{000000} 5} & {\color[HTML]{000000} 1} & {\color[HTML]{000000} 991} & {\color[HTML]{000000} 13,718} \\ \textbf{0.1} & {\color[HTML]{000000} 3} & {\color[HTML]{000000} 3} & {\color[HTML]{000000} 69} & {\color[HTML]{000000} 470} & {\color[HTML]{000000} 4} & {\color[HTML]{000000} 1} & {\color[HTML]{000000} 593} & {\color[HTML]{000000} 11,977} \end{tabular} } \resizebox{\columnwidth}{!}{ \begin{tabular}{cllllllll} \rowcolor[HTML]{656565} \multicolumn{9}{c}{\cellcolor[HTML]{656565}\textbf{Florida (FL)}} \\ \rowcolor[HTML]{9B9B9B} \multicolumn{9}{c}{\cellcolor[HTML]{9B9B9B}\textbf{1,070,376 states, 2,712,798 edges}} \\ \rowcolor[HTML]{C0C0C0} \cellcolor[HTML]{C0C0C0} & \multicolumn{2}{c}{\cellcolor[HTML]{C0C0C0}\textbf{avg $n_{sol}$}} & \multicolumn{2}{c}{\cellcolor[HTML]{C0C0C0}\textbf{avg t}} & \multicolumn{2}{c}{\cellcolor[HTML]{C0C0C0}\textbf{min t}} & \multicolumn{2}{c}{\cellcolor[HTML]{C0C0C0}\textbf{max t}} \\ \rowcolor[HTML]{C0C0C0} \multirow{-2}{*}{\cellcolor[HTML]{C0C0C0}\textbf{$\varepsilon$}} & \multicolumn{1}{c}{\cellcolor[HTML]{C0C0C0}\textbf{\algname{PP-A$^*$}}} & \multicolumn{1}{c}{\cellcolor[HTML]{C0C0C0}\textbf{\algname{BOA$^*$}}} & \multicolumn{1}{c}{\cellcolor[HTML]{C0C0C0}\textbf{\algname{PP-A$^*$}}} & \multicolumn{1}{c}{\cellcolor[HTML]{C0C0C0}\textbf{\algname{BOA$^*$}}} & \multicolumn{1}{c}{\cellcolor[HTML]{C0C0C0}\textbf{\algname{PP-A$^*$}}} & \multicolumn{1}{c}{\cellcolor[HTML]{C0C0C0}\textbf{\algname{BOA$^*$}}} & \multicolumn{1}{c}{\cellcolor[HTML]{C0C0C0}\textbf{\algname{PP-A$^*$}}} & \multicolumn{1}{c}{\cellcolor[HTML]{C0C0C0}\textbf{\algname{BOA$^*$}}} \\ \textbf{0} & {\color[HTML]{000000} 357} & {\color[HTML]{000000} 357} & {\color[HTML]{000000} 12,177} & {\color[HTML]{000000} 3,545} & {\color[HTML]{000000} 12} & {\color[HTML]{000000} 3} & {\color[HTML]{000000} 270,450} & {\color[HTML]{000000} 68,467} \\ \textbf{0.01} & {\color[HTML]{000000} 12} & {\color[HTML]{000000} 13} & {\color[HTML]{000000} 1,000} & {\color[HTML]{000000} 3,228} & {\color[HTML]{000000} 12} & {\color[HTML]{000000} 3} & {\color[HTML]{000000} 17,092} & {\color[HTML]{000000} 64,642} \\ \textbf{0.025} & {\color[HTML]{000000} 6} & {\color[HTML]{000000} 6} & {\color[HTML]{000000} 479} & {\color[HTML]{000000} 2,738} & {\color[HTML]{000000} 11} & {\color[HTML]{000000} 3} & {\color[HTML]{000000} 8,060} & {\color[HTML]{000000} 59,908} \\ \textbf{0.05} & {\color[HTML]{000000} 3} & {\color[HTML]{000000} 4} & {\color[HTML]{000000} 263} & {\color[HTML]{000000} 1,985} & {\color[HTML]{000000} 12} & {\color[HTML]{000000} 3} & {\color[HTML]{000000} 3,945} & {\color[HTML]{000000} 39,214} \\ \textbf{0.1} & {\color[HTML]{000000} 2} & {\color[HTML]{000000} 2} & {\color[HTML]{000000} 144} & {\color[HTML]{000000} 1,172} & {\color[HTML]{000000} 11} & {\color[HTML]{000000} 2} & {\color[HTML]{000000} 1,780} & {\color[HTML]{000000} 21,665} \end{tabular} } \caption{Average number of solutions ($n_{\rm{sol}}$) and runtime (in ms) comparing \algname{BOA$^*$}eps and \algname{PP-A$^*$} on 50 random queries sampled for four different roadmaps for different approximation factors.} \label{tbl:res} \end{table} \begin{figure*} \caption{North East (NE) plots. \protect \subref{fig:NE1} \label{fig:NE1} \label{fig:NE2} \label{fig:NE4} \label{fig:NE3} \label{fig:NE} \end{figure*} \paragraph{General comparison.}Similar to the experiments of Hernandez et al~\cite{UYBZSK20} we start by comparing the algorithms for four different roadmaps containing between roughly $250K$ and $1M$ vertices. 
Table~\ref{tbl:res} summarizes the number of solutions in the approximate Pareto frontier and the average, minimum, and maximum running times of the two algorithms using the following values\footnote{While \algname{PP-A$^*$} allows a user to specify two approximation factors corresponding to the two cost functions, this is not the case for \algname{BOA$^*$}. Thus, in all experiments we use a single approximation factor $\varepsilon$ and set $\varepsilon_1 = \varepsilon_2 = \varepsilon$.} $\varepsilon \in \{ 0, 0.01, 0.025, 0.05, 0.1\}$. Here, approximation values of zero and $0.01$ correspond to computing the entire Pareto frontier and approximating it using a value of~$1\%$, respectively. When computing the entire Pareto frontier, \algname{BOA$^*$} is roughly three times faster than \algname{PP-A$^*$} on average. This is to be expected, as \algname{PP-A$^*$} stores two paths for each element in the priority queue and requires more computationally-demanding operations. As the approximation factor is increased, the average running time of \algname{PP-A$^*$} drops faster than that of \algname{BOA$^*$}eps, and we observe a significant average speedup. Interestingly, when looking at the minimal running time, \algname{BOA$^*$}eps significantly outperforms \algname{PP-A$^*$}. This is because in such settings the approximate Pareto frontier contains a single solution, which \algname{BOA$^*$}eps is able to compute very fast. Other nodes are approximately dominated by this solution and the algorithm can terminate very quickly. \algname{PP-A$^*$}, on the other hand, still performs merge operations, which incur a computational overhead. When looking at the maximal running time, we can see an opposite trend where \algname{PP-A$^*$} outperforms \algname{BOA$^*$}eps by a large factor. \paragraph{Pinpointing the performance differences between \algname{PP-A$^*$} and \algname{BOA$^*$}eps.} The first set of results suggests that as the problem becomes harder, the speedup that \algname{PP-A$^*$} may offer becomes more pronounced. We empirically quantify this claim by moving to a larger map, the North East (NE) map, which contains 1,524,453 states and 3,897,636 edges, and where we obtain even larger speedups (see Table~\ref{tbl:res-NE}).
\begin{table}[t]
\resizebox{\columnwidth}{!}{
\begin{tabular}{c ll ll ll}
\rowcolor[HTML]{656565}
\multicolumn{7}{c}{\cellcolor[HTML]{656565}\textbf{North East (NE)}} \\
\rowcolor[HTML]{9B9B9B}
\multicolumn{7}{c}{\cellcolor[HTML]{9B9B9B}\textbf{1,524,453 states, 3,897,636 edges}} \\
\rowcolor[HTML]{C0C0C0}
\cellcolor[HTML]{C0C0C0} & \multicolumn{2}{c}{\cellcolor[HTML]{C0C0C0}\textbf{avg t}} & \multicolumn{2}{c}{\cellcolor[HTML]{C0C0C0}\textbf{min t}} & \multicolumn{2}{c}{\cellcolor[HTML]{C0C0C0}\textbf{max t}} \\
\rowcolor[HTML]{C0C0C0}
\multirow{-2}{*}{\cellcolor[HTML]{C0C0C0}\textbf{$\varepsilon$}} & \multicolumn{1}{c}{\cellcolor[HTML]{C0C0C0}\textbf{PP-A*}} & \multicolumn{1}{c}{\cellcolor[HTML]{C0C0C0}\textbf{BOA*}} & \multicolumn{1}{c}{\cellcolor[HTML]{C0C0C0}\textbf{PP-A*}} & \multicolumn{1}{c}{\cellcolor[HTML]{C0C0C0}\textbf{BOA*}} & \multicolumn{1}{c}{\cellcolor[HTML]{C0C0C0}\textbf{PP-A*}} & \multicolumn{1}{c}{\cellcolor[HTML]{C0C0C0}\textbf{BOA*}} \\
\textbf{0} & {\color[HTML]{000000} 192.6} & {\color[HTML]{000000} 59.5} & {\color[HTML]{000000} 0.04} & {\color[HTML]{000000} 0.02} & {\color[HTML]{000000} 24,189.9} & {\color[HTML]{000000} 592.6} \\
\textbf{0.01} & {\color[HTML]{000000} 13.1} & {\color[HTML]{000000} 68.3} & {\color[HTML]{000000} 0.03} & {\color[HTML]{000000} 0.01} & {\color[HTML]{000000} 111.6} & {\color[HTML]{000000} 600.9} \\
\textbf{0.025} & {\color[HTML]{000000} 5.6} & {\color[HTML]{000000} 57.3} & {\color[HTML]{000000} 0.02} & {\color[HTML]{000000} 0.01} & {\color[HTML]{000000} 46.9} & {\color[HTML]{000000} 510.9} \\
\textbf{0.05} & {\color[HTML]{000000} 2.7} & {\color[HTML]{000000} 40.8} & {\color[HTML]{000000} 0.02} & {\color[HTML]{000000} 0.01} & {\color[HTML]{000000} 22.6} & {\color[HTML]{000000} 345.1} \\
\textbf{0.1} & {\color[HTML]{000000} 1.3} & {\color[HTML]{000000} 25.8} & {\color[HTML]{000000} 0.02} & {\color[HTML]{000000} 0.01} & {\color[HTML]{000000} 9.0} & {\color[HTML]{000000} 229.8}
\end{tabular}
}
\caption{Runtime (in seconds) comparing \algname{BOA$^*$} and \algname{PP-A$^*$} on 50 random queries sampled for the NE map.}
\label{tbl:res-NE}
\end{table}
We plot the number of nodes expanded (which is typically proportional to the running time of \algname{A$^*$}-like algorithms) by each algorithm as a function of the approximation factor (see Fig.~\ref{fig:NE1}). Here we used $\varepsilon \in \{ 0, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1\}$. Additionally, we plot both the arithmetic mean (Fig.~\ref{fig:NE2}) and the geometric mean (Fig.~\ref{fig:NE4}) of each algorithm as a function of the approximation factor.\footnote{We used both the arithmetic and the geometric mean, as the arithmetic mean can be misleading, skewing the average towards the results on larger instances. Together, both means better capture the results.} We observe that, for both algorithms, the number of nodes expanded monotonically decreases as the approximation factor is increased. This is because additional nodes may be pruned, which, in turn, prunes all nodes in their subtrees. It is important to discuss \emph{how} these nodes are pruned: recall that \algname{BOA$^*$}eps prunes nodes according to Eq.~\ref{eq:d0}. Thus, increasing the approximation factor only allows more nodes to be pruned according to the already-computed solutions, and not according to the paths computed to intermediate nodes. In contrast, \algname{PP-A$^*$} prunes nodes according to Eqs.~\ref{eq:d1} and~\ref{eq:d2}.
Thus, in addition to more path pairs being merged, increasing the approximation factor allows more path pairs to be pruned, both according to the already-computed solutions and according to the path pairs computed to intermediate vertices. Consequently, for relatively small approximation factors that are greater than zero (in our setting, $0 < \varepsilon < 0.5$), we see that \algname{BOA$^*$} expands a significantly higher number of nodes than \algname{PP-A$^*$}, which explains the speedups we observed. However, for large approximation factors, there is typically only one solution in the approximate Pareto frontier. This solution, which is found quickly by \algname{BOA$^*$}eps, allows almost all other paths to be pruned, which results in \algname{BOA$^*$}eps running much faster than \algname{PP-A$^*$}. This trend is visualized in Fig.~\ref{fig:NE3}.
\section{Future Research}
\subsection{Bidirectional search}
We presented \algname{PP-A$^*$} as a unidirectional search algorithm; however, a common approach to speeding up search algorithms is to perform two simultaneous searches: a forward search from ${\ensuremath{v_{\rm{start}}}\xspace}$ to ${\ensuremath{v_{\rm{goal}}}\xspace}$ and a backward search from ${\ensuremath{v_{\rm{goal}}}\xspace}$ to~${\ensuremath{v_{\rm{start}}}\xspace}$~\cite{pohl1971bi}. Thus, an immediate task for future research is to suggest a bidirectional extension of \algname{PP-A$^*$}. Here we can build upon recent progress in bidirectional search algorithms for bi-criteria shortest-path problems~\cite{sedeno2019biobjective}.
\subsection{Beyond two optimization criteria}
We presented \algname{PP-A$^*$} as a search algorithm for two optimization criteria; however, the same concepts can be used for multi-criteria optimization problems. Unfortunately, it is not clear how to perform operations such as dominance checks efficiently, since the methods presented for \algname{BOA$^*$} do not extend to such settings.
\end{document}
\begin{document}
\begin{center}
{\LARGE {\bf On $(N(k),\xi )$-semi-Riemannian manifolds: Pseudosymmetries}}\\[0pt]
{\large {\bf Mukut Mani Tripathi and Punam Gupta}}
\end{center}
{\bf Abstract.} The definition of a $({\cal T}_{\!a},{\cal T}_{\!b})$-pseudosymmetric semi-Riemannian manifold is given. $({\cal T}_{\!a},{\cal T}_{\!b})$-pseudosymmetric $(N(k),\xi )$-semi-Riemannian manifolds are classified. Some results for ${\cal T}_{\!a}$-pseudosymmetric $(N(k),\xi )$-semi-Riemannian manifolds are obtained. $({\cal T}_{\!a},{\cal T}_{\!b},S^{\ell })$-pseudosymmetric semi-Riemannian manifolds are defined. $({\cal T}_{\!a},{\cal T}_{\!b},S^{\ell })$-pseudosymmetric $(N(k),\xi )$-semi-Riemannian manifolds are classified. Some results for $(R,{\cal T}_{\!a},S^{\ell })$-pseudosymmetric $(N(k),\xi )$-semi-Riemannian manifolds are obtained. In particular, some results for $(R,{\cal T}_{\!a},S)$-pseudosymmetric $(N(k),\xi )$-semi-Riemannian manifolds are also obtained. After that, the definition of a $({\cal T}_{\!a},S_{{\cal T}_{\!b}})$-pseudosymmetric semi-Riemannian manifold is given. $({\cal T}_{\!a},S_{{\cal T}_{\!b}})$-pseudosymmetric $(N(k),\xi )$-semi-Riemannian manifolds are classified. It is proved that, under an algebraic condition, an $(R,S_{{\cal T}_{\!a}})$-pseudosymmetric $(N(k),\xi )$-semi-Riemannian manifold is either Einstein or satisfies $L=k$. Some results for $({\cal T}_{\!a},S)$-pseudosymmetric $(N(k),\xi )$-semi-Riemannian manifolds are also obtained. Finally, $({\cal T}_{\!a},S_{{\cal T}_{\!b}},S^{\ell })$-pseudosymmetric semi-Riemannian manifolds are defined and $({\cal T}_{\!a},S_{{\cal T}_{\!b}},S^{\ell })$-pseudosymmetric $(N(k),\xi )$-semi-Riemannian manifolds are classified.

\noindent {\bf 2000 Mathematics Subject Classification.} 53C25, 53C50.

\noindent {\bf Keywords.} ${\cal T}$-curvature tensor; quasi-conformal curvature tensor; conformal curvature tensor; conharmonic curvature tensor; concircular curvature tensor; pseudo-projective curvature tensor; projective curvature tensor; ${\cal M}$-projective curvature tensor; ${\cal W}_{i}$-curvature tensors $(i=0,\ldots ,9)$; ${\cal W}_{j}^{\ast }$-curvature tensors $(j=0,1)$; $\eta $-Einstein manifold; Einstein manifold; $N(k)$-contact metric manifold; $\left( \varepsilon \right) $-Sasakian manifold; Sasakian manifold; Kenmotsu manifold; $\left( \varepsilon \right) $-para-Sasakian manifold; para-Sasakian manifold; $(N(k),\xi )$-semi-Riemannian manifolds; $\left( {\cal T}_{\!a},{\cal T}_{\!b}\right) $-pseudosymmetric semi-Riemannian manifold; $\left( {\cal T}_{\!a},{\cal T}_{\!b},S^{\ell }\right) $-pseudosymmetric semi-Riemannian manifold; $({\cal T}_{\!a},S_{{\cal T}_{\!b}})$-pseudosymmetric semi-Riemannian manifold and $({\cal T}_{\!a},S_{{\cal T}_{\!b}},S^{\ell })$-pseudosymmetric semi-Riemannian manifold.

\section{Introduction}

Let $M$ be an $n$-dimensional differentiable manifold and ${\frak X}(M)$ the Lie algebra of vector fields on $M$. We assume that $X,X_{1},\ldots ,X_{s},Y,Z,U,V,W\in {\frak X}(M)$. It is well known that every $(1,1)$ tensor field ${\cal A}$ on a differentiable manifold determines a derivation ${\cal A}\cdot $ of the tensor algebra on the manifold, commuting with contractions.
For example, if ${\cal B}$ is a $(1,3)$ tensor field, then for fixed $X$ and $Y$ the $(1,1)$ tensor field ${\cal B}(X,Y)$ induces the derivation ${\cal B}(X,Y)\cdot \,$; thus, given a $(0,s)$ tensor field ${\cal K}$, we can associate a $(0,s+2)$ tensor field ${\cal B}\cdot {\cal K}$, defined by
\begin{eqnarray}
({\cal B}\cdot {\cal K})(X_{1},\ldots ,X_{s},X,Y) &=&({\cal B}(X,Y)\cdot {\cal K})(X_{1},\ldots ,X_{s}) \nonumber \\
&=&-\,{\cal K}({\cal B}(X,Y)X_{1},\ldots ,X_{s})-\cdots \nonumber \\
&&-\,{\cal K}(X_{1},\ldots ,{\cal B}(X,Y)X_{s}). \label{eq-RR}
\end{eqnarray}
Next, for a tensor $\sigma $ of type $(0,2)$, the operators $(X\wedge _{\sigma }Y)$ and $Q(\sigma ,{\cal K})$ are defined by
\[
(X\wedge _{\sigma }Y)Z=\sigma (Y,Z)X-\sigma (X,Z)Y,
\]
\begin{eqnarray}
Q(\sigma ,{\cal K})(X_{1},\ldots ,X_{s},X,Y) &=&((X\wedge _{\sigma }Y)\cdot {\cal K})(X_{1},\ldots ,X_{s}) \nonumber \\
&=&-\,{\cal K}((X\wedge _{\sigma }Y)X_{1},\ldots ,X_{s})-\cdots \nonumber \\
&&-\,{\cal K}(X_{1},\ldots ,(X\wedge _{\sigma }Y)X_{s}). \label{eq-pseudo}
\end{eqnarray}
Let $(M,g)$ be an $n$-dimensional semi-Riemannian manifold. Then $(M,g)$ is said to be
\begin{enumerate}
\item[{\bf (a)}] {\em pseudosymmetric} \cite{Deszcz-Grycak-87} if its curvature tensor $R$ satisfies
\[
R\cdot R=L_{g}Q(g,R),
\]
where $L_{g}$ is some smooth function on $M$,

\item[{\bf (b)}] {\em Ricci-generalized pseudosymmetric} \cite{Deszcz-90} if it satisfies
\[
R\cdot R=L_{S}Q(S,R),
\]
where $L_{S}$ is some smooth function on $M$ and $S$ is the Ricci tensor,

\item[{\bf (c)}] {\em Ricci-pseudosymmetric} \cite{Deszcz-89} if on the set ${\cal U}=\left\{ x\in M:\left( S-\frac{r}{n}\,g\right) _{x}\not=0\right\} $,
\[
R\cdot S=LQ(g,S),
\]
where $L$ is some function on ${\cal U}$.
\end{enumerate}
A pseudosymmetric manifold is a generalization of a manifold of constant curvature, a symmetric manifold $\left( \nabla R=0\right) $ and a semisymmetric manifold $\left( R\cdot R=0\right) $. Deszcz et al. \cite{DVY} proved that hypersurfaces in spaces of constant curvature, with exactly two distinct principal curvatures at every point, are pseudosymmetric. A Ricci-pseudosymmetric manifold is a generalization of a manifold of constant curvature, an Einstein manifold, a Ricci-symmetric manifold $\left( \nabla S=0\right) $, a symmetric manifold, a semisymmetric manifold, a pseudosymmetric manifold and a Ricci-semisymmetric manifold $\left( R\cdot S=0\right) $. Similar to the pseudosymmetry condition, Deszcz and Grycak \cite{Deszcz-90.,Deszcz-91,Deszcz-Grycak-89} and \"{O}zg\"{u}r \cite{Ozgur-2005} also studied Weyl pseudosymmetric manifolds $\left( R\cdot {\cal C}=L_{g}Q(g,{\cal C})\right) $. In 1990, Prvanovi\'{c} \cite{Prvanovic-90} studied the condition
\[
R\cdot \tilde{T}=LQ(S^{\ell },\tilde{T}),\qquad \ell =0,1,2,\ldots ,
\]
where $\tilde{T}$ is some $\left( 0,4\right) $-tensor field and $L$ is some smooth function on $M$. If $\tilde{T}=R$ and $\ell =0$, this condition becomes the condition for pseudosymmetry, and if $\tilde{T}=R$ and $\ell =1$, it becomes the condition for Ricci-generalized pseudosymmetry. Apart from the curvature tensor and the Weyl conformal curvature tensor, the quasi-conformal, concircular, conharmonic, pseudo-projective and projective curvature tensors are also important from the semi-Riemannian point of view. It is interesting to study these and other curvature tensors on $(N(k),\xi )$-semi-Riemannian manifolds. In this paper, we study several derivation conditions on $(N(k),\xi )$-semi-Riemannian manifolds.
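For concreteness, we record the explicit form of the tensor appearing in the Ricci-pseudosymmetry condition {\bf (c)} above: taking $\sigma =g$ and ${\cal K}=S$ (so $s=2$) in (\ref{eq-pseudo}) and using the definition of $X\wedge _{g}Y$, one obtains
\begin{eqnarray*}
Q(g,S)(X_{1},X_{2},X,Y) &=&-\,S((X\wedge _{g}Y)X_{1},X_{2})-S(X_{1},(X\wedge _{g}Y)X_{2}) \\
&=&-\,g(Y,X_{1})S(X,X_{2})+g(X,X_{1})S(Y,X_{2}) \\
&&-\,g(Y,X_{2})S(X_{1},X)+g(X,X_{2})S(X_{1},Y),
\end{eqnarray*}
and the analogous expansion, with $g$ replaced by $S^{\ell }$, holds for $Q(S^{\ell },S)$.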
The paper is organized as follows. In Section \ref{sect-GCT}, we give the definition of ${\cal T}$-curvature tensor. In Section~\ref{sect-NK}, we give examples and properties of $(N(k),\xi )$-semi-Riemannian manifolds. In Section~\ref{sect-TTP}, $({\cal T}_{\!a},{\cal T}_{\!b})$-pseudosymmetric semi-Riemannian manifolds are defined and studied. Some results for ${\cal T} _{\!a}$-pseudosymmetric $(N(k),\xi )$-semi-Riemannian manifolds are given. In Section~\ref{sect-TTSP}, $({\cal T}_{\!a},{\cal T}_{\!b},S^{\ell })$ -pseudosymmetric semi-Riemannian manifolds are defined and studied. Some results for $(R,{\cal T}_{\!a},S^{\ell })$-pseudosymmetric $(N(k),\xi )$ -semi-Riemannian manifolds are given. In particular, some results for $(R, {\cal T}_{\!a},S)$-pseudosymmetric $(N(k),\xi )$-semi-Riemannian manifolds are obtained. In Section~\ref{sect-TSP}, the definition of $({\cal T} _{\!a},S_{{\cal T}_{b}})$-pseudosymmetric semi-Riemannian manifold is given. $({\cal T}_{\!a},S_{{\cal T}_{b}})$-pseudosymmetric $(N(k),\xi )$ -semi-Riemannian manifolds are classified. It is proved that a $(R,S_{{\cal T }_{\!a}})$-pseudosymmetric $(N(k),\xi )$-semi-Riemannian manifold is either Einstein or $L=k$ under an algebraic condition. Some results for $({\cal T} _{\!a},S)$-pseudosymmetric $(N(k),\xi )$-semi-Riemannian manifolds are obtained. In the last section, the definition of $(T_{\!a},S_{{\cal T} _{\!b}},S^{\ell })$-pseudosymmetric semi-Riemannian manifold is given. $ (T_{\!a},S_{{\cal T}_{\!b}},S^{\ell })$-pseudosymmetric $(N(k),\xi )$ -semi-Riemannian manifolds are classified. \section{Preliminaries\label{sect-GCT}} \begin{defn} \label{defn-GCT} In an $n$-dimensional semi-Riemannian manifold $\left( M,g\right) $, {\em ${\cal T}$-curvature tensor} {\rm {\cite{Tripathi-Gupta}} }is a tensor of type $(1,3)$, which is defined by \begin{eqnarray} {\cal T}\left( X,Y\right) Z &=&a_{0}\,R\left( X,Y\right) Z \nonumber \\ &&+\ a_{1}\,S\left( Y,Z\right) X+a_{2}\,S\left( X,Z\right) Y+a_{3}\,S(X,Y)Z \nonumber \\ &&+\ a_{4}\,g\left( Y,Z\right) QX+a_{5}\,g\left( X,Z\right) QY+a_{6}\,g(X,Y)QZ \nonumber \\ &&+\ a_{7}\,r\left( g\left( Y,Z\right) X-g\left( X,Z\right) Y\right) , \label{eq-GCT} \end{eqnarray} where $a_{0},\ldots ,a_{7}$ are real numbers; and $R$, $S$, $Q$ and $r$ are the curvature tensor, the Ricci tensor, the Ricci operator and the scalar curvature respectively. \end{defn} In particular, the ${\cal T}$-curvature tensor is reduced to \begin{enumerate} \item the{\em \ curvature tensor} $R$ if \[ a_{0}=1,\quad a_{1}=\cdots =a_{7}=0, \] \item the {\em quasi-conformal curvature tensor} ${\cal C}_{\ast }$ \cite {Yano-Sawaki-68} if \[ a_{1}=-\,a_{2}=a_{4}=-\,a_{5},\quad a_{3}=a_{6}=0,\quad a_{7}=-\,\frac{1}{n} \left( \frac{a_{0}}{n-1}+2a_{1}\right) , \] \item the {\em conformal curvature tensor} ${\cal C}$ \cite[p.~90] {Eisenhart-49} if \[ \qquad a_{0}=1,\; a_{1}=-\,a_{2}=a_{4}=-\,a_{5}=-\,\frac{1}{n-2},\; a_{3}=a_{6}=0,\; a_{7}=\frac{1}{(n-1)(n-2)}, \] \item the {\em conharmonic curvature tensor} ${\cal L}$ \cite{Ishii-57} if \[ a_{0}=1,\quad a_{1}=-\,a_{2}=a_{4}=-\,a_{5}=-\,\frac{1}{n-2},\,\quad a_{3}=a_{6}=0,\quad a_{7}=0, \] \item the {\em concircular curvature tensor} ${\cal V}$ (\cite{Yano-40}, \cite[p. 
87]{Yano-Bochner-53}) if \[ a_{0}=1,\quad a_{1}=a_{2}=a_{3}=a_{4}=a_{5}=a_{6}=0,\quad a_{7}=-\,\frac{1}{ n(n-1)}, \] \item the {\em pseudo-projective curvature tensor }${\cal P}_{\ast }$ \cite {Prasad-2002} if \[ a_{1}=-\,a_{2},\quad a_{3}=a_{4}=a_{5}=a_{6}=0,\quad a_{7}=-\,\frac{1}{n} \left( \frac{a_{0}}{n-1}+a_{1}\right) , \] \item the {\em projective curvature tensor} ${\cal P}$ \cite[p. 84] {Yano-Bochner-53} if \[ a_{0}=1,\quad a_{1}=-\,a_{2}=-\,\frac{1}{(n-1)}\text{,\quad } a_{3}=a_{4}=a_{5}=a_{6}=a_{7}=0, \] \item the ${\cal M}${\em -projective curvature tensor }\cite {Pokhariyal-Mishra-71} if \[ a_{0}=1,\quad a_{1}=-\,a_{2}=a_{4}=-\,a_{5}=-\frac{1}{2(n-1)},\quad a_{3}=a_{6}=a_{7}=0, \] \item the ${\cal W}_{0}${\em -curvature tensor} \cite[Eq. (1.4)] {Pokhariyal-Mishra-71} if \[ a_{0}=1,\quad a_{1}=-\,a_{5}=-\,\frac{1}{(n-1)},\quad a_{2}=a_{3}=a_{4}=a_{6}=a_{7}=0, \] \item the ${\cal W}_{0}^{\ast }${\em -curvature tensor} \cite[Eq. (2.1)] {Pokhariyal-Mishra-71} if \[ a_{0}=1,\quad a_{1}=-\,a_{5}=\frac{1}{(n-1)},\quad a_{2}=a_{3}=a_{4}=a_{6}=a_{7}=0, \] \item the ${\cal W}_{1}${\em -curvature tensor} \cite{Pokhariyal-Mishra-71} if \[ a_{0}=1,\quad a_{1}=-\,a_{2}=\frac{1}{(n-1)},\quad a_{3}=a_{4}=a_{5}=a_{6}=a_{7}=0, \] \item the ${\cal W}_{1}^{\ast }${\em -curvature tensor} \cite {Pokhariyal-Mishra-71} if \[ a_{0}=1,\quad a_{1}=-\,a_{2}=-\,\frac{1}{(n-1)},\quad a_{3}=a_{4}=a_{5}=a_{6}=a_{7}=0, \] \item the ${\cal W}_{2}${\em -curvature tensor} \cite{Pokhariyal-Mishra-70} if \[ a_{0}=1,\quad a_{4}=-\,a_{5}=-\,\frac{1}{(n-1)},\quad a_{1}=a_{2}=a_{3}=a_{6}=a_{7}=0, \] \item the ${\cal W}_{3}${\em -curvature tensor} \cite{Pokhariyal-Mishra-71} if \[ a_{0}=1,\quad a_{2}=-\,a_{4}=-\,\frac{1}{(n-1)},\quad a_{1}=a_{3}=a_{5}=a_{6}=a_{7}=0, \] \item the ${\cal W}_{4}${\em -curvature tensor} \cite{Pokhariyal-Mishra-71} if \[ a_{0}=1,\quad a_{5}=-\,a_{6}=\frac{1}{(n-1)},\quad a_{1}=a_{2}=a_{3}=a_{4}=a_{7}=0, \] \item the ${\cal W}_{5}${\em -curvature tensor} \cite{Pokhariyal-82} if \[ a_{0}=1,\quad a_{2}=-\,a_{5}=-\,\frac{1}{(n-1)},\quad a_{1}=a_{3}=a_{4}=a_{6}=a_{7}=0, \] \item the ${\cal W}_{6}${\em -curvature tensor} \cite{Pokhariyal-82} if \[ a_{0}=1,\quad a_{1}=-\,a_{6}=-\,\frac{1}{(n-1)},\quad a_{2}=a_{3}=a_{4}=a_{5}=a_{7}=0, \] \item the ${\cal W}_{7}${\em -curvature tensor} \cite{Pokhariyal-82} if \[ a_{0}=1,\quad a_{1}=-\,a_{4}=-\,\frac{1}{(n-1)},\quad a_{2}=a_{3}=a_{5}=a_{6}=a_{7}=0, \] \item the ${\cal W}_{8}${\em -curvature tensor} \cite{Pokhariyal-82} if \[ a_{0}=1,\quad a_{1}=-\,a_{3}=-\,\frac{1}{(n-1)},\quad a_{2}=a_{4}=a_{5}=a_{6}=a_{7}=0, \] \item the ${\cal W}_{9}${\em -curvature tensor} \cite{Pokhariyal-82} if \[ a_{0}=1,\quad a_{3}=-\,a_{4}=\frac{1}{(n-1)},\quad a_{1}=a_{2}=a_{5}=a_{6}=a_{7}=0. \] \end{enumerate} Denoting \[ {\cal T}\left( X,Y,Z,V\right) =g({\cal T}\left( X,Y\right) Z,V), \] we write the curvature tensor ${\cal T}$ in its $\left( 0,4\right) $ form as follows. \begin{eqnarray} {\cal T}\left( X,Y,Z,V\right) &=&a_{0}\,R\left( X,Y,Z,V\right) \nonumber \\ &&+\ a_{1}\,S\left( Y,Z\right) g\left( X,V\right) +a_{2}\,S\left( X,Z\right) g\left( Y,V\right) \nonumber \\ &&+\ a_{3}\,S\left( X,Y\right) g\left( Z,V\right) +a_{4}\,S\left( X,V\right) g\left( Y,Z\right) \nonumber \\ &&+\ a_{5}\,S\left( Y,V\right) g\left( X,Z\right) +a_{6}\,S\left( Z,V\right) g\left( X,Y\right) \nonumber \\ &&+\ a_{7}\,r\left( g\left( Y,Z\right) g\left( X,V\right) -g\left( X,Z\right) g\left( Y,V\right) \right) . 
\label{eq-gen-cur-1}
\end{eqnarray}

\begin{notation}
Whenever necessary, we will refer to the ${\cal T}$-curvature tensor as the ${\cal T}_{a}$-curvature tensor. If $a_{0},\ldots ,a_{7}$ are replaced by $b_{0},\ldots ,b_{7}$ in the definition of the ${\cal T}$-curvature tensor, then we will call the resulting tensor the ${\cal T}_{b}$-curvature tensor.
\end{notation}

\section{$(N(k),\protect\xi )$-semi-Riemannian manifolds\label{sect-NK}}

Let $(M,g)$ be an $n$-dimensional semi-Riemannian manifold \cite{ONeill-83} equipped with a semi-Riemannian metric $g$. If ${\rm index}(g)=1$, then $g$ is a Lorentzian metric and $(M,g)$ is a Lorentzian manifold \cite{Beem-Ehrlich-81}. If $g$ is positive definite, then $g$ is a Riemannian metric and $(M,g)$ is a Riemannian manifold. The $k${\em -nullity distribution} \cite{Tanno-88} of $(M,g)$ for a real number $k$ is the distribution
\[
N(k):p\mapsto N_{p}(k)=\left\{ Z\in T_{p}M:R(X,Y)Z=k(g(Y,Z)X-g(X,Z)Y)\right\} .
\]
Let $\xi $ be a non-null unit vector field in $(M,g)$ and $\eta $ its associated $1$-form. Thus
\[
g(\xi ,\xi )=\varepsilon ,
\]
where $\varepsilon =1$ or $-\,1$ according as $\xi $ is spacelike or timelike, and
\begin{equation}
\eta \left( X\right) =\varepsilon g\left( X,\xi \right) ,\qquad \eta \left( \xi \right) =1. \label{eq-cond}
\end{equation}
\begin{defn}
An $(N(k),\xi )${\em -semi-Riemannian manifold} {\rm \cite{TG}} consists of a semi-Riemannian manifold $(M,g)$, a $k$-nullity distribution $N(k)$ on $(M,g)$ and a non-null unit vector field $\xi $ in $(M,g)$ belonging to $N(k)$.
\end{defn}
Now, we intend to give some examples of $(N(k),\xi )$-semi-Riemannian manifolds. For this purpose, we collect some definitions from the geometry of almost contact manifolds and almost paracontact manifolds as follows:

\subsection*{Almost contact manifolds}

Let $M$ be a smooth manifold of dimension $n=2m+1$. Let $\varphi $, $\xi $ and $\eta $ be tensor fields of type $(1,1)$, $(1,0)$ and $(0,1)$, respectively. If $\varphi $, $\xi $ and $\eta $ satisfy the conditions
\begin{equation}
\varphi ^{2}=-I+\eta \otimes \xi , \label{eq-str-1}
\end{equation}
\begin{equation}
\eta (\xi )=1, \label{eq-str-2}
\end{equation}
where $I$ denotes the identity transformation, then $M$ is said to have an almost contact structure $\left( \varphi ,\xi ,\eta \right) $. A manifold $M$ together with an almost contact structure is called an {\em almost contact manifold} \cite{Blair-76}. Let $g$ be a semi-Riemannian metric on $M$ such that
\begin{equation}
g\left( \varphi X,\varphi Y\right) =g\left( X,Y\right) -\varepsilon \eta (X)\eta \left( Y\right) , \label{eq-str-3}
\end{equation}
where $\varepsilon =\pm 1$. Then $(M,g)$ is an $\left( \varepsilon \right) $-{\em almost contact metric manifold} \cite{Duggal-90-IJMMS} equipped with an $\left( \varepsilon \right) ${\em -almost contact metric structure} $(\varphi ,\xi ,\eta ,g,\varepsilon )$. In particular, if the metric $g$ is positive definite, then an $(\varepsilon )$-almost contact metric manifold is the usual {\em almost contact metric manifold} \cite{Blair-76}. From (\ref{eq-str-3}), it follows that
\begin{equation}
g\left( X,\varphi Y\right) =-g\left( \varphi X,Y\right) \label{eq-str-5}
\end{equation}
and
\begin{equation}
g\left( X,\xi \right) =\varepsilon \eta (X). \label{eq-str-6}
\end{equation}
From (\ref{eq-str-2}) and (\ref{eq-str-6}), we have
\begin{equation}
g\left( \xi ,\xi \right) =\varepsilon .
\label{eq-str-7}
\end{equation}
In an $\left( \varepsilon \right) $-almost contact metric manifold, the fundamental $2$-form $\Phi $ is defined by
\begin{equation}
\Phi (X,Y)=g(X,\varphi Y). \label{eq-str-4}
\end{equation}
An $\left( \varepsilon \right) $-almost contact metric manifold with $\Phi =d\eta $ is an $\left( \varepsilon \right) ${\em -contact metric manifold} \cite{Takahashi-69}. For $\varepsilon =1$ and $g$ Riemannian, $M$ is the usual {\em contact metric manifold} \cite{Blair-76}. A contact metric manifold with $\xi \in N(k)$ is called an {\em $N(k)$-contact metric manifold} \cite{Blair-Kim-Tripathi-05}. An $\left( \varepsilon \right) $-almost contact metric structure $(\varphi ,\xi ,\eta ,g,\varepsilon )$ is called an $\left( \varepsilon \right) ${\em -Sasakian structure} if
\[
\left( \nabla _{X}\varphi \right) Y=g(X,Y)\xi -\varepsilon \eta \left( Y\right) X,
\]
where $\nabla $ is the Levi-Civita connection with respect to the metric $g$. A manifold endowed with an $\left( \varepsilon \right) $-Sasakian structure is called an $\left( \varepsilon \right) ${\em -Sasakian manifold} \cite{Takahashi-69}. For $\varepsilon =1$ and $g$ Riemannian, $M$ is the usual {\em Sasakian manifold} \cite{Sasaki-60,Blair-76}. An almost contact metric manifold is a {\em Kenmotsu manifold} \cite{Kenmotsu-72} if
\begin{equation}
\left( \nabla _{X}\varphi \right) Y=g(\varphi X,Y)\xi -\eta \left( Y\right) \varphi X. \label{eq-str-8}
\end{equation}
By (\ref{eq-str-8}), we have
\begin{equation}
\nabla _{X}\xi =X-\eta (X)\xi . \label{eq-str-9}
\end{equation}

\subsection*{Almost paracontact manifolds}

Let $M$ be an $n$-dimensional {\em almost paracontact manifold} \cite{Sato-76} equipped with an {\em almost paracontact structure} $\left( \varphi ,\xi ,\eta \right) $, where $\varphi $, $\xi $ and $\eta $ are tensor fields of type $(1,1)$, $(1,0)$ and $(0,1)$, respectively, which satisfy the conditions
\begin{equation}
\varphi ^{2}=I-\eta \otimes \xi , \label{eq-str-11}
\end{equation}
\begin{equation}
\eta (\xi )=1. \label{eq-str-12}
\end{equation}
Let $g$ be a semi-Riemannian metric on $M$ such that
\begin{equation}
g\left( \varphi X,\varphi Y\right) =g\left( X,Y\right) -\varepsilon \eta (X)\eta \left( Y\right) , \label{eq-str-13}
\end{equation}
where $\varepsilon =\pm 1$. Then $\left( M,g\right) $ is an $\left( \varepsilon \right) ${\em -almost paracontact metric manifold} equipped with an $\left( \varepsilon \right) ${\em -almost paracontact metric structure} $(\varphi ,\xi ,\eta ,g,\varepsilon )$. In particular, if ${\rm index}(g)=1$, then an $(\varepsilon )$-almost paracontact metric manifold is said to be a {\em Lorentzian almost paracontact manifold}. If the metric $g$ is positive definite, then an $(\varepsilon )$-almost paracontact metric manifold is the usual {\em almost paracontact metric manifold} \cite{Sato-76}. The equation (\ref{eq-str-13}) is equivalent to
\begin{equation}
g\left( X,\varphi Y\right) =g\left( \varphi X,Y\right) \label{eq-str-14}
\end{equation}
along with
\begin{equation}
g\left( X,\xi \right) =\varepsilon \eta (X). \label{eq-str-15}
\end{equation}
From (\ref{eq-str-12}) and (\ref{eq-str-15}), we have
\begin{equation}
g\left( \xi ,\xi \right) =\varepsilon .
\label{eq-str-16}
\end{equation}
An $\left( \varepsilon \right) $-almost paracontact metric structure is called an $\left( \varepsilon \right) ${\em -para-Sasakian structure} \cite{TKYK-09} if
\begin{equation}
\left( \nabla _{X}\varphi \right) Y=-\,g(\varphi X,\varphi Y)\xi -\varepsilon \eta \left( Y\right) \varphi ^{2}X, \label{eq-str-17}
\end{equation}
where $\nabla $ is the Levi-Civita connection with respect to the metric $g$. A manifold endowed with an $\left( \varepsilon \right) $-para-Sasakian structure is called an $\left( \varepsilon \right) ${\em -para-Sasakian manifold} \cite{TKYK-09}. For $\varepsilon =1$ and $g$ Riemannian, $M$ is the usual para-Sasakian manifold \cite{Sato-76}. For $\varepsilon =-1$, $g$ Lorentzian and $\xi $ replaced by $-\xi $, $M$ becomes a Lorentzian para-Sasakian manifold \cite{Matsumoto-89}.
\begin{ex}
{\rm \cite{TG}} The following are some well-known examples of $(N(k),\xi )$-semi-Riemannian manifolds:
\begin{enumerate}
\item An $N(k)$-contact metric manifold {\rm \cite{Blair-Kim-Tripathi-05}} is an $(N(k),\xi )$-Riemannian manifold.

\item A Sasakian manifold {\rm \cite{Sasaki-60}} is an $(N(1),\xi )$-Riemannian manifold.

\item A Kenmotsu manifold {\rm \cite{Kenmotsu-72}} is an $(N(-1),\xi )$-Riemannian manifold.

\item An $(\varepsilon )$-Sasakian manifold {\rm \cite{Takahashi-69}} is an $(N(\varepsilon ),\xi )$-semi-Riemannian manifold.

\item A para-Sasakian manifold {\rm \cite{Sato-76}} is an $(N(-1),\xi )$-Riemannian manifold.

\item An $(\varepsilon )$-para-Sasakian manifold {\rm \cite{TKYK-09}} is an $(N(-\varepsilon ),\xi )$-semi-Riemannian manifold.
\end{enumerate}
\end{ex}
In an $n$-dimensional $\left( N(k),\xi \right) $-semi-Riemannian manifold $(M,g)$, it is easy to verify that
\begin{equation}
R(X,Y)\xi =\varepsilon k(\eta (Y)X-\eta (X)Y), \label{eq-curvature}
\end{equation}
\begin{equation}
R(\xi ,X)Y=\varepsilon k(\varepsilon g(X,Y)\xi -\eta (Y)X), \label{eq-curvature-2}
\end{equation}
\begin{equation}
R(\xi ,X)\xi =\varepsilon k(\eta (X)\xi -X), \label{eq-curvature-3}
\end{equation}
\begin{equation}
R\left( X,Y,Z,\xi \right) =\varepsilon k(\,\eta \left( X\right) g\left( Y,Z\right) -\eta \left( Y\right) g\left( X,Z\right) ), \label{eq-eps-PS-R(X,Y,Z,xi)}
\end{equation}
\begin{equation}
\eta \left( R\left( X,Y\right) Z\right) =k(\eta \left( X\right) g\left( Y,Z\right) -\eta \left( Y\right) g\left( X,Z\right) ), \label{eq-eps-PS-eta(R(X,Y),Z)}
\end{equation}
\begin{equation}
S(X,\xi )=\varepsilon k(n-1)\eta (X), \label{eq-ricci}
\end{equation}
\begin{equation}
Q\xi =k(n-1)\xi , \label{eq-Q}
\end{equation}
\begin{equation}
S(\xi ,\xi )=\varepsilon k(n-1), \label{eq-S-xi-xi}
\end{equation}
\begin{equation}
\eta (QX)=\varepsilon g(QX,\xi )=\varepsilon S(X,\xi )=k(n-1)\eta (X). \label{eq-eta-QX}
\end{equation}
Moreover, define
\begin{equation}
S^{\ell }(X,Y)=g(Q^{\ell }X,Y)=S(Q^{\ell -1}X,Y), \label{eq-S-p}
\end{equation}
where $\ell =0,1,2,\ldots $ and $S^{0}=g$. Using (\ref{eq-eta-QX}) in (\ref{eq-S-p}), we get
\begin{equation}
S^{\ell }(X,\xi )=\varepsilon k^{\ell }(n-1)^{\ell }\eta (X). \label{eq-Sp-QX-xi}
\end{equation}
Now, we give the following lemma.
\begin{lem}
\label{GCT} {\rm \cite{TG}} Let $M$ be an $n$-dimensional $\left( N(k),\xi \right) $-semi-Riemannian manifold.
Then \begin{eqnarray} {\cal T}_{a}(X,Y)\xi &=&(-\varepsilon ka_{0}+\varepsilon k(n-1)a_{2}-\varepsilon a_{7}\,r)\eta (X)Y \nonumber \\ &&+\,(\varepsilon ka_{0}+\varepsilon k(n-1)a_{1}+\varepsilon a_{7}\,r)\,\eta (Y)X \nonumber \\ &&+\,a_{3}\,S(X,Y)\xi +\varepsilon a_{4}\,\eta (Y)QX \nonumber \\ &&+\,\varepsilon a_{5}\eta (X)QY+k(n-1)a_{6}g(X,Y)\xi , \label{eq-X-Y-xi} \end{eqnarray} \begin{eqnarray} {\cal T}_{\!a}(\xi ,X)\xi &=&(-\varepsilon ka_{0}\,+\,\varepsilon k(n-1)a_{2}-\varepsilon a_{7}\,r)X+\varepsilon a_{5}\,QX \nonumber \\ &&+\left\{ \varepsilon ka_{0}+\varepsilon k(n-1)a_{1}+\varepsilon k(n-1)a_{3}\right. \nonumber \\ &&\quad \quad +\left. \varepsilon k(n-1)a_{4}+\varepsilon k(n-1)a_{6}\,+\varepsilon a_{7}\,r\right\} \eta (X)\xi , \label{eq-xi-X-xi} \end{eqnarray} \begin{eqnarray} {\cal T}_{\!a}(\xi ,Y)Z &=&(ka_{0}+k(n-1)a_{4}+a_{7}r)g\left( Y,Z\right) \xi \, \nonumber \\ &&+\,a_{1}\,S\left( Y,Z\right) \xi +\varepsilon k(n-1)a_{3}\eta (Y)Z \nonumber \\ &&+\,\varepsilon a_{5}\,\eta (Z)QY+\varepsilon a_{6}\,\eta (Y)QZ \nonumber \\ &&+\,(-\varepsilon ka_{0}+\varepsilon k(n-1)a_{2}\,-\varepsilon a_{7}\,r)\eta (Z)Y, \label{eq-xi-Y-Z} \end{eqnarray} \begin{eqnarray} \eta ({\cal T}_{\!a}\left( X,Y)\xi \right) &=&\varepsilon k(n-1)(a_{1}+a_{2}+a_{4}+a_{5})\eta (X)\eta (Y) \nonumber \\ &&+\,a_{3}\,S(X,Y)+\,k(n-1)a_{6}g(X,Y), \label{eq-eta-xi-X-Y} \end{eqnarray} \begin{eqnarray} {\cal T}_{\!a}\left( X,Y,\xi ,V\right) &=&(-\varepsilon ka_{0}\,+\varepsilon k(n-1)a_{2}\,-\varepsilon a_{7}\,r)\eta (X)g(Y,V) \nonumber \\ &&+\ (\varepsilon ka_{0}+\varepsilon k(n-1)a_{1}+\varepsilon a_{7}\,r)\,\eta (Y)g(X,V) \nonumber \\ &&+\,\varepsilon a_{3}\,S(X,Y)\eta (V)+\varepsilon a_{4}\,\eta (Y)S(X,V) \nonumber \\ &&+\,\varepsilon a_{5}\,\eta (X)S(Y,V)+\varepsilon k(n-1)a_{6}\,g(X,Y)\eta (V), \label{eq-X-Y-xi-V} \end{eqnarray} \begin{eqnarray} {\cal T}_{\!a}(X,\xi )\xi &=&\left\{ -\varepsilon ka_{0}\,+\varepsilon k(n-1)a_{2}\,+\varepsilon k(n-1)a_{3}\right. \nonumber \\ &&\quad +\left. \varepsilon k(n-1)a_{5}+\varepsilon k(n-1)a_{6}\,\,-\varepsilon a_{7}\,r\right\} \eta (X)\xi \nonumber \\ &&+\ (\varepsilon ka_{0}+\varepsilon k(n-1)a_{1}+\varepsilon a_{7}\,r)\,X+\varepsilon a_{4}\,QX, \label{eq-X-xi-xi} \end{eqnarray} \begin{eqnarray} S_{{\cal T}_{\!a}}(X,\xi ) &=&\left\{ \varepsilon k(n-1)(a_{0}+na_{1}+a_{2}+a_{3}+a_{5}+a_{6})\right. \nonumber \\ &&\left. +\,\varepsilon r(a_{4}+(n-1)a_{7})\right\} \eta (X), \label{eq-ric-T1} \end{eqnarray} \begin{eqnarray} S_{{\cal T}_{\!a}}(\xi ,\xi ) &=&\varepsilon k(n-1)(a_{0}+na_{1}+a_{2}+a_{3}+a_{5}+a_{6}) \nonumber \\ &&+\,\varepsilon r(a_{4}+(n-1)a_{7}). \label{eq-ric-T2} \end{eqnarray} \end{lem} \begin{rem} The relations {\rm (\ref{eq-curvature}) -- (\ref{eq-ric-T2})} are true for \begin{enumerate} \item a $N(k)$-contact metric manifold {\rm \cite{Blair-Kim-Tripathi-05}}\ ($ \varepsilon =1$), \item a Sasakian manifold {\rm \cite{Sasaki-60}} ($k=1$, $\varepsilon =1$), \item a Kenmotsu manifold {\rm \cite{Kenmotsu-72}} ($k=-1$, $\varepsilon =1$ ), \item an $(\varepsilon )$-Sasakian manifold {\rm \cite{Takahashi-69}} ($ k=\varepsilon $, $\varepsilon k=1$), \item a para-Sasakian manifold {\rm \cite{Sato-76}} ($k=-1$, $\varepsilon =1$ ), and \item an $(\varepsilon )$-para-Sasakian manifold {\rm \cite{TKYK-09}} ($ k=-\,\varepsilon $, $\varepsilon k=-\,1$). \end{enumerate} \noindent Even, all the relations and results of this paper will be true for the above six cases. 
\end{rem} \section{$({\cal T}_{\!a},{\cal T}_{\!b})$-pseudosymmetry\label{sect-TTP}} \begin{defn-new} A semi-Riemannian manifold $\left( M,g\right) $ is said to be $({\cal T} _{\!a},{\cal T}_{\!b})$-pseudosymmetric if \begin{equation} {\cal T}_{\!a}\cdot {\cal T}_{\!b}=L_{g}Q(g,{\cal T}_{\!b}), \label{eq-T.T=Lg} \end{equation} where $L_{g}$ is some smooth function on $M$. In particular, it is said to be $(R,{\cal T}_{\!a})$-pseudosymmetric or, in brief, ${\cal T}_{\!a}$ -pseudosymmetric if \begin{equation} R\cdot {\cal T}_{\!a}=L_{g}Q(g,{\cal T}_{\!a}) \label{eq-R.T=Lg} \end{equation} holds on the set \[ {\cal U}_{g}=\left\{ x\in M\,:\left( {\cal T}_{\!a}\right) _{x}\not=0\;{\rm at}\;x\right\} , \] where $L_{g}$ is some smooth function on ${\cal U}_{g}$. In particular, if in (\ref{eq-R.T=Lg}), {\em ${\cal T}_{\!a}$} is equal to $R$, ${\cal C} _{\ast }$, ${\cal C}$, ${\cal L}$, ${\cal V}$, ${\cal P}_{\ast }$, ${\cal P}$ , ${\cal M}$, ${\cal W}_{0}$, ${\cal W}_{0}^{\ast }$, ${\cal W}_{1}$, ${\cal W}_{1}^{\ast }$, ${\cal W}_{2}$, ${\cal W}_{3}$, ${\cal W}_{4}$, ${\cal W} _{5}$, ${\cal W}_{6}$, ${\cal W}_{7}$, ${\cal W}_{8}$, ${\cal W}_{9}$, then it becomes pseudosymmetric, quasi-conformal pseudosymmetric, Weyl pseudosymmetric, conharmonic pseudosymmetric, concircular pseudosymmetric, pseudo-projective pseudosymmetric, projective pseudosymmetric, ${\cal M}$ -pseudosymmetric, ${\cal W}_{0}$-pseudosymmetric, ${\cal W}_{0}^{\ast }$ -pseudosymmetric, ${\cal W}_{1}$-pseudosymmetric, ${\cal W}_{1}^{\ast }$ -pseudosymmetric, ${\cal W}_{2}$-pseudosym\allowbreak metric, ${\cal W}_{3}$ -pseudosymmetric, ${\cal W}_{4}$-pseudosymmetric, ${\cal W}_{5}$ -pseudosymmetric, ${\cal W}_{6}$-pseudosymmetric, ${\cal W}_{7}$ -pseu\allowbreak dosymmetric, ${\cal W}_{8}$-pseudosymmetric, ${\cal W}_{9}$ -pseudosymmetric, respectively. \end{defn-new} \begin{th} \label{th-T-T-11} Let $M$ be an $n$-dimensional $({\cal T}_{\!a},{\cal T} _{\!b})$-pseudosymmetric $(N(k),\xi )$-semi-Riemannian manifold. 
Then \begin{eqnarray} &&\varepsilon b_{0}(ka_{0}+\varepsilon k(n-1)a_{4}+a_{7}r)R(U,V,W,X)+\varepsilon a_{1}b_{0}S(X,R(U,V)W) \nonumber \\ &&-2k(n-1)a_{3}(kb_{0}+k(n-1)b_{4}+b_{7}r)\eta (X)\eta (U)g(V,W) \nonumber \\ &&-2k(n-1)a_{3}(-kb_{0}+k(n-1)b_{5}-b_{7}r)\eta (X)\eta (V)g(U,W) \nonumber \\ &&+\varepsilon a_{1}b_{4}S^{2}(X,U)g(V,W)+\varepsilon a_{1}b_{5}S^{2}(X,V)g(U,W) \nonumber \\ &&+\varepsilon a_{1}b_{6}S^{2}(X,W)g(U,V)-a_{5}(b_{1}+b_{3})S^{2}(X,V)\eta (U)\eta (W) \nonumber \\ &&-a_{5}(b_{1}+b_{2})S^{2}(X,W)\eta (U)\eta (V)-a_{5}(b_{2}+b_{3})S^{2}(X,U)\eta (V)\eta (W) \nonumber \\ &&-2a_{6}b_{1}S^{2}(V,W)\eta (X)\eta (U)-2a_{6}b_{2}S^{2}(U,W)\eta (X)\eta (V) \nonumber \\ &&-2a_{6}b_{3}S^{2}(U,V)\eta (X)\eta (W)-2k^{2}(n-1)a_{3}b_{6}g(U,V)\eta (X)\eta (W) \nonumber \\ &&-2\left( k(n-1)a_{3}b_{1}+a_{6}(kb_{0}+k(n-1)b_{4}+b_{7}r)\right) \eta (X)\eta (U)S(V,W) \nonumber \\ &&-2\left( k(n-1)a_{3}b_{2}+a_{6}(-kb_{0}+k(n-1)b_{5}-b_{7}r)\right) \eta (X)\eta (V)S(U,W) \nonumber \\ &&-2k(n-1)(a_{3}b_{3}+a_{6}b_{6})S(U,V)\eta (X)\eta (W) \nonumber \\ &&+\varepsilon \left( b_{4}(ka_{0}+k(n-1)a_{4}+a_{7}r)-a_{1}(kb_{0}+k(n-1)b_{4})\right) S(X,U)g(V,W) \nonumber \\ &&+\varepsilon \left( b_{5}(ka_{0}+k(n-1)a_{4}+a_{7}r)-a_{1}(-kb_{0}+k(n-1)b_{5})\right) S(X,V)g(U,W) \nonumber \\ &&+\varepsilon b_{6}(ka_{0}+k(n-1)(a_{4}-a_{1})+a_{7}r)S(X,W)g(U,V) \nonumber \\ &&-\,\varepsilon (kb_{0}+k(n-1)b_{4})(ka_{0}+k(n-1)a_{4}+a_{7}r)g(X,U)g(V,W) \nonumber \\ &&-\,\varepsilon (-kb_{0}+k(n-1)b_{5})(ka_{0}+k(n-1)a_{4}+a_{7}r)g(U,W)g(X,V) \nonumber \\ &&-\,\varepsilon k(n-1)b_{6}(ka_{0}+k(n-1)a_{4}+a_{7}r)g(X,W)g(U,V) \nonumber \\ &&-\,k(n-1)\left( (b_{2}+b_{3})(ka_{0}+k(n-1)a_{4}+a_{7}r)\right. \nonumber \\ &&\left. +(a_{2}+a_{4})(-kb_{0}+k(n-1)(b_{5}+b_{6})-b_{7}r)\right) g(X,U)\eta (V)\eta (W) \nonumber \\ &&-\,k(n-1)\left( (b_{1}+b_{3})(ka_{0}+k(n-1)a_{4}+a_{7}r)\right. \nonumber \\ &&\left. +(a_{2}+a_{4})(kb_{0}+k(n-1)(b_{4}+b_{6})+b_{7}r)\right) g(X,V)\eta (U)\eta (W) \nonumber \\ &&-\,\left( (b_{1}+b_{3})(-ka_{0}+k(n-1)(a_{1}+a_{2})-a_{7}r)\right. \nonumber \\ &&\left. +(a_{1}+a_{5})(kb_{0}+k(n-1)(b_{4}+b_{6})+b_{7}r)\right) S(X,V)\eta (U)\eta (W) \nonumber \\ &&-\,\left( (b_{2}+b_{3})(-ka_{0}+k(n-1)(a_{1}+a_{2})-a_{7}r)\right. \nonumber \\ &&\left. +(a_{1}+a_{5})(kb_{0}+k(n-1)(b_{5}+b_{6})+b_{7}r)\right) S(X,U)\eta (V)\eta (W) \nonumber \\ &&-\,\left( (b_{1}+b_{2})(-ka_{0}+k(n-1)(a_{1}+a_{2})-a_{7}r)\right. \nonumber \\ &&\left. +k(n-1)(b_{4}+b_{5})(a_{1}+a_{5})\right) S(X,W)\eta (U)\eta (V) \nonumber \\ &&-\,k(n-1)\left( k(n-1)(b_{4}+b_{5})(a_{2}+a_{4})\right. \nonumber \\ &&\left. +(b_{1}+b_{2})(ka_{0}+k(n-1)a_{4}+a_{7}r)\right) g(X,W)\eta (U)\eta (V) \nonumber \\ &=&\,L_{g}(\varepsilon b_{0}R(U,V,W,X)+\varepsilon b_{4}S(X,U)g(V,W)+\varepsilon b_{5}S(X,V)g(U,W) \nonumber \\ &&+\varepsilon b_{6}S(X,W)g(U,V)-\varepsilon k(n-1)b_{6}g(U,V)g(X,W) \nonumber \\ &&-k(n-1)(b_{2}+b_{3})g(X,U)\eta (V)\eta (W) \nonumber \\ &&-k(n-1)(b_{1}+b_{3})g(X,V)\eta (U)\eta (W) \nonumber \\ &&-k(n-1)(b_{1}+b_{2})g(X,W)\eta (U)\eta (V) \nonumber \\ &&+(b_{2}+b_{3})S(X,U)\eta (V)\eta (W) \nonumber \\ &&+(b_{1}+b_{3})S(X,V)\eta (U)\eta (W) \nonumber \\ &&+(b_{1}+b_{2})S(X,W)\eta (U)\eta (V) \nonumber \\ &&-\varepsilon (-kb_{0}+k(n-1)b_{5})g(U,W)g(X,V) \nonumber \\ &&-\varepsilon (kb_{0}+k(n-1)b_{4})g(V,W)g(X,U)). \label{eq-T-T-2-i} \end{eqnarray} \end{th} \noindent {\bf Proof.} Let $M$ be an $n$-dimensional $({\cal T}_{\!a},{\cal T }_{\!b})$-pseudosymmetric $(N(k),\xi )$-semi-Riemannian manifold. 
Then \begin{equation} {\cal T}_{\!a}(Z,X)\cdot {\cal T}_{\!b}(U,V)W=L_{g}Q(g,{\cal T} _{\!b})(U,V,W;Z,X). \label{eq-txxi-1} \end{equation} Taking $Z=\xi $ in (\ref{eq-txxi-1}), we get \[ {\cal T}_{\!a}(\xi ,X)\cdot {\cal T}_{\!b}(U,V)W=L_{g}Q(g,{\cal T} _{\!b})(U,V,W;\xi ,X), \] which gives \begin{eqnarray*} &&{\cal T}_{\!a}(\xi ,X){\cal T}_{\!b}(U,V)W-{\cal T}_{\!b}({\cal T} _{\!a}(\xi ,X)U,V)W \\ &&-\ {\cal T}_{\!b}(U,{\cal T}_{\!a}(\xi ,X)V)W-{\cal T}_{\!b}(U,V){\cal T} _{\!a}(\xi ,X)W \\ &=&L_{g}((\xi \wedge X){\cal T}_{\!b}(U,V)W-{\cal T}_{\!b}((\xi \wedge X)U,V)W \\ &&-{\cal T}_{\!b}(U,(\xi \wedge X)V)W-{\cal T}_{\!b}(U,V)(\xi \wedge X)W), \end{eqnarray*} that is, \begin{eqnarray} &&{\cal T}_{\!a}(\xi ,X){\cal T}_{\!b}(U,V)W-{\cal T}_{\!b}({\cal T} _{\!a}(\xi ,X)U,V)W \nonumber \\ &&\quad -\ {\cal T}_{\!b}(U,{\cal T}_{\!a}(\xi ,X)V)W-{\cal T}_{\!b}(U,V) {\cal T}_{\!a}(\xi ,X)W \nonumber \\ &=&L_{g}({\cal T}_{\!b}(U,V,W,X)\xi -{\cal T}_{\!b}(U,V,W,\xi )X \nonumber \\ &&-g(X,U){\cal T}_{\!b}(\xi ,V)W+\varepsilon \eta (U){\cal T}_{\!b}(X,V)W \nonumber \\ &&-g(X,V){\cal T}_{\!b}(U,\xi )W+\varepsilon \eta (V){\cal T}_{\!b}(U,X)W \nonumber \\ &&-g(X,W){\cal T}_{\!b}(U,V)\xi +\varepsilon \eta (W){\cal T}_{\!b}(U,V)X). \label{eq-T-T-i} \end{eqnarray} Taking the inner product of (\ref{eq-T-T-i}) with $\xi $, we get \begin{eqnarray} &&{\cal T}_{\!a}(\xi ,X,{\cal T}_{\!b}(U,V)W,\xi )-{\cal T}_{\!b}({\cal T} _{\!a}(\xi ,X)U,V,W,\xi ) \nonumber \\ &&\quad -\ {\cal T}_{\!b}(U,{\cal T}_{\!a}(\xi ,X)V,W,\xi )-{\cal T} _{\!b}(U,V,{\cal T}_{\!a}(\xi ,X)W,\xi ) \nonumber \\ &=&L_{g}(\varepsilon {\cal T}_{\!b}(U,V,W,X)-\varepsilon \eta (X){\cal T} _{\!b}(U,V,W,\xi ) \nonumber \\ &&-g(X,U){\cal T}_{\!b}(\xi ,V,W,\xi )+\varepsilon \eta (U){\cal T} _{\!b}(X,V,W,\xi ) \nonumber \\ &&-g(X,V){\cal T}_{\!b}(U,\xi ,W,\xi )+\varepsilon \eta (V){\cal T} _{\!b}(U,X,W,\xi ) \nonumber \\ &&-g(X,W){\cal T}_{\!b}(U,V,\xi ,\xi )+\varepsilon \eta (W){\cal T} _{\!b}(U,V,X,\xi )). \label{eq-T-T-1-i} \end{eqnarray} By using (\ref{eq-X-Y-xi}),\ldots ,(\ref{eq-X-xi-xi}) in (\ref{eq-T-T-1-i}), we get (\ref{eq-T-T-2-i}). $\blacksquare $ \begin{th} Let $M$ be an $n$-dimensional $({\cal T}_{\!a},{\cal T}_{\!b})$ -pseudosymmetric $(N(k),\xi )$-semi-Riemannian manifold. Then \begin{eqnarray} &&-\varepsilon \left\{ a_{5}b_{0}+na_{5}b_{1}+a_{5}b_{2}+a_{5}b_{6}+a_{5}b_{3}+a_{5}b_{5}\right\} S^{2}(V,W) \nonumber \\ &&+\ (\left\{ (nb_{1}+b_{2}+b_{3}+b_{5}+b_{6}+b_{0})(\varepsilon ka_{0}+\varepsilon b_{7}r)\right. \nonumber \\ &&\qquad -\ \varepsilon k(n-1)(2a_{5}b_{6}+a_{2}b_{3}+a_{1}b_{6}+a_{1}b_{3}+a_{1}b_{5} \nonumber \\ &&\qquad \left. +\ a_{1}b_{1}+a_{1}b_{2}+a_{2}b_{2}+a_{2}b_{6}+na_{2}b_{1}+a_{1}b_{0}+a_{2}b_{0})\right\} \nonumber \\ &&-\ \varepsilon (n-1)a_{1}b_{7}r-\varepsilon na_{5}b_{7}r-\varepsilon b_{4}a_{5}r-\varepsilon a_{1}b_{4}r)S(V,W) \nonumber \\ &&+\ \left\{ -\varepsilon k(n-1)(nb_{1}+b_{2}+b_{3}+b_{5}+b_{6}+b_{0})(a_{7}r+ka_{0}+k(n-1)a_{4}) \right. \nonumber \\ &&\qquad \left. -\ \varepsilon k(n-1)r((n-1)b_{7}a_{2}+(n-1)b_{7}a_{4}+a_{2}b_{4}+a_{4}b_{4})\right\} g(V,W) \nonumber \\ &&+\ (a_{1}+a_{2}+2a_{3}+a_{4}+a_{5}+2a_{6})\left\{ -k^{2}(n-1)^{2}(nb_{1}+b_{2}+b_{3}+b_{5}+b_{6}+b_{0})\right. \nonumber \\ &&\qquad \left. -\ k(n-1)^{2}b_{7}r-k(n-1)b_{4}r\right\} \eta (V)\eta (W) \nonumber \\ &=&L_{g}(\varepsilon (b_{0}+b_{5}+b_{6})S(V,W)+\varepsilon (b_{4}r-k(n-1)(b_{0}+nb_{4}+b_{5}+b_{6}))g(V,W) \nonumber \\ &&+(b_{2}+b_{3})(r-kn(n-1))\eta (V)\eta (W)). 
\label{eq-semi-TS-i} \end{eqnarray} \end{th} \begin{th} \label{GCT-ss-11} Let $M$ be an $n$-dimensional ${\cal T}_{\!a}$ -pseudosymmetric $(N(k),\xi )$-semi-Riemannian manifold. Then \begin{eqnarray} &&\varepsilon a_{0}kR(U,V,W,X)+\varepsilon ka_{4}S(X,U)g(V,W)+\varepsilon ka_{5}S(X,V)g(U,W) \nonumber \\ &&+\,\varepsilon ka_{6}S(X,W)g(U,V)-\varepsilon k^{2}(n-1)a_{6}g(X,W)g(U,V) \nonumber \\ &&-\,\varepsilon k(ka_{0}+k(n-1)a_{4})g(V,W)g(X,U) \nonumber \\ &&-\,\varepsilon k(-ka_{0}+k(n-1)a_{5})g(U,W)g(X,V) \nonumber \\ &&-\,k^{2}(n-1)(a_{2}+a_{3})g(X,U)\eta (V)\eta (W) \nonumber \\ &&-\,k^{2}(n-1)(a_{1}+a_{3})g(X,V)\eta (U)\eta (W) \nonumber \\ &&-\,k^{2}(n-1)(a_{1}+a_{2})g(X,W)\eta (U)\eta (V) \nonumber \\ &&+\,k(a_{2}+a_{3})S(X,U)\eta (V)\eta (W)+k(a_{1}+a_{3})S(X,V)\eta (U)\eta (W) \nonumber \\ &&+\,k(a_{1}+a_{2})S(X,W)\eta (U)\eta (V) \nonumber \\ &&=\, L_{g}(\varepsilon a_{0}R(U,V,W,X)+\varepsilon a_{4}S(X,U)g(V,W)+\varepsilon a_{5}S(X,V)g(U,W) \nonumber \\ &&+\varepsilon a_{6}S(X,W)g(U,V)-\varepsilon k(n-1)a_{6}g(U,V)g(X,W) \nonumber \\ &&-k(n-1)(a_{2}+a_{3})g(X,U)\eta (V)\eta (W) \nonumber \\ &&-k(n-1)(a_{1}+a_{3})g(X,V)\eta (U)\eta (W) \nonumber \\ &&-k(n-1)(a_{1}+a_{2})g(X,W)\eta (U)\eta (V) \nonumber \\ &&+(a_{2}+a_{3})S(X,U)\eta (V)\eta (W) \nonumber \\ &&+(a_{1}+a_{3})S(X,V)\eta (U)\eta (W) \nonumber \\ &&+(a_{1}+a_{2})S(X,W)\eta (U)\eta (V) \nonumber \\ &&-\varepsilon (-ka_{0}+k(n-1)a_{5})g(U,W)g(X,V) \nonumber \\ &&-\varepsilon (ka_{0}+k(n-1)a_{4})g(V,W)g(X,U)). \label{eq-semi-sym-R-i} \end{eqnarray} \end{th} \begin{rem-new} Here two cases arise. First is that when $L_{g}=0$. In this case, it is $ {\cal T}_{\!a}$-semisymmetric. We exclude this case, because it is already studied in \cite{TG}. And the second case is that when $L_{g}\not=0 $. In the following Theorem, we consider the result for $L_{g}\not=0$. \end{rem-new} \begin{th} \label{th-TPS} Let $M$ be an $n$-dimensional ${\cal T}_{\!a}$ -pseudosymmetric $\left( N(k),\xi \right) $-semi-Riemannian manifold such that $a_{0}+a_{5}+a_{6}\not=0$. \begin{enumerate} \item If $a_{0}+a_{2}+a_{3}+na_{4}+a_{5}+a_{6}\not=0$, then either it is Einstein manifold or $L_{g}=k$. \item If $a_{0}+a_{2}+a_{3}+na_{4}+a_{5}+a_{6}=0$, then either it is $\eta $ -Einstein manifold or $L_{g}=k$. \end{enumerate} \end{th} \noindent {\bf Proof.} Let $M$ be an $n$-dimensional ${\cal T}_{\!a}$ -pseudosymmetric $\left( N(k),\xi \right) $-semi-Riemannian manifold. On contracting (\ref{eq-semi-sym-R-i}), we get \begin{eqnarray*} &&k(\varepsilon (a_{0}+a_{5}+a_{6})S(V,W)+\varepsilon (a_{4}r-k(n-1)(a_{0}+na_{4}+a_{5}+a_{6}))g(V,W) \\ &&+(a_{2}+a_{3})(r-kn(n-1))\eta (V)\eta (W)) \\ &=&L_{g}(\varepsilon (a_{0}+a_{5}+a_{6})S(V,W)+\varepsilon (a_{4}r-k(n-1)(a_{0}+na_{4}+a_{5}+a_{6}))g(V,W) \\ &&+(a_{2}+a_{3})(r-kn(n-1))\eta (V)\eta (W)), \end{eqnarray*} which can be rewritten as \begin{eqnarray} &&(L_{g}-k)(\varepsilon (a_{0}+a_{5}+a_{6})S(V,W) \nonumber \\ &&+\varepsilon (a_{4}r-k(n-1)(a_{0}+na_{4}+a_{5}+a_{6}))g(V,W) \nonumber \\ &&+(a_{2}+a_{3})(r-kn(n-1))\eta (V)\eta (W)). \label{eq-TPS} \end{eqnarray} On contracting above equation, we get \[ (L_{g}-k)(a_{0}+a_{2}+a_{3}+na_{4}+a_{5}+a_{6})(r-kn(n-1)). \] {\bf Case 1.} If $a_{0}+a_{2}+a_{3}+na_{4}+a_{5}+a_{6}\not=0$, then either $ L_{g}=k $ or $r=kn(n-1)$. If $r=kn(n-1)$, then from (\ref{eq-TPS}), we get \[ S=k(n-1)g. 
\] {\bf Case 2.} If $a_{0}+a_{2}+a_{3}+na_{4}+a_{5}+a_{6}=0$, then by (\ref {eq-TPS}), we get either $L_{g}=k$ or \begin{eqnarray*} -\varepsilon (a_{0}+a_{5}+a_{6})S(V,W) &=&\varepsilon (a_{4}r-k(n-1)(a_{0}+na_{4}+a_{5}+a_{6}))g(V,W) \\ &&+(a_{2}+a_{3})(r-kn(n-1))\eta (V)\eta (W)). \end{eqnarray*} This proves the result. $\blacksquare$ \begin{cor} An $n$-dimensional ${\cal T}_{\!a}$-pseudosymmetric $\left( N(k),\xi \right) $-semi-Riemannian manifold is of the form $R\cdot {\cal T}_{\!a}=kQ(g,{\cal T }_{\!a})$. \end{cor} In view of Theorem~\ref{th-TPS}, we have the following \begin{cor} \label{cor-pseudo-1} Let $M$ be an $n$-dimensional ${\cal T}_{\!a}$ -pseudosymmetric $\left( N(k),\xi \right) $-semi-Riemannian manifold such that \[ {\cal T}_{\!a}\in \left\{ R,{\cal V},{\cal P},{\cal M},{\cal W}_{0},{\cal W} _{0}^{\ast },{\cal W}_{1},{\cal W}_{1}^{\ast },{\cal W}_{3},\ldots ,{\cal W} _{8}\right\} \] Then we have the following table\/{\rm :}~ \[ \begin{tabular}{|l|l|l|} \hline ${\boldmath M}$ & $L_{g}=$ & ${\boldmath S=}$ \\ \hline $N(k)$-contact metric & $k$ & $k(n-1)g$ \\ \hline Sasakian & $1$ & $(n-1)g$ \\ \hline Kenmotsu & $-\,1$ & $-\,(n-1)g$ \\ \hline $(\varepsilon )$-Sasakian & $\varepsilon $ & $\varepsilon (n-1)g$ \\ \hline para-Sasakian & $-\,1$ & $-\,(n-1)g$ \\ \hline $(\varepsilon )$-para-Sasakian & $-\,\varepsilon $ & $-\,\varepsilon (n-1)g$ \\ \hline \end{tabular} \ \] \end{cor} \begin{cor} {\rm \cite{Ozgur-06} }Let $M$ be an $n$-dimensional, $n\geq 3$, Kenmotsu manifold. If $M$ is pseudosymmetric then either it is locally isometric to the hyperbolic space $H^{n}(-1)$ or $L_{g}=-1$ holds on $M$. \end{cor} \begin{cor} {\rm \cite{Ozgur-06} }Every Kenmotsu manifold $M$, $n\geq 3$, is a pseudosymmetric manifold of the form $R\cdot R=-Q(g,R)$. \end{cor} \begin{cor} Let $M$ be an $n$-dimensional quasi-conformal pseudosymmetric $(N(k),\xi )$ -semi-Riemannian manifold such that $a_{0}-a_{1}\not=0$\ and $ a_{0}+(n-2)a_{1}\not=0$. Then we have the following table\/{\rm :}~ \[ \begin{tabular}{|l|l|l|} \hline ${\boldmath M}$ & $L_{g}=$ & ${\boldmath S=}$ \\ \hline $N(k)$-contact metric & $k$ & $k(n-1)g$ \\ \hline Sasakian & $1$ & $(n-1)g$ \\ \hline Kenmotsu & $-\,1$ & $-\,(n-1)g$ \\ \hline $(\varepsilon )$-Sasakian & $\varepsilon $ & $\varepsilon (n-1)g$ \\ \hline para-Sasakian & $-\,1$ & $-\,(n-1)g$ \\ \hline $(\varepsilon )$-para-Sasakian & $-\,\varepsilon $ & $-\,\varepsilon (n-1)g$ \\ \hline \end{tabular} \ \] \end{cor} \begin{cor} Let $M$ be an $n$-dimensional pseudo-projective pseudosymmetric $\left( N(k),\xi \right) $-semi-Riemannian manifold such that $a_{0}\not=0$ and $ a_{0}-a_{1}\not=0$. Then we have the following table\/{\rm :}~ \[ \begin{tabular}{|l|l|l|} \hline ${\boldmath M}$ & $L_{g}=$ & ${\boldmath S=}$ \\ \hline $N(k)$-contact metric & $k$ & $k(n-1)g$ \\ \hline Sasakian & $1$ & $(n-1)g$ \\ \hline Kenmotsu & $-\,1$ & $-\,(n-1)g$ \\ \hline $(\varepsilon )$-Sasakian & $\varepsilon $ & $\varepsilon (n-1)g$ \\ \hline para-Sasakian & $-\,1$ & $-\,(n-1)g$ \\ \hline $(\varepsilon )$-para-Sasakian & $-\,\varepsilon $ & $-\,\varepsilon (n-1)g$ \\ \hline \end{tabular} \ \] \end{cor} \begin{cor} Let $M$ be an $n$-dimensional Weyl pseudosymmetric $\left( N(k),\xi \right) $ -semi-Riemannian manifold. 
Then we have the following table\/{\rm :}~\ \[ \begin{tabular}{|l|l|l|} \hline ${\boldmath M}$ & $L_{g}=$ & ${\boldmath S=}$ \\ \hline $N(k)$-contact metric & $k$ & $S=\left( \dfrac{r}{n-1}-k\right) g+\left( nk- \dfrac{r}{n-1}\right) \eta \otimes \eta $ \\ \hline Sasakian & $1$ & $S=\left( \dfrac{r}{n-1}-1\right) g+\left( n-\dfrac{r}{n-1} \right) \eta \otimes \eta $ \\ \hline Kenmotsu {\rm \cite{Ozgur-06}} & $-\,1$ & $S=\left( \dfrac{r}{n-1}+1\right) g-\left( n+\dfrac{r}{n-1}\right) \eta \otimes \eta $ \\ \hline $(\varepsilon )$-Sasakian & $\varepsilon $ & $S=\left( \dfrac{r}{n-1} -\varepsilon \right) g+\varepsilon \left( n\varepsilon -\dfrac{r}{n-1} \right) \eta \otimes \eta $ \\ \hline para-Sasakian {\rm \cite{Ozgur-2005}} & $-\,1$ & $S=\left( \dfrac{r}{n-1} +1\right) g-\left( n+\dfrac{r}{n-1}\right) \eta \otimes \eta $ \\ \hline $(\varepsilon )$-para-Sasakian & $-\,\varepsilon $ & $S=\left( \dfrac{r}{n-1} +\varepsilon \right) g-\varepsilon \left( n\varepsilon +\dfrac{r}{n-1} \right) \eta \otimes \eta $ \\ \hline \end{tabular} \] \end{cor} \begin{cor} Let $M$ be an $n$-dimensional conharmonic pseudosymmetric $\left( N(k),\xi \right) $-semi-Riemannian manifold. Then we have the following table\/{\rm :} ~ \[ \begin{tabular}{|l|l|l|} \hline ${\boldmath M}$ & $L_{g}=$ & ${\boldmath S=}$ \\ \hline $N(k)$-contact metric & $k$ & $S=\left( \dfrac{r}{n-1}-k\right) g+\left( nk- \dfrac{r}{n-1}\right) \eta \otimes \eta $ \\ \hline Sasakian & $1$ & $S=\left( \dfrac{r}{n-1}-1\right) g+\left( n-\dfrac{r}{n-1} \right) \eta \otimes \eta $ \\ \hline Kenmotsu & $-\,1$ & $S=\left( \dfrac{r}{n-1}+1\right) g-\left( n+\dfrac{r}{ n-1}\right) \eta \otimes \eta $ \\ \hline $(\varepsilon )$-Sasakian & $\varepsilon $ & $S=\left( \dfrac{r}{n-1} -\varepsilon \right) g+\varepsilon \left( n\varepsilon -\dfrac{r}{n-1} \right) \eta \otimes \eta $ \\ \hline para-Sasakian & $-\,1$ & $S=\left( \dfrac{r}{n-1}+1\right) g-\left( n+\dfrac{ r}{n-1}\right) \eta \otimes \eta $ \\ \hline $(\varepsilon )$-para-Sasakian & $-\,\varepsilon $ & $S=\left( \dfrac{r}{n-1} +\varepsilon \right) g-\varepsilon \left( n\varepsilon +\dfrac{r}{n-1} \right) \eta \otimes \eta $ \\ \hline \end{tabular} \ \] \end{cor} \begin{cor} Let $M$ be an $n$-dimensional ${\cal W}_{2}$-pseudosymmetric $\left( N(k),\xi \right) $-semi-Riemannian manifold. Then we have the following table\/{\rm :}~ \[ \begin{tabular}{|l|l|l|} \hline ${\boldmath M}$ & $L_{g}=$ & ${\boldmath S=}$ \\ \hline $N(k)$-contact metric & $k$ & $\dfrac{r}{n}g$ \\ \hline Sasakian & $1$ & $\dfrac{r}{n}g$ \\ \hline Kenmotsu & $-\,1$ & $\dfrac{r}{n}g$ \\ \hline $(\varepsilon )$-Sasakian & $\varepsilon $ & $\dfrac{r}{n}g$ \\ \hline para-Sasakian & $-\,1$ & $\dfrac{r}{n}g$ \\ \hline $(\varepsilon )$-para-Sasakian & $-\,\varepsilon $ & $\dfrac{r}{n}g$ \\ \hline \end{tabular} \ \] \end{cor} \begin{cor} Let $M$ be an $n$-dimensional ${\cal W}_{9}$-pseudosymmetric $\left( N(k),\xi \right) $-semi-Riemannian manifold. 
Then we have the following table\/{\rm :}~ \[ \begin{tabular}{|l|l|l|} \hline ${\boldmath M}$ & $L_{g}=$ & ${\boldmath S=}$ \\ \hline $N(k)$-contact metric & $k$ & $S=\left( \dfrac{r}{n-1}-k\right) g+\left( nk- \dfrac{r}{n-1}\right) \eta \otimes \eta $ \\ \hline Sasakian & $1$ & $S=\left( \dfrac{r}{n-1}-1\right) g+\left( n-\dfrac{r}{n-1} \right) \eta \otimes \eta $ \\ \hline Kenmotsu & $-\,1$ & $S=\left( \dfrac{r}{n-1}+1\right) g-\left( n+\dfrac{r}{ n-1}\right) \eta \otimes \eta $ \\ \hline $(\varepsilon )$-Sasakian & $\varepsilon $ & $S=\left( \dfrac{r}{n-1} -\varepsilon \right) g+\varepsilon \left( n\varepsilon -\dfrac{r}{n-1} \right) \eta \otimes \eta $ \\ \hline para-Sasakian & $-\,1$ & $S=\left( \dfrac{r}{n-1}+1\right) g-\left( n+\dfrac{ r}{n-1}\right) \eta \otimes \eta $ \\ \hline $(\varepsilon )$-para-Sasakian & $-\,\varepsilon $ & $S=\left( \dfrac{r}{n-1} +\varepsilon \right) g-\varepsilon \left( n\varepsilon +\dfrac{r}{n-1} \right) \eta \otimes \eta $ \\ \hline \end{tabular} \ \] \end{cor} \section{$({\cal T}_{\!a},{\cal T}_{\!b},S^{\ell })$-pseudosymmetry\label {sect-TTSP}} \begin{defn-new} A semi-Riemannian manifold is said to be $({\cal T}_{\!a},{\cal T} _{\!b},S^{\ell })$-pseudosymmetric if it satisfies \begin{equation} {\cal T}_{\!a}\cdot {\cal T}_{\!b}=L_{S^{\ell }}Q(S^{\ell },{\cal T}_{\!b}), \label{eq-T.T=LSl} \end{equation} where $L_{S^{\ell }}$ is some smooth function on $M$. In particular, it is said to be $(R,{\cal T}_{\!a},S^{\ell })$-pseudosymmetric if \begin{equation} R\cdot {\cal T}_{\!a}=L_{S^{\ell }}Q(S^{\ell },{\cal T}_{\!a}) \label{eq-R.T=LSl} \end{equation} holds on the set ${\cal U}_{S^{\ell }}=\left\{ x\in M:Q(S^{\ell },{\cal T} _{\!a})\not=0\right\} $, where $L_{S^{\ell }}$ is some smooth function on $ {\cal U}_{S^{\ell }}$. \end{defn-new} For $\ell =1$, we can give the following definition. \begin{defn-new} A semi-Riemannian manifold is called $({\cal T}_{\!a},{\cal T}_{\!b},S)$ -pseudosymmetric if it satisfies \begin{equation} {\cal T}_{\!a}\cdot {\cal T}_{\!b}=L_{S}Q(S,{\cal T}_{\!b}), \label{eq-T.T=LS} \end{equation} where $L_{S}$ is some smooth function on $M$. In particular, it is said to be $(R,{\cal T}_{\!a},S)$-pseudosymmetric if \begin{equation} R\cdot {\cal T}_{\!a}=L_{S}Q(S,{\cal T}_{\!a}) \label{eq-R.T=LS} \end{equation} holds on the set ${\cal U}_{S}=\left\{ x\in M:Q(S,{\cal T} _{\!a})\not=0\right\} $, where $L_{S}$ is some smooth function on ${\cal U} _{S}$. \end{defn-new} \begin{rem-new} A semi-Riemannian manifold is said to be $(R,R,S)$-pseudosymmetric or in short, Ricci-generalized pseudosymmetric if \[ R\cdot R=L_{S}Q(S,R) \] holds on the set ${\cal U}_{S}=\left\{ x\in M:Q(S,R)\not=0\right\} $, where $ L_{S}$ is some smooth function on ${\cal U}_{S}$. It is known \cite {Deszcz-90} that every $3$-dimensional semi-Riemannian manifold is Ricci-generalized pseudosymmetric along with $L_{S}=1$, that is, $R\cdot R=Q(S,R)$. \end{rem-new} \begin{th} \label{th-T-T-111} Let $M$ be an $n$-dimensional $({\cal T}_{\!a},{\cal T} _{\!b},S^{\ell })$-pseudosymmetric $(N(k),\xi )$-semi-Riemannian manifold. 
Then \begin{eqnarray} &&\varepsilon b_{0}(ka_{0}+\varepsilon k(n-1)a_{4}+a_{7}r)R(U,V,W,X)+\varepsilon a_{1}b_{0}S(X,R(U,V)W) \nonumber \\ &&-2k(n-1)a_{3}(kb_{0}+k(n-1)b_{4}+b_{7}r)\eta (X)\eta (U)g(V,W) \nonumber \\ &&-2k(n-1)a_{3}(-kb_{0}+k(n-1)b_{5}-b_{7}r)\eta (X)\eta (V)g(U,W) \nonumber \\ &&+\varepsilon a_{1}b_{4}S^{2}(X,U)g(V,W)+\varepsilon a_{1}b_{5}S^{2}(X,V)g(U,W) \nonumber \\ &&+\varepsilon a_{1}b_{6}S^{2}(X,W)g(U,V)-a_{5}(b_{1}+b_{3})S^{2}(X,V)\eta (U)\eta (W) \nonumber \\ &&-a_{5}(b_{1}+b_{2})S^{2}(X,W)\eta (U)\eta (V)-a_{5}(b_{2}+b_{3})S^{2}(X,U)\eta (V)\eta (W) \nonumber \\ &&-2a_{6}b_{1}S^{2}(V,W)\eta (X)\eta (U)-2a_{6}b_{2}S^{2}(U,W)\eta (X)\eta (V) \nonumber \\ &&-2a_{6}b_{3}S^{2}(U,V)\eta (X)\eta (W)-2k^{2}(n-1)a_{3}b_{6}g(U,V)\eta (X)\eta (W) \nonumber \\ &&-2\left( k(n-1)a_{3}b_{1}+a_{6}(kb_{0}+k(n-1)b_{4}+b_{7}r)\right) \eta (X)\eta (U)S(V,W) \nonumber \\ &&-2\left( k(n-1)a_{3}b_{2}+a_{6}(-kb_{0}+k(n-1)b_{5}-b_{7}r)\right) \eta (X)\eta (V)S(U,W) \nonumber \\ &&-2k(n-1)(a_{3}b_{3}+a_{6}b_{6})S(U,V)\eta (X)\eta (W) \nonumber \\ &&+\varepsilon \left( b_{4}(ka_{0}+k(n-1)a_{4}+a_{7}r)-a_{1}(kb_{0}+k(n-1)b_{4})\right) S(X,U)g(V,W) \nonumber \\ &&+\varepsilon \left( b_{5}(ka_{0}+k(n-1)a_{4}+a_{7}r)-a_{1}(-kb_{0}+k(n-1)b_{5})\right) S(X,V)g(U,W) \nonumber \\ &&+\varepsilon b_{6}(ka_{0}+k(n-1)(a_{4}-a_{1})+a_{7}r)S(X,W)g(U,V) \nonumber \\ &&-\varepsilon (kb_{0}+k(n-1)b_{4})(ka_{0}+k(n-1)a_{4}+a_{7}r)g(X,U)g(V,W) \nonumber \\ &&-\varepsilon (-kb_{0}+k(n-1)b_{5})(ka_{0}+k(n-1)a_{4}+a_{7}r)g(U,W)g(X,V) \nonumber \\ &&-\varepsilon k(n-1)b_{6}(ka_{0}+k(n-1)a_{4}+a_{7}r)g(X,W)g(U,V) \nonumber \\ &&-k(n-1)\left( (b_{2}+b_{3})(ka_{0}+k(n-1)a_{4}+a_{7}r)\right. \nonumber \\ &&\left. +(a_{2}+a_{4})(-kb_{0}+k(n-1)(b_{5}+b_{6})-b_{7}r)\right) g(X,U)\eta (V)\eta (W) \nonumber \\ &&-k(n-1)\left( (b_{1}+b_{3})(ka_{0}+k(n-1)a_{4}+a_{7}r)\right. \nonumber \\ &&\left. +(a_{2}+a_{4})(kb_{0}+k(n-1)(b_{4}+b_{6})+b_{7}r)\right) g(X,V)\eta (U)\eta (W) \nonumber \\ &&-\left( (b_{1}+b_{3})(-ka_{0}+k(n-1)(a_{1}+a_{2})-a_{7}r)\right. \nonumber \\ &&\left. +(a_{1}+a_{5})(kb_{0}+k(n-1)(b_{4}+b_{6})+b_{7}r)\right) S(X,V)\eta (U)\eta (W) \nonumber \\ &&-\left( (b_{2}+b_{3})(-ka_{0}+k(n-1)(a_{1}+a_{2})-a_{7}r)\right. \nonumber \\ &&\left. +(a_{1}+a_{5})(kb_{0}+k(n-1)(b_{5}+b_{6})+b_{7}r)\right) S(X,U)\eta (V)\eta (W) \nonumber \\ &&-\left( (b_{1}+b_{2})(-ka_{0}+k(n-1)(a_{1}+a_{2})-a_{7}r)\right. \nonumber \\ &&\left. +k(n-1)(b_{4}+b_{5})(a_{1}+a_{5})\right) S(X,W)\eta (U)\eta (V) \nonumber \\ &&-k(n-1)\left( k(n-1)(b_{4}+b_{5})(a_{2}+a_{4})\right. \nonumber \\ &&\left. 
+(b_{1}+b_{2})(ka_{0}+k(n-1)a_{4}+a_{7}r)\right) g(X,W)\eta (U)\eta (V) \nonumber \\ &=&L_{S^{\ell }}(\varepsilon b_{0}S^{\ell }(R(U,V)W,X)+\varepsilon b_{4}S^{\ell +1}(X,U)g(V,W)+\varepsilon b_{5}S^{\ell +1}(X,V)g(U,W) \nonumber \\ &&+\varepsilon b_{6}S^{\ell +1}(X,W)g(U,V)-\varepsilon k(n-1)b_{6}S^{\ell }(X,W)g(U,V) \nonumber \\ &&+k^{\ell }(n-1)^{\ell }(-kb_{0}+k(n-1)(b_{5}+b_{6})-b_{7}r)g(X,U)\eta (V)\eta (W) \nonumber \\ &&+k^{\ell }(n-1)^{\ell }(kb_{0}+k(n-1)(b_{4}+b_{6})+b_{7}r)g(X,V)\eta (U)\eta (W) \nonumber \\ &&-(-kb_{0}+k(n-1)(b_{2}+b_{3}+b_{5}+b_{6})-b_{7}r)S^{\ell }(X,U)\eta (V)\eta (W) \nonumber \\ &&-(kb_{0}+k(n-1)(b_{1}+b_{3}+b_{4}+b_{6})+b_{7}r)S^{\ell }(X,V)\eta (U)\eta (W) \nonumber \\ &&-k(n-1)(b_{1}+b_{2}+b_{4}+b_{5})S(X,W)\eta (U)\eta (V) \nonumber \\ &&+k^{\ell +1}(n-1)^{\ell +1}(b_{4}+b_{5})g(X,W)\eta (U)\eta (V) \nonumber \\ &&-\varepsilon (-kb_{0}+k(n-1)b_{5})S^{\ell }(X,V)g(U,W) \nonumber \\ &&-\varepsilon (kb_{0}+k(n-1)b_{4})S^{\ell }(X,U)g(V,W) \nonumber \\ &&+k^{\ell }(n-1)^{\ell }(b_{1}+b_{3})S(X,V)\eta (U)\eta (W) \nonumber \\ &&+k^{\ell }(n-1)^{\ell }(b_{1}+b_{2})S(X,W)\eta (U)\eta (V) \nonumber \\ &&+k^{\ell }(n-1)^{\ell }(b_{2}+b_{3})S(X,U)\eta (V)\eta (W)). \label{eq-T-T-2-i-ii} \end{eqnarray} \end{th} \noindent {\bf Proof.} Let $M$ be an $n$-dimensional $({\cal T}_{\!a},{\cal T }_{\!b},S^{\ell })$-pseudosymmetric $(N(k),\xi )$-semi-Riemannian manifold. Then \begin{equation} {\cal T}_{\!a}(Z,X)\cdot {\cal T}_{\!b}(U,V)W=L_{S^{\ell }}Q(S^{\ell },{\cal T}_{\!b})(U,V,W;Z,X). \label{eq-txxi-1-ii} \end{equation} Taking $Z=\xi $ in (\ref{eq-txxi-1-ii}), we get \[ {\cal T}_{\!a}(\xi ,X)\cdot {\cal T}_{\!b}(U,V)W=L_{S^{\ell }}Q(S^{\ell }, {\cal T}_{\!b})(U,V,W;\xi ,X), \] which gives \begin{eqnarray*} &&{\cal T}_{\!a}(\xi ,X){\cal T}_{\!b}(U,V)W-{\cal T}_{\!b}({\cal T} _{\!a}(\xi ,X)U,V)W \\ &&\quad -\ {\cal T}_{\!b}(U,{\cal T}_{\!a}(\xi ,X)V)W-{\cal T}_{\!b}(U,V) {\cal T}_{\!a}(\xi ,X)W \\ &=&L_{S^{\ell }}((\xi \wedge _{S^{\ell }}X){\cal T}_{\!b}(U,V)W-{\cal T} _{\!b}((\xi \wedge _{S^{\ell }}X)U,V)W \\ &&-{\cal T}_{\!b}(U,(\xi \wedge _{S^{\ell }}X)V)W-{\cal T}_{\!b}(U,V)(\xi \wedge _{S^{\ell }}X)W), \end{eqnarray*} that is, \begin{eqnarray} &&{\cal T}_{\!a}(\xi ,X){\cal T}_{\!b}(U,V)W-{\cal T}_{\!b}({\cal T} _{\!a}(\xi ,X)U,V)W \nonumber \\ &&\quad -\ {\cal T}_{\!b}(U,{\cal T}_{\!a}(\xi ,X)V)W-{\cal T}_{\!b}(U,V) {\cal T}_{\!a}(\xi ,X)W \nonumber \\ &=&L_{S^{\ell }}(S^{\ell }(X,{\cal T}_{\!b}(U,V)W)\xi -S^{\ell }(\xi ,{\cal T }_{\!b}(U,V)W)X \nonumber \\ &&-S^{\ell }(X,U){\cal T}_{\!b}(\xi ,V)W+S^{\ell }(\xi ,U){\cal T} _{\!b}(X,V)W \nonumber \\ &&-S^{\ell }(X,V){\cal T}_{\!b}(U,\xi )W+S^{\ell }(\xi ,V){\cal T} _{\!b}(U,X)W \nonumber \\ &&-S^{\ell }(X,W){\cal T}_{\!b}(U,V)\xi +S^{\ell }(\xi ,W){\cal T} _{\!b}(U,V)X). 
\label{eq-T-T-i-ii} \end{eqnarray} Taking the inner product of (\ref{eq-T-T-i-ii}) with $\xi $, we get \begin{eqnarray} &&{\cal T}_{\!a}(\xi ,X,{\cal T}_{\!b}(U,V)W,\xi )-{\cal T}_{\!b}({\cal T} _{\!a}(\xi ,X)U,V,W,\xi ) \nonumber \\ &&\quad -\ {\cal T}_{\!b}(U,{\cal T}_{\!a}(\xi ,X)V,W,\xi )-{\cal T} _{\!b}(U,V,{\cal T}_{\!a}(\xi ,X)W,\xi ) \nonumber \\ &=&L_{S^{\ell }}(\varepsilon S^{\ell }(X,{\cal T}_{\!b}(U,V)W)-\varepsilon \eta (X)S^{\ell }(\xi ,{\cal T}_{\!b}(U,V)W) \nonumber \\ &&-S^{\ell }(X,U){\cal T}_{\!b}(\xi ,V,W,\xi )+S^{\ell }(\xi ,U){\cal T} _{\!b}(X,V,W,\xi ) \nonumber \\ &&-S^{\ell }(X,V){\cal T}_{\!b}(U,\xi ,W,\xi )+S^{\ell }(\xi ,V){\cal T} _{\!b}(U,X,W,\xi ) \nonumber \\ &&-S^{\ell }(X,W){\cal T}_{\!b}(U,V,\xi ,\xi )+S^{\ell }(\xi ,W){\cal T} _{\!b}(U,V,X,\xi )). \label{eq-T-T-1-i-ii} \end{eqnarray} By using (\ref{eq-X-Y-xi}),\ldots ,(\ref{eq-X-xi-xi}) in (\ref{eq-T-T-1-i-ii} ), we get (\ref{eq-T-T-2-i-ii}). $\blacksquare $ \begin{cor} \label{th-T-T-11 copy(1)} Let $M$ be an $n$-dimensional $({\cal T}_{\!a}, {\cal T}_{\!b},S)$-pseudosymmetric $(N(k),\xi )$-semi-Riemannian manifold. Then \begin{eqnarray} &&\varepsilon b_{0}(ka_{0}+\varepsilon k(n-1)a_{4}+a_{7}r)R(U,V,W,X)+\varepsilon a_{1}b_{0}S(X,R(U,V)W) \nonumber \\ &&-2k(n-1)a_{3}(kb_{0}+k(n-1)b_{4}+b_{7}r)\eta (X)\eta (U)g(V,W) \nonumber \\ &&-2k(n-1)a_{3}(-kb_{0}+k(n-1)b_{5}-b_{7}r)\eta (X)\eta (V)g(U,W) \nonumber \\ &&+\varepsilon a_{1}b_{4}S^{2}(X,U)g(V,W)+\varepsilon a_{1}b_{5}S^{2}(X,V)g(U,W) \nonumber \\ &&+\varepsilon a_{1}b_{6}S^{2}(X,W)g(U,V)-a_{5}(b_{1}+b_{3})S^{2}(X,V)\eta (U)\eta (W) \nonumber \\ &&-a_{5}(b_{1}+b_{2})S^{2}(X,W)\eta (U)\eta (V)-a_{5}(b_{2}+b_{3})S^{2}(X,U)\eta (V)\eta (W) \nonumber \\ &&-2a_{6}b_{1}S^{2}(V,W)\eta (X)\eta (U)-2a_{6}b_{2}S^{2}(U,W)\eta (X)\eta (V) \nonumber \\ &&-2a_{6}b_{3}S^{2}(U,V)\eta (X)\eta (W)-2k^{2}(n-1)a_{3}b_{6}g(U,V)\eta (X)\eta (W) \nonumber \\ &&-2\left( k(n-1)a_{3}b_{1}+a_{6}(kb_{0}+k(n-1)b_{4}+b_{7}r)\right) \eta (X)\eta (U)S(V,W) \nonumber \\ &&-2\left( k(n-1)a_{3}b_{2}+a_{6}(-kb_{0}+k(n-1)b_{5}-b_{7}r)\right) \eta (X)\eta (V)S(U,W) \nonumber \\ &&-2k(n-1)(a_{3}b_{3}+a_{6}b_{6})S(U,V)\eta (X)\eta (W) \nonumber \\ &&+\varepsilon \left( b_{4}(ka_{0}+k(n-1)a_{4}+a_{7}r)-a_{1}(kb_{0}+k(n-1)b_{4})\right) S(X,U)g(V,W) \nonumber \\ &&+\varepsilon \left( b_{5}(ka_{0}+k(n-1)a_{4}+a_{7}r)-a_{1}(-kb_{0}+k(n-1)b_{5})\right) S(X,V)g(U,W) \nonumber \\ &&+\varepsilon b_{6}(ka_{0}+k(n-1)(a_{4}-a_{1})+a_{7}r)S(X,W)g(U,V) \nonumber \\ &&-\varepsilon (kb_{0}+k(n-1)b_{4})(ka_{0}+k(n-1)a_{4}+a_{7}r)g(X,U)g(V,W) \nonumber \\ &&-\varepsilon (-kb_{0}+k(n-1)b_{5})(ka_{0}+k(n-1)a_{4}+a_{7}r)g(U,W)g(X,V) \nonumber \\ &&-\varepsilon k(n-1)b_{6}(ka_{0}+k(n-1)a_{4}+a_{7}r)g(X,W)g(U,V) \nonumber \\ &&-k(n-1)\left( (b_{2}+b_{3})(ka_{0}+k(n-1)a_{4}+a_{7}r)\right. \nonumber \\ &&\left. +(a_{2}+a_{4})(-kb_{0}+k(n-1)(b_{5}+b_{6})-b_{7}r)\right) g(X,U)\eta (V)\eta (W) \nonumber \\ &&-k(n-1)\left( (b_{1}+b_{3})(ka_{0}+k(n-1)a_{4}+a_{7}r)\right. \nonumber \\ &&\left. +(a_{2}+a_{4})(kb_{0}+k(n-1)(b_{4}+b_{6})+b_{7}r)\right) g(X,V)\eta (U)\eta (W) \nonumber \\ &&-\left( (b_{1}+b_{3})(-ka_{0}+k(n-1)(a_{1}+a_{2})-a_{7}r)\right. \nonumber \\ &&\left. +(a_{1}+a_{5})(kb_{0}+k(n-1)(b_{4}+b_{6})+b_{7}r)\right) S(X,V)\eta (U)\eta (W) \nonumber \\ &&-\left( (b_{2}+b_{3})(-ka_{0}+k(n-1)(a_{1}+a_{2})-a_{7}r)\right. \nonumber \\ &&\left. +(a_{1}+a_{5})(kb_{0}+k(n-1)(b_{5}+b_{6})+b_{7}r)\right) S(X,U)\eta (V)\eta (W) \nonumber \\ &&-\left( (b_{1}+b_{2})(-ka_{0}+k(n-1)(a_{1}+a_{2})-a_{7}r)\right. 
\nonumber \\ &&\left. +k(n-1)(b_{4}+b_{5})(a_{1}+a_{5})\right) S(X,W)\eta (U)\eta (V) \nonumber \\ &&-k(n-1)\left( k(n-1)(b_{4}+b_{5})(a_{2}+a_{4})\right. \nonumber \\ &&\left. +(b_{1}+b_{2})(ka_{0}+k(n-1)a_{4}+a_{7}r)\right) g(X,W)\eta (U)\eta (V) \nonumber \\ &=&\,L_{S}(\varepsilon b_{0}S(R(U,V)W,X)+\varepsilon b_{4}S^{2}(X,U)g(V,W)+\varepsilon b_{5}S^{2}(X,V)g(U,W) \nonumber \\ &&+\varepsilon b_{6}S^{2}(X,W)g(U,V)-\varepsilon k(n-1)b_{6}S(X,W)g(U,V) \nonumber \\ &&+k(n-1)(-kb_{0}+k(n-1)(b_{5}+b_{6})-b_{7}r)g(X,U)\eta (V)\eta (W) \nonumber \\ &&+k(n-1)(kb_{0}+k(n-1)(b_{4}+b_{6})b_{7}r)g(X,V)\eta (U)\eta (W) \nonumber \\ &&+k^{2}(n-1)^{2}(b_{4}+b_{5})g(X,W)\eta (U)\eta (V) \nonumber \\ &&-(-kb_{0}+k(n-1)(b_{5}+b_{6})-b_{7}r)S(X,U)\eta (V)\eta (W) \nonumber \\ &&-(kb_{0}+k(n-1)(b_{4}+b_{6})+b_{7}r)S(X,V)\eta (U)\eta (W) \nonumber \\ &&-k(n-1)(b_{4}+b_{5})S(X,W)\eta (U)\eta (V)-\varepsilon (-kb_{0}+k(n-1)b_{5})S(X,V)g(U,W) \nonumber \\ &&-\varepsilon (kb_{0}+k(n-1)b_{4})S(X,U)g(V,W)). \label{eq-T-T-2-i-i} \end{eqnarray} \end{cor} \begin{th} Let $M$ be an $n$-dimensional $({\cal T}_{\!a},{\cal T}_{\!b},S^{\ell })$ -pseudosymmetric $(N(k),\xi )$-semi-Riemannian manifold. Then \begin{eqnarray*} &&(a_{1}b_{5}-a_{5}b_{1}-a_{5}b_{3})S^{2}(X,V)\eta (W) \\ &&+(a_{1}b_{6}-a_{5}b_{1}-a_{5}b_{2})S^{2}(X,W)\eta (V) \\ &&-2a_{6}b_{1}S^{2}(V,W)\eta (X) \\ &&+(b_{5}(ka_{0}+k(n-1)(a_{4}-a_{1})+a_{7}r) \\ &&-(b_{1}+b_{3})(-ka_{0}+k(n-1)(a_{1}+a_{2})-a_{7}r) \\ &&-(a_{1}+a_{5})(kb_{0}+k(n-1)(b_{4}+b_{6})+b_{7}r))S(X,V)\eta (W) \\ &&+(b_{6}(ka_{0}+k(n-1)(a_{4}-a_{1})+a_{7}r) \\ &&-(b_{1}+b_{2})(-ka_{0}+k(n-1)(a_{1}+a_{2})-a_{7}r) \\ &&-k(n-1)(a_{1}+a_{5})(b_{4}+b_{5}))S(X,W)\eta (V) \\ &&-2(k(n-1)a_{3}b_{1}+a_{6}(kb_{0}+k(n-1)b_{4}+b_{7}r))S(V,W)\eta (X) \\ &&-k(n-1)((b_{1}+b_{3}+b_{5})(ka_{0}+k(n-1)a_{4}+a_{7}r) \\ &&+(a_{2}+a_{4})(kb_{0}+k(n-1)(b_{4}+b_{6})+b_{7}r))g(X,V)\eta (W) \\ &&-k(n-1)((b_{1}+b_{2}+b_{6})(ka_{0}+k(n-1)a_{4}+a_{7}r) \\ &&+k(n-1)(a_{2}+a_{4})(b_{4}+b_{5}))g(X,W)\eta (V) \\ &&-2k(n-1)a_{3}(kb_{0}+k(n-1)b_{4}+b_{7}r)g(V,W)\eta (X) \\ &&-\varepsilon k(n-1)((kb_{0}+b_{7}r)(a_{1}+a_{2}+a_{4}+a_{5}) \\ &&+k(n-1)(b_{2}+b_{3}+b_{5}+b_{6})\times \\ &&(a_{1}+a_{2}+2a_{3}+a_{4}+a_{5}+2a_{6}))\eta (X)\eta (V)\eta (W) \\ &&=\, L_{S^{\ell }}(-(kb_{0}+k(n-1)(b_{1}+b_{3}+b_{4}+b_{5}+b_{6})+b_{7}r)S^{\ell }(X,V)\eta (W) \\ &&+k^{\ell }(n-1)^{\ell }(kb_{0}+k(n-1)(b_{4}+b_{6})+b_{7}r)g(X,V)\eta (W) \\ &&+k^{\ell +1}(n-1)^{\ell +1}(b_{4}+b_{5})g(X,W)\eta (V) \\ &&-k(n-1)(b_{1}+b_{2}+b_{4}+b_{5}+b_{6})S^{\ell }(X,W)\eta (V) \\ &&+b_{5}S^{\ell +1}(X,V)\eta (W)+b_{6}S^{\ell +1}(X,W)\eta (V)) \\ &&+k^{\ell }(n-1)^{\ell }(b_{1}+b_{3})S(X,V)\eta (W) \\ &&+k^{\ell }(n-1)^{\ell }(b_{1}+b_{2})S(X,W)\eta (V)). \end{eqnarray*} \end{th} \begin{cor} Let $M$ be an $n$-dimensional $({\cal T}_{\!a},{\cal T}_{\!b},S)$ -pseudosymmetric $(N(k),\xi )$-semi-Riemannian manifold. 
Then \begin{eqnarray*} &&(a_{1}b_{5}-a_{5}b_{1}-a_{5}b_{3})S^{2}(X,V)\eta (W) \\ &&+(a_{1}b_{6}-a_{5}b_{1}-a_{5}b_{2})S^{2}(X,W)\eta (V) - 2a_{6}b_{1}S^{2}(V,W)\eta (X) \\ &&+(b_{5}(ka_{0}+k(n-1)(a_{4}-a_{1})+a_{7}r) \\ &&-(b_{1}+b_{3})(-ka_{0}+k(n-1)(a_{1}+a_{2})-a_{7}r) \\ &&-(a_{1}+a_{5})(kb_{0}+k(n-1)(b_{4}+b_{6})+b_{7}r))S(X,V)\eta (W) \\ &&+(b_{6}(ka_{0}+k(n-1)(a_{4}-a_{1})+a_{7}r) \\ &&-(b_{1}+b_{2})(-ka_{0}+k(n-1)(a_{1}+a_{2})-a_{7}r) \\ &&-k(n-1)(a_{1}+a_{5})(b_{4}+b_{5}))S(X,W)\eta (V) \\ &&-2(k(n-1)a_{3}b_{1}+a_{6}(kb_{0}+k(n-1)b_{4}+b_{7}r))S(V,W)\eta (X) \\ &&-k(n-1)((b_{1}+b_{3}+b_{5})(ka_{0}+k(n-1)a_{4}+a_{7}r) \\ &&+(a_{2}+a_{4})(kb_{0}+k(n-1)(b_{4}+b_{6})+b_{7}r))g(X,V)\eta (W) \\ &&-k(n-1)((b_{1}+b_{2}+b_{6})(ka_{0}+k(n-1)a_{4}+a_{7}r) \\ &&+k(n-1)(a_{2}+a_{4})(b_{4}+b_{5}))g(X,W)\eta (V) \\ &&-2k(n-1)a_{3}(kb_{0}+k(n-1)b_{4}+b_{7}r)g(V,W)\eta (X) \\ &&-\varepsilon k(n-1)((kb_{0}+b_{7}r)(a_{1}+a_{2}+a_{4}+a_{5}) \\ &&+k(n-1)(b_{2}+b_{3}+b_{5}+b_{6})\times \\ &&(a_{1}+a_{2}+2a_{3}+a_{4}+a_{5}+2a_{6}))\eta (X)\eta (V)\eta (W) \end{eqnarray*} \begin{eqnarray} &=&L_{S}(-(kb_{0}+k(n-1)(b_{4}+b_{5}+b_{6})+b_{7}r)S(X,V)\eta (W) \nonumber \\ &&+k(n-1)(kb_{0}+k(n-1)(b_{4}+b_{6})+b_{7}r)g(X,V)\eta (W) \nonumber \\ &&+k^{2}(n-1)^{2}(b_{4}+b_{5})g(X,W)\eta (V) \nonumber \\ &&-k(n-1)(b_{4}+b_{5}+b_{6})S(X,W)\eta (V) \nonumber \\ &&+b_{5}S^{2}(X,V)\eta (W)+b_{6}S^{2}(X,W)\eta (V)). \label{eq-T-T-2-i-i-1} \end{eqnarray} \end{cor} \begin{th} Let $M$ be an $n$-dimensional $(R,{\cal T}_{\!a},S^{\ell })$-pseudosymmetric $(N(k),\xi )$-semi-Riemannian manifold. Then \begin{eqnarray} &&k(a_{1}+a_{3}+a_{5})\left( S(X,V)-\,k(n-1)g(X,V)\right) \eta (W) \nonumber \\ &+&k(a_{1}+a_{2}+a_{6})\left( S(X,W)-\,k(n-1)g(X,W)\right) \eta (V) \nonumber \\ &=&L_{S^{\ell }}(-(ka_{0}+k(n-1)(a_{1}+a_{3}+a_{4}+a_{5}+a_{6})+a_{7}r)S^{\ell }(X,V)\eta (W) \nonumber \\ &&+k^{\ell }(n-1)^{\ell }(ka_{0}+k(n-1)(a_{4}+a_{6})+a_{7}r)g(X,V)\eta (W) \nonumber \\ &&+k^{\ell +1}(n-1)^{\ell +1}(a_{4}+a_{5})g(X,W)\eta (V) \nonumber \\ &&-k(n-1)(a_{1}+a_{2}+a_{4}+a_{5}+a_{6})S^{\ell }(X,W)\eta (V) \nonumber \\ &&+a_{5}S^{\ell +1}(X,V)\eta (W)+a_{6}S^{\ell +1}(X,W)\eta (V) \nonumber \\ &&+k^{\ell }(n-1)^{\ell }(a_{1}+a_{3})S(X,V)\eta (W) \nonumber \\ &&+k^{\ell }(n-1)^{\ell }(a_{1}+a_{2})S(X,W)\eta (V)). \label{eq-TRGP-i} \end{eqnarray} \end{th} \begin{cor} Let $M$ be an $n$-dimensional $(R,{\cal T}_{\!a},S)$-pseudosymmetric $ (N(k),\xi )$-semi-Riemannian manifold. Then \begin{eqnarray} &&k(a_{1}+a_{3}+a_{5})\left( S(X,V)-\,k(n-1)g(X,V)\right) \eta (W) \nonumber \\ &+&k(a_{1}+a_{2}+a_{6})\left( S(X,W)-\,k(n-1)g(X,W)\right) \eta (V) \nonumber \\ &=&L_{S}(-(ka_{0}+k(n-1)(a_{4}+a_{5}+a_{6})+a_{7}r)S(X,V)\eta (W) \nonumber \\ &&+k(n-1)(ka_{0}+k(n-1)(a_{4}+a_{6})+a_{7}r)g(X,V)\eta (W) \nonumber \\ &&+k^{2}(n-1)^{2}(a_{4}+a_{5})g(X,W)\eta (V) \nonumber \\ &&-k(n-1)(a_{4}+a_{5}+a_{6})S(X,W)\eta (V) \nonumber \\ &&+a_{5}S^{2}(X,V)\eta (W)+a_{6}S^{2}(X,W)\eta (V)). \label{eq-TRGP} \end{eqnarray} \end{cor} \begin{cor} Let $M$ be an $n$-dimensional $(R,R,S^{\ell })$-pseudosymmetric $\left( N(k),\xi \right) $-semi-Riemannian manifold. If $M$ is not semisymmetric, then \[ S^{\ell }=k^{\ell }(n-1)^{\ell }g \] and $L_{S^{\ell }}=\dfrac{1}{k^{\ell -1}(n-1)^{\ell }}$. 
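In particular, for $\ell =1$ this gives $S=k(n-1)g$ and $L_{S}=\dfrac{1}{n-1}$, so that $M$ is an Einstein manifold with scalar curvature $kn(n-1)$, as recorded in the next corollary.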
Consequently, we have the following\/{\rm :}~ \[ \begin{tabular}{|l|l|l|} \hline ${\boldmath M}$ & $L_{S^{\ell }}=$ & ${\boldmath S}^{\ell }{=}$ \\ \hline $N(k)$-contact metric & $\dfrac{1}{k^{\ell -1}(n-1)^{\ell }}$ & $k^{\ell }(n-1)^{\ell }g$ \\ \hline Sasakian & $\dfrac{1}{(n-1)^{\ell }}$ & $(n-1)^{\ell }g$ \\ \hline Kenmotsu & $\dfrac{1}{(-1)^{\ell -1}(n-1)^{\ell }}$ & $(-1)^{\ell }(n-1)^{\ell }g$ \\ \hline $(\varepsilon )$-Sasakian & $\dfrac{1}{(\varepsilon )^{\ell -1}(n-1)^{\ell }}$ & $(\varepsilon )^{\ell }(n-1)^{\ell }g$ \\ \hline para-Sasakian & $\dfrac{1}{(-1)^{\ell -1}(n-1)^{\ell }}$ & $(-1)^{\ell }(n-1)^{\ell }g$ \\ \hline $(\varepsilon )$-para-Sasakian & $\dfrac{1}{(-\varepsilon )^{\ell -1}(n-1)^{\ell }}$ & $(-\varepsilon )^{\ell }(n-1)^{\ell }g$ \\ \hline \end{tabular} \] \end{cor}
\noindent {\bf Proof.} Let $M$ be an $n$-dimensional $(R,R,S^{\ell })$-pseudosymmetric $\left( N(k),\xi \right) $-semi-Riemannian manifold, that is,
\begin{equation} R\cdot R=L_{S^{\ell }}Q(S^{\ell },R) \label{eq-TRGP-1-i} \end{equation}
holds on $M$. Taking ${\cal T}_{\!a}=R$ (that is, $a_{0}=1$ and all other $a_{i}=0$) in (\ref{eq-TRGP-i}), we get
\begin{equation} -kL_{S^{\ell }}\left( S^{\ell }(X,V)-k^{\ell }(n-1)^{\ell }g(X,V)\right) \eta (W)=0. \label{eq-TRGP-2-i} \end{equation}
Putting $W=\xi $ in (\ref{eq-TRGP-2-i}), we get
\begin{equation} -kL_{S^{\ell }}\left( S^{\ell }(X,V)-k^{\ell }(n-1)^{\ell }g(X,V)\right) =0. \label{eq-TRGP-3-i} \end{equation}
Since $M$ is not semisymmetric, $L_{S^{\ell }}\not=0$. Therefore, from (\ref{eq-TRGP-3-i}), we have \[ S^{\ell }(X,V)=k^{\ell }(n-1)^{\ell }g(X,V). \] Putting $S^{\ell }=k^{\ell }(n-1)^{\ell }g$ in (\ref{eq-TRGP-1-i}), we get \[ R\cdot R=k^{\ell }(n-1)^{\ell }L_{S^{\ell }}Q(g,R), \] which is the defining condition of a pseudosymmetric manifold. Comparing with the corresponding result for pseudosymmetric manifolds (Corollary \ref{cor-pseudo-1}), we get $L_{S^{\ell }}=\dfrac{1}{k^{\ell -1}(n-1)^{\ell }}$. This proves the result. $\blacksquare $
\begin{cor} Let $M$ be an $n$-dimensional Ricci-generalized pseudosymmetric $\left( N(k),\xi \right) $-semi-Riemannian manifold. If $M$ is not semisymmetric, then $M$ is an Einstein manifold with scalar curvature $kn(n-1)$ and $L_{S}=\dfrac{1}{n-1}$. Consequently, we have the following\/{\rm :}~ \[ \begin{tabular}{|l|l|l|} \hline ${\boldmath M}$ & $L_{S}=$ & ${\boldmath S=}$ \\ \hline $N(k)$-contact metric & $\dfrac{1}{n-1}$ & $k(n-1)g$ \\ \hline Sasakian & $\dfrac{1}{n-1}$ & $(n-1)g$ \\ \hline Kenmotsu {\rm \cite{Ozgur-06}} & $\dfrac{1}{n-1}$ & $-(n-1)g$ \\ \hline $(\varepsilon )$-Sasakian & $\dfrac{1}{n-1}$ & $\varepsilon (n-1)g$ \\ \hline para-Sasakian & $\dfrac{1}{n-1}$ & $-(n-1)g$ \\ \hline $(\varepsilon )$-para-Sasakian & $\dfrac{1}{n-1}$ & $-\,\varepsilon (n-1)g$ \\ \hline \end{tabular} \ \] \end{cor}
\begin{cor} Let $M$ be an $n$-dimensional $(R,{\cal C}_{\ast },S^{\ell })$-pseudosymmetric $\left( N(k),\xi \right) $-semi-Riemannian manifold. If $M$ is not quasi-conformal semisymmetric, then \begin{eqnarray*} S^{\ell +1} &=&\left( \left( \frac{r}{n(n-1)}-k\right) \frac{a_{0}}{a_{1}} +\left( \frac{2r}{n}-k(n-1)\right) \right) \left( S^{\ell }-k^{\ell }(n-1)^{\ell }g\right) \\ &&+k^{\ell }(n-1)^{\ell }S.
\end{eqnarray*} Consequently, we have the following\/{\rm :}~ \[ \begin{tabular}{|l|l|} \hline ${\boldmath M}$ & ${\boldmath S^{\ell +1}=}$ \\ \hline $N(k)$-contact metric & $\left( \left( \dfrac{r}{n(n-1)}-k\right) \dfrac{ a_{0}}{a_{1}}+\left( \dfrac{2r}{n}-k(n-1)\right) \right) \left( S^{\ell }-k^{\ell }(n-1)^{\ell }g\right) $ \\ & $+k^{\ell }(n-1)^{\ell }S$ \\ \hline Sasakian & $\left( \left( \dfrac{r}{n(n-1)}-1\right) \dfrac{a_{0}}{a_{1}} +\left( \dfrac{2r}{n}-1(n-1)\right) \right) \left( S^{\ell }-(n-1)^{\ell }g\right) $ \\ & $+(n-1)^{\ell }S$ \\ \hline Kenmotsu & $\left( \left( \dfrac{r}{n(n-1)}+1\right) \dfrac{a_{0}}{a_{1}} +\left( \frac{2r}{n}+(n-1)\right) \right) \left( S^{\ell }-(-1)^{\ell }(n-1)^{\ell }g\right) $ \\ & $+(-1)^{\ell }(n-1)^{\ell }S$ \\ \hline $(\varepsilon )$-Sasakian & $\left( \left( \dfrac{r}{n(n-1)}-\varepsilon \right) \dfrac{a_{0}}{a_{1}}+\left( \frac{2r}{n}-\varepsilon (n-1)\right) \right) \left( S^{\ell }-(\varepsilon )^{\ell }(n-1)^{\ell }g\right) $ \\ & $+(\varepsilon )^{\ell }(n-1)^{\ell }S$ \\ \hline para-Sasakian & $\left( \left( \dfrac{r}{n(n-1)}+1\right) \dfrac{a_{0}}{a_{1} }+\left( \frac{2r}{n}+(n-1)\right) \right) \left( S^{\ell }-(-1)^{\ell }(n-1)^{\ell }g\right) $ \\ & $+(-1)^{\ell }(n-1)^{\ell }S$ \\ \hline $(\varepsilon )$-para-Sasakian & $\left( \left( \dfrac{r}{n(n-1)} +\varepsilon \right) \dfrac{a_{0}}{a_{1}}+\left( \frac{2r}{n}+\varepsilon (n-1)\right) \right) \left( S^{\ell }-(-\varepsilon )^{\ell }(n-1)^{\ell }g\right) $ \\ & $+(-\varepsilon )^{\ell }(n-1)^{\ell }S$ \\ \hline \end{tabular} \] \end{cor} \begin{cor} Let $M$ be an $n$-dimensional $(R,{\cal C}_{\ast },S)$-pseudosymmetric $ \left( N(k),\xi \right) $-semi-Riemannian manifold. If $M$ is not quasi-conformal-semisymmetric, then \begin{eqnarray*} S^{2} &=&\left( \left( \frac{r}{n(n-1)}-k\right) \frac{a_{0}}{a_{1}}+\frac{2r }{n}\right) S \\ &&-k(n-1)\left( \left( \frac{r}{n(n-1)}-k\right) \frac{a_{0}}{a_{1}}+\left( \frac{2r}{n}-k(n-1)\right) \right) g. 
\end{eqnarray*} Consequently, we have the following\/{\rm :}~ \[ \begin{tabular}{|l|l|} \hline ${\boldmath M}$ & ${\boldmath S}^{2}{=}$ \\ \hline $N(k)$-contact metric & $\left( \left( \dfrac{r}{n(n-1)}-k\right) \dfrac{ a_{0}}{a_{1}}+\dfrac{2r}{n}\right) S$ \\ & $-k(n-1)\left( \left( \dfrac{r}{n(n-1)}-k\right) \dfrac{a_{0}}{a_{1}} +\left( \dfrac{2r}{n}-k(n-1)\right) \right) g$ \\ \hline Sasakian & $\left( \left( \dfrac{r}{n(n-1)}-1\right) \dfrac{a_{0}}{a_{1}}+ \dfrac{2r}{n}\right) S$ \\ & $-(n-1)\left( \left( \dfrac{r}{n(n-1)}-1\right) \dfrac{a_{0}}{a_{1}} +\left( \dfrac{2r}{n}-(n-1)\right) \right) g$ \\ \hline Kenmotsu & $\left( \left( \dfrac{r}{n(n-1)}+1\right) \dfrac{a_{0}}{a_{1}}+ \dfrac{2r}{n}\right) S$ \\ & $+(n-1)\left( \left( \dfrac{r}{n(n-1)}+1\right) \dfrac{a_{0}}{a_{1}} +\left( \dfrac{2r}{n}+(n-1)\right) \right) g$ \\ \hline $(\varepsilon )$-Sasakian & $\left( \left( \dfrac{r}{n(n-1)}-\varepsilon \right) \dfrac{a_{0}}{a_{1}}+\dfrac{2r}{n}\right) S$ \\ & $-\varepsilon (n-1)\left( \left( \dfrac{r}{n(n-1)}-\varepsilon \right) \dfrac{a_{0}}{a_{1}}+\left( \dfrac{2r}{n}-\varepsilon (n-1)\right) \right) g$ \\ \hline para-Sasakian & $\left( \left( \dfrac{r}{n(n-1)}+1\right) \dfrac{a_{0}}{a_{1} }+\dfrac{2r}{n}\right) S$ \\ & $+(n-1)\left( \left( \dfrac{r}{n(n-1)}+1\right) \dfrac{a_{0}}{a_{1}} +\left( \dfrac{2r}{n}+(n-1)\right) \right) g$ \\ \hline $(\varepsilon )$-para-Sasakian & $\left( \left( \dfrac{r}{n(n-1)} +\varepsilon \right) \dfrac{a_{0}}{a_{1}}+\dfrac{2r}{n}\right) S$ \\ & $+\varepsilon (n-1)\left( \left( \dfrac{r}{n(n-1)}+\varepsilon \right) \dfrac{a_{0}}{a_{1}}+\left( \dfrac{2r}{n}+\varepsilon (n-1)\right) \right) g$ \\ \hline \end{tabular} \] \end{cor} \begin{cor} Let $M$ be an $n$-dimensional $(R,{\cal C},S^{\ell })$-pseudosymmetric $ \left( N(k),\xi \right) $-semi-Riemannian manifold. If $M$ is not Weyl-semisymmetric, then \begin{eqnarray*} S^{\ell +1} &=&\left( \frac{r}{n-1}-k\right) \left( S^{\ell }-k^{\ell }(n-1)^{\ell }g\right) +k^{\ell }(n-1)^{\ell }S. \end{eqnarray*} Consequently, we have the following\/{\rm :}~ \[ \begin{tabular}{|l|l|} \hline ${\boldmath M}$ & ${\boldmath S^{\ell +1}=}$ \\ \hline $N(k)$-contact metric & $\left( \dfrac{r}{n-1}-k\right) \left( S^{\ell }-k^{\ell }(n-1)^{\ell }g\right) $ \\ & $+k^{\ell }(n-1)^{\ell }S$ \\ \hline Sasakian & $\left( \dfrac{r}{n-1}-1\right) \left( S^{\ell }-(n-1)^{\ell }g\right) $ \\ & $+(n-1)^{\ell }S$ \\ \hline Kenmotsu & $\left( \dfrac{r}{n-1}+1\right) \left( S^{\ell }-(-1)^{\ell }(n-1)^{\ell }g\right) $ \\ & $+(-1)^{\ell }(n-1)^{\ell }S$ \\ \hline $(\varepsilon )$-Sasakian & $\left( \dfrac{r}{n-1}-\varepsilon \right) \left( S^{\ell }-(\varepsilon )^{\ell }(n-1)^{\ell }g\right) $ \\ & $+(\varepsilon )^{\ell }(n-1)^{\ell }S$ \\ \hline para-Sasakian & $\left( \dfrac{r}{n-1}+1\right) \left( S^{\ell }-(-1)^{\ell }(n-1)^{\ell }g\right) $ \\ & $+(-1)^{\ell }(n-1)^{\ell }S$ \\ \hline $(\varepsilon )$-para-Sasakian & $\left( \dfrac{r}{n-1}+\varepsilon \right) \left( S^{\ell }-(-\varepsilon )^{\ell }(n-1)^{\ell }g\right) $ \\ & $+(-\varepsilon )^{\ell }(n-1)^{\ell }S$ \\ \hline \end{tabular} \ \] \end{cor} \begin{cor} Let $M$ be an $n$-dimensional $(R,{\cal C},S)$-pseudosymmetric $\left( N(k),\xi \right) $-semi-Riemannian manifold. If $M$ is not Weyl-semisymmetric, then \begin{eqnarray*} S^{2} &=&\left( k(n-2)+\frac{r}{n-1}\right) S +k(n-1)\left( k-\frac{r}{n-1} \right) g. 
\end{eqnarray*} Consequently, we have the following\/{\rm :}~ \[ \begin{tabular}{|l|l|} \hline ${\boldmath M}$ & ${\boldmath S}^{2}{=}$ \\ \hline $N(k)$-contact metric & $\left( k(n-2)+\frac{r}{n-1}\right) S$ \\ & $+k(n-1)\left( k-\frac{r}{n-1}\right) g$ \\ \hline Sasakian & $\left( (n-2)+\frac{r}{n-1}\right) S$ \\ & $+(n-1)\left( 1-\frac{r}{n-1}\right) g$ \\ \hline Kenmotsu & $\left( -(n-2)+\frac{r}{n-1}\right) S$ \\ & $+(n-1)\left( 1+\frac{r}{n-1}\right) g$ \\ \hline $(\varepsilon )$-Sasakian & $\left( \varepsilon (n-2)+\frac{r}{n-1}\right) S$ \\ & $+\varepsilon (n-1)\left( \varepsilon -\frac{r}{n-1}\right) g$ \\ \hline para-Sasakian & $\left( -(n-2)+\frac{r}{n-1}\right) S$ \\ & $+(n-1)\left( 1+\frac{r}{n-1}\right) g$ \\ \hline $(\varepsilon )$-para-Sasakian & $\left( -\varepsilon (n-2)+\frac{r}{n-1} \right) S$ \\ & $+\varepsilon (n-1)\left( \varepsilon +\frac{r}{n-1}\right) g$ \\ \hline \end{tabular} \ \] \end{cor} \begin{cor} Let $M$ be an $n$-dimensional $(R,{\cal L},S^{\ell })$-pseudosymmetric $ \left( N(k),\xi \right) $-semi-Riemannian manifold. If $M$ is not conharmonic semisymmetric, then \[ S^{\ell +1}=-kS^{\ell }+k^{\ell }(n-1)^{\ell }S+k^{\ell +1}(n-1)^{\ell }g. \] Consequently, we have the following\/{\rm :}~ \[ \begin{tabular}{|l|l|} \hline ${\boldmath M}$ & ${\boldmath S^{\ell +1}=}$ \\ \hline $N(k)$-contact metric & $-kS^{\ell }+k^{\ell }(n-1)^{\ell }S+k^{\ell +1}(n-1)^{\ell }g$ \\ \hline Sasakian & $-S^{\ell }+(n-1)^{\ell }S+(n-1)^{\ell }g$ \\ \hline Kenmotsu & $S^{\ell }+(-1)^{\ell }(n-1)^{\ell }S+(-1)^{\ell +1}(n-1)^{\ell }g $ \\ \hline $(\varepsilon )$-Sasakian & $-\varepsilon S^{\ell }+(\varepsilon )^{\ell }(n-1)^{\ell }S+(\varepsilon )^{\ell +1}(n-1)^{\ell }g$ \\ \hline para-Sasakian & $S^{\ell }+(-1)^{\ell }(n-1)^{\ell }S+(-1)^{\ell +1}(n-1)^{\ell }g$ \\ \hline $(\varepsilon )$-para-Sasakian & $\varepsilon S^{\ell }+(-\varepsilon )^{\ell }(n-1)^{\ell }S+(-\varepsilon )^{\ell +1}(n-1)^{\ell }g$ \\ \hline \end{tabular} \ \] \end{cor} \begin{cor} Let $M$ be an $n$-dimensional $(R,{\cal L},S)$-pseudosymmetric $\left( N(k),\xi \right) $-semi-Riemannian manifold. If $M$ is not conharmonic semisymmetric, then \[ S^{2}=k(n-2)S+k^{2}(n-1)g. \] Consequently, we have the following\/{\rm :}~ \[ \begin{tabular}{|l|l|} \hline ${\boldmath M}$ & ${\boldmath S}^{2}{=}$ \\ \hline $N(k)$-contact metric & $k(n-2)S+k^{2}(n-1)g$ \\ \hline Sasakian & $(n-2)S+(n-1)g$ \\ \hline Kenmotsu & $-(n-2)S+(n-1)g$ \\ \hline $(\varepsilon )$-Sasakian & $\varepsilon (n-2)S+(n-1)g$ \\ \hline para-Sasakian & $-(n-2)S+(n-1)g$ \\ \hline $(\varepsilon )$-para-Sasakian & $-\varepsilon (n-2)S+(n-1)g$ \\ \hline \end{tabular} \ \] \end{cor} \begin{cor} Let $M$ be an $n$-dimensional $(R,{\cal V},S^{\ell })$-pseudosymmetric $ \left( N(k),\xi \right) $-semi-Riemannian manifold. If $M$ is not concircular semisymmetric, then $M$ either satisfies \[ S^{\ell }=k^{\ell }(n-1)^{\ell }g \] or scalar curvature is $kn(n-1)$ and $L_{S^{\ell }}=\dfrac{1}{k^{\ell -1}(n-1)^{\ell }}$. 
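In particular, for $\ell =1$ the first alternative says that $M$ is an Einstein manifold with $S=k(n-1)g$, while the second gives $r=kn(n-1)$ and $L_{S}=\dfrac{1}{n-1}$; this is the content of the next corollary.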
Consequently, we have the following\/{\rm :}~ \[ \begin{tabular}{|l|l|l|} \hline ${\boldmath M}$ & $L_{S^{\ell }}=$ & {\bf Result} \\ \hline $N(k)$-contact metric & $\dfrac{1}{k^{\ell -1}(n-1)^{\ell }}$ & $S^{\ell }=k^{\ell }(n-1)^{\ell }g\quad {\rm or\quad }r=kn(n-1)$ \\ \hline Sasakian & $\dfrac{1}{(n-1)^{\ell }}$ & $S^{\ell }=(n-1)^{\ell }g\quad {\rm or\quad }r=n(n-1)$ \\ \hline Kenmotsu & $\dfrac{1}{(-1)^{\ell -1}(n-1)^{\ell }}$ & $S^{\ell }=(-1)^{\ell }(n-1)^{\ell }g\quad {\rm or\quad }r=-n(n-1)$ \\ \hline $(\varepsilon )$-Sasakian & $\dfrac{1}{(\varepsilon )^{\ell -1}(n-1)^{\ell }} $ & $S^{\ell }=(\varepsilon )^{\ell }(n-1)^{\ell }g\quad {\rm or\quad } r=\varepsilon n(n-1)$ \\ \hline para-Sasakian & $\dfrac{1}{(-1)^{\ell -1}(n-1)^{\ell }}$ & $S^{\ell }=(-1)^{\ell }(n-1)^{\ell }g\quad {\rm or\quad }r=-n(n-1)$ \\ \hline $(\varepsilon )$-para-Sasakian & $\dfrac{1}{(-\varepsilon )^{\ell -1}(n-1)^{\ell }}$ & $S^{\ell }=(-\varepsilon )^{\ell }(n-1)^{\ell }g\quad {\rm or\quad }r=-\varepsilon n(n-1)$ \\ \hline \end{tabular} \] \end{cor} \begin{cor} Let $M$ be an $n$-dimensional $(R,{\cal V},S)$-pseudosymmetric $\left( N(k),\xi \right) $-semi-Riemannian manifold. If $M$ is not concircularly semisymmetric, then $M$ is either an Einstein manifold or scalar curvature is $kn(n-1)$ and $L_{S}=\dfrac{1}{n-1}$. Consequently, we have the following\/{\rm :}~ \[ \begin{tabular}{|l|l|l|} \hline ${\boldmath M}$ & $L_{S}=$ & ${\boldmath S=}$ \\ \hline $N(k)$-contact metric & $\frac{1}{n-1}$ & $S=k(n-1)g\quad {\rm or\quad } r=kn(n-1)$ \\ \hline Sasakian & $\frac{1}{n-1}$ & $S=(n-1)g\quad {\rm or\quad }r=n(n-1)$ \\ \hline Kenmotsu {\rm \cite{Ozgur-06}} & $\frac{1}{n-1}$ & $S=-(n-1)g\quad {\rm or\quad }r=-n(n-1)$ \\ \hline $(\varepsilon )$-Sasakian & $\frac{1}{n-1}$ & $S=\varepsilon (n-1)g\quad {\rm or\quad }r=\varepsilon n(n-1)$ \\ \hline para-Sasakian & $\frac{1}{n-1}$ & $S=-(n-1)g\quad {\rm or\quad }r=-n(n-1)$ \\ \hline $(\varepsilon )$-para-Sasakian & $\frac{1}{n-1}$ & $S=-\,\varepsilon (n-1)g\quad {\rm or\quad }r=-\varepsilon n(n-1)$ \\ \hline \end{tabular} \] \end{cor} \begin{cor} Let $M$ be an $n$-dimensional $(R,{\cal P}_{\ast },S^{\ell })$ -pseudosymmetric $\left( N(k),\xi \right) $-semi-Riemannian manifold such that $a_{0}+(n-1)a_{1}\not=0$. If $M$ is not pseudo-projective semisymmetric, then \begin{eqnarray*} &&\left( \left( k-\frac{r}{n(n-1)}\right) a_{0}+\left( k(n-1)-\frac{r}{n} \right) a_{1}\right) S^{\ell } \\ &=&k^{\ell }(n-1)^{\ell }\left( \left( k-\frac{r}{n(n-1)}\right) a_{0}-\left( \frac{r}{n}\right) a_{1}\right) g \\ &&+k^{\ell }(n-1)^{\ell }a_{1}S. 
\end{eqnarray*} Consequently, we have the following\/{\rm :}~ \[ \begin{tabular}{|l|l|} \hline ${\boldmath M}$ & ${\boldmath S^{\ell }=}$ \\ \hline $N(k)$-contact metric & $\dfrac{k^{\ell }(n-1)^{\ell }a_{1}}{{\left( \left( k-\dfrac{r}{n(n-1)}\right) a_{0}+\left( k(n-1)-\dfrac{r}{n}\right) a_{1}\right) }}S$ \\ & $+\dfrac{k^{\ell }(n-1)^{\ell }\left( \left( k-\dfrac{r}{n(n-1)}\right) a_{0}-\left( \dfrac{r}{n}\right) a_{1}\right) }{{\left( \left( k-\dfrac{r}{ n(n-1)}\right) a_{0}+\left( k(n-1)-\dfrac{r}{n}\right) a_{1}\right) }}g$ \\ \hline Sasakian & $\dfrac{(n-1)^{\ell }a_{1}}{{\left( \left( 1-\dfrac{r}{n(n-1)} \right) a_{0}+\left( (n-1)-\dfrac{r}{n}\right) a_{1}\right) }}S$ \\ & $+\dfrac{(n-1)^{\ell }\left( \left( 1-\dfrac{r}{n(n-1)}\right) a_{0}-\left( \frac{r}{n}\right) a_{1}\right) }{{\left( \left( 1-\dfrac{r}{ n(n-1)}\right) a_{0}+\left( (n-1)-\dfrac{r}{n}\right) a_{1}\right) }}g$ \\ \hline Kenmotsu & $\dfrac{(-1)^{\ell }(n-1)^{\ell }a_{1}}{{\left( \left( -1-\dfrac{r }{n(n-1)}\right) a_{0}+\left( -(n-1)-\dfrac{r}{n}\right) a_{1}\right) }}S$ \\ & $+\dfrac{(-1)^{\ell }(n-1)^{\ell }\left( \left( -1-\frac{r}{n(n-1)}\right) a_{0}-\left( \dfrac{r}{n}\right) a_{1}\right) }{{\left( \left( -1-\dfrac{r}{ n(n-1)}\right) a_{0}+\left( -(n-1)-\dfrac{r}{n}\right) a_{1}\right) }}g$ \\ \hline $(\varepsilon )$-Sasakian & $\dfrac{(\varepsilon )^{\ell }(n-1)^{\ell }a_{1} }{{\left( \left( \varepsilon -\dfrac{r}{n(n-1)}\right) a_{0}+\left( \varepsilon (n-1)-\dfrac{r}{n}\right) a_{1}\right) }}S$ \\ & $+\dfrac{(\varepsilon )^{\ell }(n-1)^{\ell }\left( \left( \varepsilon - \dfrac{r}{n(n-1)}\right) a_{0}-\left( \dfrac{r}{n}\right) a_{1}\right) }{{ \left( \left( \varepsilon -\dfrac{r}{n(n-1)}\right) a_{0}+\left( \varepsilon (n-1)-\dfrac{r}{n}\right) a_{1}\right) }}g$ \\ \hline para-Sasakian & $\dfrac{(-1)^{\ell }(n-1)^{\ell }a_{1}}{{\left( \left( -1- \dfrac{r}{n(n-1)}\right) a_{0}+\left( -(n-1)-\dfrac{r}{n}\right) a_{1}\right) }}S$ \\ & $+\dfrac{(-1)^{\ell }(n-1)^{\ell }\left( \left( -1-\dfrac{r}{n(n-1)} \right) a_{0}-\left( \dfrac{r}{n}\right) a_{1}\right) }{{\left( \left( -1- \dfrac{r}{n(n-1)}\right) a_{0}+\left( -(n-1)-\dfrac{r}{n}\right) a_{1}\right) }}g$ \\ \hline $(\varepsilon )$-para-Sasakian & $\dfrac{(-\varepsilon )^{\ell }(n-1)^{\ell }a_{1}}{{\left( \left( -\varepsilon -\dfrac{r}{n(n-1)}\right) a_{0}+\left( -\varepsilon (n-1)-\dfrac{r}{n}\right) a_{1}\right) }}S$ \\ & $+\dfrac{(-\varepsilon )^{\ell }(n-1)^{\ell }\left( \left( -\varepsilon - \dfrac{r}{n(n-1)}\right) a_{0}-\left( \dfrac{r}{n}\right) a_{1}\right) }{{ \left( \left( -\varepsilon -\dfrac{r}{n(n-1)}\right) a_{0}+\left( -\varepsilon (n-1)-\dfrac{r}{n}\right) a_{1}\right) }}g$ \\ \hline \end{tabular} \ \] \end{cor} \begin{cor} Let $M$ be an $n$-dimensional $(R,{\cal P}_{\ast },S)$-pseudosymmetric $ \left( N(k),\xi \right) $-semi-Riemannian manifold such that $ a_{0}+(n-1)a_{1}\not=0$. If $M$ is not pseudo-projective semisymmetric, then either $M$ is an Einstein manifold or $r=\dfrac{kn(n-1)a_{0}}{ a_{0}+(n-1)a_{1}}$ and $L_{S}=\dfrac{1}{n-1}$. 
Consequently, we have the following\/{\rm :}~ \[ \begin{tabular}{|l|l|l|} \hline ${\boldmath M}$ & $L_{S}=$ & ${\boldmath S=}$ \\ \hline $N(k)$-contact metric & $\dfrac{1}{n-1}$ & $S=k(n-1)g\quad {\rm or\quad }r= \dfrac{kn(n-1)a_{0}}{a_{0}+(n-1)a_{1}}$ \\ \hline Sasakian & $\dfrac{1}{n-1}$ & $S=(n-1)g\quad {\rm or\quad }r=\dfrac{ n(n-1)a_{0}}{a_{0}+(n-1)a_{1}}$ \\ \hline Kenmotsu & $\dfrac{1}{n-1}$ & $S=-(n-1)g\quad {\rm or\quad }r=-\,\dfrac{ n(n-1)a_{0}}{a_{0}+(n-1)a_{1}}$ \\ \hline $(\varepsilon )$-Sasakian & $\dfrac{1}{n-1}$ & $S=\varepsilon (n-1)g\quad {\rm or\quad }r=\dfrac{\varepsilon n(n-1)a_{0}}{a_{0}+(n-1)a_{1}}$ \\ \hline para-Sasakian & $\dfrac{1}{n-1}$ & $S=-(n-1)g\quad {\rm or\quad }r=-\dfrac{ n(n-1)a_{0}}{a_{0}+(n-1)a_{1}}$ \\ \hline $(\varepsilon )$-para-Sasakian & $\dfrac{1}{n-1}$ & $S=-\,\varepsilon (n-1)g\quad {\rm or\quad }r=-\dfrac{\varepsilon n(n-1)a_{0}}{a_{0}+(n-1)a_{1} }$ \\ \hline \end{tabular} \ \] \end{cor} \begin{cor} Let $M$ be an $n$-dimensional $(R,{\cal P},S^{\ell })$-pseudosymmetric $ \left( N(k),\xi \right) $-semi-Riemannian manifold. If $M$ is not projective semisymmetric, then $M$ is an Einstein manifold with scalar curvature $ kn(n-1)$. Consequently, we have the following\/{\rm :}~ \[ \begin{tabular}{|l|l|} \hline ${\boldmath M}$ & ${\boldmath S=}$ \\ \hline $N(k)$-contact metric & $k(n-1)g$ \\ \hline Sasakian & $(n-1)g$ \\ \hline Kenmotsu & $-\,(n-1)g$ \\ \hline $(\varepsilon )$-Sasakian & $\varepsilon (n-1)g$ \\ \hline para-Sasakian & $-\,(n-1)g$ \\ \hline $(\varepsilon )$-para-Sasakian & $-\,\varepsilon (n-1)g$ \\ \hline \end{tabular} \] \end{cor} \begin{cor} Let $M$ be an $n$-dimensional $(R,{\cal P},S)$-pseudosymmetric $\left( N(k),\xi \right) $-semi-Riemannian manifold. If $M$ is not projective semisymmetric, then $M$ is an Einstein manifold with scalar curvature $ kn(n-1)$ and $L_{S}=\dfrac{1}{n-1}$. Consequently, we have the following\/ {\rm :}~ \[ \begin{tabular}{|l|l|l|} \hline ${\boldmath M}$ & $L_{S}=$ & ${\boldmath S=}$ \\ \hline $N(k)$-contact metric & $\frac{1}{n-1}$ & $k(n-1)g$ \\ \hline Sasakian & $\frac{1}{n-1}$ & $(n-1)g$ \\ \hline Kenmotsu {\rm \cite{Ozgur-06}} & $\frac{1}{n-1}$ & $-(n-1)g$ \\ \hline $(\varepsilon )$-Sasakian & $\frac{1}{n-1}$ & $\varepsilon (n-1)g$ \\ \hline para-Sasakian & $\frac{1}{n-1}$ & $-(n-1)g$ \\ \hline $(\varepsilon )$-para-Sasakian & $\frac{1}{n-1}$ & $-\,\varepsilon (n-1)g$ \\ \hline \end{tabular} \] \end{cor} \begin{cor} Let $M$ be an $n$-dimensional $(R,{\cal M},S^{\ell })$-pseudosymmetric $ \left( N(k),\xi \right) $-semi-Riemannian manifold. If $M$ is not ${\cal M}$ -semisymmetric, then \[ S^{\ell +1}=k(n-1)S^{\ell }+k^{\ell }(n-1)^{\ell }S-k^{\ell +1}(n-1)^{\ell +1}g. 
\] Consequently, we have the following\/{\rm :}~ \[ \begin{tabular}{|l|l|} \hline ${\boldmath M}$ & ${\boldmath S^{\ell +1}=}$ \\ \hline $N(k)$-contact metric & $k(n-1)S^{\ell }+k^{\ell }(n-1)^{\ell }S-k^{\ell +1}(n-1)^{\ell +1}g$ \\ \hline Sasakian & $(n-1)S^{\ell }+(n-1)^{\ell }S-(n-1)^{\ell +1}g$ \\ \hline Kenmotsu & $-(n-1)S^{\ell }+(-1)^{\ell }(n-1)^{\ell }S-(-1)^{\ell +1}(n-1)^{\ell +1}g$ \\ \hline $(\varepsilon )$-Sasakian & $\varepsilon (n-1)S^{\ell }+(\varepsilon )^{\ell }(n-1)^{\ell }S-(\varepsilon )^{\ell +1}(n-1)^{\ell +1}g$ \\ \hline para-Sasakian & $-(n-1)S^{\ell }+(-1)^{\ell }(n-1)^{\ell }S-(-1)^{\ell +1}(n-1)^{\ell +1}g$ \\ \hline $(\varepsilon )$-para-Sasakian & $-\varepsilon (n-1)S^{\ell }+(-\varepsilon )^{\ell }(n-1)^{\ell }S-(-\varepsilon )^{\ell +1}(n-1)^{\ell +1}g$ \\ \hline \end{tabular} \] \end{cor} \begin{cor} Let $M$ be an $n$-dimensional $(R,{\cal M},S)$-pseudosymmetric $\left( N(k),\xi \right) $-semi-Riemannian manifold. If $M$ is not ${\cal M}$ -semisymmetric, then \[ S^{2}=2(n-1)kS-k^{2}(n-1)^{2}g. \] Consequently, we have the following\/{\rm :}~ \[ \begin{tabular}{|l|l|} \hline ${\boldmath M}$ & ${\boldmath S}^{2}{=}$ \\ \hline $N(k)$-contact metric & $2(n-1)kS-k^{2}(n-1)^{2}g$ \\ \hline Sasakian & $2(n-1)S-(n-1)^{2}g$ \\ \hline Kenmotsu & $-2(n-1)S-(n-1)^{2}g$ \\ \hline $(\varepsilon )$-Sasakian & $2(n-1)\varepsilon S-(n-1)^{2}g$ \\ \hline para-Sasakian & $-2(n-1)S-(n-1)^{2}g$ \\ \hline $(\varepsilon )$-para-Sasakian & $-2(n-1)\varepsilon S-(n-1)^{2}g$ \\ \hline \end{tabular} \] \end{cor} \begin{cor} Let $M$ be an $n$-dimensional $(R,{\cal W}_{0},S^{\ell })$-pseudosymmetric $ \left( N(k),\xi \right) $-semi-Riemannian manifold. If $M$ is not ${\cal W} _{0}$-semisymmetric, then \[ S^{\ell +1}=k(n-1)S^{\ell }+k^{\ell }(n-1)^{\ell }S-k^{\ell +1}(n-1)^{\ell +1}g. \] Consequently, we have the following\/{\rm :}~ \[ \begin{tabular}{|l|l|} \hline ${\boldmath M}$ & ${\boldmath S^{\ell +1}=}$ \\ \hline $N(k)$-contact metric & $k(n-1)S^{\ell }+k^{\ell }(n-1)^{\ell }S-k^{\ell +1}(n-1)^{\ell +1}g$ \\ \hline Sasakian & $(n-1)S^{\ell }+(n-1)^{\ell }S-(n-1)^{\ell +1}g$ \\ \hline Kenmotsu & $-(n-1)S^{\ell }+(-1)^{\ell }(n-1)^{\ell }S-(-1)^{\ell +1}(n-1)^{\ell +1}g$ \\ \hline $(\varepsilon )$-Sasakian & $\varepsilon (n-1)S^{\ell }+(\varepsilon )^{\ell }(n-1)^{\ell }S-(\varepsilon )^{\ell +1}(n-1)^{\ell +1}g$ \\ \hline para-Sasakian & $-(n-1)S^{\ell }+(-1)^{\ell }(n-1)^{\ell }S-(-1)^{\ell +1}(n-1)^{\ell +1}g$ \\ \hline $(\varepsilon )$-para-Sasakian & $-\varepsilon (n-1)S^{\ell }+(-\varepsilon )^{\ell }(n-1)^{\ell }S-(-\varepsilon )^{\ell +1}(n-1)^{\ell +1}g$ \\ \hline \end{tabular} \] \end{cor} \begin{cor} Let $M$ be an $n$-dimensional $(R,{\cal W}_{0},S)$-pseudosymmetric $\left( N(k),\xi \right) $-semi-Riemannian manifold. If $M$ is not ${\cal W}_{0}$ -semisymmetric, then \[ S^{2}=2(n-1)kS-k^{2}(n-1)^{2}g. \] Consequently, we have the following\/{\rm :}~ \[ \begin{tabular}{|l|l|} \hline ${\boldmath M}$ & ${\boldmath S}^{2}{=}$ \\ \hline $N(k)$-contact metric & $2(n-1)kS-k^{2}(n-1)^{2}g$ \\ \hline Sasakian & $2(n-1)S-(n-1)^{2}g$ \\ \hline Kenmotsu & $-2(n-1)S-(n-1)^{2}g$ \\ \hline $(\varepsilon )$-Sasakian & $2(n-1)\varepsilon S-(n-1)^{2}g$ \\ \hline para-Sasakian & $-2(n-1)S-(n-1)^{2}g$ \\ \hline $(\varepsilon )$-para-Sasakian & $-2(n-1)\varepsilon S-(n-1)^{2}g$ \\ \hline \end{tabular} \] \end{cor} \begin{cor} Let $M$ be an $n$-dimensional $(R,{\cal W}_{0}^{\ast },S^{\ell })$ -pseudosymmetric $\left( N(k),\xi \right) $-semi-Riemannian manifold. 
If $M$ is not ${\cal W}_{0}^{\ast }$-semisymmetric, then \[ S^{\ell +1}=-k(n-1)S^{\ell }+k^{\ell }(n-1)^{\ell }S+k^{\ell +1}(n-1)^{\ell +1}g. \] Consequently, we have the following\/{\rm :}~ \[ \begin{tabular}{|l|l|} \hline ${\boldmath M}$ & ${\boldmath S^{\ell +1}=}$ \\ \hline $N(k)$-contact metric & $-k(n-1)S^{\ell }+k^{\ell }(n-1)^{\ell }S+k^{\ell +1}(n-1)^{\ell +1}g$ \\ \hline Sasakian & $-(n-1)S^{\ell }+(n-1)^{\ell }S+(n-1)^{\ell +1}g$ \\ \hline Kenmotsu & $(n-1)S^{\ell }+(-1)^{\ell }(n-1)^{\ell }S+(-1)^{\ell +1}(n-1)^{\ell +1}g$ \\ \hline $(\varepsilon )$-Sasakian & $-\varepsilon (n-1)S^{\ell }+(\varepsilon )^{\ell }(n-1)^{\ell }S+(\varepsilon )^{\ell +1}(n-1)^{\ell +1}g$ \\ \hline para-Sasakian & $(n-1)S^{\ell }+(-1)^{\ell }(n-1)^{\ell }S+(-1)^{\ell +1}(n-1)^{\ell +1}g$ \\ \hline $(\varepsilon )$-para-Sasakian & $\varepsilon (n-1)S^{\ell }+(-\varepsilon )^{\ell }(n-1)^{\ell }S+(-\varepsilon )^{\ell +1}(n-1)^{\ell +1}g$ \\ \hline \end{tabular} \] \end{cor} \begin{cor} Let $M$ be an $n$-dimensional $(R,{\cal W}_{0}^{\ast },S)$-pseudosymmetric $ \left( N(k),\xi \right) $-semi-Riemannian manifold. If $M$ is not ${\cal W} _{0}^{\ast }$-semisymmetric, then \[ S^{2}=k^{2}(n-1)^{2}g. \] Consequently, we have the following\/{\rm :}~ \[ \begin{tabular}{|l|l|} \hline ${\boldmath M}$ & ${\boldmath S}^{2}{=}$ \\ \hline $N(k)$-contact metric & $k^{2}(n-1)^{2}g$ \\ \hline Sasakian & $(n-1)^{2}g$ \\ \hline Kenmotsu & $(n-1)^{2}g$ \\ \hline $(\varepsilon )$-Sasakian & $(n-1)^{2}g$ \\ \hline para-Sasakian & $(n-1)^{2}g$ \\ \hline $(\varepsilon )$-para-Sasakian & $(n-1)^{2}g$ \\ \hline \end{tabular} \] \end{cor} \begin{cor} Let $M$ be an $n$-dimensional $(R,{\cal W}_{1},S^{\ell })$-pseudosymmetric $ \left( N(k),\xi \right) $-semi-Riemannian manifold. If $M$ is not ${\cal W} _{1}$-semisymmetric, then \[ 2S^{\ell }=k^{\ell -1}(n-1)^{\ell -1}S+k^{\ell }(n-1)^{\ell }g. \] Consequently, we have the following\/{\rm :}~ \[ \begin{tabular}{|l|l|} \hline ${\boldmath M}$ & ${\boldmath2S^{\ell }=}$ \\ \hline $N(k)$-contact metric & $k^{\ell -1}(n-1)^{\ell -1}S+k^{\ell }(n-1)^{\ell }g$ \\ \hline Sasakian & $(n-1)^{\ell -1}S+(n-1)^{\ell }g$ \\ \hline Kenmotsu & $(-1)^{\ell -1}(n-1)^{\ell -1}S+(-1)^{\ell }(n-1)^{\ell }g$ \\ \hline $(\varepsilon )$-Sasakian & $(\varepsilon )^{\ell -1}(n-1)^{\ell -1}S+(\varepsilon )^{\ell }(n-1)^{\ell }g$ \\ \hline para-Sasakian & $(-1)^{\ell -1}(n-1)^{\ell -1}S+(-1)^{\ell }(n-1)^{\ell }g$ \\ \hline $(\varepsilon )$-para-Sasakian & $(-\varepsilon )^{\ell -1}(n-1)^{\ell -1}S+(-\varepsilon )^{\ell }(n-1)^{\ell }g$ \\ \hline \end{tabular} \] \end{cor} \begin{cor} Let $M$ be an $n$-dimensional $(R,{\cal W}_{1},S)$-pseudosymmetric $\left( N(k),\xi \right) $-semi-Riemannian manifold. If $M$ is not ${\cal W}_{1}$ -semisymmetric, then $M$ is an Einstein manifold with scalar curvature $ kn(n-1)$ and $L_{S}=\dfrac{1}{n-1}$. 
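(Indeed, taking $\ell =1$ in the relation of the preceding corollary gives $2S=S+k(n-1)g$, that is, $S=k(n-1)g$, and hence $r=kn(n-1)$.)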
Consequently, we have the following\/ {\rm :}~ \[ \begin{tabular}{|l|l|l|} \hline ${\boldmath M}$ & $L_{S}=$ & ${\boldmath S=}$ \\ \hline $N(k)$-contact metric & $\dfrac{1}{n-1}$ & $k(n-1)g$ \\ \hline Sasakian & $\dfrac{1}{n-1}$ & $(n-1)g$ \\ \hline Kenmotsu & $\dfrac{1}{n-1}$ & $-\,(n-1)g$ \\ \hline $(\varepsilon )$-Sasakian & $\dfrac{1}{n-1}$ & $\varepsilon (n-1)g$ \\ \hline para-Sasakian & $\dfrac{1}{n-1}$ & $-\,(n-1)g$ \\ \hline $(\varepsilon )$-para-Sasakian & $\dfrac{1}{n-1}$ & $-\,\varepsilon (n-1)g$ \\ \hline \end{tabular} \] \end{cor} \begin{cor} Let $M$ be an $n$-dimensional $(R,{\cal W}_{1}^{\ast },S^{\ell })$ -pseudosymmetric $\left( N(k),\xi \right) $-semi-Riemannian manifold. If $M$ is not ${\cal W}_{1}^{\ast }$-semisymmetric, then $M$ is an Einstein manifold with scalar curvature $kn(n-1)$. Consequently, we have the following\/{\rm :}~ \[ \begin{tabular}{|l|l|} \hline ${\boldmath M}$ & ${\boldmath S=}$ \\ \hline $N(k)$-contact metric & $k(n-1)g$ \\ \hline Sasakian & $(n-1)g$ \\ \hline Kenmotsu & $-(n-1)g$ \\ \hline $(\varepsilon )$-Sasakian & $\varepsilon (n-1)g$ \\ \hline para-Sasakian & $-(n-1)g$ \\ \hline $(\varepsilon )$-para-Sasakian & $-\,\varepsilon (n-1)g$ \\ \hline \end{tabular} \] \end{cor} \begin{cor} Let $M$ be an $n$-dimensional $(R,{\cal W}_{1}^{\ast },S)$-pseudosymmetric $ \left( N(k),\xi \right) $-semi-Riemannian manifold. If $M$ is not ${\cal W} _{1}^{\ast }$-semisymmetric, then $M$ is an Einstein manifold with scalar curvature $kn(n-1)$ and $L_{S}=\dfrac{1}{n-1}$. Consequently, we have the following\/{\rm :}~ \[ \begin{tabular}{|l|l|l|} \hline ${\boldmath M}$ & $L_{S}=$ & ${\boldmath S=}$ \\ \hline $N(k)$-contact metric & $\dfrac{1}{n-1}$ & $k(n-1)g$ \\ \hline Sasakian & $\dfrac{1}{n-1}$ & $(n-1)g$ \\ \hline Kenmotsu & $\dfrac{1}{n-1}$ & $-(n-1)g$ \\ \hline $(\varepsilon )$-Sasakian & $\dfrac{1}{n-1}$ & $\varepsilon (n-1)g$ \\ \hline para-Sasakian & $\dfrac{1}{n-1}$ & $-(n-1)g$ \\ \hline $(\varepsilon )$-para-Sasakian & $\dfrac{1}{n-1}$ & $-\,\varepsilon (n-1)g$ \\ \hline \end{tabular} \] \end{cor} \begin{cor} Let $M$ be an $n$-dimensional $(R,{\cal W}_{2},S^{\ell })$-pseudosymmetric $ \left( N(k),\xi \right) $-semi-Riemannian manifold. If $M$ is not ${\cal W} _{2}$-semisymmetric, then \[ L_{S^{\ell }}S^{\ell +1}=k(n-1)L_{S^{\ell }}S^{\ell }+kS-k^{2}(n-1)g. \] Consequently, we have the following\/{\rm :}~ \[ \begin{tabular}{|l|l|} \hline ${\boldmath M}$ & ${\boldmath L_{S^{\ell }}S^{\ell +1}=}$ \\ \hline $N(k)$-contact metric & $k(n-1)L_{S^{\ell }}S^{\ell }+kS-k^{2}(n-1)g$ \\ \hline Sasakian & $(n-1)L_{S^{\ell }}S^{\ell }+S-(n-1)g$ \\ \hline Kenmotsu & $-(n-1)L_{S^{\ell }}S^{\ell }-S-(n-1)g$ \\ \hline $(\varepsilon )$-Sasakian & $\varepsilon (n-1)L_{S^{\ell }}S^{\ell }+\varepsilon S-(n-1)g$ \\ \hline para-Sasakian & $-(n-1)L_{S^{\ell }}S^{\ell }-S-(n-1)g$ \\ \hline $(\varepsilon )$-para-Sasakian & $-\varepsilon (n-1)L_{S^{\ell }}S^{\ell }-\varepsilon S-(n-1)g$ \\ \hline \end{tabular} \] \end{cor} \begin{cor} Let $M$ be an $n$-dimensional $(R,{\cal W}_{2},S)$-pseudosymmetric $\left( N(k),\xi \right) $-semi-Riemannian manifold. If $M$ is not ${\cal W}_{2}$ -semisymmetric, then \[ L_{S}S^{2}=k\left( (n-1)L_{S}+1\right) S-k^{2}(n-1)g. 
\] Consequently, we have the following\/{\rm :}~ \[ \begin{tabular}{|l|l|} \hline ${\boldmath M}$ & ${\boldmath L_{S}S^{2}=}$ \\ \hline $N(k)$-contact metric & $k\left( (n-1)L_{S}+1\right) S-k^{2}(n-1)g$ \\ \hline Sasakian & $\left( (n-1)L_{S}+1\right) S-(n-1)g$ \\ \hline Kenmotsu & $-\,\left( (n-1)L_{S}+1\right) S-(n-1)g$ \\ \hline $(\varepsilon )$-Sasakian & $\varepsilon \left( (n-1)L_{S}+1\right) S-(n-1)g$ \\ \hline para-Sasakian & $-\,\left( (n-1)L_{S}+1\right) S-(n-1)g$ \\ \hline $(\varepsilon )$-para-Sasakian & $-\,\varepsilon \left( (n-1)L_{S}+1\right) S-(n-1)g$ \\ \hline \end{tabular} \] \end{cor} \begin{cor} Let $M$ be an $n$-dimensional $(R,{\cal W}_{3},S^{\ell })$-pseudosymmetric $ \left( N(k),\xi \right) $-semi-Riemannian manifold. If $M$ is not ${\cal W} _{3}$-semisymmetric, then \[ S^{\ell }=k^{\ell }(n-1)^{\ell }g \] and $L_{S^{\ell }}=\dfrac{1}{k^{\ell -1}(n-1)^{\ell }}$. Consequently, we have the following\/{\rm :}~ \[ \begin{tabular}{|l|l|l|} \hline ${\boldmath M}$ & $L_{S^{\ell }}=$ & ${\boldmath S}^{\ell }{=}$ \\ \hline $N(k)$-contact metric & $\dfrac{1}{k^{\ell -1}(n-1)^{\ell }}$ & $k^{\ell }(n-1)^{\ell }g$ \\ \hline Sasakian & $\dfrac{1}{(n-1)^{\ell }}$ & $(n-1)^{\ell }g$ \\ \hline Kenmotsu & $\dfrac{1}{(-1)^{\ell -1}(n-1)^{\ell }}$ & $(-1)^{\ell }(n-1)^{\ell }g$ \\ \hline $(\varepsilon )$-Sasakian & $\dfrac{1}{(\varepsilon )^{\ell -1}(n-1)^{\ell }} $ & $(\varepsilon )^{\ell }(n-1)^{\ell }g$ \\ \hline para-Sasakian & $\dfrac{1}{(-1)^{\ell -1}(n-1)^{\ell }}$ & $(-1)^{\ell }(n-1)^{\ell }g$ \\ \hline $(\varepsilon )$-para-Sasakian & $\dfrac{1}{(-\varepsilon )^{\ell -1}(n-1)^{\ell }}$ & $(-\varepsilon )^{\ell }(n-1)^{\ell }g$ \\ \hline \end{tabular} \] \end{cor} \begin{cor} Let $M$ be an $n$-dimensional $(R,{\cal W}_{3},S)$-pseudosymmetric $\left( N(k),\xi \right) $-semi-Riemannian manifold. If $M$ is not ${\cal W}_{3}$ -semisymmetric, then $M$ is an Einstein manifold with scalar curvature $ kn(n-1)$ and $L_{S}=\dfrac{1}{n-1}$. Consequently, we have the following\/ {\rm :}~ \[ \begin{tabular}{|l|l|l|} \hline ${\boldmath M}$ & $L_{S}=$ & ${\boldmath S=}$ \\ \hline $N(k)$-contact metric & $\dfrac{1}{n-1}$ & $k(n-1)g$ \\ \hline Sasakian & $\dfrac{1}{n-1}$ & $(n-1)g$ \\ \hline Kenmotsu & $\dfrac{1}{n-1}$ & $-\,(n-1)g$ \\ \hline $(\varepsilon )$-Sasakian & $\dfrac{1}{n-1}$ & $\varepsilon (n-1)g$ \\ \hline para-Sasakian & $\dfrac{1}{n-1}$ & $-\,(n-1)g$ \\ \hline $(\varepsilon )$-para-Sasakian & $\dfrac{1}{n-1}$ & $-\,\varepsilon (n-1)g$ \\ \hline \end{tabular} \] \end{cor} \begin{cor} Let $M$ be an $n$-dimensional $(R,{\cal W}_{4},S^{\ell })$-pseudosymmetric $ \left( N(k),\xi \right) $-semi-Riemannian manifold. If $M$ is not ${\cal W} _{4}$-semisymmetric, then \[ L_{S^{\ell }}S^{\ell +1}=k(n-1)L_{S^{\ell }}S^{\ell }+kS-k^{2}(n-1)g. 
\] Consequently, we have the following\/{\rm :}~ \[ \begin{tabular}{|l|l|} \hline ${\boldmath M}$ & ${\boldmath L_{S^{\ell }}S^{\ell +1}=}$ \\ \hline $N(k)$-contact metric & $k(n-1)L_{S^{\ell }}S^{\ell }+kS-k^{2}(n-1)g$ \\ \hline Sasakian & $(n-1)L_{S^{\ell }}S^{\ell }+S-(n-1)g$ \\ \hline Kenmotsu & $-(n-1)L_{S^{\ell }}S^{\ell }-S-(n-1)g$ \\ \hline $(\varepsilon )$-Sasakian & $\varepsilon (n-1)L_{S^{\ell }}S^{\ell }+\varepsilon S-(n-1)g$ \\ \hline para-Sasakian & $-(n-1)L_{S^{\ell }}S^{\ell }-S-(n-1)g$ \\ \hline $(\varepsilon )$-para-Sasakian & $-\varepsilon (n-1)L_{S^{\ell }}S^{\ell }-\varepsilon S-(n-1)g$ \\ \hline \end{tabular} \] \end{cor}
\begin{cor} Let $M$ be an $n$-dimensional $(R,{\cal W}_{4},S)$-pseudosymmetric $\left( N(k),\xi \right) $-semi-Riemannian manifold. If $M$ is not ${\cal W}_{4}$-semisymmetric, then \[ L_{S}S^{2}=k\left( (n-1)L_{S}+1\right) S-k^{2}(n-1)g. \] Consequently, we have the following\/{\rm :}~ \[ \begin{tabular}{|l|l|} \hline ${\boldmath M}$ & ${\boldmath L_{S}S^{2}=}$ \\ \hline $N(k)$-contact metric & $k\left( (n-1)L_{S}+1\right) S-k^{2}(n-1)g$ \\ \hline Sasakian & $\left( (n-1)L_{S}+1\right) S-(n-1)g$ \\ \hline Kenmotsu & $-\,\left( (n-1)L_{S}+1\right) S-(n-1)g$ \\ \hline $(\varepsilon )$-Sasakian & $\varepsilon \left( (n-1)L_{S}+1\right) S-(n-1)g$ \\ \hline para-Sasakian & $-\,\left( (n-1)L_{S}+1\right) S-(n-1)g$ \\ \hline $(\varepsilon )$-para-Sasakian & $-\,\varepsilon \left( (n-1)L_{S}+1\right) S-(n-1)g$ \\ \hline \end{tabular} \] \end{cor}
\begin{cor} Let $M$ be an $n$-dimensional $(R,{\cal W}_{5},S^{\ell })$-pseudosymmetric $\left( N(k),\xi \right) $-semi-Riemannian manifold. If $M$ is not ${\cal W}_{5}$-semisymmetric, then either \[ S=k(n-1)g \] or $L_{S^{\ell }}=\dfrac{1}{k^{\ell -1}(n-1)^{\ell }}$. Consequently, we have the following\/{\rm :}~ \[ \begin{tabular}{|l|l|l|} \hline ${\boldmath M}$ & $L_{S^{\ell }}=$ & ${\boldmath S}=$ \\ \hline $N(k)$-contact metric & $\dfrac{1}{k^{\ell -1}(n-1)^{\ell }}$ & $k(n-1)g$ \\ \hline Sasakian & $\dfrac{1}{(n-1)^{\ell }}$ & $(n-1)g$ \\ \hline Kenmotsu & $\dfrac{1}{(-1)^{\ell -1}(n-1)^{\ell }}$ & $-(n-1)g$ \\ \hline $(\varepsilon )$-Sasakian & $\dfrac{1}{(\varepsilon )^{\ell -1}(n-1)^{\ell }}$ & $\varepsilon (n-1)g$ \\ \hline para-Sasakian & $\dfrac{1}{(-1)^{\ell -1}(n-1)^{\ell }}$ & $-(n-1)g$ \\ \hline $(\varepsilon )$-para-Sasakian & $\dfrac{1}{(-\varepsilon )^{\ell -1}(n-1)^{\ell }}$ & $-\varepsilon (n-1)g$ \\ \hline \end{tabular} \] \end{cor}
\begin{cor} Let $M$ be an $n$-dimensional $(R,{\cal W}_{5},S)$-pseudosymmetric $\left( N(k),\xi \right) $-semi-Riemannian manifold. If $M$ is not ${\cal W}_{5}$-semisymmetric, then $M$ is an Einstein manifold with scalar curvature $kn(n-1)$ and $L_{S}=\dfrac{1}{n-1}$. Consequently, we have the following\/{\rm :}~ \[ \begin{tabular}{|l|l|l|} \hline ${\boldmath M}$ & $L_{S}=$ & ${\boldmath S=}$ \\ \hline $N(k)$-contact metric & $\frac{1}{n-1}$ & $k(n-1)g$ \\ \hline Sasakian & $\frac{1}{n-1}$ & $(n-1)g$ \\ \hline Kenmotsu & $\frac{1}{n-1}$ & $-\,(n-1)g$ \\ \hline $(\varepsilon )$-Sasakian & $\frac{1}{n-1}$ & $\varepsilon (n-1)g$ \\ \hline para-Sasakian & $\frac{1}{n-1}$ & $-\,(n-1)g$ \\ \hline $(\varepsilon )$-para-Sasakian & $\frac{1}{n-1}$ & $-\,\varepsilon (n-1)g$ \\ \hline \end{tabular} \] \end{cor}
\begin{cor} Let $M$ be an $n$-dimensional $(R,{\cal W}_{6},S^{\ell })$-pseudosymmetric $\left( N(k),\xi \right) $-semi-Riemannian manifold.
If $M$ is not ${\cal W} _{6}$-semisymmetric, then \[ (n-1)L_{S^{\ell }}S^{\ell }=(1-k^{\ell -1}(n-1)^{\ell }L_{S^{\ell }})S+(2k^{\ell }(n-1)^{\ell +1}L_{S^{\ell }}-k(n-1))g. \] Consequently, we have the following\/{\rm :}~ \[ \begin{tabular}{|l|l|} \hline ${\boldmath M}$ & ${\boldmath(n-1)L_{S^{\ell }}S^{\ell }=}$ \\ \hline $N(k)$-contact metric & $(1-k^{\ell -1}(n-1)^{\ell }L_{S^{\ell }})S+(2k^{\ell }(n-1)^{\ell +1}L_{S^{\ell }}-k(n-1))g$ \\ \hline Sasakian & $(1-k^{\ell -1}(n-1)^{\ell }L_{S^{\ell }})S+(2k^{\ell }(n-1)^{\ell +1}L_{S^{\ell }}-k(n-1))g$ \\ \hline Kenmotsu & $(1-k^{\ell -1}(n-1)^{\ell }L_{S^{\ell }})S+(2k^{\ell }(n-1)^{\ell +1}L_{S^{\ell }}-k(n-1))g$ \\ \hline $(\varepsilon )$-Sasakian & $(1-k^{\ell -1}(n-1)^{\ell }L_{S^{\ell }})S+(2k^{\ell }(n-1)^{\ell +1}L_{S^{\ell }}-k(n-1))g$ \\ \hline para-Sasakian & $(1-k^{\ell -1}(n-1)^{\ell }L_{S^{\ell }})S+(2k^{\ell }(n-1)^{\ell +1}L_{S^{\ell }}-k(n-1))g$ \\ \hline $(\varepsilon )$-para-Sasakian & $(1-k^{\ell -1}(n-1)^{\ell }L_{S^{\ell }})S+(2k^{\ell }(n-1)^{\ell +1}L_{S^{\ell }}-k(n-1))g$ \\ \hline \end{tabular} \] \end{cor} \begin{cor} Let $M$ be an $n$-dimensional $(R,{\cal W}_{6},S)$-pseudosymmetric $\left( N(k),\xi \right) $-semi-Riemannian manifold. If $M$ is not ${\cal W}_{6}$ -semisymmetric, then $M$ is an Einstein manifold with scalar curvature $ kn(n-1)$ and $L_{S}=\dfrac{1}{2(n-1)}$. Consequently, we have the following\/ {\rm :}~ \[ \begin{tabular}{|l|l|l|} \hline ${\boldmath M}$ & $L_{S}=$ & ${\boldmath S=}$ \\ \hline $N(k)$-contact metric & $\frac{1}{2(n-1)}$ & $k(n-1)g$ \\ \hline Sasakian & $\frac{1}{2(n-1)}$ & $(n-1)g$ \\ \hline Kenmotsu & $\frac{1}{2(n-1)}$ & $-\,(n-1)g$ \\ \hline $(\varepsilon )$-Sasakian & $\frac{1}{2(n-1)}$ & $\varepsilon (n-1)g$ \\ \hline para-Sasakian & $\frac{1}{2(n-1)}$ & $-\,(n-1)g$ \\ \hline $(\varepsilon )$-para-Sasakian & $\frac{1}{2(n-1)}$ & $-\,\varepsilon (n-1)g$ \\ \hline \end{tabular} \] \end{cor} \begin{cor} Let $M$ be an $n$-dimensional $(R,{\cal W}_{7},S^{\ell })$-pseudosymmetric $ \left( N(k),\xi \right) $-semi-Riemannian manifold. If $M$ is not ${\cal W} _{7}$-semisymmetric, then \[ (n-1)L_{S^{\ell }}S^{\ell }=(1-k^{\ell -1}(n-1)^{\ell }L_{S^{\ell }})S+(2k^{\ell }(n-1)^{\ell +1}L_{S^{\ell }}-k(n-1))g. \] Consequently, we have the following\/{\rm :}~ \[ \begin{tabular}{|l|l|} \hline ${\boldmath M}$ & ${\boldmath(n-1)L_{S^{\ell }}S^{\ell }=}$ \\ \hline $N(k)$-contact metric & $(1-k^{\ell -1}(n-1)^{\ell }L_{S^{\ell }})S+(2k^{\ell }(n-1)^{\ell +1}L_{S^{\ell }}-k(n-1))g$ \\ \hline Sasakian & $(1-(n-1)^{\ell }L_{S^{\ell }})S+(2(n-1)^{\ell +1}L_{S^{\ell }}-(n-1))g$ \\ \hline Kenmotsu & $(1-(-1)^{\ell -1}(n-1)^{\ell }L_{S^{\ell }})S+(2(-1)^{\ell }(n-1)^{\ell +1}L_{S^{\ell }}+(n-1))g$ \\ \hline $(\varepsilon )$-Sasakian & $(1-(\varepsilon )^{\ell -1}(n-1)^{\ell }L_{S^{\ell }})S+(2(\varepsilon )^{\ell }(n-1)^{\ell +1}L_{S^{\ell }}-\varepsilon (n-1))g$ \\ \hline para-Sasakian & $(1-(-1)^{\ell -1}(n-1)^{\ell }L_{S^{\ell }})S+(2(-1)^{\ell }(n-1)^{\ell +1}L_{S^{\ell }}+(n-1))g$ \\ \hline $(\varepsilon )$-para-Sasakian & $(1-(-\varepsilon )^{\ell -1}(n-1)^{\ell }L_{S^{\ell }})S+(2(-\varepsilon )^{\ell }(n-1)^{\ell +1}L_{S^{\ell }}+\varepsilon (n-1))g$ \\ \hline \end{tabular} \] \end{cor} \begin{cor} Let $M$ be an $n$-dimensional $(R,{\cal W}_{7},S)$-pseudosymmetric $\left( N(k),\xi \right) $-semi-Riemannian manifold. If $M$ is not ${\cal W}_{7}$ -semisymmetric, then $M$ is an Einstein manifold with scalar curvature $ kn(n-1)$ and $L_{S}=\dfrac{1}{2(n-1)}$. 
Consequently, we have the following\/ {\rm :}~ \[ \begin{tabular}{|l|l|l|} \hline ${\boldmath M}$ & $L_{S}=$ & ${\boldmath S=}$ \\ \hline $N(k)$-contact metric & $\frac{1}{2(n-1)}$ & $k(n-1)g$ \\ \hline Sasakian & $\frac{1}{2(n-1)}$ & $(n-1)g$ \\ \hline Kenmotsu & $\frac{1}{2(n-1)}$ & $-\,(n-1)g$ \\ \hline $(\varepsilon )$-Sasakian & $\frac{1}{2(n-1)}$ & $\varepsilon (n-1)g$ \\ \hline para-Sasakian & $\frac{1}{2(n-1)}$ & $-\,(n-1)g$ \\ \hline $(\varepsilon )$-para-Sasakian & $\frac{1}{2(n-1)}$ & $-\,\varepsilon (n-1)g$ \\ \hline \end{tabular} \] \end{cor} \begin{cor} Let $M$ be an $n$-dimensional $(R,{\cal W}_{8},S^{\ell })$-pseudosymmetric $ \left( N(k),\xi \right) $-semi-Riemannian manifold. If $M$ is not ${\cal W} _{8}$-semisymmetric, then \[ S^{\ell }=k^{\ell }(n-1)^{\ell }g \] and $L_{S^{\ell }}=\dfrac{1}{k^{\ell -1}(n-1)^{\ell }}$. Consequently, we have the following\/{\rm :}~ \[ \begin{tabular}{|l|l|l|} \hline ${\boldmath M}$ & $L_{S^{\ell }}=$ & ${\boldmath S}^{\ell }=$ \\ \hline $N(k)$-contact metric & $\dfrac{1}{k^{\ell -1}(n-1)^{\ell }}$ & $k^{\ell }(n-1)^{\ell }g$ \\ \hline Sasakian & $\dfrac{1}{(n-1)^{\ell }}$ & $(n-1)^{\ell }g$ \\ \hline Kenmotsu & $\dfrac{1}{(-1)^{\ell -1}(n-1)^{\ell }}$ & $(-1)^{\ell }(n-1)^{\ell }g$ \\ \hline $(\varepsilon )$-Sasakian & $\dfrac{1}{(\varepsilon )^{\ell -1}(n-1)^{\ell }} $ & $(\varepsilon )^{\ell }(n-1)^{\ell }g$ \\ \hline para-Sasakian & $\dfrac{1}{(-1)^{\ell -1}(n-1)^{\ell }}$ & $(-1)^{\ell }(n-1)^{\ell }g$ \\ \hline $(\varepsilon )$-para-Sasakian & $\dfrac{1}{(-\varepsilon )^{\ell -1}(n-1)^{\ell }}$ & $(-\varepsilon )^{\ell }(n-1)^{\ell }g$ \\ \hline \end{tabular} \] \end{cor} \begin{cor} Let $M$ be an $n$-dimensional $(R,{\cal W}_{8},S)$-pseudosymmetric $\left( N(k),\xi \right) $-semi-Riemannian manifold. If $M$ is not ${\cal W}_{8}$ -semisymmetric, then $M$ is an Einstein manifold with scalar curvature $ kn(n-1)$ and $L_{S}=\dfrac{1}{n-1}$. Consequently, we have the following\/ {\rm :}~ \[ \begin{tabular}{|l|l|l|} \hline ${\boldmath M}$ & $L_{S}=$ & ${\boldmath S=}$ \\ \hline $N(k)$-contact metric & $\dfrac{1}{n-1}$ & $k(n-1)g$ \\ \hline Sasakian & $\dfrac{1}{n-1}$ & $(n-1)g$ \\ \hline Kenmotsu & $\dfrac{1}{n-1}$ & $-\,(n-1)g$ \\ \hline $(\varepsilon )$-Sasakian & $\dfrac{1}{n-1}$ & $\varepsilon (n-1)g$ \\ \hline para-Sasakian & $\dfrac{1}{n-1}$ & $-\,(n-1)g$ \\ \hline $(\varepsilon )$-para-Sasakian & $\dfrac{1}{n-1}$ & $-\,\varepsilon (n-1)g$ \\ \hline \end{tabular} \] \end{cor} \begin{cor} Let $M$ be an $n$-dimensional $(R,{\cal W}_{9},S^{\ell })$-pseudosymmetric $ \left( N(k),\xi \right) $-semi-Riemannian manifold. If $M$ is not ${\cal W} _{9}$-semisymmetric, then \[ S^{\ell }=k^{\ell }(n-1)^{\ell }g \] and $L_{S^{\ell }}=\dfrac{1}{k^{\ell -1}(n-1)^{\ell }}$. 
Consequently, we have the following\/{\rm :}~ \[ \begin{tabular}{|l|l|l|} \hline ${\boldmath M}$ & $L_{S^{\ell }}=$ & ${\boldmath S}^{\ell }{=}$ \\ \hline $N(k)$-contact metric & $\dfrac{1}{k^{\ell -1}(n-1)^{\ell }}$ & $k^{\ell }(n-1)^{\ell }g$ \\ \hline Sasakian & $\dfrac{1}{(n-1)^{\ell }}$ & $(n-1)^{\ell }g$ \\ \hline Kenmotsu & $\dfrac{1}{(-1)^{\ell -1}(n-1)^{\ell }}$ & $(-1)^{\ell }(n-1)^{\ell }g$ \\ \hline $(\varepsilon )$-Sasakian & $\dfrac{1}{(\varepsilon )^{\ell -1}(n-1)^{\ell }}$ & $(\varepsilon )^{\ell }(n-1)^{\ell }g$ \\ \hline para-Sasakian & $\dfrac{1}{(-1)^{\ell -1}(n-1)^{\ell }}$ & $(-1)^{\ell }(n-1)^{\ell }g$ \\ \hline $(\varepsilon )$-para-Sasakian & $\dfrac{1}{(-\varepsilon )^{\ell -1}(n-1)^{\ell }}$ & $(-\varepsilon )^{\ell }(n-1)^{\ell }g$ \\ \hline \end{tabular} \] \end{cor}
\begin{cor} Let $M$ be an $n$-dimensional $(R,{\cal W}_{9},S)$-pseudosymmetric $\left( N(k),\xi \right) $-semi-Riemannian manifold. If $M$ is not ${\cal W}_{9}$-semisymmetric, then $M$ is an Einstein manifold with scalar curvature $kn(n-1)$ and $L_{S}=\dfrac{1}{n-1}$. Consequently, we have the following\/{\rm :}~ \[ \begin{tabular}{|l|l|l|} \hline ${\boldmath M}$ & $L_{S}=$ & ${\boldmath S=}$ \\ \hline $N(k)$-contact metric & $\dfrac{1}{n-1}$ & $k(n-1)g$ \\ \hline Sasakian & $\dfrac{1}{n-1}$ & $(n-1)g$ \\ \hline Kenmotsu & $\dfrac{1}{n-1}$ & $-\,(n-1)g$ \\ \hline $(\varepsilon )$-Sasakian & $\dfrac{1}{n-1}$ & $\varepsilon (n-1)g$ \\ \hline para-Sasakian & $\dfrac{1}{n-1}$ & $-\,(n-1)g$ \\ \hline $(\varepsilon )$-para-Sasakian & $\dfrac{1}{n-1}$ & $-\,\varepsilon (n-1)g$ \\ \hline \end{tabular} \] \end{cor}
\section{$({\cal T}_{\!a},S_{{\cal T}_{b}})$-pseudosymmetry\label{sect-TSP}}
In this section, we obtain results for an $n$-dimensional $\left( N(k),\xi \right) $-semi-Riemannian manifold satisfying ${\cal T}_{\!a}\cdot S_{{\cal T}_{b}}=LQ(g,S_{{\cal T}_{\!b}})$.
\begin{defn-new} A semi-Riemannian manifold is said to be $({\cal T}_{\!a},S_{{\cal T}_{b}})$-pseudosymmetric if \begin{equation} {\cal T}_{\!a}\cdot S_{{\cal T}_{b}}=LQ(g,S_{{\cal T}_{\!b}}), \label{T.S=LQ} \end{equation} where $L$ is some smooth function defined on $M$. In particular, it is said to be $(R\cdot S_{{\cal T}_{a}})$-pseudosymmetric if \begin{equation} R\cdot S_{{\cal T}_{a}}=LQ(g,S_{{\cal T}_{a}}) \label{T.S=LQ1} \end{equation} holds on the set ${\cal U}=\left\{ x\in M:\left( S_{{\cal T}_{a}}-\dfrac{tr(S_{{\cal T}_{a}})}{n}g\right) _{x}\not=0\right\} $, where $L$ is some function defined on ${\cal U}$. \end{defn-new}
\begin{rem-new} If, in {\rm (\ref{T.S=LQ1})}, $S_{{\cal T}_{a}}$ is replaced by $S$, then the manifold is said to be Ricci-pseudosymmetric. \end{rem-new}
\begin{th} Let $M$ be an $n$-dimensional $({\cal T}_{\!a},S_{{\cal T}_{b}})$-pseudosymmetric $(N(k),\xi )$-semi-Riemannian manifold. Then \begin{eqnarray*} &&\varepsilon a_{5}(b_{0}+nb_{1}+b_{2}+b_{3}+b_{5}+b_{6})S^{2}(Y,U) \\ &&+\ \left\{ \varepsilon (b_{0}+nb_{1}+b_{2}+b_{3}+b_{5}+b_{6})\times \right. \\ &&(-ka_{0}+k(n-1)a_{1}+k(n-1)a_{2}-a_{7}r) \\ &&\qquad \left. +\ \varepsilon (a_{1}+a_{5})(b_{4}r+(n-1)b_{7}r)\right\} S(Y,U) \\ &&+\ \left\{ \varepsilon k(n-1)(a_{2}+a_{4})(b_{4}r+(n-1)b_{7}r)\right. \\ &&\qquad +\ \varepsilon k(n-1)(b_{0}+nb_{1}+b_{2}+b_{3}+b_{5}+b_{6})\times \\ &&\left. (ka_{0}+k(n-1)a_{4}+a_{7}r)\right\} g(Y,U) \\ &&+\ k(n-1)(a_{1}+a_{2}+2a_{3}+a_{4}+a_{5}+2a_{6})\times \\ &&\left\{ b_{4}r+(n-1)b_{7}r\right. \\ &&\qquad \left.
+\ k(n-1)(b_{0}+nb_{1}+b_{2}+b_{3}+b_{5}+b_{6})\right\} \eta (Y)\eta (U) \\ &=&L(b_{0}+nb_{1}+b_{2}+b_{3}+b_{5}+b_{6})(\varepsilon k(n-1)g(Y,U)-\varepsilon S(Y,U)). \end{eqnarray*} In particular, if $M$ is an $n$-dimensional $({\cal T}_{\!a},S_{{\cal T} _{a}})$-pseudosymmetric $(N(k),\xi )$-semi-Riemannian manifold, then \begin{eqnarray*} &&\varepsilon a_{5}(a_{0}+na_{1}+a_{2}+a_{3}+a_{5}+a_{6})S^{2}(Y,U) \\ &&+\ \left\{ \varepsilon (a_{0}+na_{1}+a_{2}+a_{3}+a_{5}+a_{6})\times \right. \\ &&(-ka_{0}+k(n-1)a_{1}+k(n-1)a_{2}-a_{7}r) \\ &&\qquad \left. +\ \varepsilon (a_{1}+a_{5})(a_{4}r+(n-1)a_{7}r)\right\} S(Y,U) \\ &&+\ \left\{ \varepsilon k(n-1)(a_{2}+a_{4})(a_{4}r+(n-1)a_{7}r)\right. \\ &&\qquad +\ \varepsilon k(n-1)(a_{0}+na_{1}+a_{2}+a_{3}+a_{5}+a_{6})\times \\ &&\left. (ka_{0}+k(n-1)a_{4}+a_{7}r)\right\} g(Y,U) \\ &&+\ k(n-1)(a_{1}+a_{2}+2a_{3}+a_{4}+a_{5}+2a_{6}) \\ &&\left\{ a_{4}r+(n-1)a_{7}r\right. \\ &&\qquad \left. +\ \varepsilon k(n-1)(a_{0}+na_{1}+a_{2}+a_{3}+a_{5}+a_{6})\right\} \eta (Y)\eta (U) \\ &=&L(a_{0}+na_{1}+a_{2}+a_{3}+a_{5}+a_{6})(\varepsilon k(n-1)g(Y,U)-\varepsilon S(Y,U)). \end{eqnarray*} \end{th} \noindent {\bf Proof.} Let $M$ be an $n$-dimensional $({\cal T}_{\!a},S_{ {\cal T}_{b}})$-pseudosymmetric $(N(k),\xi )$-semi-Riemannian manifold. Then \begin{equation} {\cal T}_{\!a}(X,Y)\cdot S_{{\cal T}_{\!b}}(U,V)=LQ(g,S_{{\cal T} _{\!b}})(U,V;X,Y). \label{eq-Ric-pseudo} \end{equation} Taking $X=\xi =V$ in (\ref{eq-Ric-pseudo}), we have \[ {\cal T}_{\!a}(\xi ,Y)\cdot S_{{\cal T}_{\!b}}(U,\xi )=LQ(g,S_{{\cal T} _{\!b}})(U,\xi ;\xi ,Y), \] which gives \begin{eqnarray} &&S_{{\cal T}_{\!b}}({\cal T}_{\!a}(\xi ,Y)U,\xi )+S_{{\cal T}_{\!b}}(U, {\cal T}_{\!a}(\xi ,Y)\xi ) \nonumber \\ &=&L\left( S_{{\cal T}_{\!b}}((\xi \wedge Y)U,\xi )+S_{{\cal T} _{\!b}}(U,(\xi \wedge Y)\xi )\right) . \label{eq-T-S-11} \end{eqnarray} Using (\ref{eq-cond}), (\ref{eq-ricci}), (\ref{eq-xi-X-xi}), (\ref{eq-xi-Y-Z} ), (\ref{eq-ric-T1}) and (\ref{eq-ric-T2}) in (\ref{eq-T-S-11}), we get the result. $\blacksquare $ \begin{th} \label{GCT-rsss} Let $M$ be an $n$-dimensional $({\cal T}_{\!a},S)$ -pseudosymmetric $(N(k),\xi )$-semi-Riemannian manifold. Then \begin{eqnarray*} &&\varepsilon a_{5}\,S^{2}(Y,U)-E\,S(Y,U)-Fg(Y,U)-G\eta (Y)\eta \left( U\right) \\ &=&L(\varepsilon k(n-1)g(Y,U)-\varepsilon S(Y,U)), \end{eqnarray*} where \[ E=\varepsilon \,(ka_{0}+a_{7}r-k(n-1)a_{1}-k(n-1)a_{2}), \] \[ F=-\,\varepsilon k(n-1)(ka_{0}+k(n-1)a_{4}+a_{7}r), \] \[ G=-\,k^{2}(n-1)^{2}(a_{1}+a_{2}+2a_{3}+a_{4}+a_{5}+2a_{6}). \] \end{th} In view of Theorem~\ref{GCT-rsss}, we have the following \begin{cor} Let $M$ be an $n$-dimensional Ricci-pseudosymmetric $\left( N(k),\xi \right) $-semi-Riemannian manifold. Then we have the following table\/{\rm :}~ \[ \begin{tabular}{|l|l|l|} \hline ${\boldmath M}$ & ${\boldmath}L=$ & ${\boldmath S=}$ \\ \hline $N(k)$-contact metric & $k$ & $k(n-1)g$ \\ \hline Sasakian & $1$ & $(n-1)g$ \\ \hline Kenmotsu & $-\,1$ & $-\,(n-1)g$ \\ \hline $(\varepsilon )$-Sasakian & $\varepsilon $ & $\varepsilon (n-1)g$ \\ \hline para-Sasakian & $-\,1$ & $-\,(n-1)g$ \\ \hline $(\varepsilon )$-para-Sasakian & $-\,\varepsilon $ & $-\,\varepsilon (n-1)g$ \\ \hline \end{tabular} \] \end{cor} \begin{cor} Let $M$ be an $n$-dimensional $({\cal C}_{\ast },S)$-pseudosymmetric $\left( N(k),\xi \right) $-semi-Riemannian manifold. 
Then we have the following table\/{\rm :}~ \[ \begin{tabular}{|l|l|} \hline ${\boldmath M}$ & ${\boldmath S^{2}=}$ \\ \hline $N(k)$-contact metric & $-\,\left( \left( k-\dfrac{r}{n(n-1)}\right) \dfrac{ a_{0}}{a_{1}}-\dfrac{2r}{n}-\dfrac{L}{a_{1}}\right) S$ \\ & $+k(n-1)\left( \left( k-\dfrac{r}{n(n-1)}\right) \dfrac{a_{0}}{a_{1}} +k(n-1)-\dfrac{2r}{n}-\dfrac{L}{a_{1}}\right) g$ \\ \hline Sasakian & $-\,\left( \left( 1-\dfrac{r}{n(n-1)}\right) \dfrac{a_{0}}{a_{1}}- \dfrac{2r}{n}-\dfrac{L}{a_{1}}\right) S$ \\ & $+(n-1)\left( \left( 1-\dfrac{r}{n(n-1)}\right) \dfrac{a_{0}}{a_{1}}+(n-1)- \dfrac{2r}{n}-\dfrac{L}{a_{1}}\right) g$ \\ \hline Kenmotsu & $\left( \left( 1+\dfrac{r}{n(n-1)}\right) \dfrac{a_{0}}{a_{1}}+ \dfrac{2r}{n}+\dfrac{L}{a_{1}}\right) S$ \\ & $+(n-1)\left( \left( 1+\dfrac{r}{n(n-1)}\right) \dfrac{a_{0}}{a_{1}}+(n-1)+ \dfrac{2r}{n}+\dfrac{L}{a_{1}}\right) g$ \\ \hline $(\varepsilon )$-Sasakian & $-\,\varepsilon \left( \left( 1-\dfrac{ \varepsilon r}{n(n-1)}\right) \dfrac{a_{0}}{a_{1}}-\dfrac{2\varepsilon r}{n}- \dfrac{\varepsilon L}{a_{1}}\right) S$ \\ & $+\varepsilon (n-1)\left( \left( \varepsilon -\dfrac{r}{n(n-1)}\right) \dfrac{a_{0}}{a_{1}}+\varepsilon (n-1)-\dfrac{2r}{n}-\dfrac{L}{a_{1}}\right) g$ \\ \hline para-Sasakian & $\left( \left( 1+\dfrac{r}{n(n-1)}\right) \dfrac{a_{0}}{a_{1} }+\dfrac{2r}{n}+\dfrac{L}{a_{1}}\right) S$ \\ & $+(n-1)\left( \left( 1+\dfrac{r}{n(n-1)}\right) \dfrac{a_{0}}{a_{1}}+(n-1)+ \dfrac{2r}{n}+\dfrac{L}{a_{1}}\right) g$ \\ \hline $(\varepsilon )$-para-Sasakian & $\varepsilon \left( \left( 1+\dfrac{ \varepsilon r}{n(n-1)}\right) \dfrac{a_{0}}{a_{1}}+\dfrac{2\varepsilon r}{n}+ \dfrac{\varepsilon L}{a_{1}}\right) S$ \\ & $+\varepsilon (n-1)\left( \left( \varepsilon +\dfrac{r}{n(n-1)}\right) \dfrac{a_{0}}{a_{1}}+\varepsilon (n-1)+\dfrac{2r}{n}+\dfrac{L}{a_{1}}\right) g$ \\ \hline \end{tabular} \] \end{cor} \begin{cor} Let $M$ be an $n$-dimensional $({\cal C},S)$-pseudosymmetric $\left( N(k),\xi \right) $-semi-Riemannian manifold. Then we have the following table\/{\rm :}~ \[ \begin{tabular}{|l|l|} \hline ${\boldmath M}$ & ${\boldmath S^{2}=}$ \\ \hline $N(k)$-contact metric & $\left( \dfrac{r}{n-1}+\left( k-L\right) (n-2)\right) S-k(r-\left( L(n-2)+k\right) (n-1))g$ \\ \hline Sasakian & $\left( \dfrac{r}{n-1}+\left( 1-L\right) (n-2)\right) S-(r-\left( L(n-2)+1\right) (n-1))g$ \\ \hline Kenmotsu & $\left( \dfrac{r}{n-1}-\left( 1+L\right) (n-2)\right) S+(r-\left( L(n-2)-1\right) (n-1))g$ \\ \hline $(\varepsilon )$-Sasakian & $\left( \dfrac{r}{n-1}+\left( \varepsilon -L\right) (n-2)\right) S-\varepsilon (r-\left( L(n-2)+\varepsilon \right) (n-1))g$ \\ \hline para-Sasakian & $\left( \dfrac{r}{n-1}-\left( 1+L\right) (n-2)\right) S+1(r-\left( L(n-2)-1\right) (n-1))g$ \\ \hline $(\varepsilon )$-para-Sasakian & $\left( \dfrac{r}{n-1}-\left( \varepsilon +L\right) (n-2)\right) S+\varepsilon (r-\left( L(n-2)-\varepsilon \right) (n-1))g$ \\ \hline \end{tabular} \] \end{cor} \begin{cor} Let $M$ be an $n$-dimensional $({\cal L},S)$-pseudosymmetric $\left( N(k),\xi \right) $-semi-Riemannian manifold. 
Then we have the following table\/{\rm :}~ \[ \begin{tabular}{|l|l|} \hline ${\boldmath M}$ & ${\boldmath S^{2}=}$ \\ \hline $N(k)$-contact metric & $(n-2)\left( k-L\right) S+k(n-1)(k+(n-2)L)g$ \\ \hline Sasakian & $(n-2)\left( 1-L\right) S+(n-1)(1+(n-2)L)g$ \\ \hline Kenmotsu & $-\,(n-2)\left( 1+L\right) S-(n-1)(-1+(n-2)L)g$ \\ \hline $(\varepsilon )$-Sasakian & $(n-2)\left( \varepsilon -L\right) S+\varepsilon (n-1)(\varepsilon +(n-2)L)g$ \\ \hline para-Sasakian & $-\,(n-2)\left( 1+L\right) S-(n-1)(-1+(n-2)L)g$ \\ \hline $(\varepsilon )$-para-Sasakian & $-\,(n-2)\left( \varepsilon +L\right) S-\varepsilon (n-1)(-\varepsilon +(n-2)L)g$ \\ \hline \end{tabular} \] \end{cor} \begin{cor} Let $M$ be an $n$-dimensional $({\cal V},S)$-pseudosymmetric $\left( N(k),\xi \right) $-semi-Riemannian manifold. Then we have the following table\/{\rm :}~ \[ \begin{tabular}{|l|l|l|} \hline ${\boldmath M}$ & ${\boldmath L=}$ & ${\boldmath S=}$ \\ \hline $N(k)$-contact metric & $k-\dfrac{r}{n(n-1)}$ & $k(n-1)g$ \\ \hline Sasakian & $1-\dfrac{r}{n(n-1)}$ & $(n-1)g$ \\ \hline Kenmotsu & $-\,1-\dfrac{r}{n(n-1)}$ & $-\,(n-1)g$ \\ \hline $(\varepsilon )$-Sasakian & $\varepsilon -\dfrac{r}{n(n-1)}$ & $\varepsilon (n-1)g$ \\ \hline para-Sasakian & $-\,1-\dfrac{r}{n(n-1)}$ & $-\,(n-1)g$ \\ \hline $(\varepsilon )$-para-Sasakian & $-\,\varepsilon -\dfrac{r}{n(n-1)}$ & $ -\,\varepsilon (n-1)g$ \\ \hline \end{tabular} \] \end{cor} \begin{cor} Let $M$ be an $n$-dimensional $({\cal P}_{\ast },S)$-pseudosymmetric $\left( N(k),\xi \right) $-semi-Riemannian manifold. Then we have the following table\/{\rm :}~ \[ \begin{tabular}{|l|l|l|} \hline ${\boldmath M}$ & ${\boldmath L=}$ & ${\boldmath S=}$ \\ \hline $N(k)$-contact metric & $\left( k-\dfrac{r}{n(n-1)}\right) a_{0}-\dfrac{r}{n} a_{1}$ & $k(n-1)g$ \\ \hline Sasakian & $\left( 1-\dfrac{r}{n(n-1)}\right) a_{0}-\dfrac{r}{n}a_{1}$ & $ (n-1)g$ \\ \hline Kenmotsu & $\left( -1-\dfrac{r}{n(n-1)}\right) a_{0}-\dfrac{r}{n}a_{1}$ & $ -\,(n-1)g$ \\ \hline $(\varepsilon )$-Sasakian & $\left( \varepsilon -\dfrac{r}{n(n-1)}\right) a_{0}-\dfrac{r}{n}a_{1}$ & $\varepsilon (n-1)g$ \\ \hline para-Sasakian & $\left( -1-\dfrac{r}{n(n-1)}\right) a_{0}-\dfrac{r}{n}a_{1}$ & $-\,(n-1)g$ \\ \hline $(\varepsilon )$-para-Sasakian & $\left( -\varepsilon -\dfrac{r}{n(n-1)} \right) a_{0}-\dfrac{r}{n}a_{1}$ & $-\,\varepsilon (n-1)g$ \\ \hline \end{tabular} \] \end{cor} \begin{cor} Let $M$ be an $n$-dimensional $({\cal P},S)$-pseudosymmetric $\left( N(k),\xi \right) $-semi-Riemannian manifold. Then we have the following table\/{\rm :}~ \[ \begin{tabular}{|l|l|l|} \hline ${\boldmath M}$ & ${\boldmath L=}$ & ${\boldmath S=}$ \\ \hline $N(k)$-contact metric & $k$ & $k(n-1)g$ \\ \hline Sasakian & $1$ & $(n-1)g$ \\ \hline Kenmotsu & $-\,1$ & $-\,(n-1)g$ \\ \hline $(\varepsilon )$-Sasakian & $\varepsilon $ & $\varepsilon (n-1)g$ \\ \hline para-Sasakian & $-\,1$ & $-\,(n-1)g$ \\ \hline $(\varepsilon )$-para-Sasakian & $-\,\varepsilon $ & $-\,\varepsilon (n-1)g$ \\ \hline \end{tabular} \] \end{cor} \begin{cor} Let $M$ be an $n$-dimensional $({\cal M},S)$-pseudosymmetric $\left( N(k),\xi \right) $-semi-Riemannian manifold. 
Then we have the following table\/{\rm :}~ \[ \begin{tabular}{|l|l|} \hline ${\boldmath M}$ & ${\boldmath S^{2}=}$ \\ \hline $N(k)$-contact metric & $2(n-1)\left( k-L\right) S-k(n-1)^{2}\left( k-2L\right) g$ \\ \hline Sasakian & $2(n-1)\left( 1-L\right) S-(n-1)^{2}\left( 1-2L\right) g$ \\ \hline Kenmotsu & $-\,2(n-1)\left( 1+L\right) S-(n-1)^{2}\left( 1+2L\right) g$ \\ \hline $(\varepsilon )$-Sasakian & $2(n-1)\left( \varepsilon -L\right) S-\varepsilon (n-1)^{2}\left( \varepsilon -2L\right) g$ \\ \hline para-Sasakian & $-\,2(n-1)\left( 1+L\right) S-(n-1)^{2}\left( 1+2L\right) g$ \\ \hline $(\varepsilon )$-para-Sasakian & $-\,2(n-1)\left( \varepsilon +L\right) S-\varepsilon (n-1)^{2}\left( \varepsilon +2L\right) g$ \\ \hline \end{tabular} \] \end{cor} \begin{cor} Let $M$ be an $n$-dimensional $({\cal W}_{0},S)$-pseudosymmetric $\left( N(k),\xi \right) $-semi-Riemannian manifold. Then we have the following table\/{\rm :}~ \[ \begin{tabular}{|l|l|} \hline ${\boldmath M}$ & ${\boldmath S^{2}=}$ \\ \hline $N(k)$-contact metric & $(n-1)(2k-L)S+k(n-1)^{2}\left( L-k\right) g$ \\ \hline Sasakian & $(n-1)(2-L)S+(n-1)^{2}\left( L-1\right) g$ \\ \hline Kenmotsu & $-\,(n-1)(2+L)S-(n-1)^{2}\left( L+1\right) g$ \\ \hline $(\varepsilon )$-Sasakian & $(n-1)(2\varepsilon -L)S+\varepsilon (n-1)^{2}\left( L-\varepsilon \right) g$ \\ \hline para-Sasakian & $-\,(n-1)(2+L)S-(n-1)^{2}\left( L+1\right) g$ \\ \hline $(\varepsilon )$-para-Sasakian & $-\,(n-1)(2\varepsilon +L)S-\varepsilon (n-1)^{2}\left( L+\varepsilon \right) g$ \\ \hline \end{tabular} \] \end{cor} \begin{cor} Let $M$ be an $n$-dimensional $({\cal W}_{0}^{\ast },S)$-pseudosymmetric $ \left( N(k),\xi \right) $-semi-Riemannian manifold. Then we have the following table\/{\rm :}~ \[ \begin{tabular}{|l|l|} \hline ${\boldmath M}$ & ${\boldmath S^{2}=}$ \\ \hline $N(k)$-contact metric & $L(n-1)S+k(n-1)^{2}(k-L)g$ \\ \hline Sasakian & $L(n-1)S+(n-1)^{2}(1-L)g$ \\ \hline Kenmotsu & $L(n-1)S+(n-1)^{2}(1+L)g$ \\ \hline $(\varepsilon )$-Sasakian & $L(n-1)S+\varepsilon (n-1)^{2}(\varepsilon -L)g$ \\ \hline para-Sasakian & $L(n-1)S+(n-1)^{2}(1+L)g$ \\ \hline $(\varepsilon )$-para-Sasakian & $L(n-1)S+\varepsilon (n-1)^{2}(\varepsilon +L)g$ \\ \hline \end{tabular} \] \end{cor} \begin{cor} Let $M$ be an $n$-dimensional $({\cal W}_{1},S)$-pseudosymmetric $\left( N(k),\xi \right) $-semi-Riemannian manifold. Then we have the following table\/{\rm :}~ \[ \begin{tabular}{|l|l|l|} \hline ${\boldmath M}$ & ${\boldmath L=}$ & ${\boldmath S=}$ \\ \hline $N(k)$-contact metric & $k$ & $k(n-1)g$ \\ \hline Sasakian & $1$ & $(n-1)g$ \\ \hline Kenmotsu & $-\,1$ & $-\,(n-1)g$ \\ \hline $(\varepsilon )$-Sasakian & $\varepsilon $ & $\varepsilon (n-1)g$ \\ \hline para-Sasakian & $-\,1$ & $-\,(n-1)g$ \\ \hline $(\varepsilon )$-para-Sasakian & $-\,\varepsilon $ & $-\,\varepsilon (n-1)g$ \\ \hline \end{tabular} \] \end{cor} \begin{cor} Let $M$ be an $n$-dimensional $({\cal W}_{1}^{\ast },S)$-pseudosymmetric $ \left( N(k),\xi \right) $-semi-Riemannian manifold. 
Then we have the following table\/{\rm :}~ \[ \begin{tabular}{|l|l|l|} \hline ${\boldmath M}$ & ${\boldmath L=}$ & ${\boldmath S=}$ \\ \hline $N(k)$-contact metric & $k$ & $k(n-1)g$ \\ \hline Sasakian & $1$ & $(n-1)g$ \\ \hline Kenmotsu & $-\,1$ & $-\,(n-1)g$ \\ \hline $(\varepsilon )$-Sasakian & $\varepsilon $ & $\varepsilon (n-1)g$ \\ \hline para-Sasakian & $-\,1$ & $-\,(n-1)g$ \\ \hline $(\varepsilon )$-para-Sasakian & $-\,\varepsilon $ & $-\,\varepsilon (n-1)g$ \\ \hline \end{tabular} \] \end{cor} \begin{cor} Let $M$ be an $n$-dimensional $({\cal W}_{2},S)$-pseudosymmetric $\left( N(k),\xi \right) $-semi-Riemannian manifold. Then we have the following table\/{\rm :}~ \[ \begin{tabular}{|l|l|} \hline ${\boldmath M}$ & ${\boldmath S^{2}=}$ \\ \hline $N(k)$-contact metric & $(n-1)\left( k-L\right) S+k(n-1)^{2}Lg$ \\ \hline Sasakian & $(n-1)\left( 1-L\right) S+(n-1)^{2}Lg$ \\ \hline Kenmotsu & $-\,(n-1)\left( 1+L\right) S-(n-1)^{2}Lg$ \\ \hline $(\varepsilon )$-Sasakian & $(n-1)\left( \varepsilon -L\right) S+\varepsilon (n-1)^{2}Lg$ \\ \hline para-Sasakian & $-\,(n-1)\left( 1+L\right) S-(n-1)^{2}Lg$ \\ \hline $(\varepsilon )$-para-Sasakian & $-\,(n-1)\left( \varepsilon +L\right) S-\varepsilon (n-1)^{2}Lg$ \\ \hline \end{tabular} \] \end{cor} \begin{cor} Let $M$ be an $n$-dimensional $({\cal W}_{3},S)$-pseudosymmetric $\left( N(k),\xi \right) $-semi-Riemannian manifold. Then we have the following table\/{\rm :}~ \[ \begin{tabular}{|l|l|l|} \hline ${\boldmath M}$ & ${\boldmath L=}$ & ${\boldmath S=}$ \\ \hline $N(k)$-contact metric & $2k$ & $k(n-1)g$ \\ \hline Sasakian & $2$ & $(n-1)g$ \\ \hline Kenmotsu & $-\,2$ & $-\,(n-1)g$ \\ \hline $(\varepsilon )$-Sasakian & $2\varepsilon $ & $\varepsilon (n-1)g$ \\ \hline para-Sasakian & $-\,2$ & $-\,(n-1)g$ \\ \hline $(\varepsilon )$-para-Sasakian & $-\,2\varepsilon $ & $-\,\varepsilon (n-1)g$ \\ \hline \end{tabular} \] \end{cor} \begin{cor} Let $M$ be an $n$-dimensional $({\cal W}_{4},S)$-pseudosymmetric $\left( N(k),\xi \right) $-semi-Riemannian manifold. Then we have the following table\/{\rm :}~ \[ \begin{tabular}{|l|l|} \hline ${\boldmath M}$ & ${\boldmath S^{2}=}$ \\ \hline $N(k)$-contact metric & $(n-1)\left( k-L\right) S+k(n-1)^{2}\left( L-k\right) g+\varepsilon k^{2}(n-1)^{2}\eta \otimes \eta $ \\ \hline Sasakian & $(n-1)\left( 1-L\right) S+(n-1)^{2}\left( L-1\right) g+\varepsilon (n-1)^{2}\eta \otimes \eta $ \\ \hline Kenmotsu & $-\,(n-1)\left( 1+L\right) S-(n-1)^{2}\left( L+1\right) g+\varepsilon (n-1)^{2}\eta \otimes \eta $ \\ \hline $(\varepsilon )$-Sasakian & $(n-1)\left( \varepsilon -L\right) S+\varepsilon (n-1)^{2}\left( L-\varepsilon \right) g+\varepsilon (n-1)^{2}\eta \otimes \eta $ \\ \hline para-Sasakian & $-\,(n-1)\left( 1+L\right) S-(n-1)^{2}\left( L+1\right) g+\varepsilon (n-1)^{2}\eta \otimes \eta $ \\ \hline $(\varepsilon )$-para-Sasakian & $-\,(n-1)\left( \varepsilon +L\right) S-\varepsilon (n-1)^{2}\left( L+\varepsilon \right) g+\varepsilon (n-1)^{2}\eta \otimes \eta $ \\ \hline \end{tabular} \] \end{cor} \begin{cor} Let $M$ be an $n$-dimensional $({\cal W}_{5},S)$-pseudosymmetric $\left( N(k),\xi \right) $-semi-Riemannian manifold. 
Then we have the following table\/{\rm :}~ \[ \begin{tabular}{|l|l|} \hline ${\boldmath M}$ & ${\boldmath S^{2}=}$ \\ \hline $N(k)$-contact metric & $(n-1)\left( 2k-L\right) S+k(n-1)^{2}\left( L-k\right) g$ \\ \hline Sasakian & $(n-1)\left( 2-L\right) S+(n-1)^{2}\left( L-1\right) g$ \\ \hline Kenmotsu & $-\,(n-1)\left( 2+L\right) S-(n-1)^{2}\left( L+1\right) g$ \\ \hline $(\varepsilon )$-Sasakian & $(n-1)\left( 2\varepsilon -L\right) S+\varepsilon (n-1)^{2}\left( L-\varepsilon \right) g$ \\ \hline para-Sasakian & $-\,(n-1)\left( 2+L\right) S-(n-1)^{2}\left( L+1\right) g$ \\ \hline $(\varepsilon )$-para-Sasakian & $-\,(n-1)\left( 2\varepsilon +L\right) S+k(n-1)^{2}\left( L+\varepsilon \right) g$ \\ \hline \end{tabular} \] \end{cor} \begin{cor} Let $M$ be an $n$-dimensional $({\cal W}_{6},S)$-pseudosymmetric $\left( N(k),\xi \right) $-semi-Riemannian manifold. Then we have the following table\/{\rm :}~ \[ \begin{tabular}{|l|l|} \hline ${\boldmath M}$ & {\bf Result} \\ \hline $N(k)$-contact metric & $\left( 2k-L\right) S+k(n-1)\left( L-k\right) g+k^{2}(n-1)\eta \otimes \eta =0$ \\ \hline Sasakian & $\left( 2-L\right) S+(n-1)\left( L-1\right) g+(n-1)\eta \otimes \eta =0$ \\ \hline Kenmotsu & $-\,\left( 2+L\right) S-(n-1)\left( L+1\right) g+(n-1)\eta \otimes \eta =0$ \\ \hline $(\varepsilon )$-Sasakian & $\left( 2\varepsilon -L\right) S+\varepsilon (n-1)\left( L-\varepsilon \right) g+\varepsilon (n-1)\eta \otimes \eta =0$ \\ \hline para-Sasakian & $-\,\left( 2+L\right) S-(n-1)\left( L+1\right) g+(n-1)\eta \otimes \eta =0$ \\ \hline $(\varepsilon )$-para-Sasakian & $-\,\left( 2\varepsilon +L\right) S-\varepsilon (n-1)\left( L+\varepsilon \right) g+\varepsilon (n-1)\eta \otimes \eta =0$ \\ \hline \end{tabular} \] \end{cor} \begin{cor} Let $M$ be an $n$-dimensional $({\cal W}_{7},S)$-pseudosymmetric $\left( N(k),\xi \right) $-semi-Riemannian manifold. Then we have the following table\/{\rm :}~ \[ \begin{tabular}{|l|l|l|} \hline ${\boldmath M}$ & ${\boldmath L=}$ & ${\boldmath S=}$ \\ \hline $N(k)$-contact metric & $2k$ & $k(n-1)g$ \\ \hline Sasakian & $2$ & $(n-1)g$ \\ \hline Kenmotsu & $-\,2$ & $-\,(n-1)g$ \\ \hline $(\varepsilon )$-Sasakian & $2\varepsilon $ & $\varepsilon (n-1)g$ \\ \hline para-Sasakian & $-\,2$ & $-\,(n-1)g$ \\ \hline $(\varepsilon )$-para-Sasakian & $-\,2\varepsilon $ & $-\,\varepsilon (n-1)g$ \\ \hline \end{tabular} \] \end{cor} \begin{cor} Let $M$ be an $n$-dimensional $({\cal W}_{8},S)$-pseudosymmetric $\left( N(k),\xi \right) $-semi-Riemannian manifold. Then we have the following table\/{\rm :}~ \[ \begin{tabular}{|l|l|} \hline ${\boldmath M}$ & {\bf Result} \\ \hline $N(k)$-contact metric & $\left( 2k-L\right) S+k(n-1)\left( L-k\right) g-k^{2}(n-1)\eta \otimes \eta =0$ \\ \hline Sasakian & $\left( 2-L\right) S+(n-1)\left( L-1\right) g-(n-1)\eta \otimes \eta =0$ \\ \hline Kenmotsu & $-\,\left( 2+L\right) S-(n-1)\left( L+1\right) g-(n-1)\eta \otimes \eta =0$ \\ \hline $(\varepsilon )$-Sasakian & $\left( 2\varepsilon -L\right) S+\varepsilon (n-1)\left( L-\varepsilon \right) g-\varepsilon (n-1)\eta \otimes \eta =0$ \\ \hline para-Sasakian & $-\,\left( 2+L\right) S-(n-1)\left( L+1\right) g-(n-1)\eta \otimes \eta =0$ \\ \hline $(\varepsilon )$-para-Sasakian & $-\,\left( 2\varepsilon +L\right) S-\varepsilon (n-1)\left( L+\varepsilon \right) g-\varepsilon (n-1)\eta \otimes \eta =0$ \\ \hline \end{tabular} \] \end{cor} \begin{cor} Let $M$ be an $n$-dimensional $({\cal W}_{9},S)$-pseudosymmetric $\left( N(k),\xi \right) $-semi-Riemannian manifold. 
Then we have the following table\/{\rm :}~ \[ \begin{tabular}{|l|l|} \hline ${\boldmath M}$ & {\bf Result} \\ \hline $N(k)$-contact metric & $\left( L-k\right) S-k(n-1)Lg+k^{2}(n-1)\eta \otimes \eta =0$ \\ \hline Sasakian & $\left( L-1\right) S-(n-1)Lg+(n-1)\eta \otimes \eta =0$ \\ \hline Kenmotsu & $\left( L+1\right) S+(n-1)Lg+(n-1)\eta \otimes \eta =0$ \\ \hline $(\varepsilon )$-Sasakian & $\left( L-\varepsilon \right) S-\varepsilon (n-1)Lg+\varepsilon (n-1)\eta \otimes \eta =0$ \\ \hline para-Sasakian & $\left( L+1\right) S+(n-1)Lg+(n-1)\eta \otimes \eta =0$ \\ \hline $(\varepsilon )$-para-Sasakian & $\left( L+\varepsilon \right) S+\varepsilon (n-1)Lg+\varepsilon (n-1)\eta \otimes \eta =0$ \\ \hline \end{tabular} \] \end{cor} \begin{th} \label{th-T-ric-pseudo} Let $M$ be an $n$-dimensional $(R,S_{{\cal T}_{a}})$ -pseudosymmetric $(N(k),\xi )$-semi-Riemannian manifold such that \[ a_{0}+na_{1}+a_{2}+a_{3}+a_{5}+a_{6}\not=0. \] Then $M$ is either Einstein manifold, that is, \[ S=k(n-1)g \] or $L=k$ holds on $M$. Consequently, we have the following table\/{\rm :} \[ \begin{tabular}{|l|l|l|l|} \hline {\bf Manifold} & {\bf Condition} & ${\boldmath S=}$ & ${\boldmath L=}$ \\ \hline $N(k)$-contact metric & $R\cdot S_{{\cal T}_{a}}=LQ(g,S_{{\cal T}_{a}})$ & $ k(n-1)g$ & $k$ \\ \hline Sasakian & $R\cdot S_{{\cal T}_{a}}=LQ(g,S_{{\cal T}_{a}})$ & $(n-1)g$ & $1$ \\ \hline Kenmotsu & $R\cdot S_{{\cal T}_{a}}=LQ(g,S_{{\cal T}_{a}})$ & $-\,(n-1)g$ & $ -\,1$ \\ \hline $(\varepsilon )$-Sasakian & $R\cdot S_{{\cal T}_{a}}=LQ(g,S_{{\cal T}_{a}})$ & $\varepsilon (n-1)g$ & $\varepsilon $ \\ \hline para-Sasakian & $R\cdot S_{{\cal T}_{a}}=LQ(g,S_{{\cal T}_{a}})$ & $ -\,(n-1)g $ & $-\,1$ \\ \hline $(\varepsilon )$-para-Sasakian & $R\cdot S_{{\cal T}_{a}}=LQ(g,S_{{\cal T} _{a}})$ & $-\,\varepsilon (n-1)g$ & $-\,\varepsilon $ \\ \hline \end{tabular} \ \ \] \end{th} \begin{rem-new} The conclusions of Theorem {\rm \ref{th-T-ric-pseudo}} remain true if $S_{ {\cal T}_{a}}$ is replaced by $S$. \end{rem-new} \begin{cor} {\rm (\cite{Ozgur-06}, \cite{Hong-Ozgur-Tripathi-06})} If an $n$-dimensional Kenmotsu manifold $M$ is Ricci-pseudosymmetric then either $M$ is an Einstein manifold with the scalar curvature $r=n(1-n)$ or $L=-1$ holds on $M$ . \end{cor} \section{$({\cal T}_{\!a},S_{{\cal T}_{\!b}},S^{\ell })$-pseudosymmetry\label {sect-TSSP}} In this section, we determine the result for an $n$-dimensional $\left( N(k),\xi \right) $-semi-Riemannian manifold satisfy ${\cal T}_{\!a}\cdot S_{ {\cal T}_{b}}=LQ(S^{\ell },S_{{\cal T}_{\!b}})$. \begin{defn-new} A semi-Riemannian manifold $M$ is called $({\cal T}_{\!a},S_{{\cal T} _{\!b}},S^{\ell })$-pseudosymmetric if \[ {\cal T}_{\!a}\cdot S_{{\cal T}_{b}}=LQ(S^{\ell },S_{{\cal T}_{\!b}}), \] where $L$ is some smooth function defined on $M$. In particular, $M$ is said to be $(R,S_{{\cal T}_{\!a}},S^{\ell })$-pseudosymmetric if \[ R\cdot S_{{\cal T}_{a}}=LQ(S^{\ell },S_{{\cal T}_{\!a}}). \] \end{defn-new} \begin{th} Let $M$ be an $n$-dimensional $({\cal T}_{\!a},S_{{\cal T}_{\!b}},S^{\ell })$ -pseudosymmetric $(N(k),\xi )$-semi-Riemannian manifold. Then \begin{eqnarray*} &&\varepsilon a_{5}(b_{0}+nb_{1}+b_{2}+b_{3}+b_{5}+b_{6})S^{2}(Y,U) \\ &&+\ \left\{ \varepsilon (b_{0}+nb_{1}+b_{2}+b_{3}+b_{5}+b_{6})\times \right. \\ &&(-ka_{0}+k(n-1)a_{1}+k(n-1)a_{2}-a_{7}r) \\ &&\qquad \left. +\ \varepsilon (a_{1}+a_{5})(b_{4}r+(n-1)b_{7}r)\right\} S(Y,U) \\ &&+\ \left\{ \varepsilon k(n-1)(a_{2}+a_{4})(b_{4}r+(n-1)b_{7}r)\right. 
\\ &&\qquad +\ \varepsilon k(n-1)(b_{0}+nb_{1}+b_{2}+b_{3}+b_{5}+b_{6})\times \\ &&\left. (ka_{0}+k(n-1)a_{4}+a_{7}r)\right\} g(Y,U) \\ &&+\ k(n-1)(a_{1}+a_{2}+2a_{3}+a_{4}+a_{5}+2a_{6})\times \\ &&\left\{ b_{4}r+(n-1)b_{7}r\right. \\ &&\qquad \left. +\ k(n-1)(b_{0}+nb_{1}+b_{2}+b_{3}+b_{5}+b_{6})\right\} \eta (Y)\eta (U) \\ &=&L\varepsilon ((b_{0}+nb_{1}+b_{2}+b_{3}+b_{5}+b_{6})\times \\ &&(k(n-1)S^{\ell }(Y,U)-k^{\ell }(n-1)^{\ell }S(Y,U)) \\ &&+\,(b_{4}+(n-1)b_{7})r(k^{\ell }(n-1)^{\ell }g(Y,U)-S^{\ell }(Y,U))). \end{eqnarray*} In particular, if $M$ be an $n$-dimensional $({\cal T}_{\!a},S_{{\cal T} _{\!a}},S^{\ell })$-pseudosymmetric $(N(k),\xi )$-semi-Riemannian manifold. Then \begin{eqnarray*} &&\varepsilon a_{5}(a_{0}+na_{1}+a_{2}+a_{3}+a_{5}+a_{6})S^{2}(Y,U) \\ &&+\ \left\{ \varepsilon (a_{0}+na_{1}+a_{2}+a_{3}+a_{5}+a_{6})\times \right. \\ &&(-ka_{0}+k(n-1)a_{1}+k(n-1)a_{2}-a_{7}r) \\ &&\qquad \left. +\ \varepsilon (a_{1}+a_{5})(a_{4}r+(n-1)a_{7}r)\right\} S(Y,U) \\ &&+\ \left\{ \varepsilon k(n-1)(a_{2}+a_{4})(a_{4}r+(n-1)a_{7}r)\right. \\ &&\qquad +\ \varepsilon k(n-1)(a_{0}+na_{1}+a_{2}+a_{3}+a_{5}+a_{6})\times \\ &&\left. (ka_{0}+k(n-1)a_{4}+a_{7}r)\right\} g(Y,U) \\ &&+\ k(n-1)(a_{1}+a_{2}+2a_{3}+a_{4}+a_{5}+2a_{6}) \\ &&\left\{ a_{4}r+(n-1)a_{7}r\right. \\ &&\qquad \left. +\ \varepsilon k(n-1)(a_{0}+na_{1}+a_{2}+a_{3}+a_{5}+a_{6})\right\} \eta (Y)\eta (U) \\ &=&L\varepsilon ((a_{0}+na_{1}+a_{2}+a_{3}+a_{5}+a_{6})\times \\ &&(k(n-1)S^{\ell }(Y,U)-k^{\ell }(n-1)^{\ell }S(Y,U)) \\ &&+\,(a_{4}+(n-1)a_{7})r(k^{\ell }(n-1)^{\ell }g(Y,U)-S^{\ell }(Y,U))). \end{eqnarray*} \end{th} \noindent {\bf Proof.} Let $M$ be an $n$-dimensional $({\cal T}_{\!a},S_{ {\cal T}_{\!b}},S^{\ell })$-pseudosymmetric $\left( N(k),\xi \right) $ -semi-Riemannian manifold. Then \begin{equation} {\cal T}_{\!a}(X,Y)\cdot S_{{\cal T}_{\!b}}(U,V)=LQ(S^{\ell },S_{{\cal T} _{\!b}})(U,V;X,Y). \label{eq-Ric-pseudo-1} \end{equation} Taking $X=\xi =V$ in (\ref{eq-Ric-pseudo-1}), we have \[ {\cal T}_{\!a}(\xi ,Y)\cdot S_{{\cal T}_{\!b}}(U,\xi )=LQ(S^{\ell },S_{{\cal T}_{\!b}})(U,\xi ;\xi ,Y), \] which gives \begin{eqnarray} &&S_{{\cal T}_{\!b}}({\cal T}_{\!a}(\xi ,Y)U,\xi )+S_{{\cal T}_{\!b}}(U, {\cal T}_{\!a}(\xi ,Y)\xi ) \nonumber \\ &=&L\left( S_{{\cal T}_{\!b}}((\xi \wedge _{S^{\ell }}Y)U,\xi )+S_{{\cal T} _{\!b}}(U,(\xi \wedge _{S^{\ell }}Y)\xi )\right) . \label{eq-T-S-111} \end{eqnarray} Using (\ref{eq-cond}), (\ref{eq-Sp-QX-xi}), (\ref{eq-xi-X-xi}), (\ref {eq-xi-Y-Z}), (\ref{eq-ric-T1}) and (\ref{eq-ric-T2}) in (\ref{eq-T-S-111}), we get the result. $\blacksquare $ For $\ell =1$, we have the following result. \begin{cor} Let $M$ be an $n$-dimensional $({\cal T}_{\!a},S_{{\cal T}_{\!b}},S)$ -pseudosymmetric $(N(k),\xi )$-semi-Riemannian manifold. Then \begin{eqnarray*} &&\varepsilon a_{5}(b_{0}+nb_{1}+b_{2}+b_{3}+b_{5}+b_{6})S^{2}(Y,U) \\ &&+\ \left\{ \varepsilon (b_{0}+nb_{1}+b_{2}+b_{3}+b_{5}+b_{6})\times \right. \\ &&(-ka_{0}+k(n-1)a_{1}+k(n-1)a_{2}-a_{7}r) \\ &&\qquad \left. +\ \varepsilon (a_{1}+a_{5})(b_{4}r+(n-1)b_{7}r)\right\} S(Y,U) \\ &&+\ \left\{ \varepsilon k(n-1)(a_{2}+a_{4})(b_{4}r+(n-1)b_{7}r)\right. \\ &&\qquad +\ \varepsilon k(n-1)(b_{0}+nb_{1}+b_{2}+b_{3}+b_{5}+b_{6})\times \\ &&\left. (ka_{0}+k(n-1)a_{4}+a_{7}r)\right\} g(Y,U) \\ &&+\ k(n-1)(a_{1}+a_{2}+2a_{3}+a_{4}+a_{5}+2a_{6})\times \\ &&\left\{ b_{4}r+(n-1)b_{7}r\right. \\ &&\qquad \left. +\ k(n-1)(b_{0}+nb_{1}+b_{2}+b_{3}+b_{5}+b_{6})\right\} \eta (Y)\eta (U) \\ &=&L\varepsilon (b_{4}+(n-1)b_{7})r(k(n-1)g(Y,U)-S(Y,U)). 
\end{eqnarray*} In particular, if $M$ be an $n$-dimensional $({\cal T}_{\!a},S_{{\cal T} _{\!a}},S)$-pseudosymmetric $(N(k),\xi )$-semi-Riemannian manifold. Then \begin{eqnarray*} &&\varepsilon a_{5}(a_{0}+na_{1}+a_{2}+a_{3}+a_{5}+a_{6})S^{2}(Y,U) \\ &&+\ \left\{ \varepsilon (a_{0}+na_{1}+a_{2}+a_{3}+a_{5}+a_{6})\times \right. \\ &&(-ka_{0}+k(n-1)a_{1}+k(n-1)a_{2}-a_{7}r) \\ &&\qquad \left. +\ \varepsilon (a_{1}+a_{5})(a_{4}r+(n-1)a_{7}r)\right\} S(Y,U) \\ &&+\ \left\{ \varepsilon k(n-1)(a_{2}+a_{4})(a_{4}r+(n-1)a_{7}r)\right. \\ &&\qquad +\ \varepsilon k(n-1)(a_{0}+na_{1}+a_{2}+a_{3}+a_{5}+a_{6})\times \\ &&\left. (ka_{0}+k(n-1)a_{4}+a_{7}r)\right\} g(Y,U) \\ &&+\ k(n-1)(a_{1}+a_{2}+2a_{3}+a_{4}+a_{5}+2a_{6}) \\ &&\left\{ a_{4}r+(n-1)a_{7}r\right. \\ &&\qquad \left. +\ \varepsilon k(n-1)(a_{0}+na_{1}+a_{2}+a_{3}+a_{5}+a_{6})\right\} \eta (Y)\eta (U) \\ &=&L\varepsilon (a_{4}+(n-1)a_{7})r(k(n-1)g(Y,U)-S(Y,U)). \end{eqnarray*} \end{cor} \begin{th} \label{GCT-rssss} Let $M$ be an $n$-dimensional $\left( {\cal T} _{\!a},S,S^{\ell }\right) $-pseudosymmetric $\left( N(k),\xi \right)$ -semi-Riemannian manifold. Then \begin{eqnarray*} &&\varepsilon a_{5}\,S^{2}(Y,U)-E\,S(Y,U)-Fg(Y,U)-G\eta (Y)\eta \left( U\right) \\ &=&\varepsilon L(k(n-1)S^{\ell }(Y,U)-k^{\ell }(n-1)^{\ell }S(Y,U)), \end{eqnarray*} where \[ E=\varepsilon \,(ka_{0}+a_{7}r-k(n-1)a_{1}-k(n-1)a_{2}), \] \[ F=-\,\varepsilon k(n-1)(ka_{0}+k(n-1)a_{4}+a_{7}r), \] \[ G=-\,k^{2}(n-1)^{2}(a_{1}+a_{2}+2a_{3}+a_{4}+a_{5}+2a_{6}). \] \end{th} In view of Theorem~\ref{GCT-rssss}, we have the following \begin{cor} Let $M$ be an $n$-dimensional $\left( R,S,S^{\ell }\right) $-pseudosymmetric $\left( N(k),\xi \right) $-semi-Riemannian manifold. Then we have the following table\/{\rm :}~ \[ \begin{tabular}{|l|l|} \hline ${\boldmath M}$ & ${\boldmath LS}^{\ell }{=}$ \\ \hline $N(k)$-contact metric & $\left( Lk^{\ell -1}(n-1)^{\ell -1}-\dfrac{1}{n-1} \right) S+kg$ \\ \hline Sasakian & $\left( L(n-1)^{\ell -1}-\dfrac{1}{n-1}\right) S+g$ \\ \hline Kenmotsu & $\left( L(-1)^{\ell -1}(n-1)^{\ell -1}-\dfrac{1}{n-1}\right) S-g$ \\ \hline $(\varepsilon )$-Sasakian & $\left( L(\varepsilon )^{\ell -1}(n-1)^{\ell -1}- \dfrac{1}{n-1}\right) S+\varepsilon g$ \\ \hline para-Sasakian & $\left( L(-1)^{\ell -1}(n-1)^{\ell -1}-\dfrac{1}{n-1}\right) S-g$ \\ \hline $(\varepsilon )$-para-Sasakian & $\left( L(-\varepsilon )^{\ell -1}(n-1)^{\ell -1}-\dfrac{1}{n-1}\right) S-\varepsilon g$ \\ \hline \end{tabular} \] \end{cor} \begin{cor} Let $M$ be an $n$-dimensional $\left( {\cal C}_{\ast },S,S^{\ell }\right) $ -pseudosymmetric $\left( N(k),\xi \right) $-semi-Riemannian manifold. 
Then we have the following table\/{\rm :}~ \[ \begin{tabular}{|l|l|} \hline ${\boldmath M}$ & ${\boldmath L(n-1)S^{\ell }=}$ \\ \hline $N(k)$-contact metric & $-\,\dfrac{a_{1}}{k}S^{2}-\left( \left( 1-\dfrac{r}{ kn(n-1)}\right) a_{0}-\dfrac{2r}{kn}a_{1}-k^{\ell -1}(n-1)^{\ell }L\right) S$ \\ & $+(n-1)\left( \left( k-\dfrac{r}{n(n-1)}\right) a_{0}+\left( k(n-1)-\dfrac{ 2r}{n}\right) a_{1}\right) g$ \\ \hline Sasakian & $-\,a_{1}S^{2}-\left( \left( 1-\dfrac{r}{n(n-1)}\right) a_{0}- \dfrac{2r}{n}a_{1}-(n-1)^{\ell }L\right) S$ \\ & $+(n-1)\left( \left( 1-\dfrac{r}{n(n-1)}\right) a_{0}+\left( (n-1)-\dfrac{ 2r}{n}\right) a_{1}\right) g$ \\ \hline Kenmotsu & $a_{1}S^{2}-\left( \left( 1+\dfrac{r}{n(n-1)}\right) a_{0}+\dfrac{ 2r}{n}a_{1}-(-1)^{\ell -1}(n-1)^{\ell }L\right) S$ \\ & $-\,(n-1)\left( \left( 1+\dfrac{r}{n(n-1)}\right) a_{0}+\left( (n-1)+ \dfrac{2r}{n}\right) a_{1}\right) g$ \\ \hline $(\varepsilon )$-Sasakian & $-\,\varepsilon a_{1}S^{2}-\left( \left( 1- \dfrac{\varepsilon r}{n(n-1)}\right) a_{0}-\dfrac{2\varepsilon r}{n} a_{1}-(\varepsilon )^{\ell -1}(n-1)^{\ell }L\right) S$ \\ & $+(n-1)\left( \left( \varepsilon -\dfrac{r}{n(n-1)}\right) a_{0}+\left( \varepsilon (n-1)-\dfrac{2r}{n}\right) a_{1}\right) g$ \\ \hline para-Sasakian & $a_{1}S^{2}-\left( \left( 1+\dfrac{r}{n(n-1)}\right) a_{0}+ \dfrac{2r}{n}a_{1}-(-1)^{\ell -1}(n-1)^{\ell }L\right) S$ \\ & $-\,(n-1)\left( \left( 1+\dfrac{r}{n(n-1)}\right) a_{0}+\left( (n-1)+ \dfrac{2r}{n}\right) a_{1}\right) g$ \\ \hline $(\varepsilon )$-para-Sasakian & $\varepsilon a_{1}S^{2}-\left( \left( 1+ \dfrac{\varepsilon r}{n(n-1)}\right) a_{0}+\dfrac{2\varepsilon r}{n} a_{1}-(-\varepsilon )^{\ell -1}(n-1)^{\ell }L\right) S$ \\ & $-\,(n-1)\left( \left( \varepsilon +\dfrac{r}{n(n-1)}\right) a_{0}+\left( \varepsilon (n-1)+\dfrac{2r}{n}\right) a_{1}\right) g$ \\ \hline \end{tabular} \] \end{cor} \begin{cor} Let $M$ be an $n$-dimensional $\left( {\cal C},S,S^{\ell }\right) $ -pseudosymmetric $\left( N(k),\xi \right) $-semi-Riemannian manifold. 
Then we have the following table\/{\rm :}~ \[ \begin{tabular}{|l|l|} \hline ${\boldmath M}$ & ${\boldmath LS^{\ell }=}$ \\ \hline $N(k)$-contact metric & $-\,\left( \dfrac{r}{k(n-1)^{2}(n-2)}+\dfrac{1}{n-1} -k^{\ell -1}(n-1)^{\ell -1}L\right) S$ \\ & $+\,\left( k-\dfrac{n-1}{n-2}+\dfrac{r}{(n-1)(n-2)}\right) g+\dfrac{1}{ k(n-1)(n-2)}S^{2}$ \\ \hline Sasakian & $-\,\left( \dfrac{r}{(n-1)^{2}(n-2)}+\dfrac{1}{n-1}-(n-1)^{\ell -1}L\right) S$ \\ & $+\,\left( 1-\dfrac{n-1}{n-2}+\dfrac{r}{(n-1)(n-2)}\right) g+\dfrac{1}{ (n-1)(n-2)}S^{2}$ \\ \hline Kenmotsu & $-\,\left( -\dfrac{r}{(n-1)^{2}(n-2)}+\dfrac{1}{n-1}-(-1)^{\ell -1}(n-1)^{\ell -1}L\right) S$ \\ & $+\,\left( -1-\dfrac{n-1}{n-2}+\dfrac{r}{(n-1)(n-2)}\right) g-\,\dfrac{1}{ (n-1)(n-2)}S^{2}$ \\ \hline $(\varepsilon )$-Sasakian & $-\,\left( \dfrac{\varepsilon r}{(n-1)^{2}(n-2)}+ \dfrac{1}{n-1}-(\varepsilon )^{\ell -1}(n-1)^{\ell -1}L\right) S$ \\ & $+\,\left( \varepsilon -\dfrac{n-1}{n-2}+\dfrac{r}{(n-1)(n-2)}\right) g+ \dfrac{\varepsilon }{(n-1)(n-2)}S^{2}$ \\ \hline para-Sasakian & $-\,\left( -\dfrac{r}{(n-1)^{2}(n-2)}+\dfrac{1}{n-1} -(-1)^{\ell -1}(n-1)^{\ell -1}L\right) S$ \\ & $+\,\left( -1-\dfrac{n-1}{n-2}+\dfrac{r}{(n-1)(n-2)}\right) g-\,\dfrac{1}{ (n-1)(n-2)}S^{2}$ \\ \hline $(\varepsilon )$-para-Sasakian & $-\,\left( -\dfrac{\varepsilon r}{ (n-1)^{2}(n-2)}+\dfrac{1}{n-1}-(-\varepsilon )^{\ell -1}(n-1)^{\ell -1}L\right) S$ \\ & $+\,\left( -\varepsilon -\dfrac{n-1}{n-2}+\dfrac{r}{(n-1)(n-2)}\right) g- \dfrac{\varepsilon }{(n-1)(n-2)}S^{2}$ \\ \hline \end{tabular} \] \end{cor} \begin{cor} Let $M$ be an $n$-dimensional $\left( {\cal L},S,S^{\ell }\right) $ -pseudosymmetric $\left( N(k),\xi \right) $-semi-Riemannian manifold. Then we have the following table\/{\rm :}~ \[ \begin{tabular}{|l|l|} \hline ${\boldmath M}$ & ${\boldmath LS^{\ell }=}$ \\ \hline $N(k)$-contact metric & $\dfrac{1}{k(n-1)(n-2)}S^{2}+\left( k^{\ell -1}(n-1)^{\ell -1}L-\dfrac{1}{n-1}\right) S-\dfrac{k}{n-2}g$ \\ \hline Sasakian & $\dfrac{1}{(n-1)(n-2)}S^{2}+\left( (n-1)^{\ell -1}L-\dfrac{1}{n-1} \right) S-\dfrac{1}{n-2}g$ \\ \hline Kenmotsu & $-\,\dfrac{1}{(n-1)(n-2)}S^{2}+\left( (-1)^{\ell -1}(n-1)^{\ell -1}L-\dfrac{1}{n-1}\right) S+\dfrac{1}{n-2}g$ \\ \hline $(\varepsilon )$-Sasakian & $\dfrac{\varepsilon }{(n-1)(n-2)}S^{2}+\left( (\varepsilon )^{\ell -1}(n-1)^{\ell -1}L-\dfrac{1}{n-1}\right) S-\dfrac{ \varepsilon }{n-2}g$ \\ \hline para-Sasakian & $-\,\dfrac{1}{(n-1)(n-2)}S^{2}+\left( (-1)^{\ell -1}(n-1)^{\ell -1}L-\dfrac{1}{n-1}\right) S+\dfrac{1}{n-2}g$ \\ \hline $(\varepsilon )$-para-Sasakian & $-\,\dfrac{\varepsilon }{(n-1)(n-2)} S^{2}+\left( (-\varepsilon )^{\ell -1}(n-1)^{\ell -1}L-\dfrac{1}{n-1}\right) S+\dfrac{\varepsilon }{n-2}g$ \\ \hline \end{tabular} \] \end{cor} \begin{cor} Let $M$ be an $n$-dimensional $\left( {\cal V},S,S^{\ell }\right) $ -pseudosymmetric $\left( N(k),\xi \right) $-semi-Riemannian manifold. 
Then we have the following table\/{\rm :}~ \[ \begin{tabular}{|l|l|} \hline ${\boldmath M}$ & ${\boldmath}LS^{\ell }=$ \\ \hline $N(k)$-contact metric & $-\,\left( \dfrac{1}{n-1}-\dfrac{r}{kn(n-1)^{2}} -k^{\ell -1}(n-1)^{\ell -1}\right) S$ \\ & $+\left( k-\dfrac{r}{n(n-1)}\right) g$ \\ \hline Sasakian & $-\,\left( \dfrac{1}{n-1}-\dfrac{r}{n(n-1)^{2}}-(n-1)^{\ell -1}\right) S$ \\ & $+\left( 1-\dfrac{r}{n(n-1)}\right) g$ \\ \hline Kenmotsu & $-\,\left( \dfrac{1}{n-1}+\dfrac{r}{n(n-1)^{2}}-(-1)^{\ell -1}(n-1)^{\ell -1}\right) S$ \\ & $-\left( 1+\dfrac{r}{n(n-1)}\right) g$ \\ \hline $(\varepsilon )$-Sasakian & $-\,\left( \dfrac{1}{n-1}-\dfrac{\varepsilon r}{ n(n-1)^{2}}-(\varepsilon )^{\ell -1}(n-1)^{\ell -1}\right) S$ \\ & $+\left( \varepsilon -\dfrac{r}{n(n-1)}\right) g$ \\ \hline para-Sasakian & $-\,\left( \dfrac{1}{n-1}+\dfrac{r}{n(n-1)^{2}}-(-1)^{\ell -1}(n-1)^{\ell -1}\right) S$ \\ & $-\left( 1+\dfrac{r}{n(n-1)}\right) g$ \\ \hline $(\varepsilon )$-para-Sasakian & $-\,\left( \dfrac{1}{n-1}+\dfrac{ \varepsilon r}{n(n-1)^{2}}-(-\varepsilon )^{\ell -1}(n-1)^{\ell -1}\right) S$ \\ & $-\left( \varepsilon +\dfrac{r}{n(n-1)}\right) g$ \\ \hline \end{tabular} \] \end{cor} \begin{cor} Let $M$ be an $n$-dimensional $\left( {\cal P}_{\ast },S,S^{\ell }\right) $ -pseudosymmetric $\left( N(k),\xi \right) $-semi-Riemannian manifold. Then we have the following table\/{\rm :}~ \[ \begin{tabular}{|l|l|} \hline ${\boldmath M}$ & ${\boldmath LS}^{\ell }{=}$ \\ \hline $N(k)$-contact metric & $-\,\left( \left( \dfrac{1}{n-1}-\dfrac{r}{ kn(n-1)^{2}}\right) a_{0}-\dfrac{r}{kn(n-1)}a_{1}-k^{\ell -1}(n-1)^{\ell -1}\right) S$ \\ & $+\left( \left( k-\dfrac{r}{n(n-1)}\right) a_{0}-\dfrac{r}{n}a_{1}\right) g $ \\ \hline Sasakian & $-\,\left( \left( \dfrac{1}{n-1}-\dfrac{r}{n(n-1)^{2}}\right) a_{0}-\dfrac{r}{n(n-1)}a_{1}-(n-1)^{\ell -1}\right) S$ \\ & $+\left( \left( 1-\dfrac{r}{n(n-1)}\right) a_{0}-\dfrac{r}{n}a_{1}\right) $ \\ \hline Kenmotsu & $-\,\left( \left( \dfrac{1}{n-1}+\dfrac{r}{n(n-1)^{2}}\right) a_{0}+\dfrac{r}{n(n-1)}a_{1}-(-1)^{\ell -1}(n-1)^{\ell -1}\right) S$ \\ & $+\left( \left( -1-\dfrac{r}{n(n-1)}\right) a_{0}-\dfrac{r}{n}a_{1}\right) $ \\ \hline $(\varepsilon )$-Sasakian & $-\,\left( \left( \dfrac{1}{n-1}-\dfrac{ \varepsilon r}{n(n-1)^{2}}\right) a_{0}-\dfrac{\varepsilon r}{n(n-1)} a_{1}-(\varepsilon )^{\ell -1}(n-1)^{\ell -1}\right) S$ \\ & $+\left( \left( \varepsilon -\dfrac{r}{n(n-1)}\right) a_{0}-\dfrac{r}{n} a_{1}\right) $ \\ \hline para-Sasakian & $-\,\left( \left( \dfrac{1}{n-1}+\dfrac{r}{n(n-1)^{2}} \right) a_{0}+\dfrac{r}{n(n-1)}a_{1}-(-1)^{\ell -1}(n-1)^{\ell -1}\right) S$ \\ & $+\left( \left( -1-\dfrac{r}{n(n-1)}\right) a_{0}-\dfrac{r}{n}a_{1}\right) $ \\ \hline $(\varepsilon )$-para-Sasakian & $-\,\left( \left( \dfrac{1}{n-1}+\dfrac{ \varepsilon r}{n(n-1)^{2}}\right) a_{0}+\dfrac{\varepsilon r}{n(n-1)} a_{1}-(-\varepsilon )^{\ell -1}(n-1)^{\ell -1}\right) S$ \\ & $+\left( \left( -\varepsilon -\dfrac{r}{n(n-1)}\right) a_{0}-\dfrac{r}{n} a_{1}\right) $ \\ \hline \end{tabular} \] \end{cor} \begin{cor} Let $M$ be an $n$-dimensional $\left( {\cal P},S,S^{\ell }\right) $ -pseudosymmetric $\left( N(k),\xi \right) $-semi-Riemannian manifold. 
Then we have the following table\/{\rm :}~ \[ \begin{tabular}{|l|l|} \hline ${\boldmath M}$ & ${\boldmath LS}^{\ell }{=}$ \\ \hline $N(k)$-contact metric & $\left( k^{\ell -1}(n-1)^{\ell -1}L-\dfrac{1}{n-1} \right) S+kg$ \\ \hline Sasakian & $\left( (n-1)^{\ell -1}L-\dfrac{1}{n-1}\right) S+g$ \\ \hline Kenmotsu & $\left( (-1)^{\ell -1}(n-1)^{\ell -1}L-\dfrac{1}{n-1}\right) S-g$ \\ \hline $(\varepsilon )$-Sasakian & $\left( (\varepsilon )^{\ell -1}(n-1)^{\ell -1}L- \dfrac{1}{n-1}\right) S+\varepsilon g$ \\ \hline para-Sasakian & $\left( (-1)^{\ell -1}(n-1)^{\ell -1}L-\dfrac{1}{n-1}\right) S-g$ \\ \hline $(\varepsilon )$-para-Sasakian & $\left( (-\varepsilon )^{\ell -1}(n-1)^{\ell -1}L-\dfrac{1}{n-1}\right) S-\varepsilon g$ \\ \hline \end{tabular} \] \end{cor} \begin{cor} Let $M$ be an $n$-dimensional $\left( {\cal M},S,S^{\ell }\right) $ -pseudosymmetric $\left( N(k),\xi \right) $-semi-Riemannian manifold. Then we have the following table\/{\rm :}~ \[ \begin{tabular}{|l|l|} \hline ${\boldmath M}$ & ${\boldmath LS^{\ell }=}$ \\ \hline $N(k)$-contact metric & $\dfrac{1}{2k(n-1)^{2}}S^{2}+\left( Lk^{\ell -1}(n-1)^{\ell -1}-\dfrac{1}{n-1}\right) S+\dfrac{k}{2}g$ \\ \hline Sasakian & $\dfrac{1}{2(n-1)^{2}}S^{2}+\left( L(n-1)^{\ell -1}-\dfrac{1}{n-1} \right) S+\dfrac{1}{2}g$ \\ \hline Kenmotsu & $-\,\dfrac{1}{2(n-1)^{2}}S^{2}+\left( L(-1)^{\ell -1}(n-1)^{\ell -1}-\dfrac{1}{n-1}\right) S-\dfrac{1}{2}g$ \\ \hline $(\varepsilon )$-Sasakian & $\dfrac{\varepsilon }{2(n-1)^{2}}S^{2}+\left( L(\varepsilon )^{\ell -1}(n-1)^{\ell -1}-\dfrac{1}{n-1}\right) S+\dfrac{ \varepsilon }{2}g$ \\ \hline para-Sasakian & $-\,\dfrac{1}{2(n-1)^{2}}S^{2}+\left( L(-1)^{\ell -1}(n-1)^{\ell -1}-\dfrac{1}{n-1}\right) S-\dfrac{1}{2}g$ \\ \hline $(\varepsilon )$-para-Sasakian & $-\,\dfrac{\varepsilon }{2(n-1)^{2}} S^{2}+\left( L(-\varepsilon )^{\ell -1}(n-1)^{\ell -1}-\dfrac{1}{n-1}\right) S-\dfrac{\varepsilon }{2}g$ \\ \hline \end{tabular} \] \end{cor} \begin{cor} Let $M$ be an $n$-dimensional $\left( {\cal W}_{0},S,S^{\ell }\right) $ -pseudosymmetric $\left( N(k),\xi \right) $-semi-Riemannian manifold. Then we have the following table\/{\rm :}~ \[ \begin{tabular}{|l|l|} \hline ${\boldmath M}$ & ${\boldmath LS}^{\ell }{=}$ \\ \hline $N(k)$-contact metric & $\dfrac{1}{k(n-1)^{2}}S^{2}+\left( k^{\ell -1}(n-1)^{\ell -1}L-\dfrac{2}{n-1}\right) S+kg$ \\ \hline Sasakian & $\dfrac{1}{(n-1)^{2}}S^{2}+\left( (n-1)^{\ell -1}L-\dfrac{2}{n-1} \right) S+g$ \\ \hline Kenmotsu & $-\,\dfrac{1}{(n-1)^{2}}S^{2}+\left( (-1)^{\ell -1}(n-1)^{\ell -1}L-\dfrac{2}{n-1}\right) S-g$ \\ \hline $(\varepsilon )$-Sasakian & $\dfrac{\varepsilon }{(n-1)^{2}}S^{2}+\left( (\varepsilon )^{\ell -1}(n-1)^{\ell -1}L-\dfrac{2}{n-1}\right) S+\varepsilon g$ \\ \hline para-Sasakian & $-\,\dfrac{1}{(n-1)^{2}}S^{2}+\left( (-1)^{\ell -1}(n-1)^{\ell -1}L-\dfrac{2}{n-1}\right) S-g$ \\ \hline $(\varepsilon )$-para-Sasakian & $-\,\dfrac{1}{(n-1)^{2}}S^{2}+\left( (-\varepsilon )^{\ell -1}(n-1)^{\ell -1}L-\dfrac{2}{n-1}\right) S-\varepsilon g$ \\ \hline \end{tabular} \] \end{cor} \begin{cor} Let $M$ be an $n$-dimensional $\left( {\cal W}_{0}^{\ast },S,S^{\ell }\right) $-pseudosymmetric $\left( N(k),\xi \right) $-semi-Riemannian manifold. 
Then we have the following table\/{\rm :}~ \[ \begin{tabular}{|l|l|} \hline ${\boldmath M}$ & ${\boldmath LS}^{\ell }{=}$ \\ \hline $N(k)$-contact metric & $-\,\dfrac{1}{k(n-1)^{2}}S^{2}+k^{\ell -1}(n-1)^{\ell -1}LS+kg$ \\ \hline Sasakian & $-\,\dfrac{1}{(n-1)^{2}}S^{2}+(n-1)^{\ell -1}LS+g$ \\ \hline Kenmotsu & $\dfrac{1}{(n-1)^{2}}S^{2}+(-1)^{\ell -1}(n-1)^{\ell -1}LS-g$ \\ \hline $(\varepsilon )$-Sasakian & $-\,\dfrac{\varepsilon }{(n-1)^{2}} S^{2}+(\varepsilon )^{\ell -1}(n-1)^{\ell -1}LS+\varepsilon g$ \\ \hline para-Sasakian & $\dfrac{1}{(n-1)^{2}}S^{2}+(-1)^{\ell -1}(n-1)^{\ell -1}LS-g$ \\ \hline $(\varepsilon )$-para-Sasakian & $\dfrac{1}{(n-1)^{2}}S^{2}+(-\varepsilon )^{\ell -1}(n-1)^{\ell -1}LS-\varepsilon g$ \\ \hline \end{tabular} \] \end{cor} \begin{cor} Let $M$ be an $n$-dimensional $\left( {\cal W}_{1},S,S^{\ell }\right) $ -pseudosymmetric $\left( N(k),\xi \right) $-semi-Riemannian manifold. Then we have the following table\/{\rm :}~ \[ \begin{tabular}{|l|l|} \hline ${\boldmath M}$ & ${\boldmath LS}^{\ell }{=}$ \\ \hline $N(k)$-contact metric & $\left( k^{\ell -1}(n-1)^{\ell -1}L-\dfrac{1}{n-1} \right) S+kg$ \\ \hline Sasakian & $\left( (n-1)^{\ell -1}L-\dfrac{1}{n-1}\right) S+g$ \\ \hline Kenmotsu & $\left( (-1)^{\ell -1}(n-1)^{\ell -1}L-\dfrac{1}{n-1}\right) S-g$ \\ \hline $(\varepsilon )$-Sasakian & $\left( (\varepsilon )^{\ell -1}(n-1)^{\ell -1}L- \dfrac{1}{n-1}\right) S+\varepsilon g$ \\ \hline para-Sasakian & $\left( (-1)^{\ell -1}(n-1)^{\ell -1}L-\dfrac{1}{n-1}\right) S-g$ \\ \hline $(\varepsilon )$-para-Sasakian & $\left( (-\varepsilon )^{\ell -1}(n-1)^{\ell -1}L-\dfrac{1}{n-1}\right) S-\varepsilon g$ \\ \hline \end{tabular} \] \end{cor} \begin{cor} Let $M$ be an $n$-dimensional $\left( {\cal W}_{1}^{\ast },S,S^{\ell }\right) $-pseudosymmetric $\left( N(k),\xi \right) $-semi-Riemannian manifold. Then we have the following table\/{\rm :}~ \[ \begin{tabular}{|l|l|} \hline ${\boldmath M}$ & ${\boldmath LS}^{\ell }{=}$ \\ \hline $N(k)$-contact metric & $\left( k^{\ell -1}(n-1)^{\ell -1}L-\dfrac{1}{n-1} \right) S+kg$ \\ \hline Sasakian & $\left( (n-1)^{\ell -1}L-\dfrac{1}{n-1}\right) S+g$ \\ \hline Kenmotsu & $\left( (-1)^{\ell -1}(n-1)^{\ell -1}L-\dfrac{1}{n-1}\right) S-g$ \\ \hline $(\varepsilon )$-Sasakian & $\left( (\varepsilon )^{\ell -1}(n-1)^{\ell -1}L- \dfrac{1}{n-1}\right) S+\varepsilon g$ \\ \hline para-Sasakian & $\left( (-1)^{\ell -1}(n-1)^{\ell -1}L-\dfrac{1}{n-1}\right) S-g$ \\ \hline $(\varepsilon )$-para-Sasakian & $\left( (-\varepsilon )^{\ell -1}(n-1)^{\ell -1}L-\dfrac{1}{n-1}\right) S-\varepsilon g$ \\ \hline \end{tabular} \] \end{cor} \begin{cor} Let $M$ be an $n$-dimensional $\left( {\cal W}_{2},S,S^{\ell }\right) $ -pseudosymmetric $\left( N(k),\xi \right) $-semi-Riemannian manifold. 
Then we have the following table\/{\rm :}~ \[ \begin{tabular}{|l|l|} \hline ${\boldmath M}$ & ${\boldmath LS}^{\ell }{=}$ \\ \hline $N(k)$-contact metric & $\dfrac{1}{k(n-1)^{2}}S^{2}+\left( k^{\ell -1}(n-1)^{\ell -1}L-\dfrac{1}{n-1}\right) S$ \\ \hline Sasakian & $\dfrac{1}{(n-1)^{2}}S^{2}+\left( (n-1)^{\ell -1}L-\dfrac{1}{n-1} \right) S$ \\ \hline Kenmotsu & $-\,\dfrac{1}{(n-1)^{2}}S^{2}+\left( (-1)^{\ell -1}(n-1)^{\ell -1}L-\dfrac{1}{n-1}\right) S$ \\ \hline $(\varepsilon )$-Sasakian & $\dfrac{\varepsilon }{(n-1)^{2}}S^{2}+\left( (\varepsilon )^{\ell -1}(n-1)^{\ell -1}L-\dfrac{1}{n-1}\right) S$ \\ \hline para-Sasakian & $-\,\dfrac{1}{(n-1)^{2}}S^{2}+\left( (-1)^{\ell -1}(n-1)^{\ell -1}L-\dfrac{1}{n-1}\right) S$ \\ \hline $(\varepsilon )$-para-Sasakian & $-\,\dfrac{1}{(n-1)^{2}}S^{2}+\left( (-\varepsilon )^{\ell -1}(n-1)^{\ell -1}L-\dfrac{1}{n-1}\right) S$ \\ \hline \end{tabular} \] \end{cor} \begin{cor} Let $M$ be an $n$-dimensional $\left( {\cal W}_{3},S,S^{\ell }\right) $ -pseudosymmetric $\left( N(k),\xi \right) $-semi-Riemannian manifold. Then we have the following table\/{\rm :}~ \[ \begin{tabular}{|l|l|} \hline ${\boldmath M}$ & ${\boldmath LS}^{\ell }{=}$ \\ \hline $N(k)$-contact metric & $\left( k^{\ell -1}(n-1)^{\ell -1}L-\dfrac{2}{n-1} \right) S+2kg$ \\ \hline Sasakian & $\left( (n-1)^{\ell -1}L-\dfrac{2}{n-1}\right) S+2g$ \\ \hline Kenmotsu & $\left( (-1)^{\ell -1}(n-1)^{\ell -1}L-\dfrac{2}{n-1}\right) S-2g$ \\ \hline $(\varepsilon )$-Sasakian & $\left( (\varepsilon )^{\ell -1}(n-1)^{\ell -1}L- \dfrac{2}{n-1}\right) S+2\varepsilon g$ \\ \hline para-Sasakian & $\left( (-1)^{\ell -1}(n-1)^{\ell -1}L-\dfrac{2}{n-1}\right) S-2g$ \\ \hline $(\varepsilon )$-para-Sasakian & $\left( (-\varepsilon )^{\ell -1}(n-1)^{\ell -1}L-\dfrac{2}{n-1}\right) S-2\varepsilon g$ \\ \hline \end{tabular} \] \end{cor} \begin{cor} Let $M$ be an $n$-dimensional $\left( {\cal W}_{4},S,S^{\ell }\right) $ -pseudosymmetric $\left( N(k),\xi \right) $-semi-Riemannian manifold. Then we have the following table\/{\rm :}~ \[ \begin{tabular}{|l|l|} \hline ${\boldmath M}$ & ${\boldmath LS}^{\ell }{=}$ \\ \hline $N(k)$-contact metric & $\dfrac{1}{k(n-1)^{2}}S^{2}+\left( k^{\ell -1}(n-1)^{\ell -1}L-\dfrac{1}{n-1}\right) S+kg-\varepsilon k\eta \otimes \eta $ \\ \hline Sasakian & $\dfrac{1}{(n-1)^{2}}S^{2}+\left( (n-1)^{\ell -1}L-\dfrac{1}{n-1} \right) S+g-\eta \otimes \eta $ \\ \hline Kenmotsu & $-\,\dfrac{1}{(n-1)^{2}}S^{2}+\left( (-1)^{\ell -1}(n-1)^{\ell -1}L-\dfrac{1}{n-1}\right) S-g+\eta \otimes \eta $ \\ \hline $(\varepsilon )$-Sasakian & $\dfrac{\varepsilon }{(n-1)^{2}}S^{2}+\left( (\varepsilon )^{\ell -1}(n-1)^{\ell -1}L-\dfrac{1}{n-1}\right) S+\varepsilon g-\eta \otimes \eta $ \\ \hline para-Sasakian & $-\,\dfrac{1}{(n-1)^{2}}S^{2}+\left( (-1)^{\ell -1}(n-1)^{\ell -1}L-\dfrac{1}{n-1}\right) S-g+\eta \otimes \eta $ \\ \hline $(\varepsilon )$-para-Sasakian & $-\,\dfrac{\varepsilon }{(n-1)^{2}} S^{2}+\left( (-\varepsilon )^{\ell -1}(n-1)^{\ell -1}L-\dfrac{1}{n-1}\right) S-\varepsilon g+\eta \otimes \eta $ \\ \hline \end{tabular} \] \end{cor} \begin{cor} Let $M$ be an $n$-dimensional $\left( {\cal W}_{5},S,S^{\ell }\right) $ -pseudosymmetric $\left( N(k),\xi \right) $-semi-Riemannian manifold. 
Then we have the following table\/{\rm :}~ \[ \begin{tabular}{|l|l|} \hline ${\boldmath M}$ & ${\boldmath LS}^{\ell }{=}$ \\ \hline $N(k)$-contact metric & $\dfrac{1}{k(n-1)^{2}}S^{2}+\left( k^{\ell -1}(n-1)^{\ell -1}L-\dfrac{2}{n-1}\right) S+kg$ \\ \hline Sasakian & $\dfrac{1}{(n-1)^{2}}S^{2}+\left( (n-1)^{\ell -1}L-\dfrac{2}{n-1} \right) S+g$ \\ \hline Kenmotsu & $-\,\dfrac{1}{(n-1)^{2}}S^{2}+\left( (-1)^{\ell -1}(n-1)^{\ell -1}L-\dfrac{2}{n-1}\right) S-g$ \\ \hline $(\varepsilon )$-Sasakian & $\dfrac{\varepsilon }{(n-1)^{2}}S^{2}+\left( (\varepsilon )^{\ell -1}(n-1)^{\ell -1}L-\dfrac{2}{n-1}\right) S+\varepsilon g$ \\ \hline para-Sasakian & $-\,\dfrac{1}{(n-1)^{2}}S^{2}+\left( (-1)^{\ell -1}(n-1)^{\ell -1}L-\dfrac{2}{n-1}\right) S-g$ \\ \hline $(\varepsilon )$-para-Sasakian & $-\,\dfrac{\varepsilon }{(n-1)^{2}} S^{2}+\left( (-\varepsilon )^{\ell -1}(n-1)^{\ell -1}L-\dfrac{2}{n-1}\right) S-\varepsilon g$ \\ \hline \end{tabular} \] \end{cor} \begin{cor} Let $M$ be an $n$-dimensional $\left( {\cal W}_{6},S,S^{\ell }\right) $ -pseudosymmetric $\left( N(k),\xi \right) $-semi-Riemannian manifold. Then we have the following table\/{\rm :}~ \[ \begin{tabular}{|l|l|} \hline ${\boldmath M}$ & ${\boldmath LS}^{\ell }{=}$ \\ \hline $N(k)$-contact metric & $\left( k^{\ell -1}(n-1)^{\ell -1}L-\dfrac{2}{n-1} \right) S+kg+k\eta \otimes \eta $ \\ \hline Sasakian & $\left( (n-1)^{\ell -1}L-\dfrac{2}{n-1}\right) S+g+\eta \otimes \eta $ \\ \hline Kenmotsu & $\left( (-1)^{\ell -1}(n-1)^{\ell -1}L-\dfrac{2}{n-1}\right) S-g-\eta \otimes \eta $ \\ \hline $(\varepsilon )$-Sasakian & $\left( (\varepsilon )^{\ell -1}(n-1)^{\ell -1}L- \dfrac{2}{n-1}\right) S+\varepsilon g+\varepsilon \eta \otimes \eta $ \\ \hline para-Sasakian & $\left( (-1)^{\ell -1}(n-1)^{\ell -1}L-\dfrac{2}{n-1}\right) S-g-\eta \otimes \eta $ \\ \hline $(\varepsilon )$-para-Sasakian & $\left( (-\varepsilon )^{\ell -1}(n-1)^{\ell -1}L-\dfrac{2}{n-1}\right) S-\varepsilon g-\varepsilon \eta \otimes \eta $ \\ \hline \end{tabular} \] \end{cor} \begin{cor} Let $M$ be an $n$-dimensional $\left( {\cal W}_{7},S,S^{\ell }\right) $ -pseudosymmetric $\left( N(k),\xi \right) $-semi-Riemannian manifold. Then we have the following table\/{\rm :}~ \[ \begin{tabular}{|l|l|} \hline ${\boldmath M}$ & ${\boldmath LS}^{\ell }{=}$ \\ \hline $N(k)$-contact metric & $\left( k^{\ell -1}(n-1)^{\ell -1}L-\dfrac{2}{n-1} \right) S+2kg$ \\ \hline Sasakian & $\left( (n-1)^{\ell -1}L-\dfrac{2}{n-1}\right) S+2g$ \\ \hline Kenmotsu & $\left( (-1)^{\ell -1}(n-1)^{\ell -1}L-\dfrac{2}{n-1}\right) S-2g$ \\ \hline $(\varepsilon )$-Sasakian & $\left( (\varepsilon )^{\ell -1}(n-1)^{\ell -1}L- \dfrac{2}{n-1}\right) S+2\varepsilon g$ \\ \hline para-Sasakian & $\left( (-1)^{\ell -1}(n-1)^{\ell -1}L-\dfrac{2}{n-1}\right) S-2g$ \\ \hline $(\varepsilon )$-para-Sasakian & $\left( (-\varepsilon )^{\ell -1}(n-1)^{\ell -1}L-\dfrac{2}{n-1}\right) S-2\varepsilon g$ \\ \hline \end{tabular} \] \end{cor} \begin{cor} Let $M$ be an $n$-dimensional $\left( {\cal W}_{8},S,S^{\ell }\right) $ -pseudosymmetric $\left( N(k),\xi \right) $-semi-Riemannian manifold. 
Then we have the following table\/{\rm :}~
\[
\begin{tabular}{|l|l|}
\hline
${\boldmath M}$ & ${\boldmath LS}^{\ell }=$ \\ \hline
$N(k)$-contact metric & $\left( Lk^{\ell -1}(n-1)^{\ell -1}-\dfrac{2}{n-1}\right) S+kg+k\eta \otimes \eta $ \\ \hline
Sasakian & $\left( L(n-1)^{\ell -1}-\dfrac{2}{n-1}\right) S+kg+\eta \otimes \eta $ \\ \hline
Kenmotsu & $\left( L(-1)^{\ell -1}(n-1)^{\ell -1}-\dfrac{2}{n-1}\right) S+kg-\eta \otimes \eta $ \\ \hline
$(\varepsilon )$-Sasakian & $\left( L(\varepsilon )^{\ell -1}(n-1)^{\ell -1}-\dfrac{2}{n-1}\right) S+kg+\eta \otimes \eta $ \\ \hline
para-Sasakian & $\left( L(-1)^{\ell -1}(n-1)^{\ell -1}-\dfrac{2}{n-1}\right) S+kg-\eta \otimes \eta $ \\ \hline
$(\varepsilon )$-para-Sasakian & $\left( L(-\varepsilon )^{\ell -1}(n-1)^{\ell -1}-\dfrac{2}{n-1}\right) S+kg-\eta \otimes \eta $ \\ \hline
\end{tabular}
\]
\end{cor}

\begin{cor}
Let $M$ be an $n$-dimensional $\left( {\cal W}_{9},S,S^{\ell }\right)$-pseudosymmetric $\left( N(k),\xi \right)$-semi-Riemannian manifold. Then we have the following table\/{\rm :}~
\[
\begin{tabular}{|l|l|}
\hline
${\boldmath M}$ & ${\boldmath LS}^{\ell }=$ \\ \hline
$N(k)$-contact metric & $\left( k^{\ell -1}(n-1)^{\ell -1}L-\dfrac{1}{n-1}\right) S+k\eta \otimes \eta $ \\ \hline
Sasakian & $\left( (n-1)^{\ell -1}L-\dfrac{1}{n-1}\right) S+\eta \otimes \eta $ \\ \hline
Kenmotsu & $\left( (-1)^{\ell -1}(n-1)^{\ell -1}L-\dfrac{1}{n-1}\right) S-\eta \otimes \eta $ \\ \hline
$(\varepsilon )$-Sasakian & $\left( (\varepsilon )^{\ell -1}(n-1)^{\ell -1}L-\dfrac{1}{n-1}\right) S+\eta \otimes \eta $ \\ \hline
para-Sasakian & $\left( (-1)^{\ell -1}(n-1)^{\ell -1}L-\dfrac{1}{n-1}\right) S-\eta \otimes \eta $ \\ \hline
$(\varepsilon )$-para-Sasakian & $\left( (-\varepsilon )^{\ell -1}(n-1)^{\ell -1}L-\dfrac{1}{n-1}\right) S-\eta \otimes \eta $ \\ \hline
\end{tabular}
\]
\end{cor}

\begin{rem-new}
If, in Theorem {\rm \ref{GCT-rssss}}, we take $M$ to be an $n$-dimensional $\left( {\cal T}_{\!a},S,S\right)$-pseudosymmetric $\left( N(k),\xi \right)$-semi-Riemannian manifold, then the result is the same as that given in \cite[Theorem 7.6]{TG}.
\end{rem-new}

\begin{cor}
\label{th-T-ric-pseudo-1} Let $M$ be an $n$-dimensional $(R,S_{{\cal T}_{a}},S^{\ell })$-pseudosymmetric $(N(k),\xi )$-semi-Riemannian manifold. Then
\begin{eqnarray*}
&&(a_{0}+na_{1}+a_{2}+a_{3}+a_{5}+a_{6})\times \\
&&(Lk(n-1)S^{\ell }-Lk^{\ell }(n-1)^{\ell }S+kS-k^{2}(n-1)g) \\
&=&Lr(a_{4}+(n-1)a_{7})(S^{\ell }-k^{\ell }(n-1)^{\ell }g).
\end{eqnarray*}
\end{cor}

\noindent Department of Mathematics and DST-CIMS\newline
Faculty of Science\newline
Banaras Hindu University\newline
Varanasi-221005\newline
[email protected]

\noindent Department of Mathematics\newline
Faculty of Science\newline
Banaras Hindu University\newline
Varanasi-221005\newline
punam\[email protected]

\end{document}
\begin{document} \title{Mirror Descent Maximizes Generalized Margin \ and Can Be Implemented Efficiently} \begin{abstract} Driven by the empirical success and wide use of deep neural networks, understanding the generalization performance of overparameterized models has become an increasingly popular question. To this end, there has been substantial effort to characterize the implicit bias of the optimization algorithms used, such as gradient descent (GD), and the structural properties of their preferred solutions. This paper answers an open question in this literature: For the classification setting, what solution does mirror descent (MD) converge to? Specifically, motivated by its efficient implementation, we consider the family of mirror descent algorithms with potential function chosen as the $p$-th power of the $\ell_p$-norm, which is an important generalization of GD. We call this algorithm $p$-{\small \sf GD}\xspace{}. For this family, we characterize the solutions it obtains and show that it converges in direction to a \emph{generalized maximum-margin} solution with respect to the $\ell_p$-norm for linearly separable classification. While the MD update rule is in general expensive to compute and perhaps not suitable for deep learning, $p$-{\small \sf GD}\xspace{} is fully parallelizable in the same manner as SGD and can be used to train deep neural networks with virtually \emph{no additional computational overhead}. Using comprehensive experiments with both linear and deep neural network models, we demonstrate that $p$-{\small \sf GD}\xspace{} can noticeably affect the structure and the generalization performance of the learned models. \begin{comment} Due to the success of deep and overparametrized models in practice, the problem of understanding their generalization performance has become an increasingly important question. Over the last decade, a significant research effort has been devoted to characterizing the implicit bias of optimization algorithms to find certain solutions under the overparametrized setting. However, there is one canonical open question in the literature: for the classification setting, what solution does mirror descent converge to? Motivated by its practicality, we consider the family of stochastic mirror descent (SMD) with the potential chosen as the $p$-th power of the $\ell_p$-norm, which we call $p$-{\small \sf GD}\xspace{}. We characterize the solution obtained by $p$-{\small \sf GD}\xspace and show that it converges in direction to a \emph{generalized maximum-margin} solution with respect to the $\ell_p$-norm for linearly separable classification. Notably, $p$-{\small \sf GD}\xspace{} has an efficient implementation because it is fully parallelizable in the same manner as SGD. In particular, we demonstrate through comprehensive experiments on standard datasets and deep architectures that $p$-{\small \sf GD}\xspace{} enjoys better generalization performance in many cases, with \emph{virtually no additional computational overhead} over the standard SGD optimizer. It has become increasingly clear that the inherent bias of the optimization algorithms to induce certain ``benign'' solutions that generalizes well play a key role in the performance of machine learning. These so-called implicit regularization properties allow deep and highly over-parameterized learning models to converge to specific regularized solutions without being directly part of the learning objective. 
In this paper, we consider a subclass of the mirror descent algorithm (with respect to the $\ell_p^p$-norm), which we shall call $p$-{\small \sf GD}\xspace, and present a new result on its implicit regularization in the linear classification setting. We will show that, with the logistic loss, mirror descent converges in direction to a ``generalized'' maximum margin solution with respect to the $\ell_p$-norm. Furthermore, a similar result holds for more general monotonically decreasing loss functions. We also note that $p$-{\small \sf GD}\xspace can be efficiently implemented in practice. So, we apply $p$-{\small \sf GD}\xspace to deep neural network training and find that inducing different biases on the learned model can significantly improve its generalization performance.
\end{comment}
\end{abstract}

\section{Introduction}

Overparameterized deep neural networks have enjoyed a tremendous amount of success in a wide range of machine learning applications~\citep{schrittwieser2020mastering,ramesh2021zero, brown2020language, dosovitskiy2020image}. However, as these highly expressive models have the capacity to admit multiple solutions that interpolate the training data, and not all of these solutions perform well on test data, it is important to characterize which of these interpolating solutions the optimization algorithms converge to. Such a characterization is important as it helps us understand the generalization performance of these models, which is one of the most fundamental questions in machine learning. Notably, it has been observed that even in the absence of any explicit regularization, the interpolating solutions obtained by standard gradient-based optimization algorithms, such as (stochastic) gradient descent, tend to generalize well. Recent research has highlighted that such algorithms favor particular types of solutions, i.e., they \emph{implicitly regularize} the learned models. \rebuttal{Importantly, such implicit biases are shown to play a crucial role in determining generalization performance, e.g., \citep{neyshabur2014search, zhang2021understanding, wilson2017marginal, donhauser2022fast}.}

In the literature, the implicit bias of first-order methods was first studied in linear settings, since the analysis is more tractable and there is both theoretical and empirical evidence that certain insights from linear models translate to deep learning, e.g., \citep{jacot2018neural,allen2019convergence, belkin2019reconciling,lyu2019gradient,bartlett2017spectrally,nakkiran2021deep}. In the linear setting, it is easier to establish implicit bias for regression tasks, where the square loss is typically used and attains its minimum at a finite value. For example, the implicit bias of gradient descent (GD) for the square loss goes back to \cite{engl1996regularization}. Beyond GD, the analysis of other popular algorithms such as the family of mirror descent (MD), which is an important generalization of GD, is more involved and was established only recently by \citep{gunasekar2018characterizing,azizan2018stochastic}. Specifically, those works showed that mirror descent converges to the interpolating solution that is closest to the initialization in terms of a Bregman divergence. Thus, the implicit bias in linear regression is relatively well understood by now. On the other hand, {\bf in the classification setting, the implicit bias analysis becomes significantly more challenging, and several questions remain open} despite significant progress in the past few years.
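For concreteness, the regression-side characterization just mentioned can be written as an explicit optimization problem (this compact restatement is ours; $D_\psi$ denotes the Bregman divergence of the potential $\psi$, formally introduced in Section~\ref{sec:priliminaries}): if mirror descent with potential $\psi$ is initialized at $w_0$ and applied to a linear regression problem with data $\{(x_i,y_i)\}_{i=1}^n$ (real-valued labels), and its iterates converge to an interpolating solution $w_\infty$, then
\begin{align*}
w_\infty \;=\; \argmin_{w \,:\, w^\top x_i = y_i,\ i=1,\dots,n} \; D_\psi(w, w_0).
\end{align*}
The analogue of such a statement for classification is precisely what has been missing, and it is what this paper provides for the potentials $\frac{1}{p}\norm{\cdot}_p^p$.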
A key differentiating factor in the classification setting is that the loss function does not attain its minimum at a finite value, and the weights have to grow to infinity. It has been shown that for the logistic loss, the gradient descent iterates converge in direction to the $\ell_2$-maximum-margin SVM solution~\citep{soudry2018implicit, ji2019implicit}. However, such characterizations for mirror descent are missing in the literature. Because it is possible for optimization algorithms to exhibit implicit bias in regression but not in classification (and vice versa) \citep{gunasekar2018characterizing}, resolving this gap in knowledge warrants careful analysis. See Table~\ref{table:main} for a summary.
\begin{comment}
To highlight several results in implicit regularization, in the case of linear regression, the implicit regularization has been characterized for both gradient descent and mirror descent (see Section~\ref{sec:priliminaries} for background) \citep{gunasekar2018characterizing}. More specifically, for the overparametrized linear regression setting, it has been shown that mirror descent converges to the interpolating solution that is closest to the initialization in terms of Bregman divergence (see Section~\ref{sec:priliminaries} for more details) \citep{azizan2018stochastic}. And \cite{wilson2017marginal} showed that various adaptive variants of SGD have different implicit regularization. On the other hand, {\bf in the case of classification, the results are unfortunately not as complete}. It has been shown that for the logistic loss, gradient descent iterates converge to the $\ell_2$-maximum margin SVM solution~\citep{soudry2018implicit, ji2019implicit}. However, such characterizations for mirror descent are missing in the literature. See Table~\ref{table:main} for a summary. We remark that apart from gradient/mirror descents, there have been results for other optimization methods, including AdaBoost and steepest descent~\citep{telgarsky2013margins,gunasekar2018characterizing,rosset2004boosting}.
\end{comment}

In this paper, we advance the understanding of the implicit regularization of mirror descent in the classification setting. In particular, inspired by their practicality, we focus on mirror descents with potential function $\psi(\cdot) = \frac{1}{p}\norm{\cdot}_p^p$ for $p > 1$. More specifically, such a choice of potential results in an update rule that \textit{can be applied coordinate-wise}, in the sense that updating the value at one coordinate does not depend on the values at other coordinates. Thanks to this property, this subclass of mirror descent can be implemented with \textit{no additional computational overhead}, making it much more practical than other algorithms in the literature; see Remark~\ref{rmk:sep} for more details.

\begin{table}[t]
\centering
\renewcommand{\arraystretch}{1.5}
\setlength\tabcolsep{10pt}
\begin{tabular}{ |c |c|c| }
\hline
 & Regression & Classification \\ \hline\hline
\multirow{4}{*}{\begin{tabular}{c}Gradient Descent\\($\psi(\cdot) = \frac{1}{2}\norm{\cdot}_2^2$)\end{tabular}} & $\argmin_w \norm{w-w_0}_2$ & $\argmin_w \norm{w}_2$ \\
 & $\mathrm{s.t.}~~w \text{ fits all data} $ & $\mathrm{s.t.}~~w \text{ classifies all data} $ \\
 & \multirow{2}{*}{\citep[Thm 6.1]{engl1996regularization}} & \cite{soudry2018implicit} \\
 & & \cite{ji2019implicit} \\ \hline
\multirow{4}{*}{\begin{tabular}{c}Mirror Descent\\(e.g.
$\psi(\cdot) = \frac{1}{p}\norm{\cdot}_p^p$)\end{tabular}} & $\argmin_w \norm{w-w_0}_p$ & $\argmin_w \norm{w}_p$ \\ & $\mathrm{s.t.}~~w \text{ fits all data} $ & $\mathrm{s.t.}~~w \text{ classifies all data} $ \\ & \cite{gunasekar2018characterizing} & \multirow{2}{*}{ \large \high{This work}} \\ & \cite{azizan2018stochastic} & \\ \hline \end{tabular} \caption{{\bf Conceptual summary of our results.} In the case of linear regression, the implicit regularization results are complete; it has been shown that mirror descent converges to the interpolating solution that is closest to the initialization. A corresponding characterization in the classification setting is missing in the literature, and providing one is precisely the goal of this work. In particular, motivated by its practical appeal, we consider the potential function $\psi(\cdot) = \frac{1}{p}\norm{\cdot}_p^p$ and extend the gradient descent results to such mirror descents. } \label{table:main} \end{table} \paragraph{Our contributions.} In this paper, we make the following contributions: \begin{list}{{\tiny $\blacksquare$}}{\leftmargin=1.5em} \setlength{\itemsep}{-0.75pt} \item We study mirror descent with potential $\frac{1}{p}\norm{\cdot}_p^p$ for $p > 1$, which we call \textit{$p$-norm GD} and abbreviate as $p$-{\small \sf GD}\xspace, as a practical and efficient generalization of the popular gradient descent. \item We show that for separable linear classification with the logistic loss, $p$-{\small \sf GD}\xspace exhibits implicit regularization by converging in direction to a ``generalized'' maximum-margin solution with respect to the $\ell_p$-norm. More generally, we show that, for monotonically decreasing loss functions, $p$-{\small \sf GD}\xspace follows the so-called regularization path, which is defined in Section~\ref{sec:priliminaries}. \item We investigate the implications of our theoretical findings with two sets of experiments: our experiments involving linear models corroborate our theoretical results, and real-world experiments with deep neural networks and popular datasets suggest that our findings carry over to such nonlinear settings. \rebuttal{Our deep learning experiments further show that $p$-{\small \sf GD}\xspace with different values of $p$ leads to significantly different generalization performance.} \end{list} \paragraph{Additional related work.} We remark that recent works also attempt to accelerate the convergence of gradient descent to its implicit regularization, either by using an aggressive step size schedule \citep{nacson2019convergence, ji2021characterizing} or with momentum \citep{ji2021fast}. Further, there have been several results for other optimization methods, including steepest descent, AdaBoost, and various adaptive methods such as RMSProp and Adam~\citep{telgarsky2013margins,gunasekar2018characterizing,rosset2004boosting,wang2021implicit,min2022one}. A mirror-descent-based algorithm for explicit regularization was recently proposed by \cite{azizan2021beyond}. \rebuttal{ Comparatively, there has been very little progress on mirror descent in the classification setting.
\cite{li2021implicit} consider a mirror descent, but their assumptions are not applicable beyond the $\ell_2$ geometry.\footnote{To be precise, they assume that the Bregman divergence is lower and upper bounded by a constant factor of the squared Euclidean distance, e.g., as in the case of a squared Mahalanobis distance.} To the best of our knowledge, there is no result for more general mirror descent algorithms in the classification setting. } \section{Background and Problem Setting} \label{sec:priliminaries} We are interested in the well-known classification setting. Consider a collection of input-label pairs $\{(x_i, y_i)\}_{i=1}^n \subset \mathbb{R}^d \times \{\pm 1\}$ and a classifier $f_w(x)$, where $w \in \mathcal{W}$. For some \textit{loss function} $\ell : \mathbb{R} \to \mathbb{R}$, our goal is to minimize the empirical loss: \[ L(w) = \frac{1}{n}\sum_{i=1}^n \ell(y_i \cdot f_w(x_i)).\] Throughout the paper, we assume that the classification loss function $\ell$ is decreasing, convex, and does not attain its minimum, as is the case for most common loss functions in practice (e.g., the logistic loss and the exponential loss). Without loss of generality, we assume that $\inf \ell(\cdot) = 0$. For our theoretical analysis, we consider a linear model, where the model can be expressed as $f_w(x) = w^\top x$ with $w \in \mathbb{R}^d$. We also make the following assumptions about the data. \rebuttal{ First, since we are mainly interested in the over-parameterized setting where $d > n$, we assume that the data is linearly separable, i.e., there exists $w^* \in \mathbb{R}^d$ s.t. $\mathrm{sign}(\inp{w^*}{x_i}) = y_i$ for all $i\in[n]$. } We also assume that the inputs $x_i$ are bounded. More specifically, for later use, we assume that for the given $p>1$ there exists some constant $C$ so that $\max_i \norm{x_i}_q < C$, where $1/q + 1/p = 1$. \paragraph{Preliminaries on mirror descent.} The key component of mirror descent is a \textit{potential function}. In this work, we will focus on differentiable and strictly convex potentials defined on the entire domain $\mathbb{R}^d$.\footnote{In general, the mirror map is a convex function of Legendre type~(see, e.g., \citep[Section 26]{rockafellar1970convex}).} We call $\nabla \psi$ the corresponding \textit{mirror map}. Given a potential $\psi$, the natural notion of ``distance'' associated with it is the Bregman divergence. \begin{definition}[Bregman divergence~\citep{bregman1967relaxation}] For a potential $\psi$, the Bregman divergence $\breg{\psi}{\cdot}{\cdot}$ associated with $\psi$ is defined as \begin{align*} \breg{\psi}{x}{y}:= \psi(x)-\psi(y) -\inp{\nabla \psi(y)}{x-y},\qquad \forall x,y\in \mathbb{R}^d\,. \end{align*} \end{definition} An important special case is the potential $\psi = \frac{1}{2}\norm{\cdot}^2$, where $\norm{\cdot}$ denotes the Euclidean norm. In this case, the Bregman divergence becomes $D_\psi(x,y) = \frac{1}{2}\norm{x-y}^2$. For more background on the Bregman divergence and its properties, see, e.g., \citep[Section 2.2]{bauschke2017descent} and \citep{azizan2019stochastic}. Mirror descent (MD) with respect to the potential $\psi$ is a generalization of gradient descent in which the Bregman divergence is used as the measure of distance: \begin{align} \tag{\sf MD}\label{equ:md} w_{t+1} = \argmin_w \left\{\frac{1}{\eta}D_\psi(w, w_t) + \inp{\nabla L(w_t)}{w}\right\} \end{align} Equivalently, \ref{equ:md} can be written as $\nabla\psi(w_{t+1}) = \nabla\psi(w_t) - \eta \nabla L(w_t)$.
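For concreteness, a minimal numerical sketch of one \ref{equ:md} step, written directly in terms of the mirror map, is given below; the potential, its gradient, and the inverse mirror map are user-supplied callables, and all names are purely illustrative rather than part of any released code.
\begin{verbatim}
import numpy as np

def bregman(psi, grad_psi, x, y):
    # D_psi(x, y) = psi(x) - psi(y) - <grad_psi(y), x - y>
    return psi(x) - psi(y) - np.dot(grad_psi(y), x - y)

def md_step(w, grad_L, eta, grad_psi, grad_psi_inv):
    # grad_psi(w_{t+1}) = grad_psi(w_t) - eta * grad_L(w_t)
    return grad_psi_inv(grad_psi(w) - eta * grad_L(w))
\end{verbatim}
With $\psi = \frac{1}{2}\norm{\cdot}^2$, both \texttt{grad\_psi} and \texttt{grad\_psi\_inv} are the identity map and the step reduces to plain gradient descent.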
We refer readers to \cite[Figure 4.1]{bubeck2015convex} for a nice illustration of mirror descent. Also, see \citep[Section 5.7]{juditsky2011first} for various examples of potentials used in different applications. One property we will repeatedly use is the following~\citep{azizan2018stochastic}: \begin{lemma}[\ref{equ:md} identity] \label{thm:key-iden} For any $w \in \mathbb{R}^d$, the following identities hold for \ref{equ:md}: \begin{subequations} \begin{align} &D_\psi(w, w_t) = D_\psi(w, w_{t+1}) + D_{\psi - \eta L}(w_{t+1}, w_t) + \eta D_{L}(w, w_t) - \eta L(w) + \eta L(w_{t+1})\,, \label{equ:key-iden-1}\\ &\quad = D_\psi(w, w_{t+1}) + D_{\psi - \eta L}(w_{t+1}, w_{t}) - \eta \inp{\nabla L(w_t)}{w - w_t} - \eta L(w_{t}) + \eta L(w_{t+1})\, \label{equ:key-iden-2}. \end{align} \end{subequations} \end{lemma} Using Lemma~\ref{thm:key-iden}, we make several new observations and prove the following useful statements. \begin{lemma} \label{thm:decreasing-lose} For a sufficiently small step size $\eta$ such that $\psi - \eta L$ is convex, the loss decreases monotonically along the iterations of \ref{equ:md}, i.e., $L(w_{t+1}) \le L(w_{t})$. \end{lemma} \begin{lemma} \label{thm:to-infinity} In a separable linear classification problem, if $\eta$ is chosen sufficiently small so that $\psi - \eta L$ is convex, then we have $L(w_t) \to 0$ as $t \to \infty$. Hence, $\lim_{t\to \infty}\norm{w_t} = \infty$ for any norm $\norm{\cdot}$. \end{lemma} The formal proofs of these lemmas can be found in Appendix~\ref{sec:proof-basic-lemmas}. \begin{remark} \rebuttal{ We can relax the condition in Lemmas \ref{thm:decreasing-lose} and \ref{thm:to-infinity} so that, for a sufficiently small step size $\eta$, $\psi - \eta L$ only needs to be locally convex at the iterates $w_t$. The relaxed condition allows us to analyze losses such as the exponential loss (see, e.g., Footnote 2 of \cite{soudry2018implicit}). } This condition can be viewed as the mirror descent counterpart of the standard smoothness assumption in the analysis of gradient descent (see \cite{lu2018relatively}). \end{remark} {\bf Preliminaries on the convergence of linear classifiers.} As discussed above, the weight vector $w_t$ diverges under mirror descent. The main theoretical question is therefore: \begin{center} What direction does \ref{equ:md} diverge to? In other words, can we characterize $w_t / \norm{w_t}$ as $t\to \infty$? \end{center} To answer this question, we define two special directions whose importance will become clear later. \begin{definition} The \textbf{regularization path} with respect to the $\ell_p$-norm is defined as \begin{equation} \bar{w}_p(B) = \argmin_{\norm{w}_p \le B} L(w). \end{equation} If the limit $\lim_{B\to\infty} \bar{w}_p(B) / B$ exists, we call it the \textbf{regularized direction} and denote it by $\reg{p}$. \end{definition} \begin{definition} The \textbf{margin} $\gamma$ of a linear classifier $w$ is defined as $\gamma(w) = \min_{i=1, \dots, n} y_i \inp{x_i}{w}. $ The {\bf max-margin direction} with respect to the $\ell_p$-norm is defined as \begin{equation} \mmd{p} := \argmax_{\norm{w}_p \le 1} \left\{ \min_{i=1, \dots, n} y_i \inp{x_i}{w} \right\}, \end{equation} and we let $\mar{p}$ denote the optimal value of this problem. \end{definition} Note that $\mmd{p}$ is parallel to the hard-margin SVM solution w.r.t. the $\ell_p$-norm: $\argmin_w \{\norm{w}_p : \gamma(w) \ge 1\}$.
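Since this hard-margin program is convex, $\mmd{p}$ can be approximated numerically for small instances. The following is a rough sketch using \texttt{scipy} (purely illustrative; this is not the procedure used in our experiments, and a dedicated convex solver would be preferable for $p$ close to $1$):
\begin{verbatim}
import numpy as np
from scipy.optimize import minimize

def lp_max_margin_direction(X, y, p):
    # Approximate argmin ||w||_p s.t. y_i <x_i, w> >= 1, then normalize
    # to unit l_p norm, giving (a proxy for) the max-margin direction.
    n, d = X.shape
    cons = [{"type": "ineq", "fun": lambda w, i=i: y[i] * X[i].dot(w) - 1.0}
            for i in range(n)]
    w0 = np.linalg.lstsq(y[:, None] * X, np.ones(n), rcond=None)[0]
    res = minimize(lambda w: np.linalg.norm(w, ord=p), w0,
                   constraints=cons, method="SLSQP")
    return res.x / np.linalg.norm(res.x, ord=p)
\end{verbatim}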
\rebuttal{Also note that the superscripts in $\reg{p}$ and $\mmd{p}$ are not variables; we only use this notation to differentiate the two definitions.} Prior results have shown that, in linear classification, gradient descent converges in direction. \begin{theorem}[\cite{soudry2018implicit}] \label{thm:gd-maxmargin} For separable linear classification with the logistic loss, the gradient descent iterates with a sufficiently small step size converge in direction to $\mmd{2}$, i.e., $\lim_{t\to\infty} \frac{w_t}{\norm{w_t}_2} = \mmd{2}$. \end{theorem} \begin{theorem}[\cite{ji2020gradient}] \label{thm:gd-regdir} If the regularized direction $\reg{2}$ with respect to the $\ell_2$-norm exists, then the gradient descent iterates with a sufficiently small step size converge in direction to it, i.e., $\lim_{t\to\infty} \frac{w_t}{\norm{w_t}_2} = \reg{2}$. \end{theorem} \section{Mirror Descent with the $p$-th Power of the $\ell_p$-norm} \label{sec:main-result} \begin{comment} \begin{lemma}[Law of Cosine] \label{thm:breg-loc} \begin{equation} \brg{w}{w'} = \brg{w}{w''} + \brg{w''}{w'} - \inp{\nabla\psi(w') - \nabla\psi(w'')}{w - w''} \end{equation} \end{lemma} \end{comment} In this section, we investigate theoretical properties of the main algorithm of interest, namely mirror descent with $\psi(\cdot) = \frac{1}{p} \norm{\cdot}_p^p$ for $p > 1$.\footnote{\rebuttal{Because the potential function must be \textit{strictly} convex for the Bregman divergence to be well-defined, the value of $p$ cannot be exactly 1.}} We shall call this algorithm \textit{$p$-norm GD} because it naturally generalizes gradient descent to the $\ell_p$ geometry, and for conciseness, we will refer to it by the shorthand $p$-{\small \sf GD}\xspace. As noticed by \cite{azizan2021stochastic}, this choice of potential is of particular practical interest because the update of the mirror map $\nabla \psi$ becomes \textit{separable} in the coordinates and can thus be implemented \textit{coordinate-wise}, independently of the other coordinates: \begin{align} \tag{$p$-{\small \sf GD}\xspace}\label{mdpp} \forall j \in [d],\quad \begin{cases} \rebuttal{w_{t+1}[j] \leftarrow \left| w_t^+[j] \right|^{\frac{1}{p-1}} \cdot \mathrm{sign}\left( w_t^+[j]\right)}\\ w_t^+[j]:= |w_t[j]|^{p-1}\mathrm{sign}(w_t[j]) - \eta \nabla L(w_t)[j] \end{cases} \end{align} Furthermore, we can build on the observation of \cite{azizan2021stochastic} and derive the following identities that allow us to better manipulate $p$-{\small \sf GD}\xspace: \begin{subequations} \begin{align} \inp{\nabla \psi (w)}{w} &= \mathrm{sign}(w_1)w_1 \cdot |w_1|^{p-1} + \cdots + \mathrm{sign}(w_d)w_d \cdot |w_d|^{p-1} = \norm{w}_p^p\\ \brg{c w}{c w'} &= |c|^p \brg{w}{w'} \quad \forall c\in \mathbb{R}. \label{equ:homo} \end{align} \end{subequations} \begin{remark} \label{rmk:sep} Note that the coordinate-wise separability property is not shared by other related algorithms in the literature. For instance, the choice $\psi = \frac{1}{2} \norm{\cdot}_q^2$ for $1/p + 1/q = 1$, which is referred to as the $p$-norm algorithm~\citep{grove2001general, gentile2003robustness}, is not fully coordinate-wise separable since it requires computing $\norm{w_t}_p$ at each step (see, e.g., \citep[eq. (1)]{gentile2003robustness}).
Another related algorithm is steepest descent, where the Bregman divergence in \ref{equ:md} is replaced with $\norm{\cdot}^2$ for a general norm $\norm{\cdot}$.\footnote{It is also worth noting that steepest descent is not an instance of mirror descent since $\norm{\cdot}^2$ is not a Bregman divergence for a general norm $\norm{\cdot}$.} However, for similar reasons, its update rule is not fully separable. \end{remark} \subsection{Main theoretical results} We extend Theorems~\ref{thm:gd-maxmargin}~and~\ref{thm:gd-regdir} to the setting of $p$-{\small \sf GD}\xspace. In doing so, we resolve two major obstacles in the analysis of implicit regularization in linear classification: \begin{list}{{\tiny $\blacksquare$}}{\leftmargin=1.5em} \setlength{\itemsep}{-1pt} \item Our analysis approaches the classification setting as a limit of the regression implicit bias. This argument gives stronger theoretical justification for utilizing the regularized direction (as employed by~\cite{ji2020gradient}) and partially addresses the concern from \cite{gunasekar2018characterizing} that the implicit biases of regression and classification problems are ``fundamentally different.'' \item On a more technical note, analyzing the implicit bias requires handling cross terms of the form $\inp{\nabla\psi(w)}{w'}$, which lack a direct geometric interpretation. We demonstrate that, for our potential functions of interest, these terms admit a convenient form and can be handled in the analysis. \end{list} We begin with the motivation behind the regularized direction, and consider the regression setting in which there exists some weight vector $w$ such that $L(w) = 0$. Then, we can apply Lemma~\ref{thm:key-iden} to get \[D_\psi(w, w_{t}) = D_\psi(w, w_{t+1}) + D_{\psi - \eta L}(w_{t+1}, w_t) + \eta D_{L}(w, w_t) + \eta (L(w_{t+1}) - L(w)).\] Since we assumed $L(w) = 0$, the equation above implies that $D_\psi(w, w_{t}) \ge D_\psi(w, w_{t+1})$ for a sufficiently small step size $\eta$. This can be interpreted as \ref{equ:md} having a decreasing ``potential'' of the form $\brg{w}{\cdot}$ at each step. Using this property, \citet{azizan2018stochastic} establish the implicit bias results of mirror descent in the regression setting. However, such a weight vector $w$ does not exist in the classification setting. One natural workaround would then be to choose a vector $w$ so that $L(w) \le L(w_t)$ for all $t \le T$. The following result, which is a generalization of \citep[Lemma 9]{ji2020gradient}, shows that one can in fact choose the reference vector $w$ as a scalar multiple of the regularized direction. \begin{lemma} \label{thm:approx-reg-dir-loss} If the regularized direction $\reg{p}$ exists, then $\forall \alpha > 0$, there exists $r_\alpha$ such that for any $w$ with $\norm{w}_p > r_\alpha$, we have $L((1+\alpha)\norm{w}_p \reg{p}) \le L(w)$. \end{lemma} However, this does not resolve the issue altogether. Recall from Lemma~\ref{thm:to-infinity} that the loss approaches 0, and therefore one cannot choose a fixed reference vector $w$ in the limit as $T\to \infty$. But due to the homogeneity of the Bregman divergence \eqref{equ:homo}, we can rescale $\reg{p}$ at each iteration and, by doing so, choose the reference vector $w$ to be a ``moving target.'' In other words, the idea behind our analysis is that the classification problem is chasing after a regression one and behaves similarly to it in the limit. Let us formalize this idea.
We begin with the following inequality: \begin{align} \label{ineq:1} \brg{c_t\reg{p}}{w_{t+1}} \le \brg{c_t\reg{p}}{w_t} - \eta L(w_{t+1}) + \eta L(w_t), \end{align} where $c_t$ is taken to be $\approx \nablaorm{w_t}_p$.\footnote{To be more precise, we want $c_t = (1+\alpha) \nablaorm{w_t}_p$; and reason behind this choice is self-evident after we present Corollary~\ref{thm:cross-term}.} Now we modify \eqref{ineq:1} so that it can telescope over different iterations. One way is to add $\brg{c_{t+1}\reg{p}}{w_{t+1}}$ on both sides of \eqref{ineq:1} and move $ \brg{c_t\reg{p}}{w_{t+1}}$ to the right-hand side as follows: \begin{align*} &\brg{c_{t+1}\reg{p}}{w_{t+1}} \\ \le{}& \brg{c_t\reg{p}}{w_t} - \eta L(w_{t+1}) + \eta L(w_t) + \brg{c_{t+1}\reg{p}}{w_{t+1}} - \brg{c_t\reg{p}}{w_{t+1}} \\ ={}& \brg{c_t\reg{p}}{w_t} - \eta L(w_{t+1}) + \eta L(w_t) + \psi(c_{t+1} \reg{p}) - \psi(c_t \reg{p}) - \inp{\nablaabla\psi(w_{t+1})}{(c_{t+1} - c_t) \reg{p}} \end{align*} Summing over $t = 0, \dots, T-1$ gives us \begin{equation} \label{equ:tele-sum} \begin{aligned} \brg{c_T\reg{p}}{w_T} &\le \brg{c_0\reg{p}}{w_0} - \eta L(w_1) + \eta L(w_T) + \psi(c_{T} \reg{p}) - \psi(c_1 \reg{p}) \\ &\hspace{6em}- \sum_{t=1}^{T-1}\inp{\nablaabla\psi(w_{t+1})}{(c_{t+1} - c_t) \reg{p}} \end{aligned} \end{equation} The rest of the argument deals with simplifying quantities that do not cancel under telescoping sum. For instance, in order to deal with $\inp{\nablaabla\psi(w_{t+1})}{\reg{p}}$, we invoke the \ref{equ:md} update rule as follows \begin{align*} \inp{\nablaabla\psi(w_{t+1}) - \nablaabla\psi(w_{t})}{\reg{p}} = \inp{-\eta\nablaabla L(w_t)}{\reg{p}} \gtrsim \inp{-\eta\nablaabla L(w_t)}{w_t}, \end{align*} where the last inequality follows from the intuition that $\reg{p}$ is the direction along which the loss achieves the smallest value and hence $\nablaabla L(w_t)$ must point away from $\reg{p}$, i.e., it must be that $\inp{\nablaabla L(w_t)}{\reg{p}} \lesssim \inp{\nablaabla L(w_t)}{u}$ for any direction $u$. The following result formalizes this intuition. \begin{corollary} \label{thm:cross-term} For $w$ so that $\nablaorm{w}_p > r_\alpha$, we have $\inp{\nablaabla L(w)}{w} \ge (1+\alpha)\nablaorm{w}_p\inp{\nablaabla L(w)}{\reg{p}}$. \end{corollary} \nablaoindent \emph{Proof.} This follows from the convexity of $L$ and Lemma~\ref{thm:approx-reg-dir-loss}: $\inp{\nablaabla L(w)}{w - (1+\alpha)\nablaorm{w}\reg{p})} \ge L(w) - L((1+\alpha)\nablaorm{w}\reg{p}) \ge 0$. \qed Now we are left with the terms $\inp{-\eta\nablaabla L(w_t)}{w_t}$. For general potential $\psi$, the quantity $\inp{-\eta\nablaabla L(w_t)}{w_t} = \inp{\nablaabla\psi(w_{t+1}) - \nablaabla\psi(w_{t})}{w_t}$ cannot be simplified. On the other hand, due to our choice of potential, one can invoke Lemma~\ref{thm:key-iden} to lower bound these quantities in terms of $\nablaorm{w_{t+1}}_p$ and $\nablaorm{w_t}_p$, and this step is detailed in Lemma~\ref{thm:cross-term-diff} in Appendix~\ref{sec:cross-term-diff}. Once we have established a lower bound on $\inp{\nablaabla\psi(w_{t+1})}{\reg{p}}$, we can turn \eqref{equ:tele-sum} entirely into a telescoping sum and unwind the above process to show that $\brg{\reg{p}}{w_t / \nablaorm{w_t}_p}$ must converge to zero in the limit as $t \to \infty$. Putting this all together, we obtain the following result. 
\begin{comment} To address this, we note that for $\psi(\cdot) = \frac{1}{p} \nablaorm{\cdot}_p^p$, Bregman divergence is $p$-homogeneous, that is \begin{equation*} \begin{aligned} \brg{c w_1}{c w_2} &= \psi(c w_1) - \psi(c w_2) - \inp{\lambda{\psi}(cw_2)}{cw_1 - cw_2} \\ &= |c|^p \psi(w_1) - |c|^p \psi(w_2) - |c|^{p-1}\mathrm{sign}(c) \cdot c\inp{\lambda{\psi}(w_2)}{w_1 - w_2} \\ &= |c|^p \brg{w_1}{w_2} \end{aligned} \end{equation*} Therefore, we want to show a similar inequality after normalization: \begin{equation} \label{equ:approx-breg-inequ} \brg{\reg{p}}{\frac{w_{t+1}}{\nablaorm{w_{t+1}}_p}} \lesssim \brg{\reg{p}}{\frac{w_t}{\nablaorm{w_t}_p}} \end{equation} Recall that we have several nice properties when $\psi = \frac{1}{p}\nablaorm{\cdot}_p^p$: \begin{align*} \nablaabla \psi (w) &= (\mathrm{sign}(w_1)|w_1|^{p-1},\cdots, \mathrm{sign}(w_d)|w_d|^{p-1})\\ \inp{\nablaabla \psi (w)}{w} &= \mathrm{sign}(w_1)w_1|w_1|^{p-1} + \cdots + \mathrm{sign}(w_d)w_d|w_d|^{p-1} = \nablaorm{w}^p \end{align*} So, for a normalized vector $\tilde{w}$, we can perform the following manipulation on Bregman divergence: \begin{equation*} \begin{aligned} \brg{\reg{p}}{\tilde{w}} &= \psi(\reg{p}) - \psi\left(\tilde{w}\right) - \inp{\nablaabla\psi\left(\tilde{w}\right)}{\reg{p} - \tilde{w}} \\ &= \psi(\reg{p}) - \psi\left(\tilde{w}\right) + \inp{\nablaabla\psi(\tilde{w})}{\tilde{w}} - \inp{\nablaabla\psi(\tilde{w})}{\reg{p}} \\ &= \frac{1}{p}\nablaorm{\reg{p}}_p^p - \frac{1}{p} \nablaorm{\tilde{w}}_p^p + \nablaorm{\tilde{w}}_p^p - \inp{\nablaabla\psi(\tilde{w})}{\reg{p}} \\ &= 1 - \inp{\nablaabla\psi(\tilde{w})}{\reg{p}} \end{aligned} \end{equation*} So we can rewrite \eqref{equ:approx-breg-inequ} as \[\inp{\nablaabla\psi\left(\frac{w_{t+1}}{\nablaorm{w_{t+1}}_p}\right)}{\reg{p}} \gtrsim \inp{\nablaabla\psi\left(\frac{w_{t}}{\nablaorm{w_{t}}_p}\right)}{\reg{p}}\] If we assume for now that $\nablaorm{w_t}_p \approx \nablaorm{w_{t+1}}_p$, then we have \[\inp{\nablaabla\psi(w_{t+1}) - \nablaabla\psi(w_t)}{\reg{p}} \gtrsim 0\] Note that the left-hand side corresponds to the \eqref{equ:md} update step, so we can establish an inequality of form with Lemma~\ref{thm:key-iden}. If formalize this intuition, our proof reverses the line of thought above by first showing a lower bound on $\inp{\nablaabla\psi(w_{t+1}) - \nablaabla\psi(w_t)}{\reg{p}}$ and use telescoping sum to lower bound $\inp{\nablaabla\psi(w_{T}) - \nablaabla\psi(w_{t_0})}{\reg{p}}$, where the initial time $t_0$ depends only on $\alpha$ from Lemma~\ref{thm:approx-reg-dir-loss}. For more details, we ask the reader to check out Appendix~\ref{sec:proof-primal-bias}. \end{comment} \begin{theorem} \label{thm:primal-bias} For a separable linear classification problem, if the regularized direction $\reg{p}$ exists, then with sufficiently small step size, the iterates of $p$-{\small \sf GD}\xspace converge to $\reg{p}$ in direction: \begin{equation} \lim_{t\to\infty} \frac{w_t}{\nablaorm{w_t}_p} = \reg{p}. \end{equation} \end{theorem} A formal proof of this theorem can be found in Appendix~\ref{sec:proof-primal-bias}. We note that our proof further simplifies derivations using the separability of the mirror map. The final missing piece would be the existence of the regularized direction. In general, finding the limit direction $\reg{p}$ would be difficult. Fortunately, we can sometimes appeal to the max-margin direction that is much easier to compute. 
The following result is a generalization of \citep[Proposition 10]{ji2020gradient} and shows that for common classification losses, the regularized direction and the max-margin direction coincide, hence proving the existence of the former. \begin{prop} \label{thm:reg-max-dir} If the loss has an exponential tail, i.e., $\lim_{z\to\infty} \ell(z) e^{az} = b$ for some constants $a, b > 0$, then the regularized direction exists and is equal to the max-margin direction $\mmd{p}$. \end{prop} The proof of this result can be found in Appendix~\ref{sec:proof-reg-max-dir}. Note that many commonly used classification losses, e.g., the logistic loss, have exponential tails. \subsection{Asymptotic convergence rate} \label{sec:asymp-result} In this section, we characterize the rate of convergence in Theorem~\ref{thm:primal-bias}. Following the proof of Theorem~\ref{thm:primal-bias}, one can show the following result in the case of linearly separable data. \begin{corollary} \label{thm:convg-rate} The following rate of convergence holds: \[\brg{\reg{p}}{\frac{w_t}{\norm{w_t}_p}} \in O\left(\norm{w_t}_p^{-(p-1)}\right).\] \end{corollary} In order to fully understand the convergence rate, we need to characterize the asymptotic behavior of $\norm{w_t}_p$. The next result does precisely that. Recall that we assumed the dataset is bounded so that $\max_i \norm{x_i}_q \le C$ for $1/p + 1/q = 1$, and that the max-margin direction $\mmd{p}$ satisfies $y_i\inp{x_i}{\mmd{p}} \ge \hat{\gamma}_p$ for all $i \in [n]$. Then, we have the following bound on $\norm{w_t}_p$. \begin{lemma} \label{thm:norm-rate} For the exponential loss $\ell(z) = \exp(-z)$, the asymptotic growth of $\norm{w_t}_p$ is $\Theta(\log t)$. In particular, we have \[\liminf_{t\to\infty} \norm{w_t}_p \ge \frac{1}{C} (\log t - p \log\log t) + O(1) \text{ and } \limsup_{t\to\infty} \norm{w_t}_p \le \hat{\gamma}_p^{-1} \frac{p}{p-1} \log t.\] \end{lemma} The proof of this lemma can be found in Appendix~\ref{sec:proof-asymp-result}. \begin{comment} illustrates the operational convenience of the maximum-margin direction. For the upper bound, we consider the distance $\brg{\hat{\gamma}_p^{-1} \mmd{p} \log T}{w_T}$. Due to the property of the max-margin solution $\mmd{p}$, the quantity $\inp{\nabla L(w_t)}{\hat{\gamma}_p^{-1} \mmd{p} \log T - w_t}$ must be small. Then we can apply Lemma~\ref{thm:key-iden} and sum up \eqref{equ:key-iden-2} over $t = 0, \dots, T-1$ to conclude that $\brg{\hat{\gamma}_p^{-1} \mmd{p} \log T}{w_T}$ cannot be much larger than $\brg{\hat{\gamma}_p^{-1} \mmd{p} \log T}{w_0}$, and this in turn upper bounds how fast $\norm{w_T}_p$ can grow. And for the lower bound, we apply a standard mirror descent convergence bound of the form $L(w_T) - L(w^\star) \le \frac{1}{\eta T} \brg{w^\star}{w_0}$. We can carefully pick $w^\star$ to be a scalar multiple of $\mmd{p}$ so that $L(w^\star) \le \frac{1}{2} L(w_T)$, and by the definition of the max-margin direction, $\norm{w^\star}_p$ cannot be much larger than $\norm{w_T}_p$. So, combined with the upper bound, we can bound $L(w_T) \ge \exp(-\gamma(w_T))$, and this in turn implies a lower bound on $\norm{w_T}_p$ because $\gamma(w_T) \le \inp{w_T}{x_i} \le C \cdot \norm{w_T}_p$ for any $i \in [n]$. \end{comment} Similar conclusions can be reached for other losses with an exponential tail. Therefore, in such cases, $p$-{\small \sf GD}\xspace{} has a poly-logarithmic rate of convergence.
\begin{corollary} \label{thm:final-convg-rate} For the exponential loss, we have the convergence rate \[ \brg{\reg{p}}{\frac{w_t}{\norm{w_t}_p}} \in O\left(\frac{1}{\log^{p-1}(t)}\right).\] \end{corollary} \section{Experiments} \label{sec:experiments} In this section, we investigate the behavior and performance of $p$-{\small \sf GD}\xspace for various values of $p$. We naturally include $p = 2$, which corresponds to gradient descent. Because $p$-{\small \sf GD}\xspace does not directly support $p = 1$ or $p = \infty$, we choose $p = 1.1$ as a surrogate for $\ell_1$ and $p = 10$ as a surrogate for $\ell_\infty$. We also consider $p = 1.5, 3, 6$ to interpolate between these points. This section presents a summary of our results; the complete experimental setup and full results can be found in Appendices~\ref{sec:experiment-detail} and \ref{sec:add-experiments}. \subsection{Linear classification} \label{sec:linear-classifier} \begin{figure} \caption{An example of $p$-{\small \sf GD}\xspace on a synthetic linearly separable dataset.} \label{fig:synthetic-data} \end{figure} \paragraph{Visualization of the convergence of $p$-{\small \sf GD}\xspace.} To visualize the results of Theorem~\ref{thm:primal-bias} and Corollary~\ref{thm:final-convg-rate}, we randomly generated a linearly separable set of 15 points in $\mathbb{R}^2$. We then ran $p$-{\small \sf GD}\xspace on this dataset with the exponential loss $\ell(z) = \exp(-z)$ and a fixed step size $\eta = 10^{-4}$. We ran this experiment for $p = 1.5, 2, 3$, each for $10^6$ iterations. In the illustrations of Figure~\ref{fig:synthetic-data}, the mirror descent iterates $w_t$ have unbounded norm and converge in direction to $\mmd{p}$. These results are consistent with Lemma~\ref{thm:to-infinity} and with Theorem~\ref{thm:primal-bias}. Moreover, as predicted by Corollary~\ref{thm:final-convg-rate}, the observed rate of convergence of $\brg{\mmd{p}}{w_t / \norm{w_t}_p}$ is poly-logarithmic in the number of iterations. Corollary~\ref{thm:final-convg-rate} also indicates that the convergence rate should be faster for larger $p$ due to the larger exponent, and this is consistent with our observation in the second plot of Figure~\ref{fig:synthetic-data}. Finally, in the third plot of Figure~\ref{fig:synthetic-data}, the norm of the iterates $w_t$ grows at a logarithmic rate, which matches the prediction of Lemma~\ref{thm:norm-rate}. \paragraph{Implicit bias of $p$-{\small \sf GD}\xspace in linear classification.} We now verify the conclusions of Theorem~\ref{thm:primal-bias}. To this end, we recall that $\mmd{p}$ is parallel to the SVM solution $\argmin_w \{\norm{w}_p : \gamma(w) \ge 1\}$. Hence, we can exploit the linearity and rescale any classifier so that its margin is equal to $1$ (see the sketch below). If the prediction of Theorem~\ref{thm:primal-bias} holds, then for each fixed value of $p$, the classifier generated by $p$-{\small \sf GD}\xspace should have the smallest $\ell_p$-norm after rescaling. To ensure that the directions $\mmd{p}$ are sufficiently different for different values of $p$, we simulate an over-parameterized setting by randomly selecting 15 points in $\mathbb{R}^{100}$. We used a fixed step size of $10^{-4}$ and ran $250{,}000$ iterations for each $p$. Table~\ref{tab:linear-bias} shows the results for $p = 1.1, 2, 3$ and 10; under each norm, we highlight the smallest classifier in \textbf{boldface}. Among the four classifiers presented, $p$-{\small \sf GD}\xspace with $p = 1.1$ has the smallest $\ell_{1.1}$-norm, and similar conclusions hold for $p = 2, 3, 10$.
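As a concrete illustration of this evaluation protocol, the rescaling used to produce Table~\ref{tab:linear-bias} can be sketched as follows (a minimal sketch of the procedure, not our exact evaluation script):
\begin{verbatim}
import numpy as np

def rescaled_norms(w, X, y, ps=(1.1, 2, 3, 10)):
    # Rescale w so that its margin min_i y_i <x_i, w> equals 1,
    # then report its l_p norms for the values of p being compared.
    margin = np.min(y * (X @ w))
    assert margin > 0, "w must classify every point correctly"
    w_scaled = w / margin
    return {p: np.linalg.norm(w_scaled, ord=p) for p in ps}
\end{verbatim}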
Although $p$-{\small \sf GD}\xspace converges to $\mmd{p}$ at a very slow rate, we already observe a very strong implicit bias of the $p$-{\small \sf GD}\xspace classifiers toward their respective $\ell_p$ geometries in this highly over-parameterized setting. This suggests that we should be able to take advantage of the implicit regularization in practice at a moderate computational cost. Due to space constraints, we defer more complete results with additional values of $p$ to Appendix~\ref{sec:add-experiment-synthetic}. \begin{wraptable}[8]{r}{0.5\textwidth} \centering {\small \begin{tabular}{l| c|c|c|c} & $\ell_{1.1}$ & $\ell_{2}$ & $\ell_{3}$& $\ell_{10}$ \\ \hline\hline $p=1.1$ & \textbf{5.670} & 1.659 & 1.100 & 0.698 \\ $p=2$ & 6.447 & \textbf{1.273} & 0.710 & 0.393 \\ $p=3$ & 7.618 & 1.345 & \textbf{0.691} & 0.318 \\ $p=10$ & 9.086 & 1.520 & 0.742 & \textbf{0.281} \\ \hline \end{tabular} } \caption{Size of the linear classifiers generated by $p$-{\small \sf GD}\xspace (after rescaling to unit margin), measured in the $\ell_{1.1}$, $\ell_2$, $\ell_3$, and $\ell_{10}$ norms.} \label{tab:linear-bias} \end{wraptable} \subsection{Deep neural networks} \label{sec:cifar} Going beyond linear models, we now investigate $p$-{\small \sf GD}\xspace{} in deep-learning settings, focusing on its impact on the structure of the learned model and the potential implications for generalization performance. As we discussed in Section \ref{sec:main-result}, {\bf the implementation of $p$-{\small \sf GD}\xspace{} is straightforward}; to illustrate this simplicity, we provide code snippets in Appendix~\ref{sec:practicality} (see also the short sketch below). Thus, we are able to effectively experiment with the behavior of $p$-{\small \sf GD}\xspace in neural network training. Specifically, we perform a set of experiments on the CIFAR-10 dataset~\citep{krizhevsky2009learning}. We use the \textit{stochastic} version of $p$-{\small \sf GD}\xspace with different values of $p$. We choose a variety of networks: \textsc{VGG}~\citep{simonyan2014very}, \textsc{ResNet}~\citep{he2016deep}, \textsc{MobileNet}~\citep{sandler2018mobilenetv2} and \textsc{RegNet}~\citep{radosavovic2020designing}. \paragraph{Implicit bias of $p$-{\small \sf GD}\xspace in deep neural networks.} Since the notion of margin is not well-defined in this highly nonlinear setting, we instead visualize the impact of $p$-{\small \sf GD}\xspace's implicit regularization on the histogram of the weights (in absolute value) of the trained model. In Figure~\ref{fig:cifar10-hist}, we report the weight histograms of \textsc{ResNet-18} models trained under $p$-{\small \sf GD}\xspace with $p = 1.1, 2, 3$ and $10$. Depending on $p$, we observe interesting differences between the histograms. Note that the network is sparsest when $p=1.1$, as most weights are clustered around $0$. Moreover, comparing the maximum weights, one can see that the case $p = 10$ achieves the smallest value. Another observation is that the network becomes denser as $p$ increases; for instance, there are more weights away from zero for the cases $p = 3, 10$. These overall tendencies are also observed for other deep neural networks; see Appendix~\ref{sec:add-experiment-cifar-bias}.
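For reference, here is a minimal sketch of the coordinate-wise \ref{mdpp} update written as a PyTorch optimizer; this is an illustrative re-implementation using standard \texttt{torch} APIs, not the exact code released with the paper:
\begin{verbatim}
import torch

class PGD(torch.optim.Optimizer):
    """Sketch of p-GD: mirror descent with potential (1/p)*||w||_p^p."""
    def __init__(self, params, lr=1e-3, p=3.0):
        super().__init__(params, dict(lr=lr, p=p))

    @torch.no_grad()
    def step(self):
        for group in self.param_groups:
            lr, p = group["lr"], group["p"]
            for w in group["params"]:
                if w.grad is None:
                    continue
                # mirror map, applied coordinate-wise: sign(w) * |w|^(p-1)
                dual = w.sign() * w.abs().pow(p - 1) - lr * w.grad
                # inverse mirror map: sign(dual) * |dual|^(1/(p-1))
                w.copy_(dual.sign() * dual.abs().pow(1.0 / (p - 1)))
\end{verbatim}
Each coordinate is updated independently of the others, which is precisely the separability property discussed in Remark~\ref{rmk:sep}; with $p = 2$ the update reduces to plain (stochastic) gradient descent.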
\begin{figure} \caption{Histograms of the weights (in absolute value) of \textsc{ResNet-18} models trained with $p$-{\small \sf GD}\xspace for different values of $p$.} \label{fig:cifar10-hist} \end{figure} \begin{table}[] \centering \setlength{\tabcolsep}{5.4pt} \begin{tabular}{l| c|c|c|c} \hline & \hspace{1.5em} \textsc{VGG-11} \hspace{1.5em} & \hspace{0.75em} \textsc{ResNet-18} \hspace{0.75em} & \textsc{MobileNet-v2} & \textsc{RegNetX-200mf} \\ \hline \hline $p = 1.1$ & \pmval{88.19}{.17} & \pmval{92.63}{.12} & \pmval{91.16}{.09}& \pmval{91.21}{.18} \\ $p = 2$ (SGD) & \pmval{90.15}{.16} & \bpmval{93.90}{.14} & \pmval{91.97}{.10}& \pmval{92.75}{.13} \\ $p = 3$ & \bpmval{90.85}{.15} & \bpmval{94.01}{.13} & \bpmval{93.23}{.26}& \bpmval{94.07}{.12} \\ $p = 10$ & \pmval{88.78}{.37} & \pmval{93.55}{.21} & \pmval{92.60}{.22}& \pmval{92.97}{.16} \\ \hline \end{tabular} \caption{CIFAR-10 test accuracy (\%) of $p$-{\small \sf GD}\xspace on various deep neural networks. For each network and value of $p$, the average $\pm$ \textcolor{gray}{std. dev.} over 5 trials is reported. The best-performing value(s) of $p$ for each network is highlighted in \textbf{boldface}.} \label{tab:generalization-cifar10} \end{table} \paragraph{Generalization performance.} We next investigate the generalization performance of networks trained with different values of $p$. To this end, we adopt a fixed selection of hyper-parameters and train four deep neural network models to 100\% training accuracy using $p$-{\small \sf GD}\xspace with different $p$'s. As Table~\ref{tab:generalization-cifar10} shows, the networks trained by $p$-{\small \sf GD}\xspace with $p = 3$ interestingly outperform the other choices of $p$ consistently; notably, for \textsc{MobileNet} and \textsc{RegNet}, the case $p=3$ outperforms the others by more than 1\%. Somewhat counter-intuitively, the sparser network trained by $p$-{\small \sf GD}\xspace with $p = 1.1$ does not exhibit better generalization performance, but rather generalizes worse than the other values of $p$. \rebuttal{ Although these observations are not directly predicted by our theoretical results, we believe that they nevertheless establish an important step toward understanding the generalization of overparameterized models. } Due to the space limit, we defer further experimental results to Appendix~\ref{sec:add-experiment-cifar-generalization}. \paragraph{\textsc{ImageNet} experiments.} We also perform a similar set of experiments on the {\sc ImageNet} dataset~\citep{russakovsky2015imagenet}; these results can be found in Appendix \ref{sec:add-experiment-imagenet}. \section{Conclusion and Future Work} \label{sec:conclusion} In this paper, we take an important step towards a better understanding of implicit bias in the classification setting by showing that $p$-{\small \sf GD}\xspace converges in direction to the generalized regularized/max-margin directions. We also run several experiments that corroborate our main findings and demonstrate the practicality of $p$-{\small \sf GD}\xspace{}. The experiments are conducted in various settings: (i) linear models in both low and high dimensions, and (ii) real-world data with highly over-parameterized nonlinear models. We conclude this paper with several important future directions: \begin{list}{{\tiny $\blacksquare$}}{\leftmargin=1.5em} \setlength{\itemsep}{-1pt} \item Our analysis holds for $\psi(\cdot) = \frac{1}{p}\norm{\cdot}_p^p$, where we argued that this choice is of key practical interest due to its efficient algorithmic implementation.
It is mathematically interesting to generalize our analysis to other potential functions regardless of practical interest. \item As we discussed in Section \ref{sec:cifar}, different choices of $p$'s for our $p$-{\small \sf GD}\xspace algorithm result in different generalization performance. \rebuttal{It would be interesting to investigate this phenomenon and to develop theory that explains why certain values of $p$ lead to better generalization performance}. \item Another interesting question is to further investigate how practical techniques used in training neural networks (such as weight decay and adaptive learning rate) can affect the implicit bias and generalization properties of $p$-{\small \sf GD}\xspace. \end{list} \section*{Checklist} \begin{comment} The checklist follows the references. Please read the checklist guidelines carefully for information on how to answer these questions. For each question, change the default \answerTODO{} to \answerYes{}, \answerNo{}, or \answerNA{}. You are strongly encouraged to include a {\bf justification to your answer}, either by referencing the appropriate section of your paper or providing a brief inline description. For example: \begin{itemize} \item Did you include the license to the code and datasets? \answerYes{See Section~\ref{gen_inst}.} \item Did you include the license to the code and datasets? \answerNo{The code and the data are proprietary.} \item Did you include the license to the code and datasets? \answerNA{} \end{itemize} Please do not modify the questions and only use the provided macros for your answers. Note that the Checklist section does not count towards the page limit. In your paper, please delete this instructions block and only keep the Checklist section heading above along with the questions/answers below. \end{comment} \begin{enumerate} \item For all authors... \begin{enumerate} \item Do the main claims made in the abstract and introduction accurately reflect the paper's contributions and scope? \answerYes{All of our claims accurately reflect the results in Sections~\ref{sec:main-result} and \ref{sec:experiments}. } \item Did you describe the limitations of your work? \answerYes{In Section~\ref{sec:conclusion}, we described several directions where we can improve this work. } \item Did you discuss any potential negative societal impacts of your work? \answerNA{Our paper investigates the foundational properties of mirror descent algorithms in learning; we do not believe there are any direct societal impacts.} \item Have you read the ethics review guidelines and ensured that your paper conforms to them? \answerYes{ } \end{enumerate} \item If you are including theoretical results... \begin{enumerate} \item Did you state the full set of assumptions of all theoretical results? \answerYes{See Section~\ref{sec:priliminaries} for the assumptions we used and the motivation behind them. } \item Did you include complete proofs of all theoretical results? \answerYes{All proofs are included in the Appendix, and we have referenced them as we present our claims. } \end{enumerate} \item If you ran experiments... \begin{enumerate} \item Did you include the code, data, and instructions needed to reproduce the main experimental results (either in the supplemental material or as a URL)? \answerYes{They are included in the supplemental material.} \item Did you specify all the training details (e.g., data splits, hyperparameters, how they were chosen)? 
\answerYes{ We gave an overview of our training setup in Section~\ref{sec:experiments} and the full details are given in Appendix~\ref{sec:experiment-detail}. } \item Did you report error bars (e.g., with respect to the random seed after running experiments multiple times)? \answerYes{We reported the standard deviation whenever multiple trials were performed. } \item Did you include the total amount of compute and the type of resources used (e.g., type of GPUs, internal cluster, or cloud provider)? \answerYes{ They are reported in Appendix~\ref{sec:experiment-detail}. } \end{enumerate} \item If you are using existing assets (e.g., code, data, models) or curating/releasing new assets... \begin{enumerate} \item If your work uses existing assets, did you cite the creators? \answerYes{} \item Did you mention the license of the assets? \answerNA{We only used public datasets.} \item Did you include any new assets either in the supplemental material or as a URL? \answerNA{We did not curate any new assets.} \item Did you discuss whether and how consent was obtained from people whose data you're using/curating? \answerNA{} \item Did you discuss whether the data you are using/curating contains personally identifiable information or offensive content? \answerNA{The datasets we used are well-known; so, we did not feel it was necessary to repeat that they do not contain sensitive information.} \end{enumerate} \item If you used crowdsourcing or conducted research with human subjects... \begin{enumerate} \item Did you include the full text of instructions given to participants and screenshots, if applicable? \answerNA{There were no human subjects in our work.} \item Did you describe any potential participant risks, with links to Institutional Review Board (IRB) approvals, if applicable? \answerNA{} \item Did you include the estimated hourly wage paid to participants and the total amount spent on participant compensation? \answerNA{} \end{enumerate} \end{enumerate} \appendix \section{Proofs for Section~\ref{sec:priliminaries}} \label{sec:proof-basic-lemmas} \subsection{Proof of Lemma~\ref{thm:key-iden}} The following proof is adopted from~\citep{azizan2021stochastic}. We make several small modifications to better fit the classification setting in this paper. In particular, in classification, there is no $w \in \mathbb{R}^d$ that satisfies $L(w) = 0$. \begin{proof} We start with the definition of Bregman divergence: \[\brg{w}{w_{t+1}} = \psi(w) - \psi(w_{t+1}) - \inp{\nablaabla\psi(w_{t+1})}{w-w_{t+1}}.\] Now, we plugin the \ref{equ:md} update rule $\nablaabla\psi(w_{t+1}) = \nablaabla\psi(w_t) - \eta\nablaabla L(w_t)$: \[\brg{w}{w_{t+1}} = \psi(w) - \psi(w_{t+1}) - \inp{\nablaabla\psi(w_{t})}{w - w_{t+1}} + \eta \inp{\nablaabla L(w_t)}{w - w_{t+1}}. \] We again invoke the definition of Bregman divergence so that: \begin{align*} \brg{w}{w_{t+1}} &= \psi(w) - \psi(w_{t+1}) - \inp{\nablaabla\psi(w_{t+1})}{w-w_{t+1}}, \\ \brg{w_{t+1}}{w_t} &= \psi(w_{t+1}) - \psi(w_t) - \inp{\nablaabla\psi(w_t)}{w_{t+1}-w_t}. 
\end{align*} It follows that \begin{equation} \label{equ:proof-key-iden-1} \begin{aligned} \brg{w}{w_{t+1}} &= \psi(w) - \psi(w_{t}) - \inp{\nabla\psi(w_{t})}{w - w_{t}} \\ &\hspace{7.25em}+ \inp{\nabla\psi(w_{t})}{w_{t+1} - w_t} - \psi(w_{t+1}) + \psi(w_t) \\ &\hspace{7.25em}+ \eta \inp{\nabla L(w_t)}{w - w_{t+1}} \\ &= \brg{w}{w_t} - \brg{w_{t+1}}{w_t} + \eta \inp{\nabla L(w_t)}{w - w_{t+1}} \end{aligned} \end{equation} Next, we consider the term $\inp{\nabla L(w_t)}{w - w_{t+1}}$: \begin{equation} \label{equ:proof-key-iden-2} \begin{aligned} \inp{\nabla L(w_t)}{w - w_{t+1}} &= \inp{\nabla L(w_t)}{w - w_t} - \inp{\nabla L(w_t)}{w_{t+1} - w_t} \\ &\hspace{8em}+ L(w_{t+1}) - L(w_t) - L(w_{t+1}) + L(w_t) \\ &= \inp{\nabla L(w_t)}{w - w_t} + D_L(w_{t+1}, w_t) - L(w_{t+1}) + L(w_t), \end{aligned} \end{equation} where the last step uses the definition of the Bregman divergence associated with $L$. Combining \eqref{equ:proof-key-iden-1} and \eqref{equ:proof-key-iden-2} yields: \begin{align*} &\brg{w}{w_t} \\ ={}& \brg{w}{w_{t+1}} + \brg{w_{t+1}}{w_t} -\eta\big( \inp{\nabla L(w_t)}{w - w_t} + D_L(w_{t+1}, w_t) - L(w_{t+1}) + L(w_t)\big)\\ ={}& \brg{w}{w_{t+1}} + \breg{\psi-\eta L}{w_{t+1}}{w_t} - \eta \inp{\nabla L(w_t)}{w - w_t} + \eta L(w_{t+1}) - \eta L(w_t), \end{align*} where in the last step we used that the Bregman divergence is additive in its potential. This gives us \eqref{equ:key-iden-2}. For \eqref{equ:key-iden-1}, we use the definition of the Bregman divergence again, i.e., $D_L(w, w_t) = L(w) - L(w_t) - \inp{\nabla L (w_t)}{w - w_t}$: \begin{align*} \brg{w}{w_t} &= \brg{w}{w_{t+1}} + \breg{\psi-\eta L}{w_{t+1}}{w_t} - \eta \inp{\nabla L(w_t)}{w - w_t} \\ &\hspace{8em} + \eta L(w) - \eta L(w_t) + \eta L(w_{t+1}) - \eta L(w) \\ &= \brg{w}{w_{t+1}} + \breg{\psi-\eta L}{w_{t+1}}{w_t} + \eta D_L(w, w_t) - \eta L(w) + \eta L(w_{t+1}) \end{align*} \end{proof} \subsection{Proof of Lemma~\ref{thm:decreasing-lose}} \begin{proof} This is an application of Lemma~\ref{thm:key-iden} with $w = w_t$: \begin{align*} 0 &= D_\psi(w_t, w_{t+1}) + D_{\psi - \eta L}(w_{t+1}, w_t) - \eta L(w_t) + \eta L(w_{t+1}) \\ \implies \eta L(w_t) &= D_\psi(w_t, w_{t+1}) + D_{\psi - \eta L}(w_{t+1}, w_t) + \eta L(w_{t+1}) \ge \eta L(w_{t+1}), \end{align*} where we used the fact that the Bregman divergence of a convex potential function is non-negative. \end{proof} \subsection{Proof of Lemma~\ref{thm:to-infinity}} \begin{proof} By Lemma~\ref{thm:decreasing-lose}, $L(w_t)$ is non-increasing in $t$, and therefore the limit exists. Suppose to the contrary that $\lim_{t\to\infty} L(w_t) = \varepsilon > 0$. Since the data is separable, we can pick $w$ so that $L(w) \le \varepsilon / 2$. Applying Lemma~\ref{thm:key-iden}, the following holds for all $t$: \begin{align*} D_\psi(w, w_{t+1}) &= D_\psi(w, w_{t}) - D_{\psi - \eta L}(w_{t+1}, w_t) - \eta D_{L}(w, w_t) + \eta L(w) - \eta L(w_{t+1}) \\ &\le D_\psi(w, w_{t}) + \eta\varepsilon/2 - \eta\varepsilon = D_\psi(w, w_{t}) - \eta\varepsilon/2. \end{align*} Hence, $D_\psi(w, w_{t}) \le D_\psi(w, w_0) - t\eta\varepsilon / 2$, which implies that $D_\psi(w, w_{t}) \to -\infty$ as $t \to \infty$ and contradicts the non-negativity of the Bregman divergence. \end{proof} \section{Proofs for Section~\ref{sec:main-result}} \subsection{Proof of Lemma~\ref{thm:approx-reg-dir-loss}} \begin{proof} Let $\bar{\gamma}$ be the margin of $\reg{p}$. Under separability, we know $\bar{\gamma} > 0$. Recall the definition of the regularization path.
There exists sufficiently large $r_\alpha$ so that \[ \nablaorm{\frac{\bar{w}_p(\nablaorm{w}_p)}{\nablaorm{w}_p} - \reg{p}}_p \le \frac{\alpha \bar{\gamma}}{C} \] whenever $\nablaorm{w}_p \ge r_\alpha$. Recall the definition that $C = \max_{i = 1, \dots, n} \nablaorm{x_i}_q, 1/p + 1/q = 1$. Then, for all $i \in [n]$, we have \begin{align*} y_i \inp{\bar{w}(\nablaorm{w}_p)}{x_i} &= y_i \inp{\bar{w}(\nablaorm{w}_p) - \nablaorm{w}_p\reg{p}}{x_i} + y_i \inp{\nablaorm{w}_p\reg{p}}{x_i} \\ &\le \alpha \bar{\gamma} \nablaorm{w}_p \nablaorm{x_i}_q / C + y_i \inp{\nablaorm{w}_p\reg{p}}{x_i} \\ &\le \alpha \bar{\gamma} \nablaorm{w}_p + y_i \inp{\nablaorm{w}_p\reg{p}}{x_i} \\ &\le y_i \inp{(1+\alpha) \nablaorm{w}_p \reg{p}}{x_i} \end{align*} Since the loss $L$ is decreasing, we have \[L((1+\alpha)\nablaorm{w}_p\reg{p}) \le L(\bar{w}(\nablaorm{w}_p)) \le L(w). \] \end{proof} \subsection{Lower bounding the mirror descent updates} \label{sec:cross-term-diff} \begin{lemma} \label{thm:cross-term-diff} For $\psi(\cdot) = \frac{1}{p} \nablaorm{\cdot}_p^p$ with $p > 1$, the mirror descent update satisfies the following inequality: \begin{equation} \frac{p-1}{p} \nablaorm{w_{t+1}}_p^p - \frac{p-1}{p} \nablaorm{w_{t}}_p^p + \eta L (w_{t+1}) - \eta L (w_{t}) \le \inp{-\eta\nablaabla L (w_{t})}{w_t} \end{equation} \end{lemma} \begin{proof} This result follows from Lemma~\ref{thm:key-iden} with $w = 0$: \begin{align*} \brg{0}{w_t} &= \brg{0}{w_{t+1}} + \breg{\psi-\eta L}{w_{t+1}}{w_t} + \eta \brgl{0}{w_t}+ \eta L(w_{t+1}) - \eta L(0) \\ &\ge \brg{0}{w_{t+1}} + \eta \brgl{0}{w_t}+ \eta L(w_{t+1}) - \eta L(0) \\ &= \brg{0}{w_{t+1}} + \eta(L(0) - L(w_t) - \inp{\nablaabla L(w_t)}{-w_t}) + \eta L(w_{t+1}) - \eta L(0) \\ &= \brg{0}{w_{t+1}} + \eta \inp{\nablaabla L(w_t)}{w_t} + \eta L(w_{t+1}) - \eta L(w_t) \end{align*} Rearranging the terms yields \[ \brg{0}{w_{t+1}} - \brg{0}{w_{t}} + \eta L (w_{t+1}) - \eta L (w_{t}) \le \inp{-\eta\nablaabla L (w_{t})}{w_t} \] We conclude the proof by noting that for any $w \in \mathbb{R}^d$, \[\brg{0}{w} = \psi(0) - \psi(w) - \inp{\nablaabla\psi(w)}{-w} = \inp{\nablaabla\psi(w)}{w} - \psi(w) = \frac{p-1}{p} \nablaorm{w}_p^p \] \end{proof} \subsection{Proof of Theorem~\ref{thm:primal-bias}} \label{sec:proof-primal-bias} \begin{proof} Consider arbitrary $\alpha \in (0, 1)$ and define $r_\alpha$ according to Lemma~\ref{thm:approx-reg-dir-loss}. Since $\lim_{t\to\infty}\nablaorm{w_t}_p = \infty$, we can find $t_0$ so that $\nablaorm{w_t}_p > \max(1, r_\alpha)$ for all $t \ge t_0$. Let $c_t = (1+\alpha)\nablaorm{w_t}_p$. We list some properties about $\psi(\cdot) = \frac{1}{p}\nablaorm{\cdot}_p^p$ that will be useful in our analysis: \begin{align*} \nablaabla \psi (w) &= (\mathrm{sign}(w_1)|w_1|^{p-1},\cdots, \mathrm{sign}(w_d)|w_d|^{p-1})\\ \inp{\nablaabla \psi (w)}{w} &= \mathrm{sign}(w_1)w_1|w_1|^{p-1} + \cdots + \mathrm{sign}(w_d)w_d|w_d|^{p-1} = \nablaorm{w}_p^p\\ \nablaorm{\nablaabla\psi(w)}_q &= \nablaorm{w}_p^{p-1}, \text{ for } 1/p+1/q = 1 \\ \brg{c w}{c w'} &= |c|^p \brg{w}{w'} \quad \forall c\in \mathbb{R}. \end{align*} Substitute $w = c_t \reg{p}$ into Lemma~\ref{thm:key-iden}, we get \[\brg{c_t\reg{p}}{w_{t+1}} \le \brg{c_t\reg{p}}{w_t} + \eta\inp{\nablaabla L(w_t)}{c_t \reg{p} - w_t} - \eta L(w_{t+1}) + \eta L(w_t).\] By Corollary~\ref{thm:cross-term}, we have $\inp{\nablaabla L(w_t)}{c_t \reg{p} - w_t} \le 0$. 
Therefore, \[\brg{c_t\reg{p}}{w_{t+1}} \le \brg{c_t\reg{p}}{w_t} - \eta L(w_{t+1}) + \eta L(w_t).\] It follows that \begin{align*} &\brg{c_{t+1}\reg{p}}{w_{t+1}} \\ \le{}& \brg{c_t\reg{p}}{w_t} - \eta L(w_{t+1}) + \eta L(w_t) + \brg{c_{t+1}\reg{p}}{w_{t+1}} - \brg{c_t\reg{p}}{w_{t+1}} \\ ={}& \brg{c_t\reg{p}}{w_t} - \eta L(w_{t+1}) + \eta L(w_t) + \psi(c_{t+1} \reg{p}) - \psi(c_t \reg{p}) - \inp{\nablaabla\psi(w_{t+1})}{(c_{t+1} - c_t) \reg{p}} \end{align*} Summing over $t = t_0, \dots, T-1$ gives us \begin{align} \brg{c_T\reg{p}}{w_T} &\le \brg{c_{t_0}\reg{p}}{w_{t_0}} - \eta L(w_{t_0}) + \eta L(w_T) + \psi(c_{T} \reg{p}) - \psi(c_{t_0} \reg{p}) \nablaonumber\\ &\quad\quad- \sum_{t=t_0}^{T-1}\inp{\nablaabla\psi(w_{t+1})}{(c_{t+1} - c_t) \reg{p}} \label{equ:succ-cross-term} \end{align} Now we want to establish a lower bound on the last term of \eqref{equ:succ-cross-term}. To do so, we inspect the change in $\nablaabla\psi(w_t)$ from each successive mirror descent update: \begin{subequations} \begin{align} &\inp{\nablaabla\psi(w_{t+1}) - \nablaabla\psi(w_t)}{\reg{p}} \\ ={}& \inp{-\eta\nablaabla L(w_{t})}{\reg{p}} \\ \ge{}& \frac{1}{(1+\alpha)\nablaorm{w_t}_p} \inp{-\eta\nablaabla L(w_t)}{w_t} \label{equ:cross-term-expansion-L1}\\ \ge{}& \frac{1}{(1+\alpha)\nablaorm{w_t}_p} \left(\frac{p-1}{p} \nablaorm{w_{t+1}}_p^p - \frac{p-1}{p} \nablaorm{w_{t}}_p^p + \eta L(w_{t+1}) - \eta L(w_{t})\right) \label{equ:cross-term-expansion-L2} \\ \ge{}& \frac{1}{(1+\alpha)\nablaorm{w_t}_p} \left(\frac{p-1}{p} \nablaorm{w_{t+1}}_p^p - \frac{p-1}{p} \nablaorm{w_{t}}_p^p\right) + \eta L(w_{t+1}) - \eta L(w_{t}) \label{equ:cross-term-expansion-L3} \end{align} \end{subequations} where we applied Corollary~\ref{thm:cross-term} on \eqref{equ:cross-term-expansion-L1} and Lemma~\ref{thm:cross-term-diff} on \eqref{equ:cross-term-expansion-L2}. Now we bound \eqref{equ:cross-term-expansion-L3}. We claim the following identity and defer its derivation to Section~\ref{sec:main-thm-aux-pow}. \begin{equation} \label{equ:p-norm-diff} \frac{p-1}{p} (\nablaorm{w_{t+1}}_p^p - \nablaorm{w_t}_p^p) \ge (\nablaorm{w_{t+1}}_p^{p-1} - \nablaorm{w_{t}}_p^{p-1}) \nablaorm{w_t}_p. \end{equation} We are left with \begin{equation*} \inp{\nablaabla\psi(w_{t+1}) - \nablaabla\psi(w_t)}{\reg{p}} \ge \frac{\nablaorm{w_{t+1}}_p^{p-1} - \nablaorm{w_{t}}_p^{p-1}}{1+\alpha} + \eta L(w_{t+1}) - \eta L(w_{t}). \end{equation*} Summing over $t = t_0, \dots, T-1$ gives us \begin{equation}\label{equ:mirror-cross-term} \inp{\nablaabla\psi(w_{T}) - \nablaabla\psi(w_{t_0})}{\reg{p}} \ge \frac{\nablaorm{w_T}_p^{p-1} - \nablaorm{w_{t_0}}_p^{p-1}}{1+\alpha} + \eta L(w_{T}) - \eta L(w_{t_0}). \end{equation} With \eqref{equ:mirror-cross-term}, we can bound the last term of \eqref{equ:succ-cross-term} as follows: \begin{align} \sum_{t=t_0}^{T-1}\inp{\nablaabla\psi(w_{t+1})}{(c_{t+1} - c_t) \reg{p}} \nablaonumber &\ge \sum_{t=t_0+1}^{T} \frac{\nablaorm{w_t}_p^{p-1} + O(1)}{1+\alpha}(c_t - c_{t-1}) \nablaonumber\\ &= \sum_{t=t_0+1}^{T} (\nablaorm{w_t}_p^{p-1} + O(1))(\nablaorm{w_t}_p - \nablaorm{w_{t-1}}_p) \nablaonumber\\ &\ge \sum_{t=t_0+1}^{T} \frac{1}{p}(\nablaorm{w_t}_p^{p} - \nablaorm{w_{t-1}}_p^p) + O(1) \cdot (\nablaorm{w_T}_p - \nablaorm{w_{t_0}}_p)\nablaonumber\\ &= \frac{1}{p} \nablaorm{w_T}_p^p + O(\nablaorm{w_T}_p) \label{equ:cross-telescoping} \end{align} where we defer the computation on the last inequality to Section~\ref{sec:main-thm-aux-pow}. We now apply the inequality in \eqref{equ:cross-telescoping} to \eqref{equ:succ-cross-term}. 
Note that $\psi(c_T \reg{p}) = \frac{1}{p}(1+\alpha)^p\nablaorm{w_T}_p^p$. We now have the following: \[ \brg{(1+\alpha)\nablaorm{w_T}_p \reg{p}}{w_T} \le \frac{1}{p} \nablaorm{w_T}_p^p ((1+\alpha)^p - 1) + O(\nablaorm{w_T}_p).\] After applying homogeneity of Bregman divergence, and recalling that $\alpha = \frac{\varepsilon}{1-\varepsilon}$, we have \[ \brg{\reg{p}}{(1-\varepsilon)\frac{w_T}{\nablaorm{w_T}_p}} \le \frac{\frac{1}{p} \nablaorm{w_T}_p^p (1 - (1-\varepsilon)^{p})}{\nablaorm{w_T}_p^p} + o(1).\] Let $\tilde{w}_T = \frac{w_T}{\nablaorm{w_T}_p}$. We note that Bregman divergence in fact satisfies the Law of Cosine: \begin{lemma}[Law of Cosine] \label{thm:breg-loc} \begin{equation*} \brg{w}{w'} = \brg{w}{w''} + \brg{w''}{w'} - \inp{\nablaabla\psi(w') - \nablaabla\psi(w'')}{w - w''} \end{equation*} \end{lemma} Therefore, \begin{equation} \label{equ:final-limit} \begin{aligned} \brg{\reg{p}}{\tilde{w}_T} &\le \frac{\frac{1}{p} \nablaorm{w_T}_p^p (1 - (1-\varepsilon)^{p})}{\nablaorm{w_T}_p^p} + \brg{(1-\varepsilon)\tilde{w}_T}{\tilde{w}_T} \\ &\hspace{6em} - \inp{\nablaabla\psi(\tilde{w}_T) - \nablaabla\psi((1-\varepsilon)\tilde{w}_T)}{\reg{p} - (1-\varepsilon)\tilde{w}_T} + o(1) \\ &\le \frac{1}{p} (1 - (1-\varepsilon)^{p}) + \frac{1}{p}((1-\varepsilon)^p - 1) + \varepsilon + 2d^{1/p}(1 - (1-\varepsilon)^p) + o(1) \end{aligned} \end{equation} And we defer the computation for the last inequality to Section~\ref{sec:main-thm-aux-pow}. Taking the limit as $T \to \infty$ and $\varepsilon \to 0$, we have that \begin{equation} \begin{aligned} \limsup_{T\to\infty} \brg{\reg{p}}{\frac{w_T}{\nablaorm{w_T}_p}} &\le \varepsilon + 2d^{1/p}(1 - (1-\varepsilon)^p) \end{aligned} \end{equation} Note that the RHS vanishes in the limit as $\varepsilon \to 0$. Since the choice of $\varepsilon$ is arbitrary, we have $w_T/\nablaorm{w_T}_p \to \reg{p}$ as $T \to\infty$. \end{proof} \subsection{Auxiliary Computation for Section~\ref{sec:proof-primal-bias}} \label{sec:main-thm-aux-pow} To show \eqref{equ:p-norm-diff}, we claim that for $\delta \ge -1$ and $p > 1$, we have \[ \frac{p-1}{p} ((1+\delta)^p - 1) \ge (1+\delta)^{p-1} - 1. \] Note that we equality when $\delta=0$, and now we consider the first derivative: \[\frac{d}{d\delta} \left\{\frac{p-1}{p} ((1+\delta)^p - 1) - (1+\delta)^{p-1} + 1 \right\} = (p-1)\delta(1+\delta)^{p-2},\] which is negative when $\delta \in [-1, 0)$ and positive when $\delta > 0$, so this identity holds. Now, \eqref{equ:p-norm-diff} follows from setting $\delta = (\nablaorm{w_{t+1}}_p - \nablaorm{w_t}_p)/\nablaorm{w_{t}}_p$ and then multiplying by $\nablaorm{w_t}_p^p$ on both sides. To finish showing \eqref{equ:cross-telescoping}, we claim that for $\delta \ge -1$ and $p > 1$, we have \[ \frac{1}{p}((1+\delta)^p - 1) \le \delta (1+\delta)^{p-1}. \] Note that we equality when $\delta=0$, and now we consider the first derivative: \[\frac{d}{d\delta} \left\{\frac{1}{p}((1+\delta)^p - 1) - \delta (1+\delta)^{p-1}\right\} = -(p-1)\delta(1+\delta)^{p-2},\] which is positive when $\delta \in [-1, 0)$ and negative when $\delta > 0$, so this identity holds. Now, the last inequality of \eqref{equ:cross-telescoping} follows by setting $\delta = (\nablaorm{w_{t}}_p - \nablaorm{w_{t-1}}_p)/\nablaorm{w_{t-1}}_p$ and then multiply by $\nablaorm{w_t}_p^p$ on both sides. 
Finally, we simplify the RHS of \eqref{equ:final-limit} by taking advantage of the fact that $\tilde{w}_T$ is normalized:
\begin{align*}
\brg{(1-\varepsilon)\tilde{w}_T}{\tilde{w}_T} &= (1-\varepsilon)^p \psi(\tilde{w}_T) - \psi(\tilde{w}_T) + \inp{\nabla\psi(\tilde{w}_T)}{\varepsilon \tilde{w}_T} \\
&= \frac{1}{p}((1-\varepsilon)^p - 1) + \varepsilon
\end{align*}
\begin{align*}
&\left|\inp{\nabla\psi(\tilde{w}_T) - \nabla\psi((1-\varepsilon)\tilde{w}_T)}{\reg{p} - (1-\varepsilon)\tilde{w}_T}\right|\\
={}& \left|\inp{(1-(1-\varepsilon)^{p-1})\nabla\psi(\tilde{w}_T)}{\reg{p} - (1-\varepsilon)\tilde{w}_T}\right|\\
\le{}& (1 - (1-\varepsilon)^p) \|\nabla\psi(\tilde{w}_T)\|_q \cdot \|\reg{p} - (1-\varepsilon)\tilde{w}_T\|_p \\
={}& (1 - (1-\varepsilon)^p) \|\tilde{w}_T\|_p^{p-1} \cdot \|\reg{p} - (1-\varepsilon)\tilde{w}_T\|_p \\
\le{}& 2d^{1/p}(1 - (1-\varepsilon)^p),
\end{align*}
where the first equality uses that $\nabla\psi$ is homogeneous of degree $p-1$, and the first inequality uses $(1-\varepsilon)^{p} \le (1-\varepsilon)^{p-1}$.
\subsection{Proof of Theorem~\ref{thm:reg-max-dir}} \label{sec:proof-reg-max-dir}
\begin{proof}
We first show that $\mmd{p}$ is unique. Suppose to the contrary that there are two distinct unit $p$-norm vectors $u_1 \neq u_2$, both achieving the maximum margin $\mar{p}$. Then $u_3 = (u_1 + u_2)/2$ satisfies
\[ \forall i, \quad y_i \inp{u_3}{x_i} = \frac{1}{2} y_i \inp{u_1}{x_i} + \frac{1}{2} y_i \inp{u_2}{x_i} \ge \mar{p}. \]
Therefore, $u_3$ has margin at least $\mar{p}$. Since $\|\cdot\|_p$ is strictly convex, we must have $\|u_3\|_p < 1$. Therefore, the margin of $u_3 / \|u_3\|_p$ is strictly greater than $\mar{p}$, a contradiction.
Define $\beta > 0$ so that $\ell(z) e^{az} \in [b/2, 2b]$ whenever $z \ge \beta\mar{p}/2$. Note that
\[L(B\mmd{p}) = \sum_{i=1}^n \ell(y_i\inp{B\mmd{p}}{x_i}) \le n \cdot \ell(B\mar{p}) \le 2 b n \cdot \exp(-aB\mar{p}).\]
Suppose to the contrary that the regularized direction does not converge to $\mmd{p}$. Then there must exist $\mar{p}/2 > \varepsilon > 0$ so that there are arbitrarily large values of $B$ satisfying
\[\min_{i=1, \dots, n} y_i \inp{\frac{\bar{w}(B)}{B}}{x_i} \le \mar{p} - \varepsilon. \]
This implies
\[ L(\bar{w}(B)) \ge \ell(B(\mar{p} - \varepsilon)) \ge \frac{b}{2} \exp(-a B\mar{p}) \exp(a B\varepsilon).\]
Then, for sufficiently large $B > \beta$, we have $\exp(aB\varepsilon) > 4 n$, and hence $L(\bar{w}(B)) > L(B\mmd{p})$, a contradiction. Therefore, the regularized direction exists and $\reg{p} = \mmd{p}$.
\end{proof}
\subsection{Simpler proof of Theorem~\ref{thm:primal-bias}}
For the potential function $\psi(\cdot) = \frac{1}{p} \|\cdot\|_p^p$, we can avoid most of the calculations involving \eqref{equ:succ-cross-term} by directly computing the Bregman divergence. However, we note that this approach is less general and does not highlight the role of $\reg{p}$ as clearly.
\begin{proof}
Consider an arbitrary $\alpha \in (0, 1)$. Since $\lim_{t\to\infty}\|w_t\|_p = \infty$, we can find $t_0$ so that $\|w_t\|_p > \max(1, r_\alpha)$ for all $t \ge t_0$. For $T > t_0$, define $\tilde{w}_T = \frac{w_T}{\|w_T\|_p}$.
We can perform the following manipulation on the Bregman divergence:
\begin{equation} \label{equ:breg-expa}
\begin{aligned}
\brg{\reg{p}}{\tilde{w}_T} &= \psi(\reg{p}) - \psi\left(\tilde{w}_T\right) - \inp{\nabla\psi\left(\tilde{w}_T\right)}{\reg{p} - \tilde{w}_T} \\
&= \psi(\reg{p}) - \psi\left(\tilde{w}_T\right) + \inp{\nabla\psi(\tilde{w}_T)}{\tilde{w}_T} - \inp{\nabla\psi(\tilde{w}_T)}{\reg{p}} \\
&= \frac{1}{p}\|\reg{p}\|_p^p - \frac{1}{p} \|\tilde{w}_T\|_p^p + \|\tilde{w}_T\|_p^p - \inp{\nabla\psi(\tilde{w}_T)}{\reg{p}} \\
&= 1 - \inp{\nabla\psi(\tilde{w}_T)}{\reg{p}}
\end{aligned}
\end{equation}
Dividing both sides of \eqref{equ:mirror-cross-term} by $\|w_T\|_p^{p-1}$ and taking the limit as $T \to \infty$ yields
\begin{equation}\label{equ:cross-term-limit}
\liminf_{T\to\infty} \frac{1}{\|w_T\|_p^{p-1}}\inp{\nabla\psi(w_{T})}{\reg{p}} \ge \frac{1}{1+\alpha}.
\end{equation}
Now, substituting \eqref{equ:cross-term-limit} into \eqref{equ:breg-expa} results in
\begin{align*}
\limsup_{T\to\infty} \brg{\reg{p}}{\frac{w_T}{\|w_T\|_p}} &= 1 - \liminf_{T\to\infty} \inp{\nabla\psi\left(\frac{w_T}{\|w_T\|_p}\right)}{\reg{p}} \\
&= 1 - \liminf_{T\to\infty} \frac{1}{\|w_T\|_p^{p-1}} \inp{\nabla\psi(w_{T})}{\reg{p}} \\
&\le 1 - \frac{1}{1+\alpha} < \alpha.
\end{align*}
Since the value of $\alpha$ is arbitrary, we can conclude that
\[\lim_{T\to\infty} \brg{\reg{p}}{\frac{w_T}{\|w_T\|_p}} = 0.\]
\end{proof}
\section{Proofs for Section~\ref{sec:asymp-result}} \label{sec:proof-asymp-result}
\subsection{Proof of Corollary~\ref{thm:convg-rate}}
\begin{proof}
This is an immediate consequence of \eqref{equ:breg-expa} and \eqref{equ:cross-term-limit}.
\end{proof}
\subsection{Proof of Lemma~\ref{thm:norm-rate}}
For the following proof, we assume without loss of generality that $y_i = 1$ by replacing every instance of $(x_i, -1)$ with $(-x_i, 1)$.
\begin{proof}
For the upper bound, we consider the reference vector $w^\star = \mar{p}^{-1} \mmd{p}$. By the definition of the max-margin direction, the margin of $w^\star$ is 1 and $\|w^\star\|_p = \mar{p}^{-1}$. From Lemma~\ref{thm:key-iden}, we have
\begin{align*}
D_\psi(w^\star \log T, w_t) = D_\psi(w^\star \log T, w_{t+1}) + D_{\psi - \eta L}(w_{t+1}, w_{t}) &- \inp{\nabla L(w_t)}{w^\star \log T - w_t} \\
&- \eta L(w_{t}) + \eta L(w_{t+1}).
\end{align*}
We first bound the quantity $\inp{\nabla L(w_t)}{w^\star \log T - w_t}$ by expanding the definition of the exponential loss:
\begin{align*}
&\inp{\nabla L(w_t)}{w^\star \log T - w_t} \\
={}& \sum_{i=1}^n \inp{\nabla \exp(-\inp{w_t}{x_i})}{w^\star \log T - w_t} \\
={}& \sum_{i=1}^n \inp{\exp(-\inp{w_t}{x_i})x_i}{w_t - w^\star \log T} \\
={}& \sum_{i=1}^n \exp(-\inp{w^\star \log T}{x_i}) \exp(-\inp{w_t - w^\star \log T}{x_i}) \inp{x_i}{w_t - w^\star \log T} \\
\le{}& \sum_{i=1}^n \frac{1}{T} \cdot \frac{1}{e} = \frac{n}{eT},
\end{align*}
where the last line follows from the definition of $w^\star$ and the fact that for any $x \in \mathbb{R}$, we have $e^{-x} x \le 1/e$. It follows that
\[ D_\psi(w^\star \log T, w_t) \ge D_\psi(w^\star \log T, w_{t+1}) - \frac{n}{eT} - \eta L(w_{t}) + \eta L(w_{t+1}).
\] Summing over $t = 0, \dots, T-1$ gives us
\[ D_\psi(w^\star \log T, w_0) \ge D_\psi(w^\star \log T, w_T) - \frac{n}{e} - \eta L(w_0) + \eta L(w_T). \]
Since the Bregman divergence with respect to the $p$th power of the $\ell_p$-norm is homogeneous, we can divide both sides by a factor of $\log^p T$:
\begin{equation} \label{equ:lim-reference-scale}
D_\psi\left(w^\star, \frac{w_0}{\log T}\right) \ge D_\psi\left(w^\star, \frac{w_T}{\log T}\right) - o(1).
\end{equation}
As $T\to\infty$, the left-hand side converges to $\brg{w^\star}{0} = \psi(w^\star) = \frac{1}{p} \mar{p}^{-p}$. Let $\tilde{w} = w_T / \log T$; we expand the right-hand side as
\begin{align*}
\brg{w^\star}{\tilde{w}} &= \psi(w^\star) - \psi(\tilde{w}) - \inp{\nabla\psi(\tilde{w})}{w^\star - \tilde{w}} \\
&= \frac{1}{p} \mar{p}^{-p} + \frac{p-1}{p} \|\tilde{w}\|_p^p - \inp{\nabla\psi(\tilde{w})}{w^\star} \\
&\ge \frac{1}{p} \mar{p}^{-p} + \frac{p-1}{p} \|\tilde{w}\|_p^p - \mar{p}^{-1} \|\nabla\psi(\tilde{w})\|_q
\end{align*}
for $1/p + 1/q = 1$. Recall that $\psi = \frac{1}{p}\|\cdot\|_p^p$ has the following nice properties:
\begin{align*}
\nabla \psi (w) &= (\mathrm{sign}(w_1)|w_1|^{p-1},\cdots, \mathrm{sign}(w_d)|w_d|^{p-1}),\\
\inp{\nabla \psi (w)}{w} &= \mathrm{sign}(w_1)w_1|w_1|^{p-1} + \cdots + \mathrm{sign}(w_d)w_d|w_d|^{p-1} = \|w\|_p^p.
\end{align*}
So, we can further simplify $\|\nabla\psi(\tilde{w})\|_q$:
\begin{align*}
\|\nabla\psi(\tilde{w})\|_q &= \left(\sum_{i=1}^d |\tilde{w}_i|^{(p-1)q} \right)^{1/q} \\
&= \left(\sum_{i=1}^d |\tilde{w}_i|^{p} \right)^{1/q} \\
&= \|\tilde{w}\|_p^{p/q} = \|\tilde{w}\|_p^{p-1},
\end{align*}
where we note that because $1/p + 1/q = 1$, we also have $pq = p + q$ and $1 + p/q = p$. Now, we have
\[\brg{w^\star}{\tilde{w}} \ge \frac{1}{p} \mar{p}^{-p} + \frac{p-1}{p} \|\tilde{w}\|_p^p - \mar{p}^{-1} \|\tilde{w}\|_p^{p-1}.\]
If $\|w_T / \log T\|_p > \mar{p}^{-1} \cdot \frac{p}{p-1}$ for arbitrarily large $T$, then $\brg{w^\star}{w_T / \log T} > \frac{1}{p} \mar{p}^{-p}$ for those $T$. This in turn contradicts inequality \eqref{equ:lim-reference-scale}. Therefore, we must have
\[ \limsup_{T\to\infty} \frac{\|w_T\|_p}{\log T} \le \mar{p}^{-1} \frac{p}{p-1}. \]
Now we can turn our attention to the lower bound. Let $m_t = \gamma(w_t)$ be the margin of the mirror descent iterates. Then,
\[ L(w_t) = \frac{1}{n} \sum_{i=1}^n \exp(-\inp{w_t}{x_i}) \ge \frac{1}{n} \exp(-m_t).\]
Due to Lemma~\ref{thm:to-infinity}, we also know that $m_t \xrightarrow{t\to\infty} \infty$. By the definition of the max-margin direction, we know that $\gamma(\|w_t\|_p \mmd{p}) \ge m_t$. Then by linearity of the margin, there exists $w^\star$ so that $\gamma(w^\star) \ge (1+\frac{\log(2n)}{m_t}) m_t$ and $\|w^\star\|_p \le (1+\frac{\log(2n)}{m_t}) \|w_t\|_p$.
It follows that
\[ L(w^\star) = \frac{1}{n} \sum_{i=1}^n \exp(-\inp{w^\star}{x_i}) \le \exp(-\gamma(w^\star)) \le \frac{1}{2n} \exp(-m_t).\]
Under the assumption that the step size $\eta$ is sufficiently small so that $\psi - \eta L$ is convex on the iterates, we can apply the convergence rate of mirror descent~\citep[Theorem 3.1]{lu2018relatively}:
\[ L(w_t) - L(w^\star) \le \frac{1}{\eta t} \brg{w^\star}{w_0}. \]
From our choice of $w^\star$, we have
\begin{align*}
\frac{1}{2n} \exp(-m_t) &\le \frac{1}{\eta t} \brg{w^\star}{w_0} \\
&= \frac{1}{\eta t} (\psi(w^\star) - \psi(w_0) - \inp{\nabla\psi(w_0)}{w^\star - w_0}).
\end{align*}
After dropping the lower-order terms and recalling the upper bounds on $\|w^\star\|_p$ and $\|w_t\|_p$, we have
\[ \frac{1}{2n} \exp(-m_t) \le O(1) \cdot \frac{1}{\eta t} \cdot \frac{1}{p} \left(1 + \frac{\log (2n)}{m_t}\right)^p \left(\mar{p}^{-1} \frac{p}{p-1} \log t\right)^p.\]
Since $m_t$ is unbounded, the quantity $1 + \frac{\log (2n)}{m_t}$ is upper bounded by a constant. Taking the logarithm on both sides yields
\[ m_t \ge \log t - p \log\log t + O(1).\]
Finally, we use the definition of the margin to conclude that $ m_t \le \inp{w_t}{x_i} \le C \cdot \|w_t\|_p$. Therefore,
\[ \|w_t\|_p \ge \frac{1}{C} (\log t - p \log\log t) + O(1).\]
\end{proof}
\section{Practicality of $p$-{\small \sf GD}\xspace} \label{sec:practicality}
To illustrate that $p$-{\small \sf GD}\xspace can be easily implemented, we show a proof-of-concept implementation in PyTorch. This implementation can directly replace existing optimizers and thus requires only minor changes to any existing training code. We also note that while the $p$-{\small \sf GD}\xspace update step requires more arithmetic operations than a standard gradient descent update, this does not significantly impact the total runtime because differentiation is the most computationally intensive step. We observed from our experiments that training with $p$-{\small \sf GD}\xspace is approximately 10\% slower than with PyTorch's \texttt{optim.SGD} (in the same number of epochs),\footnote{This measurement may not be very accurate because we were using shared computing resources.} and we believe that this gap can be closed with a more optimized implementation.
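As a concrete illustration of this drop-in property, the following minimal sketch shows how $p$-{\small \sf GD}\xspace replaces \texttt{optim.SGD} in an otherwise unchanged PyTorch training loop. The model and data below are hypothetical placeholders for illustration; only the \texttt{pnormSGD} optimizer, whose full implementation is given in the listing that follows, is part of our code.
\begin{lstlisting}[caption={Illustrative drop-in usage of \texttt{pnormSGD} (hypothetical model and data)}]
import torch
import torch.nn as nn

# Hypothetical model and synthetic data, for illustration only.
model = nn.Sequential(nn.Linear(100, 64), nn.ReLU(), nn.Linear(64, 10))
inputs = torch.randn(256, 100)
targets = torch.randint(0, 10, (256,))
criterion = nn.CrossEntropyLoss()

# The only change from a standard pipeline: use pnormSGD in place of
# torch.optim.SGD.  Here p = 3, the best-performing value in our experiments.
optimizer = pnormSGD(model.parameters(), lr=1e-2, pnorm=3.0)

for step in range(100):
    optimizer.zero_grad()                      # clear old gradients
    loss = criterion(model(inputs), targets)   # forward pass
    loss.backward()                            # backward pass
    optimizer.step()                           # p-GD (mirror descent) update
\end{lstlisting}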
\begin{lstlisting}[caption={Sample PyTorch implementation of $p$-{\small \sf GD}\xspace}] import torch from torch.optim import Optimizer class pnormSGD(Optimizer): def __init__(self, params, lr=0.01, pnorm=2.0): if not 0.0 <= lr: raise ValueError("Invalid learning rate: {}".format(lr)) # p-norm must be strictly greater than 1 if not 1.01 <= pnorm: raise ValueError("Invalid p-norm value: {}".format(pnorm)) defaults = dict(lr=lr, pnorm=pnorm) super(pnormSGD, self).__init__(params, defaults) def __setstate__(self, state): super(pnormSGD, self).__setstate__(state) def step(self, closure=None): loss = None if closure is not None: with torch.enable_grad(): loss = closure() for group in self.param_groups: lr = group["lr"] pnorm = group["pnorm"] for param in group["params"]: if param.grad is None: continue x = param.data dx = param.grad.data # \ell_p^p potential function update = torch.pow(torch.abs(x), pnorm-1) * \ torch.sign(x) - lr * dx param.data = torch.sign(update) * \ torch.pow(torch.abs(update), 1/(pnorm-1)) return loss \end{lstlisting} \section{Experimental details} \label{sec:experiment-detail} \subsection{Linear classification} Here, we describe the details behind our experiments from Section~\ref{sec:linear-classifier}. First, we note that we can absorb the labels $y_i$ by replacing $(x_i, y_i)$ with $(y_ix_i, 1)$. This way, we can choose points with the same $+1$ label. For the $\mathbb{R}^2$ experiment, we first select three points $(\frac{1}{6}, \frac{1}{2}), (\frac{1}{2}, \frac{1}{6})$ and $(\frac{1}{3}, \frac{1}{3})$ so that the maximum margin direction is approximately $\frac{1}{\sqrt{2}}(1, 1)$. Then we sample 12 additional points from $\mathcal{N}((\frac{1}{2}, \frac{1}{2}), 0.15 I_2)$. The initial weight $w_0$ is selected from $\mathcal{N}(0, I_2)$. We ran $p$-{\small \sf GD}\xspace with step size $10^{-4}$ for 1 million steps. As for the scatter plot of the data, we randomly re-assign a label and plot out $(x_i, 1)$ or $(-x_i, -1)$ uniformly at random. For the $\mathbb{R}^{100}$ experiment, we select 15 sparse vectors that each has up to 10 nonzero entries. Each nonzero entry is independently sampled from $\mathcal{U}(-2, 4)$. Because we are in the over-parameterized case, these vectors are linearly separable with high probability. The initial weight $w_0$ is selected from $\mathcal{N}(0, 0.1 I_{100})$. We ran $p$-{\small \sf GD}\xspace with step size $10^{-4}$ for 1 million steps. These experiments were performed on an Intel Skylake CPU. \subsection{CIFAR-10 experiments} For the experiments with the CIFAR-10 dataset, we adopted the example implementation from the \texttt{FFCV} library.\footnote{\url{https://github.com/libffcv/ffcv/tree/main/examples/cifar}} For consistency, we ran $p$-{\small \sf GD}\xspace with the same hyper-parameters for all neural networks and values of $p$. We used a cyclic learning rate schedule with maximum learning rate of 0.1 and ran for 400 epochs so the training loss is approximately 0.\footnote{This differs from the setup from~\cite{azizan2021stochastic}, where they used a fixed small learning rate and much larger number of epochs.} This experiment was performed on a single Nvidia V100 GPU. \subsection{ImageNet experiments} For the experiments with the ImageNet dataset, we used the example implementation from the \texttt{FFCV} library.\footnote{\url{https://github.com/libffcv/ffcv-imagenet/}} For consistency, we ran $p$-{\small \sf GD}\xspace with the same hyper-parameters for all neural networks and values of $p$. 
We used a cyclic learning rate schedule with maximum learning rate of 0.5 and ran for 120 epochs. Note that, to more accurately measure the effect of $p$-{\small \sf GD}\xspace on generalization, we turned off any parameters that may affect regularization, e.g. with momentum set to 0, weight decay set to 0, and label smoothing set to 0, etc. This experiment was performed on a single Nvidia V100 GPU. \section{Additional experimental results} \label{sec:add-experiments} \subsection{Linear classification} \label{sec:add-experiment-synthetic} We present a more complete result for the setting of Section \ref{sec:linear-classifier} with more values of $p$. Note that Table~\ref{tab:linear-bias} is a subset of Table~\ref{tab:linear-bias-full-1}, as shown below. Except for $p = 1.1$, $p$-{\small \sf GD}\xspace produces the smallest linear classifier under the corresponding $\ell_p$-norm and thus consistent with the prediction of Theorem~\ref{thm:primal-bias}. When $p = 1.1$, Corollary~\ref{thm:final-convg-rate} predicts a much slower convergence rate. So, for the number of iterations we have, $p$-{\small \sf GD}\xspace with $p = 1.1$ in fact cannot compete against $p$-{\small \sf GD}\xspace with $p = 1.5$, which has much faster convergence rate but similar implicit bias. The second trial shows a rare case where $p$-{\small \sf GD}\xspace with $p = 1.1$ could not even match $p$-{\small \sf GD}\xspace with $p = 2$ under the $\ell_{1.1}$-norm. Therefore, before we come up with techniques to speed up the convergence of $p$-{\small \sf GD}\xspace, it is not advisable to pick $p$ that is too close to 1. \begin{table} \centering \setlength{\tabcolsep}{4.5pt} \begin{tabular}{l| c|c|c|c|c|c|c|c} \hline & $\ell_1$ norm & $\ell_{1.1}$ norm & $\ell_{1.5}$ norm & $\ell_{2}$ norm & $\ell_{3}$ norm & $\ell_{6}$ norm & $\ell_{10}$ norm & $\ell_{\infty}$ norm \\ \hline\hline $p=1.1$ & \textbf{7.692} & 5.670 & 2.650 & 1.659 & 1.100 & 0.782 & 0.698 & 0.634 \\ $p=1.5$ & 7.924 & \textbf{5.607} & \textbf{2.333} & 1.346 & 0.830 & 0.573 & 0.526 & 0.515 \\ $p=2$ & 9.417 & 6.447 & 2.413 & \textbf{1.273} & 0.710 & 0.444 & 0.393 & 0.374 \\ $p=3$ & 11.307 & 7.618 & 2.696 & 1.345 & \textbf{0.691} & 0.381 & 0.318 & 0.285 \\ $p=6$ & 13.115 & 8.787 & 3.044 & 1.481 & 0.729 & \textbf{0.369} & 0.288 & 0.233 \\ $p=10$ & 13.572 & 9.086 & 3.137 & 1.520 & 0.742 & 0.367 & \textbf{0.281} & \textbf{0.213} \\ \hline \end{tabular} \caption{Size of the linear classifiers generated by $p$-{\small \sf GD}\xspace (after rescaling) in $\ell_1, \ell_{1.1}, \ell_{1.5}, \ell_2, \ell_3, \ell_6$ and $\ell_{10}$ norms. For each norm, we highlight the value of $p$ for which $p$-{\small \sf GD}\xspace generates the smallest classifier under that norm. 
(Trial 1)} \label{tab:linear-bias-full-1} \end{table}
\begin{table} \centering \setlength{\tabcolsep}{4.5pt} \begin{tabular}{l| c|c|c|c|c|c|c|c} \hline & $\ell_1$ norm & $\ell_{1.1}$ norm & $\ell_{1.5}$ norm & $\ell_{2}$ norm & $\ell_{3}$ norm & $\ell_{6}$ norm & $\ell_{10}$ norm & $\ell_{\infty}$ norm \\ \hline\hline $p=1.1$ & 10.688 & 8.013 & 3.883 & 2.465 & 1.644 & 1.187 & 1.082 & 1.009 \\ $p=1.5$ & \textbf{9.308} & \textbf{6.546} & \textbf{2.674} & 1.518 & 0.913 & 0.602 & 0.535 & 0.488 \\ $p=2$ & 10.735 & 7.340 & 2.735 & \textbf{1.435} & 0.790 & 0.479 & 0.418 & 0.397 \\ $p=3$ & 12.298 & 8.327 & 2.991 & 1.508 & \textbf{0.782} & 0.432 & 0.359 & 0.324 \\ $p=6$ & 13.817 & 9.322 & 3.297 & 1.631 & 0.816 & \textbf{0.418} & 0.328 & 0.265 \\ $p=10$ & 14.545 & 9.798 & 3.447 & 1.695 & 0.841 & 0.423 & \textbf{0.325} & \textbf{0.247} \\ \hline \end{tabular} \caption{Size of the linear classifiers generated by $p$-{\small \sf GD}\xspace (after rescaling) in $\ell_1, \ell_{1.1}, \ell_{1.5}, \ell_2, \ell_3, \ell_6$ and $\ell_{10}$ norms. For each norm, we highlight the value of $p$ for which $p$-{\small \sf GD}\xspace generates the smallest classifier under that norm. (Trial 2)} \label{tab:linear-bias-full-2} \end{table}
\subsection{CIFAR-10 experiments: implicit bias} \label{sec:add-experiment-cifar-bias}
We present more complete illustrations of the implicit bias trends of models trained on CIFAR-10. Compared to Figure~\ref{fig:cifar10-hist}, the plots below include data for additional values of $p$ and more deep neural network architectures. We see that the trends we observed in Section~\ref{sec:cifar} continue to hold for architectures other than \textsc{ResNet}. In particular, for smaller $p$'s, the weight distributions of models trained with $p$-{\small \sf GD}\xspace have a higher peak around zero, and higher $p$'s result in smaller maximum weights.
\begin{figure} \caption{The histogram of weights in \textsc{ResNet-18}.} \label{fig:cifar10-hist-resnet-full} \end{figure}
\begin{figure} \caption{The histogram of weights in \textsc{MobileNet-v2}.} \label{fig:cifar10-hist-mobilenet-full} \end{figure}
\begin{figure} \caption{The histogram of weights in \textsc{RegNetX-200mf}.} \label{fig:cifar10-hist-regnet-full} \end{figure}
\begin{figure} \caption{The histogram of weights in \textsc{VGG-11}.} \label{fig:cifar10-hist-vgg-full} \end{figure}
\subsection{CIFAR-10 experiments: generalization}
We present a more complete result for the CIFAR-10 generalization experiment in Section~\ref{sec:cifar} with additional values of $p$. In the following table, we see that $p$-{\small \sf GD}\xspace with $p = 3$ continues to have the highest generalization performance for all deep neural networks.
\label{sec:add-experiment-cifar-generalization} \begin{table}[!h] \centering \setlength{\tabcolsep}{5.5pt} \begin{tabular}{l| c|c|c|c} \hline & \hspace{1.25em} \textsc{VGG-11} \hspace{1.25em} & \hspace{0.75em} \textsc{ResNet-18} \hspace{0.75em} & \textsc{MobileNet-v2} & \textsc{RegNetX-200mf} \\ \hline \hline $p = 1.1$ & \pmval{88.19}{.17} & \pmval{92.63}{.12} & \pmval{91.16}{.09}& \pmval{91.21}{.18} \\ $p = 1.5$ & \pmval{88.45}{.29} & \pmval{92.73}{.11} & \pmval{90.81}{.19}& \pmval{90.91}{.12} \\ $p = 2$ (SGD) & \pmval{90.15}{.16} & \bpmval{93.90}{.14} & \pmval{91.97}{.10}& \pmval{92.75}{.13} \\ $p = 3$ & \bpmval{90.85}{.15} & \bpmval{94.01}{.13} & \bpmval{93.23}{.26}& \bpmval{94.07}{.12} \\ $p = 6$ & \pmval{89.47}{.14} & \bpmval{93.87}{.13} & \pmval{92.84}{.15}& \pmval{93.03}{.17} \\ $p = 10$ & \pmval{88.78}{.37} & \pmval{93.55}{.21} & \pmval{92.60}{.22}& \pmval{92.97}{.16} \\ \hline \end{tabular} \caption{CIFAR-10 test accuracy (\%) of $p$-{\small \sf GD}\xspace on various deep neural networks. For each deep net and value of $p$, the average $\pm$ \textcolor{gray}{std. dev.} over 5 trials are reported. And the best performing value(s) of $p$ for each individual deep net is highlighted in \textbf{boldface}.} \label{tab:generalization-cifar10-full} \end{table} \begin{comment} \begin{table}[] \centering \begin{tabular}{c| c|c|c|c} & $\ell_{1.1}$ norm & $\ell_{2}$ norm & $\ell_{3}$ norm & $\ell_{10}$ norm \\ \hline\hline $p=1.1$ & \pmval{$2.83 \cdot 10^4$}{32.56} & \pmval{93.16}{0.32} & \pmval{19.92}{0.18} & \pmval{5.00}{0.81}\\ $p=2.0$ & \pmval{$3.32 \cdot 10^4$}{54.47} & \pmval{88.77}{0.04} & \pmval{17.02}{0.00} & \pmval{2.34}{0.00}\\ $p=3.0$ & \pmval{$9.49 \cdot 10^4$}{1073.27} & \pmval{158.21}{1.15} & \pmval{18.31}{0.02} & \pmval{2.33}{0.00}\\ $p=10.0$ & \pmval{$9.44 \cdot 10^5$}{5213.27} & \pmval{1250.72}{6.20} & \pmval{84.82}{0.37} & \pmval{2.40}{0.00}\\ \hline \end{tabular} \caption{Caption} \label{tab:foo} \end{table} \end{comment} \subsection{ImageNet experiments} \label{sec:add-experiment-imagenet} To verify if our observations on the CIFAR-10 generalization performance hold up for other datasets, we also performed similar experiments for the much larger ImageNet dataset. Due to computational constraints, we were only able to experiment with the \textsc{ResNet-18} and \textsc{MobileNet-v2} architectures and only for one trial. It is worth noting that the neural networks we used cannot reach 100\% training accuracy on Imagenet. The models we employed only achieved top-1 training accuracy in the mid-70's. So, we are not in the so-called \textit{interpolation regime}, and there are many other factors that can significantly impact the generalization performance of the trained models. In particular, we find that not having weight decay costs us around 3\% in validation accuracy in the $p = 2$ case and this explains why our reported numbers are lower than PyTorch's baseline for each corresponding architecture. Despite this, we find that $p$-{\small \sf GD}\xspace with $p = 3$ has the best generalization performance on the ImageNet dataset, matching our observation from the CIFAR-10 dataset. 
\begin{table}[!h] \centering \begin{tabular}{l| c | c} \hline & \textsc{ResNet-18} & \textsc{MobileNet-v2} \\ \hline\hline $p=1.1$ & 64.08 & 63.41 \\ $p=1.5$ & 65.14 & 65.75 \\ $p=2$ (SGD) & 66.76 & 67.91 \\ $p=3$ & \textbf{67.67} & \textbf{69.74} \\ $p=6$ & 66.69 & 67.05 \\ $p=10$ & 65.10 & 62.32 \\ \hline \end{tabular} \caption{ImageNet top-1 validation accuracy (\%) of $p$-{\small \sf GD}\xspace on various deep neural networks. The best performing value(s) of $p$ for each individual deep network is highlighted in \textbf{boldface}.} \label{tab:imagenet} \end{table} \end{document}
\begin{document} \title{The Cohomology Ring of the Space of Rational Functions} \author[Dinesh Deshpande]{Dinesh Deshpande} \address{Department of Pure Mathematics and Mathematical Statistics, \\University of Cambridge, \{\mathbb{C}}ambridge CB3 0WB, UK} \email{[email protected]} \maketitle \begin{abstract} Let $Rat_k$ be the space of based holomorphic maps from $S^2$ to itself of degree $k$. Let $\beta_k$ denote the Artin's braid group on $k$ strings and let $B\beta_k$ be the classifying space of $\beta_k$. Let $C_k$ denote the space of configurations of length less than or equal to $k$ of distinct points in ${\mathbb{R}}^2$ with labels in $S^1$. The three spaces $Rat_k$, $B\beta_{2k}$, $C_k$ are all stably homotopy equivalent to each other. For an odd prime $p$, the $\mathbb{F}_p$-cohomology ring of the three spaces are isomorphic to each other. The $\mathbb{F}_2$-cohomology ring of $B\beta_{2k}$ is isomorphic to that of $C_k$. We show that for all values of $k$ except 1 and 3, the $\mathbb{F}_2$-cohomology ring of $Rat_k$ is not isomorphic to that of $B\beta_{2k}$ or $C_k$. This in particular implies that the $H\mathbb{F}_2$-localization of $Rat_k$ is not homotopy equivalent to $H\mathbb{F}_2$-localization of $B\beta_{2k}$ or $C_k$. We also show that for $k \geq 1 $, $B\beta_{2k}$ and $B\beta_{2k+1}$ have homotopy equivalent $H\mathbb{F}_2$-localizations. \end{abstract} \section{Introduction} Let $\beta_k$ denote the Artin's braid group on $k$ strings. Let $B\beta_k$ be the classifying space of $\beta_k$. Let $C_k({\mathbb{R}}^2,S^1)$ denote the space of configurations of length less than or equal to $k$ of distict points in ${\mathbb{R}}^2$ with labels in $S^1$, with some identifications. We use just $C_k$ to denote the space $C_k({\mathbb{R}}^2,S^1)$. Let $Rat_k$ be the space of based holomorphic maps from $S^2$ to itself of degree $k$. \cite{seg79},\cite{cohcoh91,cohcoh93} shows that theses three spaces $Rat_k$, $B\beta_{2k}$, $C_k$ are all stably homotopy equivalent. In fact, \cite{cohdav88, sna74} shows that these spaces split stably as a wedge sum $\vee_{j \leq k} D_j(S^1)$, where $D_j = C_j/C_{j-1}$ is a space related to the Brown-Gitler spectra. The three spaces are closely related to $\Omega^2S^2$. We explain some facts about these spaces in the next section. Totaro \cite{tot90} has shown that the three spaces have isomorphic $\mathbb{F}_p$-cohomologies for an odd prime $p$. He has also shown that the $\mathbb{F}_2$-cohomology ring of $B\beta_{2k}$ is isomorphic to that of $C_k$ and if $k+1$ is not a power of 2, then the $\mathbb{F}_2$-cohomology ring of $Rat_k$ is not isomorphic to that of $B\beta_{2k}$ or $C_k$. This paper extends the result to all values of $k$ except when $k = 1$ or $k = 3$ [Theorem \ref{main}]. This in particular implies that $Rat_k$ is not homotopy equivalent to $C_k$ if $k$ is not equal to 1 or 3. Bousfield has defined the localization of spaces with respect to homology in \cite{bou75}. Two spaces $X$ and $Y$ have homotopy equivalent $HR$-localizations if and only if there are maps $X \ra X_1 \la Y$ such that each map induces an isomorphism on homology groups with coefficients in the ring $R = {\mathbb{Z}}, \mathbb{F}_p$ or ${\mathbb{Z}}[q^{-1}]$. Our result implies that, $H\mathbb{F}_2$-localizations of $Rat_k$ and $B\beta_{2k}$ are not homotopy equivalent. 
We also show that for $k \geq 1$ $B\beta_{2k}$ and $B\beta_{2k+1}$ have isomorphic $\mathbb{F}_2$-cohomologies and $H\mathbb{F}_2$-localizations of $B\beta_{2k}$ and $B\beta_{2k+1}$ are homotopy equivalent [Lemma \ref{braid}]. For $k=1$, the three spaces $Rat_1$, $B\beta_2$ and $C_1$ are all homotopy equivalent to $S^1$. For $k = 3$, it turns out that the corresponding three spaces have isomorphic cohomology rings with coefficients in $\mathbb{F}_p$ for any prime $p$. Moreover, the actions of the dual of the Steenrod algebra on the $\mathbb{F}_2$-homologies of $Rat_3$, $B\beta_6$ and $C_3$ are also isomorphic.\newline {\it Acknowledgements}: The author thanks his PhD supervisor Burt Totaro for introducing to this subject and for numerous interesting discussions. \section{$B\beta_k$, $C_k$, $Rat_k$} In this section, we describe the three spaces $Rat_k$, $B\beta_k$ and $C_k$, their respective integral cohomologies and their relation with the space $\Omega^2S^2$. We also describe the coalgebra structure of their respective $\mathbb{F}_2$-homologies. For this chapter, the default ring of coefficients is $\mathbb{F}_2$. Let $\Omega^2S^2$ be the double loop space of $S^2$, i.e the space of maps from $S^2$ to itself. \begin{align*} \pi_0(\Omega^2S^2) \cong \pi_2(S^2) \cong {\mathbb{Z}}, \end{align*} where the degree map induces an isomorphism \begin{align*} \xymatrix{ \pi_0(\Omega^2S^2) \ar[r]^{\cong} & {\mathbb{Z}}.} \end{align*} \subsection{$B\beta_k$} The braid space $B\beta_k$ is the classifying space of the braid group on $k$-strings, $\beta_k$. Let $F({\mathbb{R}}^2, k)$ denote the configuration space of $k$-points in ${\mathbb{R}}^2$, i.e. \begin{displaymath} F({\mathbb{R}}^2, k) = \{ (x_1,...,x_k)| x_i \in {\mathbb{R}}^2,i \neq j {\mathbb{R}}ightarrow x_i \neq x_j \}.\end{displaymath} The symmetric group on $k$ elements $\Sigma_k$ acts freely on $F({\mathbb{R}}^2, k)$. We can take $ F({\mathbb{R}}^2, k)/ \Sigma_k$ as a model of the classifying space of $\beta_k$. Thus the space $B\beta_k$ is the space of unordered $k$-tuples of points in ${\mathbb{R}}^2$. The space $B\beta_k$ can also be described as the space of degree $k$ complex polynomials without multiple roots and with the leading coefficient equal to the unity. The rational cohomology of the braid groups is as follows (\cite{ver99}, Theorem 8.1-2). \begin{Lemma} For $k \geq 2$, the rational cohomology groups of $B\beta_k$ are trivial except for \begin{align*} H^0(B\beta_k;{\mathbb{Q}}) \cong& {\mathbb{Q}},\\ H^1(B\beta_k;{\mathbb{Q}}) \cong& {\mathbb{Q}}. \end{align*} And for $k \geq 1$, \begin{align*} H^i(B\beta_{2k+1};{\mathbb{Z}}) \cong H^i(B\beta_{2k};{\mathbb{Z}}). \end{align*} \end{Lemma} As the spaces $B\beta_{2k}$, $Rat_k$ and $C_k$ are stably homotopy equivalent to each other, \begin{align*} H^i(B\beta_{2k};{\mathbb{Z}}) \cong H^i(Rat_k;{\mathbb{Z}}) \cong H^i(C_k;{\mathbb{Z}}). \end{align*} F. Cohen has calculated the $\mathbb{F}_p$-homology of $\coprod B\beta_k$ and $\Omega^2S^2$ in \cite{cohlad76}. The spaces $\Omega^2S^2$ and $B\beta_k$ are $\mathcal{C}_2$-spaces, i.e. $\mathcal{C}_2$, the `little 2-cubes operad' acts on them. Hence there is the Araki-Kudo operation on the $\mathbb{F}_2$-homologies of $\coprod_k B\beta_k$ and $\Omega^2S^2$, $ Q : H_q \ra H_{2q+1} $ and the Pontrjagin product which makes their homologies commutative rings. Let $\Omega_k^2 S^2$ denote the $k^{th}$ component of $\Omega^2S^2$ corresponding to the degree $k$ maps $f: S^2 \ra S^2$. 
Then $Q$ maps $H_i(\Omega_k^2S^2)$ to $H_{2i+1}(\Omega^2_{2k}S^2)$. There is a natural map $\phi: B\beta_k \ra \Omega^2_kS^2$. This map can be described as follows. Replace the $k$-tuple of distinct points in ${\mathbb{R}}^2$ by $k$ disjoint unit circles in ${\mathbb{R}}^2$. Then define a map from ${\mathbb{R}}^2 \cup \infty$ to itself by sending everything except interiors of unit circles to the point at infinity and by sending the interior of each unit circle onto the whole of ${\mathbb{R}}^2$ homeomorphically. Identifying ${\mathbb{R}}^2 \cup \infty$ with $S^2$ by the stereographic projection gives a degree $k$ map from $S^2$ to itself. This is precisely the natural map $\phi$ from $B\beta_k$ to $\Omega^2_kS^2$. An algebraic construction of a map $B\beta_k \ra \Omega^2S^2$ is given in section 1, \cite{seg79}. Note that \begin{align*} \pi_1(B\beta_k)& \cong \beta_k,\\ \pi_1(\Omega^2S^2)& \cong {\mathbb{Z}},\\ \pi_n(B\beta_k)& \cong \{0\}, \forall k > 1. \end{align*} Hence the map $\phi$ can not be a homotopy equivalence in any range of dimensions. But it turns out that the map induces an isomorphism of homologies up to dimension $\lfloor k/2 \rfloor := $\emph{the greatest integer smaller than or equal to }$k/2$. The map $\phi$ induces a map $\Phi: \coprod_{k \geq 0} B\beta_k \ra \Omega^2S^2$. Let $g$ be the generator of $H_0(B\beta_1)$. By using the map $\Phi$, let $g$ also denote the generator of $H_0(\Omega^2_1S^2)$. Then the homology of these two spaces is build-up by the `Araki-Kudo' operation $Q$ and its iterations $Q^i(x) = Q(Q^{i-1}(x))$. To be precise, there are algebra isomorphisms (appendix III, \cite{cohlad76}) \begin{displaymath} H_*(\coprod B\beta_k) \cong \mathbb{F}_2[g, Qg, Q^2g, \ldots], \end{displaymath} \begin{displaymath} H_*(\Omega^2S^2) \cong \mathbb{F}_2[g, g^{-1}] \otimes \mathbb{F}_2[Qg, Q^2g, \ldots]. \end{displaymath} Note that the dimension in homology of $Q^ig$ is $2^i-1$ and is contained in the $2^i$th component of $\Omega^2S^2$. Define the weight of a homology class to be the component in which that class lives. Hence, $H_*(B\beta_k)$ is the span of monomials in $g, Qg, Q^2g, ...$ of weight $k$, where $Q^ig$ has the weight $2^i$ and the dimension $2^i-1$. Hence note that for any $k$, the top dimensional homology of $B\beta_k$ is generated by a single element. If the binary expansion of $k$ is $k = \sum_{j \in J} 2^j$, then this top dimension is $H_{k-|J|}$. Also notice that $Q(x^2) = x^2Qx + Qx \cdot x^2 = 0$ as the homology coefficients are in $\mathbb{F}_2$. Further, this operation $Q$ is linear and that the Cartan formula holds (lemma 5.2, IX, \cite{cohlad76}) \begin{displaymath} Q(xy) = x^2Qy + Qx \cdot y^2. \end{displaymath} The coproduct structure on the homology, i.e. the cup product strucutre on the cohomology of $B\beta_k$ is as given below. It turns out that $H_*(B\beta_k)$ is a primitively generated Hopf algebra. i.e., let \begin{displaymath} \psi: H_* \ra H_* \otimes H_* \end{displaymath} denote the coproduct on the homology. Then $\psi(g) = g \otimes g$ and $Q^ig$ for $i \geq 1$ is primitive in its component, \begin{displaymath} \psi(Q^ig) = g^{2^i} \otimes Q^ig + Q^ig \otimes g^{2^i}. \end{displaymath} $\psi$ being a coproduct map satisfies that \begin{displaymath} \psi(xy) = \psi(x)\psi(y). \end{displaymath} The expressions for $Q(g^{-1})$ and $Q(g^{-1}Qg)$ in $H_*(\Omega^2S^2)$ in terms of $g$ and $Q^ig$ can be obtained using the Cartan formula. 
They are, \begin{align*} Q(g^{-1}) &= g^{-4}Q(g) \\ Q(g^{-1}Qg) &= g^{-2}Q^2g + g^{-4}(Qg)^3 \end{align*} $H^*(B\beta_{2k})$ is isomorphic to $H^*(B\beta_{2k+1})$ \cite{arn68,fuk70}. We will show that $B\beta_{2k}$ and $B\beta_{2k+1}$ have homotopy equivalent $H\mathbb{F}_2$-localizations. \begin{Lemma}\label{braid} The cohomology ring $H^*(B\beta_{2k})$ is isomorphic to $H^*(B\beta_{2k+1})$. In fact, $B\beta_{2k}$ and $B\beta_{2k+1}$ have homotopy equivalent $H\mathbb{F}_2$-localizations. \end{Lemma} {\noindent{\sc Proof: }} Let $x \in H_i(B\beta_{2k})$. Hence $x$ is an element of weight $2k$ and dimension $i$ in ${\mathbb{Z}}_2[g,Qg,Q^2g,\cdots]$. Hence $gx$ is an element of weight $2k+1$ and dimension $i$. Hence $gx \in H_i(B\beta_{2k+1})$. Also, let $y$ be a monomial in ${\mathbb{Z}}_2[g,Qg,Q^2g,\cdots]$ of the dimension $i$ and the weight $2k+1$, i.e. $y \in H_i(B\beta_{2k+1}) $. As each of the $Q^ig$ has even weight, $y$ is divisible by $g$, and $y/g = x \in H_i(B\beta_{2k})$. Also \begin{align*} \psi(y) =& \psi(g)\psi(x)\\ =& (g \otimes g)\psi(x). \end{align*} Hence multiplication by $g$ induces an isomorphism of coalgebras \begin{align*} \xymatrix{ H_*(B\beta_{2k}) \ar[r]^{\cdot g} & H_*(B\beta_{2k+1}).} \end{align*} Hence, \begin{align*} H^*(B\beta_{2k}) \cong H^*(B\beta_{2k+1}). \end{align*} Furthermore, let $i_{2k}: B\beta_{2k} \ra B\beta_{2k+1}$ be the inclusion map given by adding a point away from a given $2k$-tuple to get a $2k+1$-tuple. Then note that $i_{2k_*}$ is precisely multiplication by $g$. Hence $i_{2k_*}$ induces isomorphism on $\mathbb{F}_2$-homologies. Hence $B\beta_{2k}$ and $B\beta_{2k+1}$ have homotopy equivalent $H\mathbb{F}_2$-localizations. \qed The action of the dual of the Steenrod algebra on $H_*(\coprod B\beta_k)$ is given in the appendix of \cite{coh78}. Let $Sq_j^*: H_n(-) \ra H_{n-j}(-)$ be the dual of the $j$th Steenrod operation $Sq^j$. Then \begin{align*} Sq_j^*(Q^ig) =& 0 \text{ if } j \geq 2.\\ Sq_1^*(Q^ig) =& (Q^{i-1}g)^2 \text{ if } i \geq 2\\ Sq_1^*(Qg) =& 0. \end{align*} \subsection{$C_k$} The following description of the configuration spaces is taken from \cite{tot90}, i.e. in turn from \cite{cohshi91}. Let $C({\mathbb{R}}^2,Y)$ denote the space of all configurations of distinct points in ${\mathbb{R}}^2$ with labels in $Y$. It is defined by \begin{displaymath} C({\mathbb{R}}^2,Y) = (\bigcup_{j=1}^{\infty} F({\mathbb{R}}^2,j) \times_{\Sigma_j} Y^j)/\sim \end{displaymath} and if $* \in Y$ is a fixed basepoint then the equivalence relation $\sim$ is given by \begin{displaymath} (x_1,\ldots, x_j) \times_{\Sigma_j} (t_1,\ldots , t_{j-1},*) \sim (x_1,\ldots,x_{j-1}) \times_{\Sigma_{j-1}}(t_1,\ldots,t_{j-1}). \end{displaymath} Let $C_k({\mathbb{R}}^2,Y)$ denote the subspace of all configurations of length less than or equal to $k$. i.e. \begin{displaymath} C_k({\mathbb{R}}^2,Y) = (\bigcup_{j=1}^k F({\mathbb{R}}^2,j) \times_{\Sigma_j} Y^j)/\sim . \end{displaymath} We denote by $C_k$ the space $C_k({\mathbb{R}}^2, S^1).$ There is a relation between configuration spaces and iterated loop spaces(May-Milgram and Segal ). If $Y$ is a connected CW-complex then $C({\mathbb{R}}^2,Y)$ is homotopy equivalent to the based loop space $\Omega^2\Sigma^2Y$ which is defined by \begin{displaymath} \Omega^2\Sigma^2Y = \{f: S^2 \ra \Sigma^2Y | f(\infty) = * \} \end{displaymath} Hence $C_k$ can be considered as a finite dimensional approximation to $\Omega^2S^3$. \begin{displaymath} \pi_1(C_k) \cong {\mathbb{Z}}. 
\end{displaymath} The Hopf map $S^3 \ra S^2$ induces a map of 2-fold loop spaces, from $\Omega^2S^3$ to $\Omega^2S^2$. The long exact sequence of the homotopy groups of the fibration $S^1 \ra S^3 \ra S^2$ implies that $\Omega^2S^3 \ra \Omega^2S^2$ gives the homotopy equivalence from $\Omega^2S^3$ to $\Omega^2_0S^2$. This helps in obtaining the following result (theorem 3.1, III, \cite{cohlad76}), \begin{displaymath} H_*(\bigcup_{k \geq 0}C_k) \cong H_*(\Omega_0^2S^2) \cong \mathbb{F}_2[g^{-2}Qg, Q(g^{-2}Qg), \ldots]. \end{displaymath} $H_*C_k$ is the span of monomials of weight less than or equal to $k$, where the weight of $Q^i(g^{-2}Qg)$ is $2^i$ and it lives in the dimension $2^{i+1} - 1$. Proposition 1 from \cite{tot90} shows that as coalgebras, \begin{displaymath} H_*(B\beta_{2k}) \cong H_*(C_k). \end{displaymath} Havlicek \cite{hav95} has described the precise cohomology ring of $C_k$ as the dual to this coalgebra. \subsection{$Rat_k$} The space $Rat_k({\mathbb{C}} P^1)$ or $Rat_k$ is the space of based holomorphic maps $S^2 \ra S^2$ of degree $k$. It can be described more precisely as the space of rational functions from ${\mathbb{C}} \cup \infty$ to ${\mathbb{C}} \cup \infty$ which sends $\infty$ to 1, i.e. \begin{displaymath} Rat_k := \{ \dfrac{f(z)}{h(z)} = \dfrac{z^k + a_{k-1}z^{k-1} + \ldots + a_0}{z^k + b_{k-1}z^{k-1} + \ldots + b_0}| \text{\emph{f}(\emph{z}) and \emph{h}(\emph{z}) are coprime} \} \end{displaymath} $Rat_k$ is a nilpotent space up to dimension $k$ (corollary 6.3, \cite{seg79}). i.e. the fundamental group of $Rat_k$ acts nilpotently on homotopy groups $\pi_i(Rat_k)$ for $2 \leq i \leq k$. Consider the map given by resultant of two polynomials \begin{align*} R : Rat_k \ra& {\mathbb{C}}^*\\ (f/h) \mapsto& resultant(f,h). \end{align*} Then the map $R$ induces an isomorphism of fundamental groups (proposition 6.4, \cite{seg79}) \begin{displaymath} \pi_1(Rat_k) \cong {\mathbb{Z}}. \end{displaymath} There is a natural map $ Rat_k \ra \Omega^2_kS^2$ which simply forgets that a map in $Rat_k$ is holomorphic. This map is well described in (\cite{boyman88}, \cite{seg79}). This induces a map \begin{align*} \chi: \coprod_{k \geq 0} Rat_k \ra \Omega^2S^2. \end{align*} The map $\chi$ preserves the action of $\mathcal{C}_2$ operad on the spaces $\coprod_{k \geq 0} Rat_k$ and $\Omega^2S^2$. The map $\chi$ induces a map on the homologies and in the proof of Theorem1 in \cite{tot90}, it is shown that this induced map is an injection. The image of this map is a polynomial ring generated by $g$ and $Q^i(g^{-1}Qg)$ for $i \geq 0$. To be precise, \begin{displaymath} H_*(\coprod_k Rat_k) = \mathbb{F}_2[g, g^{-1}Qg, Q(g^{-1}Qg), \ldots]. \end{displaymath} As before, $g$ has weight 1 and dimension zero, and $Q^{i-1}(g^{-1}Qg)$, $i \geq 1$ has weight $2^{i-1}$ and dimension $2^i - 1$. $H_*(Rat_k)$ as a sub-coalgebra of $H_*(\coprod_k Rat_k)$ is generated by the monomials of weight $k$. Note again that the top dimensional homology of $Rat_k$ is generated by a single element. There is a one-to-one correspondence between the generators of $H_*(\coprod_k Rat_k)$ and $H_*(\coprod_k B\beta_{k})$. $g$ of course corresponds to $g$ and $Q^{i-1}(g^{-1}Qg)$ corresponds to $Q^ig$. Note that in this correspondence, except for $g$, the weights of the generators of $H_*(\coprod_k Rat_k)$ are exactly the half of the weights of the generators they correspond to in $H_*(\coprod_k B\beta_k)$. The coproduct structure on $H_*(Rat_k)$ is as follows. 
$g^{-1}Qg$ is primitive in its component, but $Q^i(g^{-1}Qg)$ is not primitive in its component for $i \geq 1$.
\begin{align*} \psi(g^{-1}Qg) = g \otimes g^{-1}Qg + g^{-1}Qg \otimes g. \end{align*}
And for $i \geq 1$,
\begin{align*} \psi Q^i(g^{-1}Qg) = &\underbrace{g^{2^i} \otimes Q^i(g^{-1}Qg)}_{0,2^{i+1}-1} + \underbrace{Q^ig \otimes (g^{-1}Qg)^{2^i}}_{2^i-1,2^i} + \underbrace{(g^{-1}Qg)^{2^i} \otimes Q^ig}_{2^i,2^i-1} \\ &+ \underbrace{Q^i(g^{-1}Qg) \otimes g^{2^i}}_{2^{i+1}-1,0}. \end{align*}
Numbers appearing below a symbol indicate the dimension in homology of the corresponding symbol, i.e. $(0,2^{i+1}-1)$ below $g^{2^{i}} \otimes Q^i(g^{-1}Qg)$ indicates that $g^{2^i}$ is zero-dimensional and the dimension of $Q^i(g^{-1}Qg)$ is $2^{i+1}-1$.
\section{$\mathbb{F}_2$-cohomology ring of $Rat_k$ is not isomorphic to that of $B\beta_{2k}$ or $C_k$}
This section proves that if $k$ is not equal to 1 or 3, then the cohomology ring of $Rat_k$ with coefficients in $\mathbb{F}_2$ is not isomorphic to the cohomology ring of $B\beta_{2k}$ or $C_k$ with coefficients in $\mathbb{F}_2$. Totaro \cite{tot90} has shown this statement when $k+1$ is not a power of 2. For $k=1$, all three spaces $Rat_1$, $B\beta_2$ and $C_1$ are homotopy equivalent to $S^1$. For $k=3$, the three spaces $Rat_3$, $B\beta_6$ and $C_3$ have isomorphic $\mathbb{F}_p$-homology as coalgebras for any prime $p$. The following theorem shows the result for the remaining values of $k$, that is, when $k+1$ is a power of 2 and $k>3$. This in particular shows that if $k$ is not equal to 1 or 3, then there does not exist any sequence of maps
\begin{align*} B\beta_{2k} \ra X_1 \la Rat_k \end{align*}
each of which induces an isomorphism on $\mathbb{F}_2$-homology. Hence, in the context of \cite{bou75}, our result implies that the two spaces $Rat_k$ and $B\beta_{2k}$ cannot have homotopy equivalent $H\mathbb{F}_2$-localizations. For completeness, we include Totaro's argument when $k+1$ is not a power of 2.
\begin{Theorem}\label{main} The $\mathbb{F}_2$-cohomology of $Rat_k$ is not isomorphic to the $\mathbb{F}_2$-cohomology of $B\beta_{2k}$ or $C_k$ except when $k = 1$ or $3$. \end{Theorem}
{\noindent{\sc Proof: }} First, assume that $k+1$ is not a power of 2. Let
\begin{align*} k = \sum_{j \in J} 2^j. \end{align*}
The top dimensional homology group of both $Rat_k$ and $B\beta_{2k}$ is 1-dimensional. $H_{2k-|J|}$, the top dimensional homology of $Rat_k$, is spanned by $x$ equal to $\prod_{j\in J} Q^j(g^{-1}Qg)$, and that of $B\beta_{2k}$ by $y$ equal to $\prod_{j \in J} Q^{j+1}g$. Consider the set
\begin{displaymath} S(x) = \{s \geq 0 | \psi(x)|_{H_s \otimes H_{d-s}} \neq 0 \}.\end{displaymath}
Similarly define $S(y)$. The aim is to show that $S(x)$ is not equal to $S(y)$, which implies that the homologies of $Rat_k$ and $B\beta_{2k}$ are not isomorphic as coalgebras. Let $r$ be the smallest integer such that $r \in J$ but $r-1 \notin J$. As $k+1$ is not a power of 2, such an $r$ exists. We observe that $\psi(x)$ is non-zero in $H_{2^r-1} \otimes H_{dim(x)-(2^r-1)}$.
\begin{align*} \psi(x) = \prod_{j \in J} \psi(Q^j(g^{-1}Qg)). \end{align*}
There is at least one term in dimension $H_{2^r-1} \otimes H_{dim(x)-(2^r-1)}$ in the expansion of $\psi(x)$, which is
\begin{align*} Q^rg\prod_{j\in J, j \neq r}g^{2^j} \otimes (g^{-1}Qg)^{2^r}\prod_{j \in J, j \neq r} Q^j(g^{-1}Qg). \end{align*}
As $r-1 \notin J$, there is no other term of this dimension in $\psi(x)$. Hence $2^r-1 \in S(x)$.
\begin{displaymath} \psi(y) = \prod_{j \in J} \Big( \underbrace{g^{2^{j+1}} \otimes Q^{j+1}g}_{0,2^{j+1}-1} + \underbrace{Q^{j+1}g \otimes g^{2^{j+1}}}_{2^{j+1}-1,0} \Big). \end{displaymath}
Note that $\psi(y)$ is zero in dimension $H_{2^r-1} \otimes H_{dim(x) - (2^r-1)}$ as $r-1 \notin J$. Hence $2^{r}-1 \notin S(y)$ and $S(x) \neq S(y)$. This proves that whenever $k+1$ is not a power of 2, the cohomology rings $H^*(Rat_k)$ and $H^*(B\beta_{2k})$ or $H^*(C_k)$ are not isomorphic.
Now assume that $k+1$ is a power of 2, and assume that
\begin{displaymath} k = \sum_0^r 2^j = 2^{r+1} -1.\end{displaymath}
We continue to denote by $x$ the generator of the top dimensional homology group of $Rat_k$,
\begin{displaymath} x = \prod_0^{r} Q^j(g^{-1}Qg), \end{displaymath}
and by $y$ the generator of the top dimensional homology group of $B\beta_{2k}$,
\begin{displaymath} y = \prod_0^{r} Q^{j+1}g. \end{displaymath}
For both spaces, the top dimension of homology is $d = 2^{r+2} - r -3$. Let $S(x)$ and $S(y)$ be as before; we will show that $S(x) \neq S(y)$.
\begin{align*} \psi(y) &= \prod_0^{r} \psi(Q^{j+1}g)\\ &= \prod_0^r \Big( \underbrace{g^{2^{j+1}} \otimes Q^{j+1}g}_{0,2^{j+1}-1} + \underbrace{Q^{j+1}g \otimes g^{2^{j+1}}}_{2^{j+1}-1,0} \Big) \end{align*}
The numbers $(0,2^{j+1}-1)$ appearing below $g^{2^{j+1}} \otimes Q^{j+1}g$ indicate the dimension in homology of the corresponding element, i.e. $g^{2^{j+1}}$ has dimension $0$ and $Q^{j+1}g$ has dimension $2^{j+1}-1$. Hence the dimensions which appear in $\psi(y)$ are precisely those which appear in the expression
\begin{displaymath} \Big( ((1,0) + (0,1)) \cdot ((0,3) + (3,0)) \cdot ((0,7)+(7,0)) \cdot \ldots \Big). \end{displaymath}
From this expression, it is clear that $2 \notin S(y)$ and $5 \notin S(y)$. It turns out that although $2 \notin S(x)$, $5 \in S(x)$.
\begin{displaymath} \psi(x) = \psi(g^{-1}Qg)\prod_1^r \psi(Q^j(g^{-1}Qg)). \end{displaymath}
Using the expressions for $\psi(g^{-1}Qg)$ and $\psi(Q^j(g^{-1}Qg))$, we get that the dimensions which appear in $\psi(x)$ are from the expression
\begin{displaymath} ((0,1)+(1,0)) \cdot \Big( ((0,3)+(1,2)+(2,1)+(3,0)) \cdot ((0,7)+(3,4)+(4,3)+(7,0)) \cdot \ldots \Big). \end{displaymath}
There are exactly two ways to obtain dimension $(2,d-2)$, namely $(1,0) \otimes (1,2) \otimes (0,7) \otimes \ldots$ and $(0,1) \otimes (2,1) \otimes (0,7) \otimes \ldots$. The expression corresponding to the dimensions $(1,0) \otimes (1,2)$ is $(g^{-1}Qg \otimes g) \cdot (Qg \otimes (g^{-1}Qg)^2)$, which is $g^{-1}(Qg)^2 \otimes g^{-1}(Qg)^2$. By symmetry, the expression corresponding to $(0,1) \otimes (2,1)$ is also $g^{-1}(Qg)^2 \otimes g^{-1}(Qg)^2$. Hence,
\begin{align*} \psi(x)|_{H_2 \otimes H_{d-2}} &= g^{-1}(Qg)^2 \otimes g^{-1}(Qg)^2 \prod_2^r (g^{2^j} \otimes Q^j(g^{-1}Qg))\\ &+ g^{-1}(Qg)^2 \otimes g^{-1}(Qg)^2 \prod_2^r (g^{2^j} \otimes Q^j(g^{-1}Qg)). \end{align*}
Because the coefficients of the homology are in $\mathbb{F}_2$, the two terms cancel each other. Hence, $2 \notin S(x)$. There are exactly four ways to obtain dimension $(5,d-5)$, namely $(1,0) \otimes (1,2) \otimes (3,4)$, $(0,1) \otimes (2,1) \otimes (3,4)$, $(1,0) \otimes (0,3) \otimes (4,3)$ and $(0,1) \otimes (1,2) \otimes (4,3)$. From the paragraph above, the first two of these, $(1,0) \otimes (1,2) \otimes (3,4)$ and $(0,1) \otimes (2,1) \otimes (3,4)$, cancel with each other.
Hence,
\begin{align*}\psi(x)|_{H_5 \otimes H_{d-5}} &= (g^{-1}Qg \otimes g)(g^2 \otimes Q(g^{-1}Qg))((g^{-1}Qg)^4 \otimes Q^2(g)) \prod_3^r (g^{2^{j}} \otimes Q^j(g^{-1}Qg))\\ &+ (g \otimes g^{-1}Qg)(Qg \otimes (g^{-1}Qg)^2)((g^{-1}Qg)^4 \otimes Q^2g) \prod_3^r (g^{2^{j}} \otimes Q^j(g^{-1}Qg)), \end{align*}
and therefore
\begin{align*} \psi(x)|_{H_5 \otimes H_{d-5}} &= g^2(g^{-1}Qg)^5 \otimes gQ^2gQ(g^{-1}Qg) \prod_3^r (g^{2^{j}} \otimes Q^j(g^{-1}Qg)) \\ &+ g^2(g^{-1}Qg)^5 \otimes (g^{-1}Qg)^3Q^2g \prod_3^r (g^{2^{j}} \otimes Q^j(g^{-1}Qg)). \end{align*}
Using that $Q(g^{-1}Qg) = g^{-2}Q^2(g) + g^{-4}(Qg)^3$,
\begin{displaymath} \psi(x)|_{H_5 \otimes H_{d-5}} = g^2(g^{-1}Qg)^5 \otimes g^{-1}(Q^2g)^2 \prod_3^r (g^{2^{j}} \otimes Q^j(g^{-1}Qg)). \end{displaymath}
This implies that $5 \in S(x)$, proving the result when $k+1$ is a power of 2. \qed
\section{Some Questions}
The $\mathbb{F}_p$-cohomology rings of $Rat_3$, $B\beta_{6}$ and $C_3$ are isomorphic to each other. For $p > 2$, this follows from Section 6 of \cite{tot90}. For $p = 2$, we can see this by hand. $H_*(B\beta_6)$ is the span of monomials of weight 6 in $\mathbb{F}_2[g, Qg, Q^2g, \cdots]$. These are $g^6$ (dim 0), $g^4Qg$ (dim 1), $g^2(Qg)^2$ (dim 2), $g^2Q^2g$, $(Qg)^3$ (both dim 3) and $Q^2gQg$ (dim 4). $H_*(Rat_3)$ is the span of monomials of weight 3 in $\mathbb{F}_2[g, g^{-1}Qg, Q(g^{-1}Qg), \cdots]$. These are $g^3$ (dim 0), $gQg$ (dim 1), $g^{-1}(Qg)^2$ (dim 2), $g^{-3}(Qg)^3$, $gQ(g^{-1}Qg)$ (both dim 3) and $g^{-1}QgQ(g^{-1}Qg)$ (dim 4). It is easy to check from the above that $H_*(B\beta_6)$ is isomorphic as a coalgebra to $H_*(Rat_3)$, proving that $Rat_3$, $B\beta_{6}$ and $C_3$ have isomorphic $\mathbb{F}_2$-cohomology rings. We can also see by hand that the action of the Steenrod algebra on $H_*(Rat_3)$ and $H_*(B\beta_6)$ is the same. Consider $g^2Q^2g \in H_3(B\beta_6)$. Then
\begin{align*} Sq_1^*(g^2Q^2g) = g^2(Qg)^2. \end{align*}
The element corresponding to $g^2Q^2g$ in $H_3(Rat_3)$ is $gQ(g^{-1}Qg)$.
\begin{align*} Sq_1^*(gQ(g^{-1}Qg)) =& Sq_1^*(g^{-1}Q^2g + g^{-3}(Qg)^3) \\ =& g^{-1}(Qg)^2. \end{align*}
$g^2(Qg)^2 \in H_2(B\beta_6)$ corresponds to $g^{-1}(Qg)^2 \in H_2(Rat_3)$. Similarly, by checking each generator, we can verify that the action of $Sq_1^*$ on $H_*(Rat_3)$ and $H_*(B\beta_6)$ is the same. It is still unknown whether $Rat_3$, $B\beta_{6}$ and $C_3$ have homotopy equivalent $H\mathbb{F}_2$-localizations. It is also still unknown, for $p > 2$ and $k > 2$, whether $Rat_k$, $B\beta_{2k}$ and $C_k$ have homotopy equivalent $H\mathbb{F}_p$-localizations. Cohen-Shimamoto \cite{cohshi91} have shown that $Rat_2$ and $C_2$ are not homotopy equivalent to each other by considering natural ${\mathbb{Z}}$-coverings of these spaces.
\begin{displaymath} \pi_1(Rat_k) \cong \pi_1(C_k) \cong {\mathbb{Z}}. \end{displaymath}
Let ${\widetilde{Rat}}_k$ and ${\widetilde{C}}_k$ be the universal covers of $Rat_k$ and $C_k$ respectively. Let $D_2$ be the ${\mathbb{Z}}/2$-Moore space $S^2 \cup_2 e^3$. It is known that $C_2$ is stably homotopy equivalent to $S^1 \vee D_2$ \cite{sna74}. Cohen-Shimamoto show that $C_2$ is homotopy equivalent to $S^1 \vee D_2$. Hence we can precisely calculate
\begin{displaymath} H_2({\widetilde{C}}_2; {\mathbb{Z}}) \cong \pi_2({\widetilde{C}}_2) \cong \pi_2(C_2), \end{displaymath}
which is infinitely generated. Whereas ${\widetilde{Rat}}_k$ is homotopy equivalent to $R^{-1}(\{1\})$ for the resultant map $R: Rat_k \ra {\mathbb{C}}^*$.
$R^{-1}(\{1\})$ is a finite $CW$-complex and hence $H_*({\widetilde{Rat}}_k; {\mathbb{Z}})$ is finitely generated. This shows that the ${\mathbb{Z}}$-homologies of ${\widetilde{Rat}}_2$ and ${\widetilde{C}}_2$ are not isomorphic and that
\begin{align*} \pi_2(C_2) \ncong \pi_2(Rat_2). \end{align*}
Let $\gamma_k$ be the commutator subgroup $[\beta_k, \beta_k]$ of $\beta_k$. Then there is a short exact sequence
\begin{displaymath} 0 \ra \gamma_k \ra \beta_k \ra {\mathbb{Z}} \ra 0. \end{displaymath}
Let $B\gamma_k$ denote the classifying space of $\gamma_k$. The $\mathbb{F}_p$-homology of $B\gamma_k$ is calculated in (Theorem 4, \cite{cal06}), and it is finitely generated. We conjecture that the homology of ${\widetilde{C}}_k$ is infinitely generated for many values of $k$, whereas we already know that the homologies of ${\widetilde{Rat}}_k$ and $B\gamma_k$ are finitely generated for all $k$.
\end{document}
\begin{document} \maketitle
\begin{abstract}
In this article we deal with different forms of the unique continuation property for second order elliptic equations with nonlinear potentials of sublinear growth. Under suitable regularity assumptions, we prove the weak and the strong unique continuation property. Moreover, we also discuss the unique continuation property from measurable sets, which shows that the nodal sets of solutions to these equations must have vanishing Lebesgue measure. Our methods rely on suitable Carleman estimates, for which we include the sublinear potential into the main part of the operator.
\end{abstract}
\section{Introduction}
This article is devoted to unique continuation properties for second order elliptic equations with \emph{sublinear potentials}. The unique continuation property for second order elliptic equations has a long tradition and many important ramifications. In a sense, it quantifies the rigidity of solutions to these equations. More precisely, the \emph{(weak) unique continuation property} (WUCP) addresses the question of whether, if a given solution $u$ to an equation $Lu=0$ in an open set $\Omega \subset \R^n$ vanishes in an open subset of $\Omega$, i.e. $u= 0$ in $B_{r}(x_0)\subset \Omega$, then $u$ already vanishes globally in $\Omega$, i.e. whether $u\equiv 0$ in $\Omega$. Similarly, it is also possible to ask whether the \emph{vanishing of infinite order} at a point $x_0\in \Omega$ of a solution $u$ to $Lu=0$ in $\Omega$, i.e. the assumption that
\begin{align*}
\lim\limits_{r\rightarrow 0} r^{-m}\int\limits_{B_r(x_0)} u^2 dx = 0 \mbox{ for all } m \in \N,
\end{align*}
already implies the global vanishing of $u$ in $\Omega$. This property is referred to as the \emph{strong unique continuation property} (SUCP). In the context of nodal domain estimates for eigenfunctions of certain operators, the \emph{unique continuation property from measurable sets} (MUCP) also plays an important role, as it asserts that a solution $u$ of $Lu=0$ in $\Omega$, which vanishes on a set $E\subset \Omega$ of positive measure, already vanishes globally. In particular, if the MUCP holds for an equation, it implies that its nodal set has vanishing Lebesgue measure. Prototypical examples of functions which satisfy all of these properties are harmonic functions (since they are analytic). However, these properties hold for a much larger class of second order elliptic operators
\begin{align}
\label{eq:L}
Lu = \p_i a^{ij} \p_j u + W_{1,i} \p_i u + \p_i (W_{2,i} u) + V u
\end{align}
under suitable regularity assumptions on the uniformly elliptic metric $a^{ij}:\Omega \rightarrow \R^{n\times n}_+$, the gradient potentials $W_{1,i}, W_{2,i}: \Omega \rightarrow \R$ and the $L^2$ potential $V: \Omega \rightarrow \R$. The setting of linear second order equations is by now quite well understood: Based on the first works of Carleman \cite{C39}, \emph{Carleman estimates} were developed as a major tool for addressing these problems. After early important results due to Aronszajn-Krzywicki-Szarski \cite{AKS62}, some of the seminal contributions in this context include the work due to Chanillo-Sawyer \cite{CS90}, Kenig-Ruiz-Sogge \cite{KRS87}, Jerison-Kenig \cite{JK85}, who deal with scaling critical potentials in different function spaces, e.g.
$V\in L^{\frac{n}{2}}$, Wolff \cite{W90,W92}, who introduced osculation arguments in order to overcome intrinsic difficulties with Carleman estimates for low regularity gradient potentials (i.e. for potentials close to the critical space $W_{1,i}, W_{2,i} \in L^{n}$), cf. \cite{J86}. Finally, Koch and Tataru \cite{KT01, KT05} showed how to combine Lipschitz continuous metrics $a^{ij}$ with critical function spaces for the gradient potentials $W_{1,i}, W_{2,i}$ and the $L^2$ potential $V$. Counterexamples \cite{P62, M74, M98, KT02, KN00, W92a, W94} show that both in the weak and the strong unique continuation setting these assumptions are essentially sharp. For a more extensive overview of the vast literature on unique continuation properties for second order elliptic equations, we refer to the survey article \cite{W93} and the above cited articles.
A second line of thought was introduced by Garofalo and Lin \cite{GL86, GL87}, who proved similar unique continuation results by means of variational arguments. Their main tool, which has also found numerous applications in other variational problems such as, for instance, free boundary value problems, is a so-called \emph{frequency function}. This is used to measure the growth of solutions to \eqref{eq:L} away from their nodal sets.
With the aforementioned results at hand, it is also possible to study the unique continuation properties of a quite general class of second order \emph{semilinear} elliptic equations. Since in the linear theory the lower order terms (including the potentials $W_{1,i}, W_{2,i}, V$) are usually treated perturbatively, it is in particular possible to deal with equations with \emph{superlinear} potentials, the model problem being given by the equation
\begin{align*}
(-\D) u = |u|^{q-2}u, \ u \in L^{\infty}_{loc}(\Omega)\cap H^1_{loc}(\Omega), \ q\geq 2.
\end{align*}
Indeed, in this case, by setting $V= |u|^{q-2}$ and by using the assumed $L^{\infty}_{loc}$ property of $u$ (which is hence inherited by $V$), we may rewrite the problem in the form \eqref{eq:L}. Similarly, one can however also wonder whether analogous properties hold for \emph{sublinear} potentials, i.e. whether, for instance, solutions to the equation
\begin{align}
\label{eq:mod1}
(-\D) u = |u|^{q-2}u, \ u \in L^{\infty}_{loc}(\Omega)\cap H^1_{loc}(\Omega), \ q \in (1,2),
\end{align}
satisfy the various unique continuation properties from above. Here however, the setting changes -- not only because the previous reasoning of simply defining $V=|u|^{q-2}$ fails, since, in general, with this definition, the potential $V$ need no longer be a function in the space $L^{\frac{n}{2}}$. Indeed, in the \emph{sublinear} regime there are intrinsic difficulties which have to be overcome: Already when studying the related ODEs
\begin{align}
\label{eq:mod11}
u''=f_q(u),
\end{align}
where $|f_q(u)|$ is bounded from below by $|u|^{q-1}$ for $q\in [1,2)$, one observes that in general the unique continuation property fails. For instance, a computation shows that for any $t_0\in \R$ the function
\begin{align*}
u(t)= \left\{ \begin{array}{ll} \left(\frac{2q}{(2-q)^2}\right)^{\frac{1}{q-2}}(t-t_0)^{\frac{2}{2-q}} \mbox{ for } t>t_0,\\ 0 \mbox{ for } t \leq t_0, \end{array} \right.
\end{align*}
is a solution to the equation
\begin{align*}
u'' = |u|^{q-2}u, \ q\in(1,2).
\eta_{\delta,r}nd{align*} Motivated by the study of nonlinear eigenvalue problems, the analysis of the corresponding nodal domains \cite{PW15} and the relation of these problems to porous media type equations \cite{V07}, in a recent article Soave and Weth \cite{SW17} however observed that the right choice of the sign of the nonlinearity in \eta_{\delta,r}qref{eq:mod11} allows one to recover the WUCP. For instance, direct energy methods and ODE arguments show that the solutions to the equation \begin{align*} u''=-|u|^{q-2}u, \ q \in (1,2) \eta_{\delta,r}nd{align*} satisfy the UCP (these arguments even show that all zeros of $u$ must be simple zeros). More generally, in arbitrary dimensions, Soave and Weth \cite{SW17} prove that considering correctly signed equations modelled on the problem \begin{align} \label{eq:model0} \begin{split} -\D u = f_q(u) \mbox{ with } f_q(u) = \left\{ \begin{array}{ll} |u|^{q-2}u \mbox{ if } q \in (1,2),\\ \sgn(u) \mbox{ if } q = 1, \eta_{\delta,r}nd{array} \right. \eta_{\delta,r}nd{split} \eta_{\delta,r}nd{align} it is possible to prove the WUCP. To this end, they adapt the frequency function techniques due to Garofalo and Lin \cite{GL86,GL87} and Garofalo and Smit Vega Garcia \cite{GSVG14}. In their work it however remained open, whether the SUCP and the MUCP hold. In particular, the corresponding estimates on the nodal domains for general sign changing solutions from \cite{PW15} remained open. In this article we address the unique continuation property for these equations by applying Carleman techniques. In particular, under suitable assumptions on the nonlinear potential, we also derive the SUCP and the MUCP, thus settling the question from \cite{PW15} (at least for $q\in (1,2)$). \subsection{The results} Let us discuss the precise results: In order to motivate the problem and the ideas without having to deal with an additional layer of technicalities, in Section \ref{sec:model} we first address the \eta_{\delta,r}mph{constant coefficient setting} and explain the main ideas of our argument for a \eta_{\delta,r}mph{model situation}. Then, in Section \ref{sec:results_var} we generalize these results to \eta_{\delta,r}mph{variable coefficient equations} with more \eta_{\delta,r}mph{general sublinear potentials}. We remark that, as already observed by Soave-Weth \cite{SW17}, in both cases the sign of the sublinear nonlinearity is crucial. \subsubsection{The model case} \label{sec:model} In the sequel, as a model problem we consider a slight generalization of \eta_{\delta,r}qref{eq:model0}. We seek to prove that solutions to this equation posses the (strong) unique continuation property as well as the unique continuation property from measurable sets: \begin{thm} \label{thm:SMUCP} Let $\Omega \subset \R^n$ be open and let $x_0 \in \Omega$. Let $V\in L^{\infty}(\Omega)$. Suppose that $u\in H^1_{loc}(\Omega)\cap L^{\infty}_{loc}(\Omega)$ is a solution to \begin{align} \label{eq:model} \D u + f_q(u)= V u \mbox{ in } \Omega \mbox{ with } f_q(u) = \left\{ \begin{array}{ll} |u|^{q-2}u \mbox{ if } q \in (1,2),\\ \sgn(u) \mbox{ if } q = 1, \eta_{\delta,r}nd{array} \right. \eta_{\delta,r}nd{align} and assume that one of the following conditions holds: \begin{itemize} \item[(a)] $q\in [1,2)$ and there exists a radius $r_0>0$ such that $u$ vanishes on $B_{r_0}(x_0)\subset \Omega $. \item[(b)] $q\in (1,2)$ and $u$ vanishes of infinite order at $x_0\in \Omega$, i.e. for any $m\in \N$ we have $\lim\limits_{r\rightarrow 0} r^{-m} \int\limits_{B_r(x_0)} u^2 dx = 0 $. 
\item[(c)] $q\in (1,2)$ and $u$ vanishes on a measurable set of positive measure, i.e. there exists $E \subset \Omega$ such that $|E|>0$ and $u|_{E}=0$.
\end{itemize}
Then, $u \equiv 0$ in $\Omega$.
\end{thm}

We remark that the sign of $f_q(u)$ is crucial and that any form of the UCP is false if the sign of $f_q(u)$ were to be reversed (c.f. the discussion of the ODE examples from above).

We prove the results of Theorem \ref{thm:SMUCP} by using Carleman estimates. In order to distinguish the setting with a favourable sign for the nonlinearity from the setting with an unfavourable sign, we include the nonlinearity into the leading part of the operator. This is in contrast to the situation of superlinear potentials, which one would typically treat perturbatively.

\begin{thm}
\label{thm:sub_lin_Carl}
Let $q\in [1,2)$. Let $u\in H^1_{loc}(\R^n)\cap L^{\infty}_{loc}(\R^n)$ be a solution of
\begin{align}
\label{eq:eq_main}
\begin{split}
\D u + f_q(u) = g \mbox{ in } \R^n
\end{split}
\end{align}
with support contained in $B_1\setminus \overline{B_{r_1}}$ for some $r_1 \in (0,1/2)$. Define $\phi(x):= \psi(|x|)$ with
\begin{align*}
\psi(r) = -\ln(r) + \frac{1}{10}\left( \ln(r) \arctan(\ln(r))- \frac{1}{2} \ln(1+ \ln^2(r)) \right).
\end{align*}
Then there exist constants $\tau_0>1$ and $C>1$ (which only depend on $n,q$) such that for all $\tau \geq \tau_0$ we have
\begin{align*}
&\tau^{3/2} \|e^{\tau \phi} (1+\ln^2(|x|))^{-1/2} u\|_{L^2(\R^n)} + \tau^{1/2} \|e^{\tau \phi} |x| (1+\ln^2(|x|))^{-1/2} \nabla u \|_{L^2(\R^n)}\\
&+\left(\frac{2-q}{q}\right)^{1/2}\tau \|e^{\tau \phi} |x||u|^{\frac{q}{2}}\|_{L^2(\R^n)} \leq C \|e^{\tau \phi}|x|^2 g\|_{L^2(\R^n)}.
\end{align*}
\end{thm}

\subsubsection{Variable coefficients and more general sublinear terms}
\label{sec:results_var}

More generally, with similar arguments, it is possible to treat the setting of more general nonlinearities $f_q(x,u)$ and equations which involve Lipschitz metrics. In the sequel, we describe the assumptions precisely. For the metric $a^{ij}: \Omega \rightarrow \R^{n\times n}$ we assume that:
\begin{itemize}
\item[(A1)] The metric is uniformly elliptic, i.e. there exist constants $0< \lambda \leq 1 \leq \Lambda< \infty$ such that
\begin{align*}
\lambda |\xi|^2 \leq a^{ij}(x)\xi_i \xi_j \leq \Lambda |\xi|^2.
\end{align*}
\item[(A2)] The metric is Lipschitz continuous, i.e. there exists a constant $\Lambda_0>1$ such that
\begin{align*}
|a^{ij}(x)-a^{ij}(y)|\leq \Lambda_0 |x-y| \mbox{ for } x,y \in \Omega.
\end{align*}
\item[(A3)] The metric is normalized, i.e. we assume that $0 \in \Omega$ and that $a^{ij}(0)=\delta_{ij}$, where $\delta_{ij}$ denotes the Kronecker symbol.
\end{itemize}
We remark that the assumption (A3) can always be imposed without loss of generality, as it can be achieved by a suitable affine change of coordinates.

Compared to the setting in \cite{SW17}, we in the sequel require stronger differentiability conditions for the nonlinearity. More precisely, for $q\in [1,2)$ we impose the following conditions:
\begin{itemize}
\item[(F1)] $0<s f_q(x,s)\leq q F_q(x,s)$ for $s\in (-\epsilon_0, \epsilon_0)\setminus \{0\}$, $x \in \Omega$, where $\epsilon_0>0$ is an arbitrary but fixed constant and where $f_q\in L^{\infty}_{loc}(\Omega \times \R)$ and $F_q: \Omega \times \R \rightarrow \R $ denotes its primitive, i.e.
$F_q(x,s)=\int\limits_{0}^s f_q(x,t)dt$. \\ Additionally, we assume that $f_q(x,0)=0$ for all $x\in \Omega$. \item[(F2)] There exists $\kappa_2>0$ such that $F_q(x,s)\geq \kappa_2$ for all $x\in \Omega$, $s\in \{-\eta_{\delta,r}psilon_0, \eta_{\delta,r}psilon_0\}$. \item[(F3)] For every $s\in (-\eta_{\delta,r}psilon_0, \eta_{\delta,r}psilon_0)$ the functions $f_q(\cdot,s)$ and $F_q(\cdot, s)$ are $C^1$ on $\Omega$ and there exists $\kappa_1>0$ such that \begin{align*} |\nabla_x F_q(x,s)|&\leq \kappa_1 F_q(x,s) \mbox{ for all } x \in \Omega, \ s \in (-\eta_{\delta,r}psilon_0,\eta_{\delta,r}psilon_0),\\ |\nabla_x f_q(x,s)|&\leq \kappa_1 |f_q(x,s)| \mbox{ for all } x \in \Omega, \ s \in (-\eta_{\delta,r}psilon_0, \eta_{\delta,r}psilon_0). \eta_{\delta,r}nd{align*} \item[(F4)] For all $x\in \Omega$ we have $f_q(x,\cdot)\in C^1((-\eta_{\delta,r}psilon_0,\eta_{\delta,r}psilon_0)\setminus \{0\})$ and the function $(x,s) \mapsto g_q(x,s):=s\p_s f_q(x,s)$ is well-defined with $g_q\in L^{\infty}(\Omega \times (-\eta_{\delta,r}psilon_0,\eta_{\delta,r}psilon_0))$. \eta_{\delta,r}nd{itemize} As in \cite{SW17} we remark that the condition (F1) implies that the function $s\mapsto \frac{F_q(x,s)}{|s|^q}$ is non-increasing on $(0,\eta_{\delta,r}psilon_0)$, while it is non-decreasing on $(-\eta_{\delta,r}psilon_0,0)$. In particular, combined with the condition (F2), it provides a lower bound of the form \begin{align*} F_{q}(x,s) \geq \frac{\min\{F_q(x,\eta_{\delta,r}psilon_0), F_q(x,-\eta_{\delta,r}psilon_0)\}}{\eta_{\delta,r}psilon_0^q}|s|^q \geq \frac{\kappa_2}{\eta_{\delta,r}psilon_0^q} |s|^q. \eta_{\delta,r}nd{align*} To simplify notation, we introduce the following abbreviations, which we will use frequently in the sequel: \begin{align*} \hat{f}_q(u)(x):=f_q(x,u(x)),\ \hat{F}_q(u)(x):=F_q(x,u(x)). \eta_{\delta,r}nd{align*} In order to derive the strong unique continuation property and the unique continuation property from measurable sets, we in addition also make the following assumption: \begin{itemize} \item[(F5)] There exists $p\in (1,2)$ and $\kappa_3>0$ such that $|f_q(x,s)|\leq \kappa_3 |s|^{p-1}$ for $x\in \Omega$ and $s\in (-\eta_{\delta,r}psilon_0, \eta_{\delta,r}psilon_0)$. \eta_{\delta,r}nd{itemize} In particular, by the definition of $F_q$ the condition (F5) also entails that \begin{align} \label{eq:F5_1} |F_q(x,s)| \leq \kappa_3 |s|^{p} \mbox{ for all } x \in \Omega, \ s \in (-\eta_{\delta,r}psilon_0,\eta_{\delta,r}psilon_0). \eta_{\delta,r}nd{align} \begin{example} As in \cite{SW17} we remark that an example of an equation for which the conditions (A1)-(A3) and (F1)-(F5) are satisfied is for instance given by \begin{align*} \D u + \sum\limits_{j=1}^{m} c_j(x) |u|^{q_j-2}u = V u, \eta_{\delta,r}nd{align*} where $q_j \in (1,2)$, $c_j, \nabla c_j \in L^{\infty}(\Omega)$ with $c_j>0$, $V \in L^{\infty}(\Omega)$ and $m\in \N$. In particular, the function $f_q(x,s)$ need not have a fixed power growth in $s$, but could for instance consist of a sum of different powers. \eta_{\delta,r}nd{example} Under these conditions, we then study a variable coefficient analogue of the model problem \eta_{\delta,r}qref{eq:model} and prove that analogous unique continuation properties hold: \begin{thm} \label{thm:SMUCP_var} Let $\Omega \subset \R^n$ be open, let $x_0 \in \Omega$ and let $V\in L^{\infty}(\Omega)$. 
Let $u\in H^1_{loc}(\Omega)\cap L^{\infty}_{loc}(\Omega)$ be a solution of
\begin{align}
\label{eq:model_1}
\p_i a^{ij}\p_j u + \hat{f}_q(u)= V u \mbox{ in } \Omega,
\end{align}
where the conditions (A1)-(A3) and (F1)-(F4) are assumed to be valid. Suppose further that one of the following conditions holds:
\begin{itemize}
\item[(a)] $q\in [1,2)$ and there exists a radius $r_0>0$ such that $u$ vanishes on $B_{r_0}(x_0)\subset \Omega$.
\item[(b)] $q\in(1,2)$, the condition (F5) is satisfied and $u$ vanishes of infinite order at $x_0\in \Omega$, i.e. for any $m\in \N$ we have $\lim\limits_{r\rightarrow 0} r^{-m} \int\limits_{B_r(x_0)} u^2 dx = 0 $.
\item[(c)] $q\in (1,2)$, the condition (F5) is satisfied and $u$ vanishes on a measurable set of positive measure, i.e. there exists $E \subset \Omega$ such that $|E|>0$ and $u|_{E}=0$.
\end{itemize}
Then, $u \equiv 0$ in $\Omega$.
\end{thm}

Again the argument is based on a Carleman inequality (c.f. Theorem \ref{prop:varmet}), which is explained in more detail in Section \ref{sec:var}. In order to deal with the Lipschitz coefficients of the metric, we use the ``geodesic normal coordinates'' introduced by Aronszajn, Krzywicki and Szarski in \cite{AKS62}. This is technically more involved than the proof of Theorem \ref{thm:sub_lin_Carl}, but relies on the same ideas.

\subsection{Outline of the article}
The remainder of the article is organized as follows: In Section \ref{sec:prelim} we first recall some basic properties of the solutions to \eqref{eq:model} and to \eqref{eq:model_1}. Then, in Section \ref{sec:CarlI}, we prove the main Carleman estimate, i.e. Theorem \ref{thm:sub_lin_Carl}, in the model case. Based on this, we show how such a Carleman estimate implies the desired results of Theorems \ref{thm:SMUCP} and \ref{thm:SMUCP_var} in Section \ref{sec:proofs}. Finally, in Section \ref{sec:var} we conclude our argument by also deducing the variable coefficient Carleman estimate of Theorem \ref{prop:varmet}.

\section{Preliminaries}
\label{sec:prelim}

In this section, we describe several auxiliary results, which will be used in the proofs of Theorems \ref{thm:SMUCP} and \ref{thm:SMUCP_var}. We begin by defining the notion of a weak solution to \eqref{eq:model_1}:

\begin{defi}
\label{defi:weak_sol}
Assume that the conditions (A1)-(A3) and (F1)-(F4) hold true. Let $u\in H^1_{loc}(\Omega)\cap L^{\infty}_{loc}(\Omega)$ and assume that $x \mapsto f_q(x,u(x))$ is Lebesgue measurable. Then $u$ is a weak solution to \eqref{eq:model_1} if for all compactly supported $\xi \in H^{1}_{loc}(\Omega)\cap L^{\infty}_{loc}(\Omega)$
\begin{align*}
(a \nabla u, \nabla \xi)_{L^2(\Omega)} -(\hat{f}_q( u), \xi)_{L^2(\Omega)} = (Vu, \xi)_{L^2(\Omega)}.
\end{align*}
\end{defi}

Let us discuss the regularity of these solutions: If we only use the assumption that $f_q \in L^{\infty}_{loc}(\Omega \times \R)$, a bootstrap argument based on elliptic regularity estimates directly implies that $u\in W^{2,p}_{loc}$ for all $p\in (1,\infty)$ and also $u\in C^{1,\alpha}_{loc}$ for all $\alpha \in (0,1)$. Due to our strengthening of the regularity conditions (c.f. conditions (F3) and (F4)), this could even be further bootstrapped. As it is not necessary in the sequel, we do not discuss this further.
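For the convenience of the reader, we briefly indicate the first step of this (standard) bootstrap; we only sketch the argument under the stated assumptions. Since $u\in H^1_{loc}(\Omega)\cap L^{\infty}_{loc}(\Omega)$ and $f_q\in L^{\infty}_{loc}(\Omega\times \R)$, the equation \eqref{eq:model_1} can be rewritten with a locally bounded right hand side,
\begin{align*}
\p_i a^{ij}\p_j u = Vu - \hat{f}_q(u) =: h \in L^{\infty}_{loc}(\Omega),
\end{align*}
or, using the Lipschitz continuity of $a^{ij}$, in non-divergence form as $a^{ij}\p_i \p_j u = h - (\p_i a^{ij})\p_j u$ with bounded coefficients $\p_i a^{ij}$. Interior Calder\'on-Zygmund estimates combined with Sobolev embedding then allow one to iterate the integrability of $\nabla u$ and to conclude that $u\in W^{2,p}_{loc}(\Omega)$ for every $p\in(1,\infty)$ and hence $u\in C^{1,\alpha}_{loc}(\Omega)$ for every $\alpha\in(0,1)$.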
Assuming that for some $x_0\in \Omega$ we have $u(x_0)=0$, these regularity results imply that we may always assume that for all $x\in \Omega$ we have $u(x)\in (-\eta_{\delta,r}psilon_0, \eta_{\delta,r}psilon_0)$, where $\eta_{\delta,r}psilon_0>0$ is the constant from the conditions (F1)-(F4). Indeed, if this were not the case, we could simply decrease the size of $\Omega$. In the sequel, we will always assume that this has already been carried out. \\ In the following arguments, we will often use that solutions to \eta_{\delta,r}qref{eq:model_1} satisfy elliptic gradient estimates: \begin{lem}[Caccioppoli] \label{lem:Cacc} Let (A1)-(A3) and (F1)-(F4) hold. Let $u\in H^{1}_{loc}(B_4)\cap L^{\infty}_{loc}(B_4)$ be a solution to \begin{align*} \p_i a^{ij} \p_j u + \hat{f}_q(u)= V u \mbox{ in } B_4, \eta_{\delta,r}nd{align*} where $V \in L^{\infty}(B_4)$. Then for any $r\in (0,2)$ we have \begin{align*} \|\nabla u\|_{L^2(B_{r})} \leq C \left(\left\|\frac{|\hat{F}_q(u)|^{1/2}}{|u|^{1/2}}\right\|_{L^2(B_{2r})} + r^{-1}\|u\|_{L^2(B_{2r})} \right). \eta_{\delta,r}nd{align*} \eta_{\delta,r}nd{lem} Here and in the sequel we have used the notation $B_{r}=B_{r}(0)$ in order to denote the ball of radius $r>0$ centered at zero. \begin{proof} The proof follows from the usual integration by parts identities. Indeed, let $\eta_{\delta,r}ta:B_{4}\rightarrow [0,\infty)$ be a cut-off function, which is equal to one in $B_{r}$ and which vanishes outside of $B_{2r}$ and which satisfies $|\nabla \eta_{\delta,r}ta|\leq \frac{C}{r}$, $|D^2 \eta_{\delta,r}ta|\leq \frac{C}{r^2}$. Then, \begin{align*} \lambda \int\limits_{B_{2r}}|\nabla (u\eta_{\delta,r}ta)|^2 dx &\leq \int\limits_{B_{2r}} a^{ij} \p_i(u\eta_{\delta,r}ta) \p_j(u \eta_{\delta,r}ta) dx = \int\limits_{B_{2r}} a^{ij} \eta_{\delta,r}ta(\p_i u)\p_j(u\eta_{\delta,r}ta) dx + \int\limits_{B_{2r}} a^{ij} u (\p_i \eta_{\delta,r}ta) \p_j(u\eta_{\delta,r}ta)dx\\ & = \int\limits_{B_{2r}} a^{ij}\p_i u \p_j(u \eta_{\delta,r}ta^2) dx + \int\limits_{B_{2r}} a^{ij} u^2 (\p_i \eta_{\delta,r}ta)( \p_j \eta_{\delta,r}ta) dx\\ &= \int\limits_{B_{2r}} \hat{f}_q(u) u \eta_{\delta,r}ta^2 dx + \int\limits_{B_{2r}} Vu^2 \eta_{\delta,r}ta^2 dx + \int\limits_{B_{2r}} u^2 a^{ij} (\p_i \eta_{\delta,r}ta)(\p_j \eta_{\delta,r}ta) dx\\ & \leq C_{\lambda, \Lambda,n}(\|V\|_{L^{\infty}}+1 + r^{-2})\|u\|_{L^2(B_{2r})}^2 + \left\| \eta_{\delta,r}ta \frac{|\hat{F}_q(u)|^{1/2}}{|u|^{1/2}} \right\|_{L^2(B_{2r})}^2. \eta_{\delta,r}nd{align*} Here we used the (weak) equation as well as Hölder's inequality. Using that $\eta_{\delta,r}ta=1$ on $B_r$ then implies the desired estimate. \eta_{\delta,r}nd{proof} Next, we show that the infinite order of vanishing can be equivalently defined by various different norms for solutions to \eta_{\delta,r}qref{eq:model_1}. \begin{lem}[Order of vanishing] \label{lem:order_of_van} Let the conditions (A1)-(A3) and (F1)-(F4) hold. Let $u\in H^{1}_{loc}(B_4)\cap L^{\infty}_{loc}(B_4)$ be a solution to \begin{align*} \p_i a^{ij} \p_j u + \hat{f}_q(u)= V u \mbox{ in } B_4, \eta_{\delta,r}nd{align*} where $V \in L^{\infty}(B_4)$ and $q\in (1,2)$. Then the following are equivalent: \begin{itemize} \item[(i)] For all $m\in \N$ we have \begin{align} \label{eq:L2_oov} \lim\limits_{r\rightarrow 0} r^{-m} \int\limits_{B_{r}(0)} u^2 dx = 0. \eta_{\delta,r}nd{align} \item[(ii)] For some $\eta_{\delta,r}ll >0$ and all $m\in \N$ we have \begin{align} \label{eq:Lq_oov} \lim\limits_{r\rightarrow 0} r^{-m} \int\limits_{B_{r}(0)} |u|^{\eta_{\delta,r}ll} dx = 0. 
\eta_{\delta,r}nd{align} \eta_{\delta,r}nd{itemize} If in addition the condition (F5) is satisfied and if (i) holds, then also the function $\hat{f}_q(u)$ vanishes of infinite order, i.e. for all $m\in \N$ \begin{align*} \lim\limits_{r\rightarrow 0} r^{-m} \int\limits_{B_{r}(0)} \hat{f}_q(u) dx = 0. \eta_{\delta,r}nd{align*} \eta_{\delta,r}nd{lem} \begin{proof} Due to the regularity of solutions to \eta_{\delta,r}qref{eq:model_1} and due to the assumption that $\eta_{\delta,r}ll>0$, the implication $\eta_{\delta,r}qref{eq:L2_oov} \Rightarrow \eta_{\delta,r}qref{eq:Lq_oov}$ follows from Hölder's inequality and the fact that $u\in L^{\infty}_{loc}$. The reverse implication follows from the assumption that $u \in L^{\infty}_{loc}$. In the case that the condition (F5) is satisfied, we have \begin{align*} 0\leq \lim\limits_{r\rightarrow 0} r^{-m} \int\limits_{B_{r}(0)} \hat{f}_q(u) dx \leq \kappa_3 \lim\limits_{r\rightarrow 0} r^{-m} \int\limits_{B_{r}(0)} |u|^{p-1} dx =0, \eta_{\delta,r}nd{align*} where for the last inequality, we used the equivalence of (i) and (ii) and the fact that $p-1>0$. \eta_{\delta,r}nd{proof} \section{The Carleman Estimate in the Model Set-Up} \label{sec:CarlI} In this section, we present the argument for Theorem \ref{thm:sub_lin_Carl}. The variable coefficient analogue will be proved in Section \ref{sec:var}, where we deal with the full problem (which also involves Lipschitz metrics). \subsection{Proof of Theorem \ref{thm:sub_lin_Carl}} The main idea leading to the Carleman estimate from Theorem \ref{thm:sub_lin_Carl} is to include the sublinear potential into the main operator instead of dealing with it perturbatively (as one would usually do for superlinear potentials). \begin{proof}[Proof of Theorem \ref{thm:sub_lin_Carl}] We separate the proof into several steps:\\ \eta_{\delta,r}mph{Step 1: Conjugation.} We introduce conformal polar coordinates $x=e^{t}\theta$ with $(t,\theta) \in \R \times S^{n-1}$. In these the Laplacian reads \begin{align*} |x|^2\D = \p_t^2 + (n-2)\p_t + \D_{S^{n-1}}. \eta_{\delta,r}nd{align*} Conjugating this with $e^{- \frac{n-2}{2}t}$ yields \begin{align*} e^{\frac{n-2}{2}t} (\p_t^2 + (n-2)\p_t + \D_{S^{n-1}}) e^{- \frac{n-2}{2}t} = \p_t^2 - \frac{(n-2)^2}{4} + \D_{S^{n-1}}. \eta_{\delta,r}nd{align*} To achieve this, we consider the function $\tilde{v}(t,\theta) := e^{-\frac{n-2}{2}t}u(e^{t}\theta)$. The equation \eta_{\delta,r}qref{eq:eq_main} then turns into \begin{align} \label{eq:eq1} \left(\p_t^2 - \frac{(n-2)^2}{4} + \D_{S^{n-1}} + e^{2t}\frac{\tilde{f}_q(\tilde{u})}{\tilde{u}}\right) \tilde{v} = \tilde{g}, \eta_{\delta,r}nd{align} where $\tilde{g}(t,\theta) = e^{2t} g(e^t \theta)$, $\tilde{u}(t,\theta):=u(e^{t}\theta)$ and $\tilde{f}_q(\tilde{u})(t,\theta):=\hat{f}_q(\tilde{u})(e^t\theta)$. In order to prove the Carleman estimate from Theorem \ref{thm:sub_lin_Carl}, we argue by means of the usual conjugation argument and conjugate \eta_{\delta,r}qref{eq:eq1} with the weight $e^{\tau \varphi}$, where $\varphi(t) = \psi(e^t)$. This yields the following symmetric and antisymmetric parts for the conjugated operator $L_{\varphi}:=S+A$: \begin{align} \label{eq:separate} \begin{split} S &= \p_t^2 + \D_{S^{n-1}} + \tau^2 (\varphi')^2 - \frac{(n-2)^2}{4} + h_q(\tilde{u}),\\ A &= -2\tau \varphi' \p_t - \tau \varphi''. \eta_{\delta,r}nd{split} \eta_{\delta,r}nd{align} For ease of notation, we have abbreviated $h_q(\tilde{u}):= e^{2t}\frac{\tilde{f}_q(\tilde{u})}{\tilde{u}}$. 
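For the convenience of the reader, we record the elementary computation behind the splitting \eqref{eq:separate}. Conjugation of $\p_t^2$ with the weight $e^{\tau \varphi}$ gives
\begin{align*}
e^{\tau \varphi}\p_t^2 e^{-\tau \varphi} = \p_t^2 - 2\tau \varphi' \p_t + \tau^2 (\varphi')^2 - \tau \varphi''.
\end{align*}
In $L^2(\R\times S^{n-1})$ multiplication operators are symmetric and, since $(-2\tau \varphi' \p_t)^{\ast} = 2\tau \varphi' \p_t + 2\tau \varphi''$, the first order operator $-2\tau \varphi' \p_t - \tau \varphi''$ is antisymmetric, while all remaining contributions ($\p_t^2$, $\D_{S^{n-1}}$ and the multiplication operators) are symmetric. This yields precisely the operators $S$ and $A$ in \eqref{eq:separate}.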
The Carleman estimate then follows from the expansion \begin{align} \label{eq:expand} \|A v\|_{L^2}^2 + \|Sv\|_{L^2}^2 + \int\limits_{\R \times S^{n-1}}([S,A]v,v)dt d\theta = \|Lv\|_{L^2}^2, \eta_{\delta,r}nd{align} where $v= e^{\tau \varphi} \tilde{v}$ and where we abbreviate $(\cdot, \cdot):=(\cdot,\cdot)_{L^2(\R \times S^{n-1})}$. More precisely, the Carleman estimate follows, if we can prove lower bounds for the commutator $[S,A]$. With respect to the usual commutator estimate for $L^2$ Carleman estimates, only the terms involving $h_q$ are new. Indeed, by choosing $\tau \geq \tau_0>1$ for some sufficiently large constant $\tau_0$ and recalling our choice of $\varphi$, the ``standard commutator term" $([\p_t^2 + \D_{S^{n-1}} + \tau^2 (\varphi')^2 - \frac{(n-2)^2}{4}, -2\tau \varphi' \p_t - \tau \varphi'']v,v)$ can be controlled as follows \begin{align} \label{eq:standard_Carl} \begin{split} &([\p_t^2 + \D_{S^{n-1}} + \tau^2 (\varphi')^2 - \frac{(n-2)^2}{4}, -2\tau \varphi' \p_t - \tau \varphi'']v,v)\\ &= 4 \tau^3 (\varphi'' (\varphi')^2 v,v) - \tau (\varphi'''' v, v) + 4 \tau (\varphi'' \p_t v, \p_t v)\\ &\geq 3\tau^{3}\||\varphi''|^{1/2} v\|_{L^2}^2 + 3 \tau \||\varphi''|^{1/2} \p_t v\|_{L^2}^2 . \eta_{\delta,r}nd{split} \eta_{\delta,r}nd{align} Hence, in Step 2, we mainly consider the new, nonlinear contribution $([h_q(\tilde{u}),-2\tau \varphi' \p_t - \tau \varphi''] v, v)$. In Step 3, we then exploit the commutator estimate for the sublinear term together with the symmetric operator $S$ in order to upgrade the (radial) gradient estimate from \eta_{\delta,r}qref{eq:standard_Carl} to a full gradient estimate (also controlling the spherical part of the gradient).\\ \eta_{\delta,r}mph{Step 2: The sublinear nonlinearity.} We compute \begin{align} \label{eq:comm_0} -2\tau [ h_q(\tilde{u}), \varphi' \p_t ] = 2 \tau (q-2) \varphi' e^{2t} \sgn(\tilde{u})|\tilde{u}|^{q-3} \p_t \tilde{u} + 4\tau \varphi' h_q(\tilde{u}). \eta_{\delta,r}nd{align} As a consequence, by the identity $v=e^{\tau \varphi} e^{\frac{2-n}{2}t} \tilde{u}$, \begin{align} \label{eq:comm} \begin{split} -2\tau (v,[ h_q(\tilde{u}), \varphi' \p_t ] v) &= 2\tau (q-2) (e^{2t} \varphi' v^2 \sgn(\tilde{u})|\tilde{u}|^{q-3}, \p_t \tilde{u} ) + 4 \tau (e^{2t}\varphi' |\tilde{u}|^{q-2}v,v)\\ &= 2 \tau (q-2) ( l(t) |\tilde{u}|^{q-2} \tilde{u}, \p_t \tilde{u}) + 4 \tau ( e^{2t}\varphi' v, |\tilde{u}|^{q-2}v), \eta_{\delta,r}nd{split} \eta_{\delta,r}nd{align} where all scalar products are those of the Hilbert space $L^2(\R \times S^{n-1})$, $l(t):= e^{(2-n)t}e^{2t}\varphi'(t)e^{2 \tau \varphi}$. We further study the first term on the right hand side of \eta_{\delta,r}qref{eq:comm}: Noting that $|\tilde{u}|^{q-2}\tilde{u} \p_t \tilde{u}= \p_t \frac{1}{q}|\tilde{u}|^{q}$ leads to \begin{align*} ( l(t) |\tilde{u}|^{q-2} \tilde{u}, \p_t \tilde{u} ) = - \frac{1}{q}( l'(t) |\tilde{u}|^{q-2} \tilde{u},\tilde{u} ). \eta_{\delta,r}nd{align*} Inserting this back into \eta_{\delta,r}qref{eq:comm} implies \begin{align} \label{eq:comm2} \begin{split} -2\tau (v,[ h_q(\tilde{u}), \varphi' \p_t ] v) &= -\tau \frac{2(q-2)}{q}( l'(t) |\tilde{u}|^{q-2} \tilde{u},\tilde{u}) + 4 \tau (e^{2t}\varphi' v |\tilde{u}|^{q-2},v)\\ & = -\tau \frac{2(q-2)}{q}\left( e^{2t} \left(\varphi''(t) + 2 \tau (\varphi'(t))^2 +\left(4-n\right)\varphi'(t) \right)|\tilde{u}|^{q-2} v, v \right)\\ & \quad + 4 \tau (e^{2t}\varphi' v |\tilde{u}|^{q-2},v). 
\end{split}
\end{align}
Since $\varphi'' \geq 0$ and $q-2\leq 0$, the first two terms in \eqref{eq:comm2} are positive. The last two terms are not necessarily signed, but by choosing $\tau \geq \tau_0>0$ sufficiently large and by recalling the explicit choice of our Carleman weight $\varphi$, they can be absorbed into the second contribution. In particular, combining the estimates \eqref{eq:standard_Carl} and \eqref{eq:comm2} then leads to the estimate
\begin{align}
\label{eq:est_wo_grad}
\tau \left(\frac{2-q}{q} \right)^{\frac{1}{2}} \|e^{t} |\tilde{u}|^{\frac{q-2}{2}}v\|_{L^2} + \tau^{\frac{3}{2}} \||\varphi''|^{\frac{1}{2}}v\|_{L^2} + \tau^{\frac{1}{2}} \||\varphi''|^{\frac{1}{2}} \p_t v \|_{L^2} \leq C \|Lv\|_{L^2}.
\end{align}
Returning to Cartesian coordinates, this yields all the terms in the Carleman estimate, with the exception of the estimate for the spherical component of the gradient.\\

\emph{Step 3: Deriving the full gradient estimate.} Last but not least, we upgrade the gradient estimate from \eqref{eq:standard_Carl}, which only involves the radial derivatives, to a full gradient estimate. To this end, we exploit the symmetric part $S$ of the operator. Indeed, testing the symmetric part with $\tau c_q \varphi'' v$ for a sufficiently small constant $c_q \in \left(0,\left(\frac{2-q}{q}\right)^{1/2}\right)$ and using \eqref{eq:est_wo_grad}, we infer
\begin{align*}
c_q \tau \||\varphi''|^{1/2} \nabla_{S^{n-1}}v\|_{L^2}^2 &\leq c_q \tau |(Sv, \varphi'' v)| + c_q \tau \||\varphi''|^{1/2} \p_t v\|_{L^2}^2 + c_q \tau^3 \||\varphi''|^{1/2} v\|_{L^2}^2 + c_q \tau \|e^t |\tilde{u}|^{\frac{q-2}{2}}v\|_{L^2}^2\\
& \leq \frac{1}{2}\|S v\|_{L^2}^2 + C c_q \tau^2 \||\varphi''|^{1/2}v\|_{L^2}^2\\
& \quad + c_q \tau \||\varphi''|^{1/2} \p_t v\|_{L^2}^2 + c_q \tau^3 \||\varphi''|^{1/2} v\|_{L^2}^2 + c_q \tau \|e^t|\tilde{u}|^{\frac{q-2}{2}}v\|_{L^2}^2\\
&\leq \frac{1}{2}\|S v\|_{L^2}^2 + ([S,A]v,v) \leq \|L v\|_{L^2}^2.
\end{align*}
As a consequence, we may include the full gradient term into the Carleman estimate. This concludes the proof of Theorem \ref{thm:sub_lin_Carl}.
\end{proof}

\section{Proof of Theorems \ref{thm:SMUCP} and \ref{thm:SMUCP_var}}
\label{sec:proofs}

In this section we present the proof of Theorems \ref{thm:SMUCP} and \ref{thm:SMUCP_var} starting from the corresponding Carleman estimates (Theorems \ref{thm:sub_lin_Carl} and \ref{prop:varmet}). For the variable coefficient setting, the corresponding Carleman estimate will be proved in Section \ref{sec:Carl_var}. By the regularity estimates and the discussion in Section \ref{sec:prelim}, we may assume that for $x\in \Omega$ we have $u(x) \in (-\epsilon_0, \epsilon_0)$.

In the sequel, we first prove part (b) of Theorems \ref{thm:SMUCP} and \ref{thm:SMUCP_var}, which in particular also implies (a) in the case $q\in (1,2)$. Then we explain the modifications that allow us to prove the property (a) in the limiting case $q=1$. Last but not least, we explain the derivation of part (c) of the corresponding theorems.

\subsection{Proof of Theorems \ref{thm:SMUCP}(b) and \ref{thm:SMUCP_var}(b)}
The proof of the SUCP is a direct consequence of the Carleman estimate. Indeed, we apply it to a cut-off of $u$. Using the vanishing of infinite order, we are able to remove the cut-off around zero if $q\in (1,2)$.
\begin{proof}[Proof of Theorems \ref{thm:SMUCP}(b) and \ref{thm:SMUCP_var}(b)]
Since $\Omega$ is open, translation and scaling allow us to assume that $B_{4} \subset \Omega$. For $\epsilon \in (0,1)$ let $\eta_{\epsilon}: B_4 \rightarrow [0,\infty)$ be a cut-off function which is supported in $B_{2}\setminus B_{\epsilon}$, which is equal to one in $B_1\setminus B_{2 \epsilon}$ and which satisfies the bounds
\begin{align}
\label{eq:eta}
\begin{split}
& |\nabla \eta_{\epsilon}(x)| \leq \frac{C}{\epsilon}, \ |D^2 \eta_{\epsilon}(x)| \leq \frac{C}{\epsilon^2} \mbox{ for all } x \in B_{2 \epsilon} \setminus B_{\epsilon},\\
& |\nabla \eta_{\epsilon}(x)| \leq C, \ |D^2 \eta_{\epsilon}(x)| \leq C \mbox{ for all } x \in B_{2} \setminus B_{1},
\end{split}
\end{align}
where $C>0$ is independent of $\epsilon>0$. Then the function $v_{\epsilon}:= u \eta_{\epsilon}$ satisfies
\begin{align}
\label{eq:cut_off_eq}
\p_i a^{ij} \p_j v_{\epsilon} + \hat{f}_q(v_{\epsilon}) = V v_{\epsilon} +(\hat{f}_q(v_{\epsilon})-\eta_{\epsilon} \hat{f}_q(u)) + 2 a^{ij}\p_i \eta_{\epsilon} \p_j u + u \p_i a^{ij}\p_j \eta_{\epsilon} \mbox{ in } B_4.
\end{align}
We apply the Carleman estimate from Theorem \ref{thm:sub_lin_Carl} to $v_{\epsilon}$, which leads to
\begin{align}
\label{eq:Carl_appl}
\begin{split}
&\tau^{3/2}\|e^{\tau \phi} (1+\ln^2(|x|))^{-\frac{1}{2}} v_{\epsilon}\|_{L^2(B_2)} + \tau^{1/2} \|e^{\tau \phi} (1+\ln^2(|x|))^{-\frac{1}{2}}|x| \nabla v_{\epsilon}\|_{L^2(B_2)}\\
& +\tau \left( \frac{2-q}{q} \right)^{\frac{1}{2}} \|e^{\tau \phi} |x||v_{\epsilon}|^{\frac{q}{2}}\|_{L^2(B_2)} \\
&\leq C \left( \|e^{\tau \phi} |x|^2 V v_{\epsilon}\|_{L^2(B_2)} + \|e^{\tau \phi}|x|^2(\hat{f}_q(v_{\epsilon})-\eta_{\epsilon}\hat{f}_{q}(u))\|_{L^2(B_2)} + 2 \Lambda \|e^{\tau \phi} |x|^2 |\nabla \eta_{\epsilon}|| \nabla u|\|_{L^2(B_2)} \right.\\
& \quad \left. + \|e^{\tau \phi}|x|^2 u |D^2 \eta_{\epsilon}|\|_{L^2(B_2)} \right).
\end{split}
\end{align}
We seek to pass to the limit $\epsilon \rightarrow 0$. Since none of the constants in the estimate \eqref{eq:Carl_appl} depends on $\epsilon>0$ and using the bounds in \eqref{eq:eta}, this can be achieved by invoking the infinite order of vanishing of $u$. Indeed, this directly allows us to pass to the limit $\epsilon \rightarrow 0$ in all $L^2$ terms involving $v_{\epsilon}$ or $u$. In order to deal with the gradient terms and the nonlinearity on the left hand side, we apply Lemmas \ref{lem:Cacc} and \ref{lem:order_of_van}. For the nonlinearity on the right hand side, we use the condition (F5) in combination with the second part of Lemma \ref{lem:order_of_van}.
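To illustrate this in a typical case (a rough sketch; the exponents are not optimized), consider the last term on the right hand side of \eqref{eq:Carl_appl}. Since $\psi(r)\leq (1+\frac{\pi}{20})|\ln(r)|$ for $r\in(0,1)$, we have $e^{\tau \phi(x)}|x|^2 \leq |x|^{2-\tau(1+\frac{\pi}{20})}$, and hence, by \eqref{eq:eta},
\begin{align*}
\|e^{\tau \phi}|x|^2 u |D^2 \eta_{\epsilon}|\|_{L^2(B_{2\epsilon}\setminus B_{\epsilon})} \leq C_{\tau}\, \epsilon^{-\tau(1+\frac{\pi}{20})} \|u\|_{L^2(B_{2\epsilon})},
\end{align*}
which, for each fixed $\tau\geq \tau_0$, tends to zero as $\epsilon \rightarrow 0$ by the infinite order of vanishing of $u$ (it suffices to invoke the assumption with some $m> 2\tau(1+\frac{\pi}{20})$). The gradient term is treated analogously after an application of Lemma \ref{lem:Cacc}.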
Setting $v_0:=\eta_0 u$ (where $\eta_0$ is the pointwise limit of $\eta_{\epsilon}$; in particular, $\eta_0=1$ in $B_1$) then implies
\begin{align}
\label{eq:Carl_appl_1}
\begin{split}
&\tau^{3/2}\|e^{\tau \phi} (1+\ln^2(|x|))^{-\frac{1}{2}}v_0\|_{L^2(B_2)} + \tau^{1/2} \|e^{\tau \phi} (1+\ln^2(|x|))^{-\frac{1}{2}}|x| \nabla v_0\|_{L^2(B_2)}\\
& + \tau \left( \frac{2-q}{q} \right)^{\frac{1}{2}} \|e^{\tau \phi} |x||v_0|^{\frac{q}{2}}\|_{L^2(B_2)}\\
&\leq C \left( \|e^{\tau \phi} |x|^2 V v_0\|_{L^2(B_2)} + \|e^{\tau \phi}|x|^2(\hat{f}_q(v_0)-\hat{f}_{q}(u))\|_{L^2(B_2)} + 2 \|e^{\tau \phi} |x|^2 |\nabla \eta_0|| \nabla u|\|_{L^2(B_2)} \right.\\
& \quad \left. + \|e^{\tau \phi}|x|^2 u |D^2 \eta_{0}|\|_{L^2(B_2)} \right).
\end{split}
\end{align}
By virtue of the $L^{\infty}$ boundedness of $V$, we may further absorb the first term on the right hand side of \eqref{eq:Carl_appl_1} into the left hand side of \eqref{eq:Carl_appl_1} if $\tau$ is chosen such that $\tau \geq \tau_0(\|V\|_{L^{\infty}})$. Using that $\eta_0=1$ in $B_1$, which in particular entails that
\begin{align*}
\supp(|\nabla \eta_0||\nabla u|), \supp(|D^2 \eta_0||u|), \supp(|\hat{f}_q(v_0)-\hat{f}_q(u)|) \subset \overline{B_2 \setminus B_1},
\end{align*}
we then further estimate
\begin{align*}
\begin{split}
&e^{\tau \psi(1/2)} \left(\tau^{3/2}\| (1+\ln^2(|x|))^{-\frac{1}{2}} v_0 \|_{L^2(B_{1/2})} + \tau^{1/2} \|(1+\ln^2(|x|))^{-\frac{1}{2}} |x| \nabla v_0\|_{L^2(B_{1/2})} \right.\\
& \left. + \tau \left( \frac{2-q}{q} \right)^{\frac{1}{2}} \||x| |v_0|^{\frac{q}{2}}\|_{L^2(B_{1/2})} \right)\\
&\leq C e^{\tau \psi(1)} \left( \||x|^2(\hat{f}_q(v_0)-\hat{f}_{q}(u))\|_{L^2(B_2\setminus B_1)} + 2 \| |x|^2 |\nabla \eta_0||\nabla u|\|_{L^2(B_2\setminus B_1)} \right.\\
& \quad \left. + \||x|^2 u |D^2 \eta_{0}|\|_{L^2(B_2\setminus B_1)} \right).
\end{split}
\end{align*}
Dividing by $e^{\tau \psi(1/2)}$ and passing to the limit $\tau \rightarrow \infty$ (and recalling the a priori estimates for $u$) then implies that $u\equiv 0$ in $B_{1/2}$. Iterating this argument yields that $u\equiv 0$ in $\Omega$.
\end{proof}

We remark that this proof simultaneously deals with the situation of Theorems \ref{thm:SMUCP} and \ref{thm:SMUCP_var}.

\subsection{Proof of Theorems \ref{thm:SMUCP}(a) and \ref{thm:SMUCP_var}(a)}
Without loss of generality, we may assume that $u\equiv 0$ in $B_{r_0}(0)$ for some $r_0 \in (0,1/4)$. The proofs of Theorems \ref{thm:SMUCP}(a) and \ref{thm:SMUCP_var}(a) then proceed analogously to the one which was explained in the previous subsection. However, as $u\equiv 0$ in $B_{r_0}(0)$, we do not need to use a cut-off function close to zero, but can directly consider a bump function: More precisely, we consider a cut-off $\eta_{0}:B_4 \rightarrow [0,\infty)$ which is supported in $B_2$, is equal to one in $B_{1/2}$ and satisfies the bounds
\begin{align*}
|\nabla \eta_{0}|\leq C, \ |D^2 \eta_{0}| \leq C \mbox{ for all } x \in B_2 \setminus B_1.
\end{align*}
As a consequence, inserting $v_0:= u \eta_0$ into the Carleman estimates from Theorems \ref{thm:sub_lin_Carl} and \ref{prop:varmet}, we directly infer the estimate \eqref{eq:Carl_appl_1}, from which we conclude as in the previous section.

\subsection{Proof of Theorems \ref{thm:SMUCP}(c) and \ref{thm:SMUCP_var}(c)}
We reduce the statement of Theorems \ref{thm:SMUCP}(c) and \ref{thm:SMUCP_var}(c) to that of Theorems \ref{thm:SMUCP}(b) and \ref{thm:SMUCP_var}(b) by proving a suitable growth estimate.

\begin{proof}[Proof of Theorems \ref{thm:SMUCP}(c) and \ref{thm:SMUCP_var}(c)]
Assume that there exists a measurable set $E \subset \Omega$ such that $|E|>0$ and $u\equiv 0$ on $E$. By translation, without loss of generality we may assume that $0\in E$ and that $0$ is a point of density one of $E$. In particular, for any $\epsilon>0$ there exists a radius $r_{\epsilon}>0$ such that
\begin{align}
\label{eq:dense}
\frac{|B_{r}\setminus E|}{|B_{r}|} \leq \epsilon \mbox{ for all } r \in (0,r_{\epsilon}).
\end{align}
Thus, for $r\in (0,\min\{r_{\epsilon},1/2\})$ we obtain for some constant $C>1$, which depends on $\|V\|_{L^{\infty}(B_2)}, \lambda, \Lambda, \Lambda_0$ and $n$ and which may change from line to line,
\begin{align}
\label{eq:growth}
\begin{split}
\|u\|_{L^2(B_{r})} &= \|u\|_{L^2(B_{r}\setminus E)} \leq |B_{r}\setminus E|^{1/n} \|u\|_{L^{2^*}(B_r \setminus E)} \leq C|B_{r}\setminus E|^{1/n}\|\nabla u\|_{L^2(B_r)}\\
&\leq C|B_{r}\setminus E|^{1/n}\left( r^{-1}\|u\|_{L^2(B_{2r})} + \left\|\frac{|\hat{F}_q(u)|^{1/2}}{|u|^{1/2}}\right\|_{L^2(B_{2r})} \right)\\
&\stackrel{(F5)}{\leq} C|B_{r}\setminus E|^{1/n}\left( r^{-1}\|u\|_{L^2(B_{2r})} + \left\||u|^{\frac{p-1}{2}}\right\|_{L^2(B_{2r})} \right)\\
&\leq C \epsilon^{\frac{1}{n}} r (r^{-1} + r^{\gamma(p,n)})\|u\|_{L^2(B_{2r})}\\
&\leq C \epsilon^{1/n} \|u\|_{L^2(B_{2r})}
\end{split}
\end{align}
for some $\gamma(p,n)>0$ (which is obtained by an application of H\"older's inequality). Here we have used the vanishing of $u$ on $E$, the condition (F5) in combination with H\"older's and Sobolev's inequalities, the fact that $0<r\leq 1$ and the density estimate \eqref{eq:dense}.

Next, we fix $m\in \N$ and choose $\epsilon>0$ such that
\begin{align*}
C \epsilon^{1/n} \leq \frac{1}{2^m}.
\end{align*}
This then implies the growth estimate
\begin{align*}
\|u\|_{L^2(B_{r})} \leq 2^{-m}\|u\|_{L^2(B_{2r})}.
\end{align*}
This can be iterated as long as $2^{k} r \leq r_m:=r_{\frac{1}{2^m}}$ (with $r_{\frac{1}{2^m}}$ denoting the corresponding radius in \eqref{eq:dense}). In particular, it implies that for $r\in (2^{-k-1}r_m,2^{-k}r_m)$ we have
\begin{align*}
\|u\|_{L^2(B_{r})} \leq 2^{-km}\|u\|_{L^2(B_{r_{m}})}.
\end{align*}
As we can argue in the same way for any $m\in \N$, we infer the infinite order of vanishing of $u$ at $x_0=0$. The strong unique continuation property from Theorems \ref{thm:SMUCP}(b) and \ref{thm:SMUCP_var}(b) then implies that $u\equiv 0$ in $\Omega$.
\end{proof}

\begin{rmk}
\label{rmk:q0}
We emphasize that from a technical point of view the ``only'' obstruction in the above arguments preventing us from also deriving the SUCP and the MUCP for the case $q=1$ consists of justifying the support assumption, which we used above, for the strong $L^2$ limit
\begin{align*}
\lim\limits_{\epsilon \rightarrow 0}e^{\tau \phi} |x|^2 (\hat{f}_q(v_{\epsilon}) - \eta_{\epsilon} \hat{f}_q(u)).
\end{align*}
The second main technical point in which we used $q>1$, i.e. the estimate \eqref{eq:growth}, could have easily been modified to work in the case $q=1$ by relying on the estimate
\begin{align*}
\|\nabla u\|_{L^2(B_r)} \leq C(\|V\|_{L^{\infty}}) \left( r^{-1} \|u\|_{L^2(B_{2r})}+ \|\hat{F}_q(u)\|^{1/2}_{L^1(B_{2r})}\right), \ r \in (0,2),
\end{align*}
instead of invoking Lemma \ref{lem:Cacc} in the proof of \eqref{eq:growth}.
\end{rmk}

\section{The Case of More General Nonlinearities and Lipschitz Metrics}
\label{sec:var}

In this section we consider the setting described in Section \ref{sec:results_var}, which involves equations with more general nonlinearities $\hat{f}_q(u)$ and with Lipschitz metrics. Throughout this section, we assume that the conditions (A1)-(A3) and (F1)-(F4) hold. Similarly as in the proof of Theorem \ref{thm:SMUCP}, the argument for Theorem \ref{thm:SMUCP_var} is crucially based on a Carleman estimate:

\begin{thm}[Variable coefficient Carleman estimate]
\label{prop:varmet}
Suppose that the conditions (A1)-(A3) and (F1)-(F4) hold. Let $\phi(x)=\psi(|x|)$ with
\begin{align*}
\psi(r)= - \ln(r) + \frac{1}{10}\left( \ln(r)\arctan(\ln(r)) - \frac{1}{2} \ln(1+\ln^2(r)) \right).
\end{align*}
Assume that $u\in H^{1}_{loc}(\R^{n})\cap L^{\infty}_{loc}(\R^n)$ with $\supp{(u)} \subset \overline{B_{r_0}\setminus B_{\epsilon}}$, where $0<\epsilon\ll r_0\ll 1$, satisfies
\begin{align*}
\p_i a^{ij} \p_j u + \hat{f}_q(u) & = g \mbox{ in } \R^{n}.
\end{align*}
Then there exists $\tau_0>0$ (depending on $n,q,\lambda,\Lambda, \Lambda_0,\kappa_1,\kappa_2$) such that for $\tau\geq \tau_{0}$ we have
\begin{equation}
\label{eq:vCarl}
\begin{split}
\tau^{\frac{3}{2}} \left\| e^{\tau \phi} (1+\ln^2(|x|))^{-\frac{1}{2}} u \right\|_{L^2(\R^{n})} + \tau^{\frac{1}{2}}\left\| e^{\tau \phi} |x| (1+\ln^2(|x|))^{-\frac{1}{2}} \nabla u \right\|_{L^2(\R^{n})}\\
+ \tau \left\| e^{\tau \phi}|x| |u|^{\frac{q}{2}} \right\|_{L^2(\R^{n})} \leq C(q,\lambda,\Lambda,\Lambda_0,n,\kappa_1,\kappa_2) \ \left\|e^{\tau \phi} |x|^2 g \right\|_{L^2(\R^{n})}.
\end{split}
\end{equation}
\end{thm}

\begin{rmk}
\begin{itemize}
\item There are two main restrictions which determine the size of the radius $r_0=r_0(q,n,\lambda, \Lambda,\Lambda_0,\kappa_1,\kappa_2)>0$ in the theorem: Firstly, we choose it so small that we may pass to suitable ``geodesic normal coordinates'' in it. Secondly, we possibly impose even further restrictions on its size by requiring it to be sufficiently small in order to absorb some of the error terms, which arise in the proof of the Carleman estimate, into the leading order contributions of the Carleman estimate. This yields a dependence of $r_0$ on $n$ and $q$. The smallness of $r_0$ is no restriction, since the UCP is a local property of an equation.
\item The proof of Theorem \ref{prop:varmet} illustrates that there are no additional difficulties in proving the Carleman estimate if additional lower order contributions are included in \eqref{eq:model_1}, as long as the coefficients remain bounded. For instance, it would have been possible to include bounded gradient potentials in our discussion.
\end{itemize}
\end{rmk}

In order to prove this low regularity, variable coefficient Carleman estimate, we use the coordinates of Aronszajn, Krzywicki and Szarski \cite{AKS62}, who introduced a ``replacement'' of ``geodesic normal coordinates'' in the presence of Lipschitz continuous metrics. We recall this briefly in Section \ref{sec:AKS62}. Based on these ideas we introduce the corresponding ``geodesic polar coordinates'' and carry out a similar conjugation argument as in the proof of the Carleman inequality in the model case (c.f. Section \ref{sec:Carl_var}). As explained in Section \ref{sec:proofs}, the proof of Theorem \ref{thm:SMUCP_var} then follows along the same lines as in the model situation.

\subsection{The coordinates of Aronszajn, Krzywicki and Szarski}
\label{sec:AKS62}

A priori the introduction of suitable geodesic coordinates poses difficulties in the case of Lipschitz metrics, since the ODE system describing the geodesics does not possess well-defined, sufficiently regular solutions. Hence, we pursue a slightly different strategy following the ideas of Aronszajn, Krzywicki and Szarski \cite{AKS62}, who found a way of introducing suitable ``geodesic normal coordinates'' in a slightly different manner also in the presence of Lipschitz continuous metrics. With these coordinates at hand, we then carry out a similar conjugation procedure as in the model setting in Section \ref{sec:model} from above. An alternative approach, dealing with the Lipschitz metrics as perturbations of constant coefficient metrics, would also have been possible, c.f. \cite{KT01}. \\

We seek to present the proof of the variable coefficient Carleman estimate from Theorem \ref{prop:varmet} in a way which on the one hand avoids lengthy calculations and which on the other hand follows the arguments from Section \ref{sec:CarlI} as closely as possible. To this end, in the sequel we briefly recall a convenient change of coordinates due to Aronszajn, Krzywicki and Szarski \cite{AKS62}. Starting from a metric tensor $a_{ij}$ in a neighbourhood of the origin $B_{r_0}\subset \R^{n}$ which satisfies the conditions (A1)-(A3) from above, the authors of \cite{AKS62} introduce the following ``radial'' coordinate and a modified metric:
\begin{align}
& r=r(x):=(a_{ij}(0)x^i x^j)^{\frac{1}{2}},\label{eq:newmetric1}\\
& \tilde{a}_{ij}(x) := a_{ij}(x)\Psi(x), \label{eq:newmetric2}
\end{align}
where
\begin{equation}\label{eq:newmetric3}
\Psi(x)=a^{k l}(x)\frac{\p r}{\p x^{k}} \frac{\p r}{\p x^{l}} \quad \text{for }x\neq 0,\quad \Psi(0)=1,
\end{equation}
and $(a^{kl})=(a_{kl})^{-1}$ is the inverse matrix. Note that it is immediate from the uniform ellipticity that
$$\frac{\lambda}{\Lambda}\leq \Psi(x)\leq \frac{\Lambda}{\lambda},$$
and that by definition $\Psi$ is Lipschitz continuous.
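For completeness, we indicate the elementary computation behind this two-sided bound. Since $\p_k r = \frac{a_{kj}(0)x^j}{r}$, we may write
\begin{align*}
\Psi(x) = \frac{a^{kl}(x)\,(a(0)x)_k\,(a(0)x)_l}{r^2}.
\end{align*}
The spectrum of $(a^{kl}(x))$ is contained in $[\Lambda^{-1},\lambda^{-1}]$, and, by the symmetry and ellipticity of $a_{ij}(0)$, we have $\lambda\, x\cdot a(0)x \leq |a(0)x|^2 \leq \Lambda\, x\cdot a(0)x$. Combining these two observations with $r^2 = x\cdot a(0)x$ yields $\frac{\lambda}{\Lambda}\leq \Psi(x)\leq \frac{\Lambda}{\lambda}$.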
With these auxiliary quantities at hand, Aronszajn, Krzywicki and Szarski construct the following replacement of geodesic polar coordinates:

\begin{prop}[\cite{AKS62}, Sections III, IV]
\label{prop:AKS62}
In the ellipsoid
$$\tilde{B}_{\tilde{r}_0}:=\{x\in \R^{n}:r(x)<\tilde{r}_0\}\subset B_{r_0}, \quad \tilde{r}_0=r_0\sqrt{\lambda},$$
the following properties hold:
\begin{itemize}
\item[(i)] $\tilde{a}_{ij}$ is uniformly elliptic with $\tilde{\lambda}=\lambda^2/\Lambda$, $\tilde{\Lambda}=\Lambda^2/\lambda$.
\item[(ii)] $\tilde{a}_{ij}$ is Lipschitz with Lipschitz constant $\tilde{\Lambda}_0$ depending on $\Lambda_0$, $\lambda$ and $\Lambda$.
\item[(iii)] (Polar coordinates) Let $\Sigma:=\partial\tilde{B}_{\tilde{r}_0}$. Then one can parametrize $\tilde{B}_{\tilde{r}_0}\setminus \{0\}$ by $r$ and $\theta$, with $r=r(x)$ defined in \eqref{eq:newmetric1} and $\theta=(\theta^1, \cdots,\theta^{n-1})$ being a choice of local coordinates of $\Sigma$. In these coordinates, the metric turns into
\begin{align*}
\tilde{a}_{ij} dx^i dx^j=dr^2+r^2b_{k l}d \theta^k d \theta^{l} \text{ with } b_{k l}(r,\theta)=\frac{1}{r^2}\tilde{a}_{ij}\frac{\partial x^i}{\partial \theta ^k}\frac{\partial x^j}{\partial \theta^l}.
\end{align*}
\item[(iv)] There exists a constant $M=M(\lambda, \Lambda, \Lambda_0)$ such that for any tangent vector $\sigma\in T_{\theta}(\Sigma)$,
\begin{equation}\label{eq:polar_b}
\left|\frac{\partial b_{k l}(r,\theta)}{\partial r}\sigma^k\sigma^l\right|\leq M |b_{k l}(r,\theta)\sigma^k\sigma^l|.
\end{equation}
In particular, if we let $b:=\det(b_{k l})$, then \eqref{eq:polar_b} implies that
\begin{equation}\label{eq:logpolar_b}
\left|\frac{\partial \ln(\sqrt{b})}{\partial r}\right|\leq \frac{nM}{2}.
\end{equation}
\end{itemize}
\end{prop}

In other words, the existence of the coordinates $(r,\theta)$, which is one of the central insights of the paper of Aronszajn, Krzywicki and Szarski \cite{AKS62}, permits us to pass to ``geodesic polar coordinates'' without explicitly making use of the system of ODEs defining the exponential map -- which, due to the low regularity of the metric, would not necessarily yield the desired choice of coordinates.

\subsection{Proof of Theorem \ref{prop:varmet}}
\label{sec:Carl_var}

With the conformal polar coordinates of \cite{AKS62} at hand, we discuss the proof of the Carleman estimate from Theorem \ref{prop:varmet}. In order to use these coordinates efficiently and to switch to the associated conformal polar coordinates, we rewrite our equation in terms of the Laplace-Beltrami operator on the underlying manifold. This has the advantage that changes of coordinates can easily be computed.

\begin{lem}
\label{lem:geo}
Let $a^{ij}$ satisfy the assumptions (A1)-(A3) and let $\tilde{a}_{ij}$ be as in \eqref{eq:newmetric2} in Section \ref{sec:AKS62}. Then the following are equivalent:
\begin{itemize}
\item[(i)] $u$ is a solution to
\begin{align*}
\p_i a^{ij} \p_j u + \hat{f}_q(u) = g \mbox{ in } \R^n.
\end{align*}
\item[(ii)] $u$ is a solution to
\begin{align}
\label{eq:LB}
\D_{\tilde{a}^{ij}} u + \frac{1}{\Psi} \hat{f}_q(u) = \frac{g}{\Psi} + \frac{1}{2 \Psi \tilde{a}} a^{ij} (\p_{x_i} \tilde{a}) \p_{x_j}u - a^{ij}\frac{\p_{x_i}\Psi}{\Psi^2} \p_{x_j}u=: \frac{g}{\Psi} + R=:h,
\end{align}
where $\D_{\tilde{a}^{ij}}$ denotes the Laplace-Beltrami operator with respect to the metric $\tilde{a}^{ij}$, $\tilde{a}:=\det(\tilde{a}_{ij})$ and $\Psi$ denotes the function from \eqref{eq:newmetric3} in Section \ref{sec:AKS62}.
\end{itemize}
\end{lem}

We omit the proof of this equivalence, as it follows from a direct calculation. Instead, we turn to the proof of Theorem \ref{prop:varmet}, for which we will rely on the geometric formulation of the Carleman estimate.

\begin{proof}[Proof of Theorem \ref{prop:varmet}]
\emph{Step 1: Choice of coordinates.}
Relying on Lemma \ref{lem:geo}, we prove a Carleman estimate for the operator $L u= \D_{\tilde{a}^{ij}} u + \frac{1}{\Psi} \hat{f}_q(u)$. The terms on the right hand side in Lemma \ref{lem:geo} (ii) will be treated as error contributions and will eventually be absorbed into the left hand side of the Carleman inequality (c.f. Step 5 below).

Consider the geodesic polar coordinates $(r,\theta)\in (0,1)\times \Sigma$ from Proposition \ref{prop:AKS62} (iii), where $r(x)=|x|$ (since $a_{ij}(0)=\delta_{ij}$ by the normalization assumption (A3)) and $\theta$ are suitable coordinates of $\Sigma$. By definition of the coordinates from \cite{AKS62}, we infer
\begin{align*}
\Delta_{\tilde{a}_{ij}}=&\frac{1}{r^{n-1}\sqrt{b}}\dr(r^{n-1}\sqrt{b}\dr) +\frac{1}{r^2}\Delta_{\Sigma}, \quad \sqrt{\tilde{a}}\,dx=r^{n-1}\sqrt{b}\ drd\theta,
\end{align*}
where
$$\Delta_{\Sigma}=\frac{1}{\sqrt{b}}\p_{\theta_k} \left(b^{k l}\sqrt{b}\ \p_{\theta_{l}}\right), \quad b=\det(b_{k l}), \quad (b^{k l})=(b_{k l})^{-1}.$$
Next we carry out a change into conformal coordinates, i.e. $x=e^{t}\theta$, which in particular yields $\dr = e^{-t}\dt$. The resulting Laplace-Beltrami operator then reads
\begin{align*}
\D_{\tilde{a}_{ij}}=e^{-2t}\left[ \frac{1}{e^{(n-2)t}\sqrt{b}}\partial_t\left(e^{(n-2)t}\sqrt{b}\partial_t\right) + \D_{\Sigma} \right].
\end{align*}
We conjugate the operator $\D_{\tilde{a}^{ij}}$ with the weight $e^{-\frac{n-2}{2}t}$, which leads to the representation
\begin{align*}
e^{\frac{n+2}{2}t}\D_{\tilde{a}_{ij}}e^{-\frac{n-2}{2}t}=\frac{1}{\sqrt{b}}\partial_t(\sqrt{b}\partial_t)-\left(\frac{n-2}{2}\right)^2 + \D_{\Sigma}.
\end{align*}
Hence, our equation \eqref{eq:LB} becomes
\begin{align}
\label{eq:eq_aux}
\left( \frac{1}{\sqrt{b}}\p_t(\sqrt{b}\p_t) - \left( \frac{n-2}{2} \right)^2 + \Delta_{\Sigma} + e^{2t} \frac{\hat{f}_q(\tilde{u})}{\Psi \tilde{u}} \right)\tilde{v} = \tilde{h},
\end{align}
where
\begin{align*}
\tilde{v}(t,\theta) = e^{- \frac{n-2}{2}t} u(e^{t}\theta), \ \tilde{h}(t,\theta) = e^{\frac{n+2}{2}t}h(e^t \theta), \ \tilde{u}(t,\theta) = u(e^t \theta).
\end{align*}
We conjugate \eqref{eq:eq_aux} with the weight $e^{\tau \varphi}$, where $\varphi(t) = \psi(e^t)$.
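Compared with the model case, the only new feature in this conjugation is the $t$-dependent volume element $\sqrt{b}\, dt\, d\theta$. For the reader's orientation, we record the short calculation which produces the splitting displayed below: one computes
\begin{align*}
e^{\tau \varphi}\Big(\frac{1}{\sqrt{b}}\p_t(\sqrt{b}\,\p_t)\Big)e^{-\tau \varphi}
= \frac{1}{\sqrt{b}}\p_t(\sqrt{b}\,\p_t) - 2\tau \varphi' \p_t + \tau^2(\varphi')^2 - \tau \varphi'' - \tau \varphi' \p_t \ln(\sqrt{b}),
\end{align*}
and with respect to $L^2(\R\times \Sigma, \sqrt{b}\, dt\, d\theta)$ the adjoint of $\p_t$ is $-\p_t - \p_t\ln(\sqrt{b})$. Hence the first order operator $-2\tau \varphi' \p_t - \tau \varphi'' - \tau \varphi'\p_t\ln(\sqrt{b})$ is antisymmetric with respect to this measure, which explains the additional term appearing in the antisymmetric part below.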
The correspondingly conjugated operator turns into $L_{\varphi}=S+A$, where
\begin{align}
\label{eq:sym_antisym}
\begin{split}
S & = \frac{1}{\sqrt{b}}\partial_t(\sqrt{b}\partial_t)+ \tau^2(\varphi')^2 - \left(\frac{n-2}{2}\right)^2 + \D_{\Sigma} +h_q(\tilde{u}),\\
A & = -2\tau \varphi'\dt - \tau \varphi''-\tau\varphi'\partial_t \ln(\sqrt{b}),
\end{split}
\end{align}
where $h_q(\tilde{u})= e^{2t} \frac{\hat{f}_q(\tilde{u})}{\Psi\tilde{u}}$. We seek to derive the desired Carleman estimate by expanding the operator $L_{\varphi}$.\\

\emph{Step 2: Expansion of the operator $L_{\varphi}$.}
To estimate $\|L_\varphi v\|_{L^2}$, we use the splitting from \eqref{eq:sym_antisym} and expand the operator $L_{\varphi}$. Due to the low regularity of the metric, we do not directly phrase this as a commutator estimate, but morally it reduces to this. Due to the $t$-dependence of the volume element, we have an extra term $\tau\varphi'\partial_t \ln(\sqrt{b})$ in the antisymmetric part, whose $t$-derivative is not controlled. Thus, we treat this contribution as an error term, i.e. we split
$$L_\varphi=S+\tilde{A}+E,$$
where
\begin{align*}
\tilde{A}=-2\tau \varphi'\dt - \tau \varphi'', \quad E=-\tau\varphi'\partial_t \ln(\sqrt{b}).
\end{align*}
By virtue of the triangle inequality,
\begin{equation}\label{eq:tri}
\|(S+\tilde{A})v\|_{L^2_{\vol}}\leq \|L_\varphi v\|_{L^2_{\vol}}+\|Ev\|_{L^2_{\vol}}.
\end{equation}
Here we have set $v = e^{\tau \varphi} \tilde{v}$ and, for simplicity of notation, we have abbreviated
\begin{align*}
\|\cdot \|_{L^2_{\vol}}= \|\cdot\|_{L^2_{\vol}(\R \times \Sigma)}=\|\cdot\|_{L^2(\R \times \Sigma, \sqrt{b}d \theta dt)}.
\end{align*}
The corresponding scalar product will be denoted by $(\cdot, \cdot)_{L^2_{\vol}}$. We first notice that
\begin{align}
\label{eq:split11}
\|(S+\tilde{A})v\|^2_{L^2_{\vol}}=\|Sv\|^2_{L^2_{\vol}} +\|\tilde{A}v\|^2_{L^2_{\vol}}+ 2(Sv, \tilde{A}v)_{L^2_{\vol}}.
\end{align}
We now estimate the contributions in $(S v, \tilde{A}v)_{L^2_{\vol}}$, which we split into three parts that we consider separately:
\begin{align}
\label{eq:mixed}
\begin{split}
(S v, \tilde{A} v)_{L^2_{\vol}} &= -2 \tau (\p_t (\sqrt{b} \p_t ) v + \tau^2 \sqrt{b}(\varphi')^2 v + \D_{\Sigma}' v - \sqrt{b}\frac{(n-2)^2}{4} v, \varphi' \p_t v)_{L^2}\\
& \quad - \tau (\p_t (\sqrt{b} \p_t ) v + \tau^2 \sqrt{b}(\varphi')^2 v + \D_{\Sigma}' v - \sqrt{b}\frac{(n-2)^2}{4} v, \varphi'' v)_{L^2}\\
& \quad + ([h_q(\tilde{u})\sqrt{b}, -2\tau \varphi' \p_t - \tau \varphi''] v, v)_{L^2}.
\end{split}
\end{align}
Here we now consider the standard scalar product, i.e., $(\cdot,\cdot)_{L^2}:=(\cdot,\cdot)_{L^2(\R \times \Sigma)}$, and we have set $\D_{\Sigma}' := \sqrt{b} \D_{\Sigma}$. We begin by discussing the first two contributions in \eqref{eq:mixed}, which do not involve the sublinear part of the problem. Hence, the main difficulty with these is to deal with the low regularity of the metric.
To this end we compute, \begin{align} \label{eq:first_term} \begin{split} &-2 \tau \left(\p_t (\sqrt{b} \p_t ) v + \tau^2 \sqrt{b}(\varphi')^2 v + \D_{\Sigma}' v - \frac{(n-2)^2}{4}\sqrt{b} v, \varphi' \p_t v\right)_{L^2}\\ &= -2\tau \left(\sqrt{b} \p_t^2 v + \tau^2 \sqrt{b}(\varphi')^3 v + \D_{\Sigma}' v- \frac{(n-2)^2}{4}\sqrt{b} v, \varphi' \p_t v\right)_{L^2} - \tau \left(\frac{b'}{\sqrt{b}}\p_t v, \varphi' \p_t v \right)_{L^2}\\ & = -\tau (\varphi' \sqrt{b}, \p_t(\p_t v)^2)_{L^2} - \tau^3 (\sqrt{b} (\varphi')^2, \p_t (v^2))_{L^2} - \tau (\frac{b'}{\sqrt{b}} \p_t v, \varphi' \p_t v)_{L^2}\\ & \quad - 2\tau (\D_{\Sigma}' v, \varphi' \p_t v)_{L^2} +\tau \frac{(n-2)^2}{4}(\varphi' \sqrt{b}, \p_t(v^2))_{L^2}\\ & = \tau ((\p_t v)\p_t (\varphi' \sqrt{b}), \p_t v)_{L^2} + \tau^3 (v \p_t (\sqrt{b} (\varphi')^3), v)_{L^2} -\tau (\frac{b'}{\sqrt{b}} \p_t v, \varphi' \p_t v)_{L^2} \\ & \quad + 2\tau (\nabla_{\theta}v, b \sqrt{b} \varphi' \p_t \nabla_{\theta} v)_{L^2}- \tau \frac{(n-2)^2}{4}(\varphi'' \sqrt{b} v, v)_{L^2} - \frac{\tau}{2} \frac{(n-2)^2}{4}(\varphi' \frac{b'}{\sqrt{b}} v, v)_{L^2}\\ & = \tau (\sqrt{b} \varphi'' \p_t v, \p_t v)_{L^2} + \frac{\tau}{2}(\varphi' \frac{b'}{\sqrt{b}}\p_t v, \p_t v)_{L^2} + 3 \tau^3 (\sqrt{b} \varphi'' \varphi' v, \varphi' v)_{L^2} + \frac{\tau^3}{2} ((\varphi')^3 \frac{b'}{\sqrt{b}} v, v)_{L^2}\\ & \quad - \tau(\varphi'' \nabla_{\theta}v, b \sqrt{b} \nabla_{\theta}v)_{L^2} - \tau(\varphi' (\p_t(b \sqrt{b})) \nabla_{\theta}v, \nabla_{\theta} v)_{L^2} - \tau (\frac{b'}{\sqrt{b}}\p_t v, \varphi' \p_t v)_{L^2}\\ & \quad -\tau \frac{(n-2)^2}{4}(\varphi'' \sqrt{b} v, v)_{L^2} - \frac{\tau}{2} \frac{(n-2)^2}{4}(\varphi' \frac{b'}{\sqrt{b}} v, v)_{L^2}. \eta_{\delta,r}nd{split} \eta_{\delta,r}nd{align} Here, for ease of notation, we have abbreviated $b'(t,\theta):=\p_t b(t,\theta)$. Next we consider the second contribution from \eta_{\delta,r}qref{eq:mixed}. It turns into \begin{align} \label{eq:second_term} \begin{split} &- \tau \left(\sqrt{b} \p_t^2 v + \sqrt{b}\tau^2 (\varphi')^2 v + \D_{\Sigma}'v - \sqrt{b}\frac{(n-2)^2}{4}v, \varphi'' v \right)_{L^2} - \frac{\tau}{2}\left(\frac{b'}{\sqrt{b}} \p_t v, \varphi'' v\right)_{L^2}\\ & = \frac{\tau}{2}(\frac{b'}{\sqrt{b}} \p_t v, \varphi'' v)_{L^2} + \tau (\sqrt{b} \p_t v, \varphi'' \p_t v)_{L^2} + \tau (\sqrt{b} \p_t v, \varphi''' v)_{L^2} - \tau^3 (\sqrt{b} (\varphi')^2 \varphi'' v, v)_{L^2} \\ & \quad + (\nabla_{\theta} v, \varphi'' b \sqrt{b} \nabla_{\theta} v)_{L^2} -\frac{\tau}{2}(\frac{b'}{\sqrt{b}} \p_t v, \varphi'' v)_{L^2} +\tau \frac{(n-2)^2}{4}(\varphi'' \sqrt{b} v, v)_{L^2}. \eta_{\delta,r}nd{split} \eta_{\delta,r}nd{align} Combining the contributions from \eta_{\delta,r}qref{eq:first_term}, \eta_{\delta,r}qref{eq:second_term} leads to \begin{align} \label{eq:combined_12} \begin{split} &2 \tau (\sqrt{b} \varphi'' \p_t v, \p_t v)_{L^2} + 2 \tau^3(\sqrt{b} (\varphi')^2 \varphi'' v, v)_{L^2}\\ & - \frac{\tau}{2} (\varphi' \frac{b'}{\sqrt{b}} \p_t v, \p_t v)_{L^2} + \frac{\tau^3}{2} ((\varphi')^2 \frac{b'}{\sqrt{b}} v,v)_{L^2} - \tau (\varphi' (\p_t(b \sqrt{b}))\nabla_{\theta} v, \nabla_{\theta} v)_{L^2} \\ & + \tau (\sqrt{b} \p_t v, \varphi''' v)_{L^2} - \frac{\tau}{2} \frac{(n-2)^2}{4}(\varphi' \frac{b'}{\sqrt{b}} v,v)_{L^2}. 
\eta_{\delta,r}nd{split} \eta_{\delta,r}nd{align} The support assumption $\supp(v) \subset \{(t,\theta) \in (-\infty, t_0)\times \Sigma \}$ for a sufficiently small choice of $t_0$ combined with the explicit form of $\varphi$, and the bound \begin{align*} |b'(t)| \leq C e^{t}, \eta_{\delta,r}nd{align*} then a sufficiently large choice of $\tau_0$ allows us to estimate the contributions in \eta_{\delta,r}qref{eq:combined_12} by \begin{align} \label{eq:combined_12a} \begin{split} &2 \tau (\sqrt{b} \varphi'' \p_t v, \p_t v)_{L^2} + 2 \tau^3(\sqrt{b} (\varphi')^2 \varphi'' v, v)_{L^2}\\ &- \frac{\tau}{2} (\varphi' \frac{b'}{\sqrt{b}} \p_t v, \p_t v)_{L^2} + \frac{\tau^3}{2} ((\varphi')^2 \frac{b'}{\sqrt{b}}v,v)_{L^2} + \tau (\sqrt{b} \p_t v, \varphi''' v)_{L^2} \\ & \quad - \tau (\varphi' (\p_t(b \sqrt{b}))\nabla_{\theta} v, \nabla_{\theta} v)_{L^2} - \frac{\tau}{2} \frac{(n-2)^2}{4}(\varphi' \frac{b'}{\sqrt{b}} v,v)_{L^2}\\ & \geq \tau (\sqrt{b} \varphi'' \p_t v, \p_t v)_{L^2} + \tau^3(\sqrt{b} (\varphi')^2 \varphi'' v, v)_{L^2} - \tau (\varphi' (\p_t(b \sqrt{b}))\nabla_{\theta} v, \nabla_{\theta} v)_{L^2}. \eta_{\delta,r}nd{split} \eta_{\delta,r}nd{align} This shows the control of tangential gradients and $L^2$ contributions (with an error involving the spherical derivatives). In Step 4 we show that the potentially negative contribution involving the spherical gradient can be absorbed into positive contributions and that we can upgrade \eta_{\delta,r}qref{eq:combined_12a} to an estimate for the full gradient (including the spherical part of the gradient), for which we exploit the symmetric part of the operator.\\ \eta_{\delta,r}mph{Step 3: Estimates for the sublinear contributions.} For the terms involving the sublinear potential, we argue similarly as in the proof of the model situation. For ease of notation we define for $v \in \R$ \begin{align*} &\tilde{f}_q(v)(t,\theta):= f_q(e^t \theta,v), \ \p_1 \tilde{f}_q(v)(t,\theta) = (\p_t \tilde{f}_q)|_{(t,\theta,v)}, \ \tilde{f}'_q(v)(t,\theta):= (\p_{v} \tilde{f}_q)|_{(t,\theta,v)},\\ &\tilde{F}_q(v)(t,\theta):= f_q(e^t \theta,v), \ \p_1 \tilde{F}_q(v)(t,\theta) = (\p_t \tilde{f}_q)|_{(t,\theta,v)}, \ \tilde{F}'_q(v)(t,\theta):= (\p_{v} \tilde{f}_q)|_{(t,\theta,v)}. \eta_{\delta,r}nd{align*} With this notation at hand and using that \begin{align*} [\sqrt{b} h_q(\tilde{u}), -2\tau \varphi' \p_t - \tau \varphi''] = [\sqrt{b} h_q(\tilde{u}), -2\tau \varphi' \p_t ], \eta_{\delta,r}nd{align*} we compute \begin{align} \label{eq:non_lin_comm} \begin{split} -2\tau [\sqrt{b} h_q(\tilde{u}), \varphi' \p_t ] & = 2\tau \varphi' \p_t (h_q(\tilde{u}) \sqrt{b})\\ & = \frac{2\tau e^{2t} \varphi' \sqrt{b}}{\Psi}\left( \frac{\tilde{f}_q'(\tilde{u})}{\tilde{u}} - \frac{\tilde{f}_q(\tilde{u})}{\tilde{u}^2} \right) \p_t \tilde{u} + \frac{4\tau e^{2t} \varphi' \sqrt{b}}{\Psi} \frac{\tilde{f}_q(\tilde{u})}{\tilde{u}} \\ & \quad + \frac{2\tau e^{2t} \varphi' \sqrt{b}}{\Psi}\frac{\p_1 \tilde{f}_q(\tilde{u})}{\tilde{u}} + \frac{\tau e^{2t} \varphi'}{\Psi} \frac{b'}{\sqrt{b}} \frac{\tilde{f}_q(\tilde{u})}{\tilde{u}} - 2\tau e^{2t} \varphi' \sqrt{b} \frac{\Psi'}{\Psi^2} \frac{\tilde{f}_q(\tilde{u})}{\tilde{u}}\\ & = \frac{2\tau e^{2t}\varphi' \sqrt{b}}{\Psi} \left( \frac{\tilde{f}_q'(\tilde{u})}{\tilde{u}} - \frac{\tilde{f}_q(\tilde{u})}{\tilde{u}^2} \right) \p_t \tilde{u} + \frac{4\tau e^{2t} \varphi' \sqrt{b}}{\Psi} \frac{\tilde{f}_q(\tilde{u})}{\tilde{u}} + E_1. 
\eta_{\delta,r}nd{split} \eta_{\delta,r}nd{align} We treat $E_1$ as an error, which we will discuss below, and thus first concentrate on the other contribution. Using that $v= e^{\tau \varphi}e^{-\frac{n-2}{2}t} \tilde{u}$ and that the condition (F4) ensures the well-definedness of $\tilde{u}\tilde{f}'_q(\tilde{u})$, we infer \begin{align} \label{eq:non_lin_comm_1} \begin{split} &-2 \tau ([\sqrt{b} h_q(\tilde{u}), \varphi' \p_t] v, v) = 2\tau \left( \frac{e^{2t} \varphi' v \sqrt{b}}{\Psi} \left( \frac{\tilde{f}_q'(\tilde{u})}{\tilde{u}} - \frac{\tilde{f}_q(\tilde{u})}{\tilde{u}^2} \right) \p_t \tilde{u} , v \right) + 4\tau \left(\frac{ e^{2t} \varphi' \sqrt{b}}{\Psi} \frac{\tilde{f}_q(\tilde{u})}{\tilde{u}}v,v \right) \\ & \quad + (E_1 v,v)\\ &= 2\tau \left(\frac{l \sqrt{b}}{\Psi}, (\tilde{u} \tilde{f}_q'(\tilde{u}) - \tilde{f}_q(\tilde{u}))\p_t \tilde{u}\right) + 4q\tau \left(\frac{ e^{2t} e^{2\tau \varphi}\varphi' \sqrt{b}}{\Psi} \frac{\tilde{F}_q(\tilde{u})}{\tilde{u}^2}, \tilde{u}^2 \right) + (E_1 v,v)\\ & = 2 \tau \left( \frac{l \sqrt{b}}{\Psi}, \p_t (\tilde{u} \tilde{f}_q(\tilde{u})) - 2 \tilde{F}'_q(\tilde{u})\p_t \tilde{u} \right) + 4q\tau \left(\frac{ e^{2t} e^{2\tau \varphi}\varphi' \sqrt{b}}{\Psi}, \tilde{F}_q(\tilde{u}) \right) + (E_1 v,v)\\ &= 2 \tau \left(\frac{l \sqrt{b}}{\Psi}, \p_t (\tilde{u} \tilde{f}_q(\tilde{u})) - 2 \p_t( \tilde{F}_q(\tilde{u})) \right) + 4\tau(\sqrt{b} l, \p_1 \tilde{F}_q|_{\tilde{u}} ) + 4q\tau \left(\frac{ e^{2t} e^{2\tau \varphi}\varphi' \sqrt{b}}{\Psi}, \tilde{F}_q(\tilde{u}) \right) + (E_1 v,v)\\ &= 2 \tau \left(\frac{l \sqrt{b}}{\Psi}, \p_t (\tilde{u} \tilde{f}_q(\tilde{u})) - 2 \p_t( \tilde{F}_q(\tilde{u})) \right)+ 4q\tau \left(\frac{ e^{2t} e^{2\tau \varphi}\varphi' \sqrt{b}}{\Psi}, \tilde{F}_q(\tilde{u}) \right) + (E_2 v,v) + (E_1 v,v), \eta_{\delta,r}nd{split} \eta_{\delta,r}nd{align} where we have set $l(t)= e^{(2-n)t} e^{2t} \varphi'(t) e^{2 \tau \varphi}$ and where we view $(E_2 v,v)$ as a controlled error. We note that by our choice of the weight function $\varphi$ \begin{align} \label{eq:l} \begin{split} l'(t) &= e^{(2-n)t} e^{2\tau \varphi}\left( \left(4- n\right)\varphi'(t) + \varphi''(t) + 2\tau (\varphi'(t))^2 \right)\\ &\geq e^{(2-n)t} e^{2\tau \varphi}\left( \varphi''(t) + \frac{3}{2} \tau (\varphi'(t))^2 \right) \geq 0, \eta_{\delta,r}nd{split} \eta_{\delta,r}nd{align} if $\tau \geq \tau_0>0$ is sufficiently large. 
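We also record, for clarity, the chain rule identity used in passing from the third to the fourth line of \eqref{eq:non_lin_comm_1}, namely
\begin{align*}
\tilde{F}'_q(\tilde{u})\, \p_t \tilde{u} = \p_t \big( \tilde{F}_q(\tilde{u}) \big) - \p_1 \tilde{F}_q|_{\tilde{u}},
\end{align*}
which is what produces the additional contribution involving $\p_1 \tilde{F}_q|_{\tilde{u}}$ there.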
Integrating the expression from \eta_{\delta,r}qref{eq:non_lin_comm_1} by parts and using that $q\in [1,2)$, we thus further estimate \begin{align} \label{eq:comm_main} \begin{split} -2 \tau ([\sqrt{b} h_q(\tilde{u}), \varphi' \p_t] v, v) &= - 2\tau \left( \frac{l' \sqrt{b}}{\Psi}, \tilde{u} \tilde{f}_q(\tilde{u})- 2\tilde{F}_q(\tilde{u}) \right) + 4\tau q \left(\frac{ e^{2t} e^{2\tau \varphi}\varphi' \sqrt{b}}{\Psi}, \tilde{F}_q(\tilde{u}) \right) \\ & \quad + \tau \left(\frac{b'}{\Psi \sqrt{b}}l, \tilde{u}\tilde{f}_q(\tilde{u})-2 \tilde{F}_q(\tilde{u})\right) -2 \tau \left( \frac{l \sqrt{b} \Psi'}{\Psi^2}, \tilde{u} \tilde{f}_q(\tilde{u}) - 2 \tilde{F}_q(\tilde{u}) \right) \\ & \quad + ((E_1 + E_2)v,v) \\ & \geq 2(2-q)\tau \left( \frac{l' \sqrt{b}}{\Psi}, \tilde{F}_q(\tilde{u})\right) + 4\tau \left(\frac{ e^{2t} e^{2\tau \varphi}\varphi' \sqrt{b}}{\Psi}, \tilde{F}_q(\tilde{u}) \right)\\ & \quad + \left((E_1 + E_2+ E_3)v,v \right) \\ & = 2(2-q)\tau (\Psi^{-1}e^{2\tau \varphi}e^{2t}\sqrt{b}(\varphi'' + \frac{3 \tau}{2} (\varphi')^2), \tilde{F}_q(\tilde{u})) \\ & \quad + 4\tau q \left(\frac{ e^{2t} e^{2\tau \varphi}\varphi' \sqrt{b}}{\Psi}, \tilde{F}_q(\tilde{u}) \right) + ((E_1+E_2+E_3)v,v)\\ & \geq 2(2-q)\tau (\Psi^{-1}e^{2\tau \varphi}e^{2t}\sqrt{b}(\varphi'' + \tau (\varphi')^2), \tilde{F}_q(\tilde{u})) \\ & \quad + ((E_1+E_2+E_3)v,v). \eta_{\delta,r}nd{split} \eta_{\delta,r}nd{align} We estimate the error terms $((E_1+E_2+E_3)v,v)$ and show that they are indeed of lower order, i.e. that they can be absorbed into the positive contributions on the right hand side of \eta_{\delta,r}qref{eq:combined_12a}: To this end, we observe that by the assumption (F3) and by the definition of $F_q(x,s)$ as the antiderivative of $f_q(x,s)$ (c.f. the condition (F1)) \begin{align*} \left| \frac{\p_1 \tilde{f}_q(\tilde{u})}{\tilde{u}} \right| \leq \kappa_1 e^{t} \left| \frac{\tilde{f}_q(\tilde{u})}{\tilde{u}} \right| \leq \kappa_1 e^{t} \left| \frac{\tilde{F}_q(\tilde{u})}{\tilde{u}^2} \right| . \eta_{\delta,r}nd{align*} Thus, \begin{align*} \left| \left( v, e^{2t}\varphi' \sqrt{b} \Psi^{-1} \frac{\p_1 \tilde{f}_q(\tilde{u})}{\tilde{u}} v \right) \right| &\leq \left| \left( v, e^{2t}|\varphi'| \sqrt{b} \Psi^{-1} \left|\frac{\p_1 \tilde{f}_q(\tilde{u})}{\tilde{u}} \right| v \right) \right| \leq \left| \left( v, e^{2t}|\varphi'| \sqrt{b} \Psi^{-1} e^t \left|\frac{\tilde{F}_q(\tilde{u})}{\tilde{u}^2} \right| v \right) \right|\\ & \leq \kappa_1 \left| (|l|\sqrt{b} \Psi^{-1}, e^{t} |\tilde{F}_q(\tilde{u})| ) \right|. \eta_{\delta,r}nd{align*} Similarly, \begin{align*} \left| \left( e^{2t}\varphi' \frac{b'}{\sqrt{b} \Psi} \frac{\tilde{f}_q(\tilde{u})}{\tilde{u}}v,v \right) \right| &\leq \left| \left( e^{2t}|\varphi'|\left| \frac{b'}{\sqrt{b} \Psi} \right| \frac{|\tilde{f}_q(\tilde{u})\tilde{u}|}{|\tilde{u}|^2}v,v \right) \right| \leq q\left| \left( |l||\varphi'| \left| \frac{b'}{\sqrt{b} \Psi} \right|, |\tilde{F}_q(\tilde{u})|e^{2\tau \varphi} \right) \right|\\ &\leq M q \left| \left( |l||\varphi'| e^{t}, |\tilde{F}_q(\tilde{u})| \right) \right|, \eta_{\delta,r}nd{align*} and, since $|\Psi'|\leq C e^{t}$, \begin{align*} \left| \left( e^{2t} \varphi' \sqrt{b} \frac{\Psi'}{\Psi^2} \frac{\tilde{f}_q(\tilde{u})}{\tilde{u}} v, v \right) \right| \leq C \left| \left( e^{2t} e^{2\tau \varphi} |\varphi'| \sqrt{b} \left|\frac{\Psi'}{\Psi^2}\right| \frac{\tilde{u}\tilde{f}_q(\tilde{u})}{\tilde{u}^2} v, v \right) \right| \leq C \left| \left( |l||\varphi'| \sqrt{b} e^{t},\tilde{F}_q(\tilde{u}) \right) \right|. 
\eta_{\delta,r}nd{align*} As a consequence, \begin{align} \label{eq:error_1} \begin{split} |(v, E_1 v)| &\leq 2\tau \left|\left(e^{2t} v, \varphi' \sqrt{b} \frac{\p_1 \tilde{f}_q(\tilde{u})}{\tilde{u}} v \right) \right| + \tau \left| \left( e^{2t}\varphi' \frac{b'}{\sqrt{b}} \frac{\tilde{f}_q(\tilde{u})}{\tilde{u}} v, v \right) \right|+ 2\tau \left| \left( e^{2t}\varphi' \sqrt{b} \frac{\Psi'}{\Psi^2} \frac{\tilde{f}_q(\tilde{u})}{\tilde{u}} v, v \right) \right|\\ &\leq C(q,M,\kappa_1) \tau \left| \left( |l||\varphi'| e^{t}, \tilde{F}_q(\tilde{u}) \right) \right|. \eta_{\delta,r}nd{split} \eta_{\delta,r}nd{align} With a similar reasoning we infer \begin{align} \label{eq:error_2} \begin{split} &\left| \left(E_2 v, v \right) \right| = 4\tau \left| \left( \sqrt{b} l, \p_1 \tilde{F}_q(\tilde{u}) \right) \right| \leq 4 \tau \kappa_1 \left| \left( \sqrt{b} |l|, e^{t}\tilde{F}_q(\tilde{u}) \right) \right|.\\ &\left| \left( E_3 v,v \right) \right| \leq \tau \left| \left( \frac{|b'|}{|\sqrt{b}|}|l| + 2 \frac{|l| \sqrt{b}|\Psi'|}{\Psi^2} , \tilde{u} \tilde{f}_q(\tilde{u})-2 \tilde{F}_q(\tilde{u}) \right) \right| \leq (2-q)\tau \left| \left( \left| \frac{b'}{\sqrt{b}} \right||l| + 2 \frac{|l| \sqrt{b}|\Psi'|}{\Psi^2}, \tilde{F}_q(\tilde{u}) \right) \right|\\ &\qquad \qquad \leq C(b)(2-q)\tau \left| \left( \sqrt{b} e^{t}|l|, \tilde{F}_q(\tilde{u}) \right) \right|. \eta_{\delta,r}nd{split} \eta_{\delta,r}nd{align} Choosing $ r_0(q)>0$ (and thus also $t_0 = \ln(r_0)<0$) sufficiently small, we may hence absorb the error contributions from \eta_{\delta,r}qref{eq:error_1}, \eta_{\delta,r}qref{eq:error_2} into the positive term on the right hand side of \eta_{\delta,r}qref{eq:comm_main}. Using that by our assumptions $F_q(\tilde{u})\geq c |\tilde{u}|^{q}$ and choosing $\tau \geq \tau_0=\tau_0(q,n)>0$ sufficiently large, we then deduce that \begin{align} \label{eq:main_comm_2} \begin{split} -2\tau ([ h_q(\tilde{u}), \varphi' \p_t] v, v ) &\geq 2\tau \frac{2-q}{q} \int\limits_{\R \times S^{n-1}} e^{2t}\sqrt{b}\left( \varphi''(t) + \tau (\varphi'(t))^2 \right) e^{2\tau \varphi} F_q(\tilde{u}) dt d\theta\\ & \qquad+ ((E_1+E_2+E_3)v,v)\\ & \geq \tau \frac{2-q}{q} \int\limits_{\R \times S^{n-1}} e^{2t} \sqrt{b} \left( \varphi''(t) + \tau (\varphi'(t))^2 \right) e^{2\tau \varphi} F_q(\tilde{u}) dt d\theta\\ & \geq \tau \frac{2-q}{q} \int\limits_{\R \times S^{n-1}} e^{2t}\sqrt{b} \left( \varphi''(t) + \tau (\varphi'(t))^2 \right)\max\{\kappa_2|\tilde{u}|^{q-2} v^2, e^{2\tau \varphi} F_q(\tilde{u}) \} dt d\theta. \eta_{\delta,r}nd{split} \eta_{\delta,r}nd{align} After passing back to Cartesian coordinates, this concludes the argument for the derivation of the sublinear contribution.\\ \eta_{\delta,r}mph{Step 4: Upgrading the gradient estimate.} We explain the derivation of the full gradient estimates. As in the corresponding estimate in Section \ref{sec:model}, this is based on the symmetric part of the operator. Testing it by $\tau c_0 \varphi'' v$ for a sufficiently small constant $c_0>0$, we infer \begin{align} \label{eq:sph_grad} \begin{split} c_0 \tau \||\varphi''|^{1/2} \nabla_{S^{n-1}} v\|_{L^2_{\vol}}^2 &\leq C c_0 \tau \left[|(Sv, \varphi'' v)_{L^2_{\vol}}| + \||\varphi''|^{1/2} \p_t v\|_{L^2_{\vol}}^2 \right.\\ & \quad \left. 
+ C\tau^2 |(|\varphi''| |\varphi'|^2 v,v)_{L^2_{\vol}}| + |(h_q(\tilde{u})v,\varphi'' v)_{L^2_{\vol}}| \right]\\ & \leq \frac{1}{2}\|S v\|_{L^2_{\vol}}^2 + Cc_0 \tau^3 \||\varphi''|^{1/2} v\|_{L^2_{\vol}}^2 + C c_0 \tau \|e^t |\varphi''|^{1/2} e^{\tau \varphi} |F_q(\tilde{u})|^{1/2}\|_{L^2_{\vol}}^2 \\ &\stackrel{\eta_{\delta,r}qref{eq:main_comm_2}, \eta_{\delta,r}qref{eq:combined_12a}}{\leq} \|S v\|_{L^2_{\vol}}^2 + ([S,A]v,v)_{L^2_{\vol}} + C\tau|(\varphi' \sqrt{b}^{-1}\p_t(b \sqrt{b}) \nabla_{\theta}v, \nabla_{\theta} v)_{L^2_{\vol}}|\\ &\leq C \|L v\|_{L^2_{\vol}}^2 + C\tau|(\varphi' \sqrt{b}^{-1}\p_t(b \sqrt{b}) \nabla_{\theta}v, \nabla_{\theta} v)_{L^2_{\vol}}|. \eta_{\delta,r}nd{split} \eta_{\delta,r}nd{align} Here $c_0>0$ is chosen so small that $Cc_0 \leq 1$ and where we used that \begin{align*} &C c_0 \tau \||\varphi''|^{1/2} e^t e^{\tau \varphi} |F_q(\tilde{u})|^{1/2}\|_{L^2_{\vol}}^2\\ &\leq \tau \frac{2-q}{q} \int\limits_{\R \times S^{n-1}}\sqrt{b}e^t \left( \varphi''(t) + \tau (\varphi'(t))^2 \right)\max\{\kappa_2|\tilde{u}|^{q-2} v^2, e^{2\tau \varphi} F_q(\tilde{u}) \} dt d\theta. \eta_{\delta,r}nd{align*} if the support of $v$ is chosen sufficiently small and $\tau\geq \tau_0(n,q)$ is chosen sufficiently large (depending on $q$). Since \begin{align*} |\p_t (\sqrt{b} b)| \leq c e^{t}|\sqrt{b}|, \eta_{\delta,r}nd{align*} we may absorb the contribution $C\tau|(\varphi' \sqrt{b}^{-1}\p_t(b \sqrt{b}) \nabla_{\theta}v, \nabla_{\theta}v)_{L^2_{\vol}}|$ from the right hand side of \eta_{\delta,r}qref{eq:sph_grad} into the left hand side of \eta_{\delta,r}qref{eq:sph_grad} if $r_0>0$ is chosen appropriately small. Thus, we obtain \begin{align*} c_0 \tau \||\varphi''|^{1/2} \nabla_{S^{n-1}} v\|_{L^2_{\vol}}^2 &\leq C \|L v\|_{L^2_{\vol}}^2 . \eta_{\delta,r}nd{align*} \\ \eta_{\delta,r}mph{Step 5: Absorbing the error terms.} Up to now we have proved the estimate \begin{align} \label{eq:intermediate} \begin{split} &\tau^{1/2} \||\varphi''|^{1/2} \nabla v\|_{L^2_{\vol}} + \tau^{3/2} \||\varphi''|^{1/2} v\|_{L^2_{\vol}} + \tau \|e^t |\tilde{u}|^{\frac{q-2}{2}} v\|_{L^2_{\vol}}\\ &\leq C (\|L v\|_{L^2_{\vol}} + \|Ev\|_{L^2_{\vol}} ). \eta_{\delta,r}nd{split} \eta_{\delta,r}nd{align} It hence remains to deal with the error contribution on the right hand side of \eta_{\delta,r}qref{eq:intermediate}. Using the Lipschitz continuity of $a,b$ and $\Psi$ we can estimate \begin{align*} \|Ev\|_{L^2_{\vol}}= \tau \||\varphi' \p_t \ln(\sqrt{b})| v \|_{L^2_{\vol}} \leq C \tau \|e^{t} v\|_{L^2_{\vol}}. \eta_{\delta,r}nd{align*} As before this can be absorbed into the left hand side of \eta_{\delta,r}qref{eq:intermediate} (after possibly choosing $r_0>0$ even smaller and $\tau_0$ even larger). Returning to Cartesian coordinates, then concludes the proof of the Carleman estimate for the operator $Lu=\D_{a^{ij}} u + \frac{\hat{f}_q(u)}{\Psi}$. Using the equivalence from Lemma \ref{lem:geo}, we then also infer a Carleman estimate for the operator $\p_i a^{ij}\p_j u + \hat{f}_q(u)$. This involves lower order errors of the type $R$ from \eta_{\delta,r}qref{eq:LB}, but as outlined in the previous error estimates, these can be absorbed into the left hand side of the Carleman estimate. Hence, we arrive at the desired result of Theorem \ref{prop:varmet}. \eta_{\delta,r}nd{proof} \eta_{\delta,r}nd{document}
\begin{document} \title{Polarization Entangled {\it W} State using Parametric Down-Conversion} \author{Takashi Yamamoto} \email{[email protected]} \author{Kiyoshi Tamaki} \author{Masato Koashi} \author{Nobuyuki Imoto} \homepage{http://www.soken.ac.jp/quantum/index.html} \address{CREST Research Team for Interacting Carrier Electronics, School of Advanced Sciences,\\ The Graduate University for Advanced Studies (SOKENDAI), Hayama, Kanagawa, 240-0193, Japan } \date{\today} \begin{abstract} An experimental scheme for preparing a polarization entangled {\it W} state from four photons emitted by parametric down-conversion is proposed. We consider two different configurations and a method of improving the yield by using single photon sources. The proposed scheme uses only linear optical elements and photon detectors, so that it is feasible with current technology. \pacs{03.67.-a, 42.50.-p} \end{abstract} \maketitle In quantum information processing, including many quantum protocols and quantum computation \cite{N-H}, quantum entanglement plays a crucial role. Most quantum protocols concern the bipartite system, mainly because the nature of multipartite entanglement has not yet been clarified. Recently, however, the nature of multipartite entanglement, especially that of tripartite entanglement, has begun to be clarified. In \cite{dur00}, D\"ur {\it et al.} have classified the tripartite pure states based on equivalence under stochastic LOCC (local operations and classical communication). They showed that there are two different kinds of genuine tripartite entanglement. One is the class of Greenberger-Horne-Zeilinger (GHZ) states \cite{ghz89}, represented, for example, by \begin{eqnarray} \ket{{\rm{GHZ}}}=\frac{1}{\sqrt{2}}\left(\ket{000}+\ket{111}\right)\,, \label{eq-ghz} \end{eqnarray} where $\{\ket{0}, \ket{1}\}$ is the orthonormal basis for a qubit. The other is the class of {\it W} states, represented, for example, by \begin{eqnarray} \ket{W}=\frac{1}{\sqrt{3}}\left(\ket{001}+\ket{010}+\ket{100}\right)\,. \label{eq-w} \end{eqnarray} These two states cannot be converted to each other by LOCC with nonzero success probability. They also show different behavior when one of the qubits is discarded. For three qubits in a GHZ state, the remaining two qubits are completely unentangled; for a {\it W} state, the remaining two qubits are still entangled. Indeed, it was shown that {\it W} states are optimal in the amount of such pairwise entanglement \cite{Koashi00}. Many works have been devoted to the study of GHZ states in connection with Bell's theorem \cite{Mer90-1,GHSZ90,Mer90-2,Roy91}, and violations of Bell inequalities have been demonstrated experimentally \cite{zei00}. Besides the fundamental studies of GHZ states, several applications of these states, such as quantum teleportation \cite{Karlsson01}, quantum secret sharing \cite{hillery99,Cleve99}, and quantum key distribution protocols \cite{Kempe99,Dukin01}, have been proposed. On the other hand, {\it W} states have received little attention until recently. On the application side, quantum key distribution (QKD) with {\it W} states has been proposed \cite{Jaewoo02}, and a {\it W}-class state is used for the optimal universal quantum cloning machine \cite{Buzek96,Buzek97,Gisin97,Buzek97-2,Bruss98,Murao99}. On the fundamental side, Cabello \cite{Cabello02} has illustrated some differences between the violation of local realism exhibited by {\it W} states and that by GHZ states.
The {\it W} states have a clearer prescription for selecting a pair of qubits to be subjected to a Bell's theorem test than the GHZ states have. Thus, not only for the purpose of the realization of some applications, but also for the fundamental interests, it is important to prepare {\it W} states experimentally. \begin{figure}\label{fig:setup1} \end{figure} So far, several schemes for preparation of {\it W} states have been proposed. Zeilinger, Horne, and Greenberger proposed a scheme using third order nonlinearity for path entangled photons \cite{Zei97}. Guo and Zhang proposed a scheme for three entangled atoms via cavity quantum electrodynamics \cite{Guo02}. In this paper, we propose an experimentally feasible scheme for preparing a polarization entangled {\it W} states. The scheme is composed of parametric down-conversion (PDC), linear optical elements, and photon detectors, so that our scheme is feasible by current technologies. In our proposal, there is no interference between the photons passing through different paths, which makes it easy for us to align the optical elements and makes our system insensitive to fluctuations of optical path lengths. In our scheme, we utilize four photons emitted collinearly from type-II PDC, which are in the following state, \begin{eqnarray} \ket{2}_{0{\rm H}}\ket{2}_{0{\rm V}}, \label{eq:1} \end{eqnarray} where $\ket{n}$ is the normalized $n$-photon number state. The subscript numbers label the spatial modes, and ${\rm H}$ and ${\rm V}$ represent horizontal and vertical polarization modes, respectively. As shown in Fig.~\ref{fig:setup1}, these photons are split into four spatial modes (1, 2, 3, and 3$^\prime$) by beam splitters (BS$_k$, $k=1, 2, 3$), whose reflectivity and transmissivity are independent of polarization. The transformation by BS$_k$ is expressed by \begin{eqnarray} \ket{1}_{{\rm H}}&\to &r_k\ket{1}_{k{\rm H}}+t_k\ket{1}_{k^\prime {\rm H}}, \nonumber \\ \ket{1}_{{\rm V}}&\to &r_ke^{i\phi_{k}}\ket{1}_{k{\rm V}}+t_ke^{i\psi_{k}}\ket{1}_{k^\prime {\rm V}}, \nonumber \\ \ket{2}_{{\rm H}}&\to &r^2_k\ket{2}_{k{\rm H}}+t^2_k\ket{2}_{k^\prime {\rm H}}+2r_kt_k\ket{1}_{k{\rm H}}\ket{1}_{k^\prime {\rm H}}, \end{eqnarray} and \begin{eqnarray} \ket{2}_{{\rm V}}&\to &r^2_ke^{2i\phi_{k}}\ket{2}_{k{\rm V}}+t^2_ke^{2i\psi_{k}}\ket{2}_{k^\prime {\rm V}} \nonumber \\ & &+2r_kt_ke^{i(\phi_{k}+\psi_{k})}\ket{1}_{k{\rm V}}\ket{1}_{k^\prime {\rm V}}, \end{eqnarray} where $r_k$ and $t_k$ are the reflection and transmission coefficients of BS$_k$, respectively, which satisfy $|r_{k}|^2+|t_{k}|^2=1$. We assume that $r_k$ and $t_k$ are real, without loss of generality. Here $\phi_{k}$ and $\psi_{k}$ are the phase differences between mode ${\rm H}$ and ${\rm V}$ for reflected and transmitted photons, respectively. For simplicity, we omit the modes in the vacuum, using abbreviations such as $\ket{1}_{k{\rm V}}\ket{0}_{k^\prime{\rm V}} \to \ket{1}_{k{\rm V}}$. After these transformations, the phase offsets for the photons in mode 2 and 3 are compensated by birefringent phase shifters (BPS$_k$, $k=2,3$). The amount of compensation is chosen as \begin{eqnarray} \ket{1}_{{2\rm V}}\to e^{i(-\phi_{2}+\psi_{2}+\psi_{3})}\ket{1}_{{2\rm V}} \end{eqnarray} for BPS$_2$, and \begin{eqnarray} \ket{1}_{{3\rm V}}\to e^{i(-\phi_{3}+\psi_{3})}\ket{1}_{{3\rm V}} \end{eqnarray} for BPS$_3$. After compensating these phase differences, we are only interested in the case where there is a single photon in each spatial mode ($1$, $2$, $3$, and $3^\prime$). 
If such a case is successfully selected, these photons are in the following state, \begin{eqnarray} \frac{1}{\sqrt{2}}(e^{i\phi_{1}}\ket{1}_{1{\rm V}}\ket{W_{{\rm V}}}_{233^\prime}+e^{i(\psi_{1}+\psi_{2}+\psi_{3})}\ket{1}_{1{\rm H}}\ket{W_{{\rm H}}}_{233^\prime}) \label{eq:2} \end{eqnarray} where $\ket{W_{{\rm V}}}_{233^\prime}$ and $\ket{W_{{\rm H}}}_{233^\prime}$ are the {\it W} states which can be written as \begin{eqnarray} \ket{W_{{\rm V}}}_{233^\prime}&\equiv &\frac{1}{\sqrt{3}}(\ket{1}_{2{\rm H}}\ket{1}_{3H}\ket{1}_{3^\prime {\rm V}}+\ket{1}_{2{\rm H}}\ket{1}_{3{\rm V}}\ket{1}_{3^\prime {\rm H}} \nonumber \\ & &+\ket{1}_{2{\rm V}}\ket{1}_{3{\rm H}}\ket{1}_{3^\prime {\rm H}}) \nonumber \end{eqnarray} and \begin{eqnarray} \ket{W_{{\rm H}}}_{233^\prime}&\equiv &\frac{1}{\sqrt{3}}(\ket{1}_{2{\rm V}}\ket{1}_{3{\rm V}}\ket{1}_{3^\prime {\rm H}}+\ket{1}_{2{\rm V}}\ket{1}_{3{\rm H}}\ket{1}_{3^\prime {\rm V}} \nonumber \\ & &+\ket{1}_{2{\rm H}}\ket{1}_{3{\rm V}}\ket{1}_{3^\prime {\rm V}}). \nonumber \end{eqnarray} The probability of obtaining the photons in the state of Eq.~(\ref{eq:2}) is $(2\sqrt{6}r_{1}t^3_{1}r_{2}t^2_{2}r_{3}t_{3})^2$. If we detect a single photon at the photon detector D$_{1{\rm V}}$ and the state is projected to $\ket{1}_{1{\rm V}}\ket{W_{{\rm V}}}_{233^\prime}$, we obtain three photons in the state $\ket{W_{{\rm V}}}_{233^\prime}$. Even if we detect a single photon at the photon detector D$_{1{\rm H}}$ and the state is projected to $\ket{1}_{1{\rm H}}\ket{W_{{\rm H}}}_{233^\prime}$, we can also obtain the state $\ket{W_{{\rm V}}}_{233^\prime}$ after rotating the polarization by $90^{\circ}$ in mode 2, 3, and 3$^\prime$. In this case, the maximum probability of obtaining the photons in the state $\ket{W_{{\rm V}}}_{233^\prime}$ is $3/32$ when we set $r_{1}^2=1/4$, $r_{2}^2=1/3$, and $r_{3}^2=1/2$. \begin{figure}\label{fig:setup2} \end{figure} Although it is difficult to select the single photon in each spatial mode without destroying the photons, we can discard the photocounts caused by the non-{\it W} states if we are allowed to perform the postselection where we select the events of the photocounts in mode $2$, $3$, and $3^\prime$. In practice, to implement our scheme experimentally, we have to pay attention to the errors and the efficiency of generating the photons in {\it W} states. The errors in the selected state are mainly caused by generation of three photon pairs at PDC and the dark counts of photon detectors. In PDC, the photon pair generation rate per pulse $\gamma$ is approximately $10^{-4}$ in typical multi-photon experiments \cite{zei99,zei00,Zeilinger01,Zeilinger02}. The three-pair generation rate $O(\gamma^3)$ is approximately $10^{-4}$ lower than two-pair generation rate $O(\gamma^2)$. The dark counts of current photon detectors is quite low for multi-photon coincidence measurement, so that these errors are negligible. (See also \cite{TYamamoto01} about this kind of errors.) To see whether the efficiency of generating three photons in the {\it W} state is acceptable, we compare the yield of the {\it W} state with that of GHZ states in \cite{zei99,zei00,Zeilinger01,Zeilinger02} where type-II PDC is also used for generating three photons. In the GHZ experiment, the probability of obtaining the photons in the GHZ state after generating two photon pairs is $3/8$. Compared with this probability, the yield of {\it W} states in our scheme is smaller by a factor $1/4$ . 
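As a quick arithmetic check of the figures just quoted (added here for the reader's convenience), note that $(2\sqrt{6}r_{1}t^3_{1}r_{2}t^2_{2}r_{3}t_{3})^2=24\,r_1^2t_1^6\,r_2^2t_2^4\,r_3^2t_3^2$, and that the three factors $r_1^2t_1^6$, $r_2^2t_2^4$, and $r_3^2t_3^2$ are separately maximized at $r_{1}^2=1/4$, $r_{2}^2=1/3$, and $r_{3}^2=1/2$, where
\begin{eqnarray}
24\times\frac{1}{4}\left(\frac{3}{4}\right)^{3}\times\frac{1}{3}\left(\frac{2}{3}\right)^{2}\times\frac{1}{4}
=24\times\frac{27}{256}\times\frac{4}{27}\times\frac{1}{4}=\frac{3}{32},
\nonumber
\end{eqnarray}
which is indeed one quarter of the GHZ-experiment value $3/8$. The same optimum can also be confirmed numerically; the following short script (a sanity check added for illustration only, not part of the proposed experiment) evaluates the success probability on a grid of reflectivities and finds no value exceeding $3/32$:
\begin{verbatim}
import numpy as np
r = np.linspace(0, 1, 101)                        # grid of reflectivities r_k^2
R1, R2, R3 = np.meshgrid(r, r, r, indexing="ij")
P = 24 * R1*(1-R1)**3 * R2*(1-R2)**2 * R3*(1-R3)  # (2 sqrt6 r1 t1^3 r2 t2^2 r3 t3)^2
print(P.max(), 3/32)                              # grid maximum ~ 3/32 = 0.09375
\end{verbatim}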
However, using stimulated PDC \cite{Bouwmeester01}, the four-photon generation rate can be 16 times higher than spontaneous PDC, which suggest that our proposal is experimentally feasible. We can also consider another setup (scheme II) as shown in Fig.~\ref{fig:setup2}. In this scheme, after compensations similar to scheme I expressed by \begin{eqnarray} \ket{1}_{{2\rm V}}\to e^{i(-\phi_{1}-\phi_{2}+\psi_{1}+\psi_{3})}\ket{1}_{{2\rm V}} \end{eqnarray} and \begin{eqnarray} \ket{1}_{{3\rm V}}\to e^{i(-\phi_{3}+\psi_{3})}\ket{1}_{{3\rm V}}, \end{eqnarray} we obtain the photons in the following state, \begin{eqnarray} \frac{1}{\sqrt{2}}(e^{i\phi_{1}}\ket{1}_{1{\rm V}}\ket{W_{{\rm V}}}_{233^\prime}+e^{i(\psi_{1}-\psi_{2}+\psi_{3})}\ket{1}_{1{\rm H}}\ket{W_{{\rm H}}}_{233^\prime}) \end{eqnarray} with the probability $(2\sqrt{6}r^2_{1}t^2_{1}r_{2}t_{2}r_{3}t_{3})^2$. The maximum probability of obtaining these photons in the state {\it W} is $3/32$, which is the same as scheme I, when we set $r_{1}^2=r_{2}^2=r_{3}^2=1/2$. This scheme has an advantage that the maximum probability can be obtained by using only symmetric beam splitters. \begin{figure}\label{fig:setup3} \end{figure} So far, we have assumed that the reflectivity and transmissivity of BS$_k$ are independent of polarization. If these depend on the polarization, the fidelity of the final state to the desired {\it W} state becomes lower. In this case, scheme I and scheme II show slightly different behavior. Here, we represent the polarization-dependent reflection and transmission coefficient of BS$_k$ as $r_{kL}$ and $t_{kL}$, respectively, which satisfy $r_{kL}^2+t_{kL}^2=1$ where $L={\rm H},{\rm V}$. We also introduce the error factor $\delta _{k{\rm L}}$ defined by $\delta _{k{\rm L}}=r_{kL}^2-(r^{opt}_{k})^2$ where $r^{opt}_{k}$ is the optimal reflectivity, namely $(r^{opt}_{1})^2=1/4$, $(r^{opt}_{2})^2=1/3$, and $(r^{opt}_{3})^2=1/2$ in scheme I and $(r^{opt}_{1})^2=(r^{opt}_{2})^2=(r^{opt}_{3})^2=1/2$ in scheme II. When $\delta _{k{\rm L}}$ are small, the fidelity ${\rm F_{I}}$ in scheme I and ${\rm F_{II}}$ in scheme II are given by \begin{eqnarray} {\rm F_{I}}&\approx &1-\frac{1}{24}(27\delta^2_{2}+16\delta^2_{3})+O(\delta _{k}^{3}) \end{eqnarray} and \begin{eqnarray} {\rm F_{II}}&\approx &1-\frac{2}{9}[(2\delta_{1}+\delta_{2})^2 +3\delta^2_{3}]+O(\delta _{k}^{3}). \end{eqnarray} where $\delta _{k}=\delta _{k{\rm H}}- \delta _{k{\rm V}}$. In scheme I, $\delta _{1}$ merely changes the amplitudes of $\ket{1}_{1{\rm V}}\ket{W_{{\rm V}}}_{233^\prime}$ and $\ket{1}_{1{\rm H}}\ket{W_{{\rm H}}}_{233^\prime}$ in Eq.~(\ref{eq:2}), so that this does not affect the fidelity unlike scheme II. The use of a single photon source (SPS), which is currently being developed \cite{YYamamoto01,YYamamoto02}, will improve the rate of generating the photons in {\it W} states. An ideal SPS emits a single photon in a single mode at a desired time. In this case, we can start from only three photons in the state $\ket{2}_{0{\rm H}}\ket{1}_{0{\rm V}}$ (or $\ket{2}_{0{\rm V}}\ket{1}_{0{\rm H}}$ ). To prepare this initial state, three SPSs and a symmetric beam splitter (BS$_1$) are arranged as shown in Fig~\ref{fig:setup3} (SPS1 and SPS2 emit a photon in mode H and SPS3 emits a photon in mode V). The state at the output ports of BS$_1$ is $(\ket{2}\ket{0}+\ket{0}\ket{2})/\sqrt{2}$, so that we obtain three photons in the state $\ket{2}_{0{\rm H}}\ket{1}_{0{\rm V}}$ with probability $1/2$ under the condition that each SPS has emitted a photon. 
After we transform these photons by BS$_2$ and BS$_3$, and in the case where there is one photon in each spatial mode, we can obtain the photons in the state $\ket{W_{{\rm V}}}_{233^\prime}$. The probability of obtaining the photons in the state $\ket{W_{{\rm V}}}_{233^\prime}$ after generating a photon from each SPS is $\frac{1}{2}(\sqrt{6}r_{2}t^2_{2}r_{3}t_{3})^2$, and the maximum of this probability is $3/32$, which is the same as in the above schemes, attained at $r_{2}^2=r_{3}^2=1/2$. The generation rate of one photon from an SPS is approximately $0.4$ per pulse \cite{YYamamoto02}, so that the three-photon generation rate is about $0.064$ per pulse, which is significantly larger than $\sim 10^{-8}$ per pulse for PDC \cite{zei99}. Since SPS and PDC currently achieve almost the same repetition rate, using SPSs improves the rate of preparing the state {\it W}. In our scheme, one can also prepare non-equally weighted states belonging to the {\it W} class. An example is the state used for the optimal universal quantum cloning machine via teleportation by three distant parties \cite{Bruss98}, \begin{eqnarray} & &\sqrt{\frac{2}{3}}\ket{1}_{2{\rm H}}\ket{1}_{3{\rm H}}\ket{1}_{3^\prime {\rm V}} -\frac{1}{\sqrt{6}}\ket{1}_{2{\rm H}}\ket{1}_{3{\rm V}}\ket{1}_{3^\prime {\rm H}} \nonumber \\ & &-\frac{1}{\sqrt{6}}\ket{1}_{2{\rm V}}\ket{1}_{3{\rm H}}\ket{1}_{3^\prime {\rm H}}. \end{eqnarray} To prepare such states, one can generally include additional polarization-dependent losses in modes $2$, $3$, and $3^\prime$ and adjust BPS$_k$ properly. In summary, we have proposed simple schemes for preparing the photons in {\it W} states by using parametric down-conversion, linear optical elements, and photon detectors. The schemes are easy to implement and feasible with current technology. They can be further improved by using single photon sources to obtain a higher generation rate. We thank K. Nagata, J. Shimamura, and S. K. \"Ozdemir for helpful discussions. \end{document}
\begin{document} \maketitle \begin{abstract} We establish the $L^p(\mathbb{R}^3)$ boundedness of the helical maximal function for the sharp range $p>3$. Our results improve upon the previously known bounds, which covered only the range $p>4$. The key ingredient is a new microlocal smoothing estimate for averages along dilates of the helix, which is established via a square function analysis. \end{abstract} \section{Introduction} \subsection{Main results} For $n\ge 2$ let $\gamma \colon I \to \mathbb{R}^n$ be a smooth curve, where $I \subset \mathbb{R}$ is a compact interval, and let $\chi \in C^{\infty}(\mathbb{R})$ be a bump function supported on the interior of $I$. Given $t>0$, consider the averaging operator \begin{equation*} A_tf(x) := \int_{\mathbb{R}} f(x - t\gamma(s))\,\chi(s)\,\mathrm{d} s \end{equation*} and define the associated maximal function \begin{equation*} M_\gamma f(x):= \sup_{t>0} |A_t f(x)|. \end{equation*} We are interested in the $L^p$ mapping properties of $M_{\gamma}$. It is well known that the range of exponents $p$ for which $M_{\gamma}$ is bounded on $L^p$ depends on the curvature of the underlying curve. Accordingly, we consider smooth curves $\gamma \colon I \to \mathbb{R}^n$ which are \textit{non-degenerate}, in the sense that there is a constant $c_0 > 0$ such that \begin{equation}\label{eq:nondegenerate} |\det(\gamma'(s), \cdots, \gamma^{(n)}(s))| \geq c_0 \qquad \textrm{for all $s \in I$.} \end{equation} A celebrated theorem of Bourgain \cite{Bourgain1985, Bourgain1986} states that if $\gamma \colon I \to \mathbb{R}^2$ is a smooth, non-degenerate plane curve, then $M_{\gamma}$ is bounded on $L^p(\mathbb{R}^2)$ if and only if $p > 2$. Here we establish a 3-dimensional variant of this result. \begin{theorem}\label{intro max thm} If $\gamma: I \to \mathbb{R}^3$ is a smooth, non-degenerate space curve, then $M_{\gamma}$ is bounded on $L^p(\mathbb{R}^3)$ if and only if $p > 3$. \end{theorem} In the $n=3$ case, the condition \eqref{eq:nondegenerate} is equivalent to the non-vanishing of the curvature and torsion functions. As a concrete example, Theorem~\ref{intro max thm} implies that the \textit{helical maximal operator} \begin{equation*} M_{\mathrm{Helix}}f(x) := \sup_{t > 0} \big|\int_0^{2 \pi} f(x_1 - t\cos \theta, x_2 - t\sin \theta, x_3 - t\theta)\,\mathrm{d} \theta \big| \end{equation*} is bounded on $L^p(\mathbb{R}^3)$ for all $p > 3$.\medskip A simple Knapp-type example shows $L^p$ boundedness fails for $p \leq 3$ (see \S\ref{nec cond sec}). On the other hand, Pramanik and the fourth author~\cite{PS2007} proved that Wolff's decoupling inequality \cite{Wolff2000} for the light cone implies the boundedness of $M_\gamma$ for a suitable range of $p$. The optimal range for Wolff's inequality was obtained by Bourgain and Demeter \cite{BD2015}, and the combination of the results in \cite{PS2007} and \cite{BD2015} yields the $L^p$ boundedness of $M_{\gamma}$ for the partial range $4 < p \leq \infty$.
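As a concrete check of \eqref{eq:nondegenerate} in this example, note that with the parametrisation $\gamma(\theta) = (\cos\theta, \sin\theta, \theta)$ appearing in the display above, one computes
\begin{equation*}
\det\big(\gamma'(\theta), \gamma''(\theta), \gamma'''(\theta)\big) = \det \begin{bmatrix} -\sin\theta & -\cos\theta & \sin\theta \\ \cos\theta & -\sin\theta & -\cos\theta \\ 1 & 0 & 0 \end{bmatrix} = \cos^2\theta + \sin^2\theta = 1,
\end{equation*}
so the helix satisfies \eqref{eq:nondegenerate} with $c_0 = 1$, consistent with its non-vanishing curvature and torsion.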
Thus, Theorem~\ref{intro max thm} closes the gap by establishing boundedness for the remaining exponents $3 < p \leq 4$.\mathcal{M}edskip To prove Theorem~\ref{intro max thm}, we follow the basic strategy introduced by Mockenhaupt, Sogge and the fourth author \cite{MSS1992} in the context of the classical circular maximal function in the plane. In particular, in \cite{MSS1992} the authors gave an alternative proof of Bourgain's maximal theorem, deriving it as a consequence of certain \textit{local smoothing} estimates for the wave propagator. In the case of maximal functions associated to space curves, Theorem~\ref{intro max thm} follows from a local smoothing estimate for a class of Fourier integral operators associated to the averages $A_t$. To give a simple statement of the key underlying inequality, set $\mathcal{M}athfrak{A}_{\gammamma}f(x,t) := \rho(t) \cdot A_tf(x)$ for some $\rho \in C^{\infty}_c(\mathcal{M}athbb{R})$ with $\mathcal{M}athrm{supp}\, \rho \subseteq [1,2]$. Our main theorem then reads as follows. \mathcal{M}athbf{e}gin{theorem}\lambdabel{intro LS thm} Suppose $\gammamma: I \to \mathcal{M}athbb{R}^3$ is a smooth, non-degenerate space curve and let $3 \leq p \leq 4$ and $\sigma} \def\Si{\Sigmagmama < \sigma} \def\Si{\Sigmagmama(p)$ where $\sigma} \def\Si{\Sigmagmama(p) := \tfrac{1}{5}\big(1 + \tfrac{2}{p}\big)$. Then $\mathcal{M}athfrak{A}_{\gammamma}$ maps $L^p(\mathcal{M}athbb{R}^3)$ boundedly into $L^p_{\sigma} \def\Si{\Sigmagmama}(\mathcal{M}athbb{R}^4)$. \end{theorem} Note that $\sigma} \def\Si{\Sigmagmama(p)>1/p$ for $p > 3$. Thus, by a well-known Sobolev embedding argument, Theorem~\ref{intro LS thm} implies Theorem~\ref{intro max thm}. For completeness, the details of this implication are presented in~\S\ref{LS vs max sec}. \subsection{Comparison with previous results} It follows from work of Pramanik and the fourth author \cite{PS2007} (combined with sharp decoupling estimates from \cite{BD2015}) that, for each fixed $t$, the single average $A_t$ maps $L^p(\mathcal{M}athbb{R}^3)$ boundedly into $L^p_{\alpha}(\mathcal{M}athbb{R}^3)$ for all $2 \leq p \leq \infty$ and $\alpha < \alpha(p)$, where{\mathfrak {o}}otnote{In \cite{PS2007} the $\alpha=\alpha(p)$ endpoint estimate is also shown to hold for $p > 4$.} \mathcal{M}athbf{e}gin{equation*} \alpha(p):=\mathcal{M}athbf{e}gin{cases} {\mathfrak {r}}ac{1}{3}({\mathfrak {r}}ac{1}{2}+{\mathfrak {r}}ac{1}{p}) \qquad &\text{if $2 \leq p \leq 4$}\\ {\mathfrak {r}}ac{1}{p} \qquad & \text{if $p \geq 4$} \end{cases}. \end{equation*} Theorem~\ref{intro LS thm} represents a gain of $\sigma} \def\Si{\Sigmagmama(p)-\alpha(p)-\varepsilon={\mathfrak {r}}ac{1}{15}({\mathfrak {r}}ac{1}{2} + {\mathfrak {r}}ac{1}{p})-\varepsilon$ derivatives when integrating locally in time in the range $3 \leq p \leq 4$. In this sense, Theorem~\ref{intro LS thm} is an example of \textit{local smoothing} (see, for instance, \cite{Sogge1991, MSS1992, GWZ2020, BHS2} for a discussion of the classical local smoothing phenomenon for the wave equation).\mathcal{M}edskip Theorem~\ref{intro LS thm} complements previous local smoothing estimates from \cite{PS2007}, which deal with the supercritical{\mathfrak {o}}otnote{Here we are referring to criticality for the \textit{single average} operator, so that $p=4$ correspond to the critical point where the behaviour of the $\alpha(p)$ exponent changes.} regime $p > 4$. 
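For the reader's convenience, we record the elementary exponent computations behind two of the claims above, namely that $\sigma(p) > 1/p$ for $p > 3$ and the stated gain over $\alpha(p)$ in the range $3 \leq p \leq 4$:
\begin{equation*}
\sigma(p) - \frac{1}{p} = \frac{1}{5}\Big(1 + \frac{2}{p}\Big) - \frac{1}{p} = \frac{p-3}{5p}, \qquad \sigma(p) - \alpha(p) = \frac{1}{5}\Big(1 + \frac{2}{p}\Big) - \frac{1}{3}\Big(\frac{1}{2} + \frac{1}{p}\Big) = \frac{1}{15}\Big(\frac{1}{2} + \frac{1}{p}\Big),
\end{equation*}
so that, in particular, $\sigma(p) > 1/p$ precisely when $p > 3$.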
In \cite[Theorem 1.4]{PS2007} it is shown that $\mathcal{M}athfrak{A}_\gammamma$ maps $L^p(\mathcal{M}athbb{R}^3)$ boundedly into $L^p_{\delta}(\mathcal{M}athbb{R}^4)$ for all $2 \leq p \leq \infty$ and $\delta < \delta(p)$, where \mathcal{M}athbf{e}gin{equation*} \delta(p):=\mathcal{M}athbf{e}gin{cases} {\mathfrak {r}}ac{1}{3}({\mathfrak {r}}ac{1}{2}+{\mathfrak {r}}ac{1}{p}) \qquad &\text{if $2 \leq p \leq 6$}\\ {\mathfrak {r}}ac{4}{3p} \qquad & \text{if $p \geq 6$} \end{cases}. \end{equation*} Note that this does \textbf{not} yield any local smoothing estimates in the subcritical regime $2 \leq p \leq 4$, where $\alpha(p)$ and $\delta(p)$ agree. Consequently, the local smoothing estimates in \cite{PS2007} only imply $L^p(\mathcal{M}athbb{R}^3)$-boundedness of $M_\gammamma$ for the restricted range $p>4$.\mathcal{M}edskip It is remarked that the (somewhat loosely) related problem of $L^p(\mathcal{M}athbb{R}^n) \to L^p(\mathcal{M}athbb{R}^{n+1})$ bounds for $\mathcal{M}athfrak{A}_{\gammamma}$ (as opposed to Sobolev bounds) was investigated in \cite{Hickman2016}. This question is significantly easier than establishing local smoothing estimates and, accordingly, in \cite{Hickman2016} an almost complete characterisation of the $L^p(\mathcal{M}athbb{R}^n) \to L^q(\mathcal{M}athbb{R}^{n+1})$ mapping properties is obtained in all dimensions. \subsection{Overview of the argument}\lambdabel{overview subsec} For $\gammamma \colon I \to \mathcal{M}athbb{R}^n$ a smooth curve let $\mathcal{M}u$ denote the pushforward of the measure $\chi(s)\mathcal{M}athrm{d} s$ under $\gammamma$. Defining the dilates $\inn{\mathcal{M}u_t}{f} = \inn{\mathcal{M}u}{f(t\,\cdot\,)}$, it follows that the underlying averaging operators satisfy $A_tf = f \ast \mathcal{M}u_t$. Thus, in the frequency domain $A_t$ corresponds to multiplication against the Fourier transform \mathcal{M}athbf{e}gin{equation*} \widehat{\mathcal{M}u}_t(\xi) = \int_{\mathcal{M}athbb{R}} e^{-i t \inn{\gammamma(s)}{\xi}} \chi(s)\, \mathcal{M}athrm{d} s. \end{equation*} Since the main estimate in Theorem~\ref{intro LS thm} is an $L^p$-Sobolev bound, we are led to studying the decay properties of the above oscillatory integral for large $\xi$.\mathcal{M}edskip Suppose $\gammamma \colon I \to \mathcal{M}athbb{R}^3$ satisfies the non-degeneracy hypothesis \eqref{eq:nondegenerate}. This implies $\sum_{j=1}^3|\inn{\gammamma^{(j)}(s)}{\xi}| \gtrsim |\xi|$ for all $s \in I$ and all $\xi \in \widehat{\mathcal{M}athbb{R}}^3$ and, consequently, a simple van der Corput estimate yields \mathcal{M}athbf{e}gin{equation*} |\widehat{\mathcal{M}u}_t(\xi)| \lesssim_{\gammamma} (1+t|\xi|)^{-1/3}. \end{equation*} However, this slow decay rate only occurs on a small portion of the frequency domain, corresponding to a (neighbourhood of a) codimension 1 cone $\Gammamma \subseteq \widehat{\mathcal{M}athbb{R}}^3$ generated by the binormal vector $\mathcal{M}athbf{e}_3(s)$ to the curve $\gammamma$. In light of this, it is natural to dyadically decompose the frequency domain into conic regions according to the distance to $\Gammamma$.\mathcal{M}edskip The pieces of the decomposition which are supported far away from $\Gammamma$ satisfy improved decay estimates. In one extreme case, the non-degeneracy condition improves to $\sum_{j=1}^2|\inn{\gammamma^{(j)}(s)}{\xi}| \gtrsim |\xi|$ and the van der Corput estimate therefore becomes \mathcal{M}athbf{e}gin{equation*} |\widehat{\mathcal{M}u}_t(\xi)| \lesssim_{\gammamma} (1+t|\xi|)^{-1/2}. 
\end{equation*} In this situation, the operator behaves in many ways like the circular average in the plane, and can be estimated using a lifted version of the argument developed to study the 2 dimensional problem in \cite{MSS1992} and \cite{Wolff2000}. In particular, to prove the desired local smoothing estimate in this extreme case, we observe that the Fourier transform of $\mathcal{M}athfrak{A}_{\gammamma}$ in all $4$ variables $(x,t)$ is essentially supported in a neighbourhood of a codimension 1 cone $\widetilde{\Gammamma}_1 \subseteq \widehat{\mathcal{M}athbb{R}}^4$. This surface is analogous to the light cone in $\widehat{\mathcal{M}athbb{R}}^3$ which is central to the analysis of local smoothing for the circular averages in \cite{MSS1992, Wolff2000} and, more recently, \cite{GWZ2020}. Following an argument of Wolff~\cite{Wolff2000}, the operator is further decomposed according to plate regions on $\widetilde{\Gammamma}_1$ using a decoupling estimate. The individual pieces of this decomposition are then finally amenable to direct estimation.\mathcal{M}edskip The method described in the previous paragraph only directly applies very far from the binormal cone (and therefore far from the most singular parts of the operator). However, by using decoupling inequalities and rescaling, it can also be used to study pieces of the decomposition which lie closer to $\Gammamma$. The key observation is that the pieces of the decomposition which lie close to $\Gammamma$ can be decoupled into smaller pieces which, when rescaled, resemble the part of the decomposition far from $\Gammamma$. This, roughly speaking, is the approach used in \cite{PS2007} to obtain Theorem~\ref{intro max thm} in the restricted range $4 < p \leq \infty$.\mathcal{M}edskip In order to prove the full range of $L^p$-boundedness of Theorem~\ref{intro max thm} a more direct method is required to analyse the pieces of the decomposition which lie close to the binormal cone. For this part of the operator, the microlocal geometry no longer resembles that of the 2-dimensional problem and, consequently, the decoupling and rescaling argument used in \cite{PS2007} is inefficient.\mathcal{M}edskip Close to the binormal cone, we observe that the Fourier transform of $\mathcal{M}athfrak{A}_{\gammamma}$ in all $4$ variables $(x,t)$ is essentially supported in a neighbourhood of a codimension 2 cone $\widetilde{\Gammamma}_2 \subseteq \widehat{\mathcal{M}athbb{R}}^4$. This cone is a lower-dimensional submanifold of the cone $\widetilde{\Gammamma}_1$ we encountered earlier. Similarly to the previous case, the operator is further decomposed according to plate regions, now along $\widetilde{\Gammamma}_2$. However, in order to efficiently carry out this decomposition, here we use a square function rather than a decoupling inequality, in the spirit of \cite{MSS1992}. The required square function estimate is deduced using a $4$-linear restriction estimate from \cite{BBFL2018}. After applying the square function, a series of weighted $L^2$ inequalities can be brought to bear on the problem to obtain, together with various corresponding Nikodym-type maximal bounds, a favourable estimate for this part of the operator. 
This final step of the argument is itself somewhat involved and a discussion of the details is beyond the scope of this introduction.\mathcal{M}edskip The above discussion focuses on two extreme cases of the problem: \mathcal{M}athbf{e}gin{enumerate}[i)] \item Far from the binormal cone $\Gammamma$, where $\mathcal{M}athfrak{A}_{\gammamma}$ is $(x,t)$-Fourier localised to a codimension 1 cone $\widetilde{\Gammamma}_1$. \item Close to the binormal cone $\Gammamma$, where $\mathcal{M}athfrak{A}_{\gammamma}$ is $(x,t)$-Fourier localised to a codimension 2 cone $\widetilde{\Gammamma}_2$. \end{enumerate} For pieces of the decomposition which lie in the intermediate range, both cones $\widetilde{\Gammamma}_1$ and $\widetilde{\Gammamma}_2$ play a r\^ole in the analysis. This complicates matters somewhat, since it is necessary to carry out frequency decompositions simultaneously with respect to both geometries. \subsection*{Outline of the paper} This paper is structured as follows: \mathcal{M}athbf{e}gin{itemize} \item In \S\ref{LS vs max sec} we show how Theorem~\ref{intro LS thm} implies Theorem~\ref{intro max thm}. \item In \S\ref{sec:bandlimited} we reduce Theorem~\ref{intro LS thm} to its version for band-limited functions, which is Theorem~\ref{LS thm}. \item In \S\ref{curve sym sec} we introduce a class of model curves. \item In \S\ref{key ingredients sec} we state 3 key auxiliary results that feature in the proof of Theorem~\ref{LS thm}: a reverse square function estimate in $\mathcal{M}athbb{R}^{3+1}$, a forward square function estimate in $\mathcal{M}athbb{R}^3$ and a Nikodym maximal operator bound. \item In \S\S\ref{sec:slow decay cone}--\ref{J=3 sec} we present the proof of Theorem~\ref{LS thm}. \item In \S\ref{reverse SF sec} we present the proof of the reverse square function estimate in $\mathcal{M}athbb{R}^{3+1}$ (Theorem~\ref{Frenet reverse SF theorem}). \item In \S\ref{forward SF sec} we present the proof of the forward square function estimate in $\mathcal{M}athbb{R}^3$ (Proposition~\ref{f SF prop}). \item In \S\ref{Nikodym sec} we present the proof of the Nikodym maximal operator bound (Proposition~\ref{Nikodym prop}). \item In \S\ref{nec cond sec} we show the condition $p > 3$ is necessary for the boundedness of the global maximal function. \item Appendix~\ref{BG appendix} contains an abstract broad/narrow decomposition lemma which features in the proof of Theorem~\ref{Frenet reverse SF theorem}. \item There are two further appendices which deal with various auxiliary results and technical lemmas used in the main argument. \end{itemize} \subsection*{Notational conventions} Given a (possibly empty) list of objects $L$, for real numbers $A_p, B_p \geq 0$ depending on some Lebesgue exponent $p$ or dimension parameter $n$ the notation $A_p \lesssim_L B_p$, $A_p = O_L(B_p)$ or $B_p \gtrsim_L A_p$ signifies that $A_p \leq CB_p$ for some constant $C = C_{L,p,n} \geq 0$ depending on the objects in the list, $p$ and $n$. In addition, $A_p \sigma} \def\Si{\Sigmam_L B_p$ is used to signify that both $A_p \lesssim_L B_p$ and $A_p \gtrsim_L B_p$ hold. Given $a$, $b \in \mathcal{M}athbb{R}$ we write $a \wedge b:= \mathcal{M}in \{a, b\}$ and $a \vee b:=\mathcal{M}ax \{a,b\}$. The length of a multiindex $\alpha\in {\mathbb {N}}_0^n$ is given by $|\alpha|=\sum_{i=1}^n{\alpha_i}$. 
\section{Local smoothing vs maximal bounds}\lambdabel{LS vs max sec} For the readers' convenience, here we state and prove a general result relating local smoothing estimates for the operator $\mathcal{M}athfrak{A}_{\gammamma}f(x,t) := \rho(t) \, A_tf(x)$ to $L^p$ estimates for the corresponding maximal function $M_{\gammamma}$. \mathcal{M}athbf{e}gin{proposition}\lambdabel{LS vs max prop} Let $\gammamma \colon I \to \mathcal{M}athbb{R}^n$ be a smooth curve and suppose $\mathcal{M}athfrak{A}_{\gammamma}$ maps $L^{p}(\mathcal{M}athbb{R}^n)$ boundedly into $L^{p}_{\sigma} \def\Si{\Sigmagmama}(\mathcal{M}athbb{R}^{n+1})$ for some $2 \leq p < \infty$ and $\sigma} \def\Si{\Sigmagmama > 1/p$. Then $M_{\gammamma}$ is bounded on $L^p(\mathcal{M}athbb{R}^n)$. \end{proposition} Observe that the exponent $\sigma} \def\Si{\Sigmagmama(p) := \tfrac{1}{5}(1 + \tfrac{2}{p})$ satisfies $\sigma} \def\Si{\Sigmagmama(p) > 1/p$ for all $p > 3$. Consequently, Theorem~\ref{intro LS thm} combines with Proposition~\ref{LS vs max prop} to yield Theorem~\ref{intro max thm} in the restricted range $3 < p \leq 4$. The remaining estimates follow from interpolation with the trivial $L^{\infty}$ bound.\mathcal{M}edskip Before presenting the proof we introduce a system of Littlewood--Paley functions which will feature throughout the article. Fix $\eta \in C^\infty_c(\mathcal{M}athbb{R})$ non-negative and such that \mathcal{M}athbf{e}gin{equation}\lambdabel{eta def} \eta(r) = 1 \quad \textrm{if $r \in [-1,1]$} \quad \textrm{and} \quad \mathcal{M}athrm{supp}\, \eta \subseteq [-2,2] \end{equation} and define $\mathcal{M}athbf{e}ta^k$, $\tilde{\mathcal{M}athbf{e}ta}^k \in C^\infty_c(\mathcal{M}athbb{R})$ by \mathcal{M}athbf{e}gin{equation}\lambdabel{beta def} \mathcal{M}athbf{e}ta^k(r):=\eta(2^{-k}r) - \eta(2^{-k+1}r) \qquad \textrm{and} \qquad \tilde{\mathcal{M}athbf{e}ta}^k(r):=\eta(2^{-k-1}r) - \eta(2^{-k+2}r) \end{equation} for each $k \in \mathcal{M}athbb{Z}$. By a slight abuse of notation we also let $\eta$, $\mathcal{M}athbf{e}ta^k$, $\tilde{\mathcal{M}athbf{e}ta}^k \in C^{\infty}_c(\widehat{\mathcal{M}athbb{R}}^n)$ denote the radial functions obtained by evaluating the corresponding univariate functions at $|\xi|$. Finally, if $k=0$, then we drop the superscript and simply write $\mathcal{M}athbf{e}ta := \mathcal{M}athbf{e}ta^0$ and $\tilde{\mathcal{M}athbf{e}ta} := \tilde{\mathcal{M}athbf{e}ta}^0$. Note that the $\mathcal{M}athbf{e}ta^k$ form a partition of unity of $\widehat{\mathcal{M}athbb{R}}^n$ subordinated to a family of dyadic annuli, and they satisfy the reproducing formula $\mathcal{M}athbf{e}ta^k = \tilde{\mathcal{M}athbf{e}ta}^k\cdot \mathcal{M}athbf{e}ta^k$. \mathcal{M}athbf{e}gin{proof}[Proof of Proposition~\ref{LS vs max prop}] Decompose the $t$ parameter into dyadic intervals \mathcal{M}athbf{e}gin{equation*} M_{\gammamma} f(x) = \sup_{\ell \in \mathcal{M}athbb{Z}} \sup_{1 \leq t \leq 2} |A_{2^{\ell}t}f(x)|. \end{equation*} Performing a Littlewood--Paley decomposition on each of the averaging operators, \mathcal{M}athbf{e}gin{equation*} M_{\gammamma} f(x) \leq \sum_{k = 1}^{\infty}\begin{itemize}g(\sum_{\ell \in \mathcal{M}athbb{Z}} \sup_{1 \leq t \leq 2} |A_{2^{\ell}t}\mathcal{M}athbf{e}ta_{k-\ell}(D)f(x)|^p \begin{itemize}g)^{1/p} + CM_{\mathcal{M}athrm{HL}}f(x) \end{equation*} where $M_{\mathcal{M}athrm{HL}}$ is the Hardy--Littlewood maximal function. 
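For the reader's convenience we spell out this step: for each fixed $\ell$ one has $f = \eta_{-\ell}(D)f + \sum_{k \geq 1}\beta_{k-\ell}(D)f$, by telescoping the definition of the $\beta^k$, and hence
\begin{equation*}
\sup_{\ell \in \mathbb{Z}} \sup_{1 \leq t \leq 2} |A_{2^{\ell}t}f(x)| \leq \sup_{\ell \in \mathbb{Z}} \sup_{1 \leq t \leq 2} |A_{2^{\ell}t}\eta_{-\ell}(D)f(x)| + \sum_{k = 1}^{\infty}\Big(\sum_{\ell \in \mathbb{Z}} \sup_{1 \leq t \leq 2} |A_{2^{\ell}t}\beta_{k-\ell}(D)f(x)|^p\Big)^{1/p},
\end{equation*}
where we have also used the crude bound $\sup_{\ell} a_{\ell} \leq (\sum_{\ell} a_{\ell}^p)^{1/p}$ for non-negative sequences $(a_{\ell})$.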
Indeed, it is not difficult to verify that the pointwise estimate \mathcal{M}athbf{e}gin{equation*} \sup_{\ell \in \mathcal{M}athbb{Z}} \sup_{1 \leq t \leq 2} |A_{2^{\ell}t}\eta_{-\ell}(D)f(x)| \leq C M_{\mathcal{M}athrm{HL}}f(x); \end{equation*} for $1 \leq t \leq 2$ the function $A_{2^{\ell}t} \eta_{-\ell}(D)f(x)$ roughly corresponds to an average of $f$ over a ball of radius $2^{\ell}$ centred at $x$. Thus, by the Hardy--Littlewood maximal theorem and the triangle inequality it suffices to show that \mathcal{M}athbf{e}gin{equation}\lambdabel{LP vs max 1} \sum_{k = 1}^{\infty}\begin{itemize}g(\sum_{\ell \in \mathcal{M}athbb{Z}} \big\|\sup_{1 \leq t \leq 2} |A_{2^{\ell}t}\mathcal{M}athbf{e}ta_{k-\ell}(D)f| \big\|_{L^p(\mathcal{M}athbb{R}^n)}^p \begin{itemize}g)^{1/p} \lesssim_{\gammamma, p} \|f\|_{L^p(\mathcal{M}athbb{R}^n)}. \end{equation} By a simple scaling argument, one obtains the operator norm identity \mathcal{M}athbf{e}gin{equation*} \|\sup_{1 \leq t \leq 2} |A_{2^{\ell} t} \mathcal{M}athbf{e}ta_{k-\ell}(D)|\|_{L^p(\mathcal{M}athbb{R}^n) \to L^p(\mathcal{M}athbb{R}^n)} = \|\sup_{1 \leq t \leq 2} |A_{t} \mathcal{M}athbf{e}ta_{k}(D)|\|_{L^p(\mathcal{M}athbb{R}^n) \to L^p(\mathcal{M}athbb{R}^n)}. \end{equation*} Combining this with the hypothesised local smoothing estimate, it follows that \mathcal{M}athbf{e}gin{align}\lambdabel{LP vs max 2} \begin{itemize}g(\int_1^2\|A_{2^{\ell} t} \mathcal{M}athbf{e}ta_{k-\ell}(D)f\|_{L^p(\mathcal{M}athbb{R}^n)}^p \,\mathcal{M}athrm{d} t\begin{itemize}g)^{1/p} &\lesssim_{\gammamma, p, \sigma} \def\Si{\Sigmagmama} 2^{-\sigma} \def\Si{\Sigmagmama k} \|\tilde{\mathcal{M}athbf{e}ta}_{k-\ell}(D)f\|_{L^p(\mathcal{M}athbb{R}^n)}, \\ \lambdabel{LP vs max 3} \begin{itemize}g(\int_1^2\|{\mathfrak {r}}ac{\partial}{\partial t} A_{2^{\ell} t} \mathcal{M}athbf{e}ta_{k-\ell}(D)f\|_{L^p(\mathcal{M}athbb{R}^n)}^p \,\mathcal{M}athrm{d} t\begin{itemize}g)^{1/p} &\lesssim_{\gammamma, p, \sigma} \def\Si{\Sigmagmama} 2^{-\sigma} \def\Si{\Sigmagmama k + k} \|\tilde{\mathcal{M}athbf{e}ta}_{k-\ell}(D)f\|_{L^p(\mathcal{M}athbb{R}^n)}. \end{align} The second estimate follows by noting that the Fourier multiplier associated to $\partial_t A_{2^\ell t} \mathcal{M}athbf{e}ta_{k-\ell}(D)$ is essentially the same as the multiplier associated to $A_{2^\ell t} \mathcal{M}athbf{e}ta_{k-\ell}(D)$ but with an extra $|\xi|$ factor. We therefore pick up an additional $2^k$ owing to the estimate $ \||D|\tilde{\mathcal{M}athbf{e}ta}_k(D)f\|_{L^p(\mathcal{M}athbb{R}^n)} \lesssim 2^k \|\tilde{\mathcal{M}athbf{e}ta}_k(D)f\|_{L^p(\mathcal{M}athbb{R}^n)}$. Combining \eqref{LP vs max 2} and \eqref{LP vs max 3} with the elementary Sobolev embedding \mathcal{M}athbf{e}gin{equation*} \sup_{1 \leq t \leq 2}|F(t)|^p \leq \int_1^2|F(s)|^p\,\mathcal{M}athrm{d} s + p \begin{itemize}g(\int_1^2|F'(s)|^p\,\mathcal{M}athrm{d} s\begin{itemize}g)^{1/p} \begin{itemize}g(\int_1^2|F(s)|^p\,\mathcal{M}athrm{d} s\begin{itemize}g)^{1/p'}, \end{equation*} it follows that \mathcal{M}athbf{e}gin{equation}\lambdabel{LP vs max 4} \|\sup_{1 \leq t \leq 2} |A_{2^{\ell} t} \mathcal{M}athbf{e}ta_{k-\ell}(D)f| \|_{L^p(\mathcal{M}athbb{R}^n)} \lesssim_{\gammamma, p, \sigma} \def\Si{\Sigmagmama} 2^{-k(\sigma} \def\Si{\Sigmagmama - 1/p)} \|\tilde{\mathcal{M}athbf{e}ta}_{k-\ell}(D)f\|_{L^p(\mathcal{M}athbb{R}^n)}. 
\end{equation} Taking the $\ell^p$-norm of both sides of \eqref{LP vs max 4}, we may sum the resulting expression in $\ell$ using the elementary inequality \mathcal{M}athbf{e}gin{equation*} \begin{itemize}g(\sum_{\ell \in \mathcal{M}athbb{Z}}\|\tilde{\mathcal{M}athbf{e}ta}_{\ell}(D)f\|_{L^p(\mathcal{M}athbb{R}^n)}^p \begin{itemize}g)^{1/p} \lesssim \|f\|_{L^p(\mathcal{M}athbb{R}^n)}, \end{equation*} valid for $p \geq 2$. On the other hand, under the crucial hypothesis $\sigma} \def\Si{\Sigmagmama > 1/p$, we have a geometric decay which allows us to sum in $k$. Thus, we deduce the desired estimate \eqref{LP vs max 1}. \end{proof} \section{Reduction to band-limited estimates}\lambdabel{sec:bandlimited} We now turn to the proof of Theorem~\ref{intro LS thm}, which occupies almost the entirety of the article. Since we are interested in $L^p(\mathcal{M}athbb{R}^3) \to L^p_{\sigma} \def\Si{\Sigmagmama}(\mathcal{M}athbb{R}^{3+1})$ estimates for $\sigma} \def\Si{\Sigmagmama$ belonging to an \textit{open} range, the problem is immediately reduced to studying $L^p(\mathcal{M}athbb{R}^3) \to L^p(\mathcal{M}athbb{R}^{3+1})$ bounds for band-limited pieces of the operator. In order to describe this reduction in more detail, it is useful to set up some notational conventions.\mathcal{M}edskip Given $m \in L^{\infty}(\widehat{\mathcal{M}athbb{R}}^n \times \mathcal{M}athbb{R})$, for each $t \in \mathcal{M}athbb{R}$ let $m(D;t)$ denote the associated multiplier operator \mathcal{M}athbf{e}gin{equation*} m(D;t)f (x) := {\mathfrak {r}}ac{1}{(2 \pi)^n} \int_{\widehat{\mathcal{M}athbb{R}}^n} e^{i \inn{x}{\xi}} m(\xi;t) \widehat{f}(\xi)\,\mathcal{M}athrm{d} \xi, \end{equation*} defined initially for functions $f$ belonging to a suitable \textit{a priori} class. With this notation, the averaging operator $A_t$ is given by $A_t = \widehat{\mathcal{M}u}_t(D)$ where $\mathcal{M}u_t$ is the measure introduced in \S\ref{overview subsec}.\mathcal{M}edskip The multipliers of interest are of the following form. Let $\gammamma \colon I \to \mathcal{M}athbb{R}^n$ be a smooth curve and fix $\chi$, $\rho \in C^\infty_c(\mathcal{M}athbb{R})$ supported in the interior of $I$ and $[1/2,4]$, respectively. Given a symbol $a \in C^{\infty}(\widehat{\mathcal{M}athbb{R}}^n\setminus\{0\} \times \mathcal{M}athbb{R} \times \mathcal{M}athbb{R} )$, define \mathcal{M}athbf{e}gin{equation}\lambdabel{multiplier definition} m[a](\xi;t) := \int_{\mathcal{M}athbb{R}} e^{-i t \inn{\gammamma(s)}{\xi}} a(\xi;t; s)\chi(s) \rho(t)\,\mathcal{M}athrm{d} s. \end{equation} Taking $a$ in this definition to be identically 1, we recover the ($t$-localised) multiplier $\rho(t) \widehat{\mathcal{M}u}_t(\xi)$. In general, we perform surgery on $\widehat{\mathcal{M}u}_t$ by choosing $a$ so that $m[a]$ is localised to a particular region of the frequency space. \mathcal{M}edskip For $a \in C^{\infty}(\widehat{\mathcal{M}athbb{R}}^n \setminus \{0\} \times \mathcal{M}athbb{R} \times \mathcal{M}athbb{R})$ as above, we form a dyadic decomposition by writing \mathcal{M}athbf{e}gin{equation}\lambdabel{symbol dec} a = \sum_{k = 0}^{\infty} a_k \qquad \textrm{where} \qquad a_k(\xi; t; s) := \left\{ \mathcal{M}athbf{e}gin{array}{ll} a(\xi; t; s) \, \mathcal{M}athbf{e}ta^k(\xi) & \textrm{for $k \geq 1$} \\ a(\xi; t; s) \, \eta(\xi) & \textrm{for $k =0$} \end{array} \right. . 
\end{equation} Here $\eta$ and $\mathcal{M}athbf{e}ta^k$ are the functions introduced in \eqref{eta def} and \eqref{beta def}.\mathcal{M}edskip With the above definitions, our main result is as follows. \mathcal{M}athbf{e}gin{theorem}\lambdabel{LS thm} Let $\gammamma:I \to \mathcal{M}athbb{R}^3$ be a smooth curve and suppose $a \in C^{\infty}(\widehat{\mathcal{M}athbb{R}}^3\setminus \{0\} \times \mathcal{M}athbb{R} \times \mathcal{M}athbb{R})$ satisfies the symbol condition \mathcal{M}athbf{e}gin{equation*} |\partial_{\xi}^{\alpha}\partial_t^i \partial_s^j a(\xi;t;s)| \lesssim_{\alpha, i, j} |\xi|^{-|\alpha|} \qquad \textrm{for all $\alpha \in \mathcal{M}athbb N_0^3$ and $i$, $j \in \mathcal{M}athbb N_0$} \end{equation*} and that \mathcal{M}athbf{e}gin{equation}\lambdabel{J=3 condition} \sum_{j=1}^3|\inn{\gammamma^{(j)}(s)}{\xi}| \gtrsim |\xi| \qquad \text{ for all $(\xi;s) \in \mathcal{M}athrm{supp}_{\xi}\, a \times I$}. \end{equation} Let $3 \leq p \leq 4$, $\varepsilon>0$ and $k\ge 1$. If $a_k$ is defined as in \eqref{symbol dec}, then \mathcal{M}athbf{e}gin{equation*} \begin{itemize}g(\int_1^2\|m[a_k](D;t)f\|_{L^p(\mathcal{M}athbb{R}^3)}^p\,\mathcal{M}athrm{d} t\begin{itemize}g)^{1/p} \lesssim_{\varepsilon, p} 2^{-{\mathfrak {r}}ac{k}{5}(1+{\mathfrak {r}}ac{2}{p}) + k \varepsilon}\|f\|_{L^p(\mathcal{M}athbb{R}^3)}. \end{equation*} \end{theorem} For $n=3$, the condition \eqref{J=3 condition} is equivalent to the non-degeneracy hypothesis \eqref{eq:nondegenerate}. Thus, Theorem~\ref{LS thm} immediately implies Theorem~\ref{intro LS thm} via the Littlewood--Paley characterisation of Sobolev spaces.\mathcal{M}edskip Under a stronger hypothesis on the phase function, a stronger local smoothing estimate holds, by a combination of the work of Pramanik and the fourth author \cite{PS2007} with the full decoupling theorem for the light cone by Bourgain and Demeter \cite{BD2015}. {\mathfrak {o}}otnote{The estimates in \cite{PS2007} are stated for $p >6$. The version of the result presented here for $2 \leq p \leq \infty$ follows via interpolation with trivial $L^2$-estimates.} \mathcal{M}athbf{e}gin{theorem}[{\it cf.} Theorem 4.1 in \cite{PS2007}]\lambdabel{PS LS J=2} Let $\gammamma:I \to \mathcal{M}athbb{R}^3$ be a smooth curve and suppose that $a \in C^{\infty}(\widehat{\mathcal{M}athbb{R}}^3\setminus \{0\} \times \mathcal{M}athbb{R} \times \mathcal{M}athbb{R})$ satisfies the symbol conditions \mathcal{M}athbf{e}gin{equation*} |\partial_{\xi}^{\alpha}\partial_t^i \partial_s^j a(\xi;t;s)| \lesssim_{\alpha, i, j} |\xi|^{-|\alpha|} \qquad \textrm{for all $\alpha \in \mathcal{M}athbb N_0^3$ and $i$, $j \in \mathcal{M}athbb N_0$} \end{equation*} and that \mathcal{M}athbf{e}gin{equation}\lambdabel{J=2 condition} |\inn{\gammamma'(s)}{\xi}| + |\inn{\gammamma''(s)}{\xi}| \gtrsim |\xi| \qquad \text{ for all $\,\, (\xi;s) \in \mathcal{M}athrm{supp}_{\xi}\, a \times I$}. \end{equation} Let $2 \leq p \leq 6$, $\varepsilon>0$ and $k \geq 1$. If $a_k$ is defined as in \eqref{symbol dec}, then \mathcal{M}athbf{e}gin{equation*} \begin{itemize}g(\int_1^2\|m[a_k](D;t)f\|_{L^p(\mathcal{M}athbb{R}^3)}^p\,\mathcal{M}athrm{d} t\begin{itemize}g)^{1/p} \lesssim_{\varepsilon, p} 2^{-{\mathfrak {r}}ac{k}{2}({\mathfrak {r}}ac{1}{2} + {\mathfrak {r}}ac{1}{p}) + k \varepsilon}\|f\|_{L^p(\mathcal{M}athbb{R}^3)}. \end{equation*} \end{theorem} Owing to the strengthened hypothesis \eqref{J=2 condition}, Theorem~\ref{PS LS J=2} alone is insufficient for our purposes. 
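To see concretely why the stronger hypothesis \eqref{J=2 condition} must fail near the main singularity, note that the binormal $\mathbf{e}_3(s)$ from \S\ref{overview subsec} is orthogonal to both $\gamma'(s)$ and $\gamma''(s)$, so that $|\inn{\gamma'(s)}{\xi}| + |\inn{\gamma''(s)}{\xi}| = 0$ whenever $\xi$ is parallel to $\mathbf{e}_3(s)$; consequently, \eqref{J=2 condition} cannot hold on any conic neighbourhood of the binormal cone $\Gamma$.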
Indeed, Theorem~\ref{PS LS J=2} only effectively deals with parts of the multiplier which are supported away from the main singularity. However, we still make use of Theorem~\ref{PS LS J=2} in the proof of Theorem~\ref{LS thm} to analyse the multiplier in this less singular region, in which it is effective.

\section{Symmetries and model curves}\label{curve sym sec}

A prototypical example of a smooth curve satisfying the non-degeneracy condition \eqref{eq:nondegenerate} is the \textit{moment curve} $\gamma_{\circ} \colon \mathbb{R} \to \mathbb{R}^n$, given by
\begin{equation*}
\gamma_{\circ}(s) := \Big(s, \frac{s^2}{2}, \dots, \frac{s^n}{n!} \Big).
\end{equation*}
Indeed, in this case the determinant appearing in \eqref{eq:nondegenerate} is everywhere equal to 1. Moreover, at small scales, any non-degenerate curve can be thought of as a perturbation of an affine image of $\gamma_{\circ}$. To see why this is so, fix a non-degenerate curve $\gamma \colon I \to \mathbb{R}^n$ and $\sigma \in I$, $\lambda > 0$ such that $[\sigma - \lambda, \sigma+\lambda] \subseteq I$. Denote by $[\gamma]_{\sigma}$ the $n\times n$ matrix
\begin{equation*}
[\gamma]_{\sigma}:= \begin{bmatrix} \gamma^{(1)}(\sigma) & \cdots & \gamma^{(n)}(\sigma) \end{bmatrix},
\end{equation*}
where the vectors $\gamma^{(j)}(\sigma)$ are understood to be \textit{column} vectors. Note that this is precisely the matrix appearing in the definition of the non-degeneracy condition \eqref{eq:nondegenerate}, and it is therefore invertible by our hypothesis. It is also convenient to let $[\gamma]_{\sigma,\lambda}$ denote the $n \times n$ matrix
\begin{equation}\label{gamma transformation}
[\gamma]_{\sigma,\lambda} := [\gamma]_{\sigma} \cdot D_{\lambda},
\end{equation}
where $D_{\lambda}:=\mathrm{diag}(\lambda, \dots, \lambda^n)$ is the diagonal matrix with entries $\lambda$, $\lambda^2, \dots, \lambda^n$. Consider the portion of the curve $\gamma$ lying over the subinterval $[\sigma-\lambda, \sigma+\lambda]$. This is parametrised by the map $s \mapsto \gamma(\sigma + \lambda s)$ for $s \in [-1,1]$. The degree $n$ Taylor polynomial of $s \mapsto \gamma(\sigma + \lambda s)$ around $s = 0$ is given by
\begin{equation}\label{Taylor}
s \mapsto \gamma(\sigma) + [\gamma]_{\sigma,\lambda} \cdot \gamma_{\circ}(s),
\end{equation}
which is indeed an affine image of $\gamma_{\circ}$. Furthermore, by Taylor's theorem, the original curve $\gamma$ agrees with the polynomial curve \eqref{Taylor} to high order at $s = 0$.
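To spell out the first claim above (a supplementary computation, recorded here only for concreteness and not needed later): for the moment curve, the $i$th entry of the column $\gamma_{\circ}^{(j)}(s)$ is $\frac{\mathrm{d}^j}{\mathrm{d} s^j} \frac{s^i}{i!} = \frac{s^{i-j}}{(i-j)!}$ if $i \geq j$ and $0$ otherwise, so that, for instance when $n = 3$,
\begin{equation*}
[\gamma_{\circ}]_s = \begin{bmatrix} 1 & 0 & 0 \\ s & 1 & 0 \\ s^2/2 & s & 1 \end{bmatrix}.
\end{equation*}
In general, $[\gamma_{\circ}]_s$ is unit lower triangular and therefore $\det [\gamma_{\circ}]_s = 1$ for every $s$.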
Inverting the affine transformation $x \mapsto \gamma(\sigma) + [\gamma]_{\sigma,\lambda} \cdot x$ from \eqref{Taylor}, we can map the portion of $\gamma$ over $[\sigma - \lambda, \sigma + \lambda]$ to a small perturbation of the moment curve.

\begin{definition}\label{rescaled curve def} Let $\gamma \in C^{n+1}(I;\mathbb{R}^{n})$ be a non-degenerate curve and $\sigma \in I$, $\lambda>0$ be such that $[\sigma-\lambda, \sigma+ \lambda] \subseteq I$. The \textit{$(\sigma,\lambda)$-rescaling of $\gamma$} is the curve $\gamma_{\sigma,\lambda} \in C^{n+1}([-1,1];\mathbb{R}^{n})$ given by
\begin{equation*}
\gamma_{\sigma,\lambda}(s) := [\gamma]_{\sigma,\lambda}^{-1}\big( \gamma(\sigma+\lambda s) - \gamma(\sigma) \big).
\end{equation*}
\end{definition}

It follows from the preceding discussion that
\begin{equation*}
\gamma_{\sigma,\lambda}(s) = \gamma_{\circ}(s) + [\gamma]_{\sigma,\lambda}^{-1} \mathcal{E}_{\gamma,\sigma,\lambda}(s)
\end{equation*}
where $\mathcal{E}_{\gamma,\sigma,\lambda}$ is the remainder term in the Taylor expansion \eqref{Taylor}. In particular, if $\gamma$ satisfies the non-degeneracy condition \eqref{eq:nondegenerate} with constant $c_0$, then
\begin{equation*}
\| \gamma_{\sigma,\lambda} - \gamma_{\circ} \|_{C^{n+1}([-1,1];\mathbb{R}^n)} \lesssim c_0^{-1} \lambda \, \| \gamma \|_{C^{n+1}(I)}^n.
\end{equation*}
Thus, if $\lambda>0$ is chosen small enough, then the rescaled curve $\gamma_{\sigma,\lambda}$ is a minor perturbation of the moment curve. In particular, given any $0 < \delta < 1$, we can choose $\lambda$ so as to ensure that $\gamma_{\sigma,\lambda}$ belongs to the following class of \textit{model curves}.

\begin{definition} Given $n \geq 2$ and $0 < \delta < 1$, let $\mathfrak{G}_n(\delta)$ denote the class of all smooth curves $\gamma \colon [-1, 1] \to \mathbb{R}^n$ that satisfy the following conditions:
\begin{enumerate}[i)]
\item $\gamma(0) = 0$ and $\gamma^{(j)}(0) = \vec{e}_j$ for $1 \leq j \leq n$;
\item $\|\gamma - \gamma_{\circ}\|_{C^{n+1}([-1,1])} \leq \delta$.
\end{enumerate}
Here $\vec{e}_j$ denotes the $j$th standard Euclidean basis vector and
\begin{equation*}
\|\gamma\|_{C^{n+1}(I)} := \max_{1 \leq j \leq n + 1} \sup_{s \in I} |\gamma^{(j)}(s)| \qquad \textrm{for all $\gamma \in C^{n+1}(I;\mathbb{R}^n)$.}
\end{equation*}
\end{definition}

Given any $\gamma \in \mathfrak{G}_n(\delta)$, condition ii) and the multilinearity of the determinant ensure that $\det[\gamma]_s = \det[\gamma_{\circ}]_s + O(\delta) = 1 + O(\delta)$. Thus, there exists a dimensional constant $c_n > 0$ such that if $0 < \delta < c_n$, then any curve $\gamma \in \mathfrak{G}_n(\delta)$ is non-degenerate and, moreover, satisfies $\det [ \gamma]_s \geq 1/2$.
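As a supplementary observation (not needed in what follows), the moment curve itself is fixed by this rescaling: each component of $\gamma_{\circ}$ is a polynomial of degree at most $n$, so the Taylor remainder $\mathcal{E}_{\gamma_{\circ},\sigma,\lambda}$ vanishes identically and
\begin{equation*}
(\gamma_{\circ})_{\sigma,\lambda}(s) = [\gamma_{\circ}]_{\sigma,\lambda}^{-1}\big(\gamma_{\circ}(\sigma + \lambda s) - \gamma_{\circ}(\sigma)\big) = \gamma_{\circ}(s) \qquad \textrm{for all admissible $(\sigma,\lambda)$.}
\end{equation*}
In particular, since $\gamma_{\circ}(0) = 0$ and $\gamma_{\circ}^{(j)}(0) = \vec{e}_j$, the moment curve belongs to $\mathfrak{G}_n(\delta)$ for every $0 < \delta < 1$.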
Henceforth, it is always assumed that the parameter $\delta > 0$ satisfies the condition $0 < \delta < c_n$ above, which we express succinctly as $0 < \delta \ll 1$.

\section{Key analytic ingredients in the proof}\label{key ingredients sec}

There are three key ingredients in the proof of Theorem~\ref{LS thm}: a square function estimate on $\mathbb{R}^4$, a square function estimate on $\mathbb{R}^3$ and a Nikodym-type maximal operator mapping functions on $\mathbb{R}^4$ to functions on $\mathbb{R}^3$. These operators are formulated in terms of the geometry of the underlying curve $\gamma \colon I \to \mathbb{R}^3$ and, in particular, are defined with respect to the Frenet frame of $\gamma$.\footnote{More precisely, the square function on $\mathbb{R}^4$ is defined with respect to the Frenet frame associated to a lift of $\gamma$ to $\mathbb{R}^4$.} In this section each of the three key operators is introduced and the relevant norm bounds for these objects are stated in Theorem~\ref{Frenet reverse SF theorem}, Proposition~\ref{f SF prop} and Proposition~\ref{Nikodym prop} below. In \S\S\ref{LS rel G sec}--\ref{J=3 sec}, a careful decomposition of the multiplier $m[a_k]$ is carried out which facilitates the application of these results in the proof of Theorem~\ref{LS thm}. We return to the proofs of Theorem~\ref{Frenet reverse SF theorem}, Proposition~\ref{f SF prop} and Proposition~\ref{Nikodym prop} in \S\ref{reverse SF sec}, \S\ref{forward SF sec} and \S\ref{Nikodym sec}, respectively.

\subsection{Frenet geometry} It is convenient to recall some elementary concepts from differential geometry which feature in our proof. Given a smooth non-degenerate curve $\gamma \colon I \to \mathbb{R}^n$, the Frenet frame $\{\mathbf{e}_1(s), \dots, \mathbf{e}_n(s)\}$ is the orthonormal basis resulting from applying the Gram--Schmidt process to the vectors
\begin{equation*}
\{ \gamma'(s), \dots, \gamma^{(n)}(s)\},
\end{equation*}
which are linearly independent in view of the condition \eqref{eq:nondegenerate}. Defining the functions\footnote{Note that the $\tilde{\kappa}_j$ depend on the choice of parametrisation and only agree with the (geometric) curvature functions
\begin{equation*}
\kappa_j(s) := \frac{\langle \mathbf{e}_j'(s), \mathbf{e}_{j+1}(s) \rangle}{|\gamma'(s)|}
\end{equation*}
if $\gamma$ is unit speed parametrised. Here we do not assume unit speed parametrisation.}
\begin{equation*}
\tilde{\kappa}_j(s) := \langle \mathbf{e}_j'(s), \mathbf{e}_{j+1}(s) \rangle \qquad \text{for } j=1, \dots, n-1,
\end{equation*}
one has the classical Frenet formul\ae
\begin{align*}
\mathbf{e}_1'(s)&= \tilde{\kappa}_1(s) \mathbf{e}_2(s), \\
\mathbf{e}_i'(s)&= -\tilde{\kappa}_{i-1}(s)\mathbf{e}_{i-1}(s) + \tilde{\kappa}_{i}(s)\mathbf{e}_{i+1}(s), \qquad i=2,\dots, n-1, \\
\mathbf{e}_n'(s)&=-\tilde{\kappa}_{n-1}(s)\mathbf{e}_{n-1}(s).
\end{align*}
Repeated application of these formul\ae\ shows that
\begin{equation*}
\mathbf{e}^{(k)}_{i}(s) \perp \mathbf{e}_{j}(s) \qquad \textrm{whenever} \qquad 0 \leq k < |i - j|.
\end{equation*}
Consequently, by Taylor's theorem,
\begin{equation*}
|\inn{\mathbf{e}_{i}(s_1)}{\mathbf{e}_{j}(s_2)}| \lesssim_{\gamma} |s_1 - s_2|^{|i-j|} \qquad \textrm{for $1 \leq i,j \leq n$ and $s_1, s_2 \in I$.}
\end{equation*}
Furthermore, one may deduce from the definition of $\{\mathbf{e}_j(s)\}_{j=1}^n$ that
\begin{equation}\label{Frenet bound alt 1}
|\inn{\gamma^{(i)}(s_1)}{\mathbf{e}_{j}(s_2)}| \lesssim_{\gamma} |s_1 - s_2|^{(j-i)\vee 0} \qquad \textrm{for $1 \leq i,j \leq n$ and $s_1, s_2 \in I$.}
\end{equation}
In this paper, much of the microlocal geometry of the averaging operators $A_t$ is expressed in terms of the Frenet frame. We further introduce the following definitions.

\begin{definition}\label{def Frenet box} Given $1 \leq d \leq n-1$ and $0 < r \leq 1$, for each $s \in I$ let $\pi_{d-1}(s;\,r)$ denote the set of all $\xi \in \widehat{\mathbb{R}}^n$ satisfying the following conditions:
\begin{subequations}
\begin{align}
\label{neighbourhood 1} |\inn{\mathbf{e}_j(s)}{\xi}| &\leq r^{d+1-j} \qquad \textrm{for $1 \leq j \leq d$,} \\
\label{neighbourhood 2} 1/2\leq |\inn{\mathbf{e}_{d+1}(s)}{\xi}| &\leq 2, \\
\label{neighbourhood 3} |\inn{\mathbf{e}_j(s)}{\xi}| &\leq 1 \qquad \textrm{for $d+2 \leq j \leq n$.}
\end{align}
\end{subequations}
Such sets $\pi_{d-1}(s;\,r)$ are referred to as $(d-1,r)$-\textit{Frenet boxes}.
\end{definition}

The relevance of the $d-1$ index is that the $\pi_{d-1}(s;r)$ correspond to plate regions defined with respect to a codimension $d-1$ cone. For $n=4$ and $d-1 = 2$, this geometric observation is discussed in detail in \S\ref{geo obs sec}.

\begin{definition} A collection $\mathcal{P}_{d-1}(r)$ of $(d-1,r)$-Frenet boxes is a \textit{Frenet box decomposition along $\gamma$} if it consists of precisely the $(d-1,r)$-Frenet boxes $\pi_{d-1}(s;\,r)$ for $s$ varying over an $r$-separated subset of $I$.
\end{definition}

\subsection{Reverse square function estimates in \texorpdfstring{$\mathbb{R}^{3+1}$}{}} The most important ingredient in the proof of Theorem~\ref{LS thm} is the following square function bound.

\begin{theorem}\label{Frenet reverse SF theorem} Let $0 < r < 1$ and $\mathcal{P}_2(r)$ be a $(2,r)$-Frenet box decomposition along a non-degenerate curve $\gamma \colon I \to \mathbb{R}^4$. For all $\varepsilon > 0$ the inequality
\begin{equation*}
\Big\|\sum_{\pi \in \mathcal{P}_2(r)} f_{\pi} \Big\|_{L^4(\mathbb{R}^4)} \lesssim_{\gamma,\varepsilon} r^{ -\varepsilon} \Big\|\Big(\sum_{\pi \in \mathcal{P}_2(r)}|f_{\pi}|^2 \Big)^{1/2}\Big\|_{L^4(\mathbb{R}^4)}
\end{equation*}
holds for any tuple of functions $(f_{\pi})_{\pi \in \mathcal{P}_2(r)}$ satisfying $\mathrm{supp}\, \widehat{f}_{\pi} \subseteq \pi$.
\end{theorem}

This bound pertains to curves in $\mathbb{R}^4$ rather than $\mathbb{R}^3$ and therefore does not directly apply to the curve $\gamma \colon I \to \mathbb{R}^3$ featured in the definition of our original helical maximal operator. Rather, in \S\ref{spatio temp subsec} we apply Theorem~\ref{Frenet reverse SF theorem} to a certain lift of the original curve $\gamma$ into the spatio-temporal domain $\mathbb{R}^{3+1}$.
This is somewhat analogous to the situation in \cite{MSS1992}, where a square function estimate in $\mathbb{R}^{2+1}$ is used to study the circular maximal function in $\mathbb{R}^2$.\medskip

Theorem~\ref{Frenet reverse SF theorem} is related to the Lee--Vargas~\cite{LV2012} estimate for the Mockenhaupt square function in $\mathbb{R}^3$. In particular, the Mockenhaupt square function corresponds to studying functions frequency localised with respect to a $(1,r)$-Frenet box decomposition in $\mathbb{R}^3$. Moreover, the strategy used to prove Theorem~\ref{Frenet reverse SF theorem} mirrors that of \cite{LV2012}. We first obtain a $4$-linear variant of Theorem~\ref{Frenet reverse SF theorem} via the multilinear Fourier restriction estimates of Bennett--Bez--Flock--Lee \cite{BBFL2018}. The linear result is then deduced from the $4$-linear inequality using a variant of the Bourgain--Guth method \cite{Bourgain2011}. The details of the argument are provided in \S\ref{reverse SF sec}.

\subsection{Forward square function estimates in \texorpdfstring{$\mathbb{R}^3$}{}} We also make use of a (forward) $L^2$-weighted square function estimate in $\mathbb{R}^3$. Here the square function is defined in relation to a $(0,r)$-Frenet box decomposition. In contrast with Theorem~\ref{Frenet reverse SF theorem}, we work with an operator-theoretic formulation involving certain projection operators. As before, let $\eta \in C^\infty_c(\mathbb{R})$ be non-negative and such that $\eta(r) = 1$ if $r \in [-1,1]$ and $\mathrm{supp}\, \eta \subseteq [-2,2]$, and define $\tilde{\beta} := \eta(2^{-1}\,\cdot\,) - \eta(4\,\cdot\,)$. Given a $(0,r)$-Frenet box $\pi = \pi_{0,\gamma}(s;r)$, let
\begin{equation}\label{chi pi}
\chi_{\pi}(\xi) := \eta\big(r^{-1}\,\inn{\mathbf{e}_1(s)}{\xi}\big) \, \tilde{\beta}\big(\inn{\mathbf{e}_2(s)}{\xi}\big) \, \eta \big(\inn{\mathbf{e}_3(s)}{\xi}\big),
\end{equation}
so that $\chi_{\pi}(\xi) = 1$ if $\xi \in \pi_{0,\gamma}(s;r)$ and $\chi_{\pi}$ vanishes outside some fixed dilate of this set.

\begin{proposition}\label{f SF prop} Let $0 < r < 1$ and $\mathcal{P}_0(r)$ be a $(0,r)$-Frenet box decomposition for a non-degenerate curve $\gamma \colon I \to \mathbb{R}^3$. For all $\varepsilon > 0$ the inequality
\begin{equation*}
\int_{\mathbb{R}^3} \sum_{\pi \in \mathcal{P}_0(r)} |\chi_{\pi}(D)f(x)|^2 w(x)\,\mathrm{d} x \lesssim_{\varepsilon} r^{-\varepsilon} \int_{\mathbb{R}^3} |f(x)|^2 \widetilde{\mathcal{N}}_{\gamma, r}^{\, (\varepsilon)} w(x)\,\mathrm{d} x
\end{equation*}
holds for any non-negative $w \in L^1_{\mathrm{loc}}(\mathbb{R}^3)$, where $\widetilde{\mathcal{N}}_{\,\gamma, r}^{\,(\varepsilon)}$ is a maximal operator satisfying
\begin{equation}\label{f SF eq}
\|\widetilde{\mathcal{N}}_{\,\gamma, r}^{\, (\varepsilon)}\|_{L^2(\mathbb{R}^3) \to L^2(\mathbb{R}^3)} \lesssim_{\varepsilon, \varepsilon_{\circ}} r^{-\varepsilon_{\circ}} \qquad \textrm{for all $\varepsilon_{\circ} > 0$.}
\end{equation}
\end{proposition}

The above proposition is related to an $L^2$-weighted version of the classical sectorial square function of C\'ordoba \cite{Cordoba1982}, due to Carbery and the fourth author \cite[Proposition 4.6]{CS1995}.
The proof is presented in \S\ref{forward SF sec} below.\medskip

The definition of $\widetilde{\mathcal{N}}_{\,\gamma, r}^{\, (\varepsilon)}$ is rather complicated, involving a repeated composition of Nikodym-type maximal operators at different scales. For this reason, we do not provide an explicit description of the operator here. Further details of the definition and the basic properties of this operator are provided in \S\ref{forward SF sec}. \medskip

\subsection{A singular Nikodym-type maximal function}\label{Nikodym lem subsec} The bounds on the spatio-temporal frequency localised pieces of our operator $m[a](D;\cdot)$ are reduced to bounding a Nikodym-type maximal function mapping functions on $\mathbb{R}^4$ to functions on $\mathbb{R}^3$. Given $\mathbf{r} \in (0,1)^3$ and $s \in [-1,1]$, consider the \textit{plates}
\begin{equation*}
\mathcal{T}_{\mathbf{r}}(s) := \big\{ (y,t) \in \mathbb{R}^3 \times [1,2] : \big|\inn{y - t\gamma(s)}{\mathbf{e}_j(s)}\big| \leq r_j \, \textrm{ for $j=1,2,3$} \big\} \subset \mathbb{R}^4.
\end{equation*}
Using these sets, we define the associated averaging and maximal operators
\begin{equation*}
\mathcal{A}_{\mathbf{r}}^{\,\mathrm{sing}} g(x; s) := \fint_{\mathcal{T}_{\mathbf{r}}(s)} g(x-y, t) \,\mathrm{d} y \mathrm{d} t \quad \textrm{and} \quad \mathcal{N}_{\mathbf{r}}^{\,\mathrm{sing}} g(x) := \sup_{-1 \leq s \leq 1} |\mathcal{A}_{\mathbf{r}}^{\,\mathrm{sing}} g(x; s)|.
\end{equation*}
Note that $\mathcal{N}_{\mathbf{r}}^{\,\mathrm{sing}}$ takes as its input some $g \in L^1_{\mathrm{loc}}(\mathbb{R}^4)$ and outputs a measurable function on $\mathbb{R}^3$. In particular, there is a discrepancy between the number of input and the number of output variables of the operator.

\begin{proposition}\label{Nikodym prop} If $\mathbf{r} \in (0,1)^3$ satisfies $r_3 \leq r_2 \leq r_1 \leq r_2^{1/2}$ and $r_2 \leq r_{1}^{1/2} r_3^{1/2}$, then
\begin{equation*}
\|\mathcal{N}_{\mathbf{r}}^{\,\mathrm{sing}} g\|_{L^2(\mathbb{R}^3)} \lesssim |\log r_3|^3 \|g\|_{L^2(\mathbb{R}^4)}.
\end{equation*}
\end{proposition}

This result can be thought of as a higher dimensional analogue of a Nikodym maximal estimate from \cite{MSS1992}, which is used to study the circular maximal function in the plane. Note that the parameter triple $\mathbf{r} = (r, r, r)$ for some $0 < r < 1$ satisfies the hypothesis of Proposition~\ref{Nikodym prop}, corresponding to the case of tubes formed around the rays $t \mapsto t\gamma(s)$. More relevant to our study, however, is the highly anisotropic situation where $\mathbf{r} = (r, r^2, r^3)$; note that this case is also covered by the proposition. It is remarked that the situation here is somewhat different from that appearing in Proposition~\ref{f SF prop} (whose maximal operator $\widetilde{\mathcal{N}}_{\,\gamma, r}^{\, (\varepsilon)}$ is defined in \S\ref{forward SF sec}), owing to the aforementioned disparity between the number of input and output variables. The proof of Proposition~\ref{Nikodym prop}, which is based on an oscillatory integral argument, is presented in \S\ref{Nikodym sec} below.
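As a brief check of the two parameter choices just mentioned (a supplementary verification, not part of the argument): for $\mathbf{r} = (r,r,r)$ the hypotheses of Proposition~\ref{Nikodym prop} read $r \leq r \leq r \leq r^{1/2}$ and $r \leq (r \cdot r)^{1/2} = r$, which hold for all $0 < r < 1$, whilst for $\mathbf{r} = (r, r^2, r^3)$ they read
\begin{equation*}
r^3 \leq r^2 \leq r \leq (r^2)^{1/2} = r \qquad \textrm{and} \qquad r^2 \leq (r \cdot r^3)^{1/2} = r^2,
\end{equation*}
so that both conditions hold, with equality in the final inequality of each chain.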
\section{Proof of Theorem~\ref{LS thm}: the slow decay cone}\label{sec:slow decay cone}

Throughout the remainder of the paper, we work with some fixed $0 < \delta_0 \ll 1$, chosen to satisfy the forthcoming requirements of the proofs. For the sake of concreteness, the choice $\delta_0 := 10^{-10}$ is more than enough for our purposes. It suffices to prove Theorem~\ref{LS thm} in the special case where $\gamma \in \mathfrak{G}_3(\delta_0)$ and $\mathrm{supp}\, \chi \subseteq I_0 := [-\delta_0,\delta_0]$. Indeed, using the observations of \S\ref{curve sym sec}, we may decompose and rescale the operator $m[a_k](D;\,\cdot\,)$ to reduce to this situation.

Suppose $\gamma \in \mathfrak{G}_3(\delta_0)$ and $a \in C^{\infty}(\widehat{\mathbb{R}}^3\setminus \{0\} \times \mathbb{R} \times \mathbb{R})$ satisfies the hypotheses of Theorem~\ref{LS thm}. In view of Theorem~\ref{PS LS J=2}, we may further assume that
\begin{equation}\label{4 derivative bound_old}
\left\{\begin{array}{ll} |\inn{\gamma^{(3)}(s)}{\xi}| \ge \frac{9}{10}\, |\xi| & \\[5pt] |\inn{\gamma^{(j)}(s)}{\xi}| \le 8\delta_0 |\xi| & \textrm{for $j = 1,2$} \end{array} \right. \qquad \textrm{for all $(\xi;t; s)\in \mathrm{supp}\, a$.}
\end{equation}
We note two further consequences of this technical reduction:
\begin{itemize}
\item Since $\gamma \in \mathfrak{G}_3(\delta_0)$, we have $\gamma^{(j)}(0)=\vec{e}_j$ for $1 \leq j \leq 3$ and so \eqref{4 derivative bound_old} immediately implies that
\begin{equation*}
|\xi_3|\ge \tfrac{9}{10} \, |\xi| \quad \textrm{and} \quad |\xi_j|\le 8 \delta_0 |\xi| \quad \textrm{for $j=1, 2$,} \qquad \textrm{for all $\xi \in \mathrm{supp}_{\xi}\, a$.}
\end{equation*}
\item Since $\gamma \in \mathfrak{G}_3(\delta_0)$, we have $\|\gamma^{(4)}\|_{\infty} \leq \delta_0$. Thus, provided $\delta_0$ is sufficiently small,
\begin{equation}\label{4 derivative bound}
|\inn{\gamma^{(3)}(s)}{\xi}| \geq \tfrac{1}{2} \, |\xi| \qquad \textrm{for all $(\xi; s) \in \mathrm{supp}_{\xi}\, a \times [-1,1]$}.
\end{equation}
Observe that this inequality holds on the large interval $[-1,1]$, rather than just on $I_0$.
\end{itemize}
Henceforth, we also assume that $\xi_3>0$ for all $\xi \in \mathrm{supp}_{\xi}\, a$. In particular,
\begin{equation}\label{convex}
\inn{\gamma^{(3)}(s)}{\xi} > 0 \qquad \textrm{for all $(\xi; s ) \in \mathrm{supp}_{\xi}\, a \times [-1,1]$}
\end{equation}
and thus, for each $\xi \in \mathrm{supp}_{\xi}\, a$, the function $s \mapsto \inn{\gamma'(s)}{\xi}$ is strictly convex on $[-1,1]$. The analysis for the portion of the symbol supported on the set $\{\xi_3<0\}$ follows by symmetry.\medskip

The first step is to isolate the regions of frequency space where the multiplier $m[a]$ decays relatively slowly. Owing to stationary phase considerations, this corresponds to a region around the conic variety
\begin{equation*}
\Gamma :=\{\xi \in \mathrm{supp}_{\xi}\, a: \inn{\gamma^{(j)}(s)}{\xi}=0, \,\, 1 \leq j \leq 2, \text{ for some } s\in I_0 \}.
\end{equation*}
To analyse this cone, we begin with the following observation.
\begin{lemma}\label{theta2 lem} If $\xi\in \mathrm{supp}_{\xi}\, a$, then the equation $\inn{\gamma''(s)}{\xi}= 0$ has a unique solution $s \in [-1,1]$, which corresponds to the unique global minimum of the function $s \mapsto \inn{\gamma'(s)}{\xi}$. Furthermore, this solution has absolute value $O(\delta_0)$.
\end{lemma}

\begin{proof} Given $\xi \in \mathrm{supp}_{\xi}\, a$, let
\begin{equation}\label{theta2 lem 1}
\phi \colon [-1,1] \to \mathbb{R}, \quad \phi \colon s \mapsto \inn{\gamma'(s)}{\xi}.
\end{equation}
By \eqref{convex}, $\phi''(s)>0$ for all $s \in [-1,1]$, and so the equation $\phi'(s)=\inn{\gamma^{(2)}(s)}{\xi}=0$ has at most one solution on that interval. On the other hand, by the mean value theorem,
\begin{equation*}
\phi'(s)=\inn{\gamma^{(2)}(s)}{\xi}= \xi_2 + \omega(\xi;s) \, s,
\end{equation*}
where $\omega$ satisfies $\omega(\xi;s)\geq \tfrac{1}{2}|\xi| >0$. As $|\xi_2| \leq 8 \delta_0 |\xi|$, it follows that $\omega(\xi;s) |s| > |\xi_2|$ whenever $|s| > 16 \delta_0$, so there are no solutions outside $[-16\delta_0,16\delta_0]$; moreover, $\phi'(-16\delta_0) \leq 0 \leq \phi'(16\delta_0)$, so the intermediate value theorem guarantees that the equation $\inn{\gamma^{(2)}(s)}{\xi}=0$ has a (unique) solution, lying in the interval $[-16\delta_0,16\delta_0]$. Finally, it immediately follows from \eqref{convex} that this solution is the unique global minimum of $\phi$ on $[-1, 1]$.
\end{proof}

Using Lemma~\ref{theta2 lem}, we construct a smooth mapping $\theta_2 \colon \mathrm{supp}_{\xi}\, a \to [-1,1]$ such that
\begin{equation*}
\inn{\gamma'' \circ \theta_2(\xi)}{\xi} = 0 \qquad \textrm{for all $\xi \in \mathrm{supp}_{\xi}\, a$.}
\end{equation*}
It is easy to see that $\theta_2$ is homogeneous of degree 0. This function can be used to construct a natural Whitney decomposition with respect to the cone $\Gamma$ defined above. In particular, let
\begin{equation}\label{J=3 u function}
u(\xi) := \inn{\gamma' \circ \theta_2(\xi)}{\xi} \qquad \textrm{for all $\xi \in \mathrm{supp}_{\xi}\, a$.}
\end{equation}
This quantity plays a central r\^ole in our analysis. If $u(\xi)=0$, then $\xi \in \Gamma$ and so, roughly speaking, $u(\xi)$ measures the distance of $\xi$ from $\Gamma$.

\begin{lemma}\label{theta1 lem} Let $\xi \in \mathrm{supp}_{\xi}\, a$ and consider the equation
\begin{equation}\label{0404e3.29}
\inn{\gamma'(s)}{\xi}=0.
\end{equation}
\begin{enumerate}[i)]
\item If $u(\xi)>0$, then the equation \eqref{0404e3.29} has no solution on $[-1, 1]$.
\item If $u(\xi)=0$, then the equation \eqref{0404e3.29} has exactly one solution on $[-1,1]$, namely $s=\theta_2(\xi)$.
\item If $u(\xi)<0$, then the equation \eqref{0404e3.29} has precisely two solutions on $[-1,1]$. Both solutions have absolute value $O(\delta_0^{1/2})$.
\end{enumerate}
\end{lemma}

\begin{proof} Given $\xi \in \mathrm{supp}_{\xi}\, a$, define $\phi$ as in \eqref{theta2 lem 1}.\medskip

\noindent i) In this case, Lemma~\ref{theta2 lem} implies that
\begin{equation*}
\phi(s) = \inn{\gamma'(s)}{\xi} \geq u (\xi) >0 \quad \text{ for all $s \in [-1,1]$,}
\end{equation*}
and so \eqref{0404e3.29} has no solutions.\medskip

\noindent ii) This case also follows immediately from Lemma~\ref{theta2 lem}, since $s=\theta_2(\xi)$ is the unique global minimum of $\phi$ on $[-1,1]$.
\medskip

\noindent iii) Recall that, by \eqref{convex}, the function $\phi$ is strictly convex on $[-1,1]$, and therefore the equation $\phi(s)=0$ has at most two solutions on that interval. On the other hand, by (the proof of) Lemma~\ref{theta2 lem} we know that $|\theta_2(\xi)| \leq 16 \delta_0$. Moreover, the mean value theorem implies
\begin{equation}\label{theta1 lem 1}
|u(\xi)| \leq |\xi_1| + \sup_{|s| \leq 16 \delta_0}|\gamma^{(2)}(s)|\,|\xi|\, |\theta_2(\xi)| \leq 8 \Big(1+ 2\sup_{|s| \leq 16 \delta_0}| \gamma^{(2)}(s) |\Big) \delta_0 |\xi| \leq 40 \delta_0 |\xi|,
\end{equation}
since $\gamma \in \mathfrak{G}_3(\delta_0)$. By Taylor expansion of $\phi$ around $\theta_2(\xi)$, one obtains
\begin{equation}\label{theta1 lem 2}
\phi(s) = u(\xi) + \omega(\xi;s) \, (s-\theta_2(\xi))^2,
\end{equation}
where $\omega$ arises from the remainder term and satisfies $\omega(\xi;s) \geq \tfrac{1}{4} \, |\xi|$. Combining \eqref{theta1 lem 1} and \eqref{theta1 lem 2}, it follows that if $|s-\theta_2(\xi)|\geq 20\delta_0^{1/2}$, then $\phi(s)>0$. Recall that $\phi \circ \theta_2(\xi)=u(\xi)<0$. Consequently, the equation $\phi(s) =0$ has exactly two solutions on $[-1,1]$, both lying in the interval
\begin{equation*}
[-16\delta_0, 16\delta_0] + [-20\delta_0^{1/2}, 20\delta_0^{1/2}] \subseteq [-36 \delta_0^{1/2}, 36 \delta_0^{1/2}],
\end{equation*}
as required.
\end{proof}

Using Lemma~\ref{theta1 lem}, we construct a (unique) pair of smooth mappings
\begin{equation*}
\theta_1^{\pm} \colon \{ \xi \in \mathrm{supp}_{\xi}\, a : u(\xi) <0 \} \to [-1, 1]
\end{equation*}
with $\theta_1^-(\xi)\le \theta_1^+(\xi)$ which satisfy
\begin{equation*}
\inn{\gamma' \circ \theta_1^{\pm}(\xi)}{\xi}= 0 \quad \text{ for all $\xi \in \mathrm{supp}_{\xi}\, a$ with $u(\xi) <0$.}
\end{equation*}
Define the functions
\begin{equation*}
v^{\pm}(\xi):=\inn{\gamma'' \circ \theta_1^\pm(\xi)}{\xi} \qquad \text{ for all $\xi \in \mathrm{supp}\, a$ with $u(\xi) <0$.}
\end{equation*}

\begin{lemma}\label{root control lem} Let $\xi \in \mathrm{supp}\, a$ with $u(\xi)<0$. Then the following hold:
\begin{equation*}
\big|v^{\pm}\big(\xi/|\xi|\big)\big| \sim |\theta_1^{\pm}(\xi) - \theta_2(\xi)| \sim |\theta_1^+(\xi)-\theta_1^-(\xi)|\sim \big|u\big(\xi/|\xi|\big)\big|^{1/2}.
\end{equation*}
\end{lemma}

\begin{proof} By Taylor expansion around $\theta_2(\xi)$, we obtain
\begin{align*}
v^{\pm}(\xi)& = \omega_{1}^{\pm}(\xi) \, (\theta_1^{\pm}(\xi)-\theta_2(\xi)),\\
0&=\inn{\gamma' \circ \theta_1^{\pm}(\xi)}{\xi} = u(\xi) + \omega_2(\xi) \, (\theta_1^{\pm}(\xi)-\theta_2(\xi))^2,
\end{align*}
where $|\omega_1^{\pm}(\xi)| \sim |\omega_{2}(\xi)|\sim |\xi|$ by \eqref{4 derivative bound}. Similarly, Taylor expansion around $\theta_1^{-}(\xi)$ yields
\begin{equation*}
0 = \inn{\gamma' \circ \theta_1^{+}(\xi)}{\xi} = v^-(\xi)\, (\theta_1^{+}(\xi) - \theta_1^{-}(\xi)) + \omega_3 (\xi) \, (\theta_1^{+}(\xi) - \theta_1^{-}(\xi))^2,
\end{equation*}
where again the remainder satisfies $|\omega_3(\xi)| \sim |\xi|$.
As $\theta_1^+(\xi) \neq \theta_1^-(\xi)$, we can combine the identities above to obtain the desired bounds.
\end{proof}

\section{Proof of Theorem~\ref{LS thm}: local smoothing relative to \texorpdfstring{$\Gamma$}{}}\label{LS rel G sec}

For $k \geq 1$, consider the frequency localised symbols $a_k:=a \, \beta^k$, as introduced in \S\ref{sec:bandlimited}. We decompose each $a_k$ with respect to the size of $|u(\xi)|$. In particular, write\footnote{Here the function $\beta$ should be defined slightly differently from \eqref{beta def}; in particular, here $\beta(r) := \eta(2^{-2}r) - \eta(r)$. Such minor changes are suppressed in the notation.}
\begin{equation}\label{J=3 akell def}
a_k = \sum_{\ell = 0}^{\lfloor k/3\rfloor} a_{k,\ell} \qquad \textrm{where} \qquad a_{k,\ell}(\xi; t; s) := \left\{\begin{array}{ll} \displaystyle a_k(\xi; t; s) \, \beta\big(2^{-k+ 2\ell}u(\xi)\big) \quad & \textrm{if $0 \leq \ell < \lfloor k/3\rfloor$} \\[8pt] \displaystyle a_{k}(\xi; t; s)\, \eta\big(2^{-k + 2\lfloor k/3\rfloor}u(\xi)\big) & \textrm{if $\ell = \lfloor k/3\rfloor$} \end{array}\right. .
\end{equation}
Here $\lfloor k/3\rfloor$ denotes the greatest integer less than or equal to $k/3$.\medskip

To prove Theorem~\ref{LS thm}, we establish local smoothing estimates for each of the operators $m[a_{k,\ell}](D;\,\cdot\,)$. The main result is as follows.

\begin{proposition}\label{J=3 LS prop} Let $0 \leq \ell \leq \lfloor k/3\rfloor$. For all $2 \leq p \leq 4$ and $\varepsilon > 0$,
\begin{equation*}
\|m[a_{k,\ell}](D; \,\cdot\,) f\|_{L^p(\mathbb{R}^{3+1})} \lesssim_{\varepsilon} 2^{-k/p - \ell(1 - 3/p)} 2^{ \varepsilon k}\| f \|_{L^p(\mathbb{R}^3)}.
\end{equation*}
\end{proposition}

Proposition~\ref{J=3 LS prop} provides an effective bound in the large $\ell$ regime (in particular, for $\lfloor k/5\rfloor \leq \ell \leq \lfloor k/3\rfloor$). This corresponds to those pieces of the multiplier which are supported close to the binormal cone $\Gamma$, and therefore lie in a neighbourhood of the most significant singularity. In addition to Proposition~\ref{J=3 LS prop}, we also use results from \cite{PS2007} to deal with the less singular pieces of the multiplier.

\begin{proposition}[\cite{PS2007}]\label{PS LS prop} Let $0 \leq \ell \leq \lfloor k/3\rfloor$. For all $2 \leq p \leq 6$ and $\varepsilon > 0$,
\begin{equation*}
\|m[a_{k,\ell}](D; \,\cdot\,) f\|_{L^p(\mathbb{R}^{3+1})} \lesssim_{\varepsilon} 2^{-\frac{k-\ell}{2}(\frac{1}{2} + \frac{1}{p}) + \varepsilon k}\| f \|_{L^p(\mathbb{R}^3)}.
\end{equation*}
\end{proposition}

This proposition follows from Theorem~\ref{PS LS J=2} via the sharp Wolff inequality for the light cone \cite{BD2015} and a rescaling argument (cf.\ \S\ref{overview subsec}). The details of the proof can be found in~\cite[\S5]{PS2007}.
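Before turning to the proof, it may help to record what the objects $\theta_2$, $u$ and $\Gamma$ look like in the model case; the following explicit formulae are a supplementary illustration and are not used in the argument. For $\gamma = \gamma_{\circ}$ with $n = 3$, we have $\inn{\gamma_{\circ}'(s)}{\xi} = \xi_1 + s\xi_2 + \tfrac{s^2}{2}\xi_3$ and $\inn{\gamma_{\circ}''(s)}{\xi} = \xi_2 + s\xi_3$, so that
\begin{equation*}
\theta_2(\xi) = -\frac{\xi_2}{\xi_3}, \qquad u(\xi) = \xi_1 - \frac{\xi_2^2}{2\xi_3},
\end{equation*}
and $\Gamma$ is contained in the quadric cone $\{\xi : 2\xi_1\xi_3 = \xi_2^2\}$. Moreover, when $u(\xi) < 0$,
\begin{equation*}
\theta_1^{\pm}(\xi) = \theta_2(\xi) \pm \Big(\frac{2|u(\xi)|}{\xi_3}\Big)^{1/2} \qquad \textrm{and} \qquad v^{\pm}(\xi) = \pm \big(2|u(\xi)|\, \xi_3\big)^{1/2},
\end{equation*}
which is consistent with Lemma~\ref{root control lem} (recall that $\xi_3 > 0$ and $\xi_3 \sim |\xi|$ on $\mathrm{supp}_{\xi}\, a$).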
\begin{proof}[Proof of Theorem~\ref{LS thm}, assuming Proposition~\ref{J=3 LS prop}] Applying the decomposition \eqref{J=3 akell def} and the triangle inequality,
\begin{equation*}
\|m[a_k](D; \,\cdot\,) f\|_{L^p(\mathbb{R}^{3+1})} \leq \sum_{\ell=0}^{\lfloor k/5\rfloor} \|m[a_{k,\ell}](D; \,\cdot\,) f\|_{L^p(\mathbb{R}^{3+1})} + \sum_{\ell=\lfloor k/5\rfloor + 1}^{\lfloor k/3\rfloor} \|m[a_{k,\ell}](D; \,\cdot\,) f\|_{L^p(\mathbb{R}^{3+1})}.
\end{equation*}
For $2 \leq p \leq 4$ we may bound the terms of the first sum using Proposition~\ref{PS LS prop} and the terms of the second using Proposition~\ref{J=3 LS prop}. If, in addition, we assume $p \geq 3$, then in both sums the worst terms occur at $\ell \sim k/5$, where the two exponents match and equal $-\tfrac{k}{5}(1+\tfrac{2}{p})$ up to the $\varepsilon k$ losses; summing the resulting series in $\ell$ therefore gives the desired bound.
\end{proof}

\section{Proof of Theorem~\ref{LS thm}: the main argument}\label{J=3 sec}

By the observations of the previous section, the problem is reduced to establishing Proposition~\ref{J=3 LS prop}. In this section we provide the details of the proof, following the scheme sketched in~\S\ref{overview subsec}.

\subsection{Localisation along the curve}\label{loc curv subsec} We begin by further decomposing the symbols with respect to the distance of the $s$-variable to the roots $\theta_1^{\pm}(\xi)$ and $\theta_2(\xi)$. Here it is convenient to introduce a `fine tuning' constant $\rho > 0$. This is a small (but absolute) constant which plays a minor technical r\^ole in the forthcoming arguments: taking $\rho := 10^{-6}$ more than suffices for our purposes. Recall from Lemma~\ref{theta1 lem} that the two distinct roots $\theta_1^{\pm}(\xi)$ only occur when $u(\xi) < 0$. In view of this, let $\beta^{>0}$, $\beta^{<0} \in C_c^\infty(\mathbb{R})$ be the unique functions with $\mathrm{supp}\, \beta^{>0} \subset (0,\infty)$ and $\mathrm{supp}\, \beta^{<0} \subset (-\infty, 0)$ such that $\beta = \beta^{>0} +\beta^{<0}$. This induces a corresponding decomposition $a_{k,\ell} = a_{k,\ell}^{>0} + a_{k, \ell}^{<0}$ for $0 \leq \ell < \lfloor k/3\rfloor$, where $u(\xi)$ is positive (respectively, negative) on the support of $a_{k,\ell}^{>0}$ (respectively, $a_{k,\ell}^{<0}$). Given $\varepsilon>0$, define
\begin{equation*}
a_{k,\ell}^{(\varepsilon), \pm}(\xi;t;s):=a_{k,\ell}^{<0}(\xi;t;s) \, \eta \big(\rho^{-1}2^{(k-\ell)/2}2^{-k\varepsilon}|s- \theta_1^{\pm}(\xi)|\big) \quad \textrm{if $0 \leq \ell < \lfloor k/3\rfloor_{\,\varepsilon}$}
\end{equation*}
and
\begin{equation}\label{akell dec}
a_{k,\ell}^{(\varepsilon)}(\xi;t;s):= \left\{\begin{array}{ll} \displaystyle \sum_\pm a_{k,\ell}^{(\varepsilon), \pm}(\xi;t;s) \quad & \textrm{if $0 \leq \ell < \lfloor k/3\rfloor_{\,\varepsilon}$} \\[8pt] \displaystyle a_{k,\ell}(\xi;t;s) \, \eta \big(\rho 2^{\ell(1-\varepsilon)}|s-\theta_2(\xi)|\big) & \textrm{if $\lfloor k/3\rfloor_{\,\varepsilon} \leq \ell \leq \lfloor k/3\rfloor$} \end{array}\right. ,
\end{equation}
where $\lfloor k/3\rfloor_{\,\varepsilon} := \big\lfloor \big(\frac{1 - \varepsilon}{3}\big) k \big\rfloor$ is a number we think of as being slightly smaller than $\lfloor k/3\rfloor$.
Note that
\begin{equation*}
\min_\pm |s-\theta_1^\pm(\xi)| \lesssim \rho 2^{-(k-\ell)/2 + k \varepsilon} \qquad \textrm{for all $(\xi;t;s) \in \mathrm{supp}\, a_{k,\ell}^{(\varepsilon)}$ \quad if $0 \leq \ell < \lfloor k/3\rfloor_\varepsilon$}.
\end{equation*}

\begin{remark} The symbols $a_{k,\ell}^{(\varepsilon), +}$ and $a_{k,\ell}^{(\varepsilon), -}$ have disjoint supports if $0 \leq \ell < \lfloor k/3\rfloor_{\varepsilon}$. Indeed, the decomposition ensures that $|u(\xi)| \sim 2^{k-2\ell}$ for all $\xi \in \mathrm{supp}_{\xi}\, a_{k,\ell}^{(\varepsilon)}$ and so Lemma~\ref{root control lem} implies
\begin{equation*}
|\theta_1^-(\xi) - \theta_1^+(\xi)| \gtrsim 2^{-\ell} \gtrsim 2^{-(k-\ell)/2}2^{k\varepsilon}.
\end{equation*}
Here we use the hypothesis $\ell < \lfloor k/3\rfloor_{\,\varepsilon}$. Provided $\rho$ is chosen to be sufficiently small, the above separation condition ensures the disjointness of the supports of $a_{k,\ell}^{(\varepsilon), +}$ and $a_{k,\ell}^{(\varepsilon), -}$. Consequently,
\begin{equation*}
\min_\pm|s-\theta_1^\pm(\xi)| \gtrsim 2^{-(k-\ell)/2 + k \varepsilon} \qquad \textrm{for all $(\xi;t;s) \in \mathrm{supp}\, (a_{k,\ell}^{<0} - a_{k,\ell}^{(\varepsilon)})$}
\end{equation*}
if $0 \leq \ell < \lfloor k/3\rfloor_\varepsilon$.
\end{remark}

The main contribution to $m[a_{k,\ell}]$ comes from the symbols $a_{k,\ell}^{(\varepsilon)}$.

\begin{lemma}\label{J=3 s loc lem} Let $2 \leq p < \infty$ and $\varepsilon>0$. For all $0 \leq \ell \leq \lfloor k/3\rfloor$,
\begin{equation*}
\|m[a_{k,\ell} - a_{k,\ell}^{(\varepsilon)}](D; \,\cdot\,) f \|_{L^p(\mathbb{R}^{3+1})} \lesssim_{N,\varepsilon,p} 2^{-kN} \| f \|_{L^p(\mathbb{R}^3)} \qquad \textrm{for all $N \in \mathbb{N}$.}
\end{equation*}
\end{lemma}

\begin{proof} It is clear that the multipliers satisfy a trivial $L^{\infty}$-estimate with operator norm $O(2^{Ck})$ for some absolute constant $C \geq 1$. Thus, by interpolation, it suffices to prove the rapid decay estimate for $p = 2$ only. This amounts to showing that, under the hypotheses of the lemma,
\begin{equation}\label{J=3 curve loc 0}
\|m[a_{k,\ell}-a_{k,\ell}^{(\varepsilon)}](\,\cdot\,; t)\|_{L^{\infty}(\widehat{\mathbb{R}}^3)} \lesssim_{N,\varepsilon} 2^{-kN} \qquad \textrm{for all $N \in \mathbb{N}$}
\end{equation}
uniformly in $1/2 \leq t \leq 4$.\medskip

\noindent \underline{Case: $\lfloor k/3\rfloor_{\,\varepsilon} \leq \ell \leq \lfloor k/3\rfloor$}. Here the localisation of the $a_{k,\ell}$ and $a_{k,\ell}^{(\varepsilon)}$ symbols ensures that
\begin{equation}\label{J=3 curve loc 1}
|u(\xi)| \lesssim 2^{k-2\ell} \quad \text{ and } \quad |s-\theta_2(\xi)| \gtrsim \rho^{-1} 2^{-\ell(1 - \varepsilon)} \quad \textrm{for all $(\xi;t;s) \in \mathrm{supp}\, (a_{k,\ell}-a_{k,\ell}^{(\varepsilon)})$,}
\end{equation}
where $u$ is the function introduced in \eqref{J=3 u function}. Fix $\xi \in \mathrm{supp}_{\xi}\, (a_{k,\ell}-a_{k,\ell}^{(\varepsilon)})$ and consider the oscillatory integral $m[a_{k,\ell} - a_{k,\ell}^{(\varepsilon)}](\xi;t)$, which has phase $s \mapsto t \, \inn{\gamma(s)}{\xi}$.
Taylor expansion around $\theta_2(\xi)$ yields
\begin{align}
\label{J=3 curve loc 3} \inn{\gamma'(s)}{\xi} &= u(\xi) + \omega_1(\xi;s) \, (s-\theta_2(\xi))^2, \\
\label{J=3 curve loc 4} \inn{\gamma''(s)}{\xi} &=\omega_2(\xi;s) \, (s-\theta_2(\xi)),
\end{align}
where the $\omega_i$ arise from the remainder terms and satisfy $|\omega_i(\xi;s)| \sim 2^k$. Provided $\rho$ is sufficiently small, \eqref{J=3 curve loc 1} implies that the term $\omega_1(\xi;s) \, (s-\theta_2(\xi))^2$ dominates the right-hand side of \eqref{J=3 curve loc 3} and therefore
\begin{equation}\label{J=3 curve loc 5}
|\inn{\gamma'(s)}{\xi}| \gtrsim 2^{k} |s-\theta_2(\xi)|^2\qquad \textrm{for all $(\xi;t;s) \in \mathrm{supp}\, (a_{k,\ell}-a_{k,\ell}^{(\varepsilon)})$.}
\end{equation}
Furthermore, \eqref{J=3 curve loc 4}, \eqref{J=3 curve loc 5} and the localisation \eqref{J=3 curve loc 1} immediately imply
\begin{align*}
|\inn{\gamma''(s)}{\xi}| & \lesssim 2^{-k+3\ell(1-\varepsilon)}|\inn{\gamma'(s)}{\xi}|^2, \\
|\inn{\gamma^{(j)}(s)}{\xi}| & \lesssim 2^{k} \lesssim_j 2^{-(k-3\ell(1-\varepsilon))(j-1)} |\inn{\gamma'(s)}{\xi}|^j \qquad \text{for all $j \geq 3$}
\end{align*}
and all $(\xi;t;s) \in \mathrm{supp}\, (a_{k,\ell}-a_{k,\ell}^{(\varepsilon)})$, where in the last inequality we have used $|s-\theta_2(\xi)|^{j-3} \lesssim 1$ for all $j \geq 3$. On the other hand, by the definition of the symbols, \eqref{J=3 curve loc 5} and the localisation in \eqref{J=3 curve loc 1},
\begin{equation*}
|\partial_s^N (a_{k,\ell}-a_{k,\ell}^{(\varepsilon)})(\xi;s)| \lesssim_N 2^{\ell(1-\varepsilon) N} \lesssim 2^{-(k-3\ell)N - 3\varepsilon \ell N}|\inn{\gamma'(s)}{\xi}|^N \qquad \textrm{for all $N \in \mathbb{N}$}
\end{equation*}
and all $(\xi; t; s) \in \mathrm{supp}\, (a_{k,\ell} - a_{k,\ell}^{(\varepsilon)})$. Thus, by repeated integration-by-parts (via Lemma~\ref{non-stationary lem}, applied with $r=2^{k-3\ell+ 3\varepsilon \ell } \geq 1$ for $0 \leq \ell \leq k/3$), one concludes that
\begin{equation*}
|m[a_{k,\ell}-a_{k,\ell}^{(\varepsilon)}](\xi;t)| \lesssim_N 2^{-(k-3\ell)N - 3\varepsilon \ell N} \qquad \textrm{for all $N \in \mathbb{N}$}
\end{equation*}
uniformly in $1/2 \leq t \leq 4$. Since $\lfloor k/3\rfloor_{\,\varepsilon} \leq \ell \leq \lfloor k/3\rfloor \leq k/3$, the desired bound follows.\medskip

\noindent \underline{Case: $0 \leq \ell < \lfloor k/3\rfloor_{\,\varepsilon}$}. If $u(\xi) > 0$, then \eqref{convex} and \eqref{J=3 curve loc 3} imply
\begin{equation*}
|\inn{\gamma'(s)}{\xi}| \gtrsim |u(\xi)| + 2^k|s - \theta_2(\xi)|^2 \qquad \textrm{for all $(\xi;s) \in \mathrm{supp}\, a_{k,\ell}^{>0}$.}
\end{equation*}
Furthermore, the localisation of the symbol $a_{k,\ell}^{>0}$ guarantees that $u(\xi) \sim 2^{k-2\ell}$ for all $\xi \in \mathrm{supp}\, a_{k,\ell}^{>0}$. It is then a straightforward exercise to adapt the argument used in the previous case to show that $\|m[a_{k,\ell}^{>0}](\,\cdot\,; t)\|_{\infty} \lesssim_{N,\varepsilon} 2^{-kN}$, splitting the analysis into the cases $|s-\theta_2(\xi)|\geq 2^{-\ell}$ and $|s-\theta_2(\xi)|\leq 2^{-\ell}$. Here we use the fact that $2^{-(k-3\ell)} \leq 2^{-\varepsilon k}$.
Thus, the problem is reduced to proving
\begin{equation*}
\|m[a_{k,\ell}^{<0} -a_{k,\ell}^{(\varepsilon)}](\,\cdot\,; t)\|_{L^{\infty}(\widehat{\mathbb{R}}^3)} \lesssim_{N,\varepsilon} 2^{-kN}.
\end{equation*}
Here the localisation of the $a_{k,\ell}^{<0}$ and $a_{k,\ell}^{(\varepsilon)}$ symbols ensures that
\begin{equation}\label{J=3 curve loc 1a}
|u(\xi)| \sim 2^{k-2\ell} \quad \text{ and } \quad \min_{\pm}|s-\theta_1^{\pm}(\xi)| \gtrsim 2^{-(k-\ell)/2 + k \varepsilon} \quad \textrm{for all $(\xi;t;s) \in \mathrm{supp}\, (a_{k,\ell}^{<0} - a_{k,\ell}^{(\varepsilon)})$,}
\end{equation}
where $u$ is the function introduced in \eqref{J=3 u function}. Fix $\xi \in \mathrm{supp}_{\xi}\, (a_{k,\ell}^{<0} - a_{k,\ell}^{(\varepsilon)})$ and consider the oscillatory integral $m[a_{k,\ell}^{<0} - a_{k,\ell}^{(\varepsilon)}](\xi;t)$, which has phase $s \mapsto t \, \inn{\gamma(s)}{\xi}$. If we define
\begin{equation*}
\phi \colon [-1,1] \to \mathbb{R}, \quad \phi \colon s \mapsto \inn{\gamma'(s)}{\xi},
\end{equation*}
then, by \eqref{convex}, this function is strictly convex. Thus, given $t \in [-1,1]$, the auxiliary function
\begin{equation*}
q_t \colon [-1,1] \to \mathbb{R}, \quad q_t \colon s \mapsto \frac{\phi(s) - \phi(t)}{s - t} \quad \textrm{for $s \neq t$} \quad \textrm{and} \quad q_t \colon t \mapsto \phi'(t)
\end{equation*}
is increasing. Setting $t := \theta_1^-(\xi)$ and noting that $\phi\circ\theta_1^-(\xi)=0$, it follows that
\begin{equation*}
\frac{\phi(s)}{s-\theta_1^-(\xi)} \leq \frac{\phi\circ \theta_2(\xi)}{\theta_2(\xi)-\theta_1^-(\xi)} = \frac{u(\xi)}{\theta_2(\xi)-\theta_1^-(\xi)} < 0 \qquad \text{ for all } -1 \leq s \leq \theta_2(\xi),
\end{equation*}
where we have used the fact that $u(\xi) < 0$ on the support of $a_{k,\ell}^{<0}$. If $s \in [\theta_2(\xi), 1]$, then we can carry out the same argument with respect to $t = \theta_1^+(\xi)$ to obtain a similar inequality. From this, we deduce the bound
\begin{equation}\label{J=3 curve loc 2a}
|\inn{\gamma'(s)}{\xi}| \geq \min_{\pm} \frac{|u(\xi)| |s-\theta_1^{\pm}(\xi)|} {|\theta_2(\xi)- \theta_1^{\pm}(\xi)|} \qquad \text{ for all } -1 \leq s \leq 1.
\end{equation}
Recall from \eqref{J=3 curve loc 1a} that $|u(\xi)| \sim 2^{k-2\ell}$ and therefore $|\theta_2(\xi)- \theta_1^{\pm}(\xi)| \sim 2^{-\ell}$ by Lemma~\ref{root control lem}.
Substituting these bounds and the second bound in \eqref{J=3 curve loc 1a} into \eqref{J=3 curve loc 2a}, we conclude that
\begin{equation}\label{J=3 curve loc 3a}
|\inn{\gamma'(s)}{\xi}| \gtrsim 2^{k-\ell}\min_{\pm}|s-\theta_1^{\pm}(\xi)| \gtrsim 2^{(k-\ell)/2 + \varepsilon k} \qquad \textrm{for all $(\xi;t;s) \in \mathrm{supp}\, (a_{k,\ell}^{<0} - a_{k,\ell}^{(\varepsilon)})$.}
\end{equation}
Furthermore, by the mean value theorem,
\begin{equation*}
|\inn{\gamma''(s)}{\xi}| \lesssim \max_{\pm} |v^{\pm}(\xi)| + 2^k\min_{\pm}|s - \theta_1^{\pm}(\xi)| \lesssim 2^{k-\ell} + 2^{\ell}|\inn{\gamma'(s)}{\xi}| \lesssim 2^{-k\varepsilon}|\inn{\gamma'(s)}{\xi}|^2,
\end{equation*}
where we have used \eqref{J=3 curve loc 3a}, the bound $|v^{\pm}(\xi)| \sim 2^{k-\ell}$ for $\xi \in \mathrm{supp}\, a_{k,\ell}^{<0}$ from Lemma~\ref{root control lem} and the condition $0 \leq \ell \leq k/3$ in the last inequality. For higher order derivatives,
\begin{equation*}
|\inn{\gamma^{(j)}(s)}{\xi}| \lesssim_j 2^k \lesssim_j 2^{-(j-1)k\varepsilon } |\inn{\gamma'(s)}{\xi}|^j \qquad \textrm{for all $j \geq 3$}
\end{equation*}
and all $(\xi;t;s) \in \mathrm{supp}\, (a_{k,\ell}^{<0} - a_{k,\ell}^{(\varepsilon)})$. On the other hand, by the definition of the symbols and \eqref{J=3 curve loc 3a}, we have
\begin{equation*}
|\partial_s^N (a_{k,\ell}^{<0}-a_{k,\ell}^{(\varepsilon)})(\xi;s)| \lesssim_N 2^{N(k-\ell)/2} 2^{-Nk \varepsilon} \lesssim 2^{-2Nk \varepsilon}|\inn{\gamma'(s)}{\xi}|^N \qquad \textrm{for all $N \in \mathbb{N}$}
\end{equation*}
and all $(\xi; t; s) \in \mathrm{supp}\, (a_{k,\ell}^{<0}-a_{k,\ell}^{(\varepsilon)})$. Thus, by repeated integration-by-parts (via Lemma~\ref{non-stationary lem}, applied with $r := 2^{k\varepsilon/2} \geq 1$), one obtains the desired bound \eqref{J=3 curve loc 0}.
\end{proof}

\subsection{Fourier localisation}\label{J=3 Fourier loc subsec} We perform a radial decomposition of the symbols $a_{k,\ell}^{(\varepsilon)}$ with respect to the homogeneous functions $\theta_2$ and $\theta_1^{\pm}$. Fix $\zeta \in C^{\infty}(\mathbb{R})$ with $\mathrm{supp}\, \zeta \subseteq [-1,1]$ such that $\sum_{l \in \mathbb{Z}} \zeta(\,\cdot\, - l) \equiv 1$. For $k \in \mathbb{N}$ and $0 \leq \ell < \lfloor k/3\rfloor_\varepsilon$, write
\begin{equation*}
a_{k,\ell}^{(\varepsilon)} = \sum_\pm \sum_{\nu \in \mathbb{Z}} a_{k,\ell}^{\nu, (\varepsilon), \pm}
\end{equation*}
where
\begin{equation*}
a_{k,\ell}^{\nu, (\varepsilon),\pm}(\xi;t;s) := a_{k,\ell}^{(\varepsilon), \pm}(\xi;t;s) \, \zeta\big(\rho^{-1}(2^{(k-\ell)/2}\theta_1^{\pm}(\xi) - \nu)\big) \qquad \textrm{if $0 \leq \ell < \lfloor k/3\rfloor_{\varepsilon}$}.
\end{equation*}
Each of the two terms in $\sum_\pm$ can be treated analogously. In order to simplify the notation, we drop the symbol $\pm$ from $a_{k,\ell}^{\nu, (\varepsilon)}$ and $\theta_1^\pm$ and adopt the convention
\begin{equation}\label{dec nu}
a_{k,\ell}^{(\varepsilon)} = \sum_{\nu \in \mathbb{Z}} a_{k,\ell}^{\nu, (\varepsilon)}.
\end{equation}
The key properties of this decomposition are that
\begin{equation}\label{akellnu dec 1}
|s-\theta_1(\xi)| \lesssim \rho 2^{-(k-\ell)/2 + k \varepsilon} \quad \text{ and } \quad |\theta_1(\xi) - s_\nu| \lesssim \rho 2^{-(k-\ell)/2 } \quad \text{for all $(\xi; t; s) \in \mathrm{supp}\, a_{k,\ell}^{\nu,(\varepsilon)} $},
\end{equation}
where $s_\nu := 2^{-(k-\ell)/2} \nu$ and $\theta_1 \in \{\theta_1^+(\xi), \theta_1^-(\xi)\}$. The decomposition \eqref{dec nu} is also extended to the range $\lfloor k/3\rfloor_\varepsilon \leq \ell \leq \lfloor k/3\rfloor$, with
\begin{equation}\label{akellnu dec 2}
a_{k,\ell}^{\nu, (\varepsilon)}(\xi;t;s) :=a_{k,\ell}^{(\varepsilon)}(\xi;t;s) \, \zeta(2^{\ell}\theta_2(\xi) - \nu) \qquad \textrm{if $\lfloor k/3\rfloor_{\,\varepsilon} \leq \ell \leq \lfloor k/3\rfloor$}.
\end{equation}
In the case $0 \leq \ell < \lfloor k/3\rfloor_{\,\varepsilon}$ we also consider symbols formed by grouping the $a_{k,\ell}^{\nu, (\varepsilon)}$ into pieces at the larger scale $2^{-\ell}$. Given $0 \leq \ell < \lfloor k/3\rfloor_{\,\varepsilon}$, we write $\mathbb{Z} = \bigcup_{\mu \in \mathbb{Z}} \mathfrak{N}_{\ell}(\mu)$, where the sets $\mathfrak{N}_{\ell}(\mu)$ are disjoint and satisfy
\begin{equation*}
\mathfrak{N}_{\ell}(\mu)\subseteq \{\nu \in \mathbb{Z}: |\nu-2^{(k-3\ell)/2} \mu| \leq 2^{(k-3\ell)/2} \}.
\end{equation*}
For each $\mu \in \mathbb{Z}$, we then define
\begin{equation*}
a_{k,\ell}^{*,\mu, (\varepsilon)} := \sum_{\nu \in \mathfrak{N}_{\ell}(\mu)} a_{k,\ell}^{\nu, (\varepsilon)}
\end{equation*}
and note that $|\theta_1^\pm(\xi)-s_{\mu}|\lesssim 2^{-\ell}$ on $\mathrm{supp}_{\xi}\, a_{k,\ell}^{*,\mu, (\varepsilon)}$, where $s_{\mu}:=2^{-\ell}\mu$. Of course, by the definition of the sets $\mathfrak{N}_{\ell}(\mu)$,
\begin{equation*}
a_{k,\ell}^{(\varepsilon)}=\sum_{\mu \in \mathbb{Z}} a_{k,\ell}^{*,\mu, (\varepsilon)} = \sum_{\mu \in \mathbb{Z}} \sum_{\nu \in \mathfrak{N}_{\ell}(\mu)} a_{k,\ell}^{\nu, (\varepsilon)}.
\end{equation*}
It is notationally convenient to trivially extend these definitions by setting $\mathfrak{N}_{\ell}(\mu) := \{\mu\}$ for $\lfloor k/3\rfloor_{\,\varepsilon} \leq \ell \leq \lfloor k/3\rfloor$ and, in this case, defining $a_{k,\ell}^{*,\mu, (\varepsilon)} := a_{k,\ell}^{\mu, (\varepsilon)}$ accordingly.\medskip

Given $0 < r \leq 1$ and $s \in I$, recall the definition of the $(1,r)$-\textit{Frenet boxes} $\pi_{1}(s;\,r)$ introduced in Definition~\ref{def Frenet box}:
\begin{equation*}
\pi_1(s;\,r):= \big\{ \xi \in \widehat{\mathbb{R}}^3: |\inn{\mathbf{e}_j(s)}{\xi}| \lesssim r^{3-j} \,\, \textrm{for $j=1,\,2$}, \quad |\inn{\mathbf{e}_{3}(s)}{\xi}| \sim 1\big\}.
\end{equation*}
It is also convenient to consider 2-parameter variants of the $(0,r)$-Frenet boxes. Given $0 < r_1, r_2$ and $s \in I$, define the set
\begin{equation*}
\pi_0(s;\,r_1, r_2) :=
\big\{\xi \in \widehat{\mathbb{R}}^3: |\inn{\mathbf{e}_1(s)}{\xi}| \lesssim r_1, \,\, |\inn{\mathbf{e}_2(s)}{\xi}| \sim 1, \,\, |\inn{\mathbf{e}_3(s)}{\xi}| \lesssim r_2\big\}.
\end{equation*}
The geometric significance of these sets is made apparent in \S\ref{f freq loc subsec} (and, in particular, in Lemma~\ref{freq resc lem}) below. The symbols $a_{k,\ell}^{*, \mu, (\varepsilon)}$ and $a_{k,\ell}^{\nu, (\varepsilon)}$ satisfy the following support properties.

\begin{lemma}\label{J=3 supp lem} For all $0 \leq \ell \leq \lfloor k/3\rfloor$, $\varepsilon > 0$ and $\mu$, $\nu \in \mathbb{Z}$:
\begin{enumerate}[a)]
\item If $\nu \in \mathfrak{N}_{\ell}(\mu)$, then $\mathrm{supp}_{\xi}\, a_{k,\ell}^{\nu, (\varepsilon)} \subseteq 2^k \cdot \pi_1(s_{\mu}; 2^{-\ell})$, where $s_{\mu} := 2^{-\ell} \mu$;
\item If $\ell < \lfloor k/3\rfloor_{\,\varepsilon}$, then $\mathrm{supp}_{\xi}\, a_{k,\ell}^{\nu, (\varepsilon)} \subseteq 2^{k-\ell} \cdot \pi_0(s_{\nu}; 2^{-(k- \ell)/2}, 2^{\ell})$, where $s_{\nu} := 2^{-(k-\ell)/2 } \nu$.
\end{enumerate}
\end{lemma}

As an immediate consequence of part a), we see that $\mathrm{supp}_{\xi}\, a_{k,\ell}^{*, \mu, (\varepsilon)} \subseteq 2^k \cdot \pi_1(s_{\mu}; 2^{-\ell})$.

\begin{proof}[Proof of Lemma~\ref{J=3 supp lem}]
\noindent a) For $\xi \in \mathrm{supp}_{\xi}\, a_{k,\ell}^{\nu, (\varepsilon)}$, observe that the localisation in \eqref{J=3 akell def} implies
\begin{equation*}
|\inn{\gamma^{(i)}\circ \theta_2(\xi)}{\xi}| \lesssim 2^{k-(3-i)\ell} \qquad \textrm{for $i = 1$, $2$,} \qquad |\inn{\gamma^{(3)}\circ \theta_2(\xi)}{\xi}| \sim 2^k.
\end{equation*}
If $0 \leq \ell < \lfloor k/3\rfloor_{\,\varepsilon}$, then $|s_{\nu} - \theta_1(\xi)| \lesssim 2^{-(k-\ell)/2}$ and so
\begin{equation*}
|s_{\mu} - \theta_2(\xi)| \leq |s_{\mu} - s_{\nu}| + |s_{\nu} - \theta_1(\xi)| + |\theta_1(\xi) - \theta_2(\xi)| \lesssim 2^{-(k-\ell)/2} + 2^{-\ell} \lesssim 2^{-\ell}
\end{equation*}
by Lemma~\ref{root control lem}. Note that the inequality $|s_{\mu} - \theta_2(\xi)| \lesssim 2^{-\ell}$ also extends to the case $\lfloor k/3\rfloor_{\,\varepsilon} \leq \ell \leq \lfloor k/3\rfloor$ in view of the definition of the symbol from \eqref{akellnu dec 2}. Taylor expansion around $\theta_2(\xi)$ therefore yields
\begin{equation*}
|\inn{\gamma^{(i)}(s_{\mu})}{\xi}| \lesssim 2^{k-(3-i)\ell} \qquad \textrm{for $i = 1$, $2$,} \qquad |\inn{\gamma^{(3)}(s_{\mu})}{\xi}| \sim 2^k.
\end{equation*}
Since the Frenet vectors $\mathbf{e}_i(s_{\mu})$ are obtained from the $\gamma^{(i)}(s_{\mu})$ via the Gram--Schmidt process, the matrix corresponding to the change of basis from $\big(\mathbf{e}_i(s_{\mu})\big)_{i=1}^3$ to $\big(\gamma^{(i)}(s_{\mu})\big)_{i=1}^3$ is lower triangular. Furthermore, the initial localisation implies that this matrix is an $O(\delta_0)$ perturbation of the identity. Consequently,
\begin{equation*}
|\inn{\mathbf{e}_i(s_{\mu})}{\xi}| \lesssim 2^{k - (3 - i)\ell} \qquad \textrm{for $1 \leq i \leq 3$}.
\end{equation*}
Provided the parameter $\delta_0 > 0$ is sufficiently small, the argument can easily be adapted to prove the remaining lower bound $|\inn{\mathbf{e}_3(s_{\mu})}{\xi}|\gtrsim 2^k$.\medskip

\noindent b) Let $0 \leq \ell < \lfloor k/3 \rfloor_{\,\varepsilon}$. For $\xi \in \mathrm{supp}_{\xi}\, a_{k,\ell}^{\nu, (\varepsilon)}$ observe that the localisation in \eqref{J=3 akell def} and Lemma~\ref{root control lem} imply
\begin{equation*}
|\inn{\gamma'\circ \theta_1(\xi)}{\xi}| = 0, \qquad |\inn{\gamma''\circ \theta_1(\xi)}{\xi}| \sim 2^{k-\ell}, \qquad |\inn{\gamma^{(3)}\circ \theta_1(\xi)}{\xi}| \sim 2^k.
\end{equation*}
It then follows from Taylor expansion around $\theta_1(\xi)$ that
\begin{equation*}
|\inn{\gamma'(s_{\nu})}{\xi}| \lesssim 2^{(k-\ell)/2}, \quad |\inn{\gamma''(s_{\nu})}{\xi}| \sim 2^{k-\ell} \quad \textrm{and} \quad |\inn{\gamma^{(3)}(s_{\nu})}{\xi}| \sim 2^k,
\end{equation*}
provided $\rho$ is chosen sufficiently small. The $\gamma^{(j)}(s_{\nu})$ in the above estimates can then be replaced with the Frenet vectors $\mathbf{e}_j(s_{\nu})$ by a similar argument to that used in part a).
\end{proof}

\subsection{Spatio-temporal Fourier localisation}\label{spatio temp subsec} The symbols are further localised with respect to the Fourier transform of the $t$-variable. In particular, let
\begin{equation*}
q(\xi) := \inn{\gamma\circ\theta_2(\xi)}{\xi} \qquad \textrm{and} \qquad \chi_{k,\ell}^{(\varepsilon)}(\xi, \tau) := \eta\big(2^{-(k-3\ell)-4\varepsilon k} (\tau + q(\xi))\big)
\end{equation*}
and define the multiplier $m_{k,\ell}^{\nu, (\varepsilon)}$ by
\begin{equation*}
\mathcal{F}_t \big[m_{k,\ell}^{\nu, (\varepsilon)}(\xi;\,\cdot\,)\big](\tau):= \chi_{k,\ell}^{(\varepsilon)}(\xi, \tau) \, \mathcal{F}_t\big[ m[a_{k,\ell}^{\nu, (\varepsilon)}](\xi; \,\cdot\,) \big](\tau).
\end{equation*}
Here $\mathcal{F}_t$ denotes the Fourier transform acting in the $t$ variable. Define $m_{k,\ell}^{*,\mu,(\varepsilon)}$ and $m_{k,\ell}^{(\varepsilon)}$ accordingly by setting
\begin{equation*}
m_{k,\ell}^{*,\mu,(\varepsilon)} := \sum_{\nu \in \mathfrak{N}_{\ell}(\mu)} m_{k,\ell}^{\nu, (\varepsilon)} \qquad \textrm{and} \qquad m_{k,\ell}^{(\varepsilon)} := \sum_{\mu \in \mathbb{Z}} m_{k,\ell}^{*,\mu, (\varepsilon)}.
\end{equation*}
The main contribution to $m[a_{k,\ell}^{\nu, (\varepsilon)}]$ comes from the multipliers $m_{k,\ell}^{\nu, (\varepsilon)}$.

\begin{lemma}\label{J=3 spatio temp loc lem} Let $1 \leq p \leq \infty$ and $\varepsilon>0$.
For all $0 \leq \ell \leq \lfloor k/3 \rfloor$,
\begin{equation*}
\big\|\big(m[a_{k,\ell}^{\nu, (\varepsilon)}] - m_{k,\ell}^{\nu, (\varepsilon)}\big)(D; \,\cdot\,) f \big\|_{L^p(\mathbb{R}^{3+1})} \lesssim_{N,\varepsilon} 2^{-kN} \| f \|_{L^p(\mathbb{R}^3)} \qquad \textrm{for all $N \in \mathbb{N}$.}
\end{equation*}
\end{lemma}

\begin{proof} It suffices to show that
\begin{equation}\label{spatio temporal 1}
|\partial_\xi^\alpha \big( m[a_{k,\ell}^{\nu, (\varepsilon)}] - m_{k,\ell}^{\nu, (\varepsilon)} \big) (\xi;t)| \lesssim_{N,\varepsilon} 2^{-kN}(1+|t|)^{-10} \qquad \text{ for $\alpha \in \mathbb{N}_0^3$, $\,\,|\alpha|\leq 10,\,\,$ and $\,N \in \mathbb{N}$. }
\end{equation}
Indeed, if \eqref{spatio temporal 1} holds, then Fourier inversion and repeated integration-by-parts imply
\begin{equation*}
|\big( m[a_{k,\ell}^{\nu, (\varepsilon)}] - m_{k,\ell}^{\nu, (\varepsilon)} \big)(D;t) f(x)| \lesssim_{N,\varepsilon} 2^{-kN} (1+|t|)^{-10} (1+|\cdot|)^{-10} \ast f (x).
\end{equation*}
Taking the $L^p(\mathbb{R}^{3+1})$-norm of both sides of this inequality immediately yields the desired result.

By the Fourier inversion formula
\begin{equation*}
\big( m[a_{k,\ell}^{\nu, (\varepsilon)}] - m_{k,\ell}^{\nu, (\varepsilon)} \big)(\xi;t) = \frac{1}{2\pi} \int_\mathbb{R} e^{i t \tau} \big(1-\chi_{k,\ell}^{(\varepsilon)}(\xi, \tau) \big) \, \mathcal{F}_t \big[ m[a_{k,\ell}^{\nu, (\varepsilon)}](\xi;\,\cdot\,)\big](\tau) \, \mathrm{d} \tau.
\end{equation*}
Let $\Xi = (\xi, \tau) \in \widehat{\mathbb{R}}^{3+1}$ denote the spatio-temporal frequency variables. Clearly, there exists a constant $C\geq 1$ such that $| \partial_\Xi^{\alpha}\, \chi_{k,\ell}^{(\varepsilon)}(\Xi) | \lesssim 2^{Ck}$ for all $\alpha \in \mathbb{N}_0^4$ with $|\alpha|\leq 20$. Furthermore, if $(\xi, \tau) \in \mathrm{supp}\, \partial_\Xi^{\alpha}\, \big(1-\chi_{k,\ell}^{(\varepsilon)}\big)$, then $|\tau + q(\xi)| \gtrsim 2^{k-3\ell +4 \varepsilon k}$. Thus, by integration-by-parts in the $\tau$-variable, to prove \eqref{spatio temporal 1} it suffices to show
\begin{equation}\label{spatio temporal 2}
| \partial_\Xi^{\alpha}\, \mathcal{F}_t \big[ m[a_{k,\ell}^{\nu, (\varepsilon)}](\xi;\,\cdot\,)\big](\tau)| \lesssim_{N,\varepsilon} 2^{Ck} \big(1 + 2^{-(k - 3\ell + 3\varepsilon k)}|\tau + q(\xi)|\big)^{-N}, \quad \alpha \in \mathbb{N}_0^4,\,\, |\alpha|\leq 20, \,\, N \in \mathbb{N},
\end{equation}
for some choice of absolute constant $C \geq 1$ (not necessarily the same as above).
By the Leibniz rule,
\begin{equation}\label{spatio temporal 3}
\partial_\Xi^{\alpha}\, \mathcal{F}_t \big[ m[a_{k,\ell}^{\nu, (\varepsilon)}](\xi;\,\cdot\,)\big](\tau)=\int_\mathbb{R} e^{-i r (\tau + q(\xi))} m[b_{k,\ell}^{\nu, (\varepsilon),\alpha}] (\xi; r)\, \mathrm{d} r
\end{equation}
where $b_{k,\ell}^{\nu, (\varepsilon),\alpha}(\xi;r;s) := e^{i r q(\xi)}a_{k,\ell}^{\nu, (\varepsilon),\alpha}(\xi;r;s)$ for some symbol $a_{k,\ell}^{\nu, (\varepsilon), \alpha}$ satisfying
\begin{equation}\label{spatio temporal 9}
\big|\partial_r^j\, a_{k,\ell}^{\nu, (\varepsilon), \alpha}(\xi;r;s)\big|\lesssim_j 2^{Ck} \qquad \text{ for all $j \in \mathbb{N}_0$, $\alpha \in \mathbb{N}_0^4$, $|\alpha| \leq 20$, $|r| \lesssim 1$}
\end{equation}
and with $\mathrm{supp}\, a_{k,\ell}^{\nu,(\varepsilon), \alpha} \subseteq \mathrm{supp}\, a_{k,\ell}^{\nu,(\varepsilon)}$. Note, in particular, that
\begin{equation}\label{spatio temporal 4}
m[b_{k,\ell}^{\nu, (\varepsilon),\alpha}](\xi;r) = \int_\mathbb{R} e^{-i r \inn{\gamma(s) - \gamma\, \circ\, \theta_2 (\xi)}{\xi}} a_{k,\ell}^{\nu, (\varepsilon), \alpha} (\xi;r;s) \rho(r) \chi(s)\, \mathrm{d} s.
\end{equation}
By Taylor expansion around $\theta_2(\xi)$, the phase in \eqref{spatio temporal 4} can be written as
\begin{equation}\label{spatio temporal 6}
\inn{\gamma(s) - \gamma \circ \theta_2 (\xi)}{\xi} = u(\xi) \, (s-\theta_2(\xi)) + \omega(\xi;s) \, (s-\theta_2(\xi))^3
\end{equation}
where $\omega$ arises from the remainder term and satisfies $|\omega(\xi;s)|\sim 2^k$. Recall that
\begin{equation}\label{spatio temporal 7}
|u(\xi)|\lesssim 2^{k-2\ell} \qquad \text{ and } \qquad |s-\theta_2(\xi)|\lesssim 2^{-\ell + \varepsilon k} \qquad \text{ for all $\,\,(\xi;r;s) \in \mathrm{supp}\, a_{k,\ell}^{\nu, (\varepsilon)}$,}
\end{equation}
which follows from the definition of $a_{k,\ell}^{\nu, (\varepsilon)}$. Here, in the case $0 \leq \ell < \lfloor k/3 \rfloor_{\,\varepsilon}$, we use Lemma~\ref{root control lem} to deduce that
\begin{equation*}
|s-\theta_2(\xi)|\leq |s-\theta_1(\xi)| + |\theta_1(\xi) - \theta_2(\xi)| \lesssim 2^{-\ell}.
\end{equation*}
Combining the expansion \eqref{spatio temporal 6} and the localisation \eqref{spatio temporal 7} yields
\begin{equation}\label{spatio temporal 8}
|\inn{\gamma(s) - \gamma \circ \theta_2 (\xi)}{\xi}| \lesssim 2^{k-3\ell +3\varepsilon k} \qquad \text{for all $(\xi;r;s) \in \mathrm{supp}\, a_{k,\ell}^{\nu, (\varepsilon)}$.}
\end{equation}
By \eqref{spatio temporal 8}, \eqref{spatio temporal 9} and integration by parts in \eqref{spatio temporal 3}, one obtains
\begin{equation*}
|\partial_\Xi^{\alpha}\, \mathcal{F}_t \big[ m[a_{k,\ell}^{\nu, (\varepsilon)}](\xi;\,\cdot\,)\big](\tau)| \lesssim_{M} 2^{C k} |\tau + q(\xi)|^{-M} 2^{(k-3\ell +3\varepsilon k)M} \qquad \text{for all $M \in \mathbb{N}$}
\end{equation*}
and all $\alpha \in \mathbb{N}_0^4$, $|\alpha|\leq 20$. This implies \eqref{spatio temporal 2} and concludes the proof.
\end{proof}

To understand the support properties of the multipliers $m_{k,\ell}^{*,\mu,(\varepsilon)}$, we introduce the primitive curve
\begin{equation*}
\bar{\gamma} \colon I \to \mathbb{R}^4, \qquad \bar{\gamma} \colon s \mapsto \begin{bmatrix} \int_0^s \gamma \\ s \end{bmatrix}.
\end{equation*}
Here $\int_0^s \gamma$ denotes the vector in $\mathbb{R}^3$ with $i$th component $\int_0^s \gamma_i$ for $1 \leq i \leq 3$. Note that $\bar{\gamma}$ is a non-degenerate curve in $\mathbb{R}^4$ and, in particular, $|\det(\bar{\gamma}^{(1)} \cdots \bar{\gamma}^{(4)})| = |\det(\gamma^{(1)}\cdots \gamma^{(3)})|$. Let $(\bar{\mathbf{e}}_j(s))_{j=1}^4$ denote the Frenet frame associated to $\bar{\gamma}$ and consider the $(2,r)$-Frenet boxes for $\bar{\gamma}$
\begin{equation*}
\pi_{2,\bar{\gamma}}(s;\,r) := \big\{\Xi = (\xi, \tau) \in \widehat{\mathbb{R}}^3 \times \widehat{\mathbb{R}} : |\inn{\bar{\mathbf{e}}_j(s)}{\Xi}| \lesssim r^{4 - j} \textrm{ for $1 \leq j \leq 3$, } |\inn{\bar{\mathbf{e}}_4(s)}{\Xi}| \sim 1 \big\},
\end{equation*}
as introduced in Definition~\ref{def Frenet box}.

\begin{lemma}\label{J=3 4d supp lem} For all $\ceil{4 \varepsilon k} \leq \ell \leq \lfloor k/3 \rfloor$ and $\mu \in \mathbb{Z}$,
\begin{equation*}
\mathrm{supp}\, \mathcal{F}_t \big[m_{k,\ell}^{\ast,\mu,(\varepsilon)}\big] \subseteq 2^k \cdot \pi_{2,\bar{\gamma}}(s_{\mu}; 2^{4\varepsilon k}2^{-\ell}),
\end{equation*}
where $s_{\mu}:=2^{-\ell }\mu$ and $\mathcal{F}_t$ denotes the Fourier transform in the $t$-variable.
\end{lemma}

\begin{proof}
If $\Xi = (\xi, \tau) \in \mathrm{supp}\, \mathcal{F}_t \big[m_{k,\ell}^{\ast,\mu,(\varepsilon)}\big]$, then $\xi \in \mathrm{supp}_{\xi}\, a_{k,\ell}^{\ast,\mu,(\varepsilon)}$ and $|q(\xi) + \tau| \lesssim 2^{4\varepsilon k} 2^{k-3\ell}$. The former condition implies $|u(\xi)| \lesssim 2^{k-2\ell}$ and $|s - \theta_2(\xi)| \lesssim 2^{-\ell + \varepsilon k}$ (see \eqref{spatio temporal 7}) and so, by Taylor expansion around $\theta_2(\xi)$,
\begin{equation}\label{J=3 4d supp 1}
|\inn{\gamma(s_{\mu})}{\xi} + \tau| \lesssim |q(\xi) + \tau| + |u(\xi)||s-\theta_2(\xi)| + 2^k|s-\theta_2(\xi)|^3 \lesssim 2^{4\varepsilon k} 2^{k-3\ell}.
\end{equation}
Define the lifted curve and frame
\begin{equation*}
\gamma_{\uparrow} \colon I \to \mathbb{R}^4, \quad \gamma_{\uparrow} \colon s \mapsto \begin{bmatrix} \gamma(s) \\ 1 \end{bmatrix} \quad \textrm{and} \quad \mathbf{e}_{j,\uparrow} \colon I \to S^3, \quad \mathbf{e}_{j, \uparrow} \colon s \mapsto \begin{bmatrix} \mathbf{e}_j(s) \\ 0 \end{bmatrix} \quad \textrm{for $1 \leq j \leq 3$,}
\end{equation*}
respectively. This definition is motivated by our related work on $L^p$ Sobolev estimates for the moment curve in four dimensions \cite{BGHS-Sobolev}.
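To fix ideas, in the model case $\gamma(s) = (s, s^2/2, s^3/6)$ (which we use purely for illustration) one has
\begin{equation*}
\gamma_{\uparrow}(s) = \big(s,\, \tfrac{s^2}{2},\, \tfrac{s^3}{6},\, 1\big) \qquad \textrm{and} \qquad \bar{\gamma}(s) = \big(\tfrac{s^2}{2},\, \tfrac{s^3}{6},\, \tfrac{s^4}{24},\, s\big),
\end{equation*}
so that, after permuting the coordinates, $\bar{\gamma}$ is precisely the moment curve in $\mathbb{R}^4$ and is, in particular, non-degenerate.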
Note that $\bar{\gamma}$ is a primitive for $\gamma_{\uparrow}$ in the sense that $\bar{\gamma}' = \gamma_{\uparrow}$. By the definition of the Frenet frame, it follows that
\begin{equation*}
\bar{\mathbf{e}}_j(s) \in \langle \gamma_{\uparrow}(s), \gamma_{\uparrow}'(s), \dots, \gamma_{\uparrow}^{(j-1)}(s) \rangle \qquad \textrm{and} \qquad \gamma_{\uparrow}^{(i)}(s) \in \langle \mathbf{e}_{1, \uparrow}(s), \dots, \mathbf{e}_{i,\uparrow}(s) \rangle
\end{equation*}
for $1 \leq i < j \leq 4$. Thus, one readily deduces that
\begin{equation*}
|\inn{\bar{\mathbf{e}}_j(s)}{\Xi}| \lesssim |\inn{\gamma_{\uparrow}(s)}{\Xi}| + \sum_{i = 1}^{j-1} |\inn{\mathbf{e}_i(s)}{\xi}| \qquad \textrm{for $\, \Xi = (\xi, \tau) \in \widehat{\mathbb{R}}^{3+1}$ and $1 \leq j \leq 4$.}
\end{equation*}
If $\Xi = (\xi, \tau) \in \mathrm{supp}\, \mathcal{F}_t \big[m_{k,\ell}^{\ast,\mu,(\varepsilon)}\big]$, then it follows from \eqref{J=3 4d supp 1} that
\begin{equation*}
|\inn{\gamma_{\uparrow}(s_{\mu})}{\Xi}| = | \inn{\gamma(s_{\mu})}{\xi}+\tau| \lesssim 2^{4\varepsilon k} 2^{k-3\ell}.
\end{equation*}
On the other hand, since $\xi \in \mathrm{supp}_{\xi}\, a_{k,\ell}^{\ast,\mu,(\varepsilon)}$, Lemma~\ref{J=3 supp lem} yields
\begin{equation*}
|\inn{\mathbf{e}_i(s_{\mu})}{\xi}| \lesssim 2^{k - (3-i)\ell} \qquad \textrm{for $i = 1$, $2$, } \qquad |\inn{\mathbf{e}_3(s_{\mu})}{\xi}| \sim 2^k.
\end{equation*}
Combining these observations, $|\inn{\bar{\mathbf{e}}_j(s_{\mu})}{\Xi}| \lesssim 2^{4\varepsilon k}2^{k-(4-j)\ell}$ for $1 \leq j \leq 3$ and therefore it suffices to prove $|\inn{\bar{\mathbf{e}}_4(s_{\mu})}{\Xi}| \sim 2^k$. Since our hypothesis $\ell \geq \ceil{4\varepsilon k}$ implies that $2^{4\varepsilon k}2^{k-(3-i)\ell} \leq 2^k$ for $0 \leq i \leq 2$, the above argument directly yields the upper bound. On the other hand, since $\gamma \in \mathfrak{G}_3(\delta_0)$ and we are localised to $|s_{\mu}| \lesssim \delta_0$, the change of basis mapping $(\bar{\mathbf{e}}_j(s_{\mu}))_{j=1}^4$ to $(\gamma_{\uparrow}^{(j-1)}(s_{\mu}))_{j=1}^4$ is an $O(\delta_0)$ perturbation of the identity. In view of this, the above argument can also be adapted to give the required lower bound.
\end{proof}

\subsection{Reverse square function estimates in \texorpdfstring{$\mathbb{R}^{3+1}$}{}} In view of the Fourier localisation described in the previous subsection, Theorem~\ref{Frenet reverse SF theorem} implies the following square function estimate.

\begin{proposition}\label{J=3 rev sf prop} Let $k \in \mathbb{N}$, $0 \leq \ell \leq \lfloor k/3 \rfloor$. For all $2 \leq p \leq 4$ and $\varepsilon>0$, one has
\begin{equation*}
\| m[a_{k,\ell}^{(\varepsilon)}](D;\, \cdot \,) f \|_{L^p(\mathbb{R}^{3+1})} \lesssim_{\varepsilon, N} 2^{(k-3\ell)/4} 2^{O(\varepsilon k)} \big\| \big(\sum_{\nu \in \mathbb{Z}} |m[a_{k,\ell}^{\nu,(\varepsilon)}](D; \,\cdot\,) f|^2 \big)^{1/2} \big\|_{L^p(\mathbb{R}^{3+1})} + 2^{-kN} \|f\|_{L^p(\mathbb{R}^3)}.
\end{equation*}
\end{proposition}

\begin{proof}
First suppose $\ceil{4 \varepsilon k} \leq \ell$ so that Lemma~\ref{J=3 4d supp lem} applies. Thus,
\begin{equation*}
m_{k,\ell}^{(\varepsilon)}(D;\,\cdot\,)f = \sum_{\mu \in \mathbb{Z}} m_{k,\ell}^{*,\mu, (\varepsilon)}(D;\,\cdot\,)f
\end{equation*}
where each $m_{k,\ell}^{*,\mu, (\varepsilon)}(D;\,\cdot\,)f$ has spatio-temporal Fourier support in $2^k\cdot \pi_{2,\bar{\gamma}}(s_{\mu}; 2^{4\varepsilon k}2^{-\ell})$. The family of sets $\pi_{2,\bar{\gamma}}(s_{\mu}; 2^{4\varepsilon k}2^{-\ell})$ for $|\mu| \leq 2^{\ell}$ may be partitioned into $O(2^{4\varepsilon k})$ subfamilies, each forming a $(2, 2^{4\varepsilon k}2^{-\ell})$-Frenet box decomposition for the non-degenerate curve $\bar{\gamma}$ in $\mathbb{R}^4$. Consequently, by Theorem~\ref{Frenet reverse SF theorem} and pigeonholing,
\begin{equation*}
\| m_{k,\ell}^{(\varepsilon)}(D; \,\cdot\,) f \|_{L^p(\mathbb{R}^4)} \lesssim_{\varepsilon} 2^{O(\varepsilon k)} \big\| \big(\sum_{\mu \in \mathbb{Z}} |m_{k,\ell}^{*, \mu,(\varepsilon)}(D; \,\cdot\,) f|^2 \big)^{1/2} \big\|_{L^p(\mathbb{R}^4)}.
\end{equation*}
By a pointwise application of the Cauchy--Schwarz inequality, using the fact that $\# \mathfrak{N}_{\ell}(\mu) \lesssim 2^{(k-3\ell)/2}$ for all $\mu \in \mathbb{Z}$, we conclude that
\begin{equation}\label{J=3 reverse sf 1}
\| m_{k,\ell}^{(\varepsilon)}(D; \,\cdot\,) f \|_{L^p(\mathbb{R}^{3+1})} \lesssim_{\varepsilon} 2^{(k-3\ell)/4}2^{O(\varepsilon k)} \big\| \big(\sum_{\nu \in \mathbb{Z}} |m_{k,\ell}^{\nu,(\varepsilon)}(D; \,\cdot\,) f|^2 \big)^{1/2} \big\|_{L^p(\mathbb{R}^{3+1})}.
\end{equation}
The desired estimate, involving the $m[a_{k,\ell}^{\nu,(\varepsilon)}]$ multipliers rather than the $m_{k,\ell}^{\nu,(\varepsilon)}$, now follows by combining \eqref{J=3 reverse sf 1} and Lemma~\ref{J=3 spatio temp loc lem}. On the other hand, if $0 \leq \ell \leq \ceil{4 \varepsilon k}$, then the result follows directly from the Cauchy--Schwarz inequality.
\end{proof}

\begin{remark} The above square function estimate is not very effective \textit{away} from the binormal cone ($\ell=0$ or small values of $\ell$), as in that case it essentially amounts to a trivial application of the Cauchy--Schwarz inequality. However, as noted in \S\ref{LS rel G sec}, Proposition~\ref{J=3 LS prop} is only used \textit{close} to the binormal cone ($\ell=\lfloor k/3 \rfloor$ or large values of $\ell$), for which Proposition~\ref{J=3 rev sf prop} is most effective. The small values of $\ell$ in proving Theorem~\ref{LS thm} are handled via Proposition~\ref{PS LS prop}.
\end{remark}

For $p = 2$ a stronger square function estimate is available simply due to Plancherel's theorem. In particular, this avoids the loss induced by the Cauchy--Schwarz inequality in the proof above.

\begin{lemma}\label{J=3 L2 rev sf prop} Let $k \in \mathbb{N}$, $0 \leq \ell \leq \lfloor k/3 \rfloor$.
For all $\varepsilon>0$,
\begin{equation*}
\| m[a_{k,\ell}^{(\varepsilon)}](D;\, \cdot \,) f \|_{L^2(\mathbb{R}^{3+1})} \lesssim_{\varepsilon} \big\| \big(\sum_{\nu \in \mathbb{Z}} |m[a_{k,\ell}^{\nu,(\varepsilon)}](D; \,\cdot\,) f|^2 \big)^{1/2} \big\|_{L^2(\mathbb{R}^{3+1})}.
\end{equation*}
\end{lemma}

\begin{proof}
This is simply a consequence of Plancherel's theorem and the fact that the symbols $a_{k,\ell}^{\nu,(\varepsilon)}$ are supported on the essentially disjoint sets $2^{k-\ell} \cdot \pi_0(s_{\nu}; 2^{-(k-\ell)/2}, 2^{\ell})$ by Lemma~\ref{J=3 supp lem}.
\end{proof}

\subsection{Kernel estimates} Given a symbol $a \in C^{\infty}(\widehat{\mathbb{R}}^n\setminus\{0\} \times \mathbb{R} \times \mathbb{R})$, define the associated convolution kernel
\begin{equation*}
K[a](x,t) := \frac{1}{(2\pi)^n}\int_{\widehat{\mathbb{R}}^n} e^{i \inn{x}{\xi}} m[a](\xi;t)\,\mathrm{d} \xi.
\end{equation*}
Each of the localised symbols $a_{k,\ell}^{\nu, (\varepsilon)}$ satisfies the following kernel estimate, which yields a gain due to the localisation of the symbols in the $s$-variable introduced in \eqref{akell dec}.

\begin{lemma}\label{J=3 ker lem} For $k \in \mathbb{N}$ and $0 \leq \ell \leq \lfloor k/3 \rfloor$,
\begin{equation*}
|K[a_{k,\ell}^{\nu, (\varepsilon)}](x,t)| \lesssim 2^{-(k-\ell)/2}2^{O(\varepsilon k)} \, \psi_{\,\mathcal{T}_{k,\ell}(s_{\nu})}(x,t)\, \rho(t)
\end{equation*}
where
\begin{equation*}
\psi_{\,\mathcal{T}_{k,\ell}(s_{\nu})}(x,t) := 2^{(5k-3\ell)/2} \bigg(1 + \sum_{j=1}^3 2^{j(k-\ell)/2 \wedge k}|\inn{x - t \gamma(s_{\nu})}{\mathbf{e}_j(s_{\nu})}|\bigg)^{-100}.
\end{equation*}
\end{lemma}

\begin{proof}
Let $\nabla_{\bm{v}_j}$ denote the directional derivative with respect to the $\xi$ variable in the direction of the vector $\bm{v}_j := \mathbf{e}_j(s_\nu)$, so that
\begin{equation*}
\big(\frac{1}{i \inn{x - t \gamma(s_{\nu})}{\mathbf{e}_j(s_\nu)}}\nabla_{\bm{v}_j} - 1 \big) e^{i\inn{x - t \gamma(s_{\nu})}{\xi}} = 0.
\end{equation*}
Thus, by repeated integration-by-parts, it follows that
\begin{align*}
|K[a_{k,\ell}^{\nu, (\varepsilon)}](x,t)| &\leq |\inn{x - t \gamma(s_{\nu})}{\mathbf{e}_j(s_{\nu})}|^{-N} \int_{\widehat{\mathbb{R}}^3} \big|\nabla_{\bm{v}_j}^N\big[ e^{it\inn{\gamma(s_{\nu})}{\xi}}m[a_{k,\ell}^{\nu, (\varepsilon)}](\xi;t)\big]\big|\,\mathrm{d} \xi \\
&\lesssim 2^{(5k - 3\ell)/2} 2^{O(\varepsilon k)}|\inn{x - t \gamma(s_{\nu})}{\mathbf{e}_j(s_{\nu})}|^{-N} \sup_{\xi \in \widehat{\mathbb{R}}^3}\big|\nabla_{\bm{v}_j}^N\big[ e^{it\inn{\gamma(s_{\nu})}{\xi}}m[a_{k,\ell}^{\nu, (\varepsilon)}](\xi;t)\big]\big|;
\end{align*}
here the second inequality follows from the $\xi$-support properties of the symbols $a_{k,\ell}^{\nu, (\varepsilon)}$ from Lemma~\ref{J=3 supp lem} b) if $0 \leq \ell < \lfloor k/3 \rfloor_{\,\varepsilon}$ (in which case there is no $2^{O(\varepsilon k)}$ loss) or Lemma~\ref{J=3 supp lem} a) if $\lfloor k/3 \rfloor_{\,\varepsilon} \leq \ell \leq \lfloor k/3 \rfloor$.
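For orientation, we note that the normalising factor $2^{(5k-3\ell)/2}$ in the definition of $\psi_{\,\mathcal{T}_{k,\ell}(s_{\nu})}$ is comparable to the measure of the $\xi$-support of the symbol: indeed, in the range covered by Lemma~\ref{J=3 supp lem} b),
\begin{equation*}
|\mathrm{supp}_{\xi}\, a_{k,\ell}^{\nu, (\varepsilon)}| \lesssim 2^{(k-\ell)/2} \cdot 2^{k-\ell} \cdot 2^{k} = 2^{(5k-3\ell)/2},
\end{equation*}
whilst in the remaining range the larger containing set $2^k \cdot \pi_1(s_{\mu}; 2^{-\ell})$ from Lemma~\ref{J=3 supp lem} a) has measure $O(2^{3(k-\ell)})$, which is the source of the $2^{O(\varepsilon k)}$ loss. This normalisation also ensures that the $\psi_{\,\mathcal{T}_{k,\ell}(s_{\nu})}(\,\cdot\,,t)$ are $L^1$-normalised in the $x$-variable, uniformly in $t$, a fact which is used in \S\ref{L2 wtd 3+1 sec} below.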
Observe that
\begin{equation*}
e^{it\inn{\gamma(s_{\nu})}{\xi}} m[a_{k,\ell}^{\nu, (\varepsilon)}](\xi;t) = \int_{\mathbb{R}} e^{-it \inn{\gamma(s) - \gamma(s_{\nu})}{\xi}} a_{k,\ell}^{\nu, (\varepsilon)}(\xi;s) \chi(s)\rho(t)\,\mathrm{d} s.
\end{equation*}
Passing the differential operator $\nabla_{\bm{v}_j}$ into the $s$-integral, we therefore have
\begin{equation}\label{J3 ker 0}
\big|\nabla_{\bm{v}_j}^N\big[ e^{it\inn{\gamma(s_{\nu})}{\xi}}m[a_{k,\ell}^{\nu, (\varepsilon)}](\xi;t)\big]\big| \lesssim 2^{-(k-\ell)/2} 2^{O(\varepsilon k)} \sup_{s \in \mathbb{R}} \big|\nabla_{\bm{v}_j}^N\big[ e^{-it \inn{\gamma(s) - \gamma(s_{\nu})}{\xi}} a_{k,\ell}^{\nu, (\varepsilon)}(\xi;t;s)\big]\big| \, \rho(t).
\end{equation}
Here we have used the $s$-support properties of $a_{k,\ell}^{\nu, (\varepsilon)}$; in particular, the definition \eqref{akell dec}. \medskip

Consider the oscillatory factor $e^{-it \inn{\gamma(s) - \gamma(s_{\nu})}{\xi}}$ on the right-hand side of \eqref{J3 ker 0}. The $\xi$ derivatives of this function can be controlled on $\mathrm{supp}\, a_{k,\ell}^{\nu, (\varepsilon)}$ by noting that
\begin{equation*}
|\inn{\gamma(s) - \gamma(s_{\nu})}{\bm{v}_j}| \leq \int_{s_{\nu}}^s |\inn{\gamma'(\sigma)}{\bm{v}_j}|\,\mathrm{d} \sigma \lesssim |s - s_{\nu}|^j \lesssim 2^{-j(k-\ell)/2} 2^{O(\varepsilon k)} \qquad \textrm{for $1 \leq j \leq 3$,}
\end{equation*}
where we have used \eqref{Frenet bound alt 1}, together with \eqref{akellnu dec 1} and the triangle inequality for the last inequality. Thus, by the Leibniz rule, the problem is reduced to showing
\begin{equation}\label{J3 ker 1}
|\nabla_{\bm{v}_j}^{N} a_{k,\ell}^{\nu, (\varepsilon)}(\xi;t;s)| \lesssim_N 2^{-(j(k-\ell)/2 \wedge k)N} 2^{\varepsilon \ell N} \qquad \textrm{for all $1 \leq j \leq 3$ and all $N \in \mathbb{N}$.}
\end{equation}
For all $N \in \mathbb{N}$, we claim the following:
\begin{itemize}
\item For $\xi \in \mathrm{supp}_{\xi}\, a_{k, \ell}^{\nu, (\varepsilon)}$ with $0 \leq \ell \leq \lfloor k/3 \rfloor$,
\begin{equation}\label{J3 ker 2a}
2^{\ell}|\nabla_{\bm{v}_j}^N \theta_2(\xi)|, \quad 2^{-k+2\ell}|\nabla_{\bm{v}_j}^N u(\xi)|\;\; \lesssim_N \;\; 2^{-(j(k-\ell)/2 \wedge k)N} 2^{\varepsilon \ell N};
\end{equation}
\item For $\xi \in \mathrm{supp}_{\xi}\, a_{k, \ell}^{\nu, (\varepsilon)}$ with $0 \leq \ell < \lfloor k/3 \rfloor_{\,\varepsilon}$,
\begin{equation}\label{J3 ker 2b}
2^{(k-\ell)/2}|\nabla_{\bm{v}_j}^N \theta_1(\xi)| \lesssim_N 2^{-(j(k-\ell)/2 \wedge k)N}.
\end{equation}
\end{itemize}
Assuming that this is so, the derivative bounds \eqref{J3 ker 1} follow directly from the chain and Leibniz rule, applying \eqref{J3 ker 2a} and \eqref{J3 ker 2b}.\medskip

The claimed bound \eqref{J3 ker 2a} follows from repeated application of the chain rule, provided
\begin{subequations}
\begin{align}\label{J3 ker 3a}
|\inn{\gamma^{(3)}\circ \theta_2(\xi)}{\xi}| &\gtrsim 2^k, \\
\label{J3 ker 3b}
|\inn{\gamma^{(K)}\circ \theta_2(\xi)}{\xi}| &\lesssim_K 2^{k +\ell(K-3)}, \\
\label{J3 ker 3c}
|\inn{\gamma^{(K)}\circ \theta_2(\xi)}{\bm{v}_j}| &\lesssim_K 2^{-(j(k-\ell)/2 \wedge k) + k +\ell(K-3)}2^{\varepsilon \ell}
\end{align}
\end{subequations}
hold for all $K \geq 2$ and all $\xi \in \mathrm{supp}_{\xi}\, a_{k,\ell}^{\nu,(\varepsilon)}$. In particular, assuming \eqref{J3 ker 3a}, \eqref{J3 ker 3b} and \eqref{J3 ker 3c}, the bounds in \eqref{J3 ker 2a} are then a consequence of Lemma~\ref{imp deriv lem} in the appendix: \eqref{J3 ker 2a} corresponds to \eqref{multi imp der bound} and \eqref{multi Faa di Bruno eq 2} whilst the hypotheses in the above display correspond to \eqref{multi imp deriv 2} and \eqref{multi imp deriv 3} (see Example~\ref{deriv ex}). Here the parameters featured in the appendix are chosen as follows:
\begin{center}
\begin{tabular}{|c|c|c|c|c|c|c|}
\hline
 & & & & & & \\[-0.8em]
$g$ & $h$ & $A$ & $B$ & $M_1$ & $M_2$ & $\mathbf{e}$ \\
 & & & & & & \\[-0.8em]
\hline
 & & & & & & \\[-0.8em]
$\gamma''$ & $\gamma'$ & $2^{k-\ell}$ & $2^{k-2\ell}$ & $2^{-(j(k-\ell)/2 \wedge k)} 2^{\varepsilon \ell}$ & $2^{\ell}$ & $\bm{v}_j$ \\
 & & & & & & \\[-0.8em]
\hline
\end{tabular}
\end{center}
The conditions \eqref{J3 ker 3a}, \eqref{J3 ker 3b} and \eqref{J3 ker 3c} are direct consequences of the support properties of the $a_{k,\ell}^{\nu, (\varepsilon)}$. Indeed, \eqref{J3 ker 3a} and the $K \geq 3$ case of \eqref{J3 ker 3b} are trivial consequences of the localisation of the symbol $a_k$. The remaining $K = 2$ case of \eqref{J3 ker 3b} follows immediately since $\inn{\gamma'' \circ \theta_2(\xi)}{\xi}=0$. Finally, the right-hand side of \eqref{J3 ker 3c} is always greater than 1 unless $j = 3$ and $K = 2$, and so we can immediately reduce to this case. If $0 \leq \ell < \lfloor k/3 \rfloor_{\,\varepsilon}$, then \eqref{Frenet bound alt 1} together with Lemma~\ref{root control lem} and the $\theta_1$ localisation in \eqref{akellnu dec 1} implies
\begin{equation*}
|\inn{\gamma^{(2)}\circ \theta_2(\xi)}{\bm{v}_3}| \lesssim |\theta_2(\xi) - s_{\nu}| \leq |\theta_2(\xi) - \theta_1(\xi)| + |\theta_1(\xi) - s_{\nu}| \lesssim 2^{-\ell}.
\end{equation*}
On the other hand, if $\lfloor k/3 \rfloor_{\,\varepsilon} \leq \ell \leq \lfloor k/3 \rfloor$, then, by a similar argument, $|\inn{\gamma^{(2)}\circ \theta_2(\xi)}{\bm{v}_3}| \lesssim 2^{-\ell(1 -\varepsilon)}$.
This concludes the proof of \eqref{J3 ker 3c}.\medskip

Similarly, the claimed bound \eqref{J3 ker 2b} follows from repeated application of the chain rule, provided
\begin{subequations}
\begin{align}\label{J3 ker 4a}
|\inn{\gamma^{(2)}\circ \theta_1(\xi)}{\xi}| &\gtrsim 2^{k-\ell}, \\
\label{J3 ker 4b}
|\inn{\gamma^{(K)}\circ \theta_1(\xi)}{\xi}| &\lesssim_K 2^{K(k-\ell)/2}, \\
\label{J3 ker 4c}
|\inn{\gamma^{(K)}\circ \theta_1(\xi)}{\bm{v}_j}| &\lesssim_K 2^{-(j(k-\ell)/2 \wedge k) + K(k-\ell)/2}
\end{align}
\end{subequations}
hold for all $K \geq 2$ and all $\xi \in \mathrm{supp}\, a_{k,\ell}^{\nu,(\varepsilon)}$ when $0 \leq \ell < \lfloor k/3 \rfloor_{\,\varepsilon}$. This again follows by Lemma~\ref{imp deriv lem} in the appendix. Here the parameters are chosen as follows:
\begin{center}
\begin{tabular}{|c|c|c|c|c|}
\hline
 & & & & \\[-0.8em]
$g$ & $A$ & $M_1$ & $M_2$ & $\mathbf{e}$ \\
 & & & & \\[-0.8em]
\hline
 & & & & \\[-0.8em]
$\gamma'$ & $2^{(k-\ell)/2}$ & $2^{-(j(k-\ell)/2 \wedge k)}$ & $2^{(k-\ell)/2}$ & $\bm{v}_j$ \\
 & & & & \\[-0.8em]
\hline
\end{tabular}
\end{center}
The conditions \eqref{J3 ker 4a}, \eqref{J3 ker 4b} and \eqref{J3 ker 4c} are direct consequences of the support properties of the $a_{k,\ell}^{\nu, (\varepsilon)}$ for $0 \leq \ell < \lfloor k/3 \rfloor_{\,\varepsilon}$. Indeed, \eqref{J3 ker 4a} and the $K = 2$ case of \eqref{J3 ker 4b} are just restatements of the condition $|v^{\pm}(\xi)| \sim 2^{k-\ell}$, which holds due to Lemma~\ref{root control lem}. The $K \geq 3$ case of \eqref{J3 ker 4b} follows immediately from the localisation of the symbols $a_k$. Finally, the right-hand side of \eqref{J3 ker 4c} is always greater than 1 unless $j = 3$ and $K = 2$, and so we can immediately reduce to this case. However, \eqref{Frenet bound alt 1} together with the $\theta_1$ localisation in \eqref{akellnu dec 1} implies
\begin{equation*}
|\inn{\gamma^{(2)}\circ \theta_1(\xi)}{\bm{v}_3}| \lesssim |\theta_1(\xi) - s_{\nu}| \lesssim 2^{-(k-\ell)/2}2^{\varepsilon k} \lesssim 2^{-\ell},
\end{equation*}
which concludes the proof of \eqref{J3 ker 4c}.
\end{proof}

\subsection{Localising the input function}\label{f freq loc subsec} At this juncture it is useful to note some further geometric properties of the support of the multipliers $m[a_{k,\ell}^{\nu, (\varepsilon)}]$ featured in the decomposition.\medskip

Recall from Lemma~\ref{J=3 supp lem} a) that
\begin{equation}\label{rec supp eq a}
\mathrm{supp}_{\xi}\, a_{k,\ell}^{\nu, (\varepsilon)} \subseteq 2^k \cdot \pi_1(s_{\mu}; 2^{-\ell}) \qquad \textrm{for all $\nu \in \mathfrak{N}_{\ell}(\mu)$,}
\end{equation}
where $s_\mu:=2^{-\ell}\mu$. The right-hand set is contained in a certain sector in the frequency space. In particular, given $0 \leq \ell \leq \lfloor k/3 \rfloor$ and $m \in \mathbb{Z}$ define
\begin{equation}\label{sector def}
\Delta_{k, \ell}(m) := \big\{ \xi \in \widehat{\mathbb{R}}^3 : |\xi_2 - \xi_3 2^{-\ell}m| \leq C 2^{-\ell} \xi_3 \textrm{ and } C^{-1} 2^k \leq \xi_3 \leq C 2^k \big\},
\end{equation}
where $C \geq 1$ is an absolute constant, chosen sufficiently large so as to satisfy the requirements of the forthcoming argument.
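To see how such sectors arise, it is instructive to consider the model case $\gamma(s) = (s, s^2/2, s^3/6)$ (used purely for illustration): there the binormal $\mathbf{e}_3(s)$ is parallel to $(s^2/2, -s, 1)$, so that normalising the third component to equal $1$ gives the function
\begin{equation*}
G(s) = \big(\tfrac{s^2}{2},\, -s,\, 1\big)
\end{equation*}
appearing in the proof below, with second component $G_2(s) = -s$ and $|G_2'(s)| = 1$. For $\xi \in 2^k \cdot \pi_1(s_{\mu}; 2^{-\ell})$ the ratio $\xi_2/\xi_3$ then lies within $O(2^{-\ell})$ of $G_2(s_{\mu})$, which is precisely the localisation described by the sets $\Delta_{k, \ell}(m)$.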
\begin{lemma}\label{sec supp lem} If $\mu$, $\nu \in \mathbb{Z}$ satisfy $\nu \in \mathfrak{N}_{\ell}(\mu)$, then there exists some $ m(\mu) \in \mathbb{Z}$ such that
\begin{equation}\label{f freq claim}
2^k \cdot \pi_1(s_{\mu}; 2^{-\ell}) \subseteq \Delta_{k, \ell}\big(m(\mu)\big).
\end{equation}
Furthermore, for each fixed $k$ and $\ell$, given $m \in \mathbb{Z}$ there are only $O(1)$ values of $\mu \in \mathbb{Z}$ such that $m = m(\mu)$.
\end{lemma}

\begin{proof}
Define $G \colon I_0 \to \mathbb{R}^3$ by $G(s) := \mathbf{e}_{33}(s)^{-1} \mathbf{e}_3(s)$. As a consequence of the Frenet equations, the vectors $G'(s)$, $G''(s)$ span $\mathbb{R}^2 \times \{0\}$. Given $\xi \in \widehat{\mathbb{R}}^3$, it follows that there exist $\eta_1$, $\eta_2 \in \mathbb{R}$ such that
\begin{equation}\label{f freq loc 1}
\xi - \xi_3 G(s) = \sum_{j=1}^2 2^{- \ell j} \eta_j G^{(j)}(s).
\end{equation}
Taking the inner product of both sides of this identity with respect to the $\mathbf{e}_j(s)$ for $j = 1$, $2$ and applying the Frenet equations yields
\begin{equation}\label{f freq loc 2}
\begin{bmatrix} \inn{\xi}{\mathbf{e}_1(s)} \\ \inn{\xi}{\mathbf{e}_2(s)} \end{bmatrix} = \begin{bmatrix} 0 & \inn{G^{(2)}(s)}{\mathbf{e}_1(s)} \\ \inn{G^{(1)}(s)}{\mathbf{e}_2(s)} & \inn{G^{(2)}(s)}{\mathbf{e}_2(s)} \end{bmatrix} \begin{bmatrix} 2^{-\ell} \eta_1 \\ 2^{-2\ell} \eta_2 \end{bmatrix}
\end{equation}
where the anti-diagonal entries of the right-hand $2 \times 2$ matrix have size $\sim 1$.\footnote{A similar computation is carried out in more detail in \S\ref{geo obs sec}.} Let $\xi \in 2^k \cdot \pi_1(s_{\mu}; 2^{-\ell})$ so that $|\inn{\xi}{\mathbf{e}_1(s_{\mu})}| \lesssim 2^{k-2\ell}$ and $|\inn{\xi}{\mathbf{e}_2(s_{\mu})}| \lesssim 2^{k-\ell}$. Combining these bounds with \eqref{f freq loc 1} and \eqref{f freq loc 2}, it follows that
\begin{equation*}
|\xi_2 - \xi_3G_2(s_{\mu})| \leq |\xi - \xi_3G(s_{\mu})| \lesssim 2^{k-\ell} \sim 2^{-\ell} \xi_3.
\end{equation*}
If we take $m(\mu)$ to be the integer which minimises $|2^{-\ell}m -G_2(s_{\mu})|$, then we obtain \eqref{f freq claim}. On the other hand, the Frenet equations ensure that $G_2(s) = \mathbf{e}_{33}(s)^{-1}\mathbf{e}_{32}(s)$ satisfies $|G_2'(s)| \sim 1$ for all $s \in I_0$. Consequently, the assignment $\mu \mapsto m(\mu)$ is $O(1)$-to-1, as claimed.
\end{proof}

For each $\mu \in \mathbb{Z}$ define the smooth cutoff function
\begin{equation*}
\chi_{k,\ell}^{*,\mu}(\xi) := \eta\big(C^{-1}|2^{\ell}\xi_2/\xi_3 - m(\mu)|\big) \, \big( \eta(C^{-1}2^{-k}\xi_3) - \eta(2C2^{-k} \xi_3)\big).
\end{equation*}
If $\xi \in \mathrm{supp}_{\xi}\, a_{k,\ell}^{\nu, (\varepsilon)}$ for $\nu \in \mathfrak{N}_{\ell}(\mu)$, then \eqref{rec supp eq a} and Lemma~\ref{sec supp lem} imply $\chi_{k,\ell}^{*,\mu}(\xi) = 1$.
Thus, if we define the corresponding frequency projection
\begin{equation*}
f^{*, \mu}_{k,\ell} := \chi_{k,\ell}^{*,\mu}(D)f,
\end{equation*}
it follows that
\begin{equation*}
m[a_{k,\ell}^{\nu, (\varepsilon)}](D;\,\cdot\,)f = m[a_{k,\ell}^{\nu, (\varepsilon)}](D;\,\cdot\,)f^{*, \mu}_{k,\ell} \qquad \textrm{for all $\nu \in \mathfrak{N}_{\ell}(\mu)$.}
\end{equation*}
Recall from Lemma~\ref{J=3 supp lem} b) that we also have
\begin{equation}\label{rec supp eq b}
\mathrm{supp}_{\xi}\, a_{k,\ell}^{\nu, (\varepsilon)} \subseteq 2^{k-\ell} \cdot \pi_0(s_{\nu}; 2^{-(k-\ell)/2}, 2^{\ell}), \qquad \textrm{where $s_\nu:=2^{-(k-\ell)/2}\nu$.}
\end{equation}
Fix some $0 \leq \ell \leq \lfloor k/3 \rfloor$ and $\mu \in \mathbb{Z}$ with $s_{\mu} := 2^{-\ell} \mu \in [-1,1]$. To simplify notation, let $\sigma := s_{\mu}$, $\lambda := 2^{-\ell}$ and let $\widetilde{\gamma} := \gamma_{\sigma, \lambda}$ denote the rescaled curve, as defined in Definition~\ref{rescaled curve def}, so that
\begin{equation}\label{J4 gamma resc}
\widetilde{\gamma}(s) := \big([\gamma]_{\sigma, \lambda}\big)^{-1} \big( \gamma(\sigma + \lambda s) - \gamma(\sigma)\big).
\end{equation}
Let $(\tilde{\mathbf{e}}_j)_{j=1}^3$ denote the Frenet frame defined with respect to $\widetilde{\gamma}$. Given $0 < r \leq 1$ and $s \in I_0$, recall the definition of the $(0,r)$-\textit{Frenet boxes} (with respect to $(\tilde{\mathbf{e}}_j)_{j=1}^3$) introduced in Definition~\ref{def Frenet box}:
\begin{equation*}
\pi_{0,\widetilde{\gamma}}(s;r):= \big\{\xi \in \widehat{\mathbb{R}}^3: |\inn{\tilde{\mathbf{e}}_1(s)}{\xi}| \lesssim r, \quad |\inn{\tilde{\mathbf{e}}_2(s)}{\xi}| \sim 1, \quad |\inn{\tilde{\mathbf{e}}_3(s)}{\xi}| \lesssim 1\big\}.
\end{equation*}
Note that all these definitions depend on the choice of $\mu$ and $\ell$, but we suppress this dependence in the notation.

\begin{lemma}\label{freq resc lem} With the above setup, and $\nu \in \mathfrak{N}_{\ell}(\mu)$,
\begin{equation*}
[\gamma]_{\sigma,\lambda}^{\top} \cdot 2^{k-\ell} \cdot \pi_{0,\gamma}(s_{\nu}; 2^{-(k-\ell)/2}, 2^{\ell}) \subseteq 2^{k-3\ell} \cdot \pi_{0,\widetilde{\gamma}}(\tilde{s}_{\nu}; 2^{-(k-3\ell)/2}),
\end{equation*}
where $\tilde{s}_{\nu} := 2^{\ell} (s_{\nu} - s_{\mu})$ for $s_{\nu} := 2^{-(k - \ell)/2}\nu$.
\end{lemma}

\begin{proof}
Let $\xi \in 2^{k-\ell} \cdot \pi_{0,\gamma}(s_{\nu}; 2^{-(k-\ell)/2}, 2^{\ell})$ so that
\begin{equation*}
|\inn{\mathbf{e}_1(s_{\nu})}{\xi}| \lesssim 2^{(k-\ell)/2}, \quad |\inn{\mathbf{e}_2(s_{\nu})}{\xi}| \sim 2^{k-\ell}, \quad |\inn{\mathbf{e}_3(s_{\nu})}{\xi}| \sim 2^k.
\end{equation*}
Since the matrix corresponding to the change of basis from $\big(\mathbf{e}_j(s_{\nu})\big)_{j=1}^3$ to $\big(\gamma^{(j)}(s_{\nu})\big)_{j=1}^3$ is lower triangular and an $O(\delta_0)$ perturbation of the identity, provided $\delta_0$ is sufficiently small,
\begin{equation*}
|\inn{\gamma^{(1)}(s_{\nu})}{\xi}| \lesssim 2^{(k-\ell)/2}, \quad |\inn{\gamma^{(2)}(s_{\nu})}{\xi}| \sim 2^{k-\ell}, \quad |\inn{\gamma^{(3)}(s_{\nu})}{\xi}| \sim 2^k.
\end{equation*}
Now define $\tilde{\xi} := \big([\gamma]_{\sigma, \lambda}\big)^{\top} \cdot \xi$. Since $\lambda := 2^{-\ell}$, it follows from the definition of $\widetilde{\gamma}$ from \eqref{J4 gamma resc} that
\begin{equation*}
\inn{\widetilde{\gamma}^{(j)}(\tilde{s}_{\nu})}{\tilde{\xi}\,} = 2^{-j\ell}\inn{\gamma^{(j)}(s_{\nu})}{\xi} \qquad \textrm{for $j \geq 1$}.
\end{equation*}
Combining the above observations,
\begin{equation*}
|\inn{\widetilde{\gamma}^{(1)}(\tilde{s}_{\nu})}{\tilde{\xi}\,}| \lesssim 2^{(k-3\ell)/2}, \quad |\inn{\widetilde{\gamma}^{(2)}(\tilde{s}_{\nu})}{\tilde{\xi}\,}| \sim 2^{k-3\ell}, \quad |\inn{\widetilde{\gamma}^{(3)}(\tilde{s}_{\nu})}{\tilde{\xi}\,}| \sim 2^{k-3\ell}.
\end{equation*}
Provided $\delta_0$ is sufficiently small, the desired result now follows since the matrix corresponding to the change of basis from $\big(\tilde{\mathbf{e}}_i(\tilde{s}_{\nu})\big)_{i=1}^3$ to $\big(\widetilde{\gamma}^{(i)}(\tilde{s}_{\nu})\big)_{i=1}^3$ is also lower triangular and an $O(\delta_0)$ perturbation of the identity.
\end{proof}

For $\nu \in \mathfrak{N}_{\ell}(\mu)$ define the smooth cutoff
\begin{equation}\label{nu smooth cutoff}
\chi_{k, \ell}^{\nu}(\xi) := \chi_{\tilde{\pi}}\big(C^{-1} 2^{-(k-3\ell)} [\gamma]_{\sigma, \lambda}^{\top} \cdot \xi \big)
\end{equation}
where $\chi_{\tilde{\pi}}$ is as defined in \eqref{chi pi} for $\tilde{\pi} := \pi_{0,\widetilde{\gamma}}(\tilde{s}_{\nu}; 2^{-(k-3\ell)/2})$ as above. If $\xi \in \mathrm{supp}_{\xi}\, a_{k,\ell}^{\nu, (\varepsilon)}$, then \eqref{rec supp eq b} and Lemma~\ref{freq resc lem} imply $\chi_{k,\ell}^{\nu}(\xi) = 1$. Thus if we define the corresponding frequency projection
\begin{equation*}
f_{k,\ell}^{\nu} := \chi_{k,\ell}^{\nu}(D)f_{k,\ell}^{*,\mu},
\end{equation*}
it follows that
\begin{equation*}
m[a_{k,\ell}^{\nu, (\varepsilon)}](D;\,\cdot\,)f = m[a_{k,\ell}^{\nu, (\varepsilon)}](D;\,\cdot\,)f_{k,\ell}^{*, \mu} = m[a_{k,\ell}^{\nu, (\varepsilon)}](D;\,\cdot\,)f_{k,\ell}^{\nu} \qquad \textrm{for all $\nu \in \mathfrak{N}_{\ell}(\mu)$.}
\end{equation*}

\subsection{\texorpdfstring{$L^2$}{}-weighted bounds in \texorpdfstring{$\mathbb{R}^{3+1}$}{}}\label{L2 wtd 3+1 sec} We apply a standard duality argument to analyse the square function appearing in Proposition~\ref{J=3 rev sf prop}.
In particular, we use an $L^2$-weighted approach and the key ingredient is the Nikodym-type maximal inequality from \S\ref{Nikodym lem subsec}.\medskip

By duality, there exists a non-negative $g \in L^2(\mathbb{R}^{3+1})$ with $\|g\|_{L^2(\mathbb{R}^{3+1})} = 1$ such that
\begin{equation*}
\big\| \big(\sum_{\nu \in \mathbb{Z}} |m[a_{k,\ell}^{\nu,(\varepsilon)}](D; \,\cdot\,) f|^2 \big)^{1/2} \big\|_{L^4(\mathbb{R}^{3+1})}^2 = \sum_{\nu \in \mathbb{Z}} \int_{\mathbb{R}^{3+1}} |m[a_{k,\ell}^{\nu,(\varepsilon)}](D;t)f(x)|^2 g(x;t)\,\mathrm{d} x \mathrm{d} t.
\end{equation*}
By the observations of the previous subsection,
\begin{equation*}
m[a_{k,\ell}^{\nu,(\varepsilon)}](D;t)f = m[a_{k,\ell}^{\nu,(\varepsilon)}](D;t) f_{k,\ell}^{\nu}.
\end{equation*}
Let $\psi_{\,\mathcal{T}_{k,\ell}(s_{\nu})}$ be the weight introduced in Lemma~\ref{J=3 ker lem}. Since the $\psi_{\,\mathcal{T}_{k,\ell}(s_{\nu})}(\,\cdot\,;t)$ are $L^1$-normalised uniformly in $t$, it follows from Lemma~\ref{J=3 ker lem} and the Cauchy--Schwarz inequality that
\begin{equation}\label{L2 weighted 3+1 1}
|m[a_{k,\ell}^{\nu,(\varepsilon)}](D;t)f(x)|^2 \lesssim 2^{-(k-\ell)} 2^{O(\varepsilon k)} \psi_{\,\mathcal{T}_{k,\ell}(s_{\nu})}(\,\cdot\,;t) \ast |f_{k,\ell}^{\nu}|^2(x) \, \rho(t).
\end{equation}
Define the Nikodym-type maximal operator
\begin{equation*}
\widetilde{\mathcal{N}}_{k,\ell}^{\,\mathrm{sing}}\,g(x) := \max_{\nu \in \mathbb{Z} \,:\, |s_{\nu}| \leq \delta_0} \int_{\mathbb{R}^4} |g(x - y,t)| \psi_{\,\mathcal{T}_{k,\ell}(s_{\nu})}(y,t) \, \rho(t)\,\mathrm{d} y \mathrm{d} t.
\end{equation*}
By \eqref{L2 weighted 3+1 1} and Fubini's theorem, it follows that
\begin{equation*}
\sum_{\nu \in \mathbb{Z}} \int_{\mathbb{R}^{3+1}} | m[a_{k,\ell}^{\nu, (\varepsilon)}](D;t)f(x)|^2 g(x;t)\,\mathrm{d} x \mathrm{d} t \lesssim 2^{-(k-\ell)}2^{O(\varepsilon k)} \int_{\mathbb{R}^{3}}\sum_{\nu \in \mathbb{Z}} |f_{k,\ell}^{\nu}(x)|^2 \widetilde{\mathcal{N}}_{k,\ell}^{\,\mathrm{sing}}\,g(x)\,\mathrm{d} x.
\end{equation*}
Note that $\widetilde{\mathcal{N}}_{k,\ell}^{\,\mathrm{sing}}$ is essentially a smooth version of the maximal operator $\mathcal{N}_{\mathbf{r}}^{\,\mathrm{sing}}$ from \S\ref{Nikodym lem subsec} with parameters $r_1 := 2^{-(k-\ell)/2}$, $r_2 := 2^{-(k-\ell)}$ and $r_3 := 2^{-k}$. By the restriction $0 \leq \ell \leq \lfloor k/3 \rfloor$, it follows that this choice of $\mathbf{r}$ satisfies the hypotheses
\begin{equation*}
r_3 \leq r_2 \leq r_1 \leq r_2^{1/2} \qquad \textrm{and} \qquad r_2 \leq r_{1}^{1/2} r_3^{1/2}
\end{equation*}
from the statement of Proposition~\ref{Nikodym prop}. Thus, by pointwise dominating $\psi_{\,\mathcal{T}_{k,\ell}(s_{\nu})}$ by a weighted series of indicator functions and applying Proposition~\ref{Nikodym prop}, one readily deduces the norm bound
\begin{equation*}
\big\|\widetilde{\mathcal{N}}_{k,\ell}^{\,\mathrm{sing}}\big\|_{L^2(\mathbb{R}^{3+1}) \to L^2(\mathbb{R}^3)} \lesssim_{\varepsilon} 2^{\varepsilon k}.
\end{equation*}
By combining the above observations with an application of the Cauchy--Schwarz inequality,
\begin{equation}\label{L2 weighted 3+1 2}
\big\| \big(\sum_{\nu \in \mathbb{Z}} |m[a_{k,\ell}^{\nu,(\varepsilon)}](D; \,\cdot\,) f|^2 \big)^{1/2} \big\|_{L^4(\mathbb{R}^{3+1})} \lesssim 2^{-(k-\ell)/2 + O(\varepsilon k)} \big\| \big(\sum_{\nu \in \mathbb{Z}} |f_{k,\ell}^{\nu}|^2\big)^{1/2}\big\|_{L^4(\mathbb{R}^3)}.
\end{equation}
It remains to bound the right-hand square function, which involves only functions of $3$ variables.

\subsection{\texorpdfstring{$L^2$}{}-weighted bounds in \texorpdfstring{$\mathbb{R}^3$}{}} A similar $L^2$-weighted approach is now applied one dimension lower to estimate the square function appearing in the right-hand side of \eqref{L2 weighted 3+1 2}.

\begin{proposition}\label{L4 forward SF prop} Let $k \in \mathbb{N}$, $0 \leq \ell \leq \lfloor k/3 \rfloor$ and $\varepsilon>0$. Then
\begin{equation}\label{L2 wtd 3 main}
\big\| \big(\sum_{\nu \in \mathbb{Z}} |f_{k,\ell}^{\nu}|^2\big)^{1/2}\big\|_{L^4(\mathbb{R}^3)} \lesssim_{\varepsilon} 2^{O(\varepsilon k)} \|f\|_{L^4(\mathbb{R}^3)}.
\end{equation}
\end{proposition}

\begin{proof}
By duality, there exists a non-negative $w \in L^2(\mathbb{R}^3)$ with $\|w\|_{L^2(\mathbb{R}^3)} = 1$ such that
\begin{equation}\label{L2 wtd 3 0}
\big\| \big(\sum_{\nu \in \mathbb{Z}} |f_{k,\ell}^{\nu}|^2\big)^{1/2}\big\|_{L^4(\mathbb{R}^3)}^2 = \sum_{\mu \in \mathbb{Z}} \int_{\mathbb{R}^3} \sum_{\nu \in \mathfrak{N}_{\ell}(\mu)} |f_{k,\ell}^{\nu}(x)|^2 w(x)\,\mathrm{d} x.
\end{equation}
Recall that the $f_{k,\ell}^{\nu}$ are defined by
\begin{equation*}
f_{k,\ell}^{\nu} := \chi_{k,\ell}^{\nu}(D)f_{k,\ell}^{*,\mu} \qquad \textrm{for $\nu \in \mathfrak{N}_{\ell}(\mu)$}
\end{equation*}
where the smooth cutoff function $\chi_{k,\ell}^{\nu}$ is as defined in \eqref{nu smooth cutoff}. Fix $\mu$ and, as in \S\ref{f freq loc subsec}, let $\sigma := s_{\mu}$ and $\lambda := 2^{-\ell}$. Define $\tilde{f}_{k,\ell}^{\nu} := f_{k,\ell}^{\nu} \circ [\gamma]_{\sigma,\lambda}$, $\tilde{f}_{k,\ell}^{\,*, \mu} := f_{k,\ell}^{*, \mu} \circ [\gamma]_{\sigma,\lambda}$ and $\tilde{w} := w \circ [\gamma]_{\sigma, \lambda}$ so, by a change of variables,
\begin{equation}\label{L2 wtd 3 1}
\int_{\mathbb{R}^3} \sum_{\nu \in \mathfrak{N}_{\ell}(\mu)} |f_{k,\ell}^{\nu}(x)|^2\, w(x)\,\mathrm{d} x = |\det[\gamma]_{\sigma, \lambda}| \int_{\mathbb{R}^3} \sum_{\nu \in \mathfrak{N}_{\ell}(\mu)} |\tilde{f}_{k,\ell}^{\nu}(x)|^2\, \tilde{w}(x)\,\mathrm{d} x.
\end{equation}
By the definition of $\chi_{k,\ell}^{\nu}$ and Lemma \ref{freq resc lem}, each of the $\tilde{f}_{k,\ell}^{\nu}$ is Fourier supported in a $2^{k-3\ell}$ dilate of a $(0, 2^{-(k-3\ell)/2})$-Frenet box.
In view of this, we may apply Proposition~\ref{f SF prop} to deduce that
\begin{equation}\label{L2 wtd 3 1 b}
\int_{\mathbb{R}^3} \sum_{\nu \in \mathfrak{N}_{\ell}(\mu)} |f_{k,\ell}^{\nu}(x)|^2 \, w(x)\,\mathrm{d} x \lesssim_{\varepsilon} 2^{\varepsilon k} |\det[\gamma]_{\sigma, \lambda}| \int_{\mathbb{R}^3} |\tilde{f}_{k,\ell}^{\,*,\,\mu}(x)|^2 \, \widetilde{\mathcal{N}}^{\,\mu, (\varepsilon)}_{k,\ell}\tilde{w}(x)\,\mathrm{d} x
\end{equation}
where the operator $\widetilde{\mathcal{N}}^{\,\mu, (\varepsilon)}_{k,\ell}$ is defined by
\begin{equation*}
\widetilde{\mathcal{N}}^{\,\mu, (\varepsilon)}_{k,\ell} := \mathrm{Dil}_{2^{k-3\ell}} \circ \widetilde{\mathcal{N}}^{\,(\varepsilon)}_{\widetilde{\gamma},\tilde{r}} \circ \mathrm{Dil}_{2^{-(k-3\ell)}}
\end{equation*}
for $\tilde{r} := 2^{-(k-3\ell)/2}$ and $\mathrm{Dil}_{\rho} \colon L^2(\mathbb{R}^3) \to L^2(\mathbb{R}^3)$ the dilation operator $\mathrm{Dil}_{\rho}\, f := f(\rho \;\cdot\,)$ for $\rho > 0$. Here $\widetilde{\mathcal{N}}^{\,(\varepsilon)}_{\widetilde{\gamma},\tilde{r}}$ is the maximal operator featured in the statement of Proposition~\ref{f SF prop} (the precise definition is given in \S\ref{forward SF sec}). Note that $\widetilde{\mathcal{N}}^{\,\mu, (\varepsilon)}_{k,\ell}$ depends on the choice of $\mu$. By reversing the change of variables in \eqref{L2 wtd 3 1}, we can show the following.

\begin{claim} There exists a maximal function $\mathcal{N}_{k,\ell}^{(\varepsilon)}$, independent of $\mu$, such that
\begin{equation*}
\big([\gamma]_{\sigma,\lambda}\big)^{-1} \circ \widetilde{\mathcal{N}}_{k, \ell}^{\mu, (\varepsilon)} \circ [\gamma]_{\sigma,\lambda} \cdot w(x) \lesssim_\gamma \mathcal{N}_{k, \ell}^{\,(\varepsilon)} w(x) \qquad \textrm{for all $x \in \mathbb{R}^3$},
\end{equation*}
where $[\gamma]_{\sigma, \lambda} \cdot f:= f \circ [\gamma]_{\sigma,\lambda}$, and
\begin{equation}\label{rescaled max L2 bound}
\|\mathcal{N}^{\, (\varepsilon)}_{k,\ell}\|_{L^2(\mathbb{R}^3) \to L^2(\mathbb{R}^3)} \lesssim_{\varepsilon} 2^{\varepsilon k}.
\end{equation}
\end{claim}

The proof of the above claim requires additional information on the form of the maximal operators arising from Proposition~\ref{f SF prop}. Since the definitions involved are somewhat unwieldy, the details are postponed until \S\ref{Nik scale subsec}. Assuming the claim, changing variables in \eqref{L2 wtd 3 1 b} yields
\begin{equation*}
\int_{\mathbb{R}^3} \sum_{\nu \in \mathfrak{N}_{\ell}(\mu)} |f_{k,\ell}^{\nu}(x)|^2 \, w(x)\,\mathrm{d} x \lesssim 2^{\varepsilon k} \int_{\mathbb{R}^3} |f_{k,\ell}^{*,\,\mu}(x)|^2 \, \mathcal{N}^{\, (\varepsilon)}_{k,\ell} w(x)\,\mathrm{d} x.
\end{equation*}
Recalling \eqref{L2 wtd 3 0}, one can sum the above inequality in $\mu \in \mathbb{Z}$, and use \eqref{rescaled max L2 bound} and the Cauchy--Schwarz inequality to obtain
\begin{equation}\label{L2 wtd 3 3a}
\big\| \big(\sum_{\nu \in \mathbb{Z}} |f_{k,\ell}^{\nu}|^2\big)^{1/2}\big\|_{L^4(\mathbb{R}^3)}^2 \lesssim_{\varepsilon} 2^{2\varepsilon k} \big\| \big(\sum_{\mu \in \mathbb{Z}} |f_{k,\ell}^{*,\mu}|^2\big)^{1/2}\big\|_{L^4(\mathbb{R}^3)}^2 .
\end{equation}
Recall that each $f_{k,\ell}^{*,\mu}$ corresponds to a (smooth) frequency projection of $f$ onto the set $\Delta_{k, \ell}(m(\mu))$, as defined in \eqref{sector def}. Furthermore, by Lemma~\ref{sec supp lem} the assignment $\mu \mapsto m(\mu)$ is $O(1)$-to-1. Thus, the right-hand square function in \eqref{L2 wtd 3 3a} falls under the scope of the classical sectorial square function of C\'ordoba \cite{Cordoba1982}. In particular, by \cite[Theorem 1]{Cordoba1982} (see also \cite{CS1995}) and a Fubini argument, we have
\begin{equation}\label{L2 wtd 3 3b}
\big\| \big(\sum_{\mu \in \mathbb{Z}} |f_{k,\ell}^{*,\mu}|^2\big)^{1/2}\big\|_{L^4(\mathbb{R}^3)}^2 \lesssim_\varepsilon 2^{O(\varepsilon k)} \|f\|_{L^4(\mathbb{R}^3)}^2.
\end{equation}
The inequalities \eqref{L2 wtd 3 3a} and \eqref{L2 wtd 3 3b} imply the desired estimate \eqref{L2 wtd 3 main}.
\end{proof}

\subsection{Putting everything together} We combine our observations to establish favourable $L^4$ and $L^2$ estimates for the localised multipliers $m[a_{k,\ell}]$.\medskip

\noindent \textit{$L^4$ estimates}. By Lemma~\ref{J=3 s loc lem},
\begin{equation*}
\|m[a_{k,\ell}](D;\,\cdot\,)f\|_{L^4(\mathbb{R}^{3+1})} \lesssim_{\varepsilon, N} \|m[a_{k,\ell}^{(\varepsilon)}](D;\,\cdot\,)f\|_{L^4(\mathbb{R}^{3+1})} + 2^{-kN} \|f\|_{L^4(\mathbb{R}^3)}.
\end{equation*}
Decompose each $m[a_{k,\ell}^{(\varepsilon)}]$ as a sum of multipliers $m[a_{k,\ell}^{\nu,(\varepsilon)}]$ as defined in \S\ref{J=3 Fourier loc subsec}. By Proposition~\ref{J=3 rev sf prop}, it follows that
\begin{equation*}
\|m[a_{k,\ell}](D;\,\cdot\,)f\|_{L^4(\mathbb{R}^{3+1})} \lesssim_{\varepsilon, N} 2^{(k-3\ell)/4} 2^{O(\varepsilon k)} \big\| \big(\sum_{\nu \in \mathbb{Z}} |m[a_{k,\ell}^{\nu,(\varepsilon)}](D; \,\cdot\,) f|^2 \big)^{1/2} \big\|_{L^4(\mathbb{R}^{3+1})} + 2^{-kN} \|f\|_{L^4(\mathbb{R}^3)} .
\end{equation*}
Thus, \eqref{L2 weighted 3+1 2} and \eqref{L2 wtd 3 main} combine with the previous display to yield the $L^4$ estimate
\begin{equation*}
\|m[a_{k,\ell}](D;\,\cdot\,)f\|_{L^4(\mathbb{R}^{3+1})} \lesssim_{\varepsilon} 2^{(k-3\ell)/4} 2^{-(k-\ell)/2} 2^{O(\varepsilon k)} \|f\|_{L^4(\mathbb{R}^3)}.
\end{equation*}
Since $\varepsilon > 0$ may be chosen arbitrarily, this corresponds to the $p=4$ case of Proposition~\ref{J=3 LS prop}.\medskip

\noindent \textit{$L^2$ estimates}.
Arguing as in the proof of the $L^4$ estimate, but now using Lemma~\ref{J=3 L2 rev sf prop} rather than Proposition~\ref{J=3 rev sf prop}, it follows that
\begin{equation*}
\|m[a_{k,\ell}](D;\,\cdot\,)f\|_{L^2(\mathbb{R}^{3+1})} \lesssim_{\varepsilon, N} \big\| \big(\sum_{\nu \in \mathbb{Z}} |m[a_{k,\ell}^{\nu,(\varepsilon)}](D; \,\cdot\,) f|^2 \big)^{1/2} \big\|_{L^2(\mathbb{R}^{3+1})} + 2^{-kN} \|f\|_{L^2(\mathbb{R}^3)} .
\end{equation*}
Recall from \eqref{L2 weighted 3+1 1} that
\begin{equation*}
|m[a_{k,\ell}^{\nu,(\varepsilon)}](D;t)f(x)|^2 \lesssim 2^{-(k-\ell)} 2^{O(\varepsilon k)} \psi_{\,\mathcal{T}_{k,\ell}(s_{\nu})}(\,\cdot\,;t) \ast |f_{k,\ell}^{\nu}|^2.
\end{equation*}
Thus, by Young's convolution inequality and the fact that the $\psi_{\,\mathcal{T}_{k,\ell}(s_{\nu})}(\,\cdot\,;t)$ are $L^1$-normalised,
\begin{equation*}
\|m[a_{k,\ell}](D;\,\cdot\,)f\|_{L^2(\mathbb{R}^{3+1})} \lesssim_{\varepsilon, N} 2^{-(k-\ell)/2} 2^{O(\varepsilon k)} \big\| \big(\sum_{\nu \in \mathbb{Z}} |f_{k,\ell}^{\nu}|^2 \big)^{1/2} \big\|_{L^2(\mathbb{R}^3)} + 2^{-kN} \|f\|_{L^2(\mathbb{R}^3)} .
\end{equation*}
Finally, as the $f_{k,\ell}^{\nu}$ have essentially disjoint Fourier support, by Plancherel's theorem,
\begin{equation*}
\|m[a_{k,\ell}](D;\,\cdot\,)f\|_{L^2(\mathbb{R}^{3+1})} \lesssim_{\varepsilon} 2^{-(k-\ell)/2} 2^{O(\varepsilon k)} \|f\|_{L^2(\mathbb{R}^3)} .
\end{equation*}
Since $\varepsilon > 0$ may be chosen arbitrarily, this corresponds to the $p=2$ case of Proposition~\ref{J=3 LS prop}.\medskip

Interpolating the above estimates, given $2 \leq p \leq 4$ and $\varepsilon > 0$, it follows that
\begin{equation*}
\|m[a_{k,\ell}](D;\,\cdot\,)f\|_{L^p(\mathbb{R}^{3+1})} \lesssim_{\varepsilon, N} 2^{-(k-\ell)/2}2^{(k-3\ell)(1/2 -1/p)} 2^{\varepsilon k} \|f\|_{L^p(\mathbb{R}^3)},
\end{equation*}
which is precisely the desired inequality from Proposition~\ref{J=3 LS prop}.

\section{Proof of the reverse square function inequality in \texorpdfstring{$\mathbb{R}^{3+1}$}{}}\label{reverse SF sec}

\subsection{Geometric observations}\label{geo obs sec} The first step is to relate the Frenet boxes $\pi_{2,\gamma}(s;r)$ to a codimension 2 cone $\widetilde{\Gamma}_2$ in the $(\xi, \tau)$-space. \medskip

\noindent \textit{The underlying cone.} Let $\gamma \in \mathfrak{G}_4(\delta_0)$ for $0 < \delta_0 \ll 1$ and $\mathbf{e}_j \colon[-1,1] \to S^3$ for $1 \leq j \leq 4$ be the associated Frenet frame. Without loss of generality, in proving Theorem~\ref{Frenet reverse SF theorem} we may always localise so that we only consider the portion of the curve lying over the interval $I_0 = [-\delta_0, \delta_0]$. In this case
\begin{equation}\label{Frenet loc}
\mathbf{e}_j(s) = \vec{e}_j + O(\delta_0) \qquad \textrm{for $1 \leq j \leq 4$}
\end{equation}
where, as usual, the $\vec{e}_j$ denote the standard basis vectors. We consider the conic surface $\widetilde{\Gamma}_2$ `generated' over the curve $s \mapsto \mathbf{e}_4(s)$. This is similar to the analysis of \cite{PS2007}, where a cone in $\mathbb{R}^3$ generated by the binormal $\mathbf{e}_3$ features prominently in the arguments.
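For orientation, in the model case of the moment curve $\gamma(s) = (s, s^2/2, s^3/6, s^4/24)$ (used purely for illustration), the vector $\mathbf{e}_4(s)$ is parallel to
\begin{equation*}
\big(-\tfrac{s^3}{6},\, \tfrac{s^2}{2},\, -s,\, 1\big),
\end{equation*}
so the cone constructed below is generated over a non-degenerate cubic curve in $\mathbb{R}^3$; the reader may wish to keep this example in mind in the computations that follow.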
\section{Proof of the reverse square function inequality in \texorpdfstring{$\mathbb{R}^{3+1}$}{}}\label{reverse SF sec}

\subsection{Geometric observations}\label{geo obs sec} The first step is to relate the Frenet boxes $\pi_{2,\gamma}(s;r)$ to a codimension 2 cone $\widetilde{\Gamma}_2$ in the $(\xi, \tau)$-space. \medskip

\noindent \textit{The underlying cone.} Let $\gamma \in \mathfrak{G}_4(\delta_0)$ for $0 < \delta_0 \ll 1$ and $\mathbf{e}_j \colon[-1,1] \to S^3$ for $1 \leq j \leq 4$ be the associated Frenet frame. Without loss of generality, in proving Theorem~\ref{Frenet reverse SF theorem} we may always localise so that we only consider the portion of the curve lying over the interval $I_0 = [-\delta_0, \delta_0]$. In this case
\begin{equation}\label{Frenet loc}
\mathbf{e}_j(s) = \vec{e}_j + O(\delta_0) \qquad \textrm{for $1 \leq j \leq 4$}
\end{equation}
where, as usual, the $\vec{e}_j$ denote the standard basis vectors. We consider the conic surface $\widetilde{\Gamma}_2$ `generated' over the curve $s \mapsto \mathbf{e}_4(s)$. This is similar to the analysis of \cite{PS2007}, where a cone in $\mathbb{R}^3$ generated by the binormal $\mathbf{e}_3$ features prominently in the arguments.

Define $G \colon I_0 \to \mathbb{R}^4$ by $G(s) := \mathbf{e}_{44}(s)^{-1} \mathbf{e}_4(s)$ for all $s \in I_0$ (note that $\mathbf{e}_{44}(s)$ is bounded away from $0$ by \eqref{Frenet loc}), so that $G$ is of the form
\begin{equation*}
G(s) = \begin{bmatrix} g(s) \\ 1 \end{bmatrix} \qquad \textrm{for} \qquad g(s) := \Big( \frac{\mathbf{e}_{41}(s)}{\mathbf{e}_{44}(s)},\; \frac{\mathbf{e}_{42}(s)}{\mathbf{e}_{44}(s)},\; \frac{\mathbf{e}_{43}(s)}{\mathbf{e}_{44}(s)} \Big)^{\top}.
\end{equation*}
For $U := [1/4, 4]\times I_0$, the $2$-dimensional cone $\widetilde{\Gamma}_2$ is parametrised by the function
\begin{equation*}
\widetilde{\Gamma}_2 \colon U \to \mathbb{R}^4, \qquad (\rho, s) \mapsto \rho\, G(s).
\end{equation*}

\noindent \textit{Non-degeneracy conditions.} We claim that the curve $g \colon I_0 \to \mathbb{R}^3$ is non-degenerate. To see this, first note that
\begin{equation*}
G^{(i)}(s) \in \langle \mathbf{e}_4(s), \mathbf{e}_4^{(1)}(s), \dots, \mathbf{e}_4^{(i)}(s) \rangle
\end{equation*}
where the right-hand expression denotes the linear span of the vectors $\mathbf{e}_4(s), \mathbf{e}_4^{(1)}(s), \dots, \mathbf{e}_4^{(i)}(s)$. Thus, one concludes from the Frenet formul\ae\ that
\begin{equation}\label{g a non deg 3}
G^{(i)}(s) \in \langle \mathbf{e}_{4 -i}(s), \dots, \mathbf{e}_4(s) \rangle \qquad \textrm{for $0 \leq i \leq 3$.}
\end{equation}
On the other hand, the Frenet formul\ae\ together with the Leibniz rule show that
\begin{equation*}
\inn{G^{(i)}(s)}{\mathbf{e}_{4 -i}(s)} = (-1)^i\Big( \prod_{\ell = 4 -i}^3 \tilde{\kappa}_{\ell}(s) \Big) \, \mathbf{e}_{44}(s)^{-1}
\end{equation*}
and, consequently,
\begin{equation}\label{g a non deg 5}
|\inn{G^{(i)}(s)}{\mathbf{e}_{4 -i}(s)} | \sim 1 \qquad \textrm{for all $1 \leq i \leq 3$.}
\end{equation}
Thus, combining \eqref{g a non deg 3} and \eqref{g a non deg 5}, it follows that the vectors $G^{(i)}(s)$, $1 \leq i \leq 3$, are linearly independent. From this, we immediately conclude that
\begin{equation*}
|\det [g]_{s}| \gtrsim 1
\end{equation*}
for all $s \in I_0$, which is the claimed non-degeneracy condition.
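For orientation, the first-order case of the above identities can be written out explicitly (a sketch, using the Frenet formula $\mathbf{e}_4'(s) = -\tilde{\kappa}_3(s)\,\mathbf{e}_3(s)$ in the normalisation consistent with the display above): differentiating $G = \mathbf{e}_{44}^{-1}\,\mathbf{e}_4$ gives
\begin{equation*}
G^{(1)}(s) = \big(\mathbf{e}_{44}^{-1}\big)'(s)\, \mathbf{e}_4(s) - \tilde{\kappa}_3(s)\, \mathbf{e}_{44}(s)^{-1}\, \mathbf{e}_3(s) \in \langle \mathbf{e}_3(s), \mathbf{e}_4(s)\rangle, \qquad \inn{G^{(1)}(s)}{\mathbf{e}_3(s)} = -\tilde{\kappa}_3(s)\, \mathbf{e}_{44}(s)^{-1},
\end{equation*}
which is the $i=1$ instance of \eqref{g a non deg 3} and \eqref{g a non deg 5}; the higher-order cases follow in the same manner by repeated differentiation.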
\medskip

\noindent \textit{Frenet boxes revisited.} By the preceding observations, the vectors $G^{(i)}(s)$ for $1 \leq i \leq 3$ form a basis of $\mathbb{R}^3 \times \{0\}$. Fixing $\xi \in \widehat{\mathbb{R}}^4$ and $r > 0$, one may write
\begin{equation}\label{Frenet box 1}
\xi - \xi_4 G(s) = \sum_{i=1}^3 r^{i} \eta_i G^{(i)}(s)
\end{equation}
for some vector of coefficients $(\eta_1, \eta_2, \eta_3) \in \mathbb{R}^3$. The powers of $r$ appearing in the above expression play a normalising r\^ole below. For each $1 \leq k \leq 3$ form the inner product of both sides of the above identity with the Frenet vector $\mathbf{e}_k(s)$. Combining the resulting expressions with the linear independence relations inherent in \eqref{g a non deg 3}, the coefficients $\eta_k$ can be related to the numbers $\inn{\xi}{\mathbf{e}_k(s)}$ via a lower anti-triangular transformation, viz.
\begin{equation}\label{Frenet box 2}
\begin{bmatrix} \inn{\xi}{\mathbf{e}_1(s)} \\ \inn{\xi}{\mathbf{e}_2(s)} \\ \inn{\xi}{\mathbf{e}_3(s)} \end{bmatrix} =
\begin{bmatrix}
0 & 0 & \inn{G^{(3)}(s)}{\mathbf{e}_1(s)} \\
0 & \inn{G^{(2)}(s)}{\mathbf{e}_2(s)} & \inn{G^{(3)}(s)}{\mathbf{e}_2(s)} \\
\inn{G^{(1)}(s)}{\mathbf{e}_3(s)} & \inn{G^{(2)}(s)}{\mathbf{e}_3(s)} & \inn{G^{(3)}(s)}{\mathbf{e}_3(s)}
\end{bmatrix}
\begin{bmatrix} r\eta_1 \\ r^2\eta_2 \\ r^3 \eta_3 \end{bmatrix}.
\end{equation}
Recall that
\begin{equation*}
\pi_{2,\gamma}(s;\,r) := \big\{\xi \in \widehat{\mathbb{R}}^4 : |\inn{\mathbf{e}_j(s)}{\xi}| \lesssim r^{4 - j} \textrm{ for $1 \leq j \leq 3$, } \, |\inn{\mathbf{e}_4(s)}{\xi}| \sim 1 \big\}.
\end{equation*}
Thus, if $\xi \in \pi_{2,\gamma}(s;r)$, then it follows from combining the above definition and \eqref{g a non deg 5} with \eqref{Frenet box 2} that $|\eta_i| \lesssim_{\gamma} 1$ for $1 \leq i \leq 3$. Similarly, the localisation \eqref{Frenet loc} implies that
\begin{equation*}
\pi_{2,\gamma}(s;r) \subseteq \mathcal{R} := [-2,2]^3 \times [1/4,4].
\end{equation*}
The identity \eqref{Frenet box 1} can be succinctly expressed using matrices. In particular, for $s \in I_0$ and $r > 0$, define the $4 \times 4$ matrix
\begin{equation}\label{Frenet box 3}
[g]_{\mathcal{C}, s,r} := \begin{pmatrix} [g]_{s,r} & g(s) \\ 0 & 1 \end{pmatrix}.
\end{equation}
Here the block $[g]_{s,r}$ is the $3 \times 3$ matrix as defined in \eqref{gamma transformation}. With this notation, the identity \eqref{Frenet box 1} may be written as
\begin{equation*}
\xi = [g]_{\mathcal{C},s,r} \cdot \eta \qquad \textrm{where $\eta = (\eta_1, \eta_2, \eta_3, \xi_4)$.}
\end{equation*}
Moreover, if $\xi \in \pi_{2,\gamma}(s;r)$, then the preceding observations show that $\eta$ in the above equation may be taken to lie in a bounded region and so
\begin{equation}\label{Frenet box 4}
\pi_{2,\gamma}(s;r) \subseteq [g]_{\mathcal{C},s,C r}\big([-2,2]^4\big) \cap \mathcal{R} ,
\end{equation}
where $C \geq 1$ is a suitably large dimensional constant.\medskip

\subsection{A square function estimate for cones generated by non-degenerate curves} Here the geometric setup described in \S\ref{geo obs sec} is abstracted.

\begin{definition}
For $g \colon [-1,1] \to \mathbb{R}^3$ a smooth curve, let $\Gamma_g$ denote the codimension $2$ cone in $\mathbb{R}^4$ parametrised by
\begin{equation*}
(\rho,s) \mapsto \rho \, \begin{pmatrix} g(s) \\ 1 \end{pmatrix} \qquad \textrm{for } (\rho, s) \in U := [1/4,4] \times [-1,1].
\end{equation*}
In this case, $\Gamma_g$ is referred to as the \textit{cone generated by $g$}.
\end{definition}

In view of \eqref{Frenet box 4}, one wishes to establish a reverse square function estimate with respect to the $r$-\textit{plates}
\begin{equation*}
\theta(s;r) := [g]_{\mathcal{C}, s,r} \big( [-2,2]^4 \big) \cap \mathcal{R}.
\end{equation*}
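It may help to keep in mind the shape of these sets (an informal description, not used in the arguments): by \eqref{Frenet box 1} and \eqref{Frenet box 3}, each point of $\theta(s;r)$ is of the form
\begin{equation*}
\xi = \xi_4\, G(s) + \sum_{i=1}^3 r^i \eta_i\, G^{(i)}(s) \qquad \textrm{with } |\eta_i| \leq 2, \; |\xi_4| \leq 2,
\end{equation*}
so that, up to the intersection with $\mathcal{R}$, the plate $\theta(s;r)$ is a parallelepiped of dimensions roughly $1 \times r \times r^2 \times r^3$ in the directions $G(s), G^{(1)}(s), G^{(2)}(s), G^{(3)}(s)$ respectively. This is consistent with the defining inequalities $|\inn{\mathbf{e}_j(s)}{\xi}| \lesssim r^{4-j}$ for the Frenet boxes.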
In some cases it will be useful to highlight the choice of function $g$ by writing $\theta(g; s;r)$ for $\theta(s;r)$. Note that each of these plates lies in a neighbourhood of the cone $\Gamma_g$. We think of the union of all plates $\theta(s;r)$ as $s$ varies over the domain $[-1,1]$ as forming an anisotropic neighbourhood of $\Gamma_g$.

\begin{definition}
A collection $\Theta(r)$ of $r$-plates is a \textit{plate family for $\Gamma_g$} if it consists of $\theta(g;s;r)$ for $s$ varying over an $r$-separated subset of $[-1,1]$.
\end{definition}

In view of the preceding observations, Theorem~\ref{Frenet reverse SF theorem} is a consequence of the following result.

\begin{theorem}\label{reverse sf thm}
Suppose $g \colon [-1,1] \to \mathbb{R}^3$ is a smooth, non-degenerate curve and $\Theta(r)$ is an $r$-plate family for $\Gamma_g$ for some dyadic $0 < r \leq 1$. For all $\varepsilon > 0$ the inequality
\begin{equation*}
\Big\|\sum_{\theta \in \Theta(r)}f_{\theta}\Big\|_{L^4(\mathbb{R}^4)} \lesssim_{\varepsilon} r^{-\varepsilon} \Big\| \big(\sum_{\theta \in \Theta(r)}|f_{\theta}|^2\big)^{1/2}\Big\|_{L^4(\mathbb{R}^4)}
\end{equation*}
holds whenever $(f_{\theta})_{\theta \in \Theta(r)}$ is a sequence of functions satisfying $\mathrm{supp}\, \widehat{f}_{\theta} \subseteq \theta$ for all $\theta \in \Theta(r)$.
\end{theorem}

\subsection{Multilinear estimates}\label{multilinear subsec} The proof of Theorem~\ref{reverse sf thm} follows an argument of Lee--Vargas~\cite{LV2012} which relies on first establishing a multilinear variant of the desired square function inequality.\medskip

Let $\mathfrak{I}$ denote the collection of all dyadic subintervals of $[-1,1]$ and for any dyadic number $0 < r \leq 1$ let $\mathfrak{I}(r)$ denote the subset of $\mathfrak{I}$ consisting of all intervals of length $r$. Given any pair of dyadic scales $0 < \lambda_1 \leq \lambda_2 \leq 1$ and $J \in \mathfrak{I}(\lambda_2)$, let $\mathfrak{I}(J;\,\lambda_1)$ denote the collection of all $I \in \mathfrak{I}(\lambda_1)$ which satisfy $I \subseteq J$. Fix $0 < r \leq 1$ and for each dyadic $r \leq \lambda \leq 1$ decompose $\Theta(r)$ as a disjoint union of subsets $\Theta(I;\,r)$ for $I \in \mathfrak{I}(\lambda)$ such that:
\begin{enumerate}[i)]
\item If $\theta(s;r) \in \Theta(I;\,r)$, then $s \in I$;
\item If $r \leq \lambda_1 \leq \lambda_2$ and $J \in \mathfrak{I}(\lambda_2)$, then $ \Theta(J;\,r) = \bigcup_{I \in \mathfrak{I}(J;\lambda_1)} \Theta(I;\,r)$.
\end{enumerate}
Thus, if for all $r \leq \lambda \leq 1$ we define
\begin{equation}\label{rsq dyadic dec}
f_I := \sum_{\theta \in \Theta(I;\,r)} f_{\theta} \qquad \textrm{for all $I \in \mathfrak{I}(\lambda)$},
\end{equation}
then for all $r \leq \lambda_1 \leq \lambda_2$ it follows that
\begin{equation*}
f_J = \sum_{I \in \mathfrak{I}(J;\,\lambda_1)} f_I \qquad \textrm{for all $J \in \mathfrak{I}(\lambda_2)$.}
\end{equation*}
For each dyadic number $0 < \lambda \leq 1$ let $\mathfrak{I}^4_{\mathrm{sep}}(\lambda)$ denote the collection of $4$-tuples of intervals $\vec{I} = (I_1, \dots, I_4) \in \mathfrak{I}(\lambda)^4$ which satisfy the separation condition
\begin{equation*}
\mathrm{dist}(I_1, \dots, I_4) := \min_{1 \leq \ell_1 < \ell_2 \leq 4} \mathrm{dist}(I_{\ell_1}, I_{\ell_2}) \geq \lambda.
\end{equation*}

\begin{proposition}\label{multi sf prop}
Let $0 < r \leq \lambda < 1$ be dyadic. If $(I_1, \dots, I_4) \in \mathfrak{I}^4_{\mathrm{sep}}(\lambda)$ and $\varepsilon > 0$, then
\begin{equation*}
\Big\| \prod_{\ell=1}^4\Big|\sum_{\theta \in \Theta(I_{\ell}; r)}f_{\theta}\Big|^{1/4}\Big\|_{L^4(\mathbb{R}^4)} \lesssim_{\varepsilon} M(\lambda) r^{-\varepsilon} \prod_{\ell=1}^4 \Big\| \big(\sum_{\theta \in \Theta(I_{\ell}; r)}|f_{\theta}|^2\big)^{1/2}\Big\|_{L^4(\mathbb{R}^4)}^{1/4}
\end{equation*}
holds whenever $(f_{\theta})_{\theta \in \Theta(r)}$ is a sequence of functions satisfying $\mathrm{supp}\, \widehat{f}_{\theta} \subseteq \theta$ for all $\theta \in \Theta(r)$, where $\sup_{\lambda \in [\lambda_0, 1]} M(\lambda) <\infty$ for all $\lambda_0 > 0$.
\end{proposition}

Using a standard argument, Proposition~\ref{multi sf prop} will follow from a $4$-linear Fourier restriction estimate. To state the latter inequality, given an interval $J \subseteq [-1,1]$ let $\Gamma_J$ denote the image of $\Gamma_g \colon (\rho, s) \mapsto \rho \, (g(s),1)^\top$ restricted to the set $U_J := [1/4,4] \times J$ and, for $r > 0$, let $N_r \Gamma_J$ denote the $r$-neighbourhood of $\Gamma_J$.

\begin{proposition}\label{mult rest prop}
If $(I_1, \dots, I_4) \in \mathfrak{I}_{\mathrm{sep}}^4(\lambda)$, then for all $0 < r \leq \lambda$ and all $\varepsilon > 0$ the inequality
\begin{equation*}
\big\| \prod_{\ell =1}^4 |F_{\ell}|^{1/4}\big\|_{L^4(\mathbb{R}^4)} \lesssim_{\varepsilon} M(\lambda) r^{1-\varepsilon} \prod_{\ell=1}^4 \|F_{\ell}\|_{L^2(\mathbb{R}^4)}^{1/4}
\end{equation*}
holds for all $F_{\ell} \in L^2(\mathbb{R}^4)$ with $\mathrm{supp}\, \widehat{F}_{\ell} \subseteq N_r \Gamma_{I_\ell}$ for $1 \leq \ell \leq 4$.
\end{proposition}

Given an interval $J \subset [-1,1]$, define the extension operator
\begin{equation*}
E_Jf(x) := \int_{U_J} e^{i \inn{\Gamma(u)}{x}} f(u) \,\mathrm{d} u \qquad \textrm{for all $f \in L^1(U_J)$,}
\end{equation*}
where $U_J := [1/4,4] \times J$ as above. By standard uncertainty principle techniques and Plancherel's theorem (see, for instance, \cite{Bennett2006} or \cite[Appendix]{Tao2020}), Proposition~\ref{mult rest prop} is a consequence of the following multilinear extension estimate.
\begin{proposition}\label{multilin ex prop}
If $(I_1, \dots, I_4) \in \mathfrak{I}_{\mathrm{sep}}^4(\lambda)$, then for all $R \geq 1$ and all $\varepsilon > 0$ the inequality
\begin{equation*}
\big\| \prod_{\ell=1}^4 |E_{I_{\ell}} f_{\ell}|^{1/4}\big\|_{L^4(B_R)} \lesssim_{\varepsilon} M(\lambda) R^{\varepsilon} \prod_{\ell=1}^4 \|f_{\ell}\|_{L^2(U_{I_\ell})}^{1/4}
\end{equation*}
holds for all $f_{\ell} \in L^2(U)$ for $1 \leq \ell \leq 4$, where $B_R$ denotes a ball of radius $R$.
\end{proposition}

We refer to the above references for the argument used to pass from Proposition~\ref{multilin ex prop} to Proposition~\ref{mult rest prop} and turn to the proof of the extension estimate.

\begin{proof}[Proof of Proposition~\ref{multilin ex prop}]
This inequality is a special case (via a compactness argument) of the recent generalisation of the Bennett--Carbery--Tao restriction theorem \cite{Bennett2006} due to Bennett--Bez--Flock--Lee \cite[Theorem 1.3]{BBFL2018}; an improved version with $R^\varepsilon$ replaced by $(\log R)^{O(d)}$ has also been obtained by Zhang \cite[(1.8)]{Zhang}, although the $R^\varepsilon$ loss suffices for our purposes. In order to see this, we must verify a certain linear-algebraic condition on the tangent planes to $\Gamma$. The setup is recalled presently.\medskip

Fix $u_{\ell} = (\rho_\ell, s_{\ell}) \in U_{I_{\ell}}$ for $1 \leq \ell \leq 4$. We construct a \textit{Brascamp--Lieb datum} $(\mathbf{L}, \mathbf{p})$ by taking
\begin{equation*}
\mathbf{L} := (\pi_1, \dots, \pi_4) \qquad \textrm{and} \qquad \mathbf{p} := (p_1, \dots, p_4) := (1/2, \dots, 1/2)
\end{equation*}
where each $\pi_{\ell} \colon \mathbb{R}^4 \to V_{\ell}$ is the orthogonal projection map from $\mathbb{R}^4$ to the 2-dimensional tangent space $V_{\ell}$ to $\Gamma$ at $\Gamma(u_{\ell})$. With this definition, the problem is to show that $\mathrm{BL}(\mathbf{L}, \mathbf{p}) < \infty$, where the \textit{Brascamp--Lieb constant} $\mathrm{BL}(\mathbf{L}, \mathbf{p})$ is as defined in, for instance, \cite{BBFL2018}. By the characterisation of finiteness of the Brascamp--Lieb constant from \cite{BCCT2008} and our choice of datum, it suffices to verify the following two conditions:
\begin{enumerate}[i)]
\item $\displaystyle \sum_{\ell = 1}^4 (\dim \mathrm{Im}\,\pi_{\ell}) \, p_{\ell} = 4$.
\item $\displaystyle \dim V \leq \frac{1}{2} \sum_{\ell = 1}^4 \dim \big(\pi_{\ell} V\big)$ holds for all linear subspaces $V \subseteq \mathbb{R}^4$.
\end{enumerate}
The scaling condition i) is immediate from the choice of datum and it remains to prove the dimension condition ii).\medskip

Clearly one may replace $\pi_{\ell}$ with the linear map associated to the $2 \times 4$ Jacobian matrix $\mathrm{d} \Gamma |_{(\rho_\ell,s_{\ell})}$. By subtracting the first column from the third column and applying the fundamental theorem of calculus,
\begin{equation*}
\det \begin{bmatrix} g(s_{\ell_1}) & g'(s_{\ell_1}) & g(s_{\ell_2}) & g'(s_{\ell_2}) \\ 1 & 0 & 1 & 0 \end{bmatrix} = -\int_{s_{\ell_1}}^{s_{\ell_2}} \det \begin{bmatrix} g'(s_{\ell_1}) & g'(s) & g'(s_{\ell_2}) \end{bmatrix} \,\mathrm{d} s.
\end{equation*}
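The size of the integrand on the right-hand side can be seen transparently in the model case $g(s) = (s, s^2/2, s^3/6)$, recorded purely for illustration: there $g'(s) = (1, s, s^2/2)^{\top}$ and the determinant is a Vandermonde determinant,
\begin{equation*}
\det \begin{bmatrix} g'(a) & g'(b) & g'(c) \end{bmatrix} = \tfrac{1}{2}(b-a)(c-a)(c-b),
\end{equation*}
which exhibits exactly the lower bound invoked in the next display.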
Furthermore, by repeated application of column reduction and the fundamental theorem of calculus, it follows from the non-degeneracy hypothesis and the initial localisation that
\begin{equation*}
\big|\det \begin{bmatrix} g'(s_{\ell_1}) & g'(s) & g'(s_{\ell_2}) \end{bmatrix}\big| \gtrsim |s_{\ell_2} - s_{\ell_1}||s-s_{\ell_1}||s_{\ell_2}-s|;
\end{equation*}
see, for instance, \cite[Proposition 4.1]{GGPRY}. Consequently, the determinant has constant sign and
\begin{equation}\label{dim cond 1}
\big| \det \begin{bmatrix} \mathrm{d} \Gamma |_{(\rho_{\ell_1}, s_{\ell_1})} & \mathrm{d} \Gamma |_{(\rho_{\ell_2}, s_{\ell_2})} \end{bmatrix}\big| \gtrsim |\rho_{\ell_1}||\rho_{\ell_2}||s_{\ell_2} - s_{\ell_1}|^4 \gtrsim \lambda^4,
\end{equation}
where the final bound is due to the separation between the $I_{\ell}$. Note that \eqref{dim cond 1} is equivalent to the geometric condition that $V_{\ell_1} + V_{\ell_2} = \mathbb{R}^4$ and therefore
\begin{equation}\label{dim cond 2}
V_{\ell_1}^{\perp} \cap V_{\ell_2}^{\perp} = \big(V_{\ell_1} + V_{\ell_2}\big)^{\perp} = \{0\}.
\end{equation}
With this observation, it is now a simple matter to verify the dimension condition ii) above.
\begin{itemize}
\item If $\dim V = 4$ or $\dim V = 0$, then ii) is trivial.
\item If $\dim V = 1$, then it suffices to show that $\dim\pi_{\ell} V = 1$ for at least two values of $\ell$. Suppose $\dim\pi_{\ell_1} V = \dim\pi_{\ell_2} V = 0$ for some $1 \leq \ell_1 < \ell_2 \leq 4$, so that
\begin{equation*}
V \subseteq \ker \pi_{\ell_1} \cap \ker \pi_{\ell_2} = V_{\ell_1}^{\perp} \cap V_{\ell_2}^{\perp}.
\end{equation*}
However, in this case it follows from \eqref{dim cond 2} that $V = \{0\}$, which contradicts our dimension hypothesis. Thus, $\dim\pi_{\ell} V = 0$ for at most a single value of $\ell$, which more than suffices for our purpose.
\item If $\dim V = 2$, then we may assume that $\dim \pi_{\ell_0} V = 0$ for some $1 \leq \ell_0 \leq 4$, since otherwise ii) is immediate. By dimensional considerations, it follows that $V = V_{\ell_0}^{\perp}$. Now let $1 \leq \ell \leq 4$ with $\ell \neq \ell_0$. By \eqref{dim cond 2}, it follows that $V \cap V_{\ell}^{\perp} = \{0\}$. Thus, by the rank-nullity theorem applied to the mapping $\pi_{\ell}|_V \colon V \to V_{\ell}$, we deduce that $\dim \pi_{\ell} V = 2$. Since this is true for three distinct values of $\ell$, property ii) holds.
\item If $\dim V = 3$, then it is clear that $\dim \pi_{\ell} V \geq 1$ for all $1 \leq \ell \leq 4$. Suppose there exist $1 \leq \ell_1 < \ell_2 \leq 4$ such that $\dim (\pi_{\ell_1} V) = \dim (\pi_{\ell_2} V) = 1$. In this case, by the rank-nullity theorem applied to $\pi_{\ell_i}|_V \colon V \to V_{\ell_i}$ and dimensional considerations,
\begin{equation*}
V_{\ell_1}^{\perp} + V_{\ell_2}^{\perp} = \ker \pi_{\ell_1} + \ker \pi_{\ell_2} \subseteq V.
\end{equation*}
However, in this case it follows from \eqref{dim cond 2} that $V = \mathbb{R}^4$, which contradicts our dimension hypothesis. Thus, $\dim \pi_{\ell} V = 1$ for at most a single value of $\ell$, and for the remaining values of $\ell$ the dimension is at least 2. This again more than suffices for our purpose.
\end{itemize}
This establishes the finiteness of the Brascamp--Lieb constant and concludes the proof.
\end{proof}

Having established the multilinear restriction estimate, it is a simple matter to deduce the desired multilinear square function bound.

\begin{proof}[Proof of Proposition~\ref{multi sf prop}]
Let $B$ be a ball of radius $r^{-1}$ in $\mathbb{R}^4$ with centre $x_0$. Fix $\eta \in \mathcal{S}(\mathbb{R}^4)$ with $\mathrm{supp}\, \widehat{\eta} \subset B(0,1)$ and $|\eta(x)| \gtrsim 1$ on $B(0,1)$ and define $\eta_{B}(x) := \eta\big(r(x-x_0)\big)$. By the rapid decay of $\eta$, it suffices to show that
\begin{equation*}
\Big\| \prod_{\ell=1}^4\Big|\sum_{\theta \in \Theta(I_{\ell}; r)}f_{\theta}\Big|^{1/4}\Big\|_{L^4(B)} \lesssim_{\varepsilon} M(\lambda)\, r^{-\varepsilon} \prod_{\ell=1}^4 \Big\| \big(\sum_{\theta \in \Theta(I_{\ell}; r)}|f_{\theta}|^2\big)^{1/2}\, |\eta_B|^2\Big\|_{L^4(\mathbb{R}^4)}^{1/4}.
\end{equation*}
Indeed, once established, this inequality can be summed over a collection of finitely-overlapping balls $B$ which cover $\mathbb{R}^4$ to obtain the desired global estimate. For $1 \leq \ell \leq 4$ define
\begin{equation*}
F_{\ell} :=\sum_{\theta \in \Theta(I_{\ell}; r)}f_{\theta} \, \eta_B
\end{equation*}
so that each $F_{\ell}$ is Fourier supported in an $O(r)$-neighbourhood of $\Gamma_{I_{\ell}}$. Applying Proposition~\ref{mult rest prop} to these functions, it follows that
\begin{equation*}
\Big\| \prod_{\ell=1}^4\Big|\sum_{\theta \in \Theta(I_{\ell}; r)}f_{\theta}\Big|^{1/4}\Big\|_{L^4(B)} \lesssim \Big\| \prod_{\ell=1}^4|F_{\ell}|^{1/4}\Big\|_{L^4(\mathbb{R}^4)} \lesssim_{\varepsilon} M(\lambda) r^{1-\varepsilon} \prod_{\ell=1}^4 \Big\|\sum_{\theta \in \Theta(I_{\ell}; r)}f_{\theta} \, \eta_B\Big\|_{L^2(\mathbb{R}^4)}^{1/4}.
\end{equation*}
Note that the functions $f_{\theta}\, \eta_B$ appearing in the right-hand sum have essentially disjoint Fourier support. Consequently, by Plancherel's theorem and H\"older's inequality,
\begin{align*}
\Big\|\sum_{\theta \in \Theta(I_{\ell}; r)}f_{\theta} \, \eta_B\Big\|_{L^2(\mathbb{R}^4)} &\lesssim \Big\|\big(\sum_{\theta \in \Theta(I_{\ell}; r)}|f_{\theta}|^2\big)^{1/2}\,|\eta_B|\Big\|_{L^2(\mathbb{R}^4)} \\
&\lesssim r^{-1} \Big\|\big(\sum_{\theta \in \Theta(I_{\ell}; r)}|f_{\theta}|^2\big)^{1/2}\,|\eta_B|^2\Big\|_{L^4(\mathbb{R}^4)}.
\end{align*}
Combining the previous two displays completes the proof.
\end{proof}

\subsection{Rescaling} By combining Proposition~\ref{multilin ex prop} with an affine rescaling argument, one may deduce a useful refined version of the multilinear inequality. This improves the dependence on the separation parameter $\lambda$ under an additional localisation hypothesis on the intervals $I_1, \dots, I_4$. Given dyadic scales $0 < \lambda_1 \leq \lambda_2 \leq 1$ and $J \in \mathfrak{I}(\lambda_2)$, let $\mathfrak{I}^4_{\mathrm{sep}}(J;\,\lambda_1)$ denote the collection of all $4$-tuples of intervals $\vec{I} = (I_1, \dots, I_4) \in \mathfrak{I}^4_{\mathrm{sep}}(\lambda_1)$ such that $I_\ell \subseteq J$ for all $1 \leq \ell \leq 4$. With this definition, the refined version of Proposition~\ref{multi sf prop} reads as follows.
\begin{corollary}\label{resc multilin sf cor}
Fix dyadic scales $0 < r \leq \lambda_1 \leq \lambda_2 \leq 1$. If $J \in \mathfrak{I}(\lambda_2)$, $(I_1, \dots, I_4) \in \mathfrak{I}_{\mathrm{sep}}^4(J;\,\lambda_1)$ and $\varepsilon > 0$, then
\begin{equation*}
\Big\| \prod_{\ell=1}^4\Big|\sum_{\theta \in \Theta(I_{\ell}; r)}f_{\theta}\Big|^{1/4}\Big\|_{L^4(\mathbb{R}^4)} \lesssim_{\varepsilon} M(\lambda_1/\lambda_2) r^{-\varepsilon} \prod_{\ell=1}^4 \Big\| \big(\sum_{\theta \in \Theta(I_{\ell}; r)}|f_{\theta}|^2\big)^{1/2}\Big\|_{L^4(\mathbb{R}^4)}^{1/4}
\end{equation*}
holds whenever $(f_{\theta})_{\theta \in \Theta(r)}$ is a sequence of functions satisfying $\mathrm{supp}\, \widehat{f}_{\theta} \subseteq \theta$ for all $\theta \in \Theta(r)$.
\end{corollary}

\begin{proof}
The result is a consequence of Proposition~\ref{multi sf prop} and a rescaling argument. Let $J = [\sigma - \lambda_2, \sigma + \lambda_2] \subseteq [-1,1]$ and recall the definition of the rescaled curve
\begin{equation*}
g_{\sigma, \lambda_2}(\tilde{s}) := \big([g]_{\sigma, \lambda_2}\big)^{-1} \big( g(\sigma + \lambda_2 \tilde{s}) - g(\sigma)\big).
\end{equation*}
Differentiating this expression, it follows that $g_{\sigma, \lambda_2}^{(j)}(\tilde{s}) = \lambda_2^j \big([g]_{\sigma, \lambda_2}\big)^{-1} g^{(j)}(\sigma + \lambda_2 \tilde{s})$ for $j \geq 1$ and so
\begin{equation*}
[g_{\sigma, \lambda_2}]_{\tilde{s},\tilde{r}} = \big([g]_{\sigma, \lambda_2}\big)^{-1} \circ [g]_{s,r} \qquad \textrm{where $s = \sigma + \lambda_2 \tilde{s}$ and $r = \lambda_2 \tilde{r}$.}
\end{equation*}
From this and the definition \eqref{Frenet box 3}, it is not difficult to deduce that
\begin{equation*}
[g_{\sigma, \lambda_2}]_{\mathcal{C}, \tilde{s},\tilde{r}} = \big([g]_{\mathcal{C},\sigma, \lambda_2}\big)^{-1} \circ [g]_{\mathcal{C},s,r}.
\end{equation*}
Suppose $\theta \in \Theta(J;\,r)$ and $\mathrm{supp}\, \widehat{F}_{\theta} \subseteq \theta$. If $\theta = \theta(s,r)$, then
\begin{equation*}
\mathrm{supp}\, \widehat{F}_{\theta} \circ [g]_{\mathcal{C},\sigma, \lambda_2} \subseteq \tilde{\theta}(\tilde{s}, \tilde{r})
\end{equation*}
where $\tilde{\theta}(\tilde{s}, \tilde{r})$ is the $\tilde{r}$-plate centred at $\tilde{s}$ defined with respect to $\tilde{g} := g_{\sigma, \lambda_2}$. Finally, note that the above rescaling maps the intervals $(I_1, \dots, I_4) \in \mathfrak{I}^4_{\mathrm{sep}}(J;\lambda_1)$ to intervals $(\tilde{I}_1, \dots, \tilde{I}_4) \in \mathfrak{I}^4_{\mathrm{sep}}(\lambda_1/\lambda_2)$. Consequently, applying Proposition~\ref{multi sf prop} at scale $\tilde{r} = r/\lambda_2$ to the rescaled functions and then undoing the change of variables, under which both sides of the inequality rescale by the same Jacobian factor, yields the desired estimate with constant $M(\lambda_1/\lambda_2)\,\tilde{r}^{-\varepsilon} \lesssim M(\lambda_1/\lambda_2)\, r^{-\varepsilon}$.
\end{proof}

\subsection{Broad/narrow analysis} Here arguments from \cite{Ham2014} are adapted to pass from the multilinear estimates of Proposition~\ref{multi sf prop} (or, more precisely, Corollary~\ref{resc multilin sf cor}) to the linear estimates in Theorem~\ref{reverse sf thm}.\medskip

The key ingredient is the following decomposition lemma, which follows by iteratively applying the decomposition scheme discussed in \cite{Ham2014}.

\begin{lemma}\label{broad narrow lemma E}
Let $\varepsilon > 0$ and $r > 0$. There exist dyadic numbers $C_\varepsilon \geq 1$, $r_{\mathrm{n}}$ and $r_{\mathrm{b}}$ satisfying
\begin{equation}\label{broad narrow equation E 1}
r < r_{\mathrm{n}} \lesssim_{\varepsilon} r, \qquad r < r_{\mathrm{b}} \leq 1
\end{equation}
such that
\begin{equation}\label{brd nrw E}
\Big\|\sum_{\theta \in \Theta(r)} f_{\theta}\Big\|_{L^4(\mathbb{R}^4)} \lesssim_{\varepsilon} r^{-\varepsilon} \Big( \sum_{I \in \mathfrak{I}(r_{\mathrm{n}})}\|f_I\|_{L^4(\mathbb{R}^4)}^4\Big)^{1/4} + r^{-\varepsilon} \Big(\sum_{\substack{J \in \mathfrak{I}(C_\varepsilon r_{\mathrm{b}}) \\ \vec{I} \in \mathfrak{I}^4_{\mathrm{sep}}(J;\,r_{\mathrm{b}})} }\Big\|\prod_{\ell=1}^4|f_{I_{\ell}}|^{1/4}\Big\|_{L^4(\mathbb{R}^4)}^4 \Big)^{1/4}
\end{equation}
holds whenever $(f_{\theta})_{\theta \in \Theta(r)}$ is a sequence of functions satisfying $\mathrm{supp}\, \widehat{f}_{\theta} \subseteq \theta$ for all $\theta \in \Theta(r)$.
\end{lemma}

We provide a proof of (an abstract version of) the above lemma in Appendix~\ref{BG appendix} (more precisely, Lemma~\ref{broad narrow lemma E} follows from applying Lemma~\ref{broad narrow lemma} to the decomposition $f:= \sum_{\theta \in \Theta(r)} f_\theta$ for a fixed dyadic scale $0< r \leq 1$ and $\varepsilon>0$). We are now in position to prove the desired reverse square function estimate.

\begin{proof}[Proof of Theorem~\ref{reverse sf thm}]
Fix a dyadic scale $0 < r \leq 1$ and $\varepsilon > 0$, and apply Lemma~\ref{broad narrow lemma E}. The analysis splits into two cases depending on which of the right-hand terms in \eqref{brd nrw E} dominates. We refer to the first term as the \textit{narrow} term and to the second term as the \textit{broad} term.

\subsubsection*{The narrow case} Suppose the narrow term dominates the right-hand side of \eqref{brd nrw E} in the sense that
\begin{equation*}
\Big\|\sum_{\theta \in \Theta(r)} f_{\theta}\Big\|_{L^4(\mathbb{R}^4)} \lesssim_{\varepsilon} r^{-\varepsilon} \Big( \sum_{I \in \mathfrak{I}(r_{\mathrm{n}})}\Big\|\sum_{\theta \in \Theta(I;\,r)} f_{\theta}\Big\|_{L^4(\mathbb{R}^4)}^4\Big)^{1/4}.
\end{equation*}
This case is dealt with using a trivial argument. If $I \in \mathfrak{I}(r_{\mathrm{n}})$, then
\begin{equation}\label{narrow 1}
\Big|\sum_{\theta \in \Theta(I;\,r)} f_{\theta}\Big| \lesssim_{\varepsilon} \Big( \sum_{\theta \in \Theta(I;\,r)} |f_{\theta}|^2 \Big)^{1/2}
\end{equation}
by Cauchy--Schwarz, since the condition $r_{\mathrm{n}} \sim r$ from \eqref{broad narrow equation E 1} implies that there are only $O_{\varepsilon}(1)$ plates belonging to $\Theta(I;\,r)$.
Thus,
\begin{align*}
\Big\|\sum_{\theta \in \Theta(r)} f_{\theta}\Big\|_{L^4(\mathbb{R}^4)} \lesssim_{\varepsilon} r^{-\varepsilon} \Big\|\Big( \sum_{I \in \mathfrak{I}(r_{\mathrm{n}})}\sum_{\theta \in \Theta(I;\,r)} |f_{\theta}|^2\Big)^{1/2}\Big\|_{L^4(\mathbb{R}^4)} = r^{-\varepsilon} \Big\|\Big( \sum_{\theta \in \Theta(r)} |f_{\theta}|^2\Big)^{1/2}\Big\|_{L^4(\mathbb{R}^4)},
\end{align*}
where the first step follows from \eqref{narrow 1} and the embedding $\ell^2 \hookrightarrow \ell^4$ and the last step from the definition of $\mathfrak{I}(r_{\mathrm{n}})$ and $\Theta(I;r)$.

\subsubsection*{The broad case} Suppose the broad term dominates the right-hand side of \eqref{brd nrw E} in the sense that
\begin{equation*}
\Big\|\sum_{\theta \in \Theta(r)} f_{\theta}\Big\|_{L^4(\mathbb{R}^4)} \lesssim_{\varepsilon} r^{-\varepsilon} \Big(\sum_{\substack{J \in \mathfrak{I}(C_\varepsilon r_{\mathrm{b}}) \\ \vec{I} \in \mathfrak{I}^4_{\mathrm{sep}}(J;\,r_{\mathrm{b}})} }\Big\|\prod_{\ell=1}^4\Big|\sum_{\theta \in \Theta(I_{\ell}; r)} f_{\theta}\Big|^{1/4}\Big\|_{L^4(\mathbb{R}^4)}^4 \Big)^{1/4}.
\end{equation*}
This case is treated using the rescaled multilinear inequality from Corollary~\ref{resc multilin sf cor}. Since $\#\mathfrak{I}^4_{\mathrm{sep}}(J;\,r_{\mathrm{b}}) \lesssim_{\varepsilon} 1$ for each $J \in \mathfrak{I}(C_{\varepsilon}r_{\mathrm{b}})$, by H\"older's inequality
\begin{equation*}
\Big\|\sum_{\theta \in \Theta(r)} f_{\theta}\Big\|_{L^4(\mathbb{R}^4)} \lesssim_{\varepsilon} r^{-\varepsilon} \Big(\sum_{J \in \mathfrak{I}(C_\varepsilon r_{\mathrm{b}})} \Big(\sum_{\vec{I} \in \mathfrak{I}^4_{\mathrm{sep}}(J;\,r_{\mathrm{b}})} \Big\|\prod_{\ell=1}^4 \Big|\sum_{\theta \in \Theta(I_{\ell}; r)} f_{\theta}\Big|^{1/4}\Big\|_{L^4(\mathbb{R}^4)}^{16}\Big)^{1/4} \Big)^{1/4}.
\end{equation*}
Applying Corollary~\ref{resc multilin sf cor} with $\lambda_1 := r_{\mathrm{b}}$ and $\lambda_2 := C_\varepsilon r_{\mathrm{b}}$, one deduces that
\begin{equation*}
\Big\|\sum_{\theta \in \Theta(r)} f_{\theta}\Big\|_{L^4(\mathbb{R}^4)} \lesssim_{\varepsilon} r^{-\varepsilon} \Big(\sum_{J \in \mathfrak{I}(C_\varepsilon r_{\mathrm{b}})} \Big(\sum_{\vec{I} \in \mathfrak{I}^4_{\mathrm{sep}}(J;\,r_{\mathrm{b}})} \prod_{\ell = 1}^4\Big\| \Big(\sum_{\theta \in \Theta(I_{\ell};\,r)} |f_{\theta}|^2 \Big)^{1/2} \Big\|_{L^4(\mathbb{R}^4)}^4\Big)^{1/4} \Big)^{1/4}.
\end{equation*}
Relaxing the inner range of summation to all $\vec{I} \in \mathfrak{I}(J;\,r_{\mathrm{b}})^4$ (that is, dropping the separation condition),
\begin{equation*}
\Big\|\sum_{\theta \in \Theta(r)} f_{\theta}\Big\|_{L^4(\mathbb{R}^4)} \lesssim_{\varepsilon} r^{-\varepsilon} \Big(\sum_{I \in \mathfrak{I}(r_{\mathrm{b}})} \Big\| \Big(\sum_{\theta \in \Theta(I;\,r)} |f_{\theta}|^2 \Big)^{1/2} \Big\|_{L^4(\mathbb{R}^4)}^4 \Big)^{1/4}.
\end{equation*}
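The right-hand side is controlled by the full square function exactly as in the narrow case: writing $g_I := \big(\sum_{\theta \in \Theta(I;\,r)} |f_{\theta}|^2\big)^{1/2}$, the pointwise inequality $\sum_{I} g_I^4 \leq \big(\sum_{I} g_I^2\big)^2$ (the embedding $\ell^2 \hookrightarrow \ell^4$) yields
\begin{equation*}
\Big(\sum_{I \in \mathfrak{I}(r_{\mathrm{b}})} \Big\| \Big(\sum_{\theta \in \Theta(I;\,r)} |f_{\theta}|^2 \Big)^{1/2} \Big\|_{L^4(\mathbb{R}^4)}^4 \Big)^{1/4} \leq \Big\| \Big(\sum_{\theta \in \Theta(r)} |f_{\theta}|^2 \Big)^{1/2} \Big\|_{L^4(\mathbb{R}^4)}.
\end{equation*}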
This completes the broad case and hence the proof of the theorem.
\end{proof}

\section{Proof of the forward square function inequality in \texorpdfstring{$\mathbb{R}^3$}{}}\label{forward SF sec}

In this section we establish the $L^2$ weighted forward square function estimate from Proposition~\ref{f SF prop}. Before we commence, it is useful to recall the basic setup. Let $\gamma \in \mathfrak{G}_3(\delta_0)$ for $0 < \delta_0 \ll 1$ and $\mathbf{e}_j\colon[-1,1] \to S^2$ for $1 \leq j \leq 3$ be the associated Frenet frame. Recall that this satisfies
\begin{equation}\label{fsq prelim 1}
\mathbf{e}_j(s) = \vec{e}_j + O(\delta_0) \qquad \textrm{for $1 \leq j \leq 3$ and $s \in I_0=[-\delta_0,\delta_0]$,}
\end{equation}
where the $\vec{e}_j$ denote the standard basis vectors. For $0 < r \leq 1$, recall that a \textit{$(0,r)$-Frenet box} is a set of the form
\begin{equation*}
\pi_{0,\gamma}(s;\,r) := \big\{ \xi \in \widehat{\mathbb{R}}^3 : |\inn{\mathbf{e}_1(s)}{\xi}| \leq r, \,\, 1/2 \leq |\inn{\mathbf{e}_2(s)}{\xi}| \leq 1, \,\, |\inn{\mathbf{e}_3(s)}{\xi}| \leq 1 \big\}
\end{equation*}
for some $s \in [-1,1]$. Proposition~\ref{f SF prop} concerns smooth frequency projections $\chi_{\pi}(D)$ where $\chi_{\pi}$ is a bump function adapted to a $(0,r)$-Frenet box $\pi$.

\subsection{Geometric observations}\label{fsq geo obs sec} We begin by reparametrising the sets $\pi_{0,\gamma}(s;r)$ using an argument similar to that of \S\ref{geo obs sec}. Define the functions $g_j\colon I_0 \to \mathbb{R}$ by $g_j(s) := -\mathbf{e}_{1j}(s)\, \mathbf{e}_{11}(s)^{-1}$ for $j = 2$, $3$ (note that $\mathbf{e}_{11}(s)$ is bounded away from $0$ by \eqref{fsq prelim 1}) so that
\begin{equation}\label{fsq Frenet 1}
\inn{\mathbf{e}_1(s)}{\xi} = \mathbf{e}_{11}(s) \big( \xi_1 - \xi_2g_2(s) - \xi_3g_3(s) \big).
\end{equation}
Thus, we have the containment property
\begin{equation}\label{fsq Frenet 2}
\pi_{0, \gamma}(s;\,r) \subseteq \theta(s;\,C r)
\end{equation}
where $\theta(s;\,r)$ is the region
\begin{equation*}
\theta(s;\,r) := \Big\{ \xi \in \widehat{\mathbb{R}}^3 : \Big|\xi_1 - \sum_{j=2}^3\xi_jg_j(s)\Big| < r \textrm{ and } 1/4 \leq |\xi_2| \leq 4, \, |\xi_3| \leq 4\Big\}.
\end{equation*}
We refer to the sets $\theta(s;\,r)$ as `plates'.\medskip

It is useful to note that the functions $g_j \colon I_0 \to \mathbb{R}$ satisfy a certain regularity condition. In particular, for each $\mathbf{a} = (a_2, a_3) \in \mathbb{R}^2$ define the function $g_{\mathbf{a}}(s) := a_2g_2(s) + a_3g_3(s)$. By differentiating \eqref{fsq Frenet 1} with respect to $s$ and evaluating the result at $\xi = (0, a_2, a_3)$, provided the parameter $\delta_0 > 0$ featured in \eqref{fsq prelim 1} is chosen sufficiently small, it follows that
\begin{equation}\label{fsq Frenet 3}
|g_{\mathbf{a}}'(s)| \sim 1 \qquad \textrm{for all $\mathbf{a} \in [1/4, 4] \times [-1,1]$.}
\end{equation}
Indeed, this is a simple consequence of the Frenet equations.\medskip

We also observe a dual version of the containment condition \eqref{fsq Frenet 2}.
In particular, if we define the dual Frenet box and dual plate
\begin{align*}
\pi^*_{0,\gamma}(s;r) &:= \big\{x\in \mathbb{R}^3 : |\inn{\mathbf{e}_1(s)}{x}| \leq r^{-1} \textrm{ and } |\inn{\mathbf{e}_j(s)}{x}| \leq 1 \textrm{ for $j=2$, $3$} \big\}, \\
\theta^*(s;r) &:= \big\{x\in \mathbb{R}^3 : |x_1| \leq r^{-1} \textrm{ and } |x_j + g_j(s)x_1| \leq 4 \textrm{ for $j=2$, $3$} \big\},
\end{align*}
then it follows that $\pi^*_{0,\gamma}(s; r) \subseteq \theta^*(s; C^{-1} r)$. To see this, we first observe the identity
\begin{equation}\label{fsq Frenet 4}
\begin{bmatrix} \inn{x}{\mathbf{e}_2(s)} \\ \inn{x}{\mathbf{e}_3(s)} \end{bmatrix} = \begin{bmatrix} \mathbf{e}_{22}(s) & \mathbf{e}_{23}(s) \\ \mathbf{e}_{32}(s) & \mathbf{e}_{33}(s) \end{bmatrix} \begin{bmatrix} x_2 + g_2(s)x_1 \\ x_3 + g_3(s)x_1 \end{bmatrix},
\end{equation}
which follows from the orthogonality between the Frenet vectors $\big(\mathbf{e}_j(s)\big)_{j=1}^3$. Since the right-hand $2\times 2$ matrix is a small perturbation of the identity, the claimed containment property follows.

\subsection{The iteration scheme} Our proof of Proposition~\ref{f SF prop} uses an iteration argument. This is based on the approach of Carbery and the fourth author in \cite[Proposition 4.6]{CS1995}, where a related inequality for the C\'ordoba sectorial square function was obtained. Driving the iteration scheme is an elementary pointwise square function bound due to Rubio de Francia~\cite{RdF1983}. Here it is convenient to state a slight generalisation of this result.

\begin{lemma}\label{RdF lem}
Let $\psi \in \mathscr{S}(\widehat{\mathbb{R}}^n)$, $A \in \mathrm{GL}(\mathbb{R}, n)$ and $G \colon \mathbb{Z}^m \to \mathbb{R}^n$. For all $N \in \mathbb{N}$ the pointwise inequality
\begin{equation*}
\sum_{\nu \in \mathbb{Z}^m} \big|\psi\big(AD - G(\nu)\big)f(x)\big|^2 \lesssim_{\psi, N} \sup_{\nu_2 \in \mathbb{Z}^m} \sum_{\nu_1 \in \mathbb{Z}^m} e^{- |G(\nu_1) - G(\nu_2)|/2} \int_{\mathbb{R}^n} |f(x - A^{\top} y)|^2 (1 + |y|)^{-N}\,\mathrm{d} y
\end{equation*}
holds for all $f \in \mathscr{S}(\mathbb{R}^n)$.
\end{lemma}

\begin{proof}
The case where $G \colon \mathbb{Z}^n \to \mathbb{Z}^n$ is the identity map is proven in \cite{RdF1983}. The argument can be generalised to prove the above lemma, by replacing an application of Plancherel's theorem with a $T^*T$ argument involving the Schur test. For convenience, the details of the argument are presented in Appendix~\ref{RdF appendix}.
\end{proof}

To describe the iteration step, we first define smooth cutoff functions adapted to the plates $\theta$ defined above.
As usual, let $\eta \in C^{\infty}_c(\mathbb{R})$ satisfy $\eta(u) = 1$ for $|u| \leq 1/2$ and $\mathrm{supp}\, \eta \subseteq [-1,1]$ and define the multipliers
\begin{equation}\label{m r nu}
m_r^{\nu}(\xi) := \eta\Big(r^{-1}\big( \xi_1 - \sum_{j=2}^3 \xi_j g_j(s_{\nu})\big) \Big) \qquad \textrm{for $\nu \in \mathbb{Z}$ and $s_{\nu} := r \nu$.}
\end{equation}
Let $b(\xi) = \tilde{\beta}(4^{-1}\xi_2) \, \eta(4^{-1} \xi_3)$ where here $\tilde{\beta}$ is as defined in \eqref{chi pi}, so that $(m_r^{\nu}\cdot b)(\xi) = 1$ if $\xi \in \theta(s_{\nu};r)$. For the iteration scheme, we in fact work with truncated versions of the plates. Given $K \geq 1$, $-1 \leq s \leq 1$, $0 < r \leq 1$ and $\mathbf{a}=(a_2,a_3) \in \mathbb{R}^2$, consider the truncated plate
\begin{equation*}
\theta^{\mathbf{a}, K}(s;\,r) := \Big\{ \xi \in \widehat{\mathbb{R}}^3 : \Big|\xi_1 - \sum_{j=2}^3 \xi_j g_j(s)\Big| \leq r \textrm{ and } |\xi_j - a_j| \leq K^{-1} \textrm{ for $j=2$, $3$}\Big\}.
\end{equation*}
Correspondingly, we let $\zeta \in C^{\infty}_c(\mathbb{R})$ satisfy $\mathrm{supp}\, \zeta \subseteq [-1,1]$ and $\sum_{k \in \mathbb{Z}} \zeta(\,\cdot - k) \equiv 1$ and decompose
\begin{equation}\label{fsq b dec}
b = \sum_{\mathbf{a} \in K^{-1}\mathbb{Z}^2} b_{\mathbf{a}} \qquad \textrm{where} \qquad b_{\mathbf{a}}(\xi) := \prod_{j=2}^3\zeta\big(K(\xi_j - a_j)\big) \, b(\xi).
\end{equation}
For $\mathbf{r} := (r_1, r_2,r_3) \in (0,1]^3$ and $s \in [-1,1]$ let $T_{\mathbf{e}, \mathbf{r}}(s)$ denote the parallelepiped consisting of all vectors $x \in \mathbb{R}^3$ satisfying $|\inn{x}{\mathbf{e}_j(s)}| \leq r_j^{-1}$ for $1 \leq j \leq 3$. These sets should be thought of as scaled versions of the dual Frenet box $\pi_{0,\gamma}^*(s;r)$ introduced in \S\ref{fsq geo obs sec}. Consider the weighted averaging and Nikodym-type maximal operators associated to these sets, given by
\begin{equation}\label{wtd 3d Nik ops}
\widetilde{\mathcal{A}}_{\mathbf{e}, \mathbf{r}} g (x;s) := \int_{\mathbb{R}^3} g(x-y) \psi_{\,T_{\mathbf{e},\mathbf{r}}(s)}(y) \, \mathrm{d} y \qquad \textrm{and} \qquad \widetilde{\mathcal{N}}_{\mathbf{e}, \mathbf{r}} g(x) := \sup_{s \in [-1,1]} | \widetilde{\mathcal{A}}_{\mathbf{e}, \mathbf{r}} g (x;s)|
\end{equation}
where
\begin{equation}\label{it max fn}
\psi_{\,T_{\mathbf{e},\mathbf{r}}(s)}(y) := \Big(\prod_{j=1}^3 r_j\Big) \, \Big(1 + \sum_{j=1}^3 r_j|\inn{\mathbf{e}_j(s)}{y}|\Big)^{-300}.
\end{equation}
Here the subscript $\mathbf{e}$ refers to the Frenet frame $\mathbf{e} := (\mathbf{e}_1, \mathbf{e}_2, \mathbf{e}_3)$. With the above definitions, the key iteration step is as follows.

\begin{proposition}\label{it prop}
Let $0 < r < 1$, $K \geq 1$, $\tilde{r} = Kr$, $\mathbf{r}:=(r, K^{-1}, K^{-1})$ and $\mathbf{a}=(a_2,a_3) \in [1/4, 4] \times [-1,1]$.
With the above definitions,
\begin{equation*}
\int_{\mathbb{R}^3} \sum_{\nu \in \mathbb{Z}} \big|(m_r^{\nu} \cdot b_{\mathbf{a}})(D)f(x)\big|^2 \,w(x)\,\mathrm{d} x \lesssim \int_{\mathbb{R}^3} \sum_{\tilde{\nu} \in \mathbb{Z}} \big|(m_{\tilde{r}}^{\tilde{\nu}}\cdot b_{\mathbf{a}})(D)f(x)\big|^2\, \widetilde{\mathcal{N}}_{\mathbf{e}, \mathbf{r}} \circ \widetilde{\mathcal{N}}_{\mathbf{e}, \mathbf{r}}\, w(x)\,\mathrm{d} x
\end{equation*}
for any non-negative $w \in L^1_{\mathrm{loc}}(\mathbb{R}^3)$.
\end{proposition}

\begin{proof}
The proof is based on the following simple geometric observation, which motivates the use of the truncation. If $|s - \tilde{s}| \leq K r$, then the plates $\theta^{\mathbf{a}, K}(s;\,r)$, $\theta^{\mathbf{a}, K}(\tilde{s};\,r)$ are essentially parallel translates of one another. More precisely, if $\xi \in \theta^{\mathbf{a}, K}(s;\,r)$, then
\begin{equation*}
\Big| \xi_1 - \sum_{j=2}^3a_j\big(g_j(s) - g_j(\tilde{s})\big) - \sum_{j=2}^3 \xi_j g_j(\tilde{s})\Big| \leq \Big|\xi_1 - \sum_{j=2}^3 \xi_j g_j(s)\Big| + \sum_{j=2}^3 |a_j - \xi_j||g_j(s) - g_j(\tilde{s})| \lesssim_{\mathbf{g}} r
\end{equation*}
and, consequently, there exists some constant $C_{\mathbf{g}}$ such that
\begin{equation}\label{sf it 1}
\theta^{\mathbf{a}, K}(s;\,r) - \sum_{j=2}^3a_j\big(g_j(s) - g_j(\tilde{s})\big)\vec{e}_1 \subseteq \theta^{\mathbf{a}, K}\big(\tilde{s};\,C_{\mathbf{g}} r\big).
\end{equation}
In light of this observation, define the multipliers
\begin{equation}\label{sf it 2}
m^{\tilde{\nu}, \nu}_{\tilde{r}, r}(\xi) := \frac{\eta\Big((2C_{\mathbf{g}}r)^{-1}\Big( \xi_1 - \sum_{j=2}^3a_j\big(g_j(s_{\nu}) - g_j(\tilde{s}_{\tilde{\nu}})\big) - \sum_{j=2}^3 \xi_j g_j(\tilde{s}_{\tilde{\nu}})\Big) \Big)\, \tilde{b}_{\mathbf{a}}(\xi)} {\sum_{i=-1}^1 m_{\tilde r}^{\tilde \nu+i} (\xi)}
\end{equation}
for $\tilde{\nu}$, $\nu \in \mathbb{Z}$ and $\tilde{s}_{\tilde{\nu}} := \tilde{r} \tilde{\nu}$, $s_{\nu} := r \nu$ and $\tilde{r}=Kr$, where $ \tilde{b}_{\mathbf{a}}(\xi) := \prod_{j=2}^3\eta(K(\xi_j - a_j))$ so that $b_{\mathbf{a}} = \tilde{b}_{\mathbf{a}} \cdot b_{\mathbf{a}}$. Thus, in view of \eqref{sf it 1}, we have
\begin{equation} \label{reproducing-property}
m_r^{\nu} \cdot b_{\mathbf{a}} = m^{\tilde{\nu}, \nu}_{\tilde{r}, r} \cdot m_r^{\nu} \cdot b_{\mathbf{a}} \sum_{i=-1}^1 m_{\tilde r}^{\tilde \nu+i} \qquad \textrm{whenever $|s_{\nu} - \tilde{s}_{\tilde{\nu}}| \leq Kr=:\tilde{r}$.}
\end{equation}
Furthermore, since for fixed $\tilde{\nu}$ the multipliers $m_{\tilde{r},r}^{\tilde{\nu},\nu}$ correspond to essentially parallel frequency regions for $|s_\nu - \tilde{s}_{\tilde{\nu}}| \leq 5\tilde{r}$, Lemma~\ref{RdF lem} implies they satisfy a weighted $L^2$ inequality. Indeed, recall from \eqref{fsq Frenet 3} that the functions $g_{\mathbf{a}}(s) := a_2g_2(s) + a_3g_3(s)$ satisfy the uniform regularity condition $|g_{\mathbf{a}}'(s)| \sim 1$; recall that $\mathbf{a}=(a_2,a_3) \in [1/4,4] \times [-1,1]$.
From this we deduce that
\begin{equation*}
\sup_{\nu_2 \in \mathbb{Z}} \sum_{\nu_1 \in \mathbb{Z}} e^{-r^{-1}|g_{\mathbf{a}}(r \nu_1) - g_{\mathbf{a}}(\tilde{r} \nu_2)|/2} \lesssim 1,
\end{equation*}
where the above inequality holds with a constant uniform in both $r$ and $\mathbf{a}$. Thus, recalling the definition of the multipliers $m_{\tilde{r},r}^{\tilde{\nu}, \nu}$ from \eqref{sf it 2}, Lemma~\ref{RdF lem} implies that for fixed $\tilde{\nu} \in \mathbb{Z}$,
\begin{equation}\label{sf it 3}
\int_{\mathbb{R}^3} \sum_{\substack{ \nu \in \mathbb{Z} \\ |s_{\nu} - \tilde{s}_{\tilde{\nu}}|\leq 5 \tilde{r}} } |m_{\tilde{r},r}^{\tilde{\nu}, \nu}(D)f(x)|^2 w(x)\,\mathrm{d} x \lesssim \int_{\mathbb{R}^3} |f(x)|^2 \widetilde{\mathcal{N}}_{\mathbf{e},\mathbf{r}} w(x)\,\mathrm{d} x;
\end{equation}
indeed the inequality holds with $\widetilde{\mathcal{N}}_{\mathbf{e},\mathbf{r}} w(x)$ replaced by the single average $\widetilde{\mathcal{A}}_{\mathbf{e},\mathbf{r}}w(x; \tilde{s}_{\tilde{\nu}})$, but there is no loss in taking the supremum over $s \in [-1,1]$ in view of other appearances of $\widetilde{\mathcal{N}}_{\mathbf{e},\mathbf{r}}$ (see \eqref{first step} below). From \eqref{reproducing-property} we get
\begin{equation*}
\sum_{\nu \in \mathbb{Z}} \big|(m_r^{\nu} \cdot b_{\mathbf{a}})(D)f(x)\big|^2 \lesssim \sum_{\substack{\tilde{\nu}, \nu \in \mathbb{Z} \\ |s_{\nu} - \tilde{s}_{\tilde{\nu}}|\leq 5 \tilde{r}} } \big|(m_r^{\nu}\cdot \tilde{b}_{\mathbf{a}})(D) \circ m^{\tilde{\nu}, \nu}_{\tilde{r}, r}(D) \circ (m_{\tilde{r}}^{\tilde{\nu}}\cdot b_{\mathbf{a}})(D)f(x)\big|^2.
\end{equation*}
By the Schwartz decay property of $\widecheck{\eta}$, the convolution kernel associated to the multiplier operator $(m_r^{\nu} \cdot \tilde{b}_{\mathbf{a}})(D)$ satisfies
\begin{equation*}
|(m_r^{\nu} \cdot \tilde{b}_{\mathbf{a}})\;\widecheck{}\;(x)| \lesssim_N rK^{-2} \, \Big(1 + r|x_1| + K^{-1}\sum_{j=2}^3|x_j + x_1 g_j(s)|\Big)^{-100} \lesssim \psi_{\,T_{\mathbf{e},\mathbf{r}}(s)}(x)
\end{equation*}
where the function $\psi_{\,T_{\mathbf{e},\mathbf{r}}(s)}$ is the $L^1$-normalised smooth cutoff defined in \eqref{it max fn}. To justify the second inequality in the above display we use \eqref{fsq Frenet 4}, which allows us to deduce that $\sum_{j=2}^3|x_j + x_1g_j(s)| \gtrsim \sum_{j=2}^3|\inn{\mathbf{e}_j(s)}{x}|$. Combining the preceding observations with a simple Cauchy--Schwarz and Fubini argument,
\begin{equation}\label{first step}
\int_{\mathbb{R}^3} \sum_{\nu \in \mathbb{Z}} \big|(m_r^{\nu} \cdot b_{\mathbf{a}})(D)f(x)\big|^2 \,w(x)\,\mathrm{d} x \lesssim \sum_{\tilde{\nu} \in \mathbb{Z}} \, \int_{\mathbb{R}^3} \! \sum_{\substack{\nu \in \mathbb{Z} \\ |s_{\nu} - \tilde{s}_{\tilde{\nu}}|\leq 5 \tilde{r}} } \big|m^{\tilde{\nu}, \nu}_{\tilde{r}, r}(D) \circ (m_{\tilde{r}}^{\tilde{\nu}}\cdot b_{\mathbf{a}})(D)f(x)\big|^2 \widetilde{\mathcal{N}}_{\mathbf{e},\mathbf{r}} w(x)\,\mathrm{d} x.
\end{equation}
On the other hand, \eqref{sf it 3} implies
\begin{equation*}
\int_{\mathbb{R}^3} \sum_{\substack{\nu \in \mathbb{Z} \\ |s_{\nu} - \tilde{s}_{\tilde{\nu}}|\leq 5 \tilde{r}} } \big|m^{\tilde{\nu}, \nu}_{\tilde{r}, r}(D) \circ (m_{\tilde{r}}^{\tilde{\nu}}\cdot b_{\mathbf{a}})(D)f(x)\big|^2 \widetilde{\mathcal{N}}_{\mathbf{e},\mathbf{r}}\, w(x)\,\mathrm{d} x \lesssim \int_{\mathbb{R}^3} \big|(m_{\tilde{r}}^{\tilde{\nu}}\cdot b_{\mathbf{a}})(D)f(x)\big|^2 \widetilde{\mathcal{N}}_{\mathbf{e},\mathbf{r}} \circ \widetilde{\mathcal{N}}_{\mathbf{e},\mathbf{r}}\, w(x)\,\mathrm{d} x.
\end{equation*}
The two previous displays combine to give the desired estimate.
\end{proof}

\subsection{Proof of the \texorpdfstring{$L^2$}{}-weighted estimate}\label{L2 wtd proof subsec} Proposition~\ref{it prop} is now repeatedly applied to prove Proposition~\ref{f SF prop}.

\begin{proof}[Proof of Proposition~\ref{f SF prop}]
First observe that by the definition of $\pi$ in \eqref{chi pi}, the containment property \eqref{fsq Frenet 2} and the definition of $m_r^{\nu}$ in \eqref{m r nu}, for each $\pi \in \mathcal{P}_0(r)$ there is an associated $\nu \in \mathbb{Z}$ such that $m_r^\nu(\xi)=1$ for $\xi \in \mathrm{supp}\, \chi_\pi$. Thus, a simple Cauchy--Schwarz and Fubini argument yields
\begin{equation*}
\int_{\mathbb{R}^3} \sum_{\pi \in \mathcal{P}_0(r)} |\chi_{\pi}(D)f(x)|^2 w(x)\,\mathrm{d} x \lesssim \int_{\mathbb{R}^3} \sum_{\nu \in \mathbb{Z}} |(m_r^{\nu}\cdot b)(D)f(x)|^2 \widetilde{\mathcal{N}}_{\mathbf{e}, \mathbf{r}_*}w(x)\,\mathrm{d} x,
\end{equation*}
where $\mathbf{r}_* := (r, 1, 1)$. Take $K := r^{-\varepsilon/8}$ and decompose $b = \sum_{\,\mathbf{a} \in K^{-1} \mathbb{Z}^2} b_{\mathbf{a}}$ as in \eqref{fsq b dec}. By a pigeonholing argument, it follows that there exists a choice of $\mathbf{a} \in [1/4,4]\times [-1,1]$ satisfying
\begin{equation*}
\int_{\mathbb{R}^3} \sum_{\pi \in \mathcal{P}_0(r)} |\chi_{\pi}(D)f(x)|^2 w(x)\,\mathrm{d} x \lesssim r^{-\varepsilon/2} \int_{\mathbb{R}^3} \sum_{\nu \in \mathbb{Z}} |(m_r^{\nu}\cdot b_{\mathbf{a}})(D)f(x)|^2 \widetilde{\mathcal{N}}_{\mathbf{e}, \mathbf{r}_*}w(x)\,\mathrm{d} x.
\end{equation*}
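To see where the factor $r^{-\varepsilon/2}$ can come from (one possible accounting of the pigeonholing): only $O(K^2)$ of the functions $b_{\mathbf{a}}$ are non-vanishing on the support of $b$, so applying the Cauchy--Schwarz inequality in $\mathbf{a}$ and then bounding the resulting sum over $\mathbf{a}$ by $O(K^2)$ times its largest term costs at most a factor of $O(K^2)\cdot O(K^2) = O(r^{-\varepsilon/2})$.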
Define the sequence
\begin{equation*}
\mathbf{r}_M:=(r_M, K^{-1}, K^{-1}) \quad \text{where $\,\, r_M:= K^M r\,\,$ for $\,\,M \geq 0$}
\end{equation*}
and recursively define a sequence of maximal operators by
\begin{equation*}
\widetilde{\mathcal{N}}_{\mathbf{e}, \mathbf{r}}^{\, 0}:=\widetilde{\mathcal{N}}_{\mathbf{e}, \mathbf{r}_0} \circ \widetilde{\mathcal{N}}_{\mathbf{e}, \mathbf{r}_0} \circ \widetilde{\mathcal{N}}_{\mathbf{e}, \mathbf{r}_*} \qquad \text{ and } \qquad \widetilde{\mathcal{N}}_{\mathbf{e}, \mathbf{r}}^M:= \widetilde{\mathcal{N}}_{\mathbf{e}, \mathbf{r}_{M}} \circ \widetilde{\mathcal{N}}_{\mathbf{e}, \mathbf{r}_{M}} \circ \widetilde{\mathcal{N}}_{\mathbf{e}, \mathbf{r}}^{M-1} \quad \text{ for $M \geq 1$.}
\end{equation*}
We now repeatedly apply Proposition~\ref{it prop} to deduce that
\begin{equation}\label{fsq 1}
\int_{\mathbb{R}^3} \sum_{\nu \in \mathbb{Z}} |(m_r^{\nu} \cdot b_{\mathbf{a}})(D)f(x)|^2 \widetilde{\mathcal{N}}_{\mathbf{e}, \mathbf{r}_*}w(x)\,\mathrm{d} x \leq C^{M} \int_{\mathbb{R}^3} \sum_{\nu \in \mathbb{Z}} |(m_{r_M}^{\nu} \cdot b_{\mathbf{a}})(D)f(x)|^2 \widetilde{\mathcal{N}}_{\mathbf{e}, \mathbf{r}}^{M-1}w(x)\,\mathrm{d} x,
\end{equation}
provided $r_M \leq 1$. In particular, if $M := \lfloor 8/\varepsilon \rfloor - 1$, then $r^{\varepsilon/8}\leq r_M \leq 1$ and, consequently, there are only $O(r^{-\varepsilon/8})$ values of $\nu$ which contribute to the right-hand sum in \eqref{fsq 1}. Thus, one readily deduces that
\begin{equation*}
\int_{\mathbb{R}^3} \sum_{\nu \in \mathbb{Z}} |(m_{r_M}^{\nu}\cdot b_{\mathbf{a}})(D)f(x)|^2 \widetilde{\mathcal{N}}^{M-1}_{\mathbf{e}, \mathbf{r}}w(x)\,\mathrm{d} x \lesssim r^{-\varepsilon/8} \int_{\mathbb{R}^3} |f(x)|^2 \widetilde{\mathcal{N}}_{\gamma,r}^{\,(\varepsilon)} \, w(x)\,\mathrm{d} x
\end{equation*}
where $\widetilde{\mathcal{N}}_{\gamma,r}^{\, (\varepsilon)} := \widetilde{\mathcal{N}}_{\mathbf{e}, \mathbf{r}_M} \circ \widetilde{\mathcal{N}}^{M-1}_{\mathbf{e}, \mathbf{r}}$. Combining the preceding observations concludes the proof of the $L^2$ weighted inequality, with the above choice of maximal operator. It remains to show that the iterated maximal operator $\widetilde{\mathcal{N}}_{\gamma,r}^{\, (\varepsilon)}$ satisfies the $L^2$ bound from \eqref{f SF eq}. However, this is an immediate consequence of Proposition~\ref{3d Nik prop} of the following subsection.
\end{proof}

\subsection{Boundedness of the maximal functions} From the proof of Proposition~\ref{f SF prop}, we see that the maximal function $\widetilde{\mathcal{N}}_{\gamma,r}^{\, (\varepsilon)}$ is obtained by repeatedly composing operators of the form $\widetilde{\mathcal{N}}_{\mathbf{e}, \mathbf{r}}$, as defined in \eqref{wtd 3d Nik ops}, where:
\begin{itemize}
\item The family of curves $\mathbf{e}$ corresponds to the Frenet frame $(\mathbf{e}_1, \mathbf{e}_2, \mathbf{e}_3)$ associated to $\gamma$;
\item The scales $\mathbf{r} = (r_1, r_2, r_3)$ depend on $r$ and $\varepsilon$ and vary over the different factors of the composition. Each featured tuple $\mathbf{r} = (r_1, r_2, r_3)$ satisfies
\begin{equation*}
\mathrm{ecc}(\mathbf{r}) \leq r^{-1}
\end{equation*}
where the \textit{eccentricity} $\mathrm{ecc}(\mathbf{r})$ is the ratio of $\max_j r_j$ and $\min_j r_j$.
\end{itemize}
In particular, to prove the $L^2$ bound \eqref{f SF eq} it suffices to show that, for all $\varepsilon_\circ>0$,
\begin{equation}\label{3d Nik reduction}
\|\widetilde{\mathcal{N}}_{\mathbf{e}, \mathbf{r}}\|_{L^2(\mathbb{R}^3) \to L^2(\mathbb{R}^3)} \lesssim_{\varepsilon_{\circ}} \mathrm{ecc}(\mathbf{r})^{\varepsilon_{\circ}}.
\end{equation}
To prove \eqref{3d Nik reduction}, we will in fact work with a more general setup, replacing $\mathbf{e}$ with a general family of smooth curves in $\mathbb{R}^n$ satisfying a non-degeneracy hypothesis. Let $\mathbf{e} := (\mathbf{e}_1, \dots, \mathbf{e}_n)$ where $\mathbf{e}_j \colon [-1,1] \to S^{n-1}$ is a smooth curve in the unit sphere in $\mathbb{R}^n$ for $1 \leq j \leq n$. Suppose these curves satisfy
\begin{equation*}
\Big| \bigwedge_{j=1}^n \mathbf{e}_j(s) \Big| \gtrsim 1 \qquad \textrm{for all $s \in [-1,1]$.}
\end{equation*}
Note that the $\mathbf{e}_j$ notation was previously reserved for the Frenet frame. In applications, we always take the $\mathbf{e}_j$ to be the Frenet vectors, and therefore there should be no conflict in the above choice of notation. Given a tuple $\mathbf{r} := (r_1,\dots, r_n) \in (0,\infty)^n$ and $s \in [-1,1]$ define the parallelepiped
\begin{equation*}
T_{\mathbf{e}, \mathbf{r}}(s) := \Big\{ x \in \mathbb{R}^n : x = \sum_{j=1}^n \lambda_j \mathbf{e}_j(s) \textrm{ where } \lambda_j \in [-r_j^{-1}, r_j^{-1}] \textrm{ for $1 \leq j \leq n$} \Big\}.
\end{equation*}
Associated to these sets are the averaging operators and the maximal operator
\begin{equation}\label{Nik ops}
\mathcal{A}_{\mathbf{e},\mathbf{r}}f(x;s) := \fint_{T_{\mathbf{e}, \mathbf{r}}(s)} f(x-y)\,\mathrm{d} y \quad \textrm{and} \quad \mathcal{N}_{\mathbf{e},\mathbf{r}}f(x) := \sup_{s \in [-1,1]} |\mathcal{A}_{\mathbf{e},\mathbf{r}}f(x;s)|
\end{equation}
defined for $f \in L^1_{\mathrm{loc}}(\mathbb{R}^n)$.
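As a sanity check on these definitions (an observation we do not use directly): in the isotropic case $r_1 = \cdots = r_n$, the non-degeneracy condition above guarantees that $T_{\mathbf{e},\mathbf{r}}(s)$ is comparable to a Euclidean ball of radius $\sim r_1^{-1}$, uniformly in $s$, so $\mathcal{N}_{\mathbf{e},\mathbf{r}}$ is pointwise dominated by the Hardy--Littlewood maximal function and is trivially bounded on $L^2(\mathbb{R}^n)$. The interest therefore lies in tuples $\mathbf{r}$ of large eccentricity.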
\begin{proposition}\label{3d Nik prop} With the above definitions, for all $\varepsilon > 0$ we have the norm bound
\begin{equation*}
\|\mathcal{N}_{\mathbf{e},\mathbf{r}}\|_{L^2(\mathbb{R}^n) \to L^2(\mathbb{R}^n)} \lesssim_{\mathbf{e}, \varepsilon} \mathrm{ecc}(\mathbf{r})^{\varepsilon},
\end{equation*}
where the \textit{eccentricity} $\mathrm{ecc}(\mathbf{r}) \geq 1$ is defined to be the ratio of $\max_j r_j$ and $\min_j r_j$.
\end{proposition}

This proposition is based on a classical maximal bound due to C\'ordoba~\cite{Cordoba1982}. The details of the proof are provided below.\medskip

We generalise the weighted operators introduced in \eqref{wtd 3d Nik ops} by setting
\begin{equation}\label{wtd Nik ops}
\widetilde{\mathcal{A}}_{\mathbf{e}, \mathbf{r}} f (x;s) := \int_{\mathbb{R}^n} f(x-y) \psi_{\,T_{\mathbf{e},\mathbf{r}}(s)}(y) \, \mathrm{d} y \quad \textrm{and} \quad \widetilde{\mathcal{N}}_{\mathbf{e}, \mathbf{r}} f(x) := \sup_{s \in [-1,1]} | \widetilde{\mathcal{A}}_{\mathbf{e}, \mathbf{r}} f (x;s)|
\end{equation}
where $\psi_{\,T_{\mathbf{e},\mathbf{r}}(s)}$ is a smooth weight function adapted to the parallelepiped $T_{\mathbf{e},\mathbf{r}}(s)$, given by
\begin{equation}\label{gen it max fn}
\psi_{\,T_{\mathbf{e},\mathbf{r}}(s)}(y) := \big(\prod_{j=1}^n r_j\big) \, \big(1 + \sum_{j=1}^n r_j|(\bm{E}(s)^{-1} y)_j|\big)^{-100n}
\end{equation}
where $\bm{E}(s)$ denotes the $n \times n$ matrix whose $j$th column is $\mathbf{e}_j(s)$ for $1 \leq j \leq n$. If $(\mathbf{e}_j(s))_{j=1}^n$ forms an orthonormal frame, then $(\bm{E}(s)^{-1}y)_j = (\bm{E}(s)^{\top} y)_j = \inn{\mathbf{e}_j(s)}{y}$ and so \eqref{gen it max fn} generalises the definition \eqref{it max fn}. Note that the operators in \eqref{wtd Nik ops} correspond to weighted versions of the averaging operator and Nikodym maximal function in \eqref{Nik ops}. Moreover, by dominating $\psi_{\,T_{\mathbf{e},\mathbf{r}}(s)}$ by a weighted sum of characteristic functions, it is clear that Proposition~\ref{3d Nik prop} implies analogous $L^2$ bounds for the $\widetilde{\mathcal{N}}_{\mathbf{e}, \mathbf{r}}$ operators.\medskip

In view of the preceding discussion, the estimate \eqref{f SF eq} for the maximal function $\widetilde{\mathcal{N}}_{\gamma,r}^{\, (\varepsilon)}$ appearing in Proposition~\ref{f SF prop} follows as a consequence of Proposition~\ref{3d Nik prop}.

\begin{proof}[Proof of Proposition~\ref{3d Nik prop}] Write $R := \mathrm{ecc}(\mathbf{r})$ and let $\varepsilon > 0$ be given. We begin with some basic reductions. By pigeonholing, it suffices to show
\begin{equation*}
\|\mathcal{N}_{\mathbf{e},\mathbf{r}}\|_{L^2(\mathbb{R}^n) \to L^2(\mathbb{R}^n)} \lesssim_{\varepsilon} R^{\varepsilon/2}
\end{equation*}
where now the maximal operator $\mathcal{N}_{\mathbf{e},\mathbf{r}}$ is redefined so that the supremum is taken over some subinterval $I_{\varepsilon} \subseteq [-1,1]$ of length $R^{-\varepsilon/2}$ rather than the whole of $[-1,1]$.
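To see that this restricted bound suffices, one may argue as follows: cover $[-1,1]$ by $O(R^{\varepsilon/2})$ subintervals $I$ of length $R^{-\varepsilon/2}$ and, for each $I$, let $\mathcal{N}^{I}_{\mathbf{e},\mathbf{r}}$ denote the corresponding restricted maximal operator. Since
\begin{equation*}
\mathcal{N}_{\mathbf{e},\mathbf{r}}f(x) \leq \sum_{I} \mathcal{N}^{I}_{\mathbf{e},\mathbf{r}}f(x) \qquad \textrm{for all $x \in \mathbb{R}^n$,}
\end{equation*}
the triangle inequality yields $\|\mathcal{N}_{\mathbf{e},\mathbf{r}}\|_{L^2(\mathbb{R}^n) \to L^2(\mathbb{R}^n)} \lesssim_{\varepsilon} R^{\varepsilon/2} \cdot R^{\varepsilon/2} = R^{\varepsilon}$, which is the desired estimate.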
Furthermore, if $|s_1-s_2| \leq R^{-1}$, then $T_{\mathbf{e}, \mathbf{r}}(s_1)$ and $T_{\mathbf{e}, \mathbf{r}}(s_2)$ define essentially the same parallelepiped, and therefore we may further restrict the supremum to some dyadic $R^{-1}$-net $\mathfrak{S}_{\varepsilon}$ in $I_{\varepsilon}$.\medskip

Let $a \in [-1,1]$ denote the centre of the interval $I_{\varepsilon}$ and $N := \lceil 1/\varepsilon \rceil$. For $1 \leq j \leq n$ let $p_j$ denote the degree $N-1$ Taylor polynomial of $\mathbf{e}_j$ centred at $a$ and define $\bm{p} := (p_1, \dots, p_n)$. By Taylor's theorem,
\begin{equation*}
|p_j (s) - \mathbf{e}_j(s)| \lesssim_{\gamma} R^{-N\varepsilon} \leq R^{-1} \qquad \textrm{for all $s \in I_{\varepsilon}$}
\end{equation*}
and therefore there exists a constant $C \geq 1$, independent of $\mathbf{r}$, such that
\begin{equation*}
T_{\bm{p}, C^{-1}\mathbf{r}}(s) \subseteq T_{\mathbf{e}, \mathbf{r}}(s) \subseteq T_{\bm{p}, C \mathbf{r}}(s) \qquad \textrm{for all $s \in I_{\varepsilon}$.}
\end{equation*}
In light of this observation, henceforth we may assume without loss of generality that the $\mathbf{e}_j$ are all polynomial mappings. Under this hypothesis, the $\mathbf{e}_j$ no longer map into the sphere; however, we may assume that over the domain $I_{\varepsilon}$ they map into, say, a $1/10$-neighbourhood of $S^{n-1}$.\medskip

Since the operators are all positive, it suffices to show
\begin{equation*}
\|\sup_{s \in \mathfrak{S}_{\varepsilon}} |\mathcal{A}_{\mathbf{e},\mathbf{r}}f(\,\cdot\,;s)| \|_{L^2(\mathbb{R}^n)} \lesssim_{\varepsilon} R^{\varepsilon} \|f\|_{L^2(\mathbb{R}^n)}
\end{equation*}
for all $f \in L^2(\mathbb{R}^n)$ continuous and non-negative. Fixing such an $f$, define the averages
\begin{equation*}
\mathcal{A}_{\omega, r} f(x) := \int_{\mathbb{R}} f(x - t\omega) \chi_r(t)\,\mathrm{d} t \qquad \textrm{for $\omega \in \mathbb{R}^n$ with $\big||\omega| - 1\big| < 1/10$ and $r > 0$,}
\end{equation*}
where $\chi_r(t) := r\,\chi_1(rt)$ for some $\chi_1 \in C^{\infty}_c(\mathbb{R})$ non-negative which satisfies $\chi_1(s) = 1$ for $|s| \leq 1$. Thus, by the Fubini--Tonelli theorem,
\begin{equation}\label{3d Nik 1}
\mathcal{A}_{\mathbf{e},\mathbf{r}}f(x;s) \lesssim \mathcal{A}_{\mathbf{e}_n(s), r_n} \circ \cdots \circ \mathcal{A}_{\mathbf{e}_1(s), r_1}f(x).
\end{equation}
Writing $\mathcal{A}_{\mathbf{e}_j}f(x;s) := \mathcal{A}_{\mathbf{e}_j(s), 1}f(x)$, we may combine \eqref{3d Nik 1} with a simple scaling argument to reduce the problem to showing
\begin{equation}\label{3d Nik 2}
\|\sup_{s \in \mathfrak{S}_{\varepsilon}} |\mathcal{A}_{\mathbf{e}_j}f(\,\cdot\,;s)|\|_{L^2(\mathbb{R}^n)} \lesssim (\log R) \, \|f\|_{L^2(\mathbb{R}^n)} \qquad \textrm{for $1 \leq j \leq n$.}
\end{equation}
The previous display is essentially a consequence of a maximal estimate proved in \cite[p.223]{Cordoba1982}.
There similar maximal operators are considered for smooth curves $\gamma \colon [-1,1] \to S^{n-1}$ under the key hypothesis that $\gamma$ crosses any affine hyperplane a bounded number of times. Since we are considering polynomial curves $\mathbf{e}_j$, the fundamental theorem of algebra ensures either:
\begin{enumerate}[a)]
\item The curve $\mathbf{e}_j$ crosses any affine hyperplane a bounded number of times, where the bound depends on the degrees of the component polynomials, or
\item There exists an affine hyperplane which contains the image of $\mathbf{e}_j$.
\end{enumerate}
In the former case, we may deduce \eqref{3d Nik 2} directly through appeal to the result from \cite[p.223]{Cordoba1982}.\footnote{It is remarked that the argument in \cite{Cordoba1982} carries through for a curve which maps into a $1/10$-neighbourhood of the sphere (rather than the sphere itself), provided the curve satisfies the finite crossing property.} In the latter case, we may apply the maximal bound from \cite{Cordoba1982} over a lower dimensional affine subspace and combine this with a Fubini argument to again deduce the desired result.
\end{proof}

\subsection{Scaling properties}\label{Nik scale subsec}
We conclude this section with a discussion of the scaling properties of the maximal function $\widetilde{\mathcal{N}}_{\gamma,r}^{\, (\varepsilon)}$ and, in particular, fill in the gap in the proof of Proposition~\ref{L4 forward SF prop} by proving the Claim therein.\medskip

We begin by introducing a general setup for rescaling the operators $\widetilde{\mathcal{N}}_{\mathbf{e}, \mathbf{r}}$ when defined with respect to a Frenet frame; as in the previous subsection, here we work in general dimensions. Fix a non-degenerate curve $\gamma \colon [-1,1] \to \mathbb{R}^n$ with $\gamma \in \mathfrak{G}(\delta)$ and let $\sigma \in [-1,1]$ and $0 < \lambda < 1$ be such that $[\sigma - \lambda, \sigma + \lambda] \subseteq [-1,1]$. Consider the rescaled curve
\begin{equation*}
\gamma_{\sigma, \lambda}(\tilde{s}) := \big([\gamma]_{\sigma, \lambda}\big)^{-1}\big(\gamma(\sigma + \lambda \tilde{s}) - \gamma(\sigma)\big)
\end{equation*}
as defined in Definition~\ref{rescaled curve def}. Let $\mathbf{e} = (\mathbf{e}_1, \dots, \mathbf{e}_n)$ denote the Frenet frame defined with respect to $\gamma$ and $\tilde{\mathbf{e}} = (\tilde{\mathbf{e}}_1, \dots, \tilde{\mathbf{e}}_n)$ denote the Frenet frame defined with respect to $\widetilde{\gamma} := \gamma_{\sigma, \lambda}$.
We suppose $\mathbf{r} = (r_1, \dots, r_n) \in (0,1]^n$ satisfies
\begin{equation}\label{Nik scale hyp}
r_i \leq \lambda r_{i+1} \qquad \textrm{for $1 \leq i \leq n-1$}
\end{equation}
and define $\tilde{\mathbf{r}} := D_{\lambda} \cdot \mathbf{r}$ where $D_{\lambda} := \mathrm{diag}(\lambda, \dots, \lambda^n)$ is as in \eqref{gamma transformation}.\medskip

\begin{lemma}\label{gen Nik scale lem} If $f \in L^1_{\mathrm{loc}}(\mathbb{R}^n)$ is non-negative, then, with the above definitions,
\begin{equation}\label{gen Nik scale eq}
\big([\gamma]_{\sigma,\lambda}\big)^{-1} \circ \widetilde{\mathcal{N}}_{\tilde{\mathbf{e}}, \tilde{\mathbf{r}}} \circ [\gamma]_{\sigma,\lambda} \cdot f(x) \lesssim_{\gamma} \widetilde{\mathcal{N}}_{\mathbf{e}, \mathbf{r}} f(x) \qquad \textrm{for all $x \in \mathbb{R}^n$}.
\end{equation}
\end{lemma}

Here we think of a matrix $M \in \mathrm{GL}(\mathbb{R}, n)$ as acting on $L^2(\mathbb{R}^n)$ by $M \cdot f := f\circ M$ for all $f \in L^2(\mathbb{R}^n)$. Thus, the left-hand side corresponds to the operator $\widetilde{\mathcal{N}}_{\tilde{\mathbf{e}}, \tilde{\mathbf{r}}}$ conjugated by the invertible operator $[\gamma]_{\sigma,\lambda} \colon L^2(\mathbb{R}^n) \to L^2(\mathbb{R}^n)$.\medskip

Before presenting the proof of Lemma~\ref{gen Nik scale lem}, we use the result to verify the rescaling step in the proof of Proposition~\ref{L4 forward SF prop}. In view of the discussion in \S\ref{L2 wtd proof subsec} and by a simple rescaling argument, we know that the maximal function\footnote{Recall, in the setup in Proposition~\ref{L4 forward SF prop} we have $\widetilde{\gamma} := \gamma_{\sigma, \lambda}$, where $\sigma := 2^{-\ell} \mu$ and $\lambda := 2^{-\ell}$, and $\tilde{r} := 2^{-(k-3\ell)/2}$.}
\begin{equation*}
\widetilde{\mathcal{N}}^{\,\mu, (\varepsilon)}_{k,\ell} := \mathrm{Dil}_{2^{k-3\ell}} \circ \widetilde{\mathcal{N}}^{(\varepsilon)}_{\widetilde{\gamma},\tilde{r}} \circ \mathrm{Dil}_{2^{-(k-3\ell)}}
\end{equation*}
corresponds to a repeated composition of operators of the form $\widetilde{\mathcal{N}}_{\tilde{\mathbf{e}}, \tilde{\mathbf{r}}}$ where the $\tilde{\mathbf{r}} = (\tilde{r}_1, \tilde{r}_2, \tilde{r}_3)$ satisfy
\begin{equation*}
\tilde{r}_1 \leq \tilde{r}_2 \leq \tilde{r}_3 \qquad \textrm{and} \qquad \mathrm{ecc}(\tilde{\mathbf{r}}) \lesssim 2^{(k-3\ell)/2}.
\end{equation*}
Consequently, by Lemma~\ref{gen Nik scale lem}, the conjugated operator
\begin{equation*}
\big([\gamma]_{\sigma,\lambda}\big)^{-1} \circ \widetilde{\mathcal{N}}^{\,\mu, (\varepsilon)}_{k,\ell} \circ [\gamma]_{\sigma,\lambda}
\end{equation*}
is dominated by a maximal function $\widetilde{\mathcal{N}}^{(\varepsilon)}_{k,\ell}$ given by a repeated composition of operators of the form $\widetilde{\mathcal{N}}_{\mathbf{e}, \mathbf{r}}$ where each $\mathbf{r} = (r_1, r_2, r_3)$ satisfies
\begin{equation*}
r_1 \leq \lambda r_2 \leq \lambda^2 r_3 \qquad \textrm{and} \qquad \mathrm{ecc}(\mathbf{r}) \lesssim 2^{(k+\ell)/2}.
\end{equation*}
Furthermore, there are only $O_{\varepsilon}(1)$ factors in this composition. The definition just given for $\widetilde{\mathcal{N}}^{(\varepsilon)}_{k,\ell}$ is independent of $\mu$ and, by Proposition~\ref{3d Nik prop} (applied with $\varepsilon_{\circ} > 0$ sufficiently small, depending on $\varepsilon$), the operator $\widetilde{\mathcal{N}}^{(\varepsilon)}_{k,\ell}$ is bounded on $L^2(\mathbb{R}^3)$ with operator norm $O_{\varepsilon}(2^{\varepsilon k})$. Thus, we have verified all the outstanding claims in the proof of Proposition~\ref{L4 forward SF prop}.

\begin{proof}[Proof of Lemma~\ref{gen Nik scale lem}] Consider the conjugated operator on the left-hand side of \eqref{gen Nik scale eq}. By applying a change of variables to the integral defining the underlying averages, the problem is quickly reduced to the pointwise estimate
\begin{equation*}
|\det [\gamma]_{\sigma, \lambda}|^{-1} \cdot \psi_{T_{\tilde{\mathbf{e}}, \tilde{\mathbf{r}}}(\tilde{s})} \circ \big([\gamma]_{\sigma, \lambda}\big)^{-1}(y) \lesssim \psi_{T_{\mathbf{e}, \mathbf{r}}(s)} (y)
\end{equation*}
for the weight functions as defined in \eqref{gen it max fn}, where $s= \sigma + \lambda \tilde{s}$. Suppose $y \in \mathbb{R}^n$ satisfies
\begin{equation*}
R \leq \sum_{j=1}^n r_j |\inn{\mathbf{e}_j(s)}{y}| \leq 2 R
\end{equation*}
for some $R \geq 1$. From the definition of the weight function from \eqref{gen it max fn}, and the orthonormality of the Frenet frame, the problem is further reduced to showing
\begin{equation}\label{Nik scale 1}
\sum_{j=1}^n \tilde{r}_j |\inn{\tilde{\mathbf{e}}_j(\tilde{s})}{\tilde{y}}| \gtrsim R \qquad \textrm{where $\tilde{y} := \big([\gamma]_{\sigma, \lambda}\big)^{-1}(y)$.}
\end{equation}
Let $\alpha = \big([\gamma]_{s, \lambda}\big)^{-1}(y)$ so that, by the definition of the matrix $[\gamma]_{s, \lambda}$, we have
\begin{equation*}
y = \sum_{j=1}^n \lambda^j \alpha_j \gamma^{(j)}(s).
\end{equation*}
Taking the inner product of both sides of this identity with respect to the vectors $\mathbf{e}_j(s)$, it follows that the vectors $\big(\inn{\mathbf{e}_j(s)}{y}\big)_{j=1}^n$ and $\big(\lambda^j \alpha_j\big)_{j=1}^n$ are related by an \textit{upper-triangular} matrix transformation, which is also an $O(\delta)$ perturbation of the identity.
For this observation, we use the fact that $\langle \mathbf{e}_1(s), \dots, \mathbf{e}_j(s)\rangle = \langle \gamma^{(1)}(s), \dots, \gamma^{(j)}(s)\rangle$ for $1 \leq j \leq n$, owing to the definition of the Frenet frame. In view of the hypothesis \eqref{Nik scale hyp} which, in particular, implies $r_i \leq r_{i+1}$ for $1 \leq i \leq n-1$, the above observation yields that
\begin{equation}\label{Nik scale 3}
r_j \lambda^j |\alpha_j| \lesssim R \qquad \textrm{for $1 \leq j \leq n$.}
\end{equation}
Furthermore, by pigeonholing, there exists some $1 \leq J \leq n$ such that
\begin{equation*}
r_J |\inn{\mathbf{e}_J(s)}{y}| \geq R/n \quad \textrm{and} \quad r_j |\inn{\mathbf{e}_j(s)}{y}| < R/n \qquad \textrm{for $J + 1 \leq j \leq n$.}
\end{equation*}
Thus, by the same argument used to show \eqref{Nik scale 3}, provided $\delta$ is chosen sufficiently small,
\begin{equation}\label{Nik scale 4}
r_J \lambda^J |\alpha_J| \sim R.
\end{equation}
Since $\widetilde{\gamma}^{(j)}(\tilde{s}) = \lambda^j \big([\gamma]_{\sigma,\lambda}\big)^{-1} \gamma^{(j)}(s)$ for $j \geq 1$, it follows that $[\widetilde{\gamma}]_{\tilde{s}} = \big([\gamma]_{\sigma,\lambda}\big)^{-1} \circ [\gamma]_{s,\lambda}$ and, consequently,
\begin{equation*}
\tilde{y} = \big([\gamma]_{\sigma, \lambda}\big)^{-1} (y) = \big([\gamma]_{\sigma, \lambda}\big)^{-1} \circ [\gamma]_{s,\lambda} (\alpha) = [\widetilde{\gamma}]_{\tilde{s}} (\alpha).
\end{equation*}
Thus, we have $\alpha = \big([\widetilde{\gamma}]_{\tilde{s}}\big)^{-1} (\tilde{y})$ and, arguing as before, this implies that the vectors $\big(\inn{\tilde{\mathbf{e}}_j(\tilde{s})}{\tilde{y}}\big)_{j=1}^n$ and $\alpha$ are also related by an upper-triangular matrix transformation, which is again an $O(\delta)$ perturbation of the identity. From this observation, provided $\delta$ is chosen sufficiently small, we see that
\begin{equation*}
\tilde{r}_J|\inn{\tilde{\mathbf{e}}_J(\tilde{s})}{\tilde{y}}| \gtrsim r_J \lambda^J|\alpha_J| - \delta \sum_{j = J+1}^n \big(r_J \lambda^{J - j}r_j^{-1}\big) r_j \lambda^j |\alpha_j| \gtrsim R,
\end{equation*}
where the final inequality uses the hypothesis \eqref{Nik scale hyp} together with \eqref{Nik scale 3} and \eqref{Nik scale 4}. This implies the desired bound \eqref{Nik scale 1}.
\end{proof}

\section{Proof of the \texorpdfstring{$\mathbb{R}^{3+1} \to \mathbb{R}^3$}{} Nikodym maximal estimate}\label{Nikodym sec}

In this section we establish Proposition~\ref{Nikodym prop}. We begin by recalling the basic setup. Let $\gamma \colon [-1,1] \to \mathbb{R}^3$ be a smooth, non-degenerate curve with Frenet frame $(\mathbf{e}_j)_{j=1}^3$.
Given $\mathbf{r} \in (0,1)^3$ and $s \in [-1,1]$, consider the \textit{plates}
\begin{equation*}
\mathcal{T}_{\mathbf{r}}(s) := \big\{ (y,t) \in \mathbb{R}^3 \times [1,2] : \big|\inn{y - t\gamma(s)}{\mathbf{e}_j(s)}\big| \leq r_j \, \textrm{ for $j=1,2,3$} \big\}
\end{equation*}
and the associated averaging and maximal operators
\begin{equation*}
\mathcal{A}_{\mathbf{r}}^{\mathrm{sing}} g(x; s) := \fint_{\mathcal{T}_{\mathbf{r}}(s)} g(x-y, t) \,\mathrm{d} y\, \mathrm{d} t \quad \textrm{and} \quad \mathcal{N}_{\mathbf{r}}^{\,\mathrm{sing}} g(x) := \sup_{-1 \leq s \leq 1} |\mathcal{A}_{\mathbf{r}}^{\mathrm{sing}} g(x; s)|.
\end{equation*}
We assume the parameters satisfy the conditions
\begin{equation*}
r_3 \leq r_2 \leq r_1 \leq r_2^{1/2} \qquad \textrm{and} \qquad r_2 \leq r_{1}^{1/2} r_3^{1/2}
\end{equation*}
and the goal is to establish the $L^2$ bound
\begin{equation}\label{Nikodym recall}
\|\mathcal{N}_{\mathbf{r}}^{\,\mathrm{sing}} g\|_{L^2(\mathbb{R}^3)} \lesssim |\log r_3|^3 \|g\|_{L^2(\mathbb{R}^4)}.
\end{equation}
To prove this norm inequality we will rely on the Fourier transform and reduce the problem to certain oscillatory integral estimates. The argument is a (significant) elaboration of that used to establish a lower dimensional variant of \eqref{Nikodym recall} in \cite{MSS1992}. We shall make heavy use of the frequency decomposition used to analyse the helical averaging operator in \S\ref{J=3 sec}.

\begin{proof}[Proof of Proposition~\ref{Nikodym prop}] The argument is somewhat involved and is therefore broken into steps.\medskip

\noindent\textit{Initial reductions}. Let $0 < \delta_0 \ll 1$ be a small parameter, as introduced at the beginning of \S\ref{sec:slow decay cone}. By familiar localisation and rescaling arguments, we may assume $\gamma$ satisfies $\gamma(\,\cdot\,) - \gamma(0) \in \mathfrak{G}_3(\delta_0)$. Further, we may replace $\mathcal{A}_{\mathbf{r}}^{\mathrm{sing}} g(x; s)$ with the localised version $\mathcal{A}_{\mathbf{r}}^{\mathrm{sing}} g(x; s) \chi(s)$, where $\chi \in C^{\infty}_c(\mathbb{R})$ is supported in $I_0 := [-\delta_0, \delta_0]$. Note that this model situation is already enough for our application in \S\ref{L2 wtd 3+1 sec}.\medskip

\noindent\textit{Fourier representation}. The first step is to derive an alternative representation of the averages $\mathcal{A}_{\mathbf{r}}^{\mathrm{sing}}g$ in terms of an oscillatory integral operator.
Given $a \in C^{\infty}_c(\widehat{\mathbb{R}}^3 \times \mathbb{R} \times \mathbb{R})$, define
\begin{align*}
\mathcal{A}[a]g(x;s) &:= \frac{1}{(2\pi)^3}\int_1^2 \int_{\mathbb{R}^3} \int_{\widehat{\mathbb{R}}^3} e^{i \inn{x-y-t\gamma(s)}{\xi}} a(\xi; s; t)\,\mathrm{d} \xi\, g(y,t)\,\mathrm{d} y\, \mathrm{d} t \\
&= \frac{1}{(2\pi)^3} \int_{\widehat{\mathbb{R}}^3} e^{i \inn{x}{\xi}}\int_1^2 e^{-it \inn{\gamma(s)}{\xi}} a(\xi; s; t) \tilde{g}(\xi,t) \,\mathrm{d} t\,\mathrm{d} \xi,
\end{align*}
where $\tilde{g}$ denotes the Fourier transform of $g$ with respect to the $y$-variable only. The associated maximal operator is then defined by
\begin{equation*}
\mathcal{N}[a] g(x) := \sup_{-1 \leq s \leq 1} |\mathcal{A}[a] g(x; s)|.
\end{equation*}
Without loss of generality, to prove Proposition~\ref{Nikodym prop} it suffices to consider the estimate for $g$ Schwartz and taking values in $[0,\infty)$. Fix $\psi \in C^{\infty}_c(\widehat{\mathbb{R}})$ with $\mathrm{supp}\, \psi \subseteq [-1,1]$ such that $\widecheck{\psi}$ is positive and satisfies $\widecheck{\psi}(y) \gtrsim 1$ for $|y| \leq 1$. Define
\begin{equation*}
a_{\mathbf{r}}(\xi; s) := \prod_{j=1}^3 \psi\big(r_j \inn{\xi}{\mathbf{e}_j(s)}\big) \, \chi(s)
\end{equation*}
so that, by the integral formula for the inverse Fourier transform and a change of variables,
\begin{equation*}
\frac{1}{|\mathcal{T}_{\mathbf{r}}(s)|} {\mathbbm 1}_{\mathcal{T}_{\mathbf{r}}(s)}(y, t)\chi(s) \lesssim \prod_{j=1}^3 r_j^{-1} \widecheck{\psi}\big(r_j^{-1} \inn{y - t\gamma(s)}{\mathbf{e}_j(s)}\big)\chi(s) = \frac{1}{(2 \pi)^3} \int_{\widehat{\mathbb{R}}^3} e^{i \inn{y - t\gamma(s)}{\xi}} a_{\mathbf{r}}(\xi;s)\,\mathrm{d} \xi.
\end{equation*}
Thus, the pointwise inequality
\begin{equation*}
|\mathcal{A}_{\mathbf{r}}^{\mathrm{sing}} g(x;s)| \lesssim |\mathcal{A}[a_{\mathbf{r}}]g(x;s)|
\end{equation*}
holds and therefore it suffices to bound the operator $\mathcal{N}[a_{\mathbf{r}}]$.\medskip

\noindent\textit{Sobolev embedding}. Given $a \in C^{\infty}_c(\widehat{\mathbb{R}}^3 \times \mathbb{R} \times \mathbb{R})$, by elementary Sobolev embedding,
\begin{equation}\label{gen Sobolev}
\| \mathcal{N}[a] g\|_{L^2(\mathbb{R}^3)}^2 \leq \| \mathcal{A}[a]g \|_{L^2(\mathbb{R}^{3+1})}^2 + 2 \prod_{\iota \in \{0,1\}}\| \partial_s^{\iota}\, \mathcal{A}[a]g \|_{L^2(\mathbb{R}^{3+1})};
\end{equation}
indeed, this bound is a simple and standard consequence of the fundamental theorem of calculus and the Cauchy--Schwarz inequality (see for instance~\cite[Chapter XI, \S 3.2]{Stein1993}). Observe that $\partial_s\,\mathcal{A}[a]$ is an operator of the same form as $\mathcal{A}[a]$ and, in particular,
\begin{equation}\label{N gen deriv}
\partial_s\, \mathcal{A}[a] = \mathcal{A}[\mathfrak{d}_s a] \qquad \textrm{where} \qquad \mathfrak{d}_s a(\xi; s; t) := -i t \inn{\gamma'(s)}{\xi} \, a(\xi; s; t) + \partial_s a(\xi; s; t).
\end{equation}
These observations reduce the problem to proving estimates of the form
\begin{equation}\label{N gen norm}
\| \mathcal{A}[\mathfrak{d}_s^{\iota}\,a] \|_{L^2(\mathbb{R}^{3+1}) \to L^2(\mathbb{R}^{3+1})} \leq B^{(\iota - 1/2)} \qquad \textrm{for $\iota \in \{0, 1\}$}
\end{equation}
for suitable symbols $a$ and constants $B \geq 1$. In particular, it suffices to decompose the original symbol $a_{\mathbf{r}}$ into $O(|\log r_3|^3)$ many pieces and show that \eqref{N gen norm} holds for some choice of $B \geq 1$ on each piece.\medskip

\noindent \textit{Reduction to oscillatory integral estimates}. Continuing to work with a general $a \in C^{\infty}_c(\widehat{\mathbb{R}}^3 \times \mathbb{R} \times \mathbb{R})$, it follows from Plancherel's theorem in the $x$-variable and the Cauchy--Schwarz inequality that
\begin{align}
\| \mathcal{A}[a]g\|_{L^2(\mathbb{R}^{3+1})}^2 &\leq \int_{\widehat{\mathbb{R}}^3} \int_{\mathbb{R}} |T_{\xi}[a]\tilde{g}(\xi;\,\cdot\,)(t)\tilde{g}(\xi;t)|\, \mathrm{d} t \, \mathrm{d} \xi \notag \\
&\leq \int_{\widehat{\mathbb{R}}^3} \|T_{\xi}[a]\tilde{g}(\xi;\,\cdot\,)\|_{L^2(\mathbb{R})}\|\tilde{g}(\xi;\,\cdot\,)\|_{L^2(\mathbb{R})} \, \mathrm{d} \xi \label{N gen square}
\end{align}
where, for each $\xi \in \widehat{\mathbb{R}}^3$, the operator $T_{\xi}[a]$ acts on univariate functions by integrating (in the $t'$-variable) against the kernel
\begin{equation}\label{N gen ker}
\mathcal{K}[a](t,t';\xi) := \int_{\mathbb{R}} e^{i (t-t') \inn{\gamma(s)}{\xi}} \overline{a(\xi; s; t)}\,a(\xi; s; t'){\mathbbm 1}_{[1,2]^2}(t, t')\,\mathrm{d} s.
\end{equation}
It suffices to show that
\begin{equation}\label{N gen univar}
\| T_\xi [\mathfrak{d}_s^\iota a] \tilde{g}(\xi; \cdot) \|_{L^2(\mathbb{R})} \leq B^{2 \iota -1} \| \tilde{g}(\xi; \cdot) \|_{L^2(\mathbb{R})} \qquad \textrm{ for $\iota \in \{0,1\}$}
\end{equation}
holds uniformly in $\xi \in \widehat{\mathbb{R}}^3$. Indeed, in this case the norm bound \eqref{N gen norm} would follow via \eqref{N gen square} and a further application of Plancherel's theorem in the $\xi$-variable. By the Schur test, the inequality \eqref{N gen univar} is reduced to verifying the oscillatory integral estimates
\begin{equation}\label{N gen Schur}
\sup_{t' \in [1,2]} \int_1^2 |\mathcal{K}[\mathfrak{d}_s^\iota a](t,t';\xi)| \,\mathrm{d} t, \quad \sup_{t \in [1,2]} \int_1^2 |\mathcal{K}[\mathfrak{d}_s^\iota a](t,t';\xi)| \,\mathrm{d} t' \leq B^{2\iota -1}, \qquad \iota \in \{0,1\}
\end{equation}
hold uniformly over all $\xi \in \widehat{\mathbb{R}}^3$.\medskip

\noindent \textit{Initial decomposition}. In order to obtain favourable estimates, it is necessary to first decompose the original symbol $a_{\mathbf{r}}$ into a number of localised pieces. This decomposition is similar to that used in \S\ref{J=3 sec} and is described in detail presently. Later in the proof, the kernel estimates \eqref{N gen Schur} are verified for each piece of the decomposition and the resulting norm bounds are combined to estimate the entire operator.
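Before carrying out the decomposition, we briefly sketch the elementary arguments behind the two reductions above. For \eqref{gen Sobolev}, write $F(x;s) := \mathcal{A}[a]g(x;s)$, which is smooth and compactly supported in $s$; by the fundamental theorem of calculus, for any $s$, $s_0 \in [-1,1]$,
\begin{equation*}
|F(x;s)|^2 \leq |F(x;s_0)|^2 + 2\int_{\mathbb{R}} |F(x;u)|\,|\partial_s F(x;u)|\,\mathrm{d} u,
\end{equation*}
and averaging in $s_0$ over $[-1,1]$, taking the supremum in $s$, integrating in $x$ and applying the Cauchy--Schwarz inequality gives \eqref{gen Sobolev}. In particular, if \eqref{N gen norm} holds for some $B \geq 1$, then
\begin{equation*}
\| \mathcal{N}[a] g\|_{L^2(\mathbb{R}^3)}^2 \leq (B^{-1} + 2)\,\|g\|_{L^2(\mathbb{R}^{3+1})}^2 \lesssim \|g\|_{L^2(\mathbb{R}^{3+1})}^2.
\end{equation*}
As for the Schur test, it bounds the $L^2(\mathbb{R})$ operator norm of $T_{\xi}[\mathfrak{d}_s^{\iota} a]$ by the geometric mean of the two suprema in \eqref{N gen Schur}, which is why \eqref{N gen Schur} implies \eqref{N gen univar}.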
Define $\delta_1 := \delta_0^3$, $\delta_2 := \delta_0$ and $\delta_3 := 9/10$ and for $1\leq J \leq 3$ let $\Omega_J$ denote the set of $\xi \in \widehat{\mathbb{R}}^3$ satisfying
\begin{align*}
\inf_{s \in I_0} |\inn{\gamma^{(J)}(s)}{\xi}| &\geq \delta_J |\xi|, \\
\inf_{s \in I_0} |\inn{\gamma^{(j)}(s)}{\xi}| &\leq \delta_j |\xi| \qquad \textrm{for $1 \leq j \leq J-1$.}
\end{align*}
Provided $\delta_0 > 0$ is chosen sufficiently small, the condition $\gamma(\,\cdot\,) - \gamma(0) \in \mathfrak{G}_3(\delta_0)$ ensures that these sets partition $\widehat{\mathbb{R}}^3$. By pigeonholing,\footnote{As we are interested in $L^2$ estimates here, we are free to decompose the symbol using the rough partition of unity $1 \equiv {\mathbbm 1}_{\Omega_1} + {\mathbbm 1}_{\Omega_2} + {\mathbbm 1}_{\Omega_3}$.} it suffices to work with the symbols $a_{\mathbf{r}}^J(\xi;s):= a_{\mathbf{r}}(\xi;s){\mathbbm 1}_{\Omega_J}(\xi)$ for $1 \leq J \leq 3$. Decompose the symbol into dyadic frequency bands by writing
\begin{equation*}
a_{\mathbf{r}} = \sum_{k = 0}^{\infty} a_{\mathbf{r},k} \qquad \textrm{where} \qquad a_{\mathbf{r},k}(\xi; s) := \left\{ \begin{array}{ll} a_{\mathbf{r}}^J(\xi; s) \cdot \beta^k(\xi) & \textrm{for $k \geq 1$} \\ a_{\mathbf{r}}^J(\xi; s) \cdot \eta(\xi) & \textrm{for $k =0$} \end{array} \right. .
\end{equation*}
Here, for notational convenience, we suppress the choice of $J$ in the notation. Since $r_3 \leq r_1, r_2$, only the first $O(|\log r_3|)$ terms of the above sum are non-zero, so it suffices to show
\begin{equation}\label{N freq loc est}
\|\mathcal{N}[a_{\mathbf{r},k}]\|_{L^2(\mathbb{R}^4) \to L^2(\mathbb{R}^3)} \lesssim (1+k)^2 \qquad \textrm{for all $k \in \mathbb{N}_0$.}
\end{equation}
In particular, note that $2^{k}\lesssim r_3^{-1}$. \medskip

\noindent\underline{$J = 1$ case.} Suppose $\mathrm{supp}_{\xi}\, a_{\mathbf{r},k} \subseteq \Omega_1$. Here a simple integration-by-parts argument yields
\begin{equation*}
\sup_{t' \in [1,2]} \int_1^2 |\mathcal{K}[\mathfrak{d}_s^{\iota} a_{\mathbf{r}, k}](t,t';\xi)| \,\mathrm{d} t, \quad \sup_{t \in [1,2]} \int_1^2 |\mathcal{K}[\mathfrak{d}_s^{\iota} a_{\mathbf{r}, k}](t,t';\xi)| \,\mathrm{d} t' \lesssim 2^{k(2\iota - 1)} \qquad \textrm{ for $\iota \in \{0,1\}$.}
\end{equation*}
In view of our earlier observations, the bound \eqref{N freq loc est} therefore holds in this case with a uniform bound in $k$. \medskip

\noindent\underline{$J = 2$ case.} Suppose $\mathrm{supp}_{\xi}\, a_{\mathbf{r},k} \subseteq \Omega_2$. If $\xi \in \Omega_2$, then the equation $\inn{\gamma'(s)}{\xi} = 0$ has a unique solution in $\frac{5}{4} \cdot I_0$ which we denote by $\theta(\xi)$.
Indeed, this follows from a simple calculus exercise, similar to the proof of Lemma~\ref{theta2 lem}.\medskip

\noindent \textit{Further decomposition}. Here the symbol $a_{\mathbf{r},k}$ is further decomposed by writing
\begin{equation*}
a_{\mathbf{r},k} = \sum_{\ell = 0}^{\lfloor k/2 \rfloor} a_{\mathbf{r},k,\ell} \qquad \textrm{where} \qquad a_{\mathbf{r}, k,\ell}(\xi;s) := \left\{\begin{array}{ll} \displaystyle a_{\mathbf{r},k}(\xi; s)\beta\big(2^{\ell}|s - \theta(\xi)|\big) & \textrm{if $0 \leq \ell < \lfloor k/2 \rfloor$} \\[6pt] \displaystyle a_{\mathbf{r},k}(\xi; s)\eta\big(2^{\lfloor k/2 \rfloor}|s - \theta(\xi)|\big) & \textrm{if $\ell = \lfloor k/2 \rfloor$} \end{array}\right. .
\end{equation*}
Since $|\inn{\gamma''(s)}{\xi}| \sim 2^k$ for all $(\xi;s) \in \mathrm{supp}\, a_{\mathbf{r}, k, \ell}$, one has the relation $2^k \leq r_2^{-1}$. \medskip

\noindent \textit{Kernel estimates}. The kernels are analysed using stationary phase techniques.

\begin{lemma}\label{J=2 N ker lem} If $k \in \mathbb{N}$, $0 \leq \ell \leq \lfloor k/2 \rfloor$ and $\iota \in \{0,1\}$, then
\begin{equation}\label{J=2 N ker}
\sup_{t' \in [1,2]} \int_1^2 |\mathcal{K}[\mathfrak{d}_s^{\iota} a_{\mathbf{r},k,\ell}](t,t';\xi)| \,\mathrm{d} t, \quad \sup_{t \in [1,2]} \int_1^2 |\mathcal{K}[\mathfrak{d}_s^{\iota} a_{\mathbf{r}, k,\ell}](t,t';\xi)| \,\mathrm{d} t' \lesssim 2^{(k-\ell)(2\iota - 1)}.
\end{equation}
\end{lemma}

\begin{proof} If $\ell=\lfloor k/2 \rfloor$, then the localisation of the symbol ensures that $|s-\theta (\xi)|\lesssim 2^{-\ell}$ for all $(\xi;s)\in \mathrm{supp}\, a_{\mathbf{r},k,\ell}$. The bound for $\iota=0$ then follows immediately from the size of the $s$-support of $a_{\mathbf{r},k,\ell}$. For $\iota=1$, note that by the mean value theorem, we may write
\begin{equation}\label{J=2 Nik 1}
\inn{\gamma'(s)}{\xi} = \omega(\xi; s) \, (s - \theta(\xi))
\end{equation}
where $|\omega(\xi;s)| \sim 2^k$ on $\mathrm{supp}\, a_{\mathbf{r},k,\ell}$. Consequently,
\begin{equation}\label{J=2 Nik 2}
| \inn{\gamma'(s)}{\xi}| \lesssim 2^{k/2} \qquad \textrm{ for all $(\xi; s) \in \mathrm{supp}\, a_{\mathbf{r},k,\ell}$.}
\end{equation}
Furthermore, by the definition of $a_{\mathbf{r}}$ and of the Frenet frame $\{\mathbf{e}_j(s)\}_{j=1}^3$, the relation $r_3 \leq r_2 \leq r_1 \lesssim r_2^{1/2} \leq 2^{-k/2}$ implies
\begin{equation}\label{J=2 Nik 3}
|\partial_s a_{\mathbf{r}, k,\ell}(\xi;s)|\lesssim 2^{k/2}.
\end{equation}
In view of the definition of $\mathfrak{d}_s$ in \eqref{N gen deriv}, the bounds \eqref{J=2 Nik 2} and \eqref{J=2 Nik 3} immediately imply that $|\mathfrak{d}_s a_{\mathbf{r},k,\ell} (\xi;s)| \lesssim 2^{k/2}$ and the bound for $\iota=1$ now follows immediately from the size of the $s$-support of $a_{\mathbf{r},k,\ell}$ and the definition of $\mathcal{K}$ in \eqref{N gen ker}.
If $0 \leq \ell < \lfloor k/2 \rfloor$, then the localisation of the symbols ensures that
\begin{equation}\label{J=2 N ker 1}
|s - \theta(\xi)| \sim 2^{-\ell} \qquad \textrm{for all $(\xi;s) \in \mathrm{supp}\, a_{\mathbf{r}, k,\ell}$.}
\end{equation}
Consequently, by directly applying \eqref{J=2 N ker 1} in \eqref{J=2 Nik 1}, we have the bounds
\begin{equation}\label{J=2 N ker 2}
|\inn{\gamma'(s)}{\xi}| \sim 2^{k-\ell}, \quad |\inn{\gamma^{(N)}(s)}{\xi}| \lesssim 2^k \qquad \textrm{for $N \geq 2, \,\,\, (\xi;s) \in \mathrm{supp}\, a_{\mathbf{r},k,\ell}$.}
\end{equation}
Moreover, by the definition of $a_{\mathbf{r}}$, the first relation above immediately implies $2^{k-\ell} \leq r_1^{-1}$; recall that $r_2, r_3\leq 2^{-k}$. Thus, by the definition of the Frenet frame $\{\mathbf{e}_j(s)\}_{j=1}^3$, the symbol satisfies
\begin{equation}\label{J=2 N ker 3}
|\partial_s^N a_{\mathbf{r},k,\ell}(\xi;s)| \lesssim 2^{\ell N} = 2^{-(k-2\ell)N} 2^{(k - \ell)N} \qquad \textrm{for all $N \in \mathbb{N}_0$.}
\end{equation}
Thus, we may bound the kernel via repeated integration-by-parts. In particular, applying Lemma~\ref{non-stationary lem} with $\phi(s):=(t-t')\inn{\gamma(s)}{\xi}$ and $R := 2^{k-2\ell}|t-t'|$, we deduce that
\begin{equation*}
|\mathcal{K}[\mathfrak{d}_s^{\iota} a_{\mathbf{r},k,\ell}](\xi; t, t')| \lesssim_N 2^{2(k - \ell)\iota} 2^{-\ell} \big(1 + 2^{k-2\ell}|t - t'| \big)^{-N}, \qquad \textrm{for $\iota \in \{0,1\}$}.
\end{equation*}
The additional $2^{2(k - \ell)}$ factor arises in the bound for the derived operator owing to the formula \eqref{N gen deriv} for the corresponding symbol (and in particular, due to the first bound in \eqref{J=2 N ker 2}, the bounds in \eqref{J=2 N ker 3} and the relation $0 \leq \ell < \lfloor k/2 \rfloor$) and the form of the kernel as described in \eqref{N gen ker}. Integrating both sides of the above display in either $t$ or $t'$, the desired estimate \eqref{J=2 N ker} follows.
\end{proof}

\noindent \textit{Putting everything together}. In view of the kernel estimates from Lemma~\ref{J=2 N ker lem} and the discussion at the beginning of the proof, it follows that
\begin{equation*}
\| \mathcal{A}[\mathfrak{d}_s^{\iota} a_{\mathbf{r},k,\ell}] \|_{L^2(\mathbb{R}^4) \to L^2(\mathbb{R}^{3+1})} \lesssim 2^{(k-\ell)(\iota - 1/2)} \qquad \textrm{for all $0 \leq \ell \leq \lfloor k/2 \rfloor$ and $\iota \in \{0,1\}$.}
\end{equation*}
Combining these bounds with \eqref{gen Sobolev}, it follows that
\begin{equation*}
\| \mathcal{N}[a_{\mathbf{r},k,\ell}] \|_{L^2(\mathbb{R}^4) \to L^2(\mathbb{R}^3)} \lesssim 1 \qquad \textrm{for all $0 \leq \ell \leq \lfloor k/2 \rfloor$.}
\end{equation*}
The frequency localised maximal bound \eqref{N freq loc est} immediately follows (with linear dependence on $k$) from the triangle inequality.\medskip

\noindent\underline{$J = 3$ case.} Suppose $\mathrm{supp}_{\xi}\, a_{\mathbf{r},k} \subseteq \Omega_3$. As in Lemma~\ref{theta2 lem}, if $\xi \in \Omega_3$, then the equation $\inn{\gamma''(s)}{\xi} = 0$ has a unique solution in $[-1,1]$, which we denote by $\theta_2(\xi)$.
As in Lemma~\ref{theta1 lem}, if $u(\xi) < 0$, where
\begin{equation*}
u(\xi) := \inn{\gamma'\circ \theta_2(\xi)}{\xi},
\end{equation*}
then the equation $\inn{\gamma'(s)}{\xi} = 0$ has precisely two solutions in $[-1,1]$, which we denote by $\theta_1^{\pm}(\xi)$. We will further assume without loss of generality that $\inn{\gamma^{(3)}(s)}{\xi} > 0$ for all $\xi \in \mathrm{supp}_{\xi}\, a_{\mathbf{r},k}$. \medskip

\noindent \textit{Further decomposition}. Here the symbol $a_{\mathbf{r},k}$ is decomposed in a manner similar (but not quite identical) to that used in \S\ref{J=3 sec}. First perform a dyadic decomposition of $u(\xi)$ by writing
\begin{equation*}
a_{\mathbf{r},k} = \sum_{\ell = 0}^{\lfloor k/3 \rfloor} a_{\mathbf{r},k,\ell} + \sum_{\ell = 0}^{\lfloor k/3 \rfloor-1} a_{\mathbf{r},k,\ell}^+
\end{equation*}
where
\begin{equation*}
a_{\mathbf{r},k,\ell}(\xi; s) := \left\{\begin{array}{ll} \displaystyle a_{\mathbf{r},k}(\xi; s)\beta^-\big(2^{-k+ 2\ell}u(\xi)\big) & \textrm{if $0 \leq \ell < \lfloor k/3 \rfloor$} \\[6pt] \displaystyle a_{\mathbf{r},k}(\xi; s)\eta\big(2^{-k + 2\lfloor k/3 \rfloor}u(\xi)\big) & \textrm{if $\ell = \lfloor k/3 \rfloor$} \end{array}\right.
\end{equation*}
and the $a_{\mathbf{r},k,\ell}^+$ are defined similarly but with $\beta^+$ in place of $\beta^-$. Here $\beta = \beta^- + \beta^+$ is the decomposition of the bump function described in \S\ref{loc curv subsec}. The symbols $a_{\mathbf{r},k,\ell}^+$ are relatively easy to analyse, and are dealt with using an argument similar to that of the $J=2$ case. Henceforth, we focus exclusively on the $a_{\mathbf{r},k,\ell}$. We further decompose each $a_{\mathbf{r},k,\ell}$ with respect to the distance of the $s$-variable to the root $\theta_2(\xi)$. Once again it is convenient to introduce a fine tuning constant $\rho > 0$. Similar to \eqref{akell dec}, define
\begin{equation}\label{N J3 s loc a}
a_{\mathbf{r},k,\ell, 0}(\xi; s) := a_{\mathbf{r},k,\ell}(\xi; s) \eta \big(\rho 2^{\ell} |s-\theta_2(\xi)|\big) \qquad \textrm{for $0 \leq \ell \leq \lfloor k/3 \rfloor$.}
\end{equation}
Note, in contrast with \eqref{akell dec}, we have not decomposed with respect to $|s - \theta_1^{\pm}(\xi)|$ for $\ell < \lfloor k/3 \rfloor$. Such a decomposition does appear later: here it is necessary to localise simultaneously with respect to \textit{both} roots $\theta_2(\xi)$ and $\theta_1^{\pm}(\xi)$. Also in contrast with the analysis of \S\ref{J=3 sec}, here it is not possible to reduce the problem to studying the $s$-localised pieces in \eqref{N J3 s loc a}. Consequently, we also consider the $s$-localisation of the symbol to the remaining dyadic shells, viz.
\begin{equation*}
a_{\mathbf{r},k,\ell, m}(\xi;s):= a_{\mathbf{r},k,\ell}(\xi;s)\, \beta \big(\rho 2^{\ell - m}|s-\theta_2(\xi)|\big) \qquad \textrm{for $0 \leq \ell \leq \lfloor k/3 \rfloor$ and $m \geq 1$.}
\end{equation*}
The most difficult terms to estimate correspond to $0 \leq \ell < \lfloor k/3 \rfloor$ and $m = 0$. These symbols require a further decomposition.
In particular, for $0 \leq \ell < \lfloor k/3 \rfloor$ let
\begin{equation*}
b_{\mathbf{r},k,\ell, m}(\xi;s) := \begin{cases} a_{\mathbf{r},k,\ell, 0}(\xi;s)\, \eta\big(\rho^{-1} 2^{(k-\ell)/2}\displaystyle\min_{\pm}|s- \theta_1^{\pm}(\xi)|\big) & \textrm{if $m = 0$} \\[5pt] a_{\mathbf{r},k,\ell, 0}(\xi;s)\, \beta\big(\rho^{-1} 2^{(k-\ell)/2 - m }\displaystyle\min_{\pm}|s- \theta_1^{\pm}(\xi)|\big) & \textrm{if $1 \leq m < \lfloor\frac{k-3\ell}{2}\rfloor$} \\[5pt] a_{\mathbf{r},k,\ell, 0}(\xi;s) \big(1-\eta\big(\rho^{-1} 2^{(k-\ell)/2 - m }\displaystyle\min_{\pm}|s- \theta_1^{\pm}(\xi)|\big)\big) & \textrm{if $m = \lfloor\frac{k-3\ell}{2}\rfloor$} \end{cases}.
\end{equation*}
Observe that Lemma~\ref{root control lem} already implies that $|s - \theta_1^{\pm}(\xi)| \lesssim \rho^{-1} 2^{-\ell}$ for $(\xi; s) \in \mathrm{supp}\, a_{\mathbf{r},k,\ell, 0}$. Thus, $\rho 2^{-\ell} \lesssim |s-\theta_1^{\pm}(\xi)| \lesssim \rho^{-1} 2^{-\ell}$ for $(\xi;s) \in \mathrm{supp}\, b_{\mathbf{r},k,\ell,m}$ when $m=\lfloor\frac{k-3\ell}{2}\rfloor$. Combining the above definitions and observations, the symbol may be written as
\begin{equation*}
a_{\mathbf{r},k} = \sum_{\ell = 0}^{\lfloor k/3 \rfloor} \sum_{m = 0}^{\ell} a_{\mathbf{r},k,\ell, m} = \sum_{(\ell,m)\in \Lambda_a(k)} a_{\mathbf{r},k,\ell, m} + \sum_{(\ell, m) \in \Lambda_b(k)} b_{\mathbf{r},k,\ell, m}
\end{equation*}
where
\begin{align*}
\Lambda_a(k) &:= \big\{(\ell, m) \in \mathbb{N}_0^2 : 0 \leq \ell \leq \lfloor\tfrac{k}{3}\rfloor \textrm{ and } 1 \leq m \leq \ell \big\} \cup \big\{\big(\lfloor\tfrac{k}{3}\rfloor, 0\big) \big\}, \\
\Lambda_b(k) &:= \big\{(\ell, m) \in \mathbb{N}_0^2 : 0 \leq \ell < \lfloor\tfrac{k}{3}\rfloor \textrm{ and } 0 \leq m \leq \lfloor\tfrac{k-3\ell}{2}\rfloor \big\}.
\end{align*}
Note that the range of $m$ in the definition of $\Lambda_a(k)$ is restricted since $a_{\mathbf{r},k,\ell,m}$ is identically zero whenever $m > \ell$.\medskip

\noindent \textit{Kernel estimates}. The kernels are analysed using stationary phase techniques.

\begin{lemma}\label{J=3 N ker lem} Let $k \in \mathbb{N}$ and $\iota \in \{0,1\}$.
\begin{enumerate}[a)]
\item If $(\ell, m) \in \Lambda_a(k)$, then
\begin{equation}\label{J=3 N ker a}
\sup_{t' \in [1,2]} \int_1^2 |\mathcal{K}[\mathfrak{d}_s^{\iota} a_{\mathbf{r},k,\ell, m}](t,t';\xi)| \,\mathrm{d} t, \quad \sup_{t \in [1,2]} \int_1^2 |\mathcal{K}[\mathfrak{d}_s^{\iota} a_{\mathbf{r},k,\ell, m}](t,t';\xi)| \,\mathrm{d} t' \lesssim 2^{(k-2\ell + 2m)(2\iota - 1)}.
\end{equation}
\item If $(\ell, m) \in \Lambda_b(k)$, then
\begin{equation}\label{J=3 N ker b}
\sup_{t' \in [1,2]} \int_1^2 |\mathcal{K}[\mathfrak{d}_s^{\iota} b_{\mathbf{r},k,\ell, m}](t,t';\xi)| \,\mathrm{d} t, \quad \sup_{t \in [1,2]} \int_1^2 |\mathcal{K}[\mathfrak{d}_s^{\iota} b_{\mathbf{r},k,\ell, m}](t,t';\xi)| \,\mathrm{d} t' \lesssim 2^{((k - \ell)/2 + m)(2\iota - 1)}.
\end{equation}
\end{enumerate}
\end{lemma}

\begin{proof} The argument is similar to that used to prove Lemma~\ref{J=3 s loc lem}. \medskip

\noindent a) Let $(\ell, m) \in \Lambda_a(k)$. If $(\ell,m)=(\lfloor k/3 \rfloor,0)$, then the localisation of the $a_{\mathbf{r},k,\ell, m}$ symbols ensures that
\begin{equation}\label{J=3 Nik 1}
|u(\xi)| \lesssim 2^{k/3} \quad \text{ and } \quad |s-\theta_2(\xi)| \lesssim \rho^{-1}2^{-k/3} \quad \textrm{for all $(\xi;s) \in \mathrm{supp}\, a_{\mathbf{r}, k,\ell, m}$.}
\end{equation}
The bound \eqref{J=3 N ker a} for $\iota=0$ follows immediately from the size of the $s$-support of $a_{\mathbf{r},k,\ell,m}$. For $\iota=1$, apply the familiar Taylor expansion to write
\begin{equation}\label{J=3 Nik 2}
\begin{split}
\inn{\gamma'(s)}{\xi} &= u(\xi) + \omega_1(\xi; s) \, (s - \theta_2(\xi))^2, \\
\inn{\gamma''(s)}{\xi} &= \omega_2(\xi; s) \, (s - \theta_2(\xi))
\end{split}
\end{equation}
where $|\omega_j(\xi;s)| \sim 2^k$ on $\mathrm{supp}\, a_{\mathbf{r}, k, \ell, m}$ for $j=1$, $2$. Consequently, by directly applying \eqref{J=3 Nik 1}, we have the upper bounds
\begin{equation}\label{J=3 Nik 3}
| \inn{\gamma'(s)}{\xi}| \lesssim \rho^{-2} 2^{k/3}, \quad | \inn{\gamma''(s)}{\xi}| \lesssim \rho^{-1} 2^{2k/3}, \qquad \textrm{for all $(\xi;s) \in \mathrm{supp}\, a_{\mathbf{r},k,\ell,m}$.}
\end{equation}
Note that the relations $r_2 \leq r_1 \leq r_2^{1/2}$ and $r_3 \leq r_2 \leq r_1^{1/2}r_3^{1/2}$ imply, in particular, $r_1 \leq r_3^{1/3} \lesssim 2^{-k/3}$ and $r_2 \leq r_3^{2/3} \lesssim 2^{-2k/3}$. It then follows from the definitions of $a_{\mathbf{r}}$ and of the Frenet frame $\{\mathbf{e}_j(s)\}_{j=1}^3$ that
\begin{equation}\label{J=3 Nik 4}
|\partial_s a_{\mathbf{r},k,\ell, m}(\xi;s)| \lesssim 2^{k/3}.
\end{equation}
In view of the definition of $\mathfrak{d}_s$ in \eqref{N gen deriv}, the first bound in \eqref{J=3 Nik 3} and \eqref{J=3 Nik 4} immediately imply that $|\mathfrak{d}_s a_{\mathbf{r}, k, \ell, m}(\xi;s)| \lesssim 2^{k/3}$, and the bound for $\iota=1$ now follows immediately from the size of the $s$-support of $a_{\mathbf{r},k,\ell,m}$ and the definition of $\mathcal{K}$ in \eqref{N gen ker}.

Now suppose $0 \leq \ell \leq \lfloor k/3 \rfloor$ and $1 \leq m \leq \ell$.
Then the localisation of the $a_{\mathbf{r},k,\ell, m}$ symbols ensures that
\begin{equation}\label{J=3 N ker 1 a}
|u(\xi)| \lesssim 2^{k - 2\ell} \quad \text{ and } \quad |s-\theta_2(\xi)| \sim \rho^{-1}2^{-\ell + m} \quad \textrm{for all $(\xi;s) \in \mathrm{supp}\, a_{\mathbf{r},k,\ell, m}$.}
\end{equation}
Provided $\rho$ is chosen sufficiently small, by directly applying \eqref{J=3 N ker 1 a} in \eqref{J=3 Nik 2}, we have the bounds
\begin{equation}\label{J=3 N ker 2 a}
| \inn{\gamma'(s)}{\xi}| \sim \rho^{-2} 2^{k - 2\ell + 2m}, \quad | \inn{\gamma''(s)}{\xi}| \sim \rho^{-1} 2^{k - \ell + m}, \quad | \inn{\gamma^{(N)}(s)}{\xi}| \lesssim_N 2^k \qquad \textrm{for $N \geq 3$.}
\end{equation}
By the definition of $a_{\mathbf{r}}$, the first and second bounds above immediately imply $2^{k-2\ell+2m} \leq r_1^{-1}$ and $2^{k-\ell+m}\leq r_2^{-1}$, whilst $2^k \leq r_3^{-1}$. Thus, by the definition of the Frenet frame $\{\mathbf{e}_j(s)\}_{j=1}^3$ and the bounds \eqref{J=3 N ker 2 a}, the symbol satisfies
\begin{equation}\label{J=3 N ker 3 a}
|\partial_s^N a_{\mathbf{r}, k,\ell, m}(\xi;s)| \lesssim 2^{(\ell - m)N} = 2^{-(k-3\ell + 3m)N} 2^{(k - 2\ell + 2m)N} \qquad \textrm{for all $N \in \mathbb{N}_0$.}
\end{equation}
Thus, we may bound the kernel via repeated integration-by-parts. In particular, applying Lemma~\ref{non-stationary lem} with $\phi(s):=(t-t')\inn{\gamma(s)}{\xi}$ and $R:=2^{k-3\ell+3m}|t-t'|$, we deduce that
\begin{equation*}
|\mathcal{K}[\mathfrak{d}_s^{\iota} a_{\mathbf{r},k,\ell,m}](\xi; t, t')| \lesssim_N 2^{2(k - 2\ell + 2m)\iota} 2^{-\ell + m} \big(1 + 2^{k-3\ell + 3m}|t - t'| \big)^{-N}.
\end{equation*}
The additional $2^{2(k - 2\ell + 2m)\iota}$ factor arises in the bound for the derived operator $\mathfrak{d}_s$ owing to the formula \eqref{N gen deriv} for the corresponding symbol (and in particular, due to the bounds in \eqref{J=3 N ker 2 a} and in \eqref{J=3 N ker 3 a} and the relation $0 \leq \ell - m \leq \ell \leq \lfloor k/3 \rfloor$) and the form of the kernel $\mathcal{K}$ as described in \eqref{N gen ker}. Finally, by integrating both sides of the above display in either $t$ or $t'$, the desired estimate \eqref{J=3 N ker a} follows.\medskip

\noindent b) Let $(\ell, m) \in \Lambda_b(k)$. If $m =0$, then the localisation of the $b_{\mathbf{r},k,\ell, m}$ symbols ensures that
\begin{equation}\label{J=3 Nik 1 b}
|u(\xi)| \sim 2^{k-2\ell} \quad \text{ and } \quad \min_{\pm}|s-\theta_1^{\pm}(\xi)| \lesssim \rho 2^{-(k-\ell)/2} \quad \textrm{for all $(\xi;s) \in \mathrm{supp}\, b_{\mathbf{r},k,\ell, m}$.}
\end{equation}
The bound \eqref{J=3 N ker b} for $\iota=0$ follows immediately from the size of the $s$-support of $b_{\mathbf{r},k,\ell,m}$.
For $\iota=1$, apply the familiar Taylor expansion to write
\begin{equation}\label{J=3 Nik 2 b}
\begin{split}
\inn{\gamma'(s)}{\xi} &= v^{\pm}(\xi)\, (s - \theta_1^{\pm}(\xi)) + \omega_1^{\pm}(\xi; s) \, (s - \theta_1^{\pm}(\xi))^2, \\
\inn{\gamma''(s)}{\xi} &= v^{\pm}(\xi) + \omega_2^{\pm}(\xi; s) \, (s - \theta_1^{\pm}(\xi))
\end{split}
\end{equation}
where $|\omega_j^{\pm}(\xi;s)| \sim 2^k$ on $\mathrm{supp}\, b_{\mathbf{r}, k, \ell, m}$ for $j=1$, $2$. Consequently, in view of Lemma~\ref{root control lem} and \eqref{J=3 Nik 1 b}, and provided $\rho>0$ is chosen sufficiently small, we have the bounds
\begin{equation}\label{J=3 Nik 4 b}
| \inn{\gamma'(s)}{\xi}| \lesssim \rho 2^{(k - \ell)/2}, \quad | \inn{\gamma''(s)}{\xi}| \sim 2^{k - \ell} \qquad \textrm{for all $(\xi;s) \in \mathrm{supp}\, b_{\mathbf{r},k,\ell,m}$,}
\end{equation}
using the relation $0 \leq \ell \leq \lfloor k/3 \rfloor$. By the definition of $a_{\mathbf{r}}$, the second bound above implies $r_2 \lesssim 2^{-(k-\ell)}$ and therefore $r_1 \leq r_2^{1/2} \lesssim 2^{-(k-\ell)/2}$, whilst $r_3 \lesssim 2^{-k}$. Thus, by the definition of the Frenet frame $\{\mathbf{e}_j(s)\}_{j=1}^3$ and the bounds \eqref{J=3 Nik 4 b}, the symbol satisfies
\begin{equation}\label{J=3 Nik 5 b}
|\partial_s b_{\mathbf{r},k,\ell,m}(\xi;s)| \lesssim 2^{(k-\ell)/2},
\end{equation}
using the relation $0 \leq \ell < \lfloor k/3 \rfloor$. In view of the definition of $\mathfrak{d}_s$ in \eqref{N gen deriv}, the first bound in \eqref{J=3 Nik 4 b} and \eqref{J=3 Nik 5 b} immediately imply that $|\mathfrak{d}_s b_{\mathbf{r},k,\ell,m}(\xi;s)|\lesssim 2^{(k-\ell)/2}$, and the bound for $\iota=1$ now follows immediately from the size of the $s$-support of $b_{\mathbf{r},k,\ell,m}$ and the definition of $\mathcal{K}$ in \eqref{N gen ker}.

Now suppose $0 < m < \lfloor\frac{k-3\ell}{2}\rfloor$.
Then the localisation of the $b_{\mathbf{r}, k,\ell, m}$ symbols ensures that
\begin{equation}\label{J=3 N 1 b}
|u(\xi)| \sim 2^{k-2\ell} \quad \text{ and } \quad \min_{\pm}|s-\theta_1^{\pm}(\xi)| \sim \rho 2^{-(k-\ell)/2 + m} \quad \textrm{for all $(\xi;s) \in \mathrm{supp}\, b_{\mathbf{r},k,\ell, m}$.}
\end{equation}
Using the convexity argument from the proof of Lemma~\ref{J=3 s loc lem}, we may bound
\begin{equation}\label{J=3 Nik 3 b}
|\inn{\gamma'(s)}{\xi}| \geq \min_{\pm} \frac{|u(\xi)| |s-\theta_1^{\pm}(\xi)|} {|\theta_2(\xi)- \theta_1^{\pm}(\xi)|} \qquad \textrm{for all $(\xi;s) \in \mathrm{supp}\, b_{\mathbf{r},k,\ell, m}$.}
\end{equation}
Consequently, using Lemma~\ref{root control lem} and \eqref{J=3 N 1 b} in \eqref{J=3 Nik 2 b} and \eqref{J=3 Nik 3 b}, and provided $\rho>0$ is chosen sufficiently small,
\begin{equation}\label{J=3 N 2 b}
| \inn{\gamma'(s)}{\xi}| \sim \rho 2^{(k - \ell)/2 + m}, \quad | \inn{\gamma''(s)}{\xi}| \sim 2^{k - \ell} \quad \textrm{and} \quad |\inn{\gamma^{(N)}(s)}{\xi}| \lesssim_N 2^k \qquad \textrm{for all $N \geq 3$.}
\end{equation}
For the upper bound on $|\inn{\gamma'(s)}{\xi}|$ in the above display, we use the restriction $m \leq \lfloor\tfrac{k-3\ell}{2}\rfloor$. It is for this reason that we simultaneously localise with respect to \textit{both} $\theta_2(\xi)$ and $\theta_1^{\pm}(\xi)$. In particular,
\begin{equation*}
|\inn{\gamma^{(N)}(s)}{\xi}|\lesssim 2^k \sim 2^{k-((k-\ell)/2+m)N} |\inn{\gamma'(s)}{\xi}|^N \lesssim 2^{-2m(N-1)} |\inn{\gamma'(s)}{\xi}|^N \quad \text{ for all $N \geq 3$,}
\end{equation*}
where in the last inequality one uses the restriction $m \leq \lfloor\frac{k-3\ell}{2}\rfloor$ and the fact $N \geq 3$. By the definition of $a_{\mathbf{r}}$, the first and second bounds in \eqref{J=3 N 2 b} imply $r_1 \leq 2^{-(k-\ell)/2 - m}$ and $r_2 \leq 2^{-(k-\ell)}$, whilst $r_3 \leq 2^{-k}$. Thus, by the definition of the Frenet frame $\{\mathbf{e}_j(s)\}_{j=1}^3$ and the bounds \eqref{J=3 N 2 b}, the symbol satisfies
\begin{equation}\label{J=3 N 3 b}
|\partial_s^N b_{\mathbf{r},k,\ell, m}(\xi;s)| \lesssim 2^{((k-\ell)/2 - m)N} = 2^{-2mN} 2^{((k-\ell)/2 + m)N} \qquad \textrm{for all $N \in \mathbb{N}_0$,}
\end{equation}
using the restriction $m\leq \lfloor\frac{k-3\ell}{2}\rfloor$. Thus, we may bound the kernel via repeated integration-by-parts. In particular, applying Lemma~\ref{non-stationary lem} with $\phi(s):=(t-t')\inn{\gamma(s)}{\xi}$ and $R := 2^{2m}|t-t'|$, we deduce that
\begin{equation*}
|\mathcal{K}[\mathfrak{d}_s^{\iota} b_{\mathbf{r},k,\ell,m}](\xi; t, t')| \lesssim_N 2^{(k - \ell + 2m)\iota} 2^{-(k-\ell)/2 + m} \big(1 + 2^{2m}|t - t'| \big)^{-N}.
\end{equation*}
The additional $2^{(k - \ell + 2m)\iota}$ factor arises in the bound for the derived operator $\mathfrak{d}_s$ owing to the formula \eqref{N gen deriv} for the corresponding symbol (and in particular, due to the bounds in \eqref{J=3 N 2 b} and in \eqref{J=3 N 3 b}) and the form of the kernel $\mathcal{K}$ as described in \eqref{N gen ker}.
It remains to consider the case $m=\lfloor\tfrac{k-3\ell}{2}\rfloor$. Then the localisation of the $b_{\mathbf{r},k,\ell, m}$ symbols ensures that
\begin{equation}\label{J=3 Nikodym 1 b}
|u(\xi)| \sim_{\rho} 2^{k-2\ell} \quad \text{ and } \quad \min_{\pm}|s-\theta_1^{\pm}(\xi)| \sim_{\rho} 2^{-\ell} \quad \textrm{for all $(\xi;s) \in \mathrm{supp}\, b_{\mathbf{r},k,\ell, m}$.}
\end{equation}
Using Lemma~\ref{root control lem} and \eqref{J=3 Nikodym 1 b} in \eqref{J=3 Nik 2 b} and \eqref{J=3 Nik 3 b}, we have the bounds
\begin{equation*}
| \inn{\gamma'(s)}{\xi}| \sim 2^{k - 2\ell}, \quad | \inn{\gamma''(s)}{\xi}| \lesssim 2^{k - \ell}, \quad \textrm{and} \quad |\inn{\gamma^{(N)}(s)}{\xi}| \lesssim_N 2^k \qquad \textrm{for all $N \geq 3$.}
\end{equation*}
By the definition of $a_{\mathbf{r}}$, the first bound above implies $r_1 \lesssim_{\rho} 2^{-(k-2\ell)}$ and, as $r_3 \lesssim 2^{-k}$, one has $r_2 \leq r_1^{1/2}r_3^{1/2} \lesssim_{\rho} 2^{-(k-\ell)}$. Thus, by the definition of the Frenet frame $\{\mathbf{e}_j(s)\}_{j=1}^3$ and the bounds \eqref{J=3 N 2 b}, the symbol satisfies
\begin{equation*}
|\partial_s^N b_{\mathbf{r},k,\ell, m}(\xi;s)| \lesssim 2^{\ell N} = 2^{-(k-3\ell)N} 2^{(k-2\ell)N} \qquad \textrm{for all $N \in \mathbb N_0$.}
\end{equation*}
Thus, we may bound the kernel via repeated integration-by-parts. In particular, applying Lemma~\ref{non-stationary lem} with $\phi(s):=(t-t')\inn{\gamma(s)}{\xi}$ and $R := 2^{k-3\ell}|t-t'|$, we deduce that
\begin{equation*}
|\mathcal{K}[\mathfrak{d}_s^{\iota} b_{\mathbf{r},k,\ell,m}](\xi; t, t')| \lesssim_N 2^{2(k - 2\ell)\iota} 2^{-\ell} \big(1 + 2^{k-3\ell}|t - t'| \big)^{-N}.
\end{equation*}
The additional $2^{2(k - 2\ell)\iota}$ arises in the bound for the derived operator $\mathfrak{d}_s$ owing to the formula \eqref{N gen deriv} for the corresponding symbol (and in particular, due to the bounds in \eqref{J=3 N 2 b} and in \eqref{J=3 N 3 b} and the restriction $\ell \leq \lfloor k/3 \rfloor$) and the form of the kernel $\mathcal{K}$ as described in \eqref{N gen ker}. Finally, by integrating both sides of the above display in either $t$ or $t'$, the desired estimate \eqref{J=3 N ker a} follows.
\end{proof}

\medskip

\noindent \textit{Putting everything together}. In view of the kernel estimates from Lemma~\ref{J=3 N ker lem} and the discussion at the beginning of the proof, it follows that
\begin{align*}
\| \mathcal{A}[\mathfrak{d}_s^{\iota} a_{\mathbf{r},k,\ell,m}] \|_{L^2(\mathbb{R}^4) \to L^2(\mathbb{R}^{3+1})} &\lesssim 2^{(k-2\ell + 2m)(\iota - 1/2)} \quad \textrm{for all $(\ell, m) \in \Lambda_a(k)$}, \\
\| \mathcal{A}[\mathfrak{d}_s^{\iota} b_{\mathbf{r},k,\ell,m}] \|_{L^2(\mathbb{R}^4) \to L^2(\mathbb{R}^{3+1})} &\lesssim 2^{((k - \ell)/2 + m)(\iota - 1/2)} \quad \textrm{for all $(\ell, m) \in \Lambda_b (k)$},
\end{align*}
for $\iota \in \{0,1\}$.
Combining these bounds with \eqref{gen Sobolev}, it follows that
\begin{align*}
\| \mathcal{N}[a_{\mathbf{r},k,\ell,m}] \|_{L^2(\mathbb{R}^4) \to L^2(\mathbb{R}^3)} &\lesssim 1 \quad \textrm{for all $(\ell, m) \in \Lambda_a(k)$}, \\
\| \mathcal{N}[b_{\mathbf{r},k,\ell,m}] \|_{L^2(\mathbb{R}^4) \to L^2(\mathbb{R}^3)} &\lesssim 1 \quad \textrm{for all $(\ell, m) \in \Lambda_b (k)$}.
\end{align*}
Since the cardinalities of $\Lambda_a(k)$ and $\Lambda_b(k)$ are $O(k^2)$, the frequency localised maximal bound \eqref{N freq loc est} immediately follows from the triangle inequality. Summing over $k$ then concludes the proof of the proposition.
\end{proof}

\section{Necessary conditions}\label{nec cond sec}

In this final section we show the condition $p > 3$ in Theorem~\ref{intro max thm} is necessary. Moreover, we prove the following result, which is valid in arbitrary dimensions $n \geq 2$.

\begin{proposition}
If $n \geq 2$ and $\gamma \colon I \to \mathbb{R}^n$ is a smooth non-degenerate curve, then
\begin{equation*}
\|M_{\gamma}\|_{L^p(\mathbb{R}^n) \to L^p(\mathbb{R}^n)} = \infty \qquad \textrm{for $1 \leq p \leq n$.}
\end{equation*}
\end{proposition}

\begin{proof}
By localisation of the operator and applying the rescaling from \S\ref{curve sym sec}, it suffices to consider the case where
\begin{equation*}
\gamma(\,\cdot\,) - \gamma(0) \in \mathfrak{G}_n(\delta_0) \qquad \textrm{and} \qquad \inn{\gamma(0)}{\vec{e}_n} \neq 0
\end{equation*}
for $\delta_0 := 10^{-n}$, say. By reparametrising the curve, we may also assume that the first component of $\gamma \colon [-1,1] \to \mathbb{R}^n$ is of the form $\gamma_1(s) = s + a_1$ for some $a_1 \in \mathbb{R}$. By a simple projection argument, it suffices to study the boundedness of a maximal operator defined over the Euclidean plane. In particular, fix $a = (a_1, a_2) \in \mathbb{R}^2$ with $a_2 \neq 0$ and a smooth function $h \colon [-1,1] \to \mathbb{R}$ satisfying
\begin{equation}\label{nec prop 1}
h^{(j)}(0) = 0 \quad \textrm{for $0 \leq j \leq n-1$} \quad \textrm{and} \quad h^{(n)}(0) \neq 0.
\end{equation}
Define the maximal operator
\begin{equation*}
\mathcal{M}_h f(x) = \sup_{t > 0} \big| \int_{\mathbb{R}} f\big(x_1 - t(s + a_1), x_2 - t(h(s) + a_2) \big)\chi(s)\,\mathrm{d} s\big|
\end{equation*}
where $\chi \in C^{\infty}_c(\mathbb{R})$ is non-negative, satisfies $\chi(s) = 1$ for $|s| \leq 1/2$ and has support contained in the interior of $[-1,1]$. To prove the proposition, it suffices to show
\begin{equation}\label{nec prop 2}
\|\mathcal{M}_h\|_{L^p(\mathbb{R}^2) \to L^p(\mathbb{R}^2)} = \infty \qquad \textrm{for $1 \leq p \leq n$.}
\end{equation}
Furthermore, since the maximal operator is trivially bounded on $L^{\infty}$, it suffices to consider the case $p=n$ only. By Taylor expansion and \eqref{nec prop 1}, we have
\begin{equation*}
|h(s)| \leq D_h \cdot |s|^n \qquad \textrm{for $|s| \leq 1$} \quad \textrm{where} \quad D_h := \frac{1}{n!} \sup_{|s| \leq 1} |h^{(n)}(s)|.
\end{equation*}
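To spell out this standard step: by \eqref{nec prop 1} and the Lagrange form of the Taylor remainder, for each $|s| \leq 1$ there exists some $\zeta$ lying between $0$ and $s$ such that
\begin{equation*}
h(s) = \sum_{j=0}^{n-1} \frac{h^{(j)}(0)}{j!}\, s^j + \frac{h^{(n)}(\zeta)}{n!}\, s^n = \frac{h^{(n)}(\zeta)}{n!}\, s^n,
\end{equation*}
from which the stated bound is immediate.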
For $0 < r < 1$ let $f_r := {\mathbbm 1}_{K(r)}$ denote the indicator function of the set
\begin{equation*}
K(r) := \big\{ y = (y_1, y_2) \in \mathbb{R}^2 : |y_1 - a_1| \leq r \textrm{ and } |y_2 - a_2| \leq D_h \cdot r^n \big\}
\end{equation*}
and observe that
\begin{equation}\label{nec prop 3}
\|f_r\|_{L^n(\mathbb{R}^2)} \sim_h r^{(n+1)/n}.
\end{equation}
Now let $r^n \leq \lambda \leq 1$ be a dyadic number and suppose $x \in E_{\lambda}(r)$ where
\begin{equation*}
E_{\lambda}(r) := \big\{x = (x_1, x_2) \in \mathbb{R}^2 : \big|x_1 - \frac{a_1}{a_2} \, x_2\big| \leq \frac{r}{2} \textrm{ and } \lambda \leq \frac{x_2}{a_2} - 1 < 2\lambda \big\}.
\end{equation*}
If we define $t_x := a_2^{-1}x_2 - 1 \in [\lambda,2\lambda]$, then for any $s \in \mathbb{R}$ satisfying $|s| \leq \frac{1}{2} \cdot \lambda^{(n-1)/n} r$ we have
\begin{align*}
|x_1 - t_x(t_x^{-1}s + a_1) - a_1| &\leq \big|x_1 - \frac{a_1}{a_2} \, x_2\big| + |s| \leq r, \\
|x_2 - t_x(h(t_x^{-1} s) + a_2) - a_2| &= |t_x||h(t_x^{-1} s)| \leq D_h \cdot \lambda^{-(n-1)}|s|^n \leq D_h \cdot r^n.
\end{align*}
From these observations, we conclude that
\begin{equation*}
\textrm{if $x \in E_{\lambda}(r)$ and $|s| \leq \frac{1}{2} \, \lambda^{(n-1)/n} r$, then } \big(x_1 - t_x(t_x^{-1}s + a_1), x_2 - t_x(h(t_x^{-1}s) + a_2)\big) \in K(r).
\end{equation*}
Performing a change of variable in the underlying averaging operator, we deduce that
\begin{equation*}
\mathcal{M}_h f_r(x) \gtrsim \lambda^{-1/n} r \qquad \textrm{for all $x \in E_{\lambda}(r)$,}
\end{equation*}
where here we pick up an extra factor of $\lambda^{-1}$ owing to the Jacobian. Consequently,
\begin{equation}\label{nec prop 4}
\|\mathcal{M}_h f_r\|_{L^n(\mathbb{R}^2)} \gtrsim \big(\sum_{\substack{\lambda \,:\, \mathrm{dyadic} \\ r^n \leq \lambda \leq 1}} \lambda^{-1}r^n |E_{\lambda}(r)|\big)^{1/n} \sim_a |\log r|^{1/n} r^{(n + 1)/n}.
\end{equation}
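To see the final relation in \eqref{nec prop 4}, note that $E_{\lambda}(r)$ is a parallelogram: the coordinate $x_2$ ranges over an interval of length $|a_2|\lambda$ and, for each such $x_2$, the coordinate $x_1$ ranges over an interval of length $r$. Hence $|E_{\lambda}(r)| = |a_2| \lambda r$, so that
\begin{equation*}
\sum_{\substack{\lambda \,:\, \mathrm{dyadic} \\ r^n \leq \lambda \leq 1}} \lambda^{-1}r^n |E_{\lambda}(r)| = |a_2| \sum_{\substack{\lambda \,:\, \mathrm{dyadic} \\ r^n \leq \lambda \leq 1}} r^{n+1} \sim_a |\log r|\, r^{n+1},
\end{equation*}
since there are roughly $\log_2 r^{-n} \sim_n |\log r|$ dyadic scales $\lambda$ in the range $[r^n, 1]$.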
Comparing \eqref{nec prop 3} and \eqref{nec prop 4}, we see that the ratio of $\|\mathcal{M}_h f_r\|_{L^n(\mathbb{R}^2)}$ and $\|f_r\|_{L^n(\mathbb{R}^2)}$ is unbounded as $r \to 0$ and therefore \eqref{nec prop 2} holds for $p=n$, as desired.
\end{proof}

\appendix

\section{An abstract broad/narrow decomposition}\label{BG appendix}

Here we provide an abstract version of the broad/narrow decomposition in Lemma \ref{broad narrow lemma E}. For the sake of self-containedness of this appendix, we recall some of the definitions introduced in \S\ref{multilinear subsec}. Let $\mathfrak{I}$ denote the collection of all dyadic subintervals of $[-1,1]$ and for any dyadic number $0 < r \leq 1$ let $\mathfrak{I}(r)$ denote the subset of $\mathfrak{I}$ consisting of all intervals of length $r$. Let $\mathfrak{I}_{\geq r}$ denote the union of the $\mathfrak{I}(\lambda)$ over all dyadic $\lambda$ satisfying $r \leq \lambda \leq 1$. Given any pair of dyadic scales $0 < \lambda_1 \leq \lambda_2 \leq 1$ and $J \in \mathfrak{I}(\lambda_2)$, let $\mathfrak{I}(J;\,\lambda_1)$ denote the collection of all $I \in \mathfrak{I}(\lambda_1)$ which satisfy $I \subseteq J$. For $d \in \mathbb N$ and each dyadic number $0 < \lambda \leq 1$ let $\mathfrak{I}^d_{\mathrm{sep}}(\lambda)$ denote the collection of $d$-tuples of intervals $\vec{I} = (I_1, \dots, I_d) \in \mathfrak{I}(\lambda)^d$ which satisfy the separation condition
\begin{equation*}
\mathrm{dist}(I_1, \dots, I_d) := \min_{1 \leq \ell_1 < \ell_2 \leq d} \mathrm{dist}(I_{\ell_1}, I_{\ell_2}) \geq \lambda.
\end{equation*}
Given dyadic scales $0 < \lambda_1 \leq \lambda_2 \leq 1$ and $J \in \mathfrak{I}(\lambda_2)$, let $\mathfrak{I}^d_{\mathrm{sep}}(J;\,\lambda_1)$ denote the collection of all $d$-tuples of intervals $\vec{I} = (I_1, \dots, I_d) \in \mathfrak{I}^d_{\mathrm{sep}}(\lambda_1)$ which satisfy $I_\ell \subseteq J$ for all $1 \leq \ell \leq d$.

The dyadic decomposition from \eqref{rsq dyadic dec} is one instance of an `abstract' notion of dyadic decomposition, introduced in the following definition.

\begin{definition}
Let $(X,\mu)$ be a measure space, $F \colon X \to \mathbb{C}$ a measurable function and $0 < r \leq 1$. A sequence $(F_I)_{I \in \mathfrak{I}_{\geq r}}$ of measurable functions $F_I \colon X \to \mathbb{C}$ is said to be a \textit{dyadic decomposition of $F$ up to scale $r$} if it satisfies
\begin{equation*}
F_{[0,1]} = F \qquad \textrm{and} \qquad F_{J} = \sum_{I \in \mathfrak{I}(J; \lambda_1)} F_I \qquad \textrm{for all $J \in \mathfrak{I}(\lambda_2)$}
\end{equation*}
whenever $0 < r \leq \lambda_1 \leq \lambda_2 \leq 1$ are dyadic. Here the identities are understood to hold $\mu$-almost everywhere.
\end{definition}

The broad/narrow decomposition result from which Lemma \ref{broad narrow lemma E} follows is the following.

\begin{lemma}\label{broad narrow lemma}
Let $(X,\mu)$ be a measure space, $k \in \mathbb N$ with $k \geq 2$ and $\varepsilon > 0$. For all $r > 0$ there exist dyadic numbers $r_{\mathrm{n}}$ and $r_{\mathrm{b}}$ satisfying
\begin{equation}\label{broad narrow equation 1}
r < r_{\mathrm{n}} \lesssim_{\varepsilon, k} r, \qquad r < r_{\mathrm{b}} \leq 1
\end{equation}
such that the following holds. If $F \in L^p(X)$ for some $1 \leq p < \infty$ and $(F_I)_{I \in \mathfrak{I}}$ is a dyadic decomposition of $F$ up to scale $r$, then
\begin{equation}\label{broad narrow equation 2}
\|F\|_{L^p(X)} \lesssim_{\varepsilon, k} r^{-\varepsilon} \big( \sum_{I \in \mathfrak{I}(r_{\mathrm{n}})}\|F_I\|_{L^p(X)}^p\big)^{1/p} + r^{-\varepsilon} \big(\sum_{\substack{J \in \mathfrak{I}(Cr_{\mathrm{b}}) \\ \vec{I} \in \mathfrak{I}^k_{\mathrm{sep}}(J;\,r_{\mathrm{b}})} }\big\|\prod_{j=1}^{k}|F_{I_{j}}|^{1/k}\big\|_{L^p(X)}^{p} \big)^{1/p},
\end{equation}
where $C = C_{\varepsilon, k} \geq 1$ is a dyadic number depending only on $\varepsilon$ and $k$.
\end{lemma}

The intervals $I \in \mathfrak{I}(r_{\mathrm{n}})$ are referred to as \textit{narrow intervals} whilst the $k$-tuples of intervals $\vec{I} \in \mathfrak{I}^k_{\mathrm{sep}}(J;\,r_{\mathrm{b}})$ are referred to as \textit{broad interval tuples}. The key ingredient in the proof of Lemma~\ref{broad narrow lemma} is a 1-parameter variant of the Bourgain--Guth decomposition from \cite{Bourgain2011} due to Ham--Lee \cite{Ham2014}.

\begin{lemma}[Ham--Lee \cite{Ham2014}]\label{Ham--Lee lemma}
Let $1 \leq p < \infty$ and $k \in \mathbb N$ with $k \geq 2$. Suppose $0 <\ell_1, \dots, \ell_{k-1} \leq 1$ are dyadic numbers such that
\begin{equation*}
1 =: \ell_0 \geq \ell_1 \geq \dots \geq \ell_{k-1} > 0.
\end{equation*}
If $(X,\mu)$ is a measure space and $(F_I)_{I \in \mathfrak{I}}$ is a dyadic decomposition of $F \in L^p(X)$, then for any $\ell>0$,
\begin{align*}
\big(\sum_{J \in \mathfrak{I}(\ell)} \|F_J\|_{L^p(X)}^p\big)^{1/p} &\leq 4 \sum_{i=1}^{k-1} \ell_{i-1}^{-2(i-1)} \big(\sum_{I \in \mathfrak{I}(\ell_i \ell)}\|F_I\|_{L^p(X)}^p\big)^{1/p} \\
& \quad + \ell_{k-1}^{-2(k-1)} \big(\sum_{\substack{J \in \mathfrak{I}(\ell) \\ \vec{I} \in \mathfrak{I}^k_{\mathrm{sep}}(J;\ell_{k-1} \ell)}} \big\|\prod_{j=1}^{k}|F_{I_j}|^{1/k}\big\|_{L^p(X)}^p\big)^{1/p}.
\end{align*}
\end{lemma}

Rather than working in the relatively abstract setting of dyadic decompositions of measurable functions, Ham--Lee \cite[Lemma 2.8]{Ham2014} apply the decomposition only in the concrete setting of Fourier extension operators associated to space curves. However, the proof is elementary, relying little on the exact form of the extension operator, and can easily be adapted to yield Lemma~\ref{Ham--Lee lemma}. For completeness, the details are presented at the end of the section.

Lemma~\ref{broad narrow lemma} is deduced by applying Lemma~\ref{Ham--Lee lemma} iteratively, for appropriately chosen dyadic scales $\ell_1, \dots, \ell_{k-1}$.

\begin{proof}[Proof of Lemma~\ref{broad narrow lemma}]
Fix $\varepsilon > 0$ and $k \in \mathbb N$ with $k \geq 2$. Define the dyadic scales $1 \geq \ell_1 \geq \cdots \geq \ell_{k-1} > 0$ recursively so as to satisfy
\begin{equation*}
\frac{\log \big(4 \vee (k-1)\big)}{\log \ell_1^{-1}} \leq \frac{\varepsilon}{6}, \qquad \frac{\log \ell_{j-1}^{-2(j-1)}}{\log \ell_j^{-1}} \leq \frac{\varepsilon}{6} \quad \textrm{for $2 \leq j \leq k-1$.}
\end{equation*}
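Such scales can always be chosen; for instance, one crude and non-optimised choice (assuming, as we may, that $\varepsilon \leq 1$) is to take $\ell_j := 2^{-M_j}$ where
\begin{equation*}
M_1 := \big\lceil 6 \varepsilon^{-1} \log_2\big(4 \vee (k-1)\big) \big\rceil \qquad \textrm{and} \qquad M_j := \big\lceil 12 (j-1) \varepsilon^{-1} M_{j-1} \big\rceil \quad \textrm{for $2 \leq j \leq k-1$,}
\end{equation*}
which produces dyadic, non-increasing scales depending only on $\varepsilon$ and $k$.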
Now fix $r > 0$, $F \in L^p(X)$ and $(F_I)_{I \in \mathfrak{I}}$ a dyadic decomposition of $F$. If $r \gtrsim_{\varepsilon, k} 1$, then the desired result immediately follows from the triangle inequality and so $r$ may be assumed to be smaller than some small constant $c_{\varepsilon, k}$, depending only on $\varepsilon$ and $k$ and chosen for the purposes of the forthcoming argument; in particular we can assume $\ell_{k-1} > r$.

Let $\mathcal{W}$ denote the set of all finite words formed from the alphabet $\{1, \dots, k-1\}$. Given any $w \in \mathcal{W}$ and $1 \leq j \leq k-1$ write $[w]_j$ for the number of occurrences of $j$ in $w$ and $|w| := [w]_1 + \cdots + [w]_{k-1}$ for the length of the word. Let $\ell^{w} := \prod_{j=1}^{k-1} \ell_j^{[w]_j}$ for any $w \in \mathcal{W}$ and define
\begin{equation*}
\mathcal{A}(r) := \big\{ \alpha \in \mathcal{W} : r < \ell^{\alpha} \leq r/\ell_{k-1} \big\}, \quad \mathcal{B}(r) := \{ \beta \in \mathcal{W} : \ell^{\beta} > r/\ell_{k-1} \}.
\end{equation*}
Finally, for each $N \in \mathbb N_0$ define
\begin{gather*}
\mathcal{A}_{\leq N}(r) := \big\{ \alpha \in \mathcal{A}(r): |\alpha| \leq N \big\}, \quad \mathcal{B}_{\leq N}(r) := \{ \beta \in \mathcal{B}(r) : |\beta| \leq N \}, \\
\mathcal{A}_N(r) := \{ \alpha \in \mathcal{A}(r) : |\alpha| = N \}, \qquad \mathcal{B}_N(r) := \{ \beta \in \mathcal{B}(r) : |\beta| = N \}.
\end{gather*}
An iterative application of Lemma~\ref{Ham--Lee lemma} yields the following key claim.

\begin{claim}
For all $N \in \mathbb N_0$,
\begin{align}\label{iteration claim}
\|F\|_{L^p(X)} &\leq \sum_{\alpha \in \mathcal{A}_{\leq N}(r) \cup \mathcal{B}_N(r)} M_{\varepsilon, k}^{\alpha} \big(\sum_{I \in \mathfrak{I}(\ell^{\alpha})}\|F_I\|_{L^p(X)}^p\big)^{1/p}\\
\nonumber & \quad + \ell_{k-1}^{-2(k-1)}\sum_{\beta \in \mathcal{B}_{\leq N-1}(r)} M_{\varepsilon, k}^{\beta} \big(\sum_{\substack{J \in \mathfrak{I}(\ell^{\beta}) \\ \vec{I} \in \mathfrak{I}^k_{\mathrm{sep}}(J;\ell_{k-1} \ell^{\beta}) }}\big\|\prod_{j=1}^{k}|F_{I_j}|^{1/k}\big\|_{L^p(X)}^p\big)^{1/p},
\end{align}
where $M_{\varepsilon, k}^{\alpha} := 4^{|\alpha|} \prod_{j=1}^{k-1} \ell_{j-1}^{-2(j-1)[\alpha]_j}$.
\end{claim}

\begin{proof}[Proof (of Claim)]
The proof is by induction on $N$. The case $N = 0$ is vacuous and thus one may assume, by way of induction hypothesis, that \eqref{iteration claim} holds for some $N \geq 0$. It remains to establish the inductive step. Consider the terms on the right-hand side of \eqref{iteration claim} of the form
\begin{equation*}
\big(\sum_{I \in \mathfrak{I}(\ell^{\beta})}\|F_I\|_{L^p(X)}^p\big)^{1/p} \qquad \textrm{for $\beta \in \mathcal{B}_N(r)$.}
\end{equation*}
Applying Lemma~\ref{Ham--Lee lemma} to each of these terms,
\begin{align*}
\|F\|_{L^p(X)} &\leq \sum_{\alpha \in \mathcal{A}_{\leq N}(r)} M_{\varepsilon, k}^{\alpha} \big(\sum_{I \in \mathfrak{I}(\ell^{\alpha})}\|F_I\|_{L^p(X)}^p\big)^{1/p} \\
& \quad + \sum_{\beta \in \mathcal{B}_{N}(r)} M_{\varepsilon, k}^{\beta}\, 4\sum_{i=1}^{k-1} \ell_{i-1}^{-2(i-1)} \big(\sum_{I \in \mathfrak{I}(\ell_i \ell^{\beta})}\|F_I\|_{L^p(X)}^p\big)^{1/p}\\
& \quad + \ell_{k-1}^{-2(k-1)}\sum_{\beta \in \mathcal{B}_{\leq N}(r)} M_{\varepsilon, k}^{\beta} \big(\sum_{\substack{J \in \mathfrak{I}(\ell^{\beta}) \\ \vec{I} \in \mathfrak{I}^k_{\mathrm{sep}}(J;\ell_{k-1} \ell^{\beta})} }\big\|\prod_{j=1}^{k}|F_{I_j}|^{1/k}\big\|_{L^p(X)}^p \big)^{1/p}.
\end{align*}
From the definitions,
\begin{equation*}
\mathcal{A}_{\leq N+1}(r) \cup \mathcal{B}_{N+1}(r) = \mathcal{A}_{\leq N}(r) \cup \mathcal{A}_{N+1}(r) \cup \mathcal{B}_{N+1}(r),
\end{equation*}
where the union is disjoint. Furthermore, the set $\mathcal{A}_{N+1}(r) \cup \mathcal{B}_{N+1}(r)$ precisely corresponds to the set of words obtained by adding a single letter to one of the words in $\mathcal{B}_{N}(r)$. Combining these observations, the induction readily closes.
\end{proof}

Using the claim, the proof of Lemma~\ref{broad narrow lemma} quickly follows from the choice of scales $\ell_j$. Indeed, first observe that for $N := \max_{\alpha \in \mathcal{A}(r)} |\alpha|$ it follows that $\mathcal{B}_N(r) = \emptyset$ and thus
\begin{equation*}
\mathcal{A}_{\leq N}(r) \cup \mathcal{B}_N(r) = \mathcal{A}(r) \qquad \textrm{and} \qquad \mathcal{B}_{\leq N-1}(r) = \mathcal{B}(r).
\end{equation*}
Note that each $w \in \mathcal{A}(r) \cup \mathcal{B}(r)$ satisfies $(\ell^{w})^{-1} < r^{-1}$ and therefore
\begin{equation}\label{how many steps?}
|w| \log \ell_1^{-1} \leq \sum_{j=1}^{k-1} [w]_j \log \ell_j^{-1} \leq \log r^{-1}.
\end{equation}
By the choice of $\ell_1$, it follows that
\begin{equation}\label{4 to w}
4^{|w|} \leq 4^{\log r^{-1}/\log \ell_1^{-1}} = r^{-\log 4/\log \ell_1^{-1}} \leq r^{-\varepsilon/6},
\end{equation}
whilst, similarly,
\begin{equation*}
\# \mathcal{A}(r) \cup \mathcal{B}(r) \leq \#\{w \in \mathcal{W} : |w| \leq \log r^{-1}/\log \ell_1^{-1}\} \leq r^{-\log(k-1)/\log \ell_1^{-1}} \leq r^{-\varepsilon/6}.
\end{equation*}
On the other hand, as a further consequence of \eqref{how many steps?} and the choice of scales $\ell_j$, if $w \in \mathcal{A}(r) \cup \mathcal{B}(r)$, then
\begin{equation}\label{prod of ell's}
\log \prod_{j=1}^{k-1} \ell_{j-1}^{-2(j-1)[w]_j} = \sum_{j=1}^{k-1} [w]_j\log \ell_j^{-1} \frac{\log \ell_{j-1}^{-2(j-1)}}{\log \ell_j^{-1}} \leq \log r^{-\varepsilon/6}.
\end{equation}
The estimates \eqref{4 to w} and \eqref{prod of ell's} imply that
\begin{equation*}
M_{\varepsilon,k}^{w} = 4^{|w|}\prod_{j=1}^{k-1} \ell_{j-1}^{-2(j-1)[w]_j} \leq r^{-\varepsilon/3} \qquad \textrm{for all $w \in \mathcal{A}(r) \cup \mathcal{B}(r)$,}
\end{equation*}
where the $M_{\varepsilon,k}^{w}$ are the constants appearing in the above claim. Combining these observations with \eqref{iteration claim} for the choice of $N$ as above,
\begin{align*}
\|F\|_{L^p(X)} &\leq r^{-\varepsilon/3} \sum_{\alpha \in \mathcal{A}(r)} \big( \sum_{ I \in \mathfrak{I}(\ell^{\alpha})}\|F_I\|_{L^p(X)}^p\big)^{1/p}\\
& \quad + r^{-\varepsilon/3}\ell_{k-1}^{-2(k-1)} \sum_{\beta \in \mathcal{B}(r)} \big(\sum_{\substack{J \in \mathfrak{I}(\ell^{\beta}) \\ \vec{I} \in \mathfrak{I}^k_{\mathrm{sep}}(J; \ell_{k-1}\ell^{\beta}) }}\big\|\prod_{j=1}^{k}|F_{I_j}|^{1/k}\big\|_{L^p(X)}^p\big)^{1/p}.
\end{align*}
Finally, since $\ell_{k-1}^{-1} \lesssim_{\varepsilon, k} 1$ and $\# \mathcal{A}(r), \# \mathcal{B}(r) \leq r^{-\varepsilon/6}$, by pigeonholing there exists some $\alpha_{\mathrm{n}} \in \mathcal{A}(r)$ and $\beta_{\mathrm{b}} \in \mathcal{B}(r)$ such that, if $r_{\mathrm{n}} := \ell^{\alpha_{\mathrm{n}}}$ and $r_{\mathrm{b}} := \ell_{k-1} \ell^{\beta_{\mathrm{b}}}$ and $C_{\varepsilon,k} := \ell_{k-1}^{-1}$, then the desired inequality \eqref{broad narrow equation 2} holds.
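To spell out the pigeonholing: since $\#\mathcal{A}(r) \leq r^{-\varepsilon/6}$, choosing $\alpha_{\mathrm{n}} \in \mathcal{A}(r)$ to maximise the corresponding summand gives
\begin{equation*}
\sum_{\alpha \in \mathcal{A}(r)} \big( \sum_{ I \in \mathfrak{I}(\ell^{\alpha})}\|F_I\|_{L^p(X)}^p\big)^{1/p} \leq r^{-\varepsilon/6} \big( \sum_{ I \in \mathfrak{I}(r_{\mathrm{n}})}\|F_I\|_{L^p(X)}^p\big)^{1/p},
\end{equation*}
and similarly for the sum over $\mathcal{B}(r)$; the resulting factors $r^{-\varepsilon/3} \cdot r^{-\varepsilon/6} \leq r^{-\varepsilon/2}$ and $\ell_{k-1}^{-2(k-1)} \lesssim_{\varepsilon,k} 1$ are absorbed into the right-hand side of \eqref{broad narrow equation 2}.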
It is easy to see that these parameters also satisfy \eqref{broad narrow equation 1} directly from the relevant definitions.
\end{proof}

To close this section, the proof of Lemma~\ref{Ham--Lee lemma} is presented, following the argument in \cite{Ham2014}.

\begin{proof}[Proof of Lemma~\ref{Ham--Lee lemma}]
For notational convenience, given $m \in \mathbb N$ and $J \in \mathfrak{I}(\ell)$ define
\begin{equation*}
\pi^m_J(F)(x) := \max_{\vec{I}^{m-1} \in \mathfrak{I}_{\mathrm{sep}}^m(J;\,\ell_{m-1}\ell) } \prod_{j=1}^m |F_{I^{m-1}_j}(x)|^{1/m}.
\end{equation*}
When $m = 1$ this reduces to $\pi^m_J(F)(x) = |F_J(x)|$. The main step in the proof of Lemma~\ref{Ham--Lee lemma} is the following pointwise estimate.

\begin{claim}
For all $m \in \mathbb N$ and $J \in \mathfrak{I}(\ell)$, the pointwise estimate
\begin{equation*}
\pi^m_J(F)(x) \leq 4 \max_{\substack{I^m \in \mathfrak{I}(J;\ell_m\ell)}} |F_{I^m}(x)| + \ell_m^{-2} \, \pi^{m+1}_J(F)(x)
\end{equation*}
holds for $\mu$-almost all $x \in X$.
\end{claim}

\begin{proof}
Fix $x \in X$ and $\vec{I}^{m-1} = (I^{m-1}_1,\dots,I^{m-1}_m) \in \mathfrak{I}^m_{\mathrm{sep}}(\ell_{m-1}\ell)$ with $I^{m-1}_j \subset J$ for $1 \leq j \leq m$. For each $j$ there exists an interval $I^{m,*}_j \in \mathfrak{I}(\ell_m \ell)$ satisfying
\begin{equation*}
I^{m,*}_j \subset I^{m-1}_j \quad \textrm{and} \quad |F_{I^{m,*}_j}(x)| = \max_{\substack{I^{m}_j \in \mathfrak{I}(I_j^{m-1};\ell_m\ell)}} |F_{I^{m}_j}(x)|.
\end{equation*}
There are two cases to consider:\\

\noindent \textbf{Narrow case:} Either one of the following two conditions holds:
\begin{enumerate}[i)]
\item For all $1 \leq j \leq m$, if $I^{m}_j \in \mathfrak{I}(\ell_m\ell)$ satisfies $I^m_j \subset I^{m-1}_j$ and $\mathrm{dist}(I^m_j, I^{m,*}_j) \geq \ell_m \ell$, then
\begin{equation*}
|F_{I^m_j}(x)| \leq \big(\frac{\ell_{m}}{\ell_{m-1}}\big)|F_{I^{m,*}_j}(x)|.
\end{equation*}
\item The selected intervals $I_j^{m,*} \in \mathfrak{I}(\ell_m \ell)$ above satisfy
\begin{equation*}
\min_{1 \leq j \leq m} |F_{I^{m,*}_j}(x)| \leq \big(\frac{\ell_{m}}{\ell_{m-1}}\big)^m\max_{1 \leq j \leq m} |F_{I^{m,*}_j}(x)|.
\end{equation*}
\end{enumerate}

\noindent \textbf{Broad case:} The conditions of the narrow case fail.\\

\subsubsection*{The narrow case}
If condition i) of the narrow case holds, then
\begin{equation*}
|F_{I^{m-1}_j}(x)| \leq 3 |F_{I^{m, *}_j}(x)| + \sum_{\substack{ I^m_j \in \mathfrak{I}(\ell_m\ell),\, I^m_j \subset I^{m-1}_j \\ \mathrm{dist}(I^m_j, I^{m,*}_j) \geq \ell_m\ell }}|F_{I^{m}_j}(x)| \leq 4 |F_{I_j^{m,*}}(x)|,
\end{equation*}
since there are at most $\ell_{m-1}/\ell_m$ intervals $I^m_j \in \mathfrak{I}(\ell_m\ell)$ contained in $I^{m-1}_j$. Thus, in this case,
\begin{equation}\label{Ham--Lee 1}
\prod_{j=1}^m |F_{I^{m-1}_j}(x)|^{1/m} \leq 4 \max_{I^m \in \mathfrak{I}(J;\ell_m\ell) } |F_{I^{m}}(x)|.
\end{equation}
Now suppose that condition ii) of the narrow case holds. Thus,
\begin{equation*}
\prod_{j=1}^m |F_{I^{m-1}_j}(x)|^{1/m} \leq \big(\frac{\ell_{m-1}}{\ell_{m}}\big) \prod_{j=1}^m |F_{I^{m,*}_j}(x)|^{1/m} \leq \max_{1 \leq j \leq m} |F_{I^{m,*}_j}(x)|
\end{equation*}
where the first inequality follows since there are at most $\ell_{m-1}/\ell_m$ intervals $I_j^m \in \mathfrak{I}(\ell_m \ell)$ contained in $I_j^{m-1}$. Once again, \eqref{Ham--Lee 1} holds (in fact, it holds with a constant 1 rather than 4). Hence, a favourable estimate holds in the narrow case.

\subsubsection*{The broad case}
Suppose the broad case holds. By definition, condition i) from the narrow case fails. Consequently, there exists some $1 \leq j_0 \leq m$ and an interval $I_{j_0}^{m,**} \in \mathfrak{I}(\ell_m\ell)$ satisfying
\begin{equation*}
I_{j_0}^{m,**} \subseteq I_{j_0}^{m-1}, \qquad \mathrm{dist} (I_{j_0}^{m,**}, I_{j_0}^{m,*}) \geq \ell_m \ell \qquad \textrm{and} \qquad |F_{I_{j_0}^{m,*}}(x)| \leq \big(\frac{\ell_{m-1}}{\ell_m}\big)|F_{I_{j_0}^{m,**}}(x)|.
\end{equation*}
On the other hand, condition ii) from the narrow case also fails and, consequently,
\begin{equation*}
\max_{1 \leq j \leq m} |F_{I^{m,*}_j}(x)| \leq \big(\frac{\ell_{m-1}}{\ell_m}\big)^m |F_{I^{m,*}_{j_0}}(x)| \leq \big(\frac{\ell_{m-1}}{\ell_m}\big)^{m+1}|F_{I_{j_0}^{m,**}}(x)|.
\end{equation*}
Thus, for each $1 \leq j \leq m$, it follows that
\begin{equation*}
|F_{I^{m,*}_j}(x)|^{1/m} \leq \big(\frac{\ell_{m-1}}{\ell_m}\big)^{1/m}|F_{I^{m,*}_j}(x)|^{1/(m+1)}|F_{I_{j_0}^{m,**}}(x)|^{1/m(m+1)}.
\end{equation*}
Finally, taking the product of the above estimate over all $j$, one deduces that
\begin{align*}
\prod_{j=1}^m |F_{I^{m-1}_j}(x)|^{1/m} &\leq \big(\frac{\ell_{m-1}}{\ell_{m}}\big) \prod_{j=1}^m |F_{I^{m,*}_j}(x)|^{1/m} \\
&\leq \big(\frac{\ell_{m-1}}{\ell_{m}}\big)^2 \big(\prod_{j=1}^m |F_{I^{m,*}_j}(x)|^{1/(m+1)}\big) |F_{I_{j_0}^{m,**}}(x)|^{1/(m+1)} \\
&\leq \ell_m^{-2} \, \pi^{m+1}_J(F)(x),
\end{align*}
where in the last inequality we use the separation condition.
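For this final inequality, it may be worth noting that the $(m+1)$-tuple $(I^{m,*}_1, \dots, I^{m,*}_m, I^{m,**}_{j_0})$ consists of intervals in $\mathfrak{I}(\ell_m\ell)$ contained in $J$ which are pairwise $\ell_m\ell$-separated: intervals lying in distinct parents $I^{m-1}_j$ inherit the $\ell_{m-1}\ell \geq \ell_m\ell$ separation of the parent intervals, whilst $\mathrm{dist}(I^{m,**}_{j_0}, I^{m,*}_{j_0}) \geq \ell_m\ell$ by the failure of condition i). Consequently,
\begin{equation*}
\big(\prod_{j=1}^m |F_{I^{m,*}_j}(x)|^{1/(m+1)}\big)\, |F_{I_{j_0}^{m,**}}(x)|^{1/(m+1)} \leq \pi^{m+1}_J(F)(x),
\end{equation*}
and the prefactor is controlled using $\big(\ell_{m-1}/\ell_{m}\big)^2 \leq \ell_m^{-2}$, which holds since $\ell_{m-1} \leq 1$.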
Hence, in the broad case a favourable estimate also holds.
\end{proof}

By repeated application of the claim and the relation $\ell_1 \geq \cdots \geq \ell_{k-1}$,
\begin{equation*}
|F_J(x)| \leq 4\sum_{m=1}^{k-1} \ell_{m-1}^{-2(m-1)} \max_{I^m \in \mathfrak{I}(J;\ell_m\ell)} |F_{I^m}(x)| + \ell_{k-1}^{-2(k-1)} \pi^{k}_J(F)(x)
\end{equation*}
for $\mu$-almost every $x \in X$. Bounding all the maxima in the above display by the corresponding $\ell^p$ expressions and integrating over $x \in X$, one deduces that
\begin{align*}
\|F_J\|_{L^p(X)} &\leq 4 \sum_{m=1}^{k-1} \ell_{m-1}^{-2(m-1)} \big(\sum_{I^m \in \mathfrak{I}(J;\ell_m\ell)} \|F_{I^m}\|_{L^p(X)}^p\big)^{1/p} \\
& \qquad + \ell_{k-1}^{-2(k-1)} \big(\sum_{\vec{I} \in \mathfrak{I}^k_{\mathrm{sep}}(J;\ell_{k-1}\ell)} \big\|\prod_{j=1}^k |F_{I_j}|^{1/k}\big\|_{L^p(X)}^p\big)^{1/p}.
\end{align*}
Finally, taking an $\ell^p$ sum over $J$ of both sides of the above inequality and applying the triangle inequality concludes the proof.
\end{proof}

\section{A pointwise square function inequality}\label{RdF appendix}

Here we provide the simple proof of Lemma~\ref{RdF lem}, which is a slight extension of an argument due to Rubio de Francia \cite{RdF1983}. Given $G \colon \mathbb{Z}^m \to \mathbb{R}^n$ define
\begin{equation*}
\vvvert G \vvvert := \sup_{\nu_2 \in \mathbb{Z}^m} \sum_{\nu_1 \in \mathbb{Z}^m} e^{- |G(\nu_1) - G(\nu_2)|/2}.
\end{equation*}
By rescaling and a simple limiting argument, Lemma~\ref{RdF lem} is a consequence of the following pointwise bound.

\begin{lemma}
Let $\psi \in \mathscr{S}(\widehat{\mathbb{R}}^n)$ and $G \colon \mathbb{Z}^m \to \mathbb{R}^n$. For all $M$, $N \in \mathbb N$ the pointwise inequality
\begin{equation*}
\sum_{\nu \in \mathbb{Z}^m\cap[-M,M]^m} \big|\psi\big(D - G(\nu)\big)f(x)\big|^2 \lesssim_{\psi, N} \vvvert G \vvvert \int_{\mathbb{R}^n} |f(x - y)|^2 (1 + |y|)^{-N}\,\mathrm{d} y
\end{equation*}
holds for all $f \in \mathscr{S}(\mathbb{R}^n)$, with an implied constant independent of $M$.
\end{lemma}

\begin{proof}
Let $a = (a_{\nu})_{\nu \in \mathbb{Z}^m}$ be a sequence supported in $\mathbb{Z}^m \cap [-M, M]^m$ satisfying $\|a\|_{\ell^2} = 1$. Consider the function
\begin{equation*}
\sum_{\nu \in \mathbb{Z}^m} a_{\nu} \psi\big(D - G(\nu)\big)f(x) = \mathcal{K} \ast f(x)
\end{equation*}
where the kernel $\mathcal{K}$ is given by
\begin{equation*}
\mathcal{K}(x) := \frac{1}{(2\pi)^n} \int_{\widehat{\mathbb{R}}^n}e^{i\inn{x}{\xi}} \sum_{\nu \in \mathbb{Z}^m} a_{\nu} \psi\big(\xi - G(\nu)\big)\,\mathrm{d} \xi =\big[ \sum_{\nu \in \mathbb{Z}^m} a_{\nu} e^{i \inn{x}{G(\nu)}} \big]\widecheck{\psi}(x).
\end{equation*}
By duality, it suffices to show
\begin{equation*}
|\mathcal{K} \ast f(x)|^2 \leq \vvvert G \vvvert \int_{\mathbb{R}^n} |f(x - y)|^2 (1 + |y|)^{-N}\,\mathrm{d} y.
\end{equation*}
Applying the Cauchy--Schwarz inequality,
\begin{equation*}
|\mathcal{K} \ast f(x)|^2 \leq \int_{\mathbb{R}^n} \big| \sum_{\nu \in \mathbb{Z}^m} a_{\nu} e^{i \inn{y}{G(\nu)}} \big|^2 |\widecheck{\psi}(y)|\,\mathrm{d} y \int_{\mathbb{R}^n} |f(x-y)|^2 |\widecheck{\psi}(y)|\,\mathrm{d} y
\end{equation*}
and so, in view of the rapid decay of $\widecheck{\psi}$, the problem is further reduced to showing
\begin{equation*}
\int_{\mathbb{R}^n} \big| \sum_{\nu \in \mathbb{Z}^m} a_{\nu} e^{i \inn{y}{G(\nu)}} \big|^2 |\widecheck{\psi}(y)|\,\mathrm{d} y \lesssim \vvvert G \vvvert.
\end{equation*}
Since $\psi \in \mathscr{S}(\widehat{\mathbb{R}}^n)$ we have $|\widecheck\psi (y)|\lesssim \phi(y)$ where $\phi(z):= (1+z^2)^{-n-1}$. Consider $\phi(z)$ for $|\operatorname{Im\,}(z)|\le 1/2$ and observe that, by contour integration, $|\widehat{\phi}(\xi) |\lesssim e^{-|\xi|/2}$ for $\xi\in \widehat{\mathbb{R}}^n$. Hence
\begin{align*}
\int_{\mathbb{R}^n} \big| \sum_{\nu \in \mathbb{Z}^m} a_{\nu} e^{i \inn{y}{G(\nu)}} \big|^2 |\widecheck{\psi}(y)|\,\mathrm{d} y &\lesssim \sum_{\nu_1, \nu_2 \in \mathbb{Z}^m} \overline{a_{\nu_1}}a_{\nu_2} \widehat{\phi}\big(G(\nu_1) - G(\nu_2)\big) \\
&\lesssim_{\psi} \sum_{\nu_1, \nu_2 \in \mathbb{Z}^m} |a_{\nu_1}||a_{\nu_2}| e^{-|G(\nu_1)-G(\nu_2)|/2}.
\end{align*}
The right-hand side of the above inequality is then bounded by $\vvvert G \vvvert$ via the Cauchy--Schwarz inequality and the Schur test, as $\| a \|_{\ell^2}=1$.
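In detail, the final step here is the elementary estimate
\begin{equation*}
\sum_{\nu_1, \nu_2 \in \mathbb{Z}^m} |a_{\nu_1}||a_{\nu_2}| e^{-|G(\nu_1)-G(\nu_2)|/2} \leq \Big(\sup_{\nu_2 \in \mathbb{Z}^m} \sum_{\nu_1 \in \mathbb{Z}^m} e^{-|G(\nu_1)-G(\nu_2)|/2}\Big) \|a\|_{\ell^2}^2 = \vvvert G \vvvert,
\end{equation*}
which follows by bounding $|a_{\nu_1}||a_{\nu_2}| \leq \tfrac{1}{2}(|a_{\nu_1}|^2 + |a_{\nu_2}|^2)$ and using the symmetry of the kernel $e^{-|G(\nu_1)-G(\nu_2)|/2}$ in $\nu_1$ and $\nu_2$; this is a simple instance of the Schur test.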
\end{proof}

\section{Derivative bounds for implicitly defined functions}

Let $\Omega$, $I \subseteq \mathbb{R}$ be open intervals and $G \colon \Omega \times I \to \mathbb{C}$ a $C^{\infty}$ mapping. Suppose $\partial_y G(x,y)$ is non-vanishing on $\Omega \times I$ and $y \colon \Omega \to I$ is a $C^{\infty}$ mapping such that
\begin{equation*}
G(x,y(x)) = 0 \qquad \textrm{for all $x \in \Omega$.}
\end{equation*}

\begin{lemma}\label{imp deriv lem}
Let $G \colon \Omega \times I \to \mathbb{C}$ and $y \colon \Omega \to I$ be as above and suppose $A, M_1$, $M_2 > 0$ are constants such that
\begin{equation}\label{imp deriv eq}
\left\{\begin{array}{rcl}
\big|(\partial_y G)(x,y(x))\big| &\geq& AM_2, \\[5pt]
\big|(\partial_x^{\alpha_1} \partial_y^{\alpha_2}G)(x,y(x))\big| &\lesssim_{\alpha} & AM_1^{\alpha_1} M_2^{\alpha_2} \qquad \textrm{for all $\alpha \in \mathbb N_0^2\setminus\{0\}$.}
\end{array} \right.
\end{equation}
Then the function $y$ satisfies
\begin{equation}\label{der bounds implicit function}
|y^{(j)}(x)| \lesssim_j M_1^j M_2^{-1} \qquad \textrm{for all $j \in \mathbb N$.}
\end{equation}
Consequently, for all $C^{\infty}$ functions $H \colon \Omega \times I \to \mathbb{C}$ for which there exists some constant $B > 0$ such that
\begin{equation}\label{imp deriv hyp}
|(\partial_x^{\alpha_1}\partial_y^{\alpha_2}H)(x,y(x))| \lesssim_N B M_1^{\alpha_1} M_2^{\alpha_2} \qquad \textrm{for all $\alpha \in \mathbb N_0^2 \setminus \{0\}$,}
\end{equation}
one has
\begin{equation}\label{Faa di Bruno eq 2}
\big|\frac{\mathrm{d}^N}{\mathrm{d} x^N} H(x, y(x))\big| \lesssim_N B M_1^N \qquad \textrm{for all $N \in \mathbb N$.}
\end{equation}
\end{lemma}

Before giving the proof of Lemma~\ref{imp deriv lem}, we make some preliminary observations. A simple induction argument shows that there exists a sequence of coefficients $(C_{\alpha, d})_{d \in \mathbb N_0^j}$, depending only on $j$ and $\alpha$, such that for all $C^{\infty}$ functions $H \colon \Omega \times I \to \mathbb{C}$ the identity
\begin{equation}\label{Faa di Bruno eq}
\frac{\mathrm{d}^j}{\mathrm{d} x^j} H(x, y(x)) = \sum_{\substack{\alpha \in \mathbb N_0^2 \setminus\{0\} \\ \alpha_1, \alpha_2 \leq j}} (\partial_{x}^{\alpha_1} \partial_y^{\alpha_2}H)(x,y(x)) \sum_{\substack{ d_1 + 2d_2 + \cdots + j d_j = j - \alpha_1 \\ d_1 + \cdots + d_j = \alpha_2}} C_{\alpha, d} \prod_{i=1}^j y^{(i)}(x)^{d_i}
\end{equation}
holds. The precise values of the $C_{\alpha,d}$ are given by the multivariate Fa\`a di Bruno formula: see \cite[Theorem 4.2]{LP}. Similarly, for $1 \leq k \leq |\alpha|$ there exists a sequence of coefficients $(C_{k, e})_{e \in \mathcal{E}(\alpha,k)}$, depending only on $\alpha$, such that
\begin{equation}\label{Faa di Bruno ratio}
\partial_x^{\alpha_1} \partial_y^{\alpha_2}\big[(\partial_yG)(x,y)^{-1}\big] = \sum_{k = 1}^{|\alpha|} (\partial_yG)(x,y)^{-k-1} \sum_{e \in \mathcal{E}(\alpha,k)} C_{k,e} \prod_{\beta \preceq \alpha} (\partial_x^{\beta_1} \partial_y^{\beta_2 + 1}G)(x,y)^{e_{\beta}}
\end{equation}
where
\begin{equation*}
\mathcal{E}(\alpha,k) := \big\{ e = (e_{\beta})_{\beta \preceq \alpha} : e_{\beta} \in \mathbb N_0 \textrm{ for all $\beta \preceq \alpha$ and } \sum_{\beta \preceq \alpha} \beta_{\ell} \cdot e_{\beta} = \alpha_{\ell} \textrm{ for $\ell = 1, 2$, } \sum_{\beta \preceq \alpha} e_{\beta} = k \big\}
\end{equation*}
and the notation $\beta \preceq \alpha$ refers to those $\beta \in \mathbb N_0^2 \setminus \{0\}$ which satisfy $\beta_{\ell} \leq \alpha_{\ell}$ for $\ell = 1,2$. Once again, the precise values of the $C_{k,e}$ are given by the multivariate Fa\`a di Bruno formula. Both identities \eqref{Faa di Bruno eq} and \eqref{Faa di Bruno ratio} play a r\^ole in the proof of Lemma~\ref{imp deriv lem}.

\begin{proof}
By scaling, it suffices to consider the case $A=1$.
The proof of \eqref{der bounds implicit function} proceeds by (strong) induction on $j$. By implicit differentiation,
\begin{equation}\label{imp deriv eq 1}
y'(x) = Q(x, y(x)) \quad \textrm{where} \quad Q(x,y) := - (\partial_xG)(x,y) \cdot (\partial_yG)(x,y)^{-1} \quad \textrm{for $(x,y) \in \Omega \times I$.}
\end{equation}
Thus, the $j=1$ case is an immediate consequence of this identity together with the hypothesised bounds \eqref{imp deriv eq}. Now let $j \geq 1$ and suppose $|y^{(i)}(x)| \lesssim_i M_1^i M_2^{-1}$ holds for all $1 \leq i \leq j$. To bound the higher order derivative $y^{(j+1)}$ we make use of the differential identity \eqref{Faa di Bruno eq}, taking $H := Q$. In particular, \eqref{Faa di Bruno eq} together with \eqref{imp deriv eq 1} directly imply that
\begin{equation}\label{imp deriv eq 5}
y^{(j+1)}(x) = \sum_{\substack{\alpha \in \mathbb N_0^2 \setminus\{0\} \\ \alpha_1, \alpha_2 \leq j}} (\partial_{x}^{\alpha_1} \partial_y^{\alpha_2} Q)(x,y(x)) \sum_{\substack{ d_1 + 2d_2 + \cdots + j d_j = j - \alpha_1 \\ d_1 + \cdots + d_j = \alpha_2}} C_{\alpha, d} \prod_{i=1}^j y^{(i)}(x)^{d_i}.
\end{equation}
The bound \eqref{der bounds implicit function} is now reduced to showing
\begin{equation}\label{imp deriv eq 4}
|(\partial_x^{\alpha_1} \partial_y^{\alpha_2}Q)(x,y(x))| \lesssim_{\alpha} M_1M_2^{-1}M_1^{\alpha_1}M_2^{\alpha_2}.
\end{equation}
Indeed, once \eqref{imp deriv eq 4} is established, one may use this inequality to bound the derivatives of $Q$ appearing on the right-hand side of \eqref{imp deriv eq 5} and the induction hypothesis to bound the $y^{(i)}(x)$ terms. Consequently, one deduces that
\begin{equation*}
|y^{(j+1)}(x)| \lesssim_j M_1^{j + 1}M_2^{- 1}.
\end{equation*}
This closes the induction and completes the proof of \eqref{der bounds implicit function}.

Turning to the proof of \eqref{imp deriv eq 4}, note that \eqref{Faa di Bruno ratio} and the hypothesised bounds \eqref{imp deriv eq} imply
\begin{equation}\label{imp deriv eq 2}
\big|\partial_x^{\alpha_1} \partial_y^{\alpha_2}\big[(\partial_yG)(x,y)^{-1}\big]|_{y = y(x)}\big| \lesssim_{\alpha} M_2^{-1}M_1^{\alpha_1}M_2^{\alpha_2} \qquad \textrm{for all $\alpha \in \mathbb N_0^2 \setminus \{0\}$.}
\end{equation}
On the other hand, \eqref{imp deriv eq} immediately implies that
\begin{equation}\label{imp deriv eq 3}
\big|\partial_x^{\alpha_1} \partial_y^{\alpha_2}(\partial_x G)(x,y)|_{y = y(x)}\big| \lesssim_{\alpha} M_1M_1^{\alpha_1}M_2^{\alpha_2} \qquad \textrm{for all $\alpha \in \mathbb N_0^2 \setminus \{0\}$.}
\end{equation}
Combining \eqref{imp deriv eq 2} and \eqref{imp deriv eq 3} with the Leibniz rule one obtains \eqref{imp deriv eq 4}. The bound \eqref{Faa di Bruno eq 2} is a simple consequence of \eqref{der bounds implicit function} and \eqref{imp deriv hyp} via the formula \eqref{Faa di Bruno eq}.
\end{proof}

Lemma~\ref{imp deriv lem} immediately implies the following multivariate extension. Let $\Omega \subseteq \mathbb{R}^n$ be an open set, $I \subseteq \mathbb{R}$ an open interval and $G \colon \Omega \times I \to \mathbb{C}$ a $C^{\infty}$ mapping.
Suppose $\partial_y G(\bm{x}, y)$ is non-vanishing on $\Omega \times I$ and $y \colon \Omega \to I$ is a $C^{\infty}$ mapping such that
\begin{equation*}
G(\bm{x},y(\bm{x})) = 0 \qquad \textrm{for all $\bm{x} \in \Omega$.}
\end{equation*}
For $\mathbf{e} \in S^{n-1}$ let $\nabla_{\mathbf{e}}$ denote the directional derivative operator with respect to $\bm{x}$ in the direction of $\mathbf{e}$. Suppose $A$, $M_1$, $M_2 > 0$ are constants such that
\begin{equation}\label{multi imp deriv}
\left\{\begin{array}{rcl}
\big|(\partial_y G)(\bm{x},y(\bm{x}))\big| &\geq& A M_2, \\[5pt]
\big|(\nabla_{\mathbf{e}}^{\alpha_1}\partial_y^{\alpha_2}G)(\bm{x},y(\bm{x}))\big| &\lesssim_N& A M_1^{\alpha_1} M_2^{\alpha_2}
\end{array} \right. \qquad \textrm{for all $\alpha \in \mathbb N_0^2\setminus\{0\}$ and all $\bm{x} \in \Omega$.}
\end{equation}
Then the function $y$ satisfies
\begin{equation}\label{multi imp der bound}
|\nabla_{\mathbf{e}}^{N} y(\bm{x})| \lesssim_N M_1^N M_2^{-1} \qquad \textrm{for all $\bm{x} \in \Omega$ and all $N \in \mathbb N$.}
\end{equation}
Similarly, \eqref{Faa di Bruno eq 2} has a multivariate extension. In particular, suppose, in addition to the above, that $H \colon \Omega \times I \to \mathbb{C}$ is a $C^{\infty}$ mapping and $B > 0$ is a constant such that
\begin{equation}\label{multi imp deriv hyp}
\big|(\nabla_{\mathbf{e}}^{\alpha_1}\partial_y^{\alpha_2}H)(\bm{x},y(\bm{x}))\big| \lesssim_N B M_1^{\alpha_1} M_2^{\alpha_2} \qquad \textrm{for all $\alpha \in \mathbb N_0^2\setminus\{0\}$.}
\end{equation}
Then it follows from \eqref{Faa di Bruno eq 2} that
\begin{equation}\label{multi Faa di Bruno eq 2}
\big|\nabla_{\mathbf{e}}^N H(\bm{x}, y(\bm{x}))\big| \lesssim_N B M_1^N \qquad \textrm{for all $\bm{x} \in \Omega$ and all $N \in \mathbb N$.}
\end{equation}
For the purposes of this paper, we are interested in the special case where $G$, $H \colon \mathbb{R}^n \times I \to \mathbb{R}$ are both linear in the $\bm{x}$ variable. Thus, $G$ and $H$ are of the form
\begin{equation*}
G(\bm{x},y) = \inn{g(y)}{\bm{x}}, \quad H(\bm{x},y) = \inn{h(y)}{\bm{x}},
\end{equation*}
for some $C^{\infty}$ functions $g$, $h \colon I \to \mathbb{R}^n$. Furthermore, the conditions in \eqref{multi imp deriv} can be written as
\begin{equation}\label{multi imp deriv 2}
\left\{\begin{array}{rcl}
|\inn{g'\circ y(\bm{x})}{\bm{x}}| &\geq& A M_2, \\[2pt]
|\inn{g^{(N)}\circ y(\bm{x})}{\bm{x}}| &\lesssim_N& A M_2^{N} \\[2pt]
|\inn{g^{(N)}\circ y(\bm{x})}{\mathbf{e}}| &\lesssim_N& A M_1 M_2^{N}
\end{array} \right. \qquad\textrm{for all $N \in \mathbb N$ and all $\bm{x} \in \Omega$}
\end{equation}
and the condition in \eqref{multi imp deriv hyp} can be written as
\begin{equation}\label{multi imp deriv 3}
\left\{\begin{array}{rcl}
|\inn{h^{(N)}\circ y(\bm{x})}{\bm{x}}| &\lesssim_N& B M_2^{N} \\[2pt]
|\inn{h^{(N)}\circ y(\bm{x})}{\mathbf{e}}| &\lesssim_N& B M_1 M_2^{N}
\end{array} \right. \qquad\textrm{for all $N \in \mathbb N$ and all $\bm{x} \in \Omega$.}
\end{equation}
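These reformulations simply reflect the fact that, when $G$ is linear in the $\bm{x}$ variable, only the $\alpha_1 \in \{0,1\}$ derivatives survive: indeed, for $G(\bm{x},y) = \inn{g(y)}{\bm{x}}$ one has
\begin{equation*}
(\nabla_{\mathbf{e}}^{\alpha_1}\partial_y^{\alpha_2}G)(\bm{x},y) =
\begin{cases}
\inn{g^{(\alpha_2)}(y)}{\bm{x}} & \textrm{if $\alpha_1 = 0$,} \\
\inn{g^{(\alpha_2)}(y)}{\mathbf{e}} & \textrm{if $\alpha_1 = 1$,} \\
0 & \textrm{if $\alpha_1 \geq 2$,}
\end{cases}
\end{equation*}
and similarly with $H$ and $h$ in place of $G$ and $g$.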
\begin{example}[Application to Lemma~\ref{J=3 ker lem}]\label{deriv ex}
Let $\gamma \in \mathfrak{G}_3(\delta_0)$ and let $\theta_2 \colon \widehat{\mathbb{R}}^3 \backslash\{0\} \to I_0$ satisfy
\begin{equation*}
\inn{\gamma'' \circ \theta_2 (\xi)}{\xi}=0.
\end{equation*}
We apply the previous result with $g=\gamma''$ and $h=\gamma'$. If $B \leq A$ the conditions \eqref{multi imp deriv 2} and \eqref{multi imp deriv 3} read succinctly as
\begin{equation*}
\left\{\begin{array}{rcl}
|\inn{\gamma^{(3)}\circ \theta_2(\xi)}{\xi}| &\geq& A M_2, \\[2pt]
|\inn{\gamma^{(1+N)}\circ \theta_2(\xi)}{\xi}| &\lesssim_N& B M_2^{N} \\[2pt]
|\inn{\gamma^{(1+N)}\circ \theta_2(\xi)}{\mathbf{e}}| &\lesssim_N& B M_1 M_2^{N}
\end{array} \right. \qquad\textrm{for all $N \in \mathbb N$ and all $\xi \in \Omega \subset \widehat{\mathbb{R}}^3 \backslash \{0\}$,}
\end{equation*}
which imply
\begin{equation*}
|\nabla_{\mathbf{e}}^N \theta_2(\xi)| \lesssim_N M_1^N M_2^{-1} \quad \text{ and } \quad |\nabla_{\mathbf{e}}^N \inn{\gamma' \circ \theta_2(\xi)}{\xi}| \lesssim_N BM_1^N
\end{equation*}
for all $N \in \mathbb N$ and all $\xi \in \Omega \subset \widehat{\mathbb{R}}^3 \backslash \{0\}$. The application with respect to $\theta_1^\pm \colon \widehat{\mathbb{R}}^3 \backslash \{0\} \to I_0$ satisfying
\begin{equation*}
\inn{\gamma' \circ \theta_1^\pm (\xi)}{\xi}=0
\end{equation*}
is similar, with $g=\gamma'$ (we do not need an auxiliary $h$ in this case).
\end{example}

\section{Integration-by-parts}

For $a \in C^{\infty}_c(\mathbb{R})$ supported in an interval $I \subset \mathbb{R}$ and $\phi \in C^{\infty}(I)$, define the oscillatory integral
\begin{equation*}
\mathcal{I}[\phi, a] := \int_{\mathbb{R}} e^{i \phi(s)} a(s)\,\mathrm{d} s.
\end{equation*}
The following lemma is a standard application of integration-by-parts.

\begin{lemma}[Non-stationary phase]\label{non-stationary lem}
Let $R \geq 1$ and $\phi, a$ be as above. Suppose that for each $j \in \mathbb N_0$ there exist constants $C_j \geq 1$ such that the following conditions hold on the support of $a$:
\begin{enumerate}[i)]
\item $|\phi'(s)| > 0$,
\item $|\phi^{(j)}(s)| \leq C_j R^{-(j-1)}|\phi'(s)|^j\,\,$ for all $j \geq 2$,
\item $|a^{(j)}(s)| \leq C_j R^{-j}|\phi'(s)|^j\,\,$ for all $j \geq 0$.
\end{enumerate}
Then for all $N \in \mathbb N_0$ there exists some constant $C(N)$ such that
\begin{equation*}
|\mathcal{I}[\phi, a]| \leq C(N) \cdot |\mathrm{supp}\, a| \cdot R^{-N}.
\end{equation*}
Moreover, $C(N)$ depends on $C_1, \dots, C_N$ but is otherwise independent of $\phi$ and $a$ and, in particular, does not depend on $R$.
\end{lemma}

\begin{proof}
Taking $D := \phi'(s)^{-1} \partial_s$, repeated integration-by-parts implies that
\begin{equation*}
\mathcal{I}[\phi, a] = (-i)^{-N}\int_{\mathbb{R}} e^{i\phi(s)} (D^*)^Na(s)\,\mathrm{d} s
\end{equation*}
where $D^*$ is the `adjoint' differential operator $D^* \colon a \mapsto -\partial_s \big[(\phi')^{-1} \cdot a \big]$.
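To make the first step explicit: since $\phi'$ is non-vanishing on the support of $a$ by hypothesis i) and $a$ is compactly supported, writing $e^{i\phi} = (i\phi')^{-1}\partial_s(e^{i\phi})$ and integrating by parts once (there are no boundary terms) gives
\begin{equation*}
\Big|\int_{\mathbb{R}} e^{i \phi(s)} a(s)\,\mathrm{d} s\Big| = \Big|\int_{\mathbb{R}} \frac{\partial_s\big(e^{i \phi(s)}\big)}{i\phi'(s)}\, a(s)\,\mathrm{d} s\Big| = \Big|\int_{\mathbb{R}} e^{i \phi(s)}\, \partial_s\Big[\frac{a(s)}{\phi'(s)}\Big]\,\mathrm{d} s\Big| = \Big|\int_{\mathbb{R}} e^{i \phi(s)}\, D^*a(s)\,\mathrm{d} s\Big|,
\end{equation*}
and the displayed formula follows by iterating this step $N$ times.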
Thus, the proof boils down to establishing a pointwise estimate
\begin{equation*}
|(D^*)^Na(s)| \leq C(N) \cdot R^{-N}
\end{equation*}
under the hypotheses of the lemma. It is in fact convenient to prove a more general inequality
\begin{equation}\label{non-stationary 1}
|\partial_s^j(D^*)^Na(s)| \leq C(j, N) \cdot R^{-N-j}\cdot |\phi'(s)|^j \qquad \textrm{for all $j, N \in \mathbb N_0$},
\end{equation}
where the $C(j, N)$ again depend only on the constants $C_k$ for $1 \leq k \leq N+j$. The inequality \eqref{non-stationary 1} is amenable to induction on the parameter $N$. Indeed, if $N = 0$, then \eqref{non-stationary 1} reduces to hypothesis iii), which establishes the base case. Assume the inequality \eqref{non-stationary 1} holds for some $N \geq 0$ and all $j$. By the Leibniz rule,
\begin{equation}\label{non-stationary 2}
\partial_s^j(D^*)^{N+1}a(s) = \sum_{i=0}^{j+1} \binom{j+1}{i} \big[\partial_s^i (\phi')^{-1}\big](s) \cdot \big[\partial^{j+1-i} (D^*)^N a\big](s).
\end{equation}
Using the induction hypothesis, one may immediately bound
\begin{equation}\label{non-stationary 3}
\big|\big[\partial^{j+1-i} (D^*)^N a\big](s)\big| \leq C(j+1-i, N) \cdot R^{-N - 1 - j + i} \cdot |\phi'(s)|^{j + 1 - i}.
\end{equation}
On the other hand, an induction argument shows that there exists a polynomial $\wp \in \mathbb{R}[X_0, \dots, X_i]$, with coefficients depending only on $i$, with the following properties:
\begin{enumerate}[a)]
\item $\wp$ is a linear combination of monomials $X_0^{\alpha_0}\cdots X_i^{\alpha_i}$ for multi-indices $(\alpha_0, \dots, \alpha_i)$ satisfying
\begin{equation*}
0 \cdot \alpha_0 + 1 \cdot \alpha_1 + \cdots + i \cdot \alpha_i = \alpha_0 + \alpha_1 + \cdots + \alpha_i = i.
\end{equation*}
\item The identity
\begin{equation*}
\big[\partial_s^i (\phi')^{-1}\big](s) = \frac{\wp\big(\phi'(s), \dots, \phi^{(i+1)}(s)\big)}{\phi'(s)^{i+1}} \qquad \textrm{holds for all $s \in I$.}
\end{equation*}
\end{enumerate}
If $(\alpha_0, \dots, \alpha_i)$ is a multi-index as in a), then hypothesis ii) of the lemma implies that
\begin{equation*}
\prod_{k=0}^i |\phi^{(k+1)}(s)|^{\alpha_k} \lesssim R^{-i} \cdot |\phi'(s)|^{2i},
\end{equation*}
where the implied constant is here allowed to depend on the $C_k$ for $1 \leq k \leq i+1$. Consequently, from the formula in b) above one deduces that
\begin{equation}\label{non-stationary 4}
|\big[\partial_s^i (\phi')^{-1}\big](s)| \lesssim R^{-i} \cdot |\phi'(s)|^{i-1}.
\end{equation}
Substituting the bounds \eqref{non-stationary 3} and \eqref{non-stationary 4} into \eqref{non-stationary 2}, the induction now closes provided $C(j,N)$ is appropriately defined.
\end{proof}

\end{document}
\begin{document} \title{Ramsey numbers of cycles versus general graphs} \begin{abstract} The Ramsey number $R(F,H)$ is the minimum number $N$ such that any $N$-vertex graph either contains a copy of $F$ or its complement contains $H$. Burr in 1981 proved a pleasingly general result that for any graph $H$, provided $n$ is sufficiently large, a natural lower bound construction gives the correct Ramsey number involving cycles: $R(C_n,H)=(n-1)(\chi(H)-1)+\sigma(H)$, where $\sigma(H)$ is the minimum possible size of a colour class in a $\chi(H)$-colouring of $H$. Allen, Brightwell and Skokan conjectured that the same should be true already when $n\geq \abs{H}\chi(H)$. We improve this 40-year-old result of Burr by giving quantitative bounds of the form $n\geq C\abs{H}\log^4\chi(H)$, which is optimal up to the logarithmic factor. In particular, this proves a strengthening of the Allen-Brightwell-Skokan conjecture for all graphs $H$ with large chromatic number. \end{abstract} \section{Introduction} For any pair of graphs $F$ and $H$, Ramsey~\cite{R30} famously proved in 1930 that there exists a number $R(F,H)$ such that given any graph $G$ on at least $R(F,H)$ vertices either $F \subseteq G$ or $H \subseteq \overline{G}$. Determining $R(F,H)$ exactly for every pair of graphs $F,H$ is a notoriously difficult problem in combinatorics. Indeed, $R(K_n, K_n)$ is not even known exactly for $n = 5$. However, for some pairs of graphs, particularly when $F$ and/or $H$ are certain sparse graphs, $R(F,H)$ is known. We require some notation. For a graph $H$, define the chromatic number $\chi(H)$ of $H$ to be the smallest number of colours in a proper colouring of $H$, that is, a colouring where no two adjacent vertices have the same colour. Further, let $\sigma(H)$ be the minimum possible size of a colour class in a $\chi(H)$-colouring of $H$. Building on observations of Erd\H{o}s~\cite{E47} and Chv\'{a}tal and Harary~\cite{CH72}, Burr~\cite{B81} constructed the following lower bound for $R(F,H)$ when $F$ is a connected graph with $\abs{F} \geq \sigma(H)$: \begin{equation}\label{lem:burr} R(F,H) \geq (\abs{F} - 1)(\chi(H) - 1) + \sigma(H). \end{equation} The construction proving this bound is the graph $G$ on $(\abs{F} - 1)(\chi(H) - 1) + \sigma(H) - 1$ vertices consisting of $\chi(H) - 1$ disjoint cliques of size $\abs{F}-1$ and an additional disjoint clique of size $\sigma(H) - 1$. Clearly, since $F$ is connected, $F \nsubseteq G$. Since $\overline{G}$ is the complete $\chi(H)$-partite graph with $\chi(H)-1$ vertex sets of size $\abs{F} - 1$ and one vertex set of size $\sigma(H) - 1$, and the vertex set of size $\sigma(H) - 1$ cannot completely contain any colour class of $H$, we have $H \nsubseteq \overline{G}$. In what follows we will often say a graph $G$ is \emph{$H$-free} to mean $H \nsubseteq G$. Although the bound in~\eqref{lem:burr} is very general, for some pairs of graphs it is extremely far from the truth. Indeed, Erd\H{o}s \cite{E47} proved that $R(K_k, K_k) \geq \Omega(2^{k/2})$, whereas \eqref{lem:burr} only gives the much smaller lower bound of $(k-1)^2 + 1$. For some pairs of graphs however the bound in~\eqref{lem:burr} is tight. For graphs $F$ and $H$ satisfying this lower bound $R(F,H)=(\chi(H)-1)(\abs{F}-1)+\sigma(H)$, Burr and Erd\H{o}s~\cite{BE83} coined the expression `$F$ is \emph{$H$-good}'. The study of so-called \emph{Ramsey goodness}, after being initiated by Burr and Erd\H{o}s in 1983~\cite{BE83}, has attracted considerable interest. 
Prior to Burr's observation~\cite{B81}, already Erd\H{o}s~\cite{E47} had proved that the path on $n$ vertices $P_n$ was $K_k$-good; Gerencs\'{e}r and Gy\'{a}rf\'{a}s~\cite{GG67} proved that for $n \geq k$ the path $P_n$ is $P_k$-good; and Chv\'{a}tal~\cite{C77} proved every tree $T$ is $K_k$-good. The result of Chv\'{a}tal can in fact be viewed as a generalisation of Tur\'{a}n's theorem, which states that the complete balanced $(k-1)$-partite graph on $N$ vertices, the so-called \emph{Tur\'{a}n graph}, has the maximum number of edges amongst $K_k$-free graphs. Indeed, Tur\'{a}n's theorem is equivalent to the statement `$S_n$ is $K_k$-good', where $S_n$ is the $n$-vertex star with $n-1$ leaves. To see this, let $N = (n-1)(k-1)$ and $T$ be the complement of this Tur\'{a}n graph. Then $T$ is precisely the construction in~\eqref{lem:burr} when $(F,H) = (S_n, K_k)$. Moreover, $T$ is the unique graph on $(n-1)(k-1)$ vertices with $S_n \nsubseteq T$ and $K_k \nsubseteq \overline{T}$. Add a vertex to $T$ and denote the resulting graph by $T + v$. If $v$ neighbours any vertex in $T$, then $S_n \subseteq T + v$. Otherwise, $K_k \subseteq \overline{T + v}$. That is, $R(S_n, K_k) = (n-1)(k-1) + \sigma(K_k) = (n-1)(k-1) + 1$. This connection to Tur\'an's theorem highlights how Ramsey goodness results can generalise other results in graph theory. See \cite{FHW, LL21, M21, PS17, PS20} and their references for more recent progress in the area of Ramsey goodness, as well as the survey of Conlon, Fox and Sudakov~\cite[Section 2.5]{CFS15}.

In this paper we are specifically interested in when $C_n$, the $n$-vertex cycle, is $H$-good for general graphs $H$. This study can be traced back to Bondy and Erd\H{o}s~\cite{BE73} who proved that $C_n$ is $K_k$-good whenever $n \geq k^2-2$, which led Erd\H{o}s, Faudree, Rousseau and Schelp~\cite{EFRS78} to conjecture that $C_n$ is $K_k$-good whenever $n \geq k \geq 3$. Keevash, Long and Skokan~\cite{KLS21} recently proved a strengthening of this conjecture for large $k$, showing that $n\ge C\frac{\log k}{\log\log k}$ suffices for some constant $C \geq 1$. For smaller $k$, Nikiforov~\cite{N09} proved $n \geq 4k+2$ is sufficient and several authors have proved the conjecture for certain small values of $k$ (see \cite{CCZ08} and its references). For Ramsey numbers of cycles versus general graphs $H$, Burr~\cite{B81}, in 1981, proved a satisfying result that $C_n$ is $H$-good when $n$ is sufficiently large as a function of $\abs{H}$. It remains an intriguing open question to determine the threshold of $n$ below which the Ramsey number $R(C_n,H)$ behaves differently from the natural construction of Burr yielding~\eqref{lem:burr}. In particular, Allen, Brightwell and Skokan \cite{ABS} conjectured the following explicit bound. \begin{conj}\label{conj:abs}For any graph $H$ and $n\geq\chi(H)\abs{H}$, the cycle $C_n$ is $H$-good, i.e.\ $R(C_n, H) = (\chi(H)-1)(n-1)+\sigma(H)$. \end{conj} Note that we may assume $H$ to be a complete multipartite graph, as every $H$ is a subgraph of some complete $\chi(H)$-partite graph $H'$ with $\sigma(H')=\sigma(H)$, and clearly $R(C_n, H)\leq R(C_n,H')$. Towards Conjecture~\ref{conj:abs}, Pokrovskiy and Sudakov \cite{PS20} very recently proved an important case when the graph $H$ has polynomially small chromatic number: $\abs{H}\ge \chi(H)^{23}$. More precisely, they showed that for $n\geq 10^{60} m_k$ and $m_1 \leq m_2 \leq\cdots\leq m_k$ satisfying $m_i\geq i^{22}$ for each $i$, $R(C_n ,K_{m_1,\ldots,m_k} ) = (n - 1)(k - 1) + m_1$.
Note, however, that it is well-known from random graph theory (e.g.~\cite{Bol88}) that for almost all graphs $H$, the chromatic number is much larger: $\chi(H)=\Theta\big(\frac{\abs{H}}{\log\abs{H}}\big)$. Our main result below is an almost optimal quantitative version of Burr's result~\cite{B81}. In particular, this theorem holds for all graphs $H$. Moreover, as $x> C\log^4 x$ for all large $x$, it proves a strengthening of Conjecture~\ref{conj:abs} for all graphs $H$ with large (constant) chromatic number. \begin{theorem}\label{thm:main} There exists a constant $C > 0$ such that for any graph $H$ and any $n\geq C\abs{H}\log^4\chi(H)$ we have that $C_n$ is $H$-good, i.e.\ $R(C_n, H) = (\chi(H)-1)(n-1)+\sigma(H)$. \end{theorem} The bound on $n$ in Theorem~\ref{thm:main} is best possible up to the logarithmic factor $\log^4\chi(H)$. The following construction shows that the cycle length $n$ has to be at least $(1-o(1))\abs{H}$ in order for $C_n$ to be $H$-good. \noindent\emph{Lower bound construction.} Fix arbitrary $m,k\in\mathbb{N}$ with $k\ge 2$ and $0<\varepsilon<\frac{1}{4}$. Set $n=(1-\varepsilon)mk$. Consider the complete $k$-partite graph $H$ on partite sets $V_1,\ldots,V_k$ with $\abs{V_1}=(1-\varepsilon)mk$ and $\abs{V_2}=\cdots=\abs{V_k}=\frac{\varepsilon mk}{k-1}$; so $\abs{H}=mk$ and $\sigma(H)=\frac{\varepsilon mk}{k-1}$. Let $G$ be the $N$-vertex graph, with $N=k(n-1)$, consisting of $k$ vertex disjoint cliques $K_{n-1}$. It is easy to see that $G$ is $C_n$-free with $H$-free complement. Therefore, for these choices of $C_n$ and $H$, we have \[R(C_n,H)>k(n-1)>(\chi(H)-1)(n-1)+\sigma(H).\] Our proof takes a similar approach to the work of Pokrovskiy and Sudakov \cite{PS20} and Keevash, Long and Skokan~\cite{KLS21}. The bulk of the work is to prove a stability type result showing that $C_n$-free graphs $G$ with $H$-free complement whose order is around the lower bound in~\eqref{lem:burr} must be structurally close to Burr's construction. The two novel ingredients at the heart of our proof are (i) certain sublinear expansion properties and (ii) an `adjuster' structure, both of which are inspired by recent work of Liu and Montgomery \cite{LM20+} on cycle embeddings in sublinear expanders. The theory of sublinear expanders has played a pivotal role in the resolutions of several old conjectures; see e.g.~\cite{FKKL,H-K-L,KLShS17,LM1,LM20+}. The logarithmic factor in our bound is an artifact of the use of sublinear expansion. Considering the above construction, it is not inconceivable that already when $n\ge (1+o(1))\abs{H}$, the cycle $C_n$ is $H$-good. It would be interesting to at least get rid of the logarithmic factor and obtain a bound linear in $\abs{H}$. \noindent\textbf{Organisation.} The rest of the paper is organised as follows. We give an outline in Section~\ref{sec:outline}. Preliminaries are given in Section~\ref{sec:prelim}. The two main ingredients, the sublinear expansion and the adjuster structure (and related lemmas) are given in Sections~\ref{sec:expansion} and~\ref{sec:gadgets}, respectively. The stability result and the proof of the main theorem are in Section~\ref{sec:stability}. \section{Outline}\label{sec:outline} Suppose that $G$ has order at least $(\chi(H)-1)(n-1)+\sigma(H)$, but $\overline G$ is $H$-free. With the given condition on $n$, it is not too hard to find a cycle of length \emph{at least $n$} in $G$; the difficulty lies in obtaining a cycle of the \emph{precise} desired length $n$.
A natural approach to deal with this is to create a sufficiently large structure which has inbuilt flexibility about the length of cycles it can produce. Pokrovskiy and Sudakov \cite{PS20} use expansion properties to create some complex gadgets, which can be joined together to produce structures capable of producing paths of a wide range of lengths, up to almost all of their total size. However, the connectivity properties needed to link up these structures require $n$ to be very large compared to $\chi(H)$, and in particular only produce a good bound on $n$ if $\abs{H}$ is at least a large power of $\chi(H)$. To deal with the missing regime where $\abs{H}$ is bounded by a polynomial in $\chi(H)$, we use an orthogonal approach. Instead of the complex gadgets featured in \cite{PS20}, we borrow ideas from recent work of Liu and Montgomery \cite{LM20+} to use certain sublinear expansion properties (Definition \ref{def:expand}) to find simpler gadgets which we call adjusters (Definition~\ref{defn:adjuster}). However, these adjusters are less flexible in terms of how much we can vary the length of the final cycle, relative to its total length. We circumvent this potential difficulty by creating a cycle which has enough adjusters to permit slightly more than $\abs{H}$ different lengths, and also has a long adjuster-free section. The complement being $H$-free means that we may shorten this section until the cycle is close enough to the desired length, and use the flexibility from the adjusters to deliver the final blow. One of the main technical difficulties in our proof is showing that a suitable expanding subgraph may be found in a graph with $H$-free complement under our definition of sublinear expansion. Additionally, we need to work directly with the specific graph $H$, rather than passing to a balanced multipartite graph, and the possibility that the parts of $H$ are very unbalanced creates additional difficulties when one tries to use induction and it is necessary to reduce the number of parts first. We prove this, together with some useful consequences of expansion, in Section \ref{sec:expansion}. We then leverage these expansion properties to find the adjusters that we need in Section \ref{sec:gadgets}, and show how these may be combined together to give a suitable long cycle in a sufficiently well-connected subgraph. As a consequence of sublinear expansion and the fact that we do not need as much flexibility in length adjustment, we require only a very weak connectivity condition. Finally, in Section \ref{sec:stability} we establish a stability result on graphs $G$ which have close to $(\chi(H)-1)(n-1)$ vertices, no copy of $C_n$, no copy of $H$ in the complement, and which cannot be reduced to a smaller example by removing a part from $H$. That is, we show that a small number of vertices may be removed from such a graph $G$ to leave $\chi(H)-1$ reasonably well-connected subgraphs of order close to $n$, whose complements exclude a complete bipartite graph $H'$; here our low connectivity requirements will obviate the need for a lower bound on the class sizes of $H$. The Ramsey goodness of the cycle then follows quickly from this stability result by considering how $2$-connected blocks in $G$ are linked. \section{Preliminaries}\label{sec:prelim} Our aim is to use induction on the number of partite sets $k$. We therefore define how we order possible graphs $H$. 
For a complete multipartite graph $H$, we write $H'\sqsubset H$ if there exists a graph $H''$ such that $H\cong H'\vee H''$, where $\vee$ denotes graph join, i.e.\ $H$ is obtained by taking disjoint copies of $H'$ and $H''$ and then adding all edges between $V(H')$ and $V(H'')$. Informally, $H'$ consists of the subgraph induced by a proper subcollection of the parts of $H$. We will sometimes use $H'\sqsubseteq H$ to mean ``$H'\sqsubset H$ or $H'=H$'', i.e.\ $H'$ is induced by a (not necessarily proper) subcollection of parts of $H$. For the main step of the induction, we will require both $H'\sqsubset H$ and $\sigma(H')=\sigma(H)$, but in some intermediate steps this latter condition is not needed. For a graph $H$ with $\chi(H)=k$, we write $m=m(H)=\frac{\abs{H}}{k}$ for the average part size of $H$, i.e.\ $\abs{H}=km$. For a graph $G$ and vertex set $A\subset V(G)$, we define the \textit{external neighbourhood} $N_G(A)$ to be the set $\{w\in V(G)\setminus A:vw\in E(G)\text{ for some }v\in A\}$; note that this is disjoint from $A$. We omit the subscript if the graph is clear from context. The subgraph induced on $A$ will be denoted by $G[A]$. We write $G-A=G[V(G)\setminus A]$ for the subgraph obtained by removing vertices in $A$. For disjoint subsets $A,B\subset V(G)$, an $A$-$B$ path is a path between a vertex of $A$ and a vertex of $B$. For two vertices $u,v\in V(G)$, the graph distance $\operatorname{dist}_G(u,v)$ is the length of a shortest $u$-$v$ path in $G$. Logarithms with no specified base are always taken to the base $e$ throughout. For an integer $t$, we write $[t]$ for the set $\{1,2,\ldots,t\}$. We will need the following result of Erd\H{o}s and Szekeres \cite{ES35}. \begin{theorem}\label{thm:ES}Any sequence of at least $(r-1)^2+1$ integers contains a monotonic subsequence of length $r$. \end{theorem} We use the following results of Pokrovskiy and Sudakov \cite{PS20}. \begin{cor}\label{PS-cor}For $n\geq 10^{60} m_k$ and $m_1 \leq m_2 \leq\cdots\leq m_k$ satisfying $m_k\geq k^{22}$, we have $R(C_n ,K_{m_1,\ldots,m_k} ) = (n - 1)(k - 1) + m_1$.\end{cor} \begin{proof}Apply the aforementioned result~\cite{PS20} for $K_{m_1,\ldots,m_k}$ with $m_i\geq i^{22}$ for each $i$ to $K_{m'_1,\ldots, m'_k}$ where $m'_1=m_1$ and $m'_i=m_k$ if $i>1$. For each $i>1$ we have $m'_i=m_k\geq k^{22}\geq i^{22}$, and clearly $m'_1\geq 1^{22}$. Thus if $G$ is a $C_n$-free graph on $(n - 1)(k - 1) + m_1$ vertices, then $\overline{G}$ contains a copy of $K_{m'_1,\ldots, m'_k}$, which in turn contains $K_{m_1,\ldots, m_k}$. Combined with the lower bound~\eqref{lem:burr} (noting that $\sigma(K_{m_1,\ldots, m_k})=m_1$), this gives the claimed equality.\end{proof} Our induction may reduce to the case when $H$ is a complete bipartite graph. \begin{cor}[{\cite[Corollary 3.8]{PS20}}]\label{PS-cor38} Let $n, m_1, m_2$ be integers with $m_2 \geq m_1$, $m_2 \geq 8$, and $n \geq 2\times 10^{49}m_2$. Then we have $R(C_n, K_{m_1 ,m_2}) = n + m_1 - 1$. \end{cor} The last one we need is an intermediate result on path embeddings. \begin{lemma}[{\cite[Lemma 3.7]{PS20}}]\label{PS-lemma} Let $n$ and $m$ be integers with $n \geq 2\times10^{49}m$ and $m \geq 8$. Let $G$ be a graph with $\overline G$ being $K_{m,m}$-free and $\abs{ N_G (A) \cup A} \geq n$ for every $A\subseteq V (G)$ with $\abs{A} \geq m$. Let $x$ and $y$ be two vertices in $G$ such that there exists an $x$-$y$ path with order at least $8m$. Then there is an $x$-$y$ path of order exactly $n$ in $G$. \end{lemma} \subsection{Adjusters} In order to construct cycles covering a range of lengths, we will use the following graphs called \emph{adjusters}.
In the following definition, $m$ and $k$ are fixed numbers. When constructing adjusters, the relevant values of $m$ and $k$ will be clear from context. \begin{defn}\label{defn:adjuster}For $r\geq 1$, an \emph{$r$-adjuster} consists of $r$ disjoint odd cycles $C_1,\ldots,C_r$, with $C_i$ having distinguished vertices $v_i,w_i$ which are almost-antipodal (i.e.\ $\operatorname{dist}_{C_i}(v_i,w_i)=(\abs{C_i}-1)/2$), together with paths $P_1,\ldots,P_r$ where the endpoints of $P_i$ are $w_i$ and $v_{i+1}$ (subscripts taken modulo $r$), such that the paths are internally disjoint from each other and from the cycles, and $\abs{C_i} \leq 2000\log k\log(km)$. For the special case $r=0$, a $0$-adjuster is simply a cycle. We refer to the cycles $C_i$ as the \emph{short cycles} of the adjuster. There are also cycles which use all the paths $P_i$ and part of each short cycle; we refer to these as the \emph{routes} of the adjuster. Since a route uses one of the two $v_i$-$w_i$ arcs of each short cycle $C_i$, and these arcs have lengths $(\abs{C_i}-1)/2$ and $(\abs{C_i}+1)/2$, an $r$-adjuster contains $2^r$ routes, whose lengths are $r+1$ consecutive integers. We define the \emph{length} of an adjuster to be the length of its longest route. \end{defn} \begin{figure} \caption{An $r$-adjuster. An example of a route in this $r$-adjuster is given by the red dashed line.} \label{fig:gadget-ex} \end{figure} \section{Sublinear expansions}\label{sec:expansion} Given a large graph whose complement is $H$-free we will pass to a subgraph which is not too large and has sublinear expansion properties, in which we can find an $r$-adjuster for some suitable $r$. After removing this $r$-adjuster we repeat the process with the remaining graph. We then join many adjusters together to create a very large adjuster with greater flexibility in the length of a route, and where this length can exceed $n$. We will shrink the structure, if necessary, to ensure that the maximum length of a cycle is not much more than $n$, without impairing this flexibility. We then show this structure contains $C_n$. In this section we define the expansion properties we need, and show that any sufficiently large graph whose complement is $F$-free, for some graph $F$, contains an expanding subgraph of suitable size. \begin{defn}\label{def:expand} A graph $G$ \emph{$(\Delta,\beta,d,k)$-expands into a set $W \subseteq V(G)$} if the following holds. \begin{itemize} \item $\abs{N_G(S) \cap W} \geq \Delta \abs{S}$, for every $S\subseteq V(G)$ with $\abs{S} \leq \beta d$. \item $\abs{N_G(S)} \geq \frac{\abs{S}}{10\log k}$, for every $S\subseteq V(G)$ with $\beta d \leq \abs{S} \leq \abs{G}/2$. \end{itemize} \end{defn} Note that whenever we say that $G$ $(\Delta, \beta, d, k)$-expands, and do not specify the set $G$ expands into, we mean that $G$ $(\Delta, \beta, d, k)$-expands into $W = V(G)$. \begin{lemma}\label{lem:expansion} Fix a complete $k$-partite graph $H$ of order $mk$, where $k\geq 2$. Then for all $\beta, M, \Delta \geq 1$ with $M \geq 60 \beta \geq 240 \Delta$, $M \geq 10\beta \Delta$, $M \geq 4k$ and $\beta \geq 10\log k$, the following holds. Let $G$ be a graph with $\overline{G}$ being $H$-free and $\abs{G} \geq Mmk\log k$. Then there exists a subgraph $H'\sqsubseteq H$ induced by some collection of at least two parts and an induced subgraph $F \subseteq G$ such that the following hold: \begin{itemize} \item $\overline{F}$ is $H'$-free; \item $M\abs{H'}\log \chi(H') - m(H') \leq \abs{F} \leq M\abs{H'}\log \chi(H')$; \item $F$ is $(\Delta,\beta,m(H'),\chi(H'))$-expanding. \end{itemize} \end{lemma} \begin{proof} We proceed by induction on $k$, considering two cases.
The base case $k=2$ will be covered by the inductive argument, since we argue that we may either (i) construct a suitable subgraph directly for $H'=H$, (ii) reduce to some smaller $H'\sqsubset H$ with at least two parts, or (iii) obtain a contradiction; in the base case only the first and third possibilities arise. Note that, by removing vertices if necessary, we may assume $\abs{G}=Mmk \log k$ for $k\geq 2$. We write $m_1\leq\cdots\leq m_k$ for the orders of the vertex classes of $H$. The first case is if there exists a set $S\subset V(G)$ such that $m \leq \abs{S} \leq \abs{G}/2$ and $\abs{N_G(S)} \leq \abs{S}/(10\log k) + m\Delta\beta$. Let $T = V(G)\setminus (N_G(S) \cup S)$. Since $M \geq 10\Delta\beta $, we have that \begin{align*} \abs{T}\geq \abs{G}-\abs{N_G(S)\cup S} & \geq \left(1-1/(10\log k)\right)\abs{G}/2 - m\Delta\beta \\ & = m\left(\tfrac{Mk}{2}(\log k-1/10) - \Delta\beta\right) \\ & \geq km. \end{align*} We distinguish two subcases depending on whether $\abs{S}\geq mk$ or $\abs{S}<mk$. First, suppose $\abs{S}\geq mk$. We choose $t \in \ensuremath{\mathbb{N}}$ as large as possible such that $\abs{T}\geq M\abs{H_t}\log t$, where $H_t$ is the graph induced by the $t$ smallest parts of $H$. We then choose $s \in \ensuremath{\mathbb{N}}$ as large as possible so that $\abs{S}\geq M\abs{H_{s,t}}\log s$ where $H_{s,t}$ is the graph induced by the $s$ smallest parts of $H$ not contained in $H_t$; observe that $s,t\geq 1$ and $s,t\leq k-1$. We will quickly be able to resolve our first subcase after proving the following claim. \begin{claim}\label{claim:s+tgeqk} $s + t = k$. \end{claim} \begin{poc} The claim is trivial for $k=2$, since $s,t\geq 1$. For all $k\geq 3$ we have \[\abs{T} \geq \abs{G}/2-\frac{\abs{G}}{20\log k} - \frac{\abs{G}}{10k\log k} \geq M\abs{H_{\ceil{k/2}}}\log(\ceil{k/2}),\] which implies $t\geq\ceil{k/2}$; in particular, this proves the claim when $k=3$. Assume for a contradiction that $s + t < k$, where $k\geq 4$ is fixed; as $t \geq \ceil{k/2}$, we have $s\leq(k-1)/2$. Maximalities of $s$ and $t$ ensure that \begin{equation}\label{eq:lem2.4begin1} M\abs{H_{s+1, t}}\log(s+1) > \abs{S} \end{equation} and \begin{equation}\label{eq:lem2.4begin2} M\abs{H_{t+1}}\log(t+1) > \abs{T}. \end{equation} Observe that $H_{t+1}$ and $H_{s+1, t}$ both contain the $(t+1)$th smallest part of $H$. Also, $\abs{S}+\abs{T} = \abs{G} - \abs{N_G(S)} > Mmk\log k - \frac{\abs{S}}{10\log k} - m\Delta\beta$, hence by \eqref{eq:lem2.4begin1}, \eqref{eq:lem2.4begin2} and the fact that $M\geq 10\beta\Delta$ we have \begin{equation}\label{eq:lem2.4main} \abs{H_{t+1}}\log(t+1) + \abs{H_{s+1, t}}\log(s + 1)c(k) \geq mk\log k - \frac{m}{10}, \end{equation} where $c(k)=\left(1 + \frac{1}{10\log k}\right)$. We want to show that \eqref{eq:lem2.4main} is in fact false, which provides the contradiction we need to prove Claim~\ref{claim:s+tgeqk}. To this end, we may assume $s+t = k-1$. It follows that \begin{align*}\abs{H_{t+1}} + \abs{H_{s+1, t}}=mk+m_{t+1} \leq mk+\frac{\abs{H_{s+1, t}}}{s+1};\end{align*} recall that $m_{t+1}$ is the size of the $(t+1)$th smallest part of $H$. Consequently the LHS of \eqref{eq:lem2.4main} is at most \begin{equation}\label{LHS-ub} c(k)\log(s+1)\abs{H_{s+1, t}} + \log(k-s)\left(mk - \abs{H_{s+1, t}}\frac{s}{s+1}\right). \end{equation} Note that $m(s+1)\leq \abs{H_{s+1, t}}\leq mk$. For fixed $s$, \eqref{LHS-ub} is linear in $|H_{s+1, t}|$, and consequently within this range it is maximised either at $\abs{H_{s+1, t}}=m(s+1)$ or at $\abs{H_{s+1, t}}=mk$. 
We first consider the case $\abs{H_{s+1, t}}=m(s+1)$, when \eqref{LHS-ub} becomes \[m(c(k)(s+1)\log(s+1)+(k-s)\log(k-s)).\] This is a convex function of $s$, and so for $1\leq s\leq (k-1)/2$ is maximised when $s=1$ or when $s=(k-1)/2$. When $s=1$ we have \begin{equation}\label{bal-small-s} 2c(k)\log 2+(k-1)\log(k-1)<k\log k-0.1\end{equation} for all $k\geq 4$. When $s=(k-1)/2$, we likewise have \begin{equation}\label{bal-large-s} (1+c(k))\frac{k+1}{2}\log\left(\frac{k+1}{2}\right)<k\log k-0.1\end{equation} for all $k\geq 4$. Finally, we consider the case $\abs{H_{s+1, t}}=mk$, when \eqref{LHS-ub} becomes \[mk(\log(s+1)c(k)+\log(k-s)/(s+1)).\] Suppose $2\leq s\leq \log(k-s)$. Then $\frac{s}{s+1}\log(k-s)\geq\frac{s^2}{s+1}\geq c(k)\log(s+1)+0.1$, and so \eqref{LHS-ub} is decreasing in $\abs{H_{s+1, t}}$, and maximised when $\abs{H_{s+1, t}}=m(s+1)$. Similarly if $s=1$ and $k\geq 7$, \eqref{LHS-ub} is decreasing. For $s=1$ and $4\leq k\leq 6$, by direct calculation \eqref{LHS-ub} contradicts \eqref{eq:lem2.4main}. Thus we may assume that $s\geq\log(k-s)$. Since $s\leq(k-1)/2$ we have $s+1\leq k-s$. Thus \[\frac{\mathrm{d}}{\mathrm{d}s}\left(\log(s+1)c(k)+\frac{\log(k-s)}{s+1}\right)=\frac{c(k)(s+1)-\frac{s+1}{k-s}-\log(k-s)}{(s+1)^2}>0\] and so $\log(s+1)c(k)+\log(k-s)/(s+1)$ is increasing in $s$ for $\log(k-s)\leq s\leq (k-1)/2$. Hence within this range $mk(\log(s+1)c(k)+\log(k-s)/(s+1))$ is maximised when $s=(k-1)/2$, and we obtain \begin{equation}\label{unbal-large-s}k\left(c(k)+\frac{2}{k+1}\right)\log\left(\frac{k+1}{2}\right)<k\log k - 0.1\end{equation} for all $k\geq 4$. Then \eqref{bal-small-s}, \eqref{bal-large-s} and \eqref{unbal-large-s} together show that \eqref{LHS-ub} is less than $mk\log k - \frac{m}{10}$, contradicting \eqref{eq:lem2.4main}. \end{poc} Hence $s+t= k$. Now either $\overline{G[T]}$ is $H_t$-free or $\overline{G[S]}$ is $H_{s,t}$-free, since otherwise disjointness of $T$ and $N_G(S)\cup S$ ensures we have a copy of $H$ in $\overline{G[S\cup T]}$. However, since $\abs{S},\abs{T}\geq mk$, $\overline{G[T]}$ is not $H_1$-free and $\overline{G[S]}$ is not $H_{1,k-1}$-free. Thus in the former case we have $2\leq t\leq k-1$ so we may apply the induction hypothesis to $G[T]$ with $k$ replaced by $t$ and $H$ by $H_t$, and in the latter $2\leq s\leq k-1$ so we may apply the induction hypothesis to $G[S]$ with $k$ replaced by $s$ and $H$ by $H_{s,t}$. Here, such applications of the induction hypothesis are possible by the definition of $s$ and $t$. Secondly, suppose $m\leq \abs{S}<mk$. Note that, since $M \geq 10\Delta\beta$ and $M \geq 4k$, in this case \[\abs{T}\geq Mmk\log k-2mk-m\Delta\beta \geq M(mk-1)\log (k-1).\] Indeed, $Mmk\log \frac{k}{k-1} \geq Mm \geq 2mk + m\Delta\beta$ where we have used $\log(1+x) \geq \frac{x}{1+x}$ with $x = \frac{1}{k-1}$. Thus $\abs{T} \geq M\abs{H_{k-1, 1}}\log (k-1)$ and $\abs{S} \geq m \geq \abs{H_1}$. Since $H_1$ is an independent set of size at most $m$, $\overline{G[S]}$ contains $H_1$. Thus $\overline{G[T]}$ must be $H_{k-1, 1}$-free, as otherwise $\overline{G[S\cup T]}$ contains $H$, as before. Since $\abs{T}\geq mk$, it is not $H_{1,1}$-free and so $k-1\geq 2$. We may therefore apply the induction hypothesis to $G[T]$ with $k-1$ replacing $k$ and $H_{k-1,1}$ replacing $H$. The final case is where we have $\abs{N_G(S)\cup S} \geq \left(1+1/(10\log{k})\right)\abs{S} + m\Delta\beta$ for every set $S \subseteq V(G)$ with $m \leq \abs{S} \leq \abs{G}/2$. 
Consider the largest set $X \subseteq V(G)$ with $\abs{X} \leq 2m$ such that $\abs{N_G(X)} \leq \Delta \abs{X}$. Since $\beta \geq 4\Delta$, we have that $\abs{N_G(X) \cup X} \leq (\Delta + 1)\abs{X} \leq m\Delta\beta $. Thus, we must have $\abs{X} < m$. Now consider $F = G- X$. If there is some $Y\subset V(F)$ with $\abs{Y}\leq m$ and $\abs{N_F(Y)} < \Delta\abs{Y}$, then \[ \abs{N_G(X \cup Y) \cup (X \cup Y)} \leq \abs{N_G(X) \cup X} + \abs{N_F(Y) \cup Y} < (\Delta + 1)\abs{X\cup Y}, \] contradicting our choice of $X$. For $\abs{Y}\geq m$, we have \[\abs{N_F(Y) \cup Y} \geq \abs{N_G(Y) \cup Y} - \abs{X}\geq \left(1 + \frac{1}{10\log{k}}\right)\abs{Y} + m\Delta\beta - m.\] If $\abs{Y}\leq \beta m$ then utilising $\beta \geq 10\log k$ we have that $\abs{N_F(Y) \cup Y} \geq\abs{Y}+\Delta\abs{Y}$, whereas if $\beta m \leq \abs{Y} \leq \abs{F}/2$ then $\abs{N_F(Y) \cup Y}\geq \left(1 + 1/(10\log{k})\right)\abs{Y}$. Thus $F$ fulfils the requirements of Lemma~\ref{lem:expansion} for $H'=H$; indeed, $\overline{F}$ is $H$-free and $Mmk\log k-m\leq\abs{F}\leq Mmk\log k$ since $\abs{X}<m$. \end{proof} These expansion properties give us three abilities we will need to construct adjusters. The first is the ability to link together large sets, while avoiding a smaller set, via a path which is not too long. The main advantage of our stronger definition of expansion (compared to that in \cite{PS20}) is that the length of path required is much shorter, being only of order $\log k\log \abs{G}$, and the control we have over the size of the expanding subgraph means that we can ensure $\log\abs{G}=O(\log k)$. \begin{lemma}\label{lem:short-path} Suppose that $G$ $(\Delta,\beta,d,k)$-expands into a set $W\subseteq V(G)$, for $\Delta\geq2$ and $k \geq 2$. If $A,B,C\subseteq V(G)$ are disjoint sets with $A,B\neq\varnothing$ such that $\abs{C\cap W} \leq (\Delta-2) \min\{\abs{A},\abs{B}\}$ and $\abs{C}\leq \beta d/(20\log k)$, then there is an $A$-$B$ path $P$ in $G$ avoiding $C$ and with $\abs{P}\leq44\log k\log\abs{G}$. \end{lemma} \begin{proof} Set $A_0=A$ and $A_{i+1}=(N(A_i)\cup A_i)\setminus C$ for each $i$. Whenever $\abs{A_i}<\beta d$ we have $\abs{A_{i+1}}\geq 2\abs{A_i}$, since $\abs{N(A_i)\cap W}\geq\Delta\abs{A_i}$ while $\abs{C\cap W}\leq(\Delta-2)\abs{A_i}$; thus $\abs{A_a}\geq \beta d$ where $a=\log_2(\beta d)$. For $i\geq a$ we have $\abs{N(A_i)}\geq \frac{1}{10\log k}\abs{A_i}\geq 2\abs{C}$, and so $\abs{A_{i+1}}\geq (1+1/(20\log k))\abs{A_i}$. Consequently $\abs{A_b}\geq\abs{G}/2$, where $b=a+\log_{1+1/(20\log k)}(\abs{G}/(2\beta d))$. Applying $\log(1+x)=-\log(1-\frac{x}{1+x})\geq (1+1/x)^{-1}$ for $x=1/(20\log k)$, and noting that $1+20\log k\leq 22\log k$, we get \[b\leq a+22\log k\log \abs{G}-22\log k\log(2\beta d)\leq22\log k\log \abs{G}.\] Since the same argument applies to $B$, we have that $A_b\cap B_b\neq\varnothing$, and so there is a path of length at most $2b\leq 44\log k \log\abs{G}$ avoiding $C$, as desired. \end{proof} The next ingredient follows from Lemma~\ref{lem:short-path}, and allows us to find a short odd cycle while retaining expansion into the rest of the graph. \begin{lemma}\label{lem:cycle}Suppose $G$ is not bipartite and $(\Delta,\beta,d,k)$-expands into $W\subseteq V(G)$ for $\Delta\geq 4$, $k \geq 2$ and $\beta\geq 8\Delta$. Then $G$ contains an odd cycle $C$ with length at most $88\log k\log\abs{G}$, such that $G$ also $(\Delta-3,\beta,d,k)$-expands into $W\setminus V(C)$.\end{lemma} \begin{proof}Since $G$ is not bipartite, it contains an odd cycle. Let $C$ be a shortest odd cycle; it follows that if $x,y\in V(C)$ then $\operatorname{dist}_G(x,y)=\operatorname{dist}_C(x,y)$.
Indeed, if not, choosing $x,y$ to violate this condition with minimal distance, the shortest path between $x$ and $y$ is internally disjoint from the cycle, and adding this path to whichever path around the cycle has the appropriate parity gives a shorter odd cycle. Consequently $\abs{C}$ is at most twice the diameter of $G$. Using Lemma~\ref{lem:short-path} with $A_{\ref*{lem:short-path}},B_{\ref*{lem:short-path}}$ being any singletons and $C_{\ref*{lem:short-path}}=\varnothing$ implies the desired bound $\abs{C}\leq 88\log k\log\abs{G}$. Now, if $C$ is a triangle then any vertex $v$ can have at most 3 neighbours on $C$ (trivially). Otherwise $v$ can have at most two neighbours on $C$; indeed, if $v$ has at least 3 neighbours on $C$ then either $v$ has two neighbours $x,y$ with $\operatorname{dist}_C(x,y)\geq 3>\operatorname{dist}_G(x,y)$, contradicting the earlier observation, or $v$ has two neighbours $x,y$ which are adjacent, giving a shorter odd cycle $vxy$. Thus for any set $S$ we have $\abs{N_G(S)\cap (W\setminus V(C))}\geq \abs{N_G(S)\cap W}-3\abs{S}$, and, since $G$ $(\Delta,\beta,d,k)$-expands into $W$, $G$ also $(\Delta-3,\beta,d,k)$-expands into $W\setminus V(C)$. \end{proof} Finally, we show that we can construct two large, well-connected disjoint sets centred around any two given vertices. \begin{lemma}\label{lem:wings}Suppose $G$ is a graph which $(\Delta,\beta,d,k)$-expands into $W\subseteq V(G)$, and let $x,y$ be distinct vertices. Then there exist disjoint sets $A,B\subset W\cup\{x,y\}$ of size $\beta d/2$ such that $x\in A$, and every vertex of $A$ is connected to $x$ by a path in $A$ of length at most $\log_\Delta(\beta d/2)$, and similarly for $B$ and $y$.\end{lemma} \begin{proof}We iteratively build up sets $A_i,B_i$ with $\abs{A_i}=\abs{B_i}=\Delta^i$, such that any vertex in $A_i$ is connected to $x$ by a path in $A_i$ of length at most $i$, until $i$ is large enough that $\abs{A_i}\geq\beta d/2$. We start from $A_0=\{x\},B_0=\{y\}$. Given disjoint sets $A_i,B_i$ with $\abs{A_i}=\abs{B_i}<\beta d/2$, we aim to choose disjoint sets $A'\subset N_G(A_i)\cap W$ and $B'\subset N_G(B_i)\cap W$ of size at least $\Delta\abs{A_i}$. Then we can pick $A_{i+1}\subseteq A_i\cup (A'\setminus B_i)$ and $B_{i+1} \subseteq B_i \cup (B'\setminus A_i)$ with the required disjointness, connectivity and size properties. First, take $A''=(N(A_i) \cap W)\setminus N(B_i)$ and $B''=(N(B_i) \cap W)\setminus N(A_i)$. If either of these, say $A''$, has size at least $\Delta\abs{A_i}$, then we may choose $A'\subseteq A''$ and $B'\subseteq N(B_i) \cap W$ both of size $\Delta\abs{A_i}$ (using the expansion property for the latter); by definition of $A''$ these sets are disjoint. Consequently we may assume $\abs{A''},\abs{B''}<\Delta\abs{A_i}$. Since $\abs{A_i\cup B_i}<\beta d$, the expansion property gives $\abs{N(A_i)\cap N(B_i) \cap W}\geq 2\Delta\abs{A_i}-\abs{A''}-\abs{B''}$, and so we can add disjoint subsets of $N(A_i)\cap N(B_i) \cap W$ to $A''$ and $B''$ to form sets $A',B'$ as required. \end{proof} \section{Constructing adjusters}\label{sec:gadgets} In this section we prove the following result for some suitable constant $C$. \begin{lemma}\label{lem:sn} There exists a constant $C>0$ satisfying the following. Fix integers $m,k$ with $C\leq m\leq k^{22}$ and $n\geq 8\times 10^{18}mk\log^4k$. Let $H$ be a complete $k$-partite graph on $mk$ vertices, $H' \sqsubseteq H$ be $(s+1)$-partite for some $1\leq s\leq k-1$ and $m_2$ be the size of the second smallest part of $H$.
Let $G$ be a graph of order at least $sn-n/10$ with $\overline{G}$ being $H'$-free, and such that any two sets of size at least $m_2+4.1\times 10^{18}\log^4 k$ have at least $4.1\times 10^{18}\log^4 k$ disjoint paths between them. \begin{itemize}\item If $s\geq 2$, then $G$ contains a cycle of length exactly $n$. \item If $s=1$ then $G$ contains an $r$-adjuster for some $r\geq 9mk/8$ having length between $0.6n$ and $0.7n$. \end{itemize}\end{lemma} We first show how the expansion properties established in Section \ref{sec:expansion} can be used to construct adjusters. \begin{lemma}\label{lem:find-gadget}Fix $k \geq 2$, $10^6 \leq m\leq k^{22}$ and $n\geq 8\times10^{5}mk\log^2 k$. Let $H$ be a complete $(s+1)$-partite graph of order $m'(s+1)$, where $m' \in \mathbb{R}$, $1\leq s\leq k-1$ and $\abs{H}\leq mk$. Let $G$ be a graph with $\overline{G}$ being $H$-free and $\abs{G}\geq sn/10$. Then $G$ contains an $r$-adjuster for some $r\geq \frac{mk}{2\times 10^4\log^2 k}$, which has between $mk$ and $mk + 3\times 10^4\log^2 k$ vertices. \end{lemma} \begin{proof}First note that we may assume, at the cost of replacing the upper bound with $\abs{H}\leq 2mk$, that every part of $H$ has order at least $m'$. Indeed, we may replace each part of $H$ with order below $m'$ with a part of order $\lceil m'\rceil$; this adds at most $sm'$ vertices, and the new graph has average part size at most $2m'$. In particular, this means that if $H'\sqsubseteq H$ is $a$-partite of order $ab$ then $b\geq m'$ and $ab\leq 2m'(s+1)\leq 2mk$. Clearly, replacing $H$ with a supergraph preserves the property that $\overline{G}$ is $H$-free. Set \[M=(10^{4}k\log k) \times \frac{2m}{m'}\quad\text{ and }\quad \beta=M/(200)\geq 100.\] Then, as $1 \leq s \leq k-1$, we get \[\abs{G}\geq 8\times10^{4}smk\log^2 k\geq M(2m'(s+1))\log (s+1).\] Further, note that $M \geq 4(s+1)$ and $\beta \geq 10 \log k$. We use Lemma~\ref{lem:expansion} with $(M, \beta, \Delta, m, k)_{\ref*{lem:expansion}} = (M, \beta, 10, m', s+1)$ to pass to a subgraph $G'$ where $\overline{G'}$ is $H'$-free for some $H'\sqsubseteq H$ and which $(10,\beta,m'',s')$-expands into $V(G')$, where $\abs{H'}=s'm''$ and $\chi(H')=s'$ with $2 \leq s' \leq s+1$, such that \begin{equation}\label{eq:G'}Mm''s'\log s' - m''\leq\abs{G'}\leq Mm''s'\log s'.\end{equation} In particular, since $m''\geq m'$, we have that $G'$ $(10,\beta,m',s')$-expands. As $\overline{G'}$ is $H'$-free, $G'$ has no independent set of order $\abs{H'} = s'm''$, and it follows that $\chi(G')\geq\abs{G'}/(s'm'')>2$. Note that as $m \leq k^{22}$, from \eqref{eq:G'} we have that $\abs{G'} \leq M (2m'(s+1)) \log k \leq 10^{5}mk^2 \log^2 k \leq 10^{5}k^{26}$. Thus, by Lemma~\ref{lem:cycle} we may find an odd cycle $C_1$ of length at most $88\log k \log\abs{G'}\leq 10^4\log^2 k$, such that we retain $(7,\beta,m',s')$-expansion into $V(G')\setminus V(C_1)$. Choosing almost-antipodal points $v_1,w_1$ on $C_1$ (i.e.\ $\operatorname{dist}_{C_1}(v_1,w_1) = (\abs{C_1} - 1)/2$), we use Lemma~\ref{lem:wings} to find sets $A_1$ and $B_1$ of size $\beta m'/2$ with $A_1\cap B_1=\varnothing$, $A_1\cap V(C_1)=\{v_1\}$ and $B_1\cap V(C_1)=\{w_1\}$, with each vertex in $A_1$ (resp. $B_1$) having a path in $A_1$ to $v_1$ (resp. in $B_1$ to $w_1$) of length at most $\log_{7}(\beta m'/2)<40\log^2 k$. Next we set $X=A_1\cup V(C_1)\cup B_1$ and $Y=V(C_1)$. Note that, by \eqref{eq:G'} and that $\abs{X} \leq \beta m' + 10^4 \log^2 k$, we have $\abs{G' - X}\geq \frac{1}{2}Mm''s'\log s'$.
We apply Lemma~\ref{lem:expansion} with $(M, \beta, \Delta, m, k)_{\ref*{lem:expansion}} = (M/2, \beta, 10, m'', s')$ again to find a subgraph $G''$ with $V(G'')\subset V(G')\setminus X$ which $(10,\beta, m''',s'')$-expands and has order at most $\frac{1}{2}Mm'''s''\log s''$, for some $2 \leq s''\leq s'$ and $m'''\geq m'$. Within $G''$ we find another similar structure $A_2\cup C_2\cup B_2$, and since $G'$ $(10, \beta, m', s')$-expands we can use Lemma~\ref{lem:short-path}, with parameters $(\Delta, \beta, d, k, A, B, C, W)_{\ref*{lem:short-path}} = (10, \beta, m', s', A_2, B_1, V(C_1 \cup C_2), G')$ to find a path $Q_1$ between $A_2$ and $B_1$ that avoids $V(C_1\cup C_2)$ and is of length at most $5000\log^2 k$. We may assume $Q_1$ is disjoint from $A_1\cup B_2$ since otherwise we may find a shorter path between (say) $A_1$ and $A_2$, then relabel appropriately. We then extend $Q_1$ by paths within $A_2$ to $v_2$ and within $B_1$ to $w_1$ to obtain a $w_1$-$v_2$ path $P_1$ of length at most $10^4\log^2 k$. Now we update $X$ to $A_1\cup V(C_1\cup P_1\cup C_2)\cup B_2$ (note that the unused parts of $A_2\cup B_1$ are released) and $Y$ to $V(C_1\cup P_1\cup C_2)$. As before, we choose $G'''$ with $V(G''')\subset V(G')\setminus X$ such that $G'''$ $(10,\beta, m^{(4)},s''')$-expands for some $2 \leq s'''\leq s'$ and $m^{(4)}\geq m'$ and continue as before, updating $X$ and $Y$. Observe that we can continue this process as long as $\abs{G' - X} \geq \frac{1}{2}Mm''s'\log s'$. Thus, since $\abs{G'} \geq Mm''s'\log s' - m''$, we can continue whenever $\abs{X} \leq \frac{1}{4}Mm''s'\log s'$. Since $mk + \beta m' \leq \frac{1}{4}Mm''s'\log s'$, we can continue the process until $\abs{Y}>mk$, as $\abs{A_i}, \abs{B_i} \leq \beta m'/2$ for all $i$. Moreover, at each stage of this process $\abs{Y}$ increases by at most $2\times 10^4\log^2 k$. Thus as soon as $\abs{Y}>mk$ we can stop the process and ensure $\abs{Y} \leq mk + (2\times 10^4)\log^2 k$ at this point. Since $mk + 2\times 10^4\log^2 k \leq \beta m'/20\log (s+1) \leq \beta m'/20\log s'$, we can then apply Lemma~\ref{lem:short-path} with $(\Delta, \beta, d, k, A, B, C, W)_{\ref*{lem:short-path}} = (10, \beta, m', s', A_1, B_r, Y, G')$ to find a path of length at most $10^4\log^2 k$ connecting the two remaining large sets $A_1,B_r$ avoiding $Y$, which closes up the structure into an $r$-adjuster; since each cycle $C_i$ and each path $P_i$ built in this whole process has length at most $10^4\log^2 k$ we have $r \geq \frac{mk}{2 \times 10^4\log^2 k}$, and the adjuster contains all of $Y$, giving the required bounds. \end{proof} We will also need some long cycles to extend the adjusters we construct. \begin{lemma}\label{lem:long-cycle} Fix $k \geq 2$, $10^6 \leq m\leq k^{22}$ and $n\geq 8\times10^{5}mk\log^2 k$. Let $H$ be a complete $(s+1)$-partite graph of order $m'(s+1)$, where $m' \in \mathbb{R}$, $1\leq s\leq k-1$ and $\abs{H}\leq mk$. Let $G$ be a graph with $\overline{G}$ being $H$-free and $\abs{G}\geq sn/10$. Then $G$ contains a cycle of length between $n/((8\times 10^5)\log^2 k)$ and $n/((8\times 10^5)\log^2 k) + 2mk$. \end{lemma} \begin{proof} We proceed as in Lemma~\ref{lem:find-gadget}, with the following differences. First, set \[M=\frac{n}{10m'\log(s+1)}\geq\frac{n}{10m'\log k}\quad\text{and}\quad\beta=M/200.\] Secondly, at each stage rather than finding a cycle with two large sets, each of size $\beta m'/2$, attached we find a single edge with two large sets. 
This ensures that after each joining step we have a path with two large sets attached. Again, we continue until the total size of the path exceeds $\beta m'/(40\log k)$, and then join the two large sets, using Lemma~\ref{lem:short-path}, to obtain the desired long cycle. If this cycle is longer than our desired upper bound, we truncate it using that $\overline{G}$ is $H$-free (choose $mk$ vertices that are non-adjacent on the cycle). \end{proof} In order to complete the proof of Lemma~\ref{lem:sn}, we need to be able to join adjusters together while simultaneously retaining most of the flexibility of each adjuster and most of the length. This resembles Lemma~2.19 of \cite{PS20}, but the fact that in our case the length of an adjuster and the number of vertices are only loosely related creates extra difficulties. \begin{lemma}\label{lem:merge-gadgets}Let $F_i$ be an $r_i$-adjuster of length $\ell_i$ for $i\in\{1,2\}$, with $F_1$ and $F_2$ disjoint. Suppose there are $s>16$ vertex-disjoint paths $P_1,\ldots,P_s$ between $F_1$ and $F_2$, each of length at most $t$. Then for some $i\neq j$ there is an $r$-adjuster of length $\ell$ contained in $F_1\cup F_2\cup P_i\cup P_j$, where \[(r_1+r_2)(1-4s^{-1/2})-4\leq r\leq r_1+r_2\] and \[(\ell_1+\ell_2)(1-4s^{-1/2})\leq\ell\leq\ell_1+\ell_2+2t.\] Furthermore, if $r_2=0$ then the adjuster obtained contains a section of length $\ell-\ell_1$ with no short cycles. \end{lemma} \begin{proof}We may assume each $P_i$ is internally disjoint from $F_1\cup F_2$, since otherwise we can replace it by a shorter path with this property. Write $x_i$ for the endpoint of $P_i$ in $F_1$, and $y_i$ for the endpoint in $F_2$. We will progressively reduce the number of paths to focus our attention on at least $s^{1/2}/4$ paths which relate to one another in a useful way. First, we give each path $P_i$ an ordered pair of labels from $\{+,-\}$ as follows. If $x_i$ lies on one of the $r_1$ short cycles of $F_1$, choose the first label to be $+$ if it is on the longer section of that cycle (that is, the longer section between the two vertices of that short cycle that have degree $3$ in $F_1$), and $-$ if it is on the shorter section. If $x_i$ lies on a path between cycles, choose the first label arbitrarily. Choose the second label similarly with respect to $y_i$. Now we may find a set of at least $s/4$ paths with identical label pairs. Note that this ensures there are routes $C_1$ around $F_1$ and $C_2$ around $F_2$ which cover all the $x_i$ and $y_i$ for $P_i$ in this set of paths, with $\abs{C_i}\in[\ell_i-r_i,\ell_i]$. In what follows we consider $C_1$ and $C_2$ to be oriented in a particular direction around $F_1$ and $F_2$, respectively. Renumbering, if necessary, we may assume these paths are $P_1,\ldots,P_{\ceil{s/4}}$, and that $x_1,\ldots,x_{\ceil{s/4}}$ are in order of their appearance in $C_1$. By the Erd\H{o}s--Szekeres Theorem (Theorem \ref{thm:ES}), there is a subset of $p\geq\left\lceil\sqrt{s/4}\right\rceil\geq 2$ paths with endvertices $y_i$ either in order of their appearance in $C_2$ or in reverse order. Again, by renumbering and reversing $C_2$ if necessary we may assume these are $P_1,\ldots,P_{p}$ and the $y_i$ appear in reverse order. Let $C'_1$ be the longest route in $F_1$, and associate each vertex $x_i$ for $i\in[p]$ with a vertex $x'_i$, chosen as follows.
If $x_i$ is internal to a shorter side of a short cycle, let $x'_i$ be the vertex on the longer side which is the same distance from the start vertex of the cycle (consequently, $x'_i$ is one step further from the end of the cycle than $x_i$). If $x_i$ is on one of the paths or is internal to the longer side of a short cycle, set $x'_i=x_i$. Define $C'_2$ and each $y_i'$ similarly. Now we show that some consecutive pair of paths works. Consider making a new adjuster from $F_1$, $F_2$, $P_i$ and $P_{i+1}$ for $i\in[p]$ (we take subscripts modulo $p$) as follows: starting from $x_i$, traverse $P_i$ to $y_i$, then follow a route around $F_2$ to $y_{i+1}$, including both sections of any short cycle traversed in this route (apart from any short cycle containing $y_i$ or $y_{i+1}$), traverse $P_{i+1}$ to $x_{i+1}$, then follow a route around $F_1$ to $x_i$ similarly. See Figure \ref{fig:gadget-merge} for an example. Write $S_1$ for the edges of $C'_1$ between $x'_i$ and $x'_{i+1}$ and $S_2$ for the edges of $C'_2$ between $y'_{i+1}$ and $y'_{i}$. The short cycles of the new adjuster are those of the original two adjusters except for any which intersect $S_1$ or $S_2$. The longest route through the new adjuster uses all of $C'_1$ except for at most $\abs{S_1} + 1$ edges (the $+1$ added if $x'_{i+1}\neq x_{i+1}$), and all of $C'_2$ except for at most $\abs{S_2} + 1$ edges (the $+1$ added if $y'_{i+1}\neq y_{i+1}$). It also uses all of the two paths $P_i$ and $P_{i+1}$, which contain at least one edge each, so its length is at least $\ell_1+\ell_2-\abs{S_1}-\abs{S_2}$. Note that the sets $S_1\cup S_2$ obtained for different choices of $i\in[p]$ are disjoint. \begin{figure} \caption{Creating a new adjuster using paths $P_i$ and $P_{i+1}.} \label{fig:gadget-merge} \end{figure} Now we claim that for some pair $P_i,P_{i+1}$ the adjuster constructed has the required properties. Note that, since each path $P_i$ has length at most $t$, the upper bound on length is satisfied for all adjusters constructed in this way, so it suffices to prove the lower bounds on $\ell$ and $r$. Suppose not; then for each $i$ either more than $(r_1+r_2)4s^{-1/2}+4$ short cycles are lost, or $\abs{S_1}+\abs{S_2}>4s^{-1/2}(\ell_1+\ell_2)$. In the former case this means more than $(r_1+r_2)4s^{-1/2}$ short cycles lie entirely in the section between $x_i$ and $x_{i+1}$ or the section between $y_{i+1}$ and $y_i$, since only $4$ can lie partially in these sections. Since these sections are disjoint, the former case occurs for fewer than $s^{1/2}/4$ choices of $i$. Similarly the latter case occurs for fewer than $s^{1/2}/4$ choices of $i$. Since there are at least $s^{1/2}/2$ choices for $i$, some choice gives an adjuster with the required properties. Finally, note that the section of the adjuster obtained consisting of vertices not in $F_1$ is contiguous, and contributes at least $\ell-\ell_1$ to the overall length. If $r_2=0$ this section has no short cycles, as required. \end{proof} We are now ready to prove the main result of this section. \begin{proof}[Proof of Lemma~\ref{lem:sn}] Let $C= 10^{30}$. We will prove both parts of the statement simultaneously, by finding an $r$-adjuster of suitable length for some $r\geq 9mk/8$. If $s\geq 2$ this adjuster can be taken to have length slightly larger than $n$, such that one of the routes has length exactly $n$. However, for $s=1$ this is not possible, since $G$ itself could have order less than $n$.
In this case we show that the length of the adjuster can be made significantly larger than $n/2$. We repeatedly apply Lemma~\ref{lem:find-gadget}, with $H_{\ref*{lem:find-gadget}}=H'$ and $G_{\ref*{lem:find-gadget}}=G-X_{i-1}$, where $X_0=\varnothing$ and $X_i=V(F'_{i})$ for $i\geq 1$ is the set of vertices used up from $G$ (see the construction of $F_i'$, for each $i$, below), in order to find $f \leq 4\times 10^4\log^2 k$ adjusters $F_i$ for $i\in[f]$, such that $F_i$ is an $r_i$-adjuster and $r_{\mathrm{total}}:=\sum^{f}_{i=1}r_i\geq 2mk$. Note that, since $k^{22}\geq m\geq C= 10^{30}$ (so that $k\geq 24$) and $m_2\leq \frac{mk}{k-1}$, each $F_i$ satisfies \begin{equation}\abs{F_i}\geq mk\geq m_2+4.1\times 10^{18}\log^4k.\label{fi-bound}\end{equation} Also, the total size of all these adjusters (using the fact that every adjuster found has size at most $mk+3\times 10^4\log^2 k\leq 1.1mk$) is at most $5\times 10^4mk\log^2k$. Set $F'_1=F_1$ and $r'_1=r_1$. After finding each $F_i$ (for $i\geq 2$), we merge it with the adjuster $F'_{i-1}$ using Lemma~\ref{lem:merge-gadgets}. There are at least $q=4.1\times 10^{18}\log^4k$ vertex-disjoint paths between the two adjusters, by \eqref{fi-bound} and the hypothesis, and we may choose these to have length at most $\abs{H'}+s\leq mk+k$ since the fact that $\overline G$ is $H'$-free implies that any longer path contains a short-cut. Thus we obtain a merged adjuster $F'_i$, which is an $r'_i$-adjuster with \begin{equation}(r'_{i-1}+r_i)\geq r'_i\geq (r'_{i-1}+r_i)(1-4q^{-1/2}) - 4,\label{merge-power}\end{equation} and \begin{equation}\abs{F'_i}\leq \abs{F'_{i-1}}+\abs{F_i}+2mk+2k.\label{merge-total}\end{equation} It follows from \eqref{merge-total} that for each $i \in [f]$, \[\abs{F'_{i}}\leq \sum_{j\leq i}\abs{F_{j}}+(i-1)(2mk+2k)\leq (5\times 10^4)mk\log^2k+(4\times 10^4)\log^2 k(2mk+2k)\leq n/10,\] and so, setting $X_i=V(F'_i)$, we have $\abs{G-X_{i-1}}\geq (sn - n/10) - n/10 \geq sn/10$. Consequently, we can indeed repeatedly apply Lemma~\ref{lem:find-gadget} with $G_{\ref*{lem:find-gadget}} = G-X_{i-1}$ for all $i \in [f]$. Using \eqref{merge-power}, by induction we have $r'_{i-1}\leq \sum_{j<i}r_j$ for each $i\geq 2$, and so \begin{equation}r'_i\geq r'_{i-1}+r_i-4q^{-1/2}r_{\mathrm{total}} - 4.\label{r-bound}\end{equation} Summing \eqref{r-bound}, we obtain $r'_{f}\geq r_{\mathrm{total}}(1-((16\times 10^4)q^{-1/2}\log^2 k))-4f\geq 3r_{\mathrm{total}}/4$. We repeatedly apply Lemma~\ref{lem:long-cycle} with $H_{\ref*{lem:long-cycle}} = H'$ and $G_{\ref*{lem:long-cycle}} = G - Y_j$, where $Y_1 = V(F_{f}')$ and $Y_j =Y_1\cup V(C_j')$ for $j \geq 2$ is the set of vertices used up from $G$ so far (see the construction of $C_i'$ below), in order to find $c \leq 1.6 \times 10^6\log^2 k$ cycles $C_i$ each of length at least $n/(8\times 10^5\log^2k) \geq m_2 + 4.1 \times 10^{18}\log^4k$ (which we regard as $0$-adjusters) satisfying the following bound on their total length: \begin{itemize}\item if $s\geq 2$ then $3n/2 \geq \sum_{i=1}^c\abs{C_i} \geq 4n/3$; whereas \item if $s=1$ then $3n/4 \geq \sum_{i=1}^c\abs{C_i} \geq 2n/3$. \end{itemize} Set $C_1' = C_1$. After finding each $C_i$ (for $i \geq 2$), we merge it with the cycle $C_{i-1}'$ using Lemma~\ref{lem:merge-gadgets}. As before, there are at least $q = 4.1 \times 10^{18}\log^4 k$ vertex-disjoint paths between the two cycles, by the hypothesis, and we may choose these to have length at most $\abs{H'} + s \leq mk + k$.
Thus we obtain a merged cycle $C_i'$ with \begin{equation} (\abs{C_{i-1}'} + \abs{C_i})(1 - 4q^{-1/2})\leq \abs{C_i'} \leq \abs{C_{i-1}'} + \abs{C_i} +2mk + 2k. \end{equation} By induction, and that after each merging of cycles we can truncate the length of the new cycle (just as we did with the paths), we have $\abs{C_{i-1}'} \leq \left(\sum_{j<i}\abs{C_j}\right) + 2mk+2m$ for each $i \geq 2$, and so \begin{equation}\label{eq:C_i'}\abs{C_i'} \geq \abs{C_{i-1}'} + \abs{C_i} - 4q^{-1/2}\left(\left(\sum_{j=1}^{c}\abs{C_j}\right) + 2mk + 2m\right).\end{equation} Summing \eqref{eq:C_i'}, we obtain \[\abs{C_c'} \geq \sum_{j=1}^{c}\abs{C_j}(1 - (6.4 \times 10^6)q^{-1/2}\log^2 k) - ((6.4 \times 10^6)q^{-1/2}\log^2 k)(2mk + 2m),\] hence for $s\geq 2$, $\abs{C_c'} \geq 5n/4$ and for $s=1$, $\abs{C_c'} \geq 0.62n$. Now merge $C_c'$ with $F_{f}'$ using Lemma~\ref{lem:merge-gadgets} to produce our desired $r$-adjuster. One can see that $r \geq (3/4)^2r_{\mathrm{total}}\geq 9mk/8\geq (m+1)k$. Suppose $s\geq 2$. Note that the final use of Lemma \ref{lem:merge-gadgets} merges an adjuster of length less than $n$ with a $0$-adjuster of length at least $5n/4$, and thus our $r$-adjuster of length $\ell>n$ contains a section of length at least $\ell-n$ in which there are no short cycles. Using the fact that $\overline G$ is $H'$-free we can shortcut parts of this section, if necessary, without reducing $r$, until $0\leq\ell-n\leq mk+k$. Now, using the fact that it is an $r$-adjuster for some $r\geq (m+1)k$, we may find a route of length exactly $n$. Finally, if $s=1$ the final adjuster similarly has length $\ell\geq 0.6n$ and contains a section of length at least $\ell-0.6n$ with no short cycles. We can shorten this section, if necessary, as before to obtain an $r$-adjuster of length between $0.6n$ and $0.7n$. \end{proof} \section{Stability and exactness}\label{sec:stability} In this section we prove our main result via the following statement. \begin{theorem}\label{thm:main2} There exists a constant $C' > 0$ such that for any complete $k$-partite graph $H$ on $mk$ vertices where $C' \leq m \leq k^{22}$ and $n \geq 10^{60}mk\log^4 k$, we have that $C_n$ is $H$-good, i.e.\ $R(C_n, H) = (\chi(H)-1)(n-1)+\sigma(H)$. \end{theorem} \begin{proof}[Proof of Theorem \ref{thm:main}] Set $k=\chi(H)$ and $m=\abs{H}/k$. By adding edges to $H$, if necessary, without changing $\sigma(H)$, we may assume it is a complete $k$-partite graph for $k=\chi(H)$. If $m>k^{22}$ then the same is true of the size of the largest part, and so Corollary \ref{PS-cor} gives the required bound for some constant $C''$ (and in fact the logarithmic term is not needed). If $C'\leq m\leq k^{22}$ then Theorem \ref{thm:main2} gives the required bound with constant $10^{60}$. Finally, if $m<C'$ then by increasing the size of the largest part we may replace $H$ with a graph $H'$ of order at most $C'\abs{H}$ and $\sigma(H')=\sigma(H)$ which satisfies the conditions of Theorem \ref{thm:main2}, giving that $C_n$ is $H'$-good (and hence $H$-good) for $n\geq 10^{60}C'\abs{H}\log^4\chi(H)$. \end{proof} To prove Theorem \ref{thm:main2}, we first establish the following stability result. \begin{lemma}\label{lem:stability} There exists a constant $C>0$ satisfying the following. Fix $k\geq 2$ and $C\leq m\leq k^{22}$ and $z\geq 0$ and $n\geq 10^{49}km\log^4 k$. Let $H$ be a complete $k$-partite graph with $mk$ vertices, and write $m_1\leq \cdots\leq m_k$ for the sizes of the parts. Define $\hat{H}\sqsubset H$ to be the graph obtained by removing a part of size $m_2$.
Suppose $G$ is a $C_n$-free graph with $\abs{G}\geq(k-1)(n-1)+z$ such that $\overline{G}$ is $H$-free. Then at least one of the following holds. \begin{enumerate}[(i)] \item\label{stability-i} There exists $G'\subset G$ such that $\overline{G'}$ is $\hat H$-free and $\abs{G'}\geq (k-2)(n-1)+z$. \item\label{stability-ii} There exist disjoint sets $A_1,\ldots,A_{k-1}$, with no edges between them, such that for each $i$ we have \begin{itemize} \item $\abs{A_i}\geq 0.95n$, \item $\overline{G[A_i]}$ is $K_{m_1,m_2}$-free, and \item within $G[A_i]$, any two disjoint sets of size at least $m_2+4.1\times 10^{18}\log^4 k$ have at least $4.1\times 10^{18}\log^4 k$ disjoint paths between them. \end{itemize} \end{enumerate} \end{lemma} \begin{proof} Let $C$ be a constant given in Lemma~\ref{lem:sn}. Suppose not. Start with $A=V(G)$ and $S=\varnothing$. If there exists a set $X$ of order at most $4.1\times 10^{18}\log^4k$ such that removing $X$ from $G[A]$ increases the number of components of $G[A]$ of order at least $m_2$, remove $X$ from $A$ and add it to $S$. Do this for $k$ iterations or until no such set exists. At the end of the process we have $\abs{S}\leq 4.1\times 10^{18}k\log^4k$. Note that any set $B$ of order at least $m_2$ satisfies $\abs{B\cup N(B)}\geq n$, else $G[V(G)\setminus(B\cup N(B))]$ satisfies (i). In particular, if $B\subset A$ with $\abs{B}\geq m_2$ then \[\abs{B\cup N_{G[A]}(B)}\geq \abs{B\cup N(B)}-\abs{S}\geq n-4.1\times 10^{18}k\log^4 k\geq \abs{H}.\] It follows that the total size of all components of $G[A]$ of size less than $m_2$ is less than $m_2$, since otherwise there is a union of some of these components of size between $m_2$ and $2m_2$, contradicting the fact that sets of this size expand well. It also follows that any other component has order at least $n-4.1\times 10^{18}k\log^4 k$; let these components of $G[A]$ be $A_1,\ldots A_t$, where $t\geq 1$ and \begin{equation}\sum_{i\in[t]}\abs{A_i}\geq\abs{A}-m_2\geq \abs{G}-\abs{S}-m_2\geq(k-1)n-k-4.1\times 10^{18}k\log^4k-m_2. \label{Ai-size}\end{equation} We must have $t\leq k-1$ since otherwise $\overline G$ contains a copy of $H$ obtained by choosing a suitable number of vertices from $A_1,\ldots,A_k$. In particular, we added vertices to $S$ fewer than $k$ times, and so the process described above stopped because no more removals were possible. This will give the required connectivity properties. Indeed, if $B_1,B_2$ are disjoint sets in $A_i$ of size at least $m_2+4.1\times 10^{18}\log^4 k$ then no set $X$ of order $4.1\times 10^{18}\log^4 k$ separates $B_1\setminus X$ from $B_2\setminus X$ within $G[A_i]$ (since both these sets have size at least $m_2$ and by the construction of $A$). Thus, by Menger's theorem there are at least $4.1\times 10^{18}\log^4 k$ disjoint paths between them in $G[A_i]$. For each $i\leq t$, we define $k-t+1$ subgraphs $H^{(1)}_i\sqsubset \cdots \sqsubset H^{(k-t+1)}_i\sqsubset H$ as follows. Set $H_i^{(1)}$ to be an independent set of $m_{k+1-i}$ vertices, and for $2\leq j\leq k-t+1$ set $H_i^{(j)}=K_{m_{k+1-i},m_{k-t},m_{k-t-1},\ldots,m_{k-t-j+2}}$, i.e.\ $H_i^{(j)}$ is a complete $j$-partite subgraph consisting of the $i$th largest class of $H$ together with $j-1$ of the $k-t$ smallest classes taken in decreasing order. For each $i\leq t$, let $s_i\geq 1$ be the largest value such that $\overline{G[A_i]}$ contains $H_i^{(s_i)}$. 
Suppose that $\sum_{i\in[t]} s_i\geq k$, and consider the graph induced in $\overline{G}$ by the vertices of a copy of $H_i^{(s_i)}$ in $\overline{G[A_i]}$ for each $i$. Taking the largest class from each copy creates a copy of $K_{m_k,\ldots,m_{k-t+1}}$, and the other classes give a complete $(k-t)$-partite graph with $j$th largest part having size at least $m_{k-t+1-j}$ for each $j\in[k-t]$, which contains $K_{m_{k-t},\ldots,m_1}$. Thus $\overline G\supset H$, a contradiction. Consequently we have $\sum_{i\in[t]} s_i\leq k-1$, and in particular $s_i\leq k-t$ for each $i$. By definition of $s_i$, $\overline{G[A_i]}$ is $H^{(s_i+1)}_i$-free for each $i$. If $s_i>1$ we have $\abs{A_i}<s_in-n/10$, since otherwise Lemma~\ref{lem:sn}, taking $H'_{\ref*{lem:sn}}=H_i^{(s_i+1)}$, gives a copy of $C_n$ inside $G[A_i]$. Contrariwise, if $s_i=1$ we have $\abs{A_i}<n+m_{k-t}$ by Corollary \ref{PS-cor38}. In either case, for each $i\in [t]$ we have $\abs{A_i}<s_in+m_{k-t}$. Note that $(t-1)m_{k-t}\leq\abs{H}=mk$. Thus, if for any $i$ we have $s_i>1$ then \[\sum_{j\in [t]}\abs{A_j}<s_in-n/10+\sum_{\substack{j\in[t]\\j\neq i}}(s_jn+m_{k-t})\leq (k-1)n-n/10+(t-1)m_{k-t},\] contradicting \eqref{Ai-size}. We may therefore assume each $s_i=1$. Similarly if $\sum_{i\in [t]} s_i<k-1$, and so $t<k-1$, we obtain a contradiction to \eqref{Ai-size}. Thus $t=k-1$, and each of $A_1,\ldots,A_{k-1}$ has size at least $n-4.1\times 10^{18}k\log^4k\geq 0.95n$. If for some $i$ we have $K_{m_1,m_2}\subset\overline{G[A_i]}$, then taking the copy of $K_{m_1,m_2}$ together with $m_k$ vertices from each $A_j$ with $j\neq i$ gives a copy of $K_{m_k,\ldots,m_k,m_2,m_1}$ (with $k$ parts in total) in $\overline{G}$, and so $\overline G\supset H$, a contradiction. Together with the connectivity properties heretofore established, this completes the proof. \end{proof} We now proceed from the stability result above to exactness. We need the following result which says that when the complement is $K_{m_1,m_2}$-free we can do much better than Lemma~\ref{lem:merge-gadgets}. \begin{lemma}\label{lem:merge-efficient}Let $F_i$ be an $r_i$-adjuster of length $\ell_i$ for $i\in\{1,2\}$, with $F_1$ and $F_2$ disjoint, and suppose $\overline{G[V(F_i)]}$ is $K_{m_1,m_2}$-free for each $i$, where $m_2\geq m_1$. Suppose further that there are two vertex-disjoint paths $P_1,P_2$ between $F_1$ and $F_2$, each of length at most $t$. Then there is an $r$-adjuster of length $\ell$ all of whose vertices are contained in $F_1\cup F_2\cup P_1\cup P_2$, where $r\geq r_1+r_2-2(m_1+m_2)$ and $\ell_1+\ell_2-2(m_1+m_2)\leq\ell\leq\ell_1+\ell_2+2t$. Furthermore, if $r_2=0$ then the adjuster contains a section of length at least $\ell-\ell_1$ with no short cycles. \end{lemma} \begin{proof}Let $P_i$ have ends $x_i$ in $F_1$ and $y_i$ in $F_2$. We claim that there is a path $Q_1$ of length at least $\ell_1-m_1-m_2-1$ in $F_1$ between $x_1$ and $x_2$. To see this, first note that unless $x_1$ and $x_2$ are on opposite sides of the same short cycle, there is a route in $F_1$ which includes both $x_1$ and $x_2$, and has length at least $\ell_1-2$. If $x_1$ and $x_2$ are at distance at most $m_2$ in this route then by taking the whole route except for a path of length at most $m_2$ between the two vertices, we are done. If not, then the $m_1$ vertices immediately before $x_1$ on the route do not include $x_2$, and the $m_2$ vertices immediately before $x_2$ do not include $x_1$.
There is an edge $z_1z_2$ between these two sets of vertices in $G[V(F_1)]$, since $\overline{G[V(F_1)]}$ is $K_{m_1,m_2}$-free, and so there is a path which starts at $x_1$, goes around the route to $z_2$, via the extra edge to $z_1$ and then around the route in the opposite direction to $x_2$. This path misses out at most $m_1+m_2-1$ vertices from the route, so has at least the required length. See Figure \ref{fig:nearly-covering-path}. Finally, if $x_1$ and $x_2$ are on opposite sides of the same short cycle, there is a path around the adjuster from $x_1$ to $x_2$ using the vertices on that short cycle after $x_1$ and before $x_2$, and the longer side of every other short cycle. Similarly there is another path which uses the vertices on that short cycle after $x_2$ and before $x_1$. At least one of these uses at least half of every short cycle, so has length at least $\ell_1$. \begin{figure} \caption{A long $x_1$-$x_2$ path using the extra edge $z_1z_2$ (shown in bold, and highlighted if colour is available). Dashed lines indicate parts of the adjuster not in the relevant route.} \label{fig:nearly-covering-path} \end{figure} Now taking this path together with a corresponding path for $F_2$ and the two paths $P_1,P_2$ between them gives a cycle $C$ of length between $\ell_1+\ell_2-2(m_1+m_2)$ and $\ell_1+\ell_2+2t$. We extend this to an adjuster by including all short cycles from $F_1$ and $F_2$ such that one of the routes round that short cycle is entirely contained in $C$. Since all but at most $m_1+m_2$ edges of a route of $F_i$ are included in the cycle, this means at most $2(m_1+m_2)$ short cycles of $F_1$ or $F_2$ are not included, giving $r\geq r_1+r_2-2(m_1+m_2)$. Finally, note that the section of the adjuster obtained consisting of vertices not in $F_1$ is contiguous, and contributes at least $\ell-\ell_1$ to the overall length. If $r_2=0$ this section has no short cycles, as required. \end{proof} This enables us to effectively deal with the case where two of the $A_i$ (or, more precisely, the expanding subgraphs within them) are connected by two disjoint paths. \begin{proof}[Proof of Theorem~\ref{thm:main2}] It suffices to prove, via induction on $k$, the conclusion for $2C\frac{k-1}{k}\le m\le k^{22}$, where $C$ is at least the constants in Lemma~\ref{lem:sn} and Lemma~\ref{lem:stability}. Since we have $C\leq 2C\frac{k-1}{k}\leq 2C$, we can then take $C'=2C$ to conclude. Suppose it is not true, and take a counterexample, i.e.\ a graph $G$ of order $(k-1)(n-1)+\sigma(H)$ that is $C_n$-free and with $H$-free complement. We apply Lemma~\ref{lem:stability}. If \eqref{stability-i} applies then we have a graph $G'\subset G$ of order $(k-2)(n-1)+\sigma(H)$, with $\hat H$-free complement. Note that by definition of $\hat H$ we have $\sigma(H)=\sigma(\hat H)$. Also, since $m_2\leq \frac{mk}{k-1}$, we have \[m(\hat H)\geq m\frac{k(k-2)}{(k-1)^2}\geq 2C\frac{k-2}{k-1}.\] Thus if $\abs{\hat H}\leq (k-1)^{23}$ we have $G'\supset C_n$ by the induction hypothesis, whereas otherwise we have $G'\supset C_n$ by Corollary~\ref{PS-cor}. Consequently we must have \eqref{stability-ii}. 
Within each set $A_i$ we apply Lemma \ref{lem:expansion} with $G_{\ref*{lem:expansion}}=G[A_i]$, $H_{\ref*{lem:expansion}}=K_{m_1,m_2}$, $M_{\ref*{lem:expansion}}=\abs{A_i}/((m_1+m_2)\log 2)$ and $\beta_{\ref*{lem:expansion}}=M_{\ref*{lem:expansion}}/100>40$ (note that also $\beta_{\ref*{lem:expansion}} \geq 10 \log k$) to find a $(10,M_{\ref*{lem:expansion}}/100,(m_1+m_2)/2,2)$-expander $H_i$ of order at least $\abs{A_i}-(m_1+m_2)/2\geq \abs{A_i}-m$. Suppose that $H_i$ and $H_j$ are linked (in $G$) by two disjoint paths for some distinct $i,j\in[k-1]$. Recall that $\abs{A_i}\geq 0.95n$. Applying Lemma~\ref{lem:sn} to $G[A_i]$, we find an $r$-adjuster $F_i$ for $r\geq 9mk/8$ with length between $0.6n$ and $0.7n$. Similarly, we may find an adjuster in $A_j$ of length between $0.6n$ and $0.7n$; however, instead of using it as an adjuster we simply take the longest route to find a cycle $C_j$ of that length. Note that the size of the adjuster and cycle ensures that they intersect $H_i$ and $H_j$ respectively in at least $\abs{H_i}+0.6n-\abs{A_i}\geq 0.5n$ vertices. Suppose that the disjoint paths between $H_i$ and $H_j$ have endpoints $x_1$ and $x_2$ in $H_i$ and endpoints $y_1$ and $y_2$ in $H_j$. By Lemma \ref{lem:wings}, we may find disjoint sets $B_1\ni x_1$ and $B_2\ni x_2$ in $V(H_i)$ of size $\abs{A_i}/200$, such that every vertex in $B_1$ is connected to $x_1$ by a path, and likewise for $B_2$ and $x_2$. We may therefore use Lemma \ref{lem:short-path} with $A_{\ref*{lem:short-path}}=V(F_i)\cap V(H_i),B_{\ref*{lem:short-path}}=B_1\cup B_2,C_{\ref*{lem:short-path}}=\varnothing$ to give a path from $V(F_i)$ to $B_1$ or $B_2$, without loss of generality the former. Extend it to a path from $V(F_1)$ to $x_1$ using vertices from $B_1$, and remove vertices from this path, if necessary, to obtain a shortest path $P_1$. This ensures that $\abs{V(P_1)}\leq m_1+m_2+1$ since $\overline{G[A_i]}$ is $K_{m_1,m_2}$-free; note also that $P_1$ is disjoint from $B_2$. Next apply Lemma \ref{lem:short-path} with $A_{\ref*{lem:short-path}}=(V(F_i)\cap V(H_i))\setminus V(P_1),B_{\ref*{lem:short-path}}=B_2,C_{\ref*{lem:short-path}}=V(P_1)$ to give a disjoint $x_2$-$V(F_i)$ path $P_2$. We can similarly find two disjoint paths from $y_1$, $y_2$ to $F_j$ in $H_j$, and the union of these with the existing paths between $H_i$ and $H_j$ give two disjoint paths between the adjuster and the cycle. Since $\overline{G}$ is $H$-free, each of these paths contains a shortest path of length at most $mk+k$. We apply Lemma~\ref{lem:merge-efficient} to merge the adjuster and cycle using these shortest paths, obtaining an adjuster with at least $9mk/8-2(m_1+m_2)\geq mk+k$ short cycles, of total length $\ell$ between $1.2n-2(m_1+m_2)> 1.19n$ and $1.4n+2(mk+k)<1.41n$. Recall that $F_i$ has length at most $0.7n$, so the adjuster obtained from merging $F_i$ and $C_j$ contains a section of length at least $\ell-0.7n\ge 0.49n>\ell-n$ without short cycles (the part corresponding to $C_j$). We may reduce this section until the total length is between $n$ and $n+mk+k$, since $\overline G$ is $H$-free, and then use the reducing power of the adjuster to obtain a cycle of length exactly $n$. Thus we may assume that no pair $H_i,H_j$ is linked by two disjoint paths. It follows that for some index $i$ there is a vertex $v$ which separates $H_i$ from all other $H_j$. Let $A$ be the component of $G-v$ containing $H_i-v$ and let $G'=G[A\cup\{v\}]$. 
Note that $\overline{G'}$ is $K_{m_1+1,m_2+1}$-free, since otherwise we may choose $m_k$ vertices from $H_j-v$ for each $j\neq i$ together with the vertices of a $K_{m_1,m_2}$ in $\overline{G'-v}$ to give a graph containing $H$ in $\overline{G}$. Recall that \eqref{stability-i} of Lemma~\ref{lem:stability} does not apply. It follows that any set $B\subset A$ of size $m_2$ has $\abs{N_G(B)\cup B}\geq n$, since otherwise $\overline{G-(N_G(B)\cup B)}$ is not $\hat{H}$-free, implying that $\overline G$ is not $H$-free. Since also $N_{G'}(B)\cup B=N_G(B)\cup B$, we have $\abs{N_{G'}(B)\cup B}\geq n$. Thus, for any set $B'\subset V(G')$ of order at least $m_2+1$, we have $\abs{N_{G'}(B')\cup B'}\geq n$ (since $B'$ contains a set of order $m_2$ not including $v$). Now, using Lemma~\ref{lem:long-cycle} with $G_{\ref*{lem:long-cycle}}=H_i$ and $H_{\ref*{lem:long-cycle}}=K_{m_2,m_2}$, we may find a cycle of length at least $10m_2$ in $H_i$. Taking $x,y$ to be two consecutive vertices on this cycle, Lemma \ref{PS-lemma} applies to $G_{\ref*{PS-lemma}}=G'$ with $m_{\ref*{PS-lemma}}=m_2+1$, and we may find a path of order exactly $n$ between $x$ and $y$, giving a copy of $C_n$. \end{proof} \end{document}
\begin{document} \title{A Note on BSDEs with singular driver coefficients} \begin{abstract} In this note we study a class of BSDEs which admits a particular singularity in the driver. More precisely, we assume that the driver is not integrable and degenerates when approaching to the terminal time of the equation. \end{abstract} \section{Introduction} Since the seminal works of Bismut \cite{mjarBismut_78} and of Pardoux and Peng \cite{mjarPardoux_Peng90}, a lot of attention has been given to the study of Backward Stochastic Differential Equations (BSDEs) as this object naturally arises in stochastic control problems and was found to be an ad hoc tool for many financial applications as illustrated in the famous guideline paper \cite{mjarElKaroui_Peng_Quenez}. Recall that a BSDE takes the following form: $$ Y_t=Y_T -\int_t^T f(s,Y_s,Z_s) ds -\int_t^T Z_s dW_s, \quad t\in [0,T],$$ where $W$ is a multi-dimensional Brownian motion. The historical natural assumption for providing existence and uniqueness (in the appropriate spaces) is to assume the driver $f$ to be Lipschitz plus some integrability conditions on the terminal condition. However, in applications one may deal with drivers which are not Lipschitz continuous, and which exhibit e.g. a quadratic growth in $z$ (in the context of incomplete markets in Finance), or only some monotonicity in the $y$-variable. One way of relaxing the Lipschitz growth condition in $y$ is the so-called stochastic Lipschitz assumption which basically consists in replacing the usual Lipschitz constant by a stochastic process satisfying appropriate integrability conditions. As noted in Section 2.1.2 "Pathology" in \cite{mjarElKaroui_redbook}, even in the stochastic linear framework, one has to be very careful when relaxing the integrability conditions on the driver of the equation. As an illustration, consider the following example presented in \cite{mjarElKaroui_redbook} (cf. \cite[(2.9)]{mjarElKaroui_redbook}): \begin{equation} \label{mjareq:EKred} Y_t =0+\int_t^T [r Y_s +\sigma Z_s + \gamma Y_s (e^{\gamma (T-s)}-1)^{-1}] ds +\int_t^T Z_s dW_s, \quad t\in [0,T], \end{equation} where $W$ is a one-dimensional Brownian motion, $r, \sigma, \gamma>0$ and $T$ is a fixed positive real number. It is proved in \cite{mjarElKaroui_redbook} that the BSDE \eqref{mjareq:EKred} has an infinite number of solutions. Note that here the driver is not Lipschitz continuous in $y$ due to the exploding term $(e^{\gamma (T-t)}-1)^{-1}$ as $t$ goes to $T$, and completely escapes the existing results of the literature.\\\\ \noindent The aim of this note is to elaborate on the pathology mentioned in \cite{mjarElKaroui_redbook} and to try to understand better what kind of behavior can appear as soon as the usual integrability conditions are relaxed. In light of Example \eqref{mjareq:EKred}, multiple solutions is one of the behaviour which can be observed. However, is it the only type of problem that can occur ? For instance is it clear that existence is guaranteed? This note is an attempt in this direction and is motivated by the work in preparation \cite{mjarJNR} where equations with this specific pathology appear naturally in the financial application under interest in \cite{mjarJNR}.\\\\ \noindent We proceed as follows. First we make precise the context of our study and we explain what is the notion of solution we use for dealing with non-integrable drivers. Then we deal with the particular case of affine equations in Section \ref{mjarsection:affine}. 
These equations already allow us to present several types of pathological behaviour. We then study in Section \ref{mjarsection:nonlinear} a class of non-linear drivers which will be of interest for a specific financial application presented in \cite{mjarJNR}. In particular, in our main result Theorem \ref{mjarth:expo2} we provide an existence and uniqueness result under a monotonicity assumption on the mapping $f$ in \eqref{mjareq:proto} defined below. \section{Preliminaries} In this note $T$ denotes a fixed positive real number and $d$ a given positive integer. We let $(W_t)_{t\in [0,T]}:=(W_t^1,\ldots,W_t^d)_{t\in [0,T]}$ be a $d$-dimensional standard Brownian motion defined on a filtered probability space $(\Omega,\mathcal{F}, \mathbb{F}:=(\mathcal{F}_t)_{t\in [0,T]},\mathbb{P})$ where $\mathbb{F}$ denotes the natural filtration of $W$ (completed and right-continuous) and $\mathcal{F}=\mathcal{F}_T$. Throughout this paper, "$\mathbb{F}$-predictable" (resp. $\mathbb{F}$-adapted) processes will be referred to as predictable (resp. adapted) processes. For later use we set for $p\geq 1$: $$ \mathbb{S}^{p}:=\left\{(Y_t)_{t\in [0,T]} \textrm{ continuous adapted one-dimensional process}, \, \mathbb{E}\left[\sup_{t\in [0,T]} |Y_t|^p\right]<+\infty \right\}, $$ $$ \mathbb{H}^{p}(\mathbb{R}^m):=\left\{(Z_t)_{t\in [0,T]} \textrm{ predictable } m\textrm{-dimensional process}, \, \mathbb{E}\left[\left(\int_0^T \|Z_t\|^2 dt\right)^{p/2}\right]<+\infty \right\},$$ where $\|\cdot\|$ denotes the Euclidean norm on $\mathbb{R}^m$ ($m\geq 1$). For any element $Z$ of $\mathbb{H}^1(\mathbb{R}^d)$, we set $\int_0^\cdot Z_s dW_s:=\sum_{i=1}^d \int_0^\cdot Z_s^i dW_s^i$. We also set $L^p:=L^p(\Omega,\mathcal{F}_T,\mathbb{P})$.\\\\ \noindent Let $\lambda:=(\lambda_t)_{t\in [0,T]}$ be a one-dimensional non-negative predictable process. For convenience we set $\Lambda_t:=\int_0^t \lambda_s ds, \; t\in [0,T].$ We make the following \\\\ \textbf{Standing assumption on $\lambda$}: $$ \Lambda_t<+\infty, \; \forall t<T, \; \textrm{ and } \Lambda_T=+\infty, \; \mathbb{P}-a.s.$$ The typical example we have in mind is a coefficient $\lambda$ of the form $\lambda_t:=(e^{\gamma (T-t)}-1)^{-1}$ as in the introductory example \eqref{mjareq:EKred}, or the intensity process related to a prescribed random time $\tau$ in the context of enlargement of filtration as presented in \cite{mjarJNR}. In this note, we aim at studying BSDEs of the form: \begin{equation} \label{mjareq:proto} Y_t=A-\int_t^T [\varphi_s + \lambda_s f(Y_s)] ds -\int_t^T Z_s dW_s, \quad t\in [0,T], \end{equation} where $A$ is a regular enough $\mathcal{F}_T$-measurable random variable, $f:\mathbb{R} \to \mathbb{R}$ is a deterministic map and $\varphi$ is a predictable process with some integrability conditions to be specified. Before going further, we would like to stress that, in contradistinction to the classical case where $\lambda$ is bounded (and $A$ and $\varphi$ are square-integrable), the space $\mathbb{S}^{2}\times \mathbb{H}^{2}(\mathbb{R}^d)$ is no longer the natural space for solutions of our BSDEs. For instance if $f(x):=x$, the fact that $(Y,Z)$ belongs to $\mathbb{S}^{2}\times \mathbb{H}^{2}(\mathbb{R}^d)$ does not guarantee that $$ \mathbb{E}\left[ \int_0^T |\lambda_s Y_s|^p ds\right]<+\infty$$ for some $p\geq 1$ (which would be immediately satisfied with $p=2$ if $\lambda$ were bounded), leading to a possible definition problem for the term $\int_0^t \lambda_s Y_s ds$ in equation \eqref{mjareq:proto}.
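\begin{remark} As a simple illustration (the short computation below is included only for the reader's convenience and is not used in the sequel), the standing assumption is readily verified for the prototype coefficient $\lambda_t:=(e^{\gamma (T-t)}-1)^{-1}$ with $\gamma>0$: a direct integration gives, for every $t<T$, $$ \Lambda_t=\int_0^t \frac{ds}{e^{\gamma (T-s)}-1}=\frac{1}{\gamma}\log\left(\frac{1-e^{-\gamma T}}{1-e^{-\gamma (T-t)}}\right)<+\infty,$$ and since $1-e^{-\gamma (T-t)}\to 0$ as $t\to T$ we indeed have $\Lambda_T=+\infty$. Note that $\lambda_t\sim (\gamma (T-t))^{-1}$ near $T$, so that the divergence of $\Lambda$ is only logarithmic. \end{remark}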
For this reason we make very precise the notion of solution in our context. \begin{definition}[Solution] \label{mjarsolution} Let $A$ be an element of $L^1$ and $f:\Omega\times [0,T] \times \mathbb{R} \times \mathbb{R}^d \to \mathbb{R}$ such that for any $(y,z)$ in $\mathbb{R}\times \mathbb{R}^d$ the stochastic process $(t,\omega) \mapsto f(t,y,z)$ (where as usual we omit the $\omega$-variable in the expression of $f$) is progressively measurable. We say that a pair of predictable processes $(Y,Z)$ with values in $\mathbb{R}\times\mathbb{R}^d$ is a solution to the BSDE \begin{equation} \label{mjareq:BSDEtheo1} Y_t=A-\int_t^T f(s,Y_s,Z_s) ds -\int_t^T Z_s dW_s, \quad t\in [0,T], \end{equation} if \begin{equation} \label{mjareq:BSDEtheo2} \mathbb{E}\left[\int_0^T |f(t,Y_t,Z_t)| dt + \left(\int_0^T \|Z_t\|^2 dt\right)^{1/2}\right]<+\infty, \end{equation} and Relation \eqref{mjareq:BSDEtheo1} is satisfied for any $t$ in $[0,T]$, $\mathbb{P}$-a.s. \end{definition} \begin{remark} \label{mjarrk:D} This notion of solution is related to the theory of $L^1$-solution (see e.g. \cite[Definition 2.1]{mjarBriandDelyonHuPardouxStoica} or \cite{mjarConfortolaFuhrmanJacod,mjarmjarConfortolaFuhrmanJacod_multijumps}) where in Relation \eqref{mjareq:BSDEtheo2} the expectation is replaced by a $\mathbb{P}$-a.s. criterion. The fact that $Z$ is an element of $\mathbb{H}^1(\mathbb{R}^d)$ implies that the martingale $\int_0^\cdot Z_s dW_s$ is uniformly integrable. Combining this property with the $(\Omega \times [0,T],\mathbb{P}\otimes dt)$-integrability of $f(\cdot,Y,Z)$, it immediately follows that the solution process $Y$ is of class (D) (which then finds similarities with the notion of solution used in \cite{mjarBriandDelyonHuPardouxStoica}). \end{remark} \begin{remark} We would like to stress that even in the case where the terminal condition $A$ is in $L^2$ we do not require $Y$ to be an element of $\mathbb{S}^2$. This fact bears some similarities with the papers \cite{mjarConfortolaFuhrmanJacod,mjarmjarConfortolaFuhrmanJacod_multijumps} and with \cite[Section 6]{mjarBriandDelyonHuPardouxStoica}. \end{remark} \begin{remark}[Classical $L^2$ setting] If $f$ is uniformly (in time) Lipschitz in $(y,z)$ and if $\mathbb{E}\left[|A|^2+\int_0^T |f(s,0,0)|^2 ds\right]<+\infty$, then the fact that there exists $(Y,Z)$ in $\mathbb{S}^{2}\times \mathbb{H}^{2}(\mathbb{R}^d)$ satisfying \eqref{mjareq:BSDEtheo1} implies that the process $f(\cdot,Y_\cdot,Z_\cdot)$ is in $\mathbb{H}^2(\mathbb{R}^d)$ and thus Relation \eqref{mjareq:BSDEtheo2} is satisfied. \end{remark} Another important issue in our context is uniqueness. The uniqueness for the $Z$ component will be understood in the $\mathbb{H}^1(\mathbb{R}^d)$ sense. Concerning the $Y$ component, since we do not impose $Y$ to belong to $\mathbb{S}^1$ we will say that $Y^1=Y^2$ if the processes are indistinguishable (by definition of a solution, both processes are continuous, and hence uniqueness boils down to require $Y^1$ to be a modification of $Y^2$). This definition for uniqueness in our very special setting coincides with the notion of uniqueness with respect to a particular norm. More precisely, according to Remark \ref{mjarrk:D} a solution process $Y$ is of class (D). 
This space can be naturally equipped with the norm $\|\cdot \|_{(D)}$ defined as\footnote{This norm is referred as $\|\cdot\|_1$ in \cite[Definition VI.20]{mjarDellacherieMeyer_2}, we do not use this notation here to avoid any confusion.}: $$ \|X\|_{(D)}:=\sup_{\tau \in \cal T} \mathbb{E}[|X_\tau|], \quad X \textrm{ of class } (D),$$ where $\mathcal{T}$ denotes the set of stopping time smaller or equal to $T$. By \cite[Theorem IV.86]{mjarDellacherieMeyer_1}, uniqueness with respect to the norm $\|\cdot \|_{(D)}$ is equivalent to indistinguishability. \\\\ From now on, by solution to a BSDE we mean a solution in the sense of Definition \ref{mjarsolution}. For any pair of ($\mathcal{F}_T$-measurable) random variables $(A,B)$, we write $A\not\equiv B$ if $\mathbb{P}[A \neq B]>0$. Similarly, $A=B$, $\mathbb{P}$-a.s. will be denoted as $A \equiv B$. Throughout this paper $C$ will denote a generic constant which can differ from line to line. \section{Affine equations with exploding coefficients} \label{mjarsection:affine} As the reader will figure out later, it seems pretty complicated to define a general theory since many situations (non-existence, non-uniqueness) can be found under our assumption on $\lambda$ for BSDEs of the form \eqref{mjareq:proto}. These very different behaviours can be clearly illustrated by studying affine equations, that is when $f$ in \eqref{mjareq:proto} stands for the identity (or minus the identity). In some sense, our results find immediate counterparts in the deterministic realm while considering the corresponding ODEs when all the coefficients of the equation are deterministic. However, for this latter case, techniques of time-reversion can be employed to provide immediate results which unfortunately can not be applied in the stochastic framework due to the measurability feature of the solution to a BSDE calling for different techniques.\\\\ \noindent In this section, we consider stochastic affine BSDEs of one of the following forms: \begin{equation} \label{mjareq:l1} Y_t=A- \int_t^T (\varphi_s -\lambda_s Y_s) ds -\int_t^T Z_s dW_s; \quad t\in [0,T], \end{equation} \begin{equation} \label{mjareq:l2} Y_t=A- \int_t^T (\varphi_s +\lambda_s Y_s) ds -\int_t^T Z_s dW_s; \quad t\in [0,T], \end{equation} \noindent We start with Equation \eqref{mjareq:l1}. \begin{prop} \label{mjarprop:aff-} Let $A$ be in $L^1$ and $\varphi:=(\varphi_t)_{t\in [0,T]}$ be an element of $\mathbb{H}^1(\mathbb{R})$. The Brownian BSDE \begin{equation} \label{mjareq:aff-} dY_t= (\varphi_t-\lambda_t Y_t) dt + Z_t dW_t; \quad Y_T=A. \end{equation} admits no solution if $A\not\equiv 0$. If $A\equiv 0$, the BSDE \eqref{mjareq:aff-} may admit infinitely many solutions. \end{prop} \begin{proof} \textbf{Step 1: non-existence of solution if $A\not\equiv 0$\\\\} Let $(Y,Z)$ be a solution to \eqref{mjareq:aff-}. Assume there exists a set $\mathcal{A}$ in $\mathcal{F}_T$ such that $A>0$ on $\mathcal{A}$. By definition of a solution, it holds that \begin{equation} \label{mjareq:tempaff} \int_0^T |\lambda_s Y_s| ds <\infty, \quad \mathbb{P}\textrm{-a.s.} \end{equation} since $$\int_0^T |\lambda_s Y_s| ds \leq \int_0^T |\varphi_s - \lambda_s Y_s| ds+\int_0^T |\varphi_s| ds.$$ For $\omega$ in $\cal A$, let $t_0(\omega):=\sup \{t\in [0,T], \; Y_t(\omega) <A/2\}$. By continuity of $Y$ and the fact that $Y_T=A$, for $\mathbb{P}$-almost all $\omega$ in $\mathcal{A}$, $t_0(\omega)<T$ and $Y_t(\omega) \textbf{1}_{[t_0(\omega),T]}(t) \geq A/2$. 
Note that $t_0$ is not a stopping time but only a $\mathcal{F}_T$-measurable random variable. As a consequence, on $\cal A$, it holds that $$ \int_0^T | \lambda_s Y_s| ds \geq \int_{t_0}^T \lambda_s Y_s ds \geq A/2 \underset{=+\infty}{\underbrace{\int_{t_0}^T \lambda_s ds}},$$ which contradicts \eqref{mjareq:tempaff}. As a consequence, $A\leq 0$, $\mathbb{P}$-a.s.. Similarly, one proves that $A\geq 0$, $\mathbb{P}$-a.s..\\\\ \textbf{Step 2: Multiplicity of solutions if $A \equiv 0$\\\\} If $A\equiv 0$, we will provide examples of non-uniqueness of solution. Remark that if $(\mathcal{Y}, \mathcal{Z})$ is a particular solution to the BSDE and that $(Y,Z)$ is a solution to the (fundamental) BSDE: \begin{equation} \label{mjareq:fond-} dY_t =-\lambda_t Y_t dt + Z_t dW_t, \quad Y_T=0, \end{equation} then as for ODEs, the sum of any of these fundamental solutions and $\mathcal{Y}$ is a solution to \eqref{mjareq:aff-} (together with the sum of the associated $Z$ processes). In addition, Equation \eqref{mjareq:fond-} admits an infinite number of solutions (like $Y_t=Y_0 e^{-\Lambda_t}$ and $Z\equiv 0$ which is an adapted continuous solution to the BSDE satisfying $\mathbb{E}\left[\int_0^T \lambda_s |Y_s| ds\right]=|Y_0|$ for any chosen real number $Y_0$). An example of particular solution can be given by the process $\mathcal{Y}_t:=- \mathbb{E}\left[\int_t^{T} e^{\int_t^s \lambda_u du} \varphi_s ds\vert \mathcal{F}_t \right]$ if it is well-defined, such that Relation \eqref{mjareq:BSDEtheo2} is satisfied. In that case, the existence of $\mathbb{E}\left[\int_t^T \varphi_s e^{\Lambda_s } ds \vert \mathcal{F}_t\right]$ entails that it converges to $0$ as $t$ goes to $T$, and hence that $\mathcal{Y}_T=0$. One can check that $\mathcal{Y}$ together with the process $\mathcal{Z}:=\tilde Z e^{-\Lambda}$ is solution to \eqref{mjareq:aff-}, where $\tilde Z$ is such that $ \mathbb{E} \left[\int_0^T \varphi_s e^{\Lambda_s} ds \vert \mathcal{F}_t\right] = \mathbb{E}\left[ \int_0^T \varphi_s e^{\Lambda_s} ds\right] -\int_0^t \tilde Z_s dW_s$ ($t\in [0,T]$). We conclude the proof with an example: set $\varphi_t:=e^{-\Lambda_t}$. With this choice, the process $\mathcal{Y}$ satisfies all the requirements above providing infinitely many solutions to \eqref{mjareq:aff-}. \end{proof} Note that in the previous proof the non-existence when $A\not\equiv 0$ relies on the assumption that $\int_0^T |\varphi_s| ds<+\infty, \;\mathbb{P}-a.s.$ If the latter is not satisfied, one may find existence of solutions for $A \not\equiv 0$ as the following proposition illustrates in the deterministic case. \begin{prop} \label{mjarprop:ODE} \label{mjarprop:affdet-} Let $A$ be a given constant and $\varphi:=(\varphi_t)_{t\in [0,T]}$ be a deterministic map. We assume that $\lambda$ is a deterministic function such that $\Lambda_t=\int_0^t \lambda _sds<+\infty$, for $t<T$, and $\int_0^T \lambda _sds=+\infty$. Then \begin{itemize} \item[(i)] If $e^{-\Lambda_t} \int_0^t e^{\Lambda _s}\varphi _s ds$ converges to $C$ when $t$ goes to $T$, then the ODE \begin{equation} \label{mjareq:affdet-} dY_t= (\varphi_t-\lambda_t Y_t) dt ; \quad Y_T=A. \end{equation} admits no solution if $A\neq C$. If $A=C$, the ODE \eqref{mjareq:affdet-} admits infinitely many solutions given by $Y_t=e^{-\Lambda_t} \left(Y_0+ \int_0^t e^{\Lambda _s}\varphi _s ds\right)$ provided that $\int_0^T |\varphi_t - \lambda_t Y_t| dt<\infty$. \item[(ii)] If $e^{-\Lambda_t} \int_0^t e^{\Lambda _s}\varphi _s ds$ does not converge, the ODE \eqref{mjareq:affdet-} has no solution. 
\end{itemize} \end{prop} \begin{remark} \label{mjarrk:34} Note that the assumption in (i) of Proposition \ref{mjarprop:affdet-} with $C\neq 0$ can be met only if $\int_0^T |\varphi_s| ds=+\infty$. Indeed, assume that $\int_0^T |\varphi_s| ds<\infty$. Let $\varepsilon>0$ and $t<T$. We have: \begin{align*} e^{-\Lambda_t} \left| \int_0^t e^{\Lambda_s} \varphi_s ds \right|&\leq e^{-\Lambda_t} \int_0^{t-\varepsilon} e^{\Lambda_s} |\varphi_s| ds + e^{-\Lambda_t} \int_{t-\varepsilon}^t e^{\Lambda_s} |\varphi_s| ds\\ &\leq e^{-\Lambda_t} e^{\Lambda_{t-\varepsilon}} \int_0^T |\varphi_s| ds + \int_{t-\varepsilon}^t |\varphi_s| ds. \end{align*} Hence, letting $t$ go to $T$, we get $ \limsup_{t\to T} e^{-\Lambda_t} \left|\int_0^t e^{\Lambda_s} \varphi_s ds\right| \leq \int_{T-\varepsilon}^T |\varphi_s| ds,$ and, $\varepsilon>0$ being arbitrary, $$ \lim_{t\to T} e^{-\Lambda_t} \left|\int_0^t e^{\Lambda_s} \varphi_s ds\right| =0,$$ which is incompatible with a non-zero limit $C$. \end{remark} \begin{remark} Since $\lambda$ is unbounded, assuming $A, \lambda$ and $\varphi$ to be deterministic in Equation \eqref{mjareq:l1} does not lead to deterministic solutions (and so differs from the ODE framework of Proposition \ref{mjarprop:ODE}) as the following example illustrates. Assume $A\equiv 0$, $\varphi\equiv 0$ and $\lambda$ is a deterministic mapping. Then for any element $\beta:=(\beta_t)_{t\in [0,T]}$ in $\mathbb{H}^1(\mathbb{R}^d)$, the pair of adapted processes $(Y,Z)$ defined as: $$ Y_t:=Y_0 e^{-\Lambda_t} + e^{-\Lambda_t} \int_0^t \beta_s dW_s, \quad Z_t:=e^{-\Lambda_t} \beta_t,\quad t\in [0,T]$$ is a solution to \eqref{mjareq:l1}. This provides in turn a generalization of the fundamental solution to Equation \eqref{mjareq:fond-}. \end{remark} We continue with the BSDE: $$ dY_t= (\varphi_t+\lambda_t Y_t) dt + Z_t dW_t; \quad Y_T=A.$$ \begin{prop} \label{mjarprop:affine1} Let $A$ be in $L^1$ and $\varphi:=(\varphi_t)_{t\in[0,T]}$ be a bounded predictable process. The Brownian BSDE \begin{equation} \label{mjareq:affine1} dY_t= (\varphi_t + \lambda_t Y_t) dt + Z_t dW_t; \quad Y_T=A. \end{equation} admits no solution unless $A\equiv 0$. If $A \equiv 0$, then the BSDE admits a unique solution. \end{prop} \begin{proof} Let $(Y,Z)$ be a solution and set $\tilde{Y}:=Y e^{-\Lambda}-\int_0^\cdot e^{-\Lambda_s} \varphi_s ds$. We have that $$d\tilde{Y}_t = e^{-\Lambda_t} Z_t dW_t, \; \textrm{ and } \tilde{Y}_T=-\int_0^T e^{-\Lambda_s} \varphi_s ds. $$ Hence $\tilde{Y}$ is a local martingale which is of class (D) (by Remark \ref{mjarrk:D} and the boundedness of $\varphi$), hence a uniformly integrable martingale, and $$ \tilde{Y}_t =-\mathbb{E}\left[\int_0^T e^{-\Lambda_s} \varphi_s ds \vert \mathcal{F}_t\right], \quad t\in [0,T],$$ leading to \begin{equation} \label{mjareq:Yfacil} Y_t=-\mathbb{E}\left[\int_t^T e^{-\int_t^s \lambda_u du} \varphi_s ds \vert \mathcal{F}_t\right], \quad t\in [0,T]. \end{equation} In particular, $Y_T=0$. Indeed, since $\varphi$ is bounded and $e^{-\Lambda_s}\leq e^{-\Lambda_t}$ for $s\geq t$, $$e^{ \Lambda_t}\left\vert \int_t^T e^{-\Lambda_s} \varphi_s ds\right\vert \leq e^{ \Lambda_t} e^{-\Lambda_t} \int_t^T |\varphi_s| ds\leq \|\varphi\|_\infty (T-t).$$ This proves that there is no solution to the equation unless $A\equiv 0$. We now assume that $A \equiv 0$. In that case, we prove that the process given by \eqref{mjareq:Yfacil} together with a suitable process $Z$ is a solution to the BSDE.
We begin with the integrability condition $$ \mathbb{E}\left[\int_0^T \vert \lambda_s Y_s \vert ds\right] <+\infty.$$ We have \begin{align*} \mathbb{E}\left[\int_0^T |\lambda_s Y_s| ds\right]&=\mathbb{E}\left[\int_0^T \left|\lambda_s \mathbb{E}\left[\int_s^T e^{-\int_s^u \lambda_r dr} \varphi_u du \vert \mathcal{F}_s\right] \right| ds\right]\\ &\leq \mathbb{E}\left[\int_0^T \lambda_s e^{\Lambda_s} \int_s^T e^{-\Lambda_u} |\varphi_u| du ds\right]\\ &=\mathbb{E}\left[ \lim_{s \to T, s<T} [e^{\Lambda_s} \int_s^T e^{-\Lambda_u} |\varphi_u| du] - \int_0^T e^{-\Lambda_u} |\varphi_u| du + \int_0^T e^{\Lambda_s} e^{-\Lambda_s} |\varphi_s| ds \right]\\ &<+\infty, \end{align*} where we have used the estimate $e^{-\Lambda_u} \leq e^{-\Lambda_s}$ for $u\geq s$. We now turn to the definition of the $Z$ process in the equation. Consider the $L^2$ martingale $\hat L$ defined as: $$ \hat L_t:=\mathbb{E}\left[ \int_0^T e^{-\Lambda_s } \varphi_s ds \vert \mathcal{F}_t\right], \quad t\in [0,T].$$ By the martingale representation theorem, there exists a process $\hat Z$ in $\mathbb{H}^2(\mathbb{R}^d)$ such that $\hat L_t=\hat L_0+\int_0^t \hat Z_s dW_s$. Now let $Z_t:=-e^{\Lambda_t} \hat Z_t$ and $L_t:=\int_0^t Z_s dW_s$ which is a local martingale. With this definition, it is clear that the pair $(Y,Z)$ has the dynamics: $$ dY_t=(\varphi_t +\lambda_t Y_t) dt +Z_t dW_t, \quad t\in [0,T].$$ Note that a priori $\int_0^\cdot Z_s dW_s$ is only a local martingale. From the equation, there exists a constant $C>0$ such that $$ \mathbb{E}\left[\sup_{t\in [0,T]} \left|\int_0^t Z_s dW_s\right|\right] \leq C \left(2\mathbb{E}[\sup_{t\in [0,T]} |Y_t|] + T\|\varphi\|_{\infty} + \mathbb{E}\left[\int_0^T |\lambda_s Y_s| ds\right]\right) <\infty,$$ since by definition $Y$ is bounded. Hence $Z$ is an element of $\mathbb{H}^1(\mathbb{R}^d)$ by Burkholder-Davis-Gundy's inequality. Finally note that this argument provides uniqueness of the solution since we have characterized any solution via the process $\tilde Y$. \end{proof} \begin{remark} Up to a Girsanov transformation, the previous result can be generalized to equations of the form: $$Y_t=A- \int_t^T (\varphi_s + \sigma_t Z_t-\lambda_s Y_s) ds -\int_t^T Z_s dW_s; \quad t\in [0,T],$$ $$Y_t=A- \int_t^T (\varphi_s + \sigma_t Z_t +\lambda_s Y_s) ds -\int_t^T Z_s dW_s; \quad t\in [0,T],$$ where $\sigma:=(\sigma_t)_{t\in [0,T]}$ is any bounded predictable process. In particular, our results contain the motivating example \eqref{mjareq:proto} from \cite{mjarElKaroui_redbook}. \end{remark} \section{A class of non-linear equations} \label{mjarsection:nonlinear} From the results of Section \ref{mjarsection:affine} it appears clearly that there is no hope to provide a general theory for equations of the form \eqref{mjareq:proto} with a non-integrable coefficient $\lambda$. However, motivated by financial applications, we need to prove that the particular equation \eqref{mjareq:proto} with $f(x):=\alpha^{-1}(1-e^{-\alpha x})$ admits a unique solution if and only if $Y_T=0$. In addition, in order to provide a complete answer to the financial problem associated to this equation, we need to prove that the process $Y$ is bounded and that the martingale $\int_0^\cdot Z_s dW_s$ is a BMO-martingale (whose definition will be recalled below). This section is devoted to the study of a class of equations which generalizes this particular case. We start with a generalization of Proposition \ref{mjarprop:aff-}. 
\begin{prop} \label{mjarprop:expo1} Let $\varphi$ be an element of $\mathbb{H}^1(\mathbb{R})$ and $A$ in $L^1$. Let $f:\mathbb{R} \to \mathbb{R}$ be an increasing (respectively decreasing) map with $f(0)=0$. The BSDE \begin{equation} \label{mjareq:nonlin1} Y_t=A-\int_t^T [\varphi_s +\lambda_s f(Y_s)] ds -\int_t^T Z_s dW_s, \quad t\in [0,T] \end{equation} admits no solution if $A\not\equiv 0$. \end{prop} \begin{proof} The proof follows the lines of the one of Proposition \ref{mjarprop:aff-} and of Proposition \ref{mjarprop:affine1}. \end{proof} \begin{remark} Note that the previous result does not contradict the conclusion of Proposition \ref{mjarprop:affdet-} in the deterministic setting, since according to Remark \ref{mjarrk:34} the assumption of (i) in Proposition \ref{mjarprop:affdet-} on $\lambda$ is not compatible with the $\mathbb{H}^1(\mathbb{R})$-requirement of Proposition \ref{mjarprop:expo1}. \end{remark} The following lemma will be of interest for proving the main result of this section. \begin{lemma} \label{mjarlemma:expo} Let $f:\mathbb{R} \to \mathbb{R}$ satisfying $f(0)=0$, $f$ is non-decreasing and $f(x)- x\leq 0, \; \forall x\in \mathbb{R}$. Then the equation \begin{equation} \label{mjareq:sansphi} Y_t=0-\int_t^T \lambda_s f(Y_s) ds -\int_t^T Z_s dW_s, \quad t\in [0,T] \end{equation} admits $(0,0)$ as unique solution. \end{lemma} \begin{proof} It is clear that $(0,0)$ solves \eqref{mjareq:sansphi}. Let $(Y,Z)$ be any solution and $\tilde Y:=e^{-\Lambda} Y$. It holds that $\tilde Y_T=0$ and that $$ d\tilde Y_t = \lambda_t e^{-\Lambda_t} (-Y_t+f(Y_t)) dt + e^{-\Lambda_t} Z_t dW_t.$$ Since $ f(x)-x \leq 0$, for all $x\in \mathbb R$, $\tilde{Y}_t \geq 0.$ Hence by definition, $Y_t\geq 0, \; \forall t\in [0,T]$, $\mathbb{P}$-a.s. From Equation \eqref{mjareq:sansphi}, since $Y\geq 0$ we have that $f(Y_t)\geq0$ which implies that $$ Y_t =0-\mathbb{E}\left[\int_t^T \lambda_s f(Y_s) ds \vert \mathcal{F}_t \right] \leq 0, \quad \forall t\in [0,T], \; \mathbb{P}-a.s.$$ As a consequence $Y_t= 0$ for all $t$, $\mathbb{P}$-a.s. which in turn gives $Z = 0$ (in $\mathbb{H}^1(\mathbb{R}^d)$), which concludes the proof. \end{proof} We now consider a class of nonlinear BSDEs. \begin{theorem} \label{mjarth:expo2} Let $\varphi$ be a non-negative bounded predictable process and $f:\mathbb{R} \to \mathbb{R}$ a continuously differentiable map satisfying: $f(0)=0$, $f$ is non-decreasing, there exists $\delta>0$ such that $$f(x)-x\leq 0, \; \forall x\in \mathbb{R} \textrm{ and } f'(x) \geq \delta, \; \forall x \leq 0.$$ Assume in addition that $\mathbb{E}[\Lambda_t]<+\infty, \; \forall t<T$. Then the BSDE \begin{equation} \label{mjareq:expo1} Y_t=A-\int_t^T (\varphi_s +\lambda_s f(Y_s)) ds -\int_t^T Z_s dW_s, \quad t\in [0,T]. \end{equation} admits a solution if and only if $A \equiv 0$. In that case, the solution is unique, $Y$ is bounded and $\int_0^\cdot Z_s dW_s$ is a BMO-martingale, that is: $$ \mathrm{esssup}_{\tau \in \mathcal{T}}\mathbb{E}\left[\int_\tau^T \|Z_s\|^2 ds \vert \mathcal{F}_\tau \right] <\infty,$$ where we recall that $\mathcal{T}$ denotes the set of stopping time smaller or equal to $T$. \end{theorem} \begin{proof} We have seen in Proposition \ref{mjarprop:expo1} that the only possible value for $A$ to admit a solution is $0$. From now on, we assume that $A \equiv 0$.\\\\ \textbf{Step 1: some estimates\\} We start with some estimates on the (possible) solution to the BSDE. Assume that there exists a solution $(Y,Z)$ to Equation \eqref{mjareq:expo1}. 
Since $\varphi$ is non-negative, $(Y,Z)$ is a sub-solution to the BSDE: $$ \mathcal{Y}_t=0-\int_t^T \lambda_s f(\mathcal{Y}_s) ds-\int_t^T \mathcal{Z}_s dW_s, \quad t\in [0,T],$$ which admits $(0,0)$ as unique solution by Lemma \ref{mjarlemma:expo}. Indeed, this sub-solution property is classical for ($L^2$) Lipschitz BSDEs and follows from the comparison theorem. However, here the BSDE \eqref{mjareq:sansphi} is not Lipschitz due to the unboundedness of $\lambda$. In our context the result can be proved explicitly. Since $f(0)=0$ the BSDE \eqref{mjareq:expo1} can be written as\footnote{by: $f(x)-f(y)=(x-y) \int_0^1 f'(y+\theta (x-y)) d\theta$, $(x,y) \in \mathbb{R}^2$} \begin{equation} \label{mjareq:delta} Y_t =0-\int_t^T \tilde{\lambda}_sY_s ds -\int_t^T Z_s dW_s - \int_t^T \varphi_s ds, \quad t\in [0,T], \end{equation} with $\tilde \lambda_t:=\lambda_t \int_0^1 f'(\theta Y_t) d\theta$ which is non-negative. Following the lines of Proposition \ref{mjarprop:affine1} with $\lambda$ replaced by $\tilde\lambda$, and using the non-negativity of $\varphi$, we get that $$ Y_t =-\mathbb{E}\left[\int_t^T e^{-\int_t^s \tilde{\lambda}_u du} \varphi_s ds \vert \mathcal{F}_t\right] \leq 0. $$ From the non-positivity of $Y$, we can deduce that $\tilde \lambda \geq \delta \lambda$ from which we get that $$Y_t\geq-\mathbb{E}\left[\int_t^T e^{-\delta \int_t^s \lambda_u du} \varphi_s ds \vert \mathcal{F}_t\right], \quad t\in [0,T].$$ To summarize, we have proven that \begin{equation} \label{mjareq:expoesti} -(T-t) \|\varphi\|_\infty \leq -\mathbb{E}\left[\int_t^T e^{-\delta\int_t^s \lambda_u du} \varphi_s ds \vert \mathcal{F}_t\right] \leq Y_t \leq 0, \quad \forall t \in [0,T], \; \mathbb{P}-a.s. \end{equation} We now prove that the process $\int_0^\cdot Z_s dW_s$ is a BMO-martingale. Let $\tau$ be any stopping time such that $\tau\leq T$. By It\^o's formula, we have that $$ |Y_\tau|^2 = 0-2 \int_\tau^T \varphi_s Y_s ds - 2 \int_\tau^T Y_s Z_s dW_s -\int_\tau^T \|Z_s\|^2 ds -2\int_\tau^T Y_s f(Y_s) \lambda_s ds.$$ Since $Y$ is bounded and $Z$ is an element of $\mathbb{H}^{1}(\mathbb{R}^d)$, the stochastic integral process is a true martingale, and since $Y$ is non-positive, the last term of the previous expression is non-positive. As a consequence, it holds that $$ \mathbb{E}\left[\int_\tau^T \|Z_s\|^2 ds \vert \mathcal{F}_\tau \right] \leq -2 \mathbb{E}\left[\int_\tau^T \varphi_s Y_s ds \vert \mathcal{F}_\tau \right] \leq 2 T^2\|\varphi\|_\infty^2,$$ So the claim is proved.\\\\ \textbf{Step 2: existence\\} Now, we prove the existence of a solution for the BSDE \eqref{mjareq:expo1}. For any positive integer $n$, we set $\lambda_\cdot^n:=\lambda_\cdot \wedge n$, $\tilde{f}(x):=f(x) \textbf{1}_{[-T \|\varphi\|_\infty, 0]}(x) +f(-T \|\varphi\|_\infty) \textbf{1}_{(-\infty,-T \|\varphi\|_\infty)}(x)$, and $(Y^n,Z^n)$ the unique (classical) solution in $\mathbb{S}^2 \times \mathbb{H}^2(\mathbb{R}^d)$ to the BSDE \begin{equation} \label{mjareq:Yapproxim} Y_t^n = 0-\int_t^T (\varphi_s +\tilde f(Y_s^n) \lambda_s^n) ds -\int_t^T Z_s^n dW_s, \quad t\in [0,T]. \end{equation} It is clear that this equation admits a unique solution since $\tilde f$ is Lipschitz continuous and $\lambda^n$ is bounded. In addition, by definition, $\tilde f(Y_s^n) \leq 0$, and so $Y_t^n \geq -\|\varphi\|_{\infty} (T-t)$. Thus $(Y^n,Z^n)$ solves the same equation with $\tilde f$ replaced by $\hat f(x):=f(x) \textbf{1}_{\{x\leq 0\}}$. Note that $\hat f(x) \leq x$ for any $x$ in $\mathbb{R}$.
Since $\varphi$ is non-negative, $Y^n$ is a classical sub-solution to the BSDE \eqref{mjareq:sansphi} with $f$ replaced by $\hat f$, and so by Lemma \ref{mjarlemma:expo} we deduce that $Y_t^n\leq 0$. Thus \begin{equation} \label{mjareq:temp12} |Y_t^n| \leq (T-t) \|\varphi\|_\infty, \quad \forall t \in [0,T], \; \mathbb{P}-a.s. \end{equation} Hence we can re-write Equation \eqref{mjareq:Yapproxim} as: \begin{equation} \label{mjareq:Yapproximbis} Y_t^n = 0-\int_t^T (\varphi_s +f(Y_s^n) \lambda_s^n) ds -\int_t^T Z_s^n dW_s, \quad t\in [0,T]. \end{equation} Repeating the same argument used in the previous step we can prove that \begin{equation} \label{mjareq:boundZn} \sup_{n} \mathbb{E}\left[\int_0^T \|Z_t^n\|^2 dt\right] <\infty. \end{equation} By the comparison theorem for Lipschitz BSDEs the sequence $(Y^n)_n$ is non-decreasing. Hence it converges pointwise to some element $Y:=\limsup_{n\to \infty} Y^n$. We would like to point out at this stage that by construction $Y$ takes values in $[-T\|\varphi\|_\infty,0]$. In view of Dini's theorem, to obtain convergence uniformly in time, we need to prove that $Y$ is continuous. This is done in two steps. Fix $0<t_0<T$, $n\geq 1$ and $p,q \geq n$. We show that the sequence $(Y^n \textbf{1}_{[0,t_0]})_n$ is a Cauchy sequence in $\mathbb{S}^2$. Let $\delta Y:=Y^p-Y^q$, $\delta Z:=Z^p-Z^q$. It\^o's formula gives for every $t\in [0,t_0]$ that \begin{equation} \label{mjareq:apriori1} |\delta Y_t|^2 + \int_t^{t_0} \|\delta Z_s\|^2 ds \leq |\delta Y_{t_0}|^2 -2 \int_t^{t_0} \delta Y_s f(Y_s^q) (\lambda_s^p-\lambda_s^q) ds - 2 \int_t^{t_0} \delta Y_s \delta Z_s dW_s, \end{equation} where we have used the fact that $\delta Y_s (f(Y_s^p)-f(Y_s^q))\geq 0$ since $f$ is non-decreasing. From this relation we deduce in particular for $t=0$ that \begin{equation} \label{mjareq:apriori2} \mathbb{E}\left[\int_0^{t_0} \|\delta Z_s\|^2 ds\right] \leq C \mathbb{E}\left[ |\delta Y_{t_0}|^2 + \int_0^{t_0} |\lambda_s^p-\lambda_s^q| ds \right], \end{equation} since $Y^p$ and $Y^q$ are uniformly (in $p,q$) bounded. Taking the supremum over $[0,t_0]$ in Relation \eqref{mjareq:apriori1} leads to \begin{align*} &\mathbb{E}[\sup_{t\in [0,t_0]} |\delta Y_t|^2] \\ &\leq C \left(\mathbb{E}[|\delta Y_{t_0}|^2] +\mathbb{E}\left[\int_0^{t_0} |\delta Y_s f(Y_s^q)| |\lambda_s^p-\lambda_s^q| ds + \sup_{t\in [0,t_0]} \left|\int_t^{t_0} \delta Y_s \delta Z_s dW_s\right|\right]\right)\\ &\leq C \left(\mathbb{E}[|\delta Y_{t_0}|^2] +\mathbb{E}\left[\int_0^{t_0} |\lambda_s^p-\lambda_s^q| ds\right] + \mathbb{E}\left[\left(\int_0^{t_0} |\delta Y_s|^2 \|\delta Z_s\|^2 ds\right)^{1/2}\right]\right)\\ &\leq C \left(\mathbb{E}[|\delta Y_{t_0}|^2] +\mathbb{E}\left[\int_0^{t_0} |\lambda_s^p-\lambda_s^q| ds\right]\right) + \frac12 \mathbb{E}\left[\sup_{t\in [0,t_0]} |\delta Y_t|^2\right] +\frac{C^2}{2} \mathbb{E}\left[\int_0^{t_0} \|\delta Z_s\|^2 ds\right], \end{align*} where we have used the fact that $|\delta Y_s f(Y_s^q)|$ is bounded uniformly in $p,q$, the Burkholder inequality and the inequality $C a b \leq \frac12 a^2+ \frac{C^2 b^2}{2}$. Combining the previous estimate with Estimate \eqref{mjareq:apriori2} proves that $$ \mathbb{E}[\sup_{t\in [0,t_0]} |\delta Y_t|^2] \leq C \left(\mathbb{E}[|\delta Y_{t_0}|^2] +\mathbb{E}\left[\int_0^{t_0} |\lambda_s^p-\lambda_s^q| ds\right]\right),$$ where $C$ does not depend on $p,q$.
Recalling the definition of $\delta Y=Y^p-Y^q$ it follows that $$ \lim_{n\to \infty} \sup_{p,q\geq n} \mathbb{E}[\sup_{t\in [0,t_0]} |\delta Y_t|^2] \leq C\lim_{n\to \infty} \left(\mathbb{E}[|Y_{t_0}^n-Y_{t_0}|^2] +\mathbb{E}\left[\int_0^{t_0} |\lambda_s^n-\lambda_s| ds\right]\right) =0,$$ by Lebesgue's dominated convergence Theorem (since $\mathbb{E}[\Lambda_{t_0} ]<\infty$)\footnote{Here we did not use the classical a priori estimates for Lipschitz BSDEs since they would lead to an estimate of the form $\mathbb{E}\left[\int_0^{t_0} |\lambda_s^p-\lambda_s^q|^2 ds\right]$ which is not compatible with our $L^1$ assumption: $\mathbb{E}[\Lambda_{t_0} ]<\infty$.}. Hence $(Y^n \textbf{1}_{[0,t_0]})_n$ is a Cauchy sequence in $\mathbb{S}^2$ which thus converges to $Y \textbf{1}_{[0,t_0]}$. So $Y$ is continuous on $[0,t_0]$ for any $t_0<T$. It remains to prove that $Y$ is continuous at $T$. Let $\varepsilon>0$. By Inequality \eqref{mjareq:temp12} it holds that \begin{align*} |Y_{T-\varepsilon}| = \lim_{n \to \infty} |Y^n_{T-\varepsilon}| \leq \varepsilon \|\varphi\|_\infty, \end{align*} proving that $Y$ is continuous at $T$. Hence, $(Y^n)_n$ is a non-decreasing sequence of continuous bounded processes converging to a continuous process $Y$, thus by Dini's Theorem, $(Y_n)_n$ converges in $\mathbb{S}^2$ to $Y$.\\\\ \noindent We now prove that $Y$ together with a suitable process $Z$ solves the BSDE \eqref{mjareq:expo1}. To this end, we aim at applying \cite[Theorem 1]{mjarBarlow_Protter}. We have obtained already that $\lim_{n\to\infty} \mathbb{E}[\sup_{t\in [0,T]} |Y_t^n-Y_t|]=0$. To satisfy the assumptions of \cite[Theorem 1]{mjarBarlow_Protter}, we have to prove that for every $n$ \begin{equation} \label{mjareq:BPa1} \sup_n \mathbb{E}\left[\left(\int_0^T \|Z_s^n\|^2 ds\right)^{1/2}\right] \leq C \end{equation} (which by Burkholder's inequality implies that $\mathbb{E}\left[\sup_{t\in [0,T]} \left|\int_0^t Z_s^n dW_s\right|\right] \leq C$ for every $n$) and that \begin{equation} \label{mjareq:BPa2} \sup_n \mathbb{E}\left[\int_0^T \left|\lambda_s^n f(Y_s^n) \right| ds\right] \leq C, \quad \forall n\geq 1, \end{equation} since the process $\int_0^\cdot \lambda_s^n f(Y_s^n) ds$ is non-increasing (recall that $Y^n\leq 0$ and the assumptions on $f$). Relation \eqref{mjareq:BPa1} is a direct consequence of \eqref{mjareq:boundZn}. With this estimate at hand we can deduce Relation \eqref{mjareq:BPa2}. Indeed, using Equation \eqref{mjareq:Yapproximbis} and the uniform estimates on the $Y^n$ obtained above we deduce that \begin{align*} \mathbb{E}\left[\int_0^T \vert \lambda_s^n f(Y_s^n) \vert ds\right]&= \mathbb{E}\left[\left\vert \int_0^T \lambda_s^n f(Y_s^n) ds\right\vert\right]\\ &= \mathbb{E}\left[\left\vert Y_0^n + \int_0^T \varphi_s ds +\int_0^T Z_s^n dW_s\right\vert\right]\leq C, \quad n \geq 1, \end{align*} where $C$ depends only on $T$ and $\|\varphi\|_\infty$ (and not on $n$). Thus, by \cite[Theorem 1]{mjarBarlow_Protter}, $Y$ writes down as $Y_t=A_t+\int_0^t \varphi_s ds +\int_0^t Z_s dW_s$, with $Z \in \mathbb{H}^1(\mathbb{R}^d)$, and \begin{equation} \label{mjareq:limA} \lim_{n\to \infty} \mathbb{E}[\sup_{t\in [0,T]} |A_t-\int_0^t \lambda_s^n f(Y_s^n) ds| ] =0. \end{equation} We now identify the process $A$. We proceed in two steps: first we prove that $A_t=\int_0^t f(Y_s) \lambda_s ds$ for $t<T$ and then we prove the relation for $t=T$. Fix $t<T$. 
We have that \begin{align*} &\left|\int_0^t f(Y^n_s) (\lambda_s^n-\lambda_s) ds \right\vert\\ &\leq C \int_0^t |\lambda_s^n-\lambda_s| ds \to_{n\to\infty} 0, \quad \mathbb{P}-a.s. \end{align*} by the monotone convergence theorem, since the $Y^n$ are uniformly bounded and $\Lambda_t<\infty$, $\mathbb{P}$-a.s. Hence up to a subsequence, $$ \lim_{n\to \infty} \left|A_t - \int_0^t f(Y^n_s) \lambda_s ds\right|=0.$$ Recalling that $Y^n\leq Y$, we have that \begin{align*} &\left|\int_0^t (f(Y^n_s)-f(Y_s)) \lambda_s ds \right\vert\\ &\leq C \int_0^t |Y^n_s-Y_s| \lambda_s ds \to_{n\to\infty} 0 \end{align*} where once again we have used monotone convergence Theorem. This leads to $$ A_t = \int_0^t f(Y_s) \lambda_s ds,\quad \mathbb{P}-a.s.$$ for any $t<T$. The relation for $t=T$ follows from the continuity of $A$ by \eqref{mjareq:limA}. Finally according to Definition \ref{mjarsolution} it remains to prove Relation \eqref{mjareq:BSDEtheo2}. This is done as follows by combining the monotone convergence theorem together with \eqref{mjareq:BPa2} and \eqref{mjareq:limA}: $$ \mathbb{E}\left[\int_0^T |f(Y_s)| \lambda_s ds\right] = \lim_{t\to T} \mathbb{E}\left[\int_0^t |f(Y_s)| \lambda_s ds\right] = \lim_{t\to T} \mathbb{E}[|A_t|] <\infty.$$ \textbf{Step 3: uniqueness\\} Assume there exist two solutions $(Y^1,Z^1)$ and $(Y^2,Z^2)$ to the BSDE \eqref{mjareq:expo1}. Then, the difference processes $(\delta Y:=Y^1-Y^2,\delta Z:=Z^1-Z^2)$ satisfies $$\delta Y_t=0-\int_t^T \lambda_s (f(Y_s^1)-f(Y_s^2)) ds -\int_t^T \delta Z_s dW_s, \quad t\in [0,T].$$ From the existence part we know that both processes $Y^1$ and $Y^2$ are uniformly bounded. As a consequence the mapping $f$ restricted to the set $[-T\|\varphi\|_\infty,0]$ has a non-negative derivative. Hence the equation re-writes as: $$\delta Y_t=0-\int_t^T \tilde{\lambda}_s \delta Y_s ds -\int_t^T \delta Z_s dW_s, \quad t\in [0,T],$$ where $\tilde{\lambda}_t:=\lambda_t \int_0^1f'(Y_t^2 +\theta (Y_t^1-Y_t^2)) d\theta$ is a non-negative process which satisfies $\int_0^t \tilde\lambda_s ds <\infty$ for $t < T$, $\mathbb{P}$-a.s. and $\int_0^T \tilde\lambda_s ds=\infty$, $\mathbb{P}$-a.s. Similarly to Proposition \ref{mjarprop:affine1} with $\lambda$ replaced with $\tilde \lambda$, we deduce that $(\delta Y,\delta Z)=(0,0)$ is the unique solution. \end{proof} \begin{remark} Note that our previous result is not contained in the theory of monotonic drivers for BSDEs (see e.g. \cite{mjarPardoux_lingrowth,mjarBriandDelyonHuPardouxStoica} or \cite{mjarFanJiang}) where conditions of the form \cite[(H5) and (H1'')]{mjarBriandDelyonHuPardouxStoica} are not satisfied in our setting due to the non-integrability at $T$ of $\Lambda$. \end{remark} \section*{Acknowledgments} The authors are very grateful to Nicole El Karoui and Jean Jacod and for helpful comments and discussions. The financial support of Chaire Markets in transition (FBF) is acknowledged. \end{document}
\begin{document} \title{b-metric spaces, fixed points and Lipschitz functions} \author{S. Cobza\c{s} } \address{S. Cobza\c{s}, Babe\c{s}-Bolyai University, Department of Mathematics, Cluj-Napoca, Romania} \email{[email protected]} \date{\today} \begin{abstract} The paper is concerned with b-metric and generalized b-metric spaces. One proves the existence of the completion of a generalized b-metric space and some fixed point results. The behavior of Lipschitz functions on b-metric spaces of homogeneous type, as well as of Lipschitz functions defined on, or with values in, quasi-Banach spaces, is studied. MSC2010: 54E25 54E35 47H09 47H10 46A16 26A16 \textbf{Keywords:} metric space, generalized metric space, b-metric space, completion, metrizability, fixed point, quasi-Banach space, Lipschitz mapping \end{abstract} \maketitle \tableofcontents \section*{Introduction} There are a lot of extensions of the notions of metric and metric space -- see for instance the books \cite{Deza}, \cite{Kirk-Shah}, \cite{Rus-PP}, or the survey papers \cite{beri-chob13}, \cite{khamsi15}. In this paper we concentrate on b-metric and generalized b-metric spaces, with emphasis on their topological properties, some fixed point results and Lipschitz functions on such spaces. Some of the results from this paper are included in \cite{cobz-czerw18}. \section{b-metric spaces} In this section we present some results on b-metric spaces. \subsection{Topological properties and metrizability}\label{Ss.bms-top} A \emph{b-metric} on a nonempty set $X$ is a function $d:X\times X\to[0,\infty)$ satisfying the conditions \begin{equation}\label{def.b-metric} \begin{aligned} {\rm (i) } \quad &d(x,y) = 0 \iff x=y;\\ {\rm (ii) } \quad &d(x,y) = d(y,x);\\ {\rm (iii) } \quad &d(x,y) \le s[d(x,z) + d(z,y)], \end{aligned} \end{equation} for all $x,y,z\in X,$ and for some fixed number $s\ge 1.$ The pair $(X,d)$ is called a b-\emph{metric space}. Obviously, for $s=1$ one obtains a metric on $X$. \begin{example} If $(X,d)$ is a metric space and $\beta >1$, then $d^\beta$ is a b-metric on $X$. \end{example} Indeed, \begin{align*} d^\beta(x,y)&\le [d(x,z)+d(z,y)]^\beta\le 2^\beta \left(\max\{d(x,z),d(z,y)\}\right)^\beta\\&\le 2^\beta[d^\beta(x,z)+d^\beta(z,y)]\,. \end{align*} The $s$-relaxed triangle inequality implies \begin{equation}\label{s-relax-n} d(x_0,x_n)\le sd(x_0,x_1)+s^2d(x_1,x_2)+\dots+s^{n-1}d(x_{n-2},x_{n-1})+s^{n-1}d(x_{n-1},x_n)\,, \end{equation} for all $n\in\mathbb N$ and all $x_0,x_1,\dots,x_n\in X$. Indeed, we obtain successively \begin{align*} d(x_0,x_n)&\le sd(x_{0},x_1)+sd(x_{1},x_n)\le sd(x_{0},x_1)+s^2d(x_{1},x_2)+s^2d(x_2,x_n)\le \dots\\ &\le sd(x_0,x_1)+s^2d(x_1,x_2)+\dots+s^{n-1}d(x_{n-2},x_{n-1})+s^{n-1}d(x_{n-1},x_n)\,. \end{align*} Along with the inequality (iii), called the $s$-\emph{relaxed triangle inequality}, one considers also the $s$-\emph{relaxed polygonal inequality} \begin{equation}\label{def.polyg}\tag{{\rm iv}} d(x_0,x_n) \le s[d(x_0,x_1) + d(x_1,x_2)+\dots+d(x_{n-1},x_n)], \end{equation} for all $x_0,x_1,\dots,x_n\in X$ and all $n\in\mathbb N.$ For $n=2$ one obtains the inequality (iii). The following example shows that the converse is not true -- there exist b-metrics that do not satisfy the relaxed polygonal inequality.
\begin{example}[\cite{Kirk-Shah}, Theorem 12.10]\label{ex.polyg} Let $X=[0,1]$ and $d(x,y)=(x-y)^2,\, x,y\in [0,1].$ Then $d$ is a 2-relaxed metric on $X$ which is not polygonally $s$-relaxed for any $s\ge 1.$ \end{example} Indeed, it is easy to check that $d$ satisfies the 2-relaxed triangle inequality. Suppose that, for some $s\ge 1,$ $d$ satisfies the $s$-relaxed polygonal inequality. Taking $x_i=\frac in,\, 1\le i\le n-1,$ we obtain $$ \frac1s= \frac 1s\cdot d(0,1)\le d(0,x_1)+d(x_1,x_2)+\dots+d(x_{n-1},1)=n\cdot\left(\frac1n\right)^2=\frac1n\,,$$ for all $n\in \mathbb N,$ which is impossible. One can consider also an ultrametric version of (iii): \begin{equation}\label{b-ultra}\tag{{\rm iii$'$}} d(x,y)\le\lambda\max\{d(x,z),d(y,z)\}\,, \end{equation} for all $x,y,z\in X$. It is obvious that \begin{align*} \mbox{(iii$'$)}\;&\Longrightarrow\; \mbox{(iii) \;\, with }\; s=\lambda;\\ \mbox{(iii)}\;&\Longrightarrow\; \mbox{(iii$'$) \; with }\; \lambda=2s. \end{align*} The condition \begin{equation}\label{b-metric-eps}\tag{{\rm iii$''$}} \max\{d(x,z),d(y,z)\}\le\varepsilon \;\Longrightarrow\; d(x,y)\le 2\varepsilon\,, \end{equation} for all $\varepsilon >0$ and $x,y,z\in X$, is equivalent to \eqref{b-ultra} with $\lambda =2$. Let now $(X,d)$ be again a b-metric space. One introduces a topology on a b-metric space $(X,d)$ in the usual way. The ``open'' ball $B(x,r)$ of center $x\in X$ and radius $r>0$ is given by \begin{equation*} B(x,r)=\{y\in X : d(x,y)<r\}\,. \end{equation*} A subset $Y$ of $X$ is called open if for every $x\in Y$ there exists a number $r_x>0$ such that $B(x,r_x)\subseteq Y.$ Denoting by $\tau_d$ (or $\tau(d)$) the family of all open subsets of $X$ it follows that $\tau_d$ satisfies the axioms of a topology. This topology is derived from a uniformity $\mathcal U_d$ on $X$ having as basis the sets $$ U_\varepsilon=\{(x,y)\in X\times X : d(x,y)<\varepsilon\},\quad \varepsilon >0\,.$$ The uniformity $\mathcal U_d$ has a countable basis $\{U_{1/n} : n\in\mathbb N\}$ so that, by Frink's metrization theorem (\cite{frink37}), the uniformity $\mathcal U_d$ is derived from a metric $\rho$, hence the topology $\tau_d$ as well. This was remarked in the paper \cite{maci-sego79a}. In \cite{fagin03} it is shown that the topology $\tau_d$ satisfies the hypotheses of the Nagata-Smirnov metrizability theorem. Concerning the metrizability of uniform and topological spaces, see the treatise \cite{Engel}. There exist also direct proofs of the metrizability of the topology of a b-metric space. Let $(X,d)$ be a b-metric space. Put \begin{equation}\label{def.1-metric} \rho(x,y)=\inf\Big\{\sum_{i=1}^nd(x_{i-1},x_i) \Big\}\,,\end{equation} where the infimum is taken over all $n\in\mathbb N$ and all chains $x=x_0,x_1,\dots, x_n=y$ of elements in $X$ connecting $x$ and $y$. As remarked by Frink \cite{frink37}, if a b-metric $d$ satisfies \eqref{b-ultra} for $\lambda =2$, then formula \eqref{def.1-metric} defines a metric equivalent to $d$. We present the result in the form given by Schroeder \cite{schroed06}. \begin{theo}[A.~H. Frink \cite{frink37} and V.~Schroeder \cite{schroed06}] \label{t.Frink} If $d:X\times X\to [0,\infty)$ satisfies the conditions {\rm(i), (ii)} from \eqref{def.b-metric} and \eqref{b-ultra} for some $1\le\lambda \le 2,$ then the function $\rho$ defined by \eqref{def.1-metric} is a metric on $X$ satisfying the inequalities $\frac1{2\lambda} d\le \rho\le d$.
\end{theo} V.~Schroeder \cite{schroed06} also showed that for every $\varepsilon>0$ there exists a b-metric $d$ satisfying \eqref{def.b-metric}.(iii) with $s=1+\varepsilon$ such that the mapping $\rho$ defined by \eqref{def.1-metric} is not a metric. Another example showing the limits of Frink's metrization method was given by An and Dung \cite{an-dung15b}. General results of metrizability were obtained in \cite{aimar98} and \cite{stempak09} by a slight modification of Frink's technique. Let $(X,d)$ be a b-metric space. For $0<p\le 1$ define \begin{equation}\label{def.p-metric} \rho_p(x,y)=\inf\Big\{\sum_{i=1}^nd(x_{i-1},x_i)^p \Big\}\,,\end{equation} where the infimum is taken over all $n\in\mathbb N$ and all chains $x=x_0,x_1,\dots, x_n=y$ of elements in $X$. The function $ \rho_p$ defined by \eqref{def.p-metric} satisfies the conditions \begin{enumerate} \item $\rho_p(x,y)=\rho_p(y,x),$ \item $\rho_p(x,y)\le\rho_p(x,z)+\rho_p(z,y),$ \item $\rho_p(x,y)\le d^p(x,y)$\,, \end{enumerate} for all $x,y,z\in X$, i.e., $\rho_p$ is a pseudometric on $X$ dominated by $d^p$. \begin{theo}[\cite{stempak09}]\label{t.Stemp} Let $d$ be a b-metric on a nonempty set $X$ satisfying the $s$-relaxed triangle inequality \eqref{def.b-metric}.(iii), for some $s\ge 1.$ If the number $p\in (0,1]$ is given by the equation $(2s)^p=2$, then the mapping $\rho_p:X\times X \to [0,\infty)$ defined by \eqref{def.p-metric} is a metric on $X$ satisfying the inequalities \begin{equation}\label{ineq1.Stemp} \rho_p(x,y)\le d^p(x,y)\le 2 \rho_p(x,y)\,, \end{equation} for all $x,y\in X$. The same conclusions hold if $d$ satisfies the conditions (i), (ii) from \eqref{def.b-metric} and \eqref{b-ultra} for some $\lambda \ge 2.$ In this case $0<p\le 1$ is given by $\lambda^p=2$ and the metric $\rho_p$ satisfies the inequalities \begin{equation}\label{ineq2.Stemp} \rho_p(x,y)\le d^p(x,y)\le 4 \rho_p(x,y)\,, \end{equation} for all $x,y\in X$. \end{theo} The inequalities \eqref{ineq1.Stemp} have the following consequences. \begin{corol} Under the hypotheses of Theorem \ref{t.Stemp}, $\tau_d=\tau_\rho,$ that is, the topology of any b-metric space is metrizable, and the convergence of sequences with respect to $\tau_d$ is characterized in the following way: \begin{equation*} x_n\xrightarrow{\tau_d} x \iff d(x,x_n)\longrightarrow 0 \,, \end{equation*} for any sequence $(x_n)$ in $X$ and $x\in X$. \end{corol}\begin{proof} The equality of topologies follows from the inclusions $$ B_d(x,r^{1/p})\subseteq B_\rho(x,r)\;\mbox{ and }\; B_\rho\big(x, 4^{-1}r^p\big)\subseteq B_d(x,r)\,,$$ valid for all $x\in X$ and $r>0$. The statement concerning sequences is a consequence of the equality $\tau_d=\tau_\rho$ and of the inequalities \eqref{ineq1.Stemp}. \end{proof} \begin{remark} In \cite{aimar98} the proof is given for a $p$ satisfying the inequalities \begin{equation}\label{ineq1.Aimar} 1\ge p\ge \left(\log_2(3s)\right)^{-2},\end{equation} while in Theorem \ref{t.Stemp} the result holds for \begin{equation}\label{ineq2.Aimar} p= \left(\log_2(2s)\right)^{-1}.\end{equation} Putting $$ \widetilde p=\sup\{p\in(0,1] : \rho_p\;\mbox{is a metric, Lipschitz equivalent to}\; d^p\}\,,$$ the estimation \eqref{ineq1.Aimar} yields $\widetilde p\ge \left(\log_2(3s)\right)^{-2}$, while from \eqref{ineq2.Aimar} one obtains the better evaluation $\widetilde p\ge \left(\log_2(2s)\right)^{-1}$, which cannot be improved, as it is shown by the example of the spaces $\ell^p$ with $0<p<1$.
A proof of Theorem \ref{t.Stemp} is also given in the book by Heinonen \cite[Prop. 14.5]{Heinonen}, with the evaluation $\,p\ge (\log_2\lambda)^{-2},$ where $\lambda$ is the constant from \eqref{b-ultra}. \end{remark} \begin{remark}\label{re.p-norm} It follows that $ \widetilde\rho(x,y)=\rho_p(x,y)^{1/p},\, x,y\in X$, is a b-metric on $X$, Lipschitz equivalent to $d$ and satisfying the inequality $$ \widetilde\rho(x,y)^p\le\widetilde\rho(x,z)^p+\widetilde\rho(z,y)^p\,,$$ for all $x,y,z\in X$. This is a well known fact in the theory of quasi-normed spaces, where a quasi-norm $\|\cdot\|$ satisfying the inequality $$ \|x+y\|^p\le \|x\|^p+\|y\|^p,$$ for some $0<p\le 1$ is called a $p$-norm (see Subsection \ref{Ss.quasi-normed space}). \end{remark} Let $(X,d)$ be a b-metric space. The b-metric $d$ is called \begin{itemize} \item \emph{continuous} if \begin{equation}\label{eq1.cont-metric} d(x_n,x)\to 0\;\mbox{ and }\; d(y_n,y)\to 0\;\Longrightarrow d(x_n,y_n)\to d(x,y)\,, \end{equation} \item \emph{separately continuous} if the function $d(x,\cdot)$ is continuous on $X$ for every $x\in X$, i.e., \begin{equation}\label{eq2.sep-cont-metric} d(y_n,y)\to 0\;\Longrightarrow d(x,y_n)\to d(x,y)\,, \end{equation} \end{itemize} for all sequences $(x_n),(y_n)$ in $X$ and all $x,y\in X$. The topology $\tau_d$ generated by a b-metric $d$ has some peculiarities -- a ball $B(x,r)$ need not be $\tau_d$-open and the b-metric $d$ need not be continuous on $X\times X$. \begin{remark} Let $(X,d)$ be a b-metric space and $x\in X$. Then $$ B(x,r) \;\mbox{ is $\tau_d$-open for every } r>0 \iff d(x,\cdot) \;\mbox{ is upper semicontinuous on }\; X.$$ Consequently, if the b-metric is separately continuous on $X$, then the balls $B(x,r)$ are $\tau_d$-open. \end{remark} The equivalence follows from the equality $$ B(x,r)=d(x,\cdot)^{-1}\big((-\infty,r)\big)\,.$$ We present now an example of a b-metric space where the balls are not necessarily open. \begin{example}[\cite{stempak09}] Consider a fixed number $\varepsilon >0$. For $X=\mathbb N_0=\{0,1,\dots\} $ let $d:X\times X\to[0,\infty)$ be defined by $$\begin{matrix}\label{ex.Stemp} &d(0,1)=1, &d(0,m)=1+\varepsilon &\;\mbox{ for } m\ge 2\\ &d(1,m)=\frac1m, &\;\;d(n,m)=\frac1n+\frac1m &\;\mbox{ for } n\ge 2 \end{matrix}$$ and extended to $X\times X$ by $d(n,n)=0$ and symmetry. Then $$ d(m,n)\le (1+\varepsilon)[d(n,k)+d(k,m)]\,,$$ for all $m,n,k\in X$, $B\left(0,1+\frac\varepsilon2\right)=\{0,1\}$, and the ball $B(1,r)$ contains infinitely many points for every $r>0$; that is, $1\in B\left(0,1+\frac\varepsilon2\right)$ but $B(1,r) \nsubseteq B\left(0,1+\frac\varepsilon2\right)$ for every $r>0,$ showing that the ball $ B\left(0,1+\frac\varepsilon2\right)$ is not $\tau_d$-open. \end{example} Other examples are given in \cite{an-dung15}. \begin{remark} If, for some $0<p<1,$ a b-metric $d$ on a set $X$ satisfies the inequality \begin{equation}\label{ineq.p-metric} d(x,y)^p\le d(x,z)^p+d(z,y)^p,\;\mbox{ for all }\; x,y,z \in X, \end{equation} then the balls corresponding to $d$ are $\tau_d$-open. Moreover, the b-metric $d$ is continuous. By Remark \ref{re.p-norm}, the b-metric $\widetilde\rho=\rho_p^{1/p}$ corresponding to the metric $\rho_p$ constructed in Theorem \ref{t.Stemp} satisfies the inequality \eqref{ineq.p-metric}. \end{remark} Indeed, let $B(x,r)$ be a ball in $(X,d)$ and $y\in B(x,r)$. We have to show that there exists $r'>0$ such that $B(y,r')\subseteq B(x,r)$.
Taking $r':=\left(r^p-d(x,y)^p\right)^{1/p}>0,$ then $d(y,z)<r'$ implies \begin{align*} d(x,z)^p&\le d(x,y)^p+d(y,z)^p \\ &< d(x,y)^p+r'^p=r^p, \end{align*} that is, $d(x,z)<r.$ The continuity of the b-metric $d$ follows from the inequality $$ | d(x_n,y_n)^p- d(x,y)^p|\le d(x_n,x)^p+ d(y_n,y)^p\,,$$ which can be proved as in the metric case (using \eqref{ineq.p-metric}). ^{*}xtbf{Equivalence notions for b-metrics.} In connection to the metrizability of b-metric spaces, we mention the following notions of equivalence for b-metrics. Let $d_1,d_2$ be two b-metrics on the same set $X$. Then $d_1,d_2$ are called \begin{itemize}\item \emph{topologically equivalent} if $\tau_{d_1}=\tau_{d_2}$; \item \emph{uniformly equivalent} if the identity mapping $I_X$ on $X$ is uniformly continuous both from $(X,d_1)$ to $(X,d_2)$ as well as from $(X,d_2)$ to $(X,d_1)$, i.e. \begin{align*} &\fracorall \varepsilon >0, \exists \delta(\varepsilon)>0\;\mbox{ such that } \fracorall x,y\in X\;(d_1(x,y)\le\delta(\varepsilon)\; \Longrightarrow\; d_2(x,y)\le \varepsilon),\\ &\fracorall \varepsilon >0, \exists \delta(\varepsilon)>0\;\mbox{ such that }\; \fracorall x,y\in X\;( d_2(x,y)\le\delta(\varepsilon)\; \Longrightarrow\; d_1(x,y)\le \varepsilon)\,. \end{align*} \item \emph{Lipschitz equivalent} if there exist $c_1,c_2>0$ such that $$c_1d_2(x,y)\le d_1(x,y)\le c_2d_2(x,y)\,,$$ for all $x,y\in X$ \end{itemize} Of course, the above definitions applies to metrics as well, as particular cases of b-metrics. \begin{remark} It is obvious that, in general, Lipschitz equivalence \; $\Longrightarrow$\; uniform equivalence \; $\Longrightarrow$\; topological equivalence. For quasi-norms, topological equivalence is equivalent to Lipschitz equivalence. \end{remark} So the expression ``the topology $\tau_d$ generated by a b-metric $d$ on a set $X$ is metrizable" means that there exists a metric $\rho$ on $X$ topologically equivalent to $d$. The problem of the existence of a metric that is Lipschitz equivalent to a b-metric was solved in \cite{fagin03}, where this property was called \emph{metric boundedness}. \begin{theo}[\cite{fagin03}, see also \cite{Kirk-Shah}, Theorem 12.9] Let $(X,d)$ be a b-metric space. Then $d$ is Lipschitz equivalent to a metric if and only if $d$ satisfies the $s$-relaxed polygonal inequality \eqref{def.polyg} for some $s\ge 1.$ \end{theo} \subsection{An axiomatic definition of balls in $b$-metric spaces} H. Aimar \cite{aimar98} found a set of properties characterizing balls in b-metric spaces. For a nonempty set $X$ consider a mapping $U:X\times (0,\infty)\to \mathcal P(X)$ satisfying the following properties: \begin{equation}\langlebel{def.balls} \begin{aligned} {\rm (i)}\quaduad &\bigcap_{r>0} U(x,r)=\{x\};\\ {\rm (ii)}\quaduad &\bigcup_{r>0} U(x,r)=X;\\ {\rm (iii)}\quaduad &0<r_1\le r_2\;\Longrightarrow\; U(x,r_1)\subseteq U(x,r_2);\\ {\rm (iv)}\quaduad &\mbox{there exists } \,c\ge 1\,\mbox{ such that } \\ & y\in U(x,r)\;\Longrightarrow\; U(x,r)\subseteq U(y,cr)\;\mbox{ and }\;U(y,r)\subseteq U(x,cr), \end{aligned}\end{equation} for all $x\in X$ and $r>0$. We call the sets $U(x,r)$ \emph{formal balls}. \begin{remark}\langlebel{re1.formal} By (i), $x\in U(x,r)$ for all $r>0$ and $x\in X$. \end{remark} It is easy to check that if $(X,d)$ is a $b$-metric space, where the $b$-metric $d$ satisfies the relaxed triangle inequality for some $s\ge 1,$ then the sets $U(x,r)=B_d(x,r),\, x\in X, r>0$ satisfy the properties from \eqref{def.balls} with $c=2s.$ Conditions (i)--(iii) are easy to check. 
For (iv), if $y\in B_d(x,r)$ and $z\in B_d(x,r)$, then $$ d(y,z)\le s(d(y,x)+d(x,z))<2sr\,,$$ i.e. $z\in B_d(y,2sr).$ Similarly, $y\in B_d(x,r)$ and $z\in B_d(y,r)$ imply $$ d(x,z)\le s(d(x,y)+d(y,z))<2sr\,,$$ i.e. $z\in B_d(x,2sr).$ It can be shown that, conversely, a family of subsets of $X$ satisfying the properties from \eqref{def.balls} generates a $b$-metric on $X$, the balls corresponding to $d$ being tightly connected with the sets $U(x,r).$ \begin{theo}[\cite{aimar98}] \langlebel{t1.Aimar} For a nonempty set $X$ and a mapping $\, U:X\times (0,\infty)\to \mathcal P(X)$ satisfying the properties from \eqref{def.balls}, define $d:X\times X\to [0,\infty)$ by \begin{equation}\langlebel{def.d-U} d(x,y)=\inf\{r>0 : y\in U(x,r) \,\mbox{ and }\, x\in U(y,r)\},\quaduad x,y\in X.\end{equation} Then: \begin{enumerate} \item $d$ is a $b$-metric on $X$ satisfying the relaxed triangle inequality for $s=c$; \item the open balls $B_d(x,r)$ corresponding to $d$ and the sets $U(x,r)$ are related by the inclusions \begin{equation}\langlebel{eq.ball-incl} U\left(x,(\gamma c)^{-1}r\right)\subseteq B_d(x,r)\subseteq U(x,r),\end{equation} for all $x\in X$ and $r>0$, where $\gamma >1$. \end{enumerate}\end{theo}\begin{proof} We shall verify only the relaxed triangle inequality. Let $x,y,z\in X $ and $\varepsilon >0$. By the definition \eqref{def.d-U} of $d$ there exists $r_1,r_2>0$ such that \begin{equation}\langlebel{eq1.d-U} \begin{aligned} & 0<r_1<d(x,z)+\varepsilon\;\mbox{ and }\; z\in U(x,r_1), \, x\in U(z,r_1);\\ & 0<r_2<d(z,y)+\varepsilon\;\mbox{ and }\; z\in U(y,r_2), \, y\in U(z,r_2). \end{aligned}\end{equation} Taking into account (iii) it follows $$ x,y\in U(z,r_1+r_2) \; \mbox {and }\; z\in U(x,r_1+r_2)\cap U(y,r_1+r_2)\,.$$ Applying (iv) to $x\in U(z,r_1+r_2)$ one obtains $$ y\in U(z,r_1+r_2)\subseteq U(x,c(r_1+r_2)).$$ Similarly, $y\in U(z,r_1+r_2)$ implies $$ x\in U(z,r_1+r_2)\subseteq U(y,c(r_1+r_2)),$$ so that, by the definition of $d,\, d(x,y)\le c(r_1+r_2)$. But then, by adding the inequalities \eqref{eq1.d-U}, one obtains $$ d(x,y)\le c(r_1+r_2)<c(d(x,z)+d(z,y))+2c\varepsilon\,.$$ Since these hold for all $\varepsilon >0,$ it follows $$ d(x,y)\le c(d(x,z)+d(z,y))\,.$$ Let us prove now the inclusions \eqref{eq.ball-incl}. If $d(x,y)<r$, then there exists $0<r'<r$ such that $y\in U(x,r')$ and $x\in U(y,r'). $ By (iii), $U(x,r')\subseteq U(x,r)$, so that $B_d(x,r)\subseteq U(x,r).$ Let now $y\in U(x,(\gamma c)^{-1}r).$ By Remark \ref{re1.formal} and (iv), \begin{align*} &x\in U\left(x,(\gamma c)^{-1}r\right)\subseteq U\left(y, r/\gamma\right),\; \mbox{and}\\ &y\in U\left(y,(\gamma c)^{-1}r\right)\subseteq U\left(x, r/\gamma\right)\,. \end{align*} By the definition of $d,\, d(x,y)\le r/\gamma<r\,,$ i.e. $y\in B_d(x,r).$ \end{proof} \begin{remark} In \cite{aimar98} it is shown that $d$ satisfies the relaxed triangle inequality with $s=2c.$ Also, the first inclusion in \eqref{eq.ball-incl} is proved for $\gamma =2,$ i.e. it is shown that $U\left(y,(2c)^{-1}r\right)\subseteq B_d(x,r)$. \end{remark} In \cite{aimar10} a similar characterization is given in terms of some subsets of $X\times X$. 
Denoting by $\Delta$ the diagonal of $X\times X$, $$\Delta =\{(x,x) : x\in X\}\,,$$ one considers a mapping $V:(0,\infty)\to \mathcal P(X\times X)$ satisfying the properties: \begin{equation}\langlebel{def.ant} \begin{aligned} {\rm (j)}\quaduad &\bigcap_{r>0} V(r)=\Delta;\\ {\rm (jj)}\quaduad &\bigcup_{r>0} V(r)=X\times X;\\ {\rm (jjj)}\quaduad &0<r_1\le r_2\;\Longrightarrow\; V(r_1)\subseteq V(r_2);\\ {\rm (jv)}\quaduad &\mbox{there exists } \,c\ge 1\,\mbox{ such that } V(r)\circ V(r)\subseteq V(cr).\\ \end{aligned}\end{equation} for all $r>0$. By analogy with the case of uniform spaces we call the sets $V(r)$ \emph{antourages}. If $(X,d)$ is a $b$-metric space with $d$ satisfying the relaxed triangle inequality for some $s\ge 1,$ then the sets $$ W_d(r)=\{(x,y)\in X\times X : d(x,y)<r\},\quaduad, r>0,$$ satisfy the conditions from \eqref{def.ant} with $c=2s$. Indeed, the conditions (j)--(jjj) are easily verified. To check (jv), suppose that $(x,y)\in W_d(r)\circ V(r).$ Then there exists $z\in X$ such that $(x,z)\in W_d(r)$ and $(z,y)\in W_d(r),$ implying $$ d(x,y)\le s(d(x,z)+d(z,y))<2sr\,,$$ showing that (jv) holds with $c=2s.$ A converse result holds in this case too. \begin{theo}[\cite{aimar10}] For a nonempty set $X$ and a mapping $\,V: (0,\infty)\to \mathcal P(X\times X)$ satisfying the properties from \eqref{def.ant}, define $d:X\times X\to [0,\infty)$ by \begin{equation}\langlebel{def.d-unif} d(x,y)=\inf\{r>0 : (x,y)\in V(r) \},\quaduad x,y\in X.\end{equation} Then: \begin{enumerate} \item $d$ is a $b$-metric on $X$ satisfying the relaxed triangle inequality for $s=c$; \item for any $0<\gamma <1$ the following inclusions hold \begin{equation}\langlebel{eq1.inclusions} V\left(\gamma r\right)\subseteq W_d(r)\subseteq V(r),\end{equation} for all $r>0$. \end{enumerate}\end{theo}\begin{proof} Again, we prove only the validity of the relaxed triangle inequality. Let $x,y,z\in X$. By the definition of $d$ there exist $r_1,r_2>0$ such that \begin{equation}\langlebel{eq1.d-unif} \begin{aligned} & 0<r_1<d(x,z)+\varepsilon\;\mbox{ and }\; (x,z)\in V(r_1);\\ & 0<r_2<d(z,y)+\varepsilon\;\mbox{ and }\; (z,y)\in V(r_2). \end{aligned}\end{equation} By (jjj) $V(r_1)\cup V(r_2)\subseteq V(r_1+r_2)$ so that, by (jv), $(x,y)\in V(c(r_1+r_2)),$ implying $d(x,y)\le c(r_1+r_2).$ Consequently, the inequalities \eqref{eq1.d-U} yield by addition $$ d(x,y)\le c(r_1+r_2)<c(d(x,z)+d(z,y))+2c\varepsilon\,.$$ Since these hold for all $\varepsilon >0,$ it follows $$ d(x,y)\le c(d(x,z)+d(z,y))\,.$$ The proof of the inclusions \eqref{eq1.inclusions} is simpler than in the case considered in Theorem \ref{t1.Aimar}. Indeed, $(x,y)\in W_(r)$ is equivalent to $d(x,y)<r.$ By the definition of $d$, there exists $0<r'<r$ such that $(x,y)\in V(r')$. By (jjj), $V(r')\subseteq V(r),$ showing that $(x,y)\in V(r)$, i.e. $W_d(r)\subseteq V(r).$ If $(x,y)\in V(\gamma r), $ then $$ d(x,y)\le\gamma r<r\,,$$ showing that $(x,y)\in W_d(r)$, i.e. $V(\gamma r)\subseteq W_d(r).$ \end{proof} \subsection{Strong b-metric spaces and completion} Let $(X,d)$ be a b-metric space. As we have seen, the topology $\tau_d$ generated by the b-metric $d$ has some drawbacks in what concerns the continuity property of $d$ and the topological openness of the ``open" balls. To remedy these shortcomings Kirk and Shahzad \cite[\S 12.4]{Kirk-Shah} introduced a special class of b-metrics. 
A mapping $d:X\times X\to [0,\infty)$ is called a \emph{strong b-metric} if it satisfies the conditions (i) and (ii) from \eqref{def.b-metric} and \begin{equation}\langlebel{s-b-metric}\tag{{\rm v}} d(x,y)\le d(x,z)+sd(y,z)\,, \end{equation} for some $s\ge 1$ and all $x,y,z\in X.$ It is obvious that \eqref{s-b-metric} is equivalent to \begin{equation}\langlebel{s-b-m2}\tag{{\rm v$'$}} d(x,y)\le \min\{sd(x,z)+d(y,z),d(x,z)+sd(y,z)\}\,, \end{equation} for all $x,y,z\in X, $ and that \eqref{s-b-metric} implies the $s$-relaxed triangle inequality (iii) from \eqref{def.b-metric}. The topology generated by a strong b-metric has good properties as, for instance, the openness of the balls $B(x,r)$. Indeed, if $y\in B(x,r)$ then $$d(y,z)\le d(x,y+sd(y,z)<\varepsilon\,,$$ provided $sd(y,z)<\varepsilon- d(x,y)$, that is $B(y,r')\subseteq B(x,r), $ where $r'= (\varepsilon- d(x,y))/s.$ Also the following inequality \begin{equation}\langlebel{eq1.sbm} |d(x,y)-d(x',y')|\le s[d(x,x')+d(y,y')]\,, \end{equation} holds for all $x,y,x',y'\in X,$ implying the continuity of the b-metric: if $d(x_n,x)\to 0$ and $d(y_n,y)\to 0$, then the relations $$ |d(x_n,y_n)-d(x,y)|\le s[d(x_n,x)+d(y_n,y)]\longrightarrow 0\;\mbox{ as }\; n\to\infty\,, $$ show that $d(x_n,y_n)\longrightarrow d(x,y)$ as $n\to\infty.$ A strong $b$-metric satisfies the $s$-polygonal inequality. Indeed, \begin{align*} d(x_0,x_n)&\le sd(x_0,x_1)+d(x_1,x_n)\le sd(x_0,x_1)+s d(x_1,x_2)+d(x_2,x_n)\le\dots\\ &\le s[sd(x_0,x_1)+s d(x_1,x_2)+\dots+d(x_{n-1},x_n)]\,. \end{align*} ^{*}xtbf{Completeness and completion.} A \emph{Cauchy sequence} in a b-metric space $(X,d) $ is a sequence $(x_n)$ in $X$ such that $\lim_{m,n\to\infty}d(x_n,x_m)=0.$ The inequality $d(x_n,x_m)\le s\left[d(x_n,x)+d(x,x_m)\right] $ shows that every convergent sequence is Cauchy. The b-metric space $(X,d)$ is called \emph{complete} if every Cauchy sequence converges to some $x\in X$. By a \emph{completion} of a b-metric space $(X,d)$ one understands a complete b-metric space $(Y,\rho)$ such that there exists an isometric embedding $\,j:X\to Y$ with $j(X)$ dense in $Y$. By an \emph{isometric embedding} of a b-metric space $(X_1,d_1)$ into a b-metric space $(X_2,d_2)$ one understands a mapping $f:X_1\to X_2$ such that $$d_2(f(x),f(y))=d_1(x,y)\,,$$ for all $x,y\in X_1.$ Two b-metric spaces $(X_1,d_1)$, $(X_2,d_2)$ are called \emph{isometric} if there exists a surjective isometric embedding $f:X_1\to X_2.$ The completeness is preserved by the uniform equivalence of b-metrics, but not by the topological equivalence. A question raised in \cite[p. 128]{Kirk-Shah} is: \emph{Does every strong b-metric space admit a completion}? This question was answered in the affirmative in \cite{an-dung16}. \begin{theo}\langlebel{t.compl-bm} Let $(X,d)$ be a strong b-metric space. \begin{enumerate}\item[ \rm 1. ]There exists a complete strong b-metric space $(\widetildede X,\widetildede d\,)$ which is a completion of $(X,d)$. \item[\rm 2. ] The completion is unique up to an isometry, in the sense that if $(X_1,d_1)$, $(X_2,d_2)$ are two strong b-metric spaces which are completions of $(X,d)$, then $(X_1,d_1)$ and $(X_2,d_2)$ are isometric. \end{enumerate} \end{theo}\begin{proof} The proof follows the ideas from the metric case. 
On the family $\mathcal C(X)$ of all Cauchy sequences in $X$ one considers the equivalence relation $$ (x_n)\sim (y_n)\iff \lim_nd(x_n,y_n)=0\,.$$ On the quotient space $ \widetildede X=\mathcal C(X)/\!\!\sim$ one defines $\widetildede d$ by $\widetildede d(\xi,\eta)=\lim_nd(x_n,y_n)$, where $(x_n)\in\xi$ and $(y_n)\in\eta,$ and one shows that $(\widetildede X,\widetildede d)$ is a complete strong b-metric space containing $X$ isometrically as a dense subset. \end{proof} \begin{remark} As it is mentioned in \cite{an-dung16}, the existence of a completion of an arbitrary b-metric space is still an important open problem. \end{remark} \subsection{Spaces of homogeneous type}\langlebel{Ss.homog-sp} Completing some earlier results of Coifman and de Guzman \cite{coif-guzman70}, Mac\'{\i}as and Segovia \cite{maci-sego79a,maci-sego79b} considered b-metrics (under the name quasi-distances) in connection with some problems in harmonic analysis. The framework in \cite{maci-sego79a} is the following. Let $(X,d)$ be a b-metric space. One considers a positive measure $\mu$ defined on a $\sigma$-algebra of subsets of $X$ containing the open sets and the balls $B(x,r)$ such that \begin{equation}\langlebel{eq1.homog} 0<\mu\left(B(x,ar)\right)\le \beta \mu\left(B(x,r)\right)\,, \end{equation} for all $x\in X$ and $r>0$, where $a>1$ and $A>0$ are fixed numbers. A b-metric space equipped with a measure $\mu$ satisfying \eqref{eq1.homog} is called a space of \emph{homogeneous type} and is denoted by $(X,d,\mu)$. If further, there exist $c_1,c_2>0$ such that \begin{equation}\langlebel{eq2.homog} 0<\mu(\{x\})<r<\mu(X)\;\Longrightarrow\; c_1 r\le \mu\left(B(x,r)\right)\le c_2r\,, \end{equation} for all $x\in X$, then the space $(X,d,\mu)$ of homogeneous type is called \emph{normal}. \begin{remark} One can show that if $(X,d)$ is a b-metric space with a positive measure $\mu$ satisfying \eqref{eq2.homog}, then the space $(X,d,\mu)$ is of homogeneous type. \end{remark} Concerning the openness of balls in b-metric spaces we mention the following result. \begin{theo}[\cite{maci-sego79a}] Let $(X,d)$ be a b-metric space. Then there exist a b-metric $d'$ on $X$, Lipschitz equivalent to $d$, and the constants $C>0$ and $0<\alpha<1$ such that \begin{equation}\langlebel{eq1.maci} |d'(x,z)-d'(y,z)|\le Cr^{1-\alpha} d'(x,y)^\alpha,\end{equation} whenever $\max\{d'(x,z), d'(y,z)\}<r.$ \end{theo} \begin{remark} The inequality \eqref{eq1.maci} can be written in the equivalent form \begin{equation}\langlebel{eq2.maci} |d'(x,z)-d'(y,z)|\le C d'(x,y)^\alpha\left(\max\{d'(x,z),d'(y,z)\}\right)^{1-\alpha},\end{equation} and it is easy to check that the balls corresponding to a b-metric $d'$ satisfying \eqref{eq2.maci} are $\tau_{d'}$-open. \end{remark} Indeed, let $B_{d'}(x,r)$ be a ball. We have to show that for every $y\in B_{d'}(x,r)$ there exists $r'>0$ such that $B_{d'}(y,r')\subseteq B_{d'}(x,r),$ that is, $$ d'(y,z)<r' \;\Longrightarrow \; d'(x,z)<r\,,$$ for any $z\in X$. Supposing that $d'$ satisfies the $s'$-relaxed triangle inequality for some $s'\ge 1$, choose first $0<r'<r$. Then $$ d'(x,z)\le s'd'(x,y)+s'd'(y,z)<2s'r\,.$$ By \eqref{eq2.maci}, \begin{align*} d'(x,z)&\le d'(x,y)+|d'(x,z)-d'(x,y)|\\ &<d'(x,y)+Cd'(y,z)^\alpha\left(\max\{d'(x,z),d'(x,y)\}\right)^{1-\alpha}\\ &<d'(x,y)+C(2s'r)^{1-\alpha}(r')^\alpha\,. \end{align*} Choosing $0<r'<r\,$ such that $\,C(2s'r)^{1-\alpha}(r')^\alpha<r-d'(x,y)$, it follows $d'(x,z)<r.$ Concerning the set of points $x\in X$ with $\mu(\{x\})>0$ we mention. 
\begin{prop}[\cite{maci-sego79a}] Let $(X,d,\mu)$ be a space of homogeneous type and $$ M=\{x\in X :\mu(\{x\})>0\}\,.$$ Then the set $M$ is at most countable and for every $x\in M$ there exists $r>0$ such that $M\cap B(x,r)=\{x\}.$ \end{prop} We mention also the following result. \begin{theo}[\cite{maci-sego79a}] Let $(X,d,\mu)$ be a space of homogeneous type such that the balls are $\tau_d$-open. Let $\delta:X\times X\to[0,\infty)$ be given by $$ \delta(x,y)=\inf\{\mu(B) : B\mbox{ is a ball containing } x,y\} \,,$$ if $x\ne y$ and $\delta(x,x)=0$. Then $\delta$ is a b-metric on $X$, $(X,\delta,\mu)$ is a normal space and $\tau_\delta=\tau_d.$ \end{theo} \subsection{Topological properties of $f$-quasimetric spaces} We present now, following \cite{arutyun17a} a very general class of metric type spaces. On a nonempty set $X$ consider a mapping $d:X\times X\to\mathbb R_+$ satisfying only the condition \begin{equation}\langlebel{def.dist} d(x,y)=0\iff x=y\,, \end{equation} for all $x,y\in X$, and call such a function \emph{distance}. One can define open balls with respect to $d$ as usual $$ B(x,r)=\{y\in X : d(x,y)<r\}\,, $$ for $x\in X$ and $r>0$, and a topology $\tau_d$ by $$ G\in\tau_d\overlineerset{{\rm def}}{\iff} \fracorall x\in G,\;\exists r>0,\; B(x,r)\subseteq G\,,$$ where $G\subseteq X$. The topology $\tau_d$ is $T_1$ because $X\smallsetminus\{x\}$ is open, and so $\{x\}$ closed. Indeed, if $y\ne x$, then $r:=d(y,x)>0$ and $x\notin B(y,r)$. Along with the distance $d$ one can consider the \emph{conjugate distance} $\begin{array}r d(x,y)=d(y,x),\, x,y\in X$. The $\begin{array}r d$-balls are given by $$ B_{\begin{array}r d}(x,r)=\{y\in X : \begin{array}r d(x,y)<r\}= \{y\in X : d(y,x)<r\}\,,$$ and the corresponding topology is denoted by $\tau_{\begin{array}r d}.$ Consider now a function $f:\mathbb R_+\times\mathbb R_+\to\mathbb R_+$ such that \begin{equation}\langlebel{def.f} (t_1,t_2)\to (0,0)\;\Longrightarrow \; f(t_1,t_2)\to (0,0)\,, \end{equation} where $(t_1,t_2)\in \mathbb R_+\times\mathbb R_+.$ we say that a distance $d$ on a set $X$ is an $f$-\emph{quasimetric} if it satisfies the inequality \begin{equation}\langlebel{ineq.f-tr} d(x,y)\le f(d(x,z),d(y,z))\,, \end{equation} for all $x,y,z\in X$. \begin{example}\langlebel{ex.f-fcs} We present first some important particular cases of function $f$. \begin{itemize} \item $f(t_1,t_2)=t_1+t_2$. In this case $d$ is a quasimetric (see \cite{Cobzas}) and a metric if the distance $d$ is symmetric. \item $f(t_1,t_2)=s_1t_1+s_2t_2$, for some $s_1,s_2\ge 1.$ In this case $d$ is called an $(s_1,s_2)$-quasimetric (see \cite{arutyun17b}), a b-quasimetric if $s_1=s_2=s$, respectively an $(s_1,s_2)$-metric, and b-metric if $d$ is symmetric. \end{itemize} \end{example} From \ref{ineq.f-tr} one obtains the following result, called the \emph{asymptotic triangle inequality}: \begin{equation}\langlebel{ineq.as-tr} d(x_n,y_n)\to 0\mbox{ and } d(y_n,z_n)\to 0\;\Longrightarrow\; d(x_n,z_n)\to 0\,.\end{equation} Conversely, if a distance functions satisfies \eqref{ineq.as-tr}, then there exists a function $f$, satisfying \eqref{def.f}, such that $d$ is an $f$-quasimetric. Indeed, define $h:\mathbb R_+\to\mathbb R_+$ by \begin{equation*} h(t)=\sup\{d(u,v) : u,v\in X,\, \exists w\in X,\, d(u,w)+d(w,v)\le t\}\,. 
\end{equation*} The function $h$ is obviously nondecreasing and $$\lim_{t\searrow 0}h(t)=0\,.$$ Indeed, if $t_n\to 0$, where $t_n\in\mathbb R_+,\, n\in\mathbb N,$ then there exist $u_n,v_n,w_n\in X$ such that \begin{align*} d(u_n,w_n)+d(w_n,v_n)\le t_n\;\;\mbox{ and }\;\; d(u_n,v_n)>f(t_n)-\fracrac 1n\,, \end{align*} for all $n\in \mathbb N.$ The first inequality implies $d(u_n,w_n),d(w_n,v_n)\to 0,$ so that, by \eqref{ineq.as-tr}, $d(u_n,v_n) \to 0,$ which, by the second inequality from above, yields $f(t_n)\to 0.$ By the definition of the function $h$, $$h(d(x,z)+d(z,y))\ge d(x,y)\,,$$ for all $x,y,z\in X$, so we can take $f(t_1,t_2)=h(t_1+t_2).$ Define the convergence of a sequence $(x_n)$ in a distance space $(X,d)$ to $x\in X$ by $$ x_n\xrightarrow{d}x\overlineerset{{\rm def}}{\iff} d(x,x_n)\to 0\,, $$ and, for $Z\subseteq X$ and $x\in X$ put $$ d(x,Z)=\inf\{d(x,z) : z\in Z\}\,.$$ We have the following useful characterizations of the interior and closure. \begin{prop}\langlebel{p1.Arutyun} Let $(X,d)$ be an $f$-quasimetric space and $Z\subseteq X$. Then \begin{equation}\langlebel{eq1.Arutyun} \operatorname{int} (Z)=\{x\in X : d(x,X\smallsetminus Z)>0\}\;\mbox{ and }\; \operatorname{cl}(Z)=\{x\in X : d(x,A)=0\}\,. \end{equation} \end{prop}\begin{proof} Let $$ \widetildede Z:=\{x\in X : d(x,X\smallsetminus Z)>0\}\,.$$ We show that \begin{align*} &{\rm (i)}\quaduad \widetildede Z\;\mbox{is open};\\ &{\rm (ii)}\quaduad \widetildede Z\subseteq Z;\\ &{\rm (iii)}\quaduad \operatorname{int} (Z)\subseteq\widetildede Z\,, \end{align*} which will imply that $\operatorname{int} (Z)=\widetildede Z$. Suppose that $\widetildede Z$ is not open. Then there exists $x\in \widetildede Z$ such that $B(x,n^{-1})\nsubseteq \widetildede Z$ for all $n\in\mathbb N.$ Hence, for each $n\in\mathbb N,$ there exists $y_n\in X$ such that $$ d(x,y_n)<\fracrac1n\;\mbox{ and} \; d(y_n,X\smallsetminus Z)=0\,.$$ Then, for every $n\in\mathbb N,$ there exists $w_n\in X\smallsetminus Z$ such that $d(y_n,w_n)<1/n\,.$ By \eqref{ineq.as-tr}, $d(x,w_n)\to 0$, implying $d(x,X\smallsetminus Z)=0$, in contradiction to the hypothesis that $x\in\widetildede Z$. The proof of (ii) is simple. If $x\notin Z$, then $x\in X\smallsetminus Z$, so that $d(x,X\smallsetminus Z)=0,$ that is, $x\notin\widetildede Z$. To prove (iii), suppose that $x\in\operatorname{int}(Z)$. Then there exists $r>0$ such that $B(x,r)\subseteq\operatorname{int} (Z)\subseteq Z.$ But then, for any $y\in X\smallsetminus Z,\, d(x,y)\ge r>0,$ that is, $x\in \widetildede Z$. The proof of the formula for closure is based on the equality $$ \operatorname{cl}(X\smallsetminus Y)=X\smallsetminus\operatorname{int}(Y)\,,$$ valid for any subset $Y$ of $X$. Then \begin{align*} \operatorname{cl}(Z)&=X\smallsetminus\operatorname{int}(X\smallsetminus Z) =X\smallsetminus \{x\in X : d(x,Z)>0\}\\&= \{x\in X : d(x,Z)=0\}\,. \end{align*} \end{proof} Proposition \ref{p1.Arutyun} has some important consequences. \begin{corol}\langlebel{c1.Arutyun} Let $(X,d)$ be an $f$-quasimetric space. \begin{enumerate} \item[\rm 1.] For every $x\in X$ and $r>0,\, x\in \operatorname{int}\big(B(x,r)\big)$, or, equivalently, $B(x,r)$ is a neighborhood of $x$. \item[\rm 2.] The topology $\tau_d$ satisfies the first axiom of countability, i.e. every point has a countable base of neighborhood. Consequently, usual sequences suffices to characterize the topological properties of $X$. \item[\rm 3.] 
The convergence of a sequence $(x_n)$ in $X$ to $x\in X$ with respect to $\tau_d$ is characterized in the following way: \begin{equation}\langlebel{eq1.d-converg} x_n\xrightarrow{\tau_d}x\iff d(x,x_n)\longrightarrow 0\,.\end{equation} \end{enumerate}\end{corol}\begin{proof} 1. This follows from the following relations \begin{align*} y\in X\smallsetminus B(x,r)&\iff d(x,y)\ge r\\ &\;\,\Longrightarrow \, d(x,X\smallsetminus B(x,r))\ge r>0\\ &\iff x\in\operatorname{int} \big(B(x,r)\big)\,. \end{align*} 2. A countable base of neighborhoods of a point $x\in X$ is $B(x,n^{-1}),\, n\in\mathbb N.$ If $V$ is a neighborhood of $x$, then there exists $G\in\tau_d$ such that $$ x\in G\subseteq V\,.$$ By the definition of the topology $\tau_d$, there exists $r>0$ such that $ B(x,r)\subseteq G,$ implying $$B(x,n^{-1})\subseteq B(x,r)\subseteq G\subseteq V\,,$$ for some sufficiently large $n\in\mathbb N.$ 3. Suppose that $x_n\xrightarrow{\tau_d}x $ and let $r>0$. Then, by 1, $B(x,r)\in\mathcal V(x)$ so there exists $n_r\in\mathbb N$ such that $x_n\in B(x,r)$, or, equivalently, $d(x,x_n)<r$ for all $n\ge n_r.$ Consequently, $d(x,x_n)\longrightarrow 0.$ Suppose now that $d(x,x_n)\longrightarrow 0$ and let $V\in\mathcal V(x).$ Then $x\in\operatorname{int}(V)\in\tau_d$, so there exists $r>0$ such that $ B(x,r)\subseteq\operatorname{int}(V)\subseteq V.$ By hypothesis, there exists $n_0\in\mathbb N$, such that $d(x,x_n)<r$ for all $n\ge n_0$, implying $x_n\in B(x,r)\subseteq V$ for all $n\ge n_0.$ \end{proof} \begin{remark} The convergence with respect to the topology $\tau_{\begin{array}r d}$ generated by the conjugate $f$-quasimetric $\begin{array}r d(x,y)=d(y,x),\, x,y\in X,$ is characterized by \begin{equation}\langlebel{eq2.d-converg} x_n\xrightarrow{\tau_{\begin{array}r d}}x\iff d(x_n,x)\longrightarrow 0\,.\end{equation} \end{remark} Since $x\in \operatorname{int}\big(B(x,r)\big),$ there exists $r'>0$ such that $B(x,r')\subseteq\operatorname{int}\big(B(x,r)\big)\subseteq B(x,r).$ A natural question is to find the biggest $r'$ such that $B(x,r')\operatorname{int}\big(B(x,r)\big).$ For $r>0$ let $$ \Lambda(r)=\{t_1\ge 0 :\limsup_{t_2\searrow 0}f(t_1,t_2)\ge r\}\,,$$ and $$ \theta(r)=\begin{cases} \sup\Lambda(r) &\mbox{ if } \; \Lambda(r)\ne \emptyset \\ r &\mbox{ if } \; \Lambda(r)= \emptyset\,. \end{cases} $$ Here, by definition, \begin{equation}\langlebel{limsup} \limsup_{t_2\searrow 0}f(t_1,t_2)=\inf_{\delta>0}\big(\sup\{f(t_1,t_2) : 0\le t_2<\delta\}\big)\,.\end{equation} \begin{remark}\langlebel{re1.Arutyun} Observe that $\Lambda(r)=\emptyset$ implies $B(x,r)=X$. Also $\theta(r)>0$ in both cases. \end{remark} Indeed, if $\Lambda(r)=\emptyset,$ then putting $t_1=d(x,y)$ for some arbitrary $y\in X$, it follows $\limsup_{t_2\searrow 0}f(t_1,t_2)<r$, so there exists $\delta >0$ such that $\sup\{f(t_1,t_2) : 0\le t_2<\delta\}<r$, implying $$ d(x,y)\le f(d(x,y),d(y,y))=f(t_1,0)<r\,,$$ that is, $y\in B(x,r).$ If $\theta(r)=0$, then $\Lambda(r)\ne\emptyset,$ so there exists a sequence $(t_1^k)$ in $\Lambda(r)$ such that $t^k_1\to 0$ as $k\to\infty.$ By \eqref{limsup} and the definition of $\Lambda(r)$, for every $k\in\mathbb N$ there exists $0\le t^k_2<1/k$ such that $f(t^k_1,t^k_2)>r/2.$ Taking into account \eqref{def.f}, one obtains the contradiction. $$ 0=\lim_{k\to\infty} f(t^k_1,t^k_2)\ge r/2\,.$$ \begin{prop}\langlebel{p2.Arutyun} Let $(X,d)$ be an $f$-quasimetric space. Then $$ B(x,\theta(r))\subseteq\operatorname{int}\big(B(x,r)\big)\,,$$ for every $x\in X$ and $r>0$. 
\end{prop}\begin{proof} By Remark \ref{re1.Arutyun} we can suppose $\Lambda(r)\ne\emptyset.$ Let $y\in B(x,\theta(r)).$ Then $t_1:=d(x,y)<\theta(r),$ implying $t_1\notin\Lambda(r)$ so that $\limsup_{t_2\searrow 0}f(t_1,t_2)<r.$ By \eqref{limsup} there exists $\delta>0$ such that \begin{equation}\langlebel{eq3a.Arutyun}\sup\{f(t_1,t_2) : 0\le t_2<\delta\}<r\,.\end{equation} We show that \begin{equation}\langlebel{eq3b.Arutyun} B(y,\delta)\subseteq B(x,r)\,.\end{equation} If $z\in B(y,\delta), $ then, by \eqref{eq3a.Arutyun} $$ d(x,z)\le f(d(x,y),d(y,z))=f(t_1,d(y,z))<r\,,$$ because $d(y,z)<\delta.$ Since $B(y,\delta)$ is a neighborhood of $y$, the inclusion \eqref{eq3b.Arutyun} shows that $B(x,r)$ is also a neighborhood of $y$, that is, $y\in\operatorname{int}\big(B(x,r)\big).$ \end{proof} \begin{remark} If $(X,d)$ is an $(s_1,s_2)$-quasimetric space, i.e. an $f$-quasimetric space for $f(t_1,t_2)=s_1t_1+s_2t_2$, then $$ \theta (r)=r/s_1\,.$$ \end{remark} Indeed, \begin{align*} \theta(r)&=\inf\{t_1\ge 0 : \lim_{t_2\searrow 0}(s_1t_1+s_2t_2)\ge r\}\\ &=\inf\{t_1\ge 0 : s_1t_1\ge r\}=r/s_1\,. \end{align*} The authors define in \cite{arutyun17a} the notion of Cauchy sequence and completeness. A sequence in a distance space $(X,d)$ is called \emph{Cauchy} if for every $\varepsilon >0$ there exists $n_0=n_0(\varepsilon)$ such that \begin{equation}\langlebel{Cauchy-l} d(x_n,x_{n+k})<\varepsilon\,, \end{equation} for all $n\ge n_0$ and all $k\in\mathbb N.$ The distance space $(X,d)$ is called \emph{complete} if every Cauchy sequence is convergent to some $x\in X$. The authors prove in \cite{arutyun17a} the validity of Baire category theorem in complete $f$-quasimetric spaces which satisfy the separation axiom $T_3$ (i.e. are regular). As in the metric case, the proof is based on the nonemptiness of descending sequences of closed balls with radii tending to 0. They extend the metrization Theorems \ref{t.Frink} and \ref{t.Stemp} to this setting proving the quasimetrizability of $f$-quasimetric spaces. In this case, the equivalent quasimetrics are also given by the formulae \eqref{def.1-metric} and \eqref{def.p-metric} and are denoted by Inf $d$. They introduce the notion of weak symmetry of the $f$-quasimetric $d$ by the condition \begin{equation}\langlebel{eq.w-sym} d(x,x_n)\to 0\;\Longrightarrow\; d(x_n,x)\to 0\,,\end{equation} for all sequences $(x_n)$ in $X$ and $x\in X$. The topology generated by a weakly symmetric $f$-quasimetric is normal and metrizable. \begin{remark} By \eqref{eq1.d-converg} and \eqref{eq2.d-converg}, the condition \eqref{eq.w-sym} means that the identity mapping $I:(X,\tau_d)\to (X,\tau_{\begin{array}r d})$ is continuous, or equivalently, $\tau_{\begin{array}r d}\subseteq\tau_d,$ i.e. the topology $\tau_{d}$ is finer than $\tau_{\begin{array}r d}$. \end{remark} \begin{remark} In the theory of quasimetric spaces (see Example \ref{ex.f-fcs}) a sequence satisfying \eqref{Cauchy-l} is called ``left $K$-Cauchy" and the corresponding notion of completeness, ``left $K$-completeness". If the sequence $(x_n)$ satisfies the condition \begin{equation}\langlebel{Cauchy-r} d(x_{n+k},x_{n})<\varepsilon\,, \end{equation} for all $n\ge n_0$ and all $k\in\mathbb N,$ then it is called ``right $K$-Cauchy", and the corresponding notion of completeness, ``right $K$-completeness". Some authors call a sequence $(x_n)$ satisfying \eqref{Cauchy-l} ``forward Cauchy" and ``backward Cauchy" if it satisfies \eqref{Cauchy-r}. 
Also the convergence given by $d(x,x_n)\to 0$ is called ``backward convergence", while that given by $d(x_n,x)\to 0$ is called ``forward convergence" (see, e.g. \cite{menuci13}). Combining these notions of Cauchy sequence and convergence one obtains various notions of completeness: ``forward-forward complete" meaning that every forward Cauchy sequence is forward convergent, with similar definitions for forward-backward, backward-forward, etc -- completeness. Due to the asymmetry of the quasimetric, there are several notions of Cauchy sequence (actually 7, see \cite{reily-subram82}), each of them agreeing with the usual notion of Cauchy (fundamental) sequence in the metric case. Considering $d$-convergence and $\begin{array}r d$-convergence, from these 7 notions of Cauchy sequence one obtains 14 notions of completeness (see the book \cite{Cobzas}). \end{remark} \subsection{Historical remarks and further results} The relaxed triangle inequality and the corresponding spaces were rediscovered several times under various names -- quasi-metric, near metric (in \cite{Deza}), metric type, etc. \begin{itemize} \item (1970) Coifman and de Guzman \cite{coif-guzman70} in connection with some problems in harmonic analysis (a b-metric is called ``distance" function); \item (1979) the results of Coifman and de Guzman were completed by Mac\'{\i}as and Segovia \cite{maci-sego79a,maci-sego79b}; \item (1989) Bakhtin \cite{bahtin89} called them ``quasi-metric spaces" and proved a contraction principle for such spaces; \item (1993) Czerwik introduced them under the name ``b-metric space", first for $s=2$ in \cite{czerw93}, and then for an arbitrary $s\ge 1$ in \cite{czerw98}, with applications to fixed points; \item (1998,2003) Fagin \emph{et al.} \cite{fagin03,fagin98} considered distances satisfying the $s$-relaxed triangle and polygonal inequalities with applications to some problems in theoretical computer science; \item (2010) Khamsi \cite{khamsi10a} introduced them under the name ``metric type spaces" and remarked that if $D$ is a cone metric on a set $X$ with values in a Banach space ordered by a normal cone $C$ with normality constant $K$, then $d(x,y)=\|D(x,y)\|,\, x,y\in X,$ is a b-metric on $X$ satisfying the $K$-relaxed polygonal inequality. \end{itemize} Some topological properties of b-metric spaces (e.g. compactness) were studied in \cite{khamsi10b}. Xia \cite{xia09} studied the properties of the space $C(T,X)$ of continuous functions from a compact metric space $T$ to a b-metric space $X$, and geodesics and intrinsic metrics in b-metric spaces. The results were applied to show that the optimal transport paths between atomic probability measures are geodesics in the intrinsic metric. An, Tuyen and Dung \cite{an-dung15} extended to b-metric spaces Stone's paracompactness theorem. \section{Generalized b-metric spaces} The notions of \emph{generalized metric}, meaning a mapping $d:X\times X\to [0,\infty]$ satisfying the axioms of a metric, and generalized metric space $(X,d)$ were introduced by W. A. J. Luxemburg in \cite{lux1}--\cite{lux3} in connection with the method of successive approximation and fixed points. These results were completed by A. F. Monna \cite{monna61} and M. Edelstein \cite{edelst64}. Further results were obtained by J. B. Diaz and B. Margolis \cite{diaz-margol68,margolis68} and C. F. K. Jung \cite{jung69}. G. Dezs\H{o} \cite{dezso00} considered generalized vector metrics, i.e. 
metrics with values in $\mathbb R^m_+\cup\{(+\infty)^m\}$, and extended to this setting Perov's fixed point theorem (see \cite{perov64} -- \cite{perov66}) as well as other fixed point results (Luxemburg, Jung, Diaz-Margolis, Kannan). For some recent results on generalized metric spaces see \cite{beer13} and \cite{czerw-krol16b}. Recently, G. Beer and J. Vanderwerf \cite{beer15}--\cite{beer-vdw15a} considered vector spaces equipped with norms that can take infinite values, called ``extended norms" (see also \cite{czerw-krol16a}). Following these ideas, we consider here the notion of \emph{generalized} b-\emph{metric} on a nonempty set $X$ as a mapping $d:X\times X\to [0,\infty]$ satisfying the conditions (i)--(iii) from (\ref{def.b-metric}). If $d$ satisfies further the condition \eqref{s-b-metric}, then $d$ is called a \emph{generalized strong} b-\emph{metric} and the pair $(X,d)$ a \emph{generalized strong} b-\emph{metric space}. Let $(X, d)$ be a generalized b-metric space. As in Jung \cite{jung69}, it follows that \begin{equation}\langlebel{zad3} x \sim y \stackrel{d}{\Longleftrightarrow} d(x,y) < + \infty, \quaduad x,y \in X, \end{equation} is an equivalence relation on $X$. Denoting by $X_i, \, i \in I$, the equivalence classes corresponding to $\sim$ and putting $d_i = d |_{X_i \times X_i}, \, i \in I$, then $(X_i, d_i)$ is a b-metric space (a strong b-metric space if $(X,d)$ is a generalized strong b-metric space) for every $i\in I$. Therefore, $X$ can be uniquely decomposed into equivalence classes $X_i, \, i \in I$, called the \emph{canonical decomposition} of $X$. By analogy to \cite{jung69} we have. \begin{theo}\langlebel{t.2} Let $(X,d)$ be a generalized b-metric space and $X_i, \, i \in I,$ its canonical decomposition. Then the following hold. \begin{enumerate} \item[\rm 1. ] The space $(X,d)$ is complete if and only if $(X_i,d_i)$ is complete for every $i \in I$. \item[\rm 2. ] If $\,(Y_i, d_i), \, i \in I$, are b-metric spaces (with the same $s$) and $Y_i \cap Y_j = \emptyset $ for all $i \neq j$ in $I$, then \begin{equation}\langlebel{zad4} d(x,y) := \left\{ \begin{array}{lcl} d_i(x,y) & \mbox{if} & x,y \in Y_i, ^{*}xt{ for some } i \in I, \\ +\infty & \mbox{if} & x \in Y_i ^{*}xt{ and } y \in Y_j \\ & & ^{*}xt{ for some } i,j \in I ^{*}xt{ with } i \neq j, \\ \end{array}\right. \end{equation} is a generalized b-metric on $Y = \bigcup_{i \in I} Y_i$, with $\{Y_i : i\in I\}$ the family of equivalence classes corresponding to the equivalence relation (\ref{zad3}). \end{enumerate} The same results are true for generalized strong b-metric spaces. \end{theo} \subsection{The completion of generalized b-metric spaces} In this subsection we shall prove the existence of the completion of strong b-metric spaces. The existence of the completion of a generalized metric space was proved in \cite{czerw-krol16}. We start with the following lemma. \begin{lemma}\langlebel{le1} Let $(X,d)$ be a generalized b-metric space, ($Z,D$) a complete generalized b-metric space, with continuous generalized b-metrics $d,D$ and $Y$ a dense subset of $X$. Then for every isometric embedding $f: Y \rightarrow Z$ there exists a unique isometric embedding $F: X \rightarrow Z$ such that $F|_Y = f$. If, in addition, $X$ is complete and $f(Y)$ is dense in $Z$, then $F$ is bijective (i.e. $F$ is an isometry of $X$ onto $Z$). \end{lemma}\begin{proof} For the sake of completeness we include the simple proof of this result. 
For $x\in X$ let $(y_n)$ be a sequence in $Y$ such that $d(y_n,x)\to 0.$ Then $(y_n)$ is a Cauchy sequence in $(X,d)$ and the equalities $D(f(y_n),f(y_m))=d(y_n,y_m),\, m,n\in\mathbb{N},$ show that $(f(y_n))$ is a Cauchy sequence in $(Z,D)$. Since $(Z,D)$ is complete, there exists $z\in Z$ such that $D(f(y_n),z)\to 0$. If $(y'_n)$ is another sequence in $Y$ converging to $x$, then $(f(y'_n)) $ will converge to an element $z'\in Z$. By the continuity of the generalized b-metrics $d$ and $D$, $$D(z,z')=D(\lim_nf(y_n),\lim_nf(y'_n))= \lim_nD(f(y_n),f(y'_n))= \lim_nd(y_n,y'_n)=0\,,$$ showing that $z=z'$. So we can unambiguously define a mapping $F:X\to Z$ by $F(x)=\lim_nf(y_n)$, where $(y_n)$ is a sequence in $Y$ converging to $x\in X$. For $y\in Y$ taking $y_n=y,\, n\in\mathbb{N}$, it follows $F(y)=y$. For $x,x'\in X$, let $(y_n), (y'_n)$ be sequences in $Y$ converging to $x$ and $x'$, respectively. Then $$D(F(x),F(x'))=\lim_nD(f(y_n),f(y'_n))= \lim_nd(y_n,y'_n)=d(x,x')\,,$$ i.e. $F$ is an isometric embedding. If $f(Y)$ is dense in $Z$, then, for any $z\in Z$, there exists a sequence $(y_n)$ in $Y$ such that $ D(f(y_n),z)\to 0. $ It follows that $(f(y_n))$ is a Cauchy sequence in $Z$ and so, as $f$ is an isometry, $(y_n)$ will be a Cauchy sequence in $X$. As the space $X$ is complete, $(y_n)$ is convergent to some $x\in X$. But then $$ D(F(x),z)=\lim_nD(F(x),f(y_n))=\lim_n d(x,y_n)=0\,,$$ showing that $F(x)=z.$ \end{proof} \begin{remark} The proof can be adapted to show that, under the hypotheses of Lemma \ref{le1}, every uniformly continuous mapping $f:Y\to Z$ has a unique uniformly continuous extension to $X$. The notion of uniform continuity for mappings between generalized b-metric spaces is defined as in the metric case. \end{remark} Let $(X,d)$ be a generalized strong b-metric space with $X_i, \, i \in I$, the family of equivalence classes corresponding to (\ref{zad3}). For every $i \in I$, let $(Y_i, D_i)$ be a completion of the strong b-metric space $(X_i,d_i)$. Denote by $T_i : (X_i, d_i) \rightarrow (Y_i, D_i)$ the isometric embedding with $T_i (X_i)$ $D_i$-dense in $Y_i$ corresponding to this completion. Replacing, if necessary, $Y_i$ with $\overlineerline{Y_i} = Y_i \times \{ i\}, D_i$ with $\overlineerline{D_i} ((x,i),(y,i))=D_i(x,y),$ for $ x,y \in Y_i,$ and putting $\overlineerline{T_i}(x,i) = (T_i(x), i),\, x\in Y_i$, we may suppose, without restricting the generality, that $$ Y_i \cap Y_j = \emptyset ^{*}xt{ for all } i,j \in I ^{*}xt{ with } i \neq j\,. $$ Put $Y := \bigcup_{i \in I} Y_i$, and define $$ D: Y \times Y \rightarrow [0, \infty] $$ \noindent according to (\ref{zad4}) and $T: X \rightarrow Y$ by $$ T(x) := T_i(x), $$ where $i$ is the unique element of $I$ such that $x \in X_i$. We have the following result. \begin{theo}\langlebel{theorem-3} Let $(X,d)$ be a generalized strong b-metric space and $(Y, D)$ the generalized strong b-metric space defined above. Then \begin{enumerate} \item[\rm (i) ] $(Y, D)$ is a complete generalized strong b-metric space; \item[\rm (ii) ] $T: (X,d) \rightarrow (Y, D)$ is an isometric embedding with $T(X)$ $D$-dense in $Y$; \item[\rm (iii) ] any other complete generalized strong b-metric space $(Z,\varrho )$ that contains a $\rho$-dense isometric copy of $(X,d)$, is isometric to $(Y,D)$. \end{enumerate} \end{theo}\begin{proof} Since each strong b-metric space $(Y_i, D_i)$ is complete, Theorem \ref{t.2} implies that the generalized strong b-metric space $(Y,D)$ is complete. Let $x,y \in X$. 
If $x,y \in X_i$, for some $i \in I$, then $$ D(T(x), T(y)) = D_i (T_i(x), T_i(y)) = d_i (x,y) = d(x, y). $$ If $x \in X_i,\, y \in X_j$ with $i \neq j$, then $$ T(x) = T_i(x) \in Y_i ^{*}xt{ and } T(y) = T_j(x) \in Y_j\,, $$ so that $$D(T(x), T(y)) = D(T_i(x), T_j(y)) = + \infty = d(x,y).$$ Now for $ \xi \in Y$ there exists a unique $i \in I$ such that $\xi \in Y_i$. Since $T_i(X_i)$ is dense in $(Y_i, D_i)$, there exists a sequence $(x_n)$ in $X_i$ such that $$ 0 = \lim_{n \to \infty} D_i(T_i(x_n), \xi ) = \lim_{n \to \infty} D(T(x_n), \xi), $$ which means that $T(X)$ is $D$-dense in $(Y,D)$. Finally, to verify (iii), let $ S: (X,d) \rightarrow (Z,\rho)$ be an isometric embedding with $S(X)$ dense in $Z$. Define $R:T(X)\to X$ by $R(T(x))=x,\, x\in X$. Then $R$ is an isometry of $T(X)$ onto $X$ and $S\circ R$ is an isometric embedding of $T(X)$ into $Z$. Since $T(X)$ is dense in $Y$ and $S(R(T(X)))=S(X)$ is dense in $Z$, Lemma \ref{le1} yields the existence of an isometry $U$ of $Y$ onto $Z$, which ends the proof.\end{proof} \section{Fixed points in b-metric spaces} We shall prove some fixed point results in b-metric and in generalized b-metric spaces. \subsection{Fixed points in b-metric spaces} We start with the case of b-metric spaces. The second result is an extension to b-metric spaces of Theorem 4.1 from \cite{Dugundji-G}. Let $(X,d)$ be a b-metric space with $d$ satisfying the $s$-relaxed triangle inequality. We consider functions $\varphi:\mathbb R_+\to\mathbb R_+$ satisfying the conditions \begin{equation}\langlebel{def.phi} \begin{aligned} &{\rm (a)}\quaduad \varphi \;\mbox{ is nondecreasing and}\\ &{\rm (b)}\quaduad \lim_{n\to\infty}\varphi^n(t)=0\;\mbox{ for all }\; t>0\,.\\ \end{aligned}\end{equation} \begin{remark}\langlebel{re.phi} If $\varphi:\mathbb R_+\to\mathbb R_+$ satisfies the conditions (a) and (b) from above, then \begin{align*} {\rm(c) } \quaduad &\varphi(t)<t\;\mbox{ for all }\;t>0; \\ {\rm(d) } \quaduad &\lim_{n\to\infty}\varphi^n(0)=0\;\mbox{ and }\; \varphi(0)=0=\lim_{t\searrow 0}\varphi(t)\,. \end{align*} \end{remark} Indeed, if $\varphi(t)\ge t$ for some $t>0$, then, by (a), $\varphi^2(t)\ge \varphi(t)\ge t$ and, in general $\varphi^n(t)\ge t>0$ for all $n$, in contradiction to (b). Also, $0\le\varphi(0)\le\varphi(1)$ implies $0\le\varphi^2(0)\le \varphi^2(1)$ and in general $0\le\varphi^n(0)\le\varphi^n(1)$. Since $\lim_{n\to\infty}\varphi^n(1)=0,$ this yields (d). Similarly, $0\le\varphi(0)\le \varphi(t)<t$ for any $t>0$, implies $\varphi(0)=0=\lim_{t\searrow 0}\varphi(t)$. \begin{theo}\langlebel{t1.Czerw} Let $(X,d)$ be a complete b-metric space, where $d$ satisfies the $s$-relaxed triangle inequality and let $\varphi:\mathbb R_+\to\mathbb R_+$ be a function satisfying the conditions (a), (b) from \eqref{def.phi}. Then every mapping $f:X\to X$ satisfying the inequality \begin{equation}\langlebel{eq1.Cz} d(f(x),f(y))\le\varphi(d(x,y))\,, \end{equation} for all $x,y\in X$, has a unique fixed point $z$ and, for every $x\in X$, the sequence $\big(f^n(x)\big)_{n\in\mathbb N_0}$ converges to $z$ as $n\to \infty$. \end{theo}\begin{proof} We present the proof given in \cite{kaj-luk18}. Let $x\in X$. Put $x_n=T^nx,\, n\in\mathbb N_0\,,$ and let us show that $(x_n)_{n\in\mathbb N_0}$ is a Cauchy sequence. 
Observe first that \eqref{eq1.Cz} implies \begin{equation}\langlebel{eq2.Cz} d(T^nu,T^nv)\le\varphi^n(d(u,v))\,, \end{equation} for all $u,v\in X$ and $n\in\mathbb N.$ This implies \begin{equation}\langlebel{eq3.Cz} d(T^nx_{kn},x_{kn})\le\varphi^{kn}(d(x_n,x_0))\,, \end{equation} for all $k,n\in\mathbb N.$ Indeed, by \eqref{eq2.Cz}, $$ d(T^nx_{kn},x_{kn})= d(T^{kn}x_{n},T^{kn}x_{0})\le \varphi^{kn}(d(x_nx_0))\,.$$ From \eqref{eq3.Cz} and \eqref{def.phi}.(b) one obtains \begin{equation}\langlebel{eq3b.Cz} \lim_{k\to\infty}d(T^nx_{kn},x_{kn})=0\,, \end{equation} for every $n\in\mathbb N.$ Let $\varepsilon>0$ be given. Observe first that there exist $\begin{array}r k,\begin{array}r n\in\mathbb N$ s.t. \begin{equation}\langlebel{eq4.Cz}\begin{aligned} {\rm(i)}\quaduad &T^{\begin{array}r n}(B(x_{\begin{array}r k\begin{array}r n},\varepsilon)\subseteq B(x_{\begin{array}r k\begin{array}r n},\varepsilon);\\ {\rm(ii)}\quaduad & x_{k\begin{array}r n}\in B(x_{\begin{array}r k\begin{array}r n},\varepsilon)\;\mbox{ for all }\; k\ge \begin{array}r k;\\ {\rm(iii)}\quaduad & d(x_{k_1\begin{array}r n},x_{\begin{array}r k_2\begin{array}r n})<2s\varepsilon\;\mbox{ for all }\; k_1,k_2\ge \begin{array}r k. \end{aligned}\end{equation} Indeed, by \eqref{def.phi}.(b), there exists $\begin{array}r n\in\mathbb N$ s.t. \begin{equation}\langlebel{eq5.Cz} \varphi^n(\varepsilon)<\varepsilon/(2s)\;\mbox{ for all }\; n\ge\begin{array}r n, \end{equation} and, by \eqref{eq3b.Cz}, there exists $\begin{array}r k\in\mathbb N$ s.t. \begin{equation}\langlebel{eq6.Cz} d(T^{\begin{array}r n}x_{k\begin{array}r n},x_{k\begin{array}r n})<\varepsilon/(2s)\;\mbox{ for all }\; k\ge\begin{array}r k\,. \end{equation} But then, $d(u,x_{\begin{array}r k\begin{array}r n})<\varepsilon$ implies $$ d(T^{\begin{array}r n}u,T^{\begin{array}r n}x_{\begin{array}r k\begin{array}r n})\le\varphi^{\begin{array}r n}(d(u,x_{\begin{array}r k\begin{array}r n}))\le \varphi^{\begin{array}r n}(\varepsilon)<\varepsilon/(2s)\,,$$ so that \begin{align*} d(T^{\begin{array}r n}u,x_{\begin{array}r k\begin{array}r n})&\le s\left[ d(T^{\begin{array}r n}u,T^{\begin{array}r n}x_{\begin{array}r k\begin{array}r n})+d(T^{\begin{array}r n}x_{\begin{array}r k\begin{array}r n},x_{\begin{array}r k\begin{array}r n})\right]\\ &<s\left[\fracrac\varepsilon{2s}+\fracrac\varepsilon{2s}\right]=\varepsilon\,, \end{align*} showing that \eqref{eq4.Cz}.(i) holds. Since $x_{\begin{array}r k\begin{array}r n}\in B(x_{\begin{array}r k\begin{array}r n},\varepsilon)$ it follows that $x_{(\begin{array}r k+1)\begin{array}r n}=T^{\begin{array}r n}x_{\begin{array}r k\begin{array}r n} \in B(x_{\begin{array}r k\begin{array}r n},\varepsilon)$ and, in general, by induction, $ x_{(\begin{array}r k+j)\begin{array}r n} \in B(x_{\begin{array}r k\begin{array}r n},\varepsilon)$ for any $j\in\mathbb N_0.$ Now, by (ii), $x_{k_1\begin{array}r n},x_{k_2\begin{array}r n}\in B(x_{\begin{array}r k\begin{array}r n},\varepsilon)$ for all $k_1,k_2\ge \begin{array}r k,$ so that $$ d(x_{k_1\begin{array}r n},x_{k_2\begin{array}r n})\le s( d(x_{k_1\begin{array}r n},x_{\begin{array}r k\begin{array}r n})+d(x_{\begin{array}r k\begin{array}r n},x_{k_2\begin{array}r n})<2s\varepsilon\,,$$ showing that (iii) holds too. 
By \eqref{eq3b.Cz} for $n=1$ one obtains \begin{equation}s \lim_{k\to\infty}d(x_{k+1},x_k)=0\,.\end{equation}s It is easy to check that this implies \begin{equation}s \lim_{k\to\infty}d(x_{k\begin{array}r n+p},x_{k\begin{array}r n})=0 \;\mbox{ for }\; p=0,1,\dots,\begin{array}r n-1\,, \end{equation}s so there exists $k_0\in \mathbb N$ s.t. \begin{equation}\langlebel{eq8.Cz} d(x_{k\begin{array}r n+p},x_{k\begin{array}r n})<\varepsilon\;\mbox{ for all }\; k\ge k_0 \;\mbox{ and }\; p=0,1,\dots,\begin{array}r n-1\,. \end{equation} Let now $\widetildede k:=\max\{\begin{array}r k, k_0\}$ and let $m_1=k_1\begin{array}r n+p_1,\, m_2=k_2\begin{array}r n+p_2$ with $p_1,p_2\in\{0,1,\dots,\begin{array}r n-1\}$ and $k_1,k_2\ge \widetildede k$. Combining \eqref{eq8.Cz} and \eqref{eq4.Cz}.(iii) one obtains \begin{align*} d(x_{m_1},x_{m_2})&\le sd(x_{k_1\begin{array}r n+p_1},x_{k_1\begin{array}r n})+s^2d(x_{k_1\begin{array}r n},x_{k_2\begin{array}r n})+s^3d(x_{ k_2\begin{array}r n},x_{k_2\begin{array}r n+p_2})\\ &<(s+2s^3+s^2)\varepsilon\le 4s^3 \varepsilon\,, \end{align*} which shows that $(x_n)$ is a Cauchy sequence. The completeness of $X$ implies the existence of a point $z\in X$ s.t. $\lim_{n\to\infty}d(x_n,z)=0.$ We have $$ d(x_{n+1},Tz)=d(Tx_n,Tz)\le\varphi(d(x_n,z))\le d(x_n,z)$$ for all $n\in\mathbb N,$ so that \begin{align*} d(z,Tz)&\le s\left[d(z,x_{n+1})+d(x_{n+1},Tz)\right]\\ &\le s\left[d(z,x_{n+1})+d(x_{n},z)\right]\,. \end{align*} Letting $n\to \infty$ one obtains $d(z,Tz)=0,$ that is, $z=Tz.$ The uniqueness follows in the following way. Suppose $z_i=Tz_i,\, i=1,2.$ Then $$ d(z_1,z_2)=d(Tz_1,Tz_2)\le\varphi(d(z_1,z_2))\,.$$ By Remark \ref{re.phi}.(c) this can hold only for $d(z_1,z_2)=0,$ that is, for $z_1=z_2.$ \end{proof} Let $(X,d)$ be a b-metric space with $d$ satisfying the s-relaxed triangle inequality for some $s\ge 1.$ An important particular case of a function $\varphi$ satisfying the conditions (a),(b) from \eqref{def.phi} is $$ \varphi(t)=\alpha t,\; t\ge 0\,.$$ If $0<\alpha <1,$ then $$ \varphi^n(t)=\alpha^n t\to 0 \; \mbox{ as }\; n\to \infty\,.$$ Since $\varphi$ is also strictly increasing, it satisfies the conditions (a),(b) from \eqref{def.phi}. The inequality \eqref{eq1.Cz} becomes in this case \begin{equation*} d(f(x),f(y))\le \alpha d(x,y)\,, \end{equation*} for all $x,y\in X$. So, Theorem \ref{t1.Czerw} has as consequence the analog of Banach contraction principle in b-metric spaces. The following proposition also illustrates how various types of relaxed inequalities for the b-metric influence the form this principle takes. \begin{prop}\langlebel{c1.Bahtin} Let $(X,d)$ be a complete b-metric space, where $d$ satisfies the $s$-relaxed triangle inequality and $f:X\to X$ a mapping such that, for some $0<\alpha <1,$ \begin{equation}\langlebel{eq1.Bahtin} d(f(x),f(y))\le\alpha d(x,y), \end{equation} for all $ x,y\in X.$ Then $f$ has a unique fixed point $z$ and, for every $x\in X$, the sequence $\big(f^n(x)\big)_{n\in\mathbb{N}}$ converges to $z$ as $n\to \infty$. \begin{enumerate} \item[\rm 1. (\cite{bahtin89})] If further $0<\alpha<1/s,$ then the following evaluation of the order of convergence holds \begin{equation}\langlebel{ineq2.Bahtin} d(x_{n},z)\le\fracrac{s^2d(x_0,x_1)}{1-\alpha s}\cdot \alpha^{n}\,, \end{equation} for all $n\in\mathbb{N}.$ \item[\rm 2. 
(\cite{Kirk-Shah})] If $d$ satisfies the $s$-relaxed polygonal inequality, then the following evaluation of the order of convergence \begin{equation}\langlebel{ineq2.KS2} d(x_{n},z)\le \fracrac{s^2d(x_0,x_1)}{1-\alpha}\cdot \alpha^n,\quaduad n\in\mathbb N\,, \end{equation} holds for any $0<\alpha<1.$ \end{enumerate} \end{prop}\begin{proof} Although, as we have remarked, the first statement of the proposition follows from Theorem \ref{t1.Czerw}, we show a proof based on Theorem \ref{t.Stemp}. Our presentation follows \cite{an-dung15b}. Suppose that $d$ satisfies the $s$-relaxed triangle inequality, for some $s\ge 1.$ If $0<p\le 1$ is given by the equation $(2s)^p=1$, then, by Theorem \ref{t.Stemp}, the functional $\rho_p$ given by \eqref{def.p-metric} is a metric on $X$ satisfying the inequalities \begin{equation}\langlebel{ineq1.dung} \rho_p\le d^p\le 2\rho_p\,.\end{equation} For $x,y\in X$ let $x=x_0,x_1,\dots,x_n=y$ be an arbitrary chain in $X$ connecting $x$ and $y$. Then $y_i=f(x_i),\, i=0,1,\dots,n,$ is a chain in $X$ connecting $f(x)$ and $f(y)$. Consequently, by \eqref{def.p-metric} and \eqref{eq1.Bahtin}, \begin{equation}\langlebel{ineq2.dung} \rho_p(f(x),f(y))\le \sum_{i=0}^{n-1}d(y_i,y_{i+1})^p\le \alpha^p \sum_{i=0}^{n-1}d(x_i,x_{i+1})^p. \end{equation} Since the inequality between the extreme terms in \eqref{ineq2.dung} holds for all chains $x=x_0,x_1,\dots,x_n=y,\, n\in\mathbb N,$ connecting $x$ and $y$, it follows $$ \rho_p(f(x),f(y))\le\alpha^p\rho_p(x,y)\,,$$ for all $x,y\in X$, where $0<\alpha^p<1.$ Consequently, $f$ is a contraction with respect to $\rho_p$. The inequalities \eqref{ineq1.dung} and the completeness of $(X,d)$ imply the completeness of $(X,\rho_p)$ and so, by Banach's contraction principle, $f$ has a unique fixed point $z\in X$ and the sequence of iterates $(f^n(x))_{n\in\mathbb N}$ is $\rho_p$-convergent to $z$, for every $x\in X$. Appealing again to the inequalities \eqref{ineq1.dung}, it follows that $(f^n(x))_{n\in\mathbb N}$ is also $d$-convergent to $z$ for every $x\in X$. 1.\; The proof is similar to that of Banach's contraction principle in the metric case. Observe first that, \eqref{eq1.Bahtin} implies \begin{equation}\langlebel{eq2.Bahtin} d(f^n(x),f^n(y))\le\alpha^nd(x,y)\,, \end{equation} for all $n\in\mathbb{N}$ and $x,y\in X$. For $x_0\in X$ consider the sequence of iterates $$x_n=f(x_{n-1})=f^n(x_0),\quaduad n\in\mathbb{N}\,.$$ Let us prove that $(x_n)$ is a Cauchy sequence. By \eqref{s-relax-n} and \eqref{eq2.Bahtin}, \begin{equation}\langlebel{ineq3.Bahtin}\begin{aligned} d(x_{n},x_{n+k+1})&\le sd(x_{n},x_{n+1}) + s^2d(x_{n+1},x_{n+2})+\dots\\&+s^{k}d(x_{n+k-1},x_{n+k})+s^{k}d(x_{n+k},x_{n+k+1})\\ &\le \big(\alpha^{n}s+\alpha^{n+1}s^2+\dots+\alpha^{n+k-1}s^k\big)d(x_0,x_1)+\alpha^{n+k}s^{k}d(x_0,x_1)\\ &=\alpha^{n}s\left(\fracrac{1-(\alpha s)^k}{1-\alpha s} +\alpha^ks^{k-1}\right)d(x_0,x_1)\\&=\alpha^{n}s \fracrac{1-(\alpha s)^{k-1}\alpha(s+1-\alpha s)}{1-\alpha s}d(x_0,x_1)\\&<\alpha^n\fracrac{s d(x_0,x_1)}{1-\alpha s}\,, \end{aligned}\end{equation} for all $n,k\in\mathbb{N}.$ Since $\lim_{n\to\infty}\alpha^{n}=0,$ this shows that $(x_n)$ is a Cauchy sequence. By the completeness of $(X,d)$ there exists $z\in X$ such that $\lim_{n\to\infty}d(x_n,z)=0$. We have \begin{align*} d(z,f(z))&\le sd(z,x_{n+1})+sd(x_{n+1},f(z))\\ &\le sd(z,x_{n+1})+s\alpha d(x_{n},z)\longrightarrow 0\;\mbox{ as }\; n\to \infty\,. 
\end{align*} Hence $d(z,f(z))=0$ and so $z=f(z).$ Taking into account \eqref{ineq3.Bahtin}, \begin{align*} d(x_{n},z)&\le s d(x_{n},x_{n+k+1})+s d(x_{n+k+1},z)\\ &<\alpha^n\fracrac{s^2 d(x_0,x_1)}{1-\alpha s}+s d(x_{n+k+1},z)\,. \end{align*} Letting $k\to\infty$, one obtains \eqref{ineq2.Bahtin}. Suppose now that there exists two points $z,z'\in X$ such that $f(z)=z$ and $f(z')=z'$. Then the relations $$ d(z,z')=d(f(z),f(z'))\le\alpha d(z,z') $$ show that $d(z,z')=0$, i.e. $ z=z'.$ 2.\; Let $x_0\in X$ and $x_n=f(x_{n-1}),\,n\in\mathbb{N}.$ Taking into account the relaxed polygonal inequality and \eqref{eq2.Bahtin}, we obtain \begin{align*} d(x_{n},x_{n+k})&\le s\sum_{i=0}^{k-1}d(x_{n+i},x_{n+i+1}) \le s(\alpha^n+\alpha^{n+1}+\dots+\alpha^{n+k})d(x_0,x_1)\\ &= s\alpha^n\,\fracrac{1-\alpha^{k+1}}{1-\alpha}\cdot d(x_0,x_1)< \fracrac{sd(x_0,x_1)}{1-\alpha}\cdot\alpha^n. \end{align*} Based on these relations the proof goes as in case 1. \end{proof} \begin{remark} The proof given here to statement 2 from Proposition \ref{c1.Bahtin} is simpler than that of Theorem 12.4 in \cite{Kirk-Shah}. \end{remark} \begin{remark} The proofs given in \cite{czerw93} and \cite{Kirk-Shah} to Theorem \ref{t1.Czerw} go in the following way. Let $x$ be a fixed element of $ X$ and $\varepsilon >0$. By \eqref{def.phi}.(b) there exists $m=m_\varepsilon\in\mathbb N$ such that \begin{equation}\langlebel{eq1.err} \varphi^{m}(\varepsilon)<\fracrac\varepsilon{2s}\,.\end{equation} One considers the sequence $x_k=f^{km}(x),\ k\in \mathbb N,$ and one shows that there exists $k_0\in\mathbb N$ such that \begin{equation}\langlebel{ineq1.Cauchy} d(x_k,x_{k'})<2s\varepsilon\,, \end{equation} for all $k,k'\ge k_0.$ One affirms that the inequality \eqref{ineq1.Cauchy} shows that $(x_k)$ is a Cauchy sequence, which is not surely true, because the inequality is true only for this specific $\varepsilon.$ Taking another $\varepsilon,$ say $0<\varepsilon'<\varepsilon,$ we find another number $m'=m_{\varepsilon'}$ (possibly different from $m$), such that \begin{equation}\langlebel{eq2.err} \varphi^{m'}(\varepsilon')<\fracrac{\varepsilon'}{2s}\,.\end{equation} The above procedure yields a sequence $x'_k=f^{km'}(x),\, k\in \mathbb N,$ satisfying, for some $k_1\in\mathbb N,$ \begin{equation}\langlebel{ineq2.Cauchy} d(x_k,x_{k'})<2s\varepsilon'\,, \end{equation} for all $k,k'\ge k_1.$ But the sequences $(x_k)$ and $(x'_k)$ can be totally different, so we cannot infer that the sequence $(x_k)$ is Cauchy. As we have shown this flaw was fixed in the paper \cite{kaj-luk18}.\end{remark} \begin{remark} Berinde \cite{beri93} considers comparison functions satisfying a condition stronger than $0<\alpha<1/s,$ namely $\sum_{k=1}^\infty \varphi^k(t)<\infty$, allowing estimations of the order of convergence similar to \eqref{ineq2.Bahtin}. He also shows that the sequence $x_n=f^n(x_0),\, n\in\mathbb N_0,$ is convergent to a fixed point of $f$ if and only if it is bounded. For various kinds of comparison functions, the relations between them an applications to fixed points, see \cite[\S3.0.3]{Rus-PP}. \end{remark} \subsection{Fixed points in generalized b-metric spaces} Theorem \ref{t1.Czerw} admits the following extension to generalized b-metric spaces. 
\begin{theo}\label{theorem-5} Let $(X,d)$ be a complete generalized b-metric space and suppose that the mapping $f: X \to X$ is such that \begin{equation}\label{zad6} d\left(f(x),f(y)\right) \le \varphi\left(d(x,y)\right)\,, \end{equation} for all $ x,y \in X$ with $d(x,y)<\infty,$ where the function $\varphi:\mathbb{R}_+\to \mathbb{R}_+$ satisfies the conditions (a),(b) from \eqref{def.phi}. Consider, for some $x \in X$, the sequence of successive approximations $\left(f^n (x) \right)_{n \in \mathbb{N}_0}$. Then either {\rm (A) }\;\; $ d (f^k(x), f^{k+1}(x)) = + \infty $ for all $k \in \mathbb{N}_0$,\\ or {\rm (B) }\;\; the sequence $\left(f^n (x) \right)_{n \in \mathbb{N}}$ is convergent to a fixed point of $f$. \end{theo} \begin{proof} Let $X=\bigcup_{i\in I}X_i$ be the canonical decomposition of $X$ corresponding to the equivalence relation \eqref{zad3}. Assume that (A) does not hold. Then $$ d(f^m(x), f^{m+1}(x)) < + \infty\,,$$ for some $m \in \mathbb{N}_0\,.$ If $i\in I$ is such that $ f^m(x), f^{m+1}(x) \in X_{i},$ then $$ d\left( f^{m+1}(x), f^{m+2}(x)\right) \le \varphi\left( d\left( f^{m}(x), f^{m+1}(x)\right)\right)<\infty$$ implies $f^{m+2}(x)\in X_i\,,$ and so, by mathematical induction, $f^{m+k}(x)\in X_i$ for all $k\in\mathbb N_0\,.$ Since $$ z\in X_i\iff d(z,f^m(x))<\infty\,,$$ the inequality $$ d(f(z),f^{m+1}(x))\le \varphi(d(z,f^m(x)))<\infty$$ shows that the restriction $f_i=f|_{X_i}$ of $f$ to $X_i$ is a mapping from $X_i$ to $X_i$ satisfying $$ d(f_i(y),f_i(z))\le\varphi(d(y,z))\,,$$ for all $y,z\in X_i.$ By Theorem \ref{t.2}, $X_i$ is complete, so that, by Theorem \ref{t1.Czerw}, $\left( f^{m+k}(x)\right)_{k\in\mathbb N_0}$ is convergent to a fixed point $z_i\in X_i$ of $f_{i}$, which is a fixed point for $f$. \end{proof} \begin{remark} For $s = 1$ and $\varphi(t)=\alpha t,\, t\ge 0,$ where $0\le\alpha<1,$ we get the Diaz and Margolis \cite{diaz-margol68} fixed point theorem of the alternative. At the same time this extends Theorem 2 from \cite{czerw-krol17} and gives simpler proofs of Theorems 2.1 and 3.1 from \cite{aydi-czerw18}. \end{remark} Proposition \ref{c1.Bahtin} also admits extensions to this setting as results of the alternative. We formulate only one of these results. \begin{corol}\label{c2.Bahtin} Let $(X,d)$ be a complete generalized b-metric space, where $d$ satisfies the $s$-relaxed triangle inequality, and let $f:X\to X$ be a mapping satisfying, for some $ 0<\alpha<1,$ the inequality \begin{equation*} d(f(x),f(y))\le\alpha d(x,y)\,, \end{equation*} for all $x,y\in X$ with $d(x,y)<\infty.$ Then, for every $x\in X,$ either {\rm (A$'$) }\;\; $ d (f^k(x), f^{k+1}(x)) = + \infty $ for all $k \in \mathbb{N}_0$,\\ or {\rm (B$'$) }\;\; the sequence $\left(f^n (x) \right)_{n \in \mathbb{N}}$ is convergent to a fixed point of $f$. \end{corol} \section{Lipschitz functions} In this section we shall discuss the behavior of Lipschitz functions defined on or taking values in quasi-normed spaces and of Lipschitz functions on spaces of homogeneous type. \subsection{Quasi-normed spaces}\label{Ss.quasi-normed space} We start by a brief presentation of an important class of b-metric spaces -- quasi-normed spaces. Good references are \cite{kalt03}, \cite{Kalton-F-sp}, \cite[pp. 156-166]{Kothe}, \cite{Rolew-MLS}.
A \emph{quasi-norm} on a vector space $X$ (over $\mathbb K$ equal to $\mathbb R$ or $\mathbb C$) is a functional $\|\cdot\|:X\to\mathbb{R}_+$ for which there exists a real number $k\ge 1$ so that \begin{enumerate} \item[(QN1) ] $\|x\|=0\iff x=0;$ \item[(QN2) ] $\|\alpha x\|=|\alpha|\,\| x\|;$ \item[(QN3) ] $\|x+y\|\le k(\| x\|+\|y\|),$ \end{enumerate} for all $x,y\in X$ and $\alpha \in \mathbb{K}$. The pair $(X,\|\cdot\|)$ is called a quasi-normed space. A complete quasi-normed space is called a quasi-Banach space. If $k=1$, then $\|\cdot\|$ is a norm. The smallest constant $k$ for which the inequality (QN3) is satisfied for all $x,y\in X$ is called the \emph{modulus of concavity} of the quasi-normed space $X.$ For a linear operator $T$ from a quasi-normed space $(X,\|\cdot\|_X)$ to a normed space $(Y,\|\cdot\|_Y)$ put $$ \|T\|=\sup\{\|Tx\|_Y : x\in X, \,\|x\|_X\le 1\}\,.$$ In particular, \begin{equation}\label{norm-quasi-Banach} \|x^*\|=\sup\{|x^*(x)| : x\in X,\|x\|_X\le 1\},\;\; x^*\in X^*, \end{equation} is a norm on the dual space $X^*=(X,\|\cdot\|_X)^*$. It follows that $T$ is continuous if and only if $\|T\|<\infty$ and, in this case, \begin{equation*} \|Tx\|_Y\le\|T\|\|x\|_X\,,\;\; x\in X, \end{equation*} $\|T\|$ being the smallest number $L\ge 0$ for which the inequality $\|Tx\|_Y\le L\|x\|_X$ holds for all $x\in X$. If $Y$ is also a quasi-normed space, then $\|\cdot\|$ is only a quasi-norm on the space $\mathcal L(X,Y)$ of all continuous linear operators from $X$ to $Y$. Two quasi-norms $\Vert \cdot\Vert_ 1,\Vert \cdot\Vert_ 2$ on a vector space $X$ are called \emph{equivalent} if they generate the same topology, or equivalently, if $$\Vert x_n-x\Vert_ 1\to 0\iff \Vert x_n-x\Vert_ 2\to 0\,,$$ for all sequences $(x_n)$ in $X$ and $x\in X$. As in the case of norms, the equivalence of two quasi-norms $\Vert \cdot\Vert_ 1,\Vert \cdot\Vert_ 2$ on a vector space $X$ is equivalent to the existence of two numbers $\alpha,\beta>0$ such that $$ \alpha\Vert x\Vert_ 1\le\Vert x\Vert_ 2\le\beta\Vert x\Vert_ 1\,,$$ for all $x\in X$. A subset $A$ of a topological vector space (TVS) $(X,\tau)$ is called \emph{bounded} if it is absorbed by any 0-neighborhood, i.e.\ for every $V\in\mathcal V_\tau(0)$ there exists $t>0$ such that $A\subseteq t V.$ A TVS is called \emph{locally bounded} if it has a bounded 0-neighborhood. A quasi-normed space $(X,\|\cdot\|)$ is locally bounded, as the closed unit ball $B_X=\{x\in X : \|x\|\le 1\}$ is a bounded neighborhood of 0. One shows that, conversely, the topology of every locally bounded TVS is generated by a quasi-norm. A quasi-normed space $(X,\|\cdot\|)$ is normable (i.e. there exists a norm $\|\cdot\|_1$ on $X$ equivalent to the quasi-norm $\|\cdot\|$) if and only if 0 has a bounded convex neighborhood (implying that $X$ is locally convex). \begin{defi}\label{def.F-norm} An $F$-\emph{norm} on a vector space $X$ is a mapping $\|\cdot\|:X\to\mathbb{R}_+$ satisfying the conditions \index{$F$-norm} \begin{enumerate} \item[(F1) ] $\|x\|=0\iff x=0;$ \item[(F2) ] $\|\lambda x\|\le \|x\|$\; for all \; $\lambda\in \mathbb{K}$ with $|\lambda|\le 1;$ \item[(F3) ] $\|x+y\| \le \|x\| +\|y\|;$ \item[(F4) ] $\|x_n\|\to 0\;\Longrightarrow \|\lambda x_n\|\to 0;$ \index{F-norm} \item[(F5) ] $\lambda_n\to 0\;\Longrightarrow \|\lambda_n x\|\to 0,$ \end{enumerate} for all $ x, y, x_n \in X$ and $\lambda,\lambda_n\in \mathbb{K}.$ An $F$-\emph{space} is a vector space equipped with a complete $F$-norm.
\end{defi}\index{$F$-norm} It follows that $d(x,y)=\|y-x\|,\, x,y\in X,$ is a translation-invariant metric on $X$ defining a vector topology. It is known that the metrizability of a TVS $(X,\tau)$ is equivalent to the existence of a countable basis of 0-neighborhoods, and in this case there exists a translation-invariant metric $d$ on $X$ generating the topology $\tau$. One shows, see \cite[p. 163]{Kothe}, that the topology of a metrizable TVS can always be given by an $F$-norm. If $(X,\tau)$ is a TVS, then the topology $\tau$ generates a uniformity $\mathcal W_\tau$ on $X$, a basis of it being given by the sets $$ W_U=\{(x,y)\in X^2 : y-x\in U\}\,,$$ where $U$ runs over a 0-neighborhood basis in $X.$ Any translation-invariant metric generating the topology $\tau$ generates the same uniformity $\mathcal W_\tau$, so that if $X$ is complete with respect to $\mathcal W_\tau$, then it is complete with respect to any translation-invariant metric generating the topology $\tau.$ \index{F-space} Typical examples of quasi-normed spaces are the spaces $L^p[0,1]$ and $\ell^p$ with $0<p<1$ equipped with the quasi-norms \begin{equation}\label{def.Lp-qn} \|f\|_p=\left(\int_0^1|f(t)|^p\,dt\right)^{1/p}\; \mbox{ and }\; \|x\|_p=\left(\sum_{k=1}^\infty|x_k|^p\right)^{1/p}\,, \end{equation} for $f\in L^p[0,1]$ and $x=(x_k)_{k\in\mathbb{N}}\in\ell^p\,,$ respectively. The quasi-norms $\|\cdot\|_p$ satisfy the inequalities \begin{equation}\begin{aligned}\label{ineq1.Lp} &\|f+g\|_p\le 2^{(1-p)/p}(\|f\|_p+\|g\|_p)\; \mbox{ and}\\ &\|x+y\|_p\le 2^{(1-p)/p}(\|x\|_p+\|y\|_p)\,, \end{aligned}\end{equation} for all $f,g\in L^p[0,1]$ and $x,y\in\ell^p.$ The constant $2^{(1-p)/p}>1$ is sharp, i.e. the moduli of concavity of the spaces $L^p[0,1]$ and $\ell^p$ are both equal to $2^{(1-p)/p}$. To show this, we start with the elementary inequalities \begin{equation}\label{ineq.Lp} (a+b)^p\le a^p+b^p\le 2^{1-p}(a+b)^p\,,\end{equation} valid for all $a,b\ge 0$. Let $f,g\in L^p[0,1].$ The first inequality from above implies $$ |f(t)+g(t)|^p\le (|f(t)|+|g(t)|)^p\le |f(t)|^p+|g(t)|^p\,, $$ for almost all $t\in [0,1]$, so that \begin{equation}\label{ineq3.Lp}\begin{aligned} \Vert f+g\Vert_ p^p&=\int_0^1|f(t)+g(t)|^pdt\le \int_0^1|f(t)|^pdt+\int_0^1|g(t)|^p dt \\&=\Vert f\Vert_ p^p+\Vert g\Vert_ p^p\,. \end{aligned}\end{equation} This inequality and the second inequality from \eqref{ineq.Lp} yield \begin{align*} \Vert f+g\Vert_ p&=\left(\Vert f+g\Vert_ p^p\right)^{1/p}\le\left(\Vert f\Vert_ p^p+\Vert g\Vert_ p^p\right)^{1/p}\\ &\le 2^{(1-p)/p}(\Vert f\Vert_ p+\Vert g\Vert_ p)\,. \end{align*} Similar calculations can be done to show that $$ \Vert x+y\Vert_ p\le 2^{(1-p)/p}(\Vert x\Vert_ p+\Vert y\Vert_ p)\,, $$ for all $x,y\in\ell^p.$ To show that the constant $2^{(1-p)/p}$ is sharp take $x=(1,0,0\dots)$ and $y=(0,1,0\dots)$ in the case of the space $\ell^p$. Then $$ \Vert x+y\Vert_ p=2^{1/p}\; \mbox{ and }\; 2^{(1-p)/p}(\Vert x\Vert_ p+\Vert y\Vert_ p)=2^{(1-p)/p} \cdot 2=2^{1/p}\,,$$ that is, we have equality in the second inequality from \eqref{ineq1.Lp}. In the case of the space $L^p[0,1]$ take $f=\chi_{[0,\frac12)}$ and $g=\chi_{[\frac12,1]}$ to obtain equality in the first inequality from \eqref{ineq1.Lp}. \begin{remark} Although apparently similar, the quasi-normed spaces $\ell^p$ and $L^p[0,1]$ differ drastically. For instance, the space $L^p[0,1]$ has trivial dual, $(L^p[0,1])^*=\{0\}$, while $(\ell^p)^*=\ell^\infty$, see \cite[pp. 156-158]{Kothe}. D. Pallaschke \cite{pallas73} and Ph.
Turpin \cite{turpin73} have shown that every compact endomorphism of $L^p,\, 0<p<1,$ is null. N. Kalton and J. H. Shapiro \cite{kalt-shapir75} showed that there exists a quasi-Banach space with trivial dual admitting non-trivial compact endomorphisms. The example is a quotient space of $H^p,\, 0<p<1.$ Here, $H^p,\, 0<p<1,$ denotes the classical Hardy quasi-Banach spaces of analytic functions in the unit disk of $\mathbb C$. \end{remark} A $p$-\emph{norm}, where $0<p\le 1,$ is a mapping $\|\cdot\|:X\to\mathbb{R}_+$ satisfying (QN1), (QN2) and \begin{enumerate} \item[(QN$3'$) ] ${\displaystyle \|x+y\|^p\le \|x\|^p+\|y\|^p\,,}$ for all $x,y\in X$.\end{enumerate} The quasi-norms of the spaces $L^p[0,1]$ and $\ell^p,\, 0<p<1,$ are $p$-norms, i.e. \begin{align*} &\|f+g\|^p_p\le \|f\|^p_p+\|g\|^p_p\;\mbox{ and}\\ &\|x+y\|^p_p\le \|x\|^p_p+\|y\|^p_p, \end{align*} for all $f,g\in L^p[0,1]$ and $x,y\in \ell^p.$ A famous result of T. Aoki \cite{aoki42} and S. Rolewicz \cite{rolew57} says that on any quasi-normed space $(X,\|\cdot\|)$ there exists a $p$-norm equivalent to $\|\cdot\|$, where $p$ is determined from the equality $2^{1/p-1}=k,$ $k$ being the constant from (QN3). Let $0<p\le 1.$ A subset $A$ of a vector space $X$ is called $p$-\emph{convex} if $\alpha x+\beta y\in A$ for all $x,y\in A$ and all $\alpha,\beta\ge 0$ with $\alpha^p+\beta^p=1, $ and $p$-\emph{absolutely convex} if $\alpha x+\beta y\in A$ for all $x,y\in A$ and all $\alpha,\beta\in\mathbb{K}$ with $|\alpha|^p+|\beta|^p\le 1. $ For $p=1$ one obtains the usual convex and absolutely convex sets, respectively. A TVS $X$ is $p$-normable if and only if it has a bounded $p$-convex 0-neighborhood, see \cite[p. 161]{Kothe}. One shows first that under this hypothesis there exists a bounded $p$-absolutely convex neighborhood $V$ of 0 and one defines the $p$-norm as the Minkowski functional corresponding to $V$, i.e. $\|x\|=\inf\{t : t>0,\, x\in tV\}.$\index{$p$-norm}\index{$p$-normed space}\index{set!$p$-convex}\index{set!$p$-absolutely convex} \begin{remark} In K\"othe \cite{Kothe} by a $p$-norm on a vector space $X$ one understands a mapping $\Vert \cdot\Vert':X\to\mathbb{R}_+$ such that $$ \Vert x\Vert '=0 \iff x=0, \quad \Vert \alpha x\Vert '=|\alpha|^p\Vert x\Vert '\;\;\mbox{ and }\;\; \Vert x+y\Vert '\le \Vert x\Vert '+\Vert y\Vert '\,,$$ for all $x,y\in X$ and $\alpha\in\mathbb{K}$. In this case the ``$p$-norm'' corresponding to a bounded absolutely $p$-convex 0-neighborhood is given by $\Vert x\Vert '=\inf\{t^p : t>0,\, x\in tV\}.$ It follows that $\Vert \cdot\Vert $ is a $p$-norm in the sense given here if and only if $\Vert \cdot\Vert ^p$ is a $p$-norm in the sense given in \cite{Kothe}. \end{remark} \textbf{The Banach envelope.}\index{quasi-Banach space!Banach envelope} Let $(X,\|\cdot\|)$ be a quasi-Banach space and $B_X=\{x\in X : \|x\|\le 1\}$ its closed unit ball. Denote by $\|\cdot\|_C$ the Minkowski functional of the set $C=\operatorname{co}(B_X)$. It is obvious that $\|\cdot\|_{C}$ is a seminorm on $X$ and a norm on the quotient space $X/N$, where $N=\{x\in X :\|x\|_C=0\}. $ Since, for $x\ne 0,\, x':=x/\|x\|\in B_X\subseteq C,$ it follows $\|x'\|_C\le 1,$ that is $\|x\|_C\le \|x\|.$ Denote by $\widehat X$ the completion of $X/N$ with respect to the quotient-norm $\|\cdot\|_{\widehat X}$ corresponding to $\|\cdot\|_C$, whose (unique) extension to $\widehat X$ is denoted also by $\|\cdot\|_{\widehat X}$.
It follows that $\|\widehat x\|_{\widehat X}\le \|x\|$ for all $x\in X$, hence the embedding $j:X\to\widehat X$ is continuous and one shows that $j(X)$ is dense in $\widehat X$. The space $\widehat X$ is called the \emph{Banach envelope} of the quasi-Banach space $X$. We distinguish two situations. I. $X$ \emph{has trivial dual}: $X^*=\{0\}$. In this case $C=\operatorname{co}(B_X)=X$ (see \cite[Proposition 2.1, p. 16]{Kalton-F-sp}) and so $\|\cdot\|_C\equiv 0,\ N=X$ and $X/X=\{\widehat 0\}$. It follows that $\widehat X=\{0\}$ and $\widehat X^*=\{0\}=X^*.$ In particular $\widehat{L^p}=\{0\}$, where $L^p=L^p[0,1].$ II. $X$ \emph{has a separating dual}. This means that for every $x\ne 0$ there exists $x^*\in X^*$ with $x^*(x)\ne 0$ (e.g. $X=\ell^p$ with $0<p<1$). In this case $\|\cdot\|_C$ is a norm on $X$ which can be calculated by the formula \begin{equation}\label{eq.quasi-Banach-envel} \|x\|_C=\sup\{|x^*(x)| : x^*\in X^*,\, \|x^*\|\le 1\}\,,\end{equation} where the norm of $x^*\in X^*$ is given by \eqref{norm-quasi-Banach}. Consequently $N=\{0\}$, $X/N=X$ and we can consider $X$ as a dense subspace of $\widehat X$ (in fact, continuously and densely embedded in $\widehat X$). It follows that: \begin{enumerate} \item[\rm (i) ] every continuous linear functional on $(X,\|\cdot\|)$ has a unique norm preserving extension to $(\widehat X,\|\cdot\|_{\widehat X})$; \item[\rm (ii) ] every continuous linear operator $T$ from $(X,\|\cdot\|)$ to a Banach space $Y$ has a unique norm preserving extension $\widehat T:(\widehat X,\|\cdot\|_{\widehat X})\to Y$. \end{enumerate} Consequently $(X,\|\cdot\|)^*$ can be identified with $(\widehat X,\|\cdot\|_{\widehat X})^*$ and the norm $\|\cdot\|_{\widehat X}$ can also be calculated by the formula \eqref{eq.quasi-Banach-envel} for all $x\in \widehat X$. One shows that the Banach envelope of $\ell^p$ is $\ell^1$, for every $0<p<1$. Another way to define the Banach envelope in the case of a quasi-Banach space with separating dual is via the embedding $j_X$ of $X$ into its bidual $X^{**}$ (see \cite{Kalton-F-sp}). Since $X^*$ separates the points of $X$, it follows that $j_X$ is injective of norm $\|j_X\|\le 1$ (in this case one cannot prove that $\|j_X\|= 1$ because the Hahn-Banach extension theorem may fail for non-locally convex spaces). By \eqref{eq.quasi-Banach-envel} $$ \|x\|_C=\sup\{|x^*(x)| : \|x^*\|\le 1\}=\|j_X(x)\|_{X^{**}}\,,$$ so we can identify $\widehat X$ with the closure of $j_X(X)$ in $(X^{**},\|\cdot\|_{X^{**}})$. \subsection{Lipschitz functions and quasi-normed spaces} It turns out that some results concerning Banach space-valued Lipschitz functions fail in the quasi-Banach case and, in some cases, the validity of some of them forces the quasi-Banach space to be locally convex, i.e. a Banach space. In this subsection we consider only spaces over $\mathbb R.$ Let $(Z,d)$ be a b-metric space and $(Y,\|\cdot\|)$ a quasi-normed space. A function $f:Z\to Y$ is called Lipschitz if there exists $L\ge 0$ (called a Lipschitz constant for $f$) such that \begin{equation*} \|f(z)-f(z')\|\le L d(z,z')\,, \end{equation*} for all $z,z'\in Z$. One denotes by $\operatorname{Lip}(Z,Y)$ the space of all Lipschitz functions from $Z$ to $Y$. The \emph{Lipschitz norm} $\|f\|_L$ of $f$ is defined by $$ \|f\|_L=\sup\left\{\frac{\|f(z)-f(z')\|}{d(z,z')} : z,z'\in Z,\, z\ne z'\right\}\,.$$ It follows that $\|f\|_L$ is the smallest Lipschitz constant for $f$. Since $\|f\|_L=0$ if and only if $f$ is constant, $\|\cdot\|_L$ is actually only a seminorm on $\operatorname{Lip}(Z,Y)$.
To obtain a norm, one considers a fixed element $z_0\in Z$ and the space $$\operatorname{Lip}_0(Z,Y)=\{f\in\operatorname{Lip}(Z,Y) : f(z_0)=0\}\,.$$ If $Z=X$, where $X$ is a quasi-normed space, then one takes $0$ for the fixed point $z_0$, and, in this case, $\operatorname{Lip}_0(X,Y)$ is a quasi-normed space, that is, $$ \|f+g\|_L\le k(\|f\|_L+\|g\|_L)\,,$$ for $f,g\in \operatorname{Lip}_0(X,Y)$, where $k\ge 1$ is the constant from (QN3). It is complete, provided $Y$ is a quasi-Banach space. If $Y=\mathbb R$, then one uses the notation $\operatorname{Lip}(X),\, \operatorname{Lip}_0(X)$ and $\operatorname{Lip}_0(X)$ is called the Lipschitz dual of the quasi-normed space $X$. We noted that the space $L^p=L^p[0,1]$ has trivial dual. F. Albiac \cite{albiac08} proved that it also has a trivial Lipschitz dual, i.e. $\operatorname{Lip}_0(L^p)=\{0\}.$ Later he showed that this is a more general phenomenon. \begin{prop}[F. Albiac \cite{albiac11}]Let $(X,\|\cdot\|)$ be a quasi-Banach space and $$ |||x|||:=\sup\{f(x) : f\in\operatorname{Lip}_0(X),\, \|f\|_L\le 1\}\,,\quad x\in X\,.$$ Then \begin{enumerate} \item[\rm (i)]\;\; $|||\cdot|||$ is a seminorm on $X$; \item[\rm (ii)]\;\; if $\operatorname{Lip}_0(X)$ is nontrivial, then $X$ has a nontrivial dual, i.e. $X^*\ne\{0\}$; \item[\rm (iii)]\;\; if $X$ has a separating Lipschitz dual, then $X$ has a separating (linear) dual and $|||\cdot|||$ is a norm on $X$. \end{enumerate}\end{prop} One says that a family $\mathcal F$ of real-valued functions on a quasi-normed space $X$ is separating if for every $x\ne y$ in $X$ there exists $f\in\mathcal F$ with $f(x)\ne f(y).$ It is known that every Lipschitz function $f$ from a subset of a metric space $(X,d)$ to $\mathbb R$ admits an extension to $X$ which is Lipschitz with the same Lipschitz constant (McShane's extension theorem). The following result shows that the validity of this result for every subset of a quasi-Banach space $X$ forces this space to be a Banach space. \begin{prop}[\cite{albiac11}] Let $(X,\|\cdot\|)$ be a quasi-Banach space. If for every subset $Z$ of $X$, every $L$-Lipschitz function $f:Z\to\mathbb{R}$ admits an $L'$-Lipschitz extension, for some $L'\ge L$, then the space $X$ is locally convex, i.e. it is a Banach space. \end{prop} It is known that every continuous linear operator from a quasi-Banach space $X$ to a Banach space $Y$ extends to a norm preserving linear operator from the Banach envelope $\widehat X$ of $X$ to $Y$. F. Albiac \cite{albiac11} has shown that this is true for Lipschitz mappings too: every Lipschitz mapping $f:X\to Y$ admits a unique Lipschitz extension with the same Lipschitz constant $\widehat f:\widehat X\to Y$. Moreover, if $X,Y$ are normed spaces and $f:X\to Y$ is G\^ateaux differentiable on the interval $[x,y]:=\{x+t(y-x) : t\in[0,1]\},$ then \begin{equation}\label{eq1.G-MVT} \|f(x)-f(y)\|\le \|x-y\|\sup\{\|f'(\xi)\| :\xi\in[x,y]\}\,.\end{equation} \begin{prop}[\cite{albiac11}] Let $(X,\|\cdot\|)$ be a quasi-Banach space. If every nonconstant G\^ateaux differentiable Lipschitz function $f:[0,1]\to X$ satisfies the mean value inequality \eqref{eq1.G-MVT} for all $x,y\in [0,1]$, then the space $X$ is locally convex, i.e. it is a Banach space.
\end{prop} Let $\alpha>0.$ A function $f:(X_1,d_1)\to(X_2,d_2)$ between two b-metric spaces $X_1,X_2$ is called \emph{H\"older of order} $\alpha$ if there exists $L\ge 0$ such that \begin{equation}\label{def.Hold-fcs} d_2(f(x),f(y))\le Ld_1(x,y)^\alpha\,, \end{equation} for all $x,y\in X_1.$ As a consequence of the mean value theorem, every function $f$ from $[0,1]$ to a Banach space $X$ which is H\"older of order $\alpha>1$ is constant, a fact that is no longer true if $X$ is a quasi-Banach space. \begin{example} Let $L^p=L^p[0,1]$ for $0<p<1.$ The function $f:[0,1]\to L^p$ given by $f(t)=\chi_{[0,t]}$ satisfies the equality $$ \|f(s)-f(t)\|_p =|s-t|^{1/p}\,,$$ for all $s,t\in [0,1]$, where $\|\cdot\|_p$ is the $L^p$-quasi-norm (see \eqref{def.Lp-qn}). \end{example} Indeed, for $0\le t<s\le 1,$ $$ \|f(s)-f(t)\|_p=\left(\int_t^s\chi^p_{(t,s]}(u) du\right)^{1/p}=|s-t|^{1/p}\,.$$ The Riemann integral of a function $f:[a,b]\to X$, where $[a,b]$ is an interval in $\mathbb{R}$ and $X$ is a Banach space, can be defined as in the real case, by simply replacing the absolute value $|\cdot|$ with the norm sign $\|\cdot\|$, and has properties similar to those from the real case. For instance, the following result is true. \begin{prop}[\cite{albiac11}] Let $X$ be a Banach space. If $f:[a,b]\to X$ is continuous, then \begin{enumerate} \item[\rm(i) ] $f$ is Riemann integrable, and \item[\rm(ii) ] the function \begin{equation}\label{eq1.R-int} F(t)=\int_a^tf(s)ds,\, t\in[a,b]\,,\end{equation} is differentiable with $F'(t)=f(t)$ for all $t\in[a,b]$. \end{enumerate}\end{prop} \begin{remark} However, there is a point where this analogy is broken, namely the Lebesgue criterion of Riemann integrability: a function $f:[a,b]\to\mathbb{R}$ is Riemann integrable if and only if it is continuous almost everywhere on $[a,b]$ (i.e. except for a set of Lebesgue measure zero). In the infinite-dimensional case this criterion does not hold in general, leading to the study of those Banach spaces for which it, or some weaker form of it, is true; see, for instance, \cite{gordon91}, \cite{sofi12}, \cite{sofi16b} and the references quoted therein. \end{remark} In the case of quasi-Banach spaces the situation is different. By a result attributed to S. Mazur and W. Orlicz \cite{maz-orlicz48} (see also \cite[p. 122]{Rolew-MLS}) an $F$-space $X$ is locally convex if and only if every continuous function $f:[0,1]\to X$ is Riemann integrable. M. M. Popov \cite{popov94} investigated the Riemann integrability of functions defined on intervals in $\mathbb{R}$ with values in an $F$-space. Among other results, he proved that a Riemann integrable function $f:[a,b]\to X$ is bounded and that the function $F$ defined by \eqref{eq1.R-int} is uniformly continuous, but there exists a continuous function $f:[0,1]\to\ell^p$, where $0<p<1$, such that the function $F$ does not have a right derivative at $t=0.$ He asked whether any continuous function $f$ from $[0,1]$ to $L^p[0,1], \,0<p<1,$ (or, more generally, to a quasi-Banach space $X$ with $X^*=\{0\}$) admits a primitive. This problem was solved by N. Kalton \cite{kalton96} who proved that if $X$ is a quasi-Banach space with $X^*=\{0\}$, then every continuous function $f:[0,1]\to X$ has a primitive.
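\begin{remark} Let us note in passing (the following short computation is not taken from the papers quoted above, but is immediate from the example given earlier in this subsection) that the function $f(t)=\chi_{[0,t]}\in L^p$, $0<p<1$, is even differentiable with identically vanishing derivative: for $h\ne 0$, $$ \left\|\frac{f(t+h)-f(t)}{h}\right\|_p=\frac{|h|^{1/p}}{|h|}=|h|^{(1-p)/p}\longrightarrow 0\;\mbox{ as }\; h\to 0\,, $$ so $f'(t)=0$ for all $t\in[0,1]$, although $f$ is nonconstant. In particular, the mean value inequality \eqref{eq1.G-MVT} fails for this $f$, and the phenomenon described in Kalton's theorem quoted below already occurs in $L^p$. \end{remark}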
Kalton considered the space $C^1_{Kal}(I,X)$, where $I=[0,1]$ and $X$ is a quasi-Banach space, of all continuously differentiable functions $f:I\to X$ such that the function $\widetilde f:I^2\to X$ given for $s,t\in I$ by $\widetilde f(t,t)=f'(t)$ and $\widetilde f(s,t)=(f(s)-f(t))/(s-t)$ if $s\ne t$, is continuous. It follows that $C^1_{Kal}(I,X)$ is a quasi-Banach space with respect to the quasi-norm $$ \|f\|=\|f(0)\|+\|f\|_L\,.$$ The notation $C^1_{Kal}(I,X)$ was introduced in \cite{albiac-ans12a}; Kalton used the notation $C^1(I;X)$. Denote by $C(I,X)$ the quasi-Banach space (with respect to the sup quasi-norm) of all continuous functions from $I$ to $X$. The \emph{core} of a quasi-Banach space $X$ is the maximal subspace $Z$ of $X$ (denoted by core$(X)$) with $Z^*=\{0\}$. One shows that such a subspace always exists, is unique and closed. Notice that core$(X)=\{0\}$ implies only that $X$ has a nontrivial dual, but not necessarily a separating one. In \cite{albiac-ans12b} it is shown that if $X$ is a quasi-Banach space with core$(X)=\{0\}$, then there exists a continuous function $f:[0,1]\to X$ failing to have a primitive. Kalton, \emph{op. cit}., called a quasi-Banach space $X$ a $D$-\emph{space} if the mapping $$D:C^1_{Kal}(I,X)\to C(I,X)\,,$$ given by $Df=f',$ is surjective and proved the following result. \begin{theo}[\cite{kalton96}] Let $X$ be a quasi-Banach space with core$(X)=\{0\}$. Then $X$ is a $D$-space if and only if $X$ is locally convex (or, equivalently, a Banach space). \end{theo} It is known that every continuously differentiable function from an interval $[a,b]\subseteq\mathbb{R}$ to a Banach space $X$ is Lipschitz with $\|f\|_L=\sup\{\|f'(t)\|:t\in[a,b]\}$ (a consequence of the Mean Value Theorem, see \eqref{eq1.G-MVT}). As was shown in \cite{albiac-ans12a}, this is no longer true in quasi-Banach spaces. \begin{theo} Let $X$ be a non-locally convex quasi-Banach space. Then there exists a function $F:I\to X$ such that: \begin{enumerate} \item[\rm (i) ] $F$ is continuously differentiable on $I$; \item[\rm (ii) ] $F'$ is Riemann integrable on $I$ and $F(t)=\int_0^tF'(s)ds,\, t\in I$; \item[\rm (iii) ] $F$ is not Lipschitz on $I$.\end{enumerate} \end{theo} In \cite{albiac-ans13} it is proved that the usual rule for the calculation of the integral (called Barrow's rule by the authors, known also as the Leibniz rule) holds in the quasi-Banach case in the following form. \begin{prop} Let $X$ be a quasi-Banach space with separating dual. If $F:[a,b]\to X$ is differentiable with Riemann integrable derivative, then $$ \int_a^bF'(t) dt=F(b)-F(a)\,.$$ \end{prop} Another pathological result concerning differentiability of quasi-Banach space valued Lipschitz functions was obtained by N. Kalton \cite[Theorem 3.3]{kalton81}. \begin{theo}Let $X$ be an $F$-space with trivial dual. Then for every pair of distinct points $x_0,x_1\in X$ there exists a function $f:[0,1]\to X$ such that $f(0)=x_0,\, f(1)=x_1$ and $$ \lim_{|s-t|\to0}\frac{f(s)-f(t)}{s-t}=0\;\mbox{ uniformly for }\; s,t\in[0,1]\,.$$ In particular $f'(t)=0$ for all $t\in[0,1].$ \end{theo} \begin{remark} N.
Kalton \cite[Corollary 3.4]{kalton81} also remarked that if $X$ is an $F$-space and $x\in X\setminus\{0\}$, then a function $f:[0,1]\to X$ such that $f(0)=0$, $f(1)=x$ and $f'(t)=0$ for all $t\in [0,1]$ exists if and only if $x\in$ core$(X).$ \end{remark} If $X$ is a Banach space and $f:[0,1]\to X$ is continuous then it is Riemann integrable and the average function ${\rm Ave}[f]:[a,b]\times[a,b]\to X$, given by $$ {\rm Ave}[f](s,t)=\begin{cases} \frac1{t-s}\int_s^tf(u)du \quad &\mbox{if }\; a\le s<t\le b,\\ f(c)\quad & \mbox{if } \; s=t=c\in[a,b],\\ \frac1{s-t}\int_t^sf(u)du \quad &\mbox{if }\;a\le t<s\le b, \end{cases}$$ is jointly continuous on $[a,b]\times[a,b]$, and so, separately continuous and bounded. Some pathological properties of the average function in the quasi-Banach case are examined in \cite{albiac-ans12a}, \cite{albiac-ans14} and \cite{popov94}. The analog of the Radon-Nikod\'ym Property for quasi-Banach spaces and its connections with the differentiability of Lipschitz mappings and martingales are discussed in \cite{albiac-ans16}. \subsection{Lipschitz functions on spaces of homogeneous type} Let $(X,d,\mu)$ be a space of homogeneous type (see Subsection \ref{Ss.homog-sp}). By $B$ we shall denote balls of the form $B(x,r).$ If $\varphi$ is a function integrable on bounded sets, then the \emph{mean value} of $\varphi$ on the ball $B$ is defined by \begin{equation}\label{def.mv} m_B(\varphi)=\mu(B)^{-1}\int_B\varphi(x)d\mu(x)\,. \end{equation} For $1\le q<\infty$ and $0<\beta<\infty$ one denotes by $\operatorname{Lip}(\beta,q)$ the set of all functions $\varphi$, integrable on bounded sets, for which there exists a constant $C\ge 0$ such that \begin{equation}\label{eq1.maci-Lip} \left(\frac{1}{\mu(B)}\int_B|\varphi(x)-m_B(\varphi)|^qd\mu(x)\right)^{1/q}\le C\mu(B)^\beta\,, \end{equation} for all balls $B$. The least constant $C$ for which \eqref{eq1.maci-Lip} holds will be denoted by $\|\varphi\|_{\beta,q}\,.$ We shall denote by $\operatorname{Lip}(\beta)$ the set of all functions $\varphi$ on $X$ such that there exists a constant $C\ge 0$ satisfying \begin{equation}\label{eq2.maci-Lip} |\varphi(x)-\varphi(y)|\le Cd(x,y)^\beta\,, \end{equation} for all $x,y\in X$, i.e.\ $\varphi$ is H\"older of order $\beta$ (see \eqref{def.Hold-fcs}). The least $C\ge 0$ for which \eqref{eq2.maci-Lip} holds is denoted by $\|\varphi\|_{\beta}\,.$ The following results concerning these classes of Lipschitz functions were proved in \cite{maci-sego79a}. \begin{theo} Let $(X,d,\mu)$ be a space of homogeneous type. Then there exists a constant $C\ge 0$ (depending on $\beta$ and $q$ only) such that for every $\varphi\in\operatorname{Lip}(\beta,q)$ there exists a function $\psi$ satisfying \begin{align*} &{\rm (i)}\;\;\quad \varphi(x)=\psi(x)\; \mbox{ a.e. on }\; X,\; \mbox{and} \\ & {\rm (ii)}\;\, \quad |\psi(x)-\psi(y)|\le C\|\varphi\|_{\beta,q}\mu(B)^\beta\,, \end{align*} for any ball $B$ containing the points $x,y$. \end{theo} \begin{theo} Let $(X,d,\mu)$ be a space of homogeneous type. Then, given $0<\beta<\infty$, there exists a b-metric $\delta$ on $X$ such that $(X,\delta,\mu)$ is a normal space of homogeneous type and for every $1\le q<\infty$ we have \begin{equation*} \varphi\in\operatorname{Lip}(\beta,q)\;\mbox{ of }\; (X,d,\mu)\iff\exists\psi \in\operatorname{Lip}(\beta)\;\mbox{ of }\; (X,\delta,\mu)\;\mbox{with}\; \varphi\overset{{\rm a.e.}}{=}\psi.
\end{equation*} Moreover, the norms $\|\varphi\|_{\beta,q}$ and $\|\psi\|_\beta$ are equivalent. \end{theo} \providecommand{\bysame}{\leavevmode\hbox to3em{\hrulefill}\thinspace} \providecommand{\MR}{\relax\ifhmode\unskip\space\fi MR } \providecommand{\MRhref}[2]{ \href{http://www.ams.org/mathscinet-getitem?mr=#1}{#2} } \providecommand{\href}[2]{#2} \end{document}
\begin{document} \author{ Andrey Gogolev} \title[Pathological foliations]{How typical are pathological foliations in partially hyperbolic dynamics: an example} \begin{abstract} We show that for a large space of volume preserving partially hyperbolic diffeomorphisms of the 3-torus with non-compact central leaves the central foliation generically is non-absolutely continuous. \end{abstract} \maketitle \section{Introduction} Let $M$ be a smooth Riemannian manifold. In this paper we will consider continuous foliations of $M$ with smooth leaves. A {\it continuous foliation $\mathcal W$ with smooth leaves $\mathcal W(x)$, $x\in M$,} is a foliation given by continuous charts whose leaves are smoothly immersed and whose tangent distribution $T\mathcal W$ is continuous on $M$. Riemannian metric induces volume $m$ on $M$ as well as volume on the leaves of $\mathcal W$. Following Shub and Wilkinson~\cite{SW} we call such foliation $\mathcal W$ {\it pathological} if there is a full volume set on $M$ that meets every leaf of the foliation on a set of leaf-volume zero. According to Fubini Theorem, smooth foliations cannot be pathological, but continuous foliations might happen to be pathological. This phenomenon naturally appears for central foliations of partially hyperbolic diffeomorphisms and is also known as ``Fubini's nightmare." A diffeomorphism $f$ is called partially hyperbolic if the tangent bundle $TM$ splits into a $Df$-invariant direct sum of an exponentially contracting stable bundle, an exponentially expanding unstable bundle and a central bundle of intermediate growth (precise definitions appear in the next section). The first example of a pathological foliation was constructed by Katok and it has been circulating in dynamics community since the eighties. Katok suggested to consider one parameter family $\{A_t, t\in \mathbb R/\mathbb Z\}$ of area-preserving Anosov diffeomorphisms $C^1$-close to a hyperbolic automorphism $A$ of the 2-torus. By Hirsch-Pugh-Shub Theorem diffeomorphism $F(x,t)=(A_t(x),t)$ is partially hyperbolic with uniquely integrable central distribution. Then, under certain generic conditions (the metric entropy or periodic eigendata of $A_t$ should vary with $t$) on path $A_t$, one can show that the central foliation by embedded circles is pathological. See~\cite{Pes}, Section~7.4, or~\cite{HassP}, Section~6, for detailed constructions with proofs. A version of above construction on the square appeared in expository paper by Milnor~\cite{Milnor}. Milnor remarks that a different version of the construction, based on tent maps, has also been given by Yorke. Shub and Wilkinson~\cite{SW} came across the same phenomenon when looking for volume preserving non-uniformly hyperbolic systems in the neighborhood of $F_0\colon (x,t)\mapsto(A_0(x),t)$. They have showed existence of $C^1$-open set of diffeomorphisms in the $C^1$-neighborhood of $F_0$ with non-zero central exponent. Then one can argue that the central foliation is pathological using the following ``Ma\~n\'e's argument". By Oseledets' Theorem the set of Lyapunov regular points has full volume. If any central leaf intersected the set of regular points by a set of positive Lebesgue measure, then it would increase exponentially in length under the dynamics. But the lengths of central leaves are uniformly bounded. Work~\cite{SW} was further generalized by Ruelle~\cite{Ruelle}. Ruelle and Wilkinson~\cite{RW} also showed that conditional measures are in fact atomic. 
Case of higher dimensional central leaves was considered by Hirayama and Pesin~\cite{HirP}. They showed that central foliation is not absolutely continuous if it has compact leaves and the sum of the central exponents is nonzero on a set of positive measure. This work is devoted to the study of pathological foliations with one-dimensional non-compact leaves. Consider a hyperbolic automorphism $L$ of the 3-torus $\mathbb T^2T$ with eigenvalues $\nu$, $\mu$ and $\lambda$ such that $\nu<1<\mu<\lambda$. One can view $L$ as a partially hyperbolic diffeomorphism. It was noted in~\cite{GG} and independently in~\cite{SX} that for a small $C^1$-open set in the neighborhood of $L$ ``Ma\~n\'e's argument" can be applied to show that corresponding central foliations are pathological. In this paper we apply a completely different approach to show that {\it there is an open and dense set $\mathcal U$ of a large $C^1$-neighborhood of $L$ in the space of volume preserving partially hyperbolic diffeomorphisms such that all diffeomorphisms from $\mathcal U$ have pathological central foliations.} This result confirms a conjecture from~\cite{HirP}. {\bf Acknowledgement.} The author is grateful to Boris Hasselblatt and Anatole Katok for listening to the preliminary version of the proof of the result. The author would like to thank the referees for useful feedback. \section{Preliminaries} Here we introduce all necessary notions and some standard tools that we need for precise formulation of the result and the proof. The reader may consult~\cite{Pes} for an introduction on partially hyperbolic dynamics. \begin{definition} A diffeomorphism $f$ is called {\it Anosov} if there exists a $Df$-invariant splitting of the tangent bundle $TM=E^s_f\oplus E^u_f$ and constants $\lambda\in(0,1)$ and $C>0$ such that for $n>0$ $$ \|Df^nv\|\le C\lambda^n\|v\|,\;v\in E^s\;\;\;\mbox{and}\;\;\;\|Df^{-n}v\|\le C\lambda^{n}\|v\|,\;v\in E^u. $$ \end{definition} \begin{definition} A diffeomorphism $f$ is called {\it partially hyperbolic} if there exists a $Df$-invariant splitting of the tangent bundle $TM=E^s_f\oplus E^c_f\oplus E^u_f$ and positive constants $\nu_-<\nu_+<\mu_-<\mu_+<\lambda_-<\lambda_+$, $\nu_+<1<\lambda_-$, and $C>0$ such that for $n>0$ \begin{multline*} {\frac1C\nu_-^n\|v\|\le\|D(f^n)(x)v\|\le C\nu_{+}^n\|v\|,\;\;\; v\in E^s_f(x),}\\ \shoveleft{\frac1C\mu_{-}^n\|v\|\le\|D(f^n)(x)v\|\le C\mu_{+}^n\|v\|,\;\;\; v\in E_{f}^c(x),}\\ \shoveleft{\frac1C\lambda_{-}^n\|v\|\le\|D(f^n)(x)v\|\le C\lambda_{+}^n\|v\|,\;\;\; v\in E_{f}^u(x).} \end{multline*} \label{def_phd_usual} \end{definition} The following definition is equivalent to the above one. We will switch between the definitions when convenient. \begin{definition} A diffeomorphism $f$ is called {\it partially hyperbolic} if there exists a Riemannian metric on $M$, a $Df$-invariant splitting of the tangent bundle $TM=E^s_f\oplus E^c_f\oplus E^u_f$ and positive constants $\nu_-<\nu_+<\mu_-<\mu_+<\lambda_-<\lambda_+$, $\nu_+<1<\lambda_-$, such that \begin{multline*} {\nu_-\|v\|\le\|Df(x)v\|\le \nu_{+}\|v\|,\;\;\; v\in E^s_f(x),}\\ \shoveleft{\mu_{-}\|v\|\le\|Df(x)v\|\le \mu_{+}\|v\|,\;\;\; v\in E_{f}^c(x),}\\ \shoveleft{\lambda_{-}\|v\|\le\|Df(x)v\|\le \lambda_{+}\|v\|,\;\;\; v\in E_{f}^u(x).} \end{multline*} \label{def_phd} \end{definition} The distributions $E^s_f$, $E^c_f$ and $E^u_f$ are continuous. Moreover, distributions $E^s_f$ and $E_f^u$ integrate uniquely to foliations $\mathcal W_f^s$ and $\mathcal W_f^u$. 
When it does not lead to a confusion we drop dependence on the diffeomorphism. By $m_{\mathcal W^\sigma(\cdot)}$ or $m_\sigma$ we denote induced Riemannian volume on the leaves of $\mathcal W^\sigma$, $\sigma=s, c, u$. Induced volume on other submanifolds such as transversals to a foliation will be denoted analogously with appropriate subscript. We write $d$ for the distance induced by the Riemannian metric and $d^\sigma(\cdot,\cdot)$ for the distance induced by the restriction of the Riemannian metric to $T\mathcal W^\sigma$. If expanding foliation $\mathcal W^u$ is one-dimensional then it is convenient to work with the pseudo-distance $\tilde d^u(\cdot,\cdot)$ that is very well adapted to the dynamics. Let $$ D_f^u(x)=\|Df(x)\big|_{E_f^{u}(x)}\| $$ and $$ \rho_x(y)=\prod_{n\ge 1}\frac{D_f^u(f^{-n}(x))}{D_f^u(f^{-n}(y))}. $$ This infinite product converges and gives a continuous positive density $\rho_x(\cdot)$ on the leaf $\mathcal W^u(x)$. Define {\it pseudo-distance $\tilde d^u$} by integrating density $\rho_x(\cdot)$ $$ \tilde d^u(x,y)=\int_x^y\rho_x(z)dm_{\mathcal W^u(x)}(z). $$ Obviously, pseudo-distance is not even symmetric, still it is useful for computations as it satisfies the formula $$ \tilde d^u(f(x),f(y))=D_f^u(x)\tilde d^u(x,y) $$ verified by the following simple computation \begin{multline*} \tilde d^u(f(x),f(y))=\int_{f(x)}^{f(y)}\rho_{f(x)}(z)dm_{\mathcal W^u(f(x))}(z)\\ =\int_x^y\rho_{f(x)}(f(z))D_f^u(z)dm_{\mathcal W^u(x)}(z)\\ =\int_x^y\frac{D_f^u(x)}{D_f^u(z)}\rho_{x}(z)D_f^u(z)dm_{\mathcal W^u(x)}(z) =D_f^u(x)\tilde d^u(x,y). \end{multline*} A compact domain inside a leaf $\mathcal W^\sigma(x)$ of a foliation $\mathcal W^\sigma$ will be called {\it plaque} and will be denoted by $\EuScript P^\sigma$. We shall also write $\EuScript P^\sigma(x)$ when we need to indicate dependence on the point. Given a transversal $T$ to $\mathcal W$, consider a compact domain $X$ which is a union of plaques of $\mathcal W$, that is, $X=\cup_{x\in T}\EuScript P(x)$. Then by Rokhlin's Theorem there exists a unique system of conditional measures $\mu_x$, $x\in T$, such that for any continuous function $\varphi$ on $X$ $$ \int_X\varphi dm_X=\int_T\int_{\EuScript P(x)}\varphi d\mu_xd\hat m, $$ where $\hat m$ is projection of $m_X$ to $T$. \begin{definition} \label{def_ac} Foliation $\mathcal W$ is called {\it absolutely continuous} with respect to the volume $m$ if for any $T$ and $X$ as above the conditional measures $\mu_x$ have $L^1$ densities with respect to the volume $m_{\EuScript P(x)}$ for $\hat m$ a.~e. $x$. \end{definition} Now consider a compact domain $X$ as above and two transversal $T_1$ and $T_2$ so that $X=\cup_{x\in T_1}\EuScript P(x)=\cup_{x\in T_2}\EuScript P(x)$ with the same system of plaques. Then the holonomy map $p\colon T_1\to T_2$ along $\mathcal W$ is a homeomorphism. \begin{definition} Foliation $\mathcal W$ is called {\it transversally absolutely continuous} if any holonomy map $p$ as above is absolutely continuous, that is, $p_*m_{T_1}$ is absolutely continuous with respect to $m_{T_2}$. \end{definition} Transverse absolute continuity is a stronger property than absolute continuity. Stable and unstable foliations of Anosov and partially hyperbolic diffeomorphisms are known to be transversally absolutely continuous. \section{Formulation of the result} Let $L$ be a hyperbolic automorphism of 3-torus $\mathbb T^2T$ with positive real eigenvalues $\nu$, $\mu$ and $\lambda$, $\nu<1<\mu<\lambda$. 
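For instance (a concrete choice, given here only for illustration), one may take for $L$ the automorphism induced by the symmetric matrix $$ \begin{pmatrix} 2&1&0\\ 1&2&1\\ 0&1&1 \end{pmatrix}\in SL(3,\mathbb Z), $$ whose characteristic polynomial $x^3-5x^2+6x-1$ has three real roots $\nu\approx0.198<1<\mu\approx1.555<\lambda\approx3.247$.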
Observe that $L$ can be viewed as a partially hyperbolic diffeomorphism with $L$-invariant splitting $T\mathbb T^3=E_L^s\oplus E_L^{wu}\oplus E_L^{su}$, where ``wu'' and ``su'' stand for ``weak unstable'' and ``strong unstable''. Consider the space $\mbox{\it{Diff}}_m^{\;r}(\TTT)$ of $C^r$, $r\ge 2$, diffeomorphisms of $\mathbb T^3$ that preserve volume $m$. Let $\mathcal U\subset\mbox{\it{Diff}}_m^{\;r}(\TTT)$ be the set of Anosov diffeomorphisms that are conjugate to $L$ via a conjugacy homotopic to the identity and that are also partially hyperbolic. It is known that $\mathcal U$ is $C^1$-open (\emph{e.g., } see~\cite{Pes}, Theorem~3.6). Given $f$ in $\mathcal U$ denote by $E_f^s\oplus E_f^{wu}\oplus E_f^{su}$ the corresponding $f$-invariant splitting. According to~\cite{BBI} the distributions $E_f^{wu}$, $E_f^s\oplus E_f^{wu}$ and $E_f^u=E_f^{wu}\oplus E_f^{su}$ integrate uniquely to invariant foliations $\mathcal W^{wu}$, $\mathcal W^{s\oplus wu}$ and $\mathcal W^{u}$. It is known that $\mathcal W^s$ and $\mathcal W^{u}$ are $C^1$ and $\mathcal W^{su}$ is $C^1$ when restricted to the leaves of $\mathcal W^{u}$ (see, \emph{e.g., }~\cite{Hass, PSW}). We shall need the following statement that shows that the structure of the weak unstable foliation is essentially linear. \begin{prop} \label{prop_slow_to_slow} Let $f\in\mathcal U$ and let $h_f$ be the conjugacy to the linear automorphism---$h_f\circ f=L\circ h_f$. Then $h_f(\mathcal W^{wu}_f)=\mathcal W^{wu}_L$. \end{prop} The proof will be given in the appendix. \begin{theoremA} There is a $C^1$-open and $C^{r}$-dense set $\mathcal V\subset\mathcal U$ such that $f\in\mathcal V$ if and only if the central foliation $\mathcal W^{wu}$ is non-absolutely continuous with respect to the volume $m$. \end{theoremA} \begin{remark} Since we know that $\mathcal W^{u}$ is $C^1$ the latter is equivalent to $\mathcal W^{wu}$ being non-absolutely continuous on almost every plaque of $\mathcal W^{u}$ with respect to the induced volume on the plaque. \end{remark} Now we describe the set $\mathcal V$. Given $f\in\mathcal U$ and given a periodic point $x$ of period $p$ let $$ \lambda^{su}(x)=\|Df^p(x)\big|_{E_f^{su}(x)}\|^{1/p}. $$ Then the set $\mathcal V$ can be characterized as follows. $$ \mathcal V=\{f\in\mathcal U:\mbox{there exist periodic points\;\;} x \mbox{\;and\;} y \mbox{\;with\;} \lambda^{su}(x)\neq\lambda^{su}(y)\}. $$ \begin{prop} \label{prop1} $$ \mathcal U\backslash\mathcal V=\{f\in\mathcal U:\mbox{\textup{for any periodic point}\;\;} x \;\;\lambda^{su}(x)=\lambda\}. $$ \end{prop} We defer the proof to the appendix. \section{Related questions} Our result does not give any information about the structure of the singular conditional measures. \begin{q} Given $f\in\mathcal V$, what can one say about the singular conditional measures on $\mathcal W^{wu}$? Are they atomic? What can be said about the Hausdorff dimension of the conditional measures? \end{q} It seems that our method can be generalized to the analysis of the central foliation of partially hyperbolic diffeomorphisms in a $C^1$ neighborhood of $F_0\colon (x,t)\mapsto(A_0(x),t)$. \begin{q} Is it true that a generic perturbation of $F_0$ has a non-absolutely continuous central foliation? Can one give explicit conditions in terms of stable and unstable Lyapunov exponents of periodic central leaves for non-absolute continuity? \end{q} It would be interesting to generalize Theorem~A to the higher dimensional setting.
Namely, let $L$ be an Anosov automorphism that leaves invariant a partially hyperbolic splitting $E_L^s\oplus E_L^{wu}\oplus E_L^{su}$, where $E_L^{wu}\oplus E_L^{su}$ is the splitting of the unstable bundle into weak and strong unstable subbundles. Let $n_1$, $n_2$ and $n_3$ be the dimensions of $E_L^s$, $E_L^{wu}$ and $E_L^{su}$ respectively. Let $\mathcal U$ be a small $C^1$ neighborhood of $L$ in the space of volume preserving diffeomorphisms. \begin{q} Is it possible to describe the set $$ \{f\in\mathcal U: \mathcal W^{wu}\; \mbox{is not absolutely continuous} \} $$ in terms of strong unstable spectra at periodic points in the higher dimensional setting? \end{q} It will become clear from the discussion in the next section that the value of $n_1$ is not important. Also it seems likely that our approach works in the case when $n_2>1$ and $n_3=1$, and gives a result analogous to Theorem~A (the author does not claim to have done this). The picture gets much more complicated when $n_3>1$. It is possible that the major link in our argument $$ (\mathcal W^{wu}\;\mbox{is Lipschitz inside}\;\; \mathcal W^{u})\Leftrightarrow(\mathcal W^{wu}\;\mbox{is absolutely continuous inside}\;\;\mathcal W^{u}) $$ is no longer valid in this setting. However it is not immediately clear how to construct a counterexample. \section{Outline of the proof} Clearly $\mathcal V$ is $C^1$-open. Given a diffeomorphism $f\in\mathcal U\backslash\mathcal V$ we can compose it with a special diffeomorphism $h$ that is $C^r$-close to the identity and equal to the identity outside a small neighborhood of a fixed point, so that the strong unstable eigenvalues of $f$ and of $h\circ f$ at the fixed point are different. This gives that $\mathcal V$ is $C^r$-dense. To show that weak unstable foliations of diffeomorphisms from $\mathcal V$ are non-absolutely continuous we start with some simple observations. First, notice that due to ergodicity the conditional measures cannot have absolutely continuous and singular components simultaneously. Next, it follows from the absolute continuity of $\mathcal W^{u}$ and the uniqueness of the system of conditional measures of $m$ that the conditional measures of $m$ on the leaves of $\mathcal W^{wu}$ are equivalent to the conditional measures of the induced volume on the leaves of $\mathcal W^{u}$. Therefore we only need to look at two-dimensional plaques of $\mathcal W^{u}$ foliated by plaques of $\mathcal W^{wu}$. It turns out that absolute continuity of $\mathcal W^{wu}$ inside the leaves of $\mathcal W^{u}$ is equivalent to $\mathcal W^{wu}$ being Lipschitz inside $\mathcal W^{u}$. The Lipschitz property, in turn, can be related to the periodic eigenvalue data along $\mathcal W^{su}$. Pick a plaque $\EuScript P^u$ of $\mathcal W^{u}$ and let $T_1\subset\EuScript P^u$ and $T_2\subset\EuScript P^u$ be two smooth compact transversals to $\mathcal W^{wu}$ with holonomy map $p\colon T_1\to T_2$. If $p$ is Lipschitz for any choice of plaque and transversals then we say that $\mathcal W^{wu}$ is {\it Lipschitz inside} $\mathcal W^{u}$. Theorem~A follows from the following two lemmas. \begin{lemma} \label{lemma_Lip_pd} The foliation $\mathcal W^{wu}$ is Lipschitz inside $\mathcal W^{u}$ if and only if $f\in\mathcal U\backslash\mathcal V$. \end{lemma} \begin{lemma} \label{lemma_Lip_AC} The foliation $\mathcal W^{wu}$ is Lipschitz inside $\mathcal W^{u}$ if and only if $\mathcal W^{wu}$ is absolutely continuous inside $\mathcal W^{u}$. \end{lemma} \section{Proofs} Let us begin with a useful observation.
If one needs to show that $\mathcal W^{wu}$ is Lipschitz in a plaque $\EuScript P^u$ then it is sufficient to check the Lipschitz property of the holonomy map for pairs of transversals that belong to a smooth family that foliates $\EuScript P^u$, \emph{e.g., } plaques of $\mathcal W^{su}$. Therefore we can always assume that the transversals are plaques of $\mathcal W^{su}$. \begin{proof}[Proof of Lemma~\ref{lemma_Lip_pd}] First assume that $f\in\mathcal U\backslash\mathcal V$. Then the Lipschitz property of $\mathcal W^{wu}$ is shown below by a standard argument that uses the Livshits Theorem. Let $T_1$ and $T_2$ be two local leaves of $\mathcal W^{su}$ in a plaque $\EuScript P^u$ and let $p\colon T_1\to T_2$ be the holonomy along $\mathcal W^{wu}$. For $x,y\in T_1$ with $d^{su}(x,y)\ge 1$ the Lipschitz property \begin{equation} \label{Lip_prop_far} d^{su}(p(x),p(y))\le C d^{su}(x,y),\;\; d^{su}(x,y)\ge 1, \end{equation} follows from compactness for uniformly bounded plaques $\EuScript P^u$. It might happen that $f^n(x)$ and $f^n(p(x))$ are far from each other on $\mathcal W^{wu}(f^n(x))$. Hence we need~(\ref{Lip_prop_far}) with uniform $C$ not only on plaques $\EuScript P^u$ of bounded size but also on plaques that are long in the weak unstable direction. In this case~(\ref{Lip_prop_far}) cannot be guaranteed solely by compactness but easily follows from Proposition~\ref{prop_slow_to_slow}. For $x$ and $y$ close to each other we may use $\tilde d^{su}$ rather than $d^{su}$ since $\tilde d^{su}$ is given by an integral of a continuous density. Then $$ \frac{\tilde d^{su}(p(x),p(y))}{\tilde d^{su}(x,y)}= \prod_{i=0}^{n-1}\frac{D_f^{su}(f^i(p(x)))}{D_f^{su}(f^i(x))}\cdot\frac{\tilde d^{su}(f^n(p(x)),f^n(p(y)))}{\tilde d^{su}(f^n(x),f^n(y))}, $$ where $n$ is chosen so that $d^{su}(f^{n-1}(x),f^{n-1}(y))<1\le d^{su}(f^{n}(x),f^{n}(y))$. The Lipschitz estimate follows since according to the Livshits Theorem $D_f^{su}$ is cohomologous to $\lambda$ and therefore the product term equals $F(f^n(x))F(f^n(p(x)))(F(x)F(p(x)))^{-1}$ for some positive continuous transfer function $F$. Now let us take $f$ from $\mathcal V$. Specification implies that the closure of the set $\{\lambda^{su}(x): x\;\mbox{periodic}\}$ is an interval $[\lambda^{su}_-, \lambda^{su}_+]$. By applying the Anosov Closing Lemma it is possible to change the Riemannian metric so that the constants $\lambda_-$ and $\lambda_+$ from Definition~\ref{def_phd} are equal to $\lambda^{su}_-/(1+\delta)$ and $\lambda^{su}_+(1+\delta)$ respectively. Here $\delta$ is an arbitrarily small number. Next we choose periodic points $a$ and $b$ such that $$ \max\left\{\frac{\lambda^{su}_+}{\lambda^{su}(a)}, \frac{\lambda^{su}(b)}{\lambda^{su}_-}\right\}\le1+\delta \;\;\; \mbox{and}\;\;\; \frac{\lambda^{su}(b)}{\lambda^{su}(a)}\le\frac{1}{(1+\delta)^{2/\gamma}}. $$ This is possible if $\delta$ is small enough. From now on $\delta$ will be fixed. The constant $\gamma$ does not depend on our choice of $a$ and $b$, and hence not on $\delta$. It will be introduced later. Denote by $n_0$ the least common period of $a$ and $b$. Take $\tilde a\in\mathcal W^{su}(a)$ such that $d^{su}(a,\tilde a)=1$. If one considers an arc of a leaf of $\mathcal W_L^{wu}$ of length $D$ then it is easy to see that this arc is $const/\sqrt D$-dense in $\mathbb T^3$. Since the conjugacy $h_f$ between $f$ and $L$ is H\"older continuous, Proposition~\ref{prop_slow_to_slow} implies that an arc of $\mathcal W^{wu}(a)$ of length $D$ is $C_1/D^\alpha$-dense in $\mathbb T^3$, for some $\alpha>0$.
It follows that there exists a point $c\in\mathcal W^{wu}(a)$ such that $d^{wu}(a,c)\le D$, $d(c,b)\le C_1/D^\alpha$ and $\mathcal W^s(b)$ intersects the arc of the strong unstable leaf $\mathcal W^{su}(c)$ that connects $c$ and $\tilde c=\mathcal W^{su}(c)\cap\mathcal W^{wu}(\tilde a)$ at a point $\tilde b$, as shown in Figure~\ref{fig_hold}. \begin{figure}\label{fig_hold} \end{figure} Take $N$ such that $d^{wu}(a,f^{-n_0N}(c))\le 1<d^{wu}(a,f^{-n_0(N-1)}(c))$. Now our goal is to show that the ratio $$ \frac{\tilde d^{su}(a,f^{-n_0N}(\tilde a))}{\tilde d^{su}(f^{-n_0N}(c),f^{-n_0N}(\tilde c))} $$ can be arbitrarily small, which would imply that $\mathcal W^{wu}$ is not Lipschitz. Note that we cannot take a smaller $N$ since the $f^{-1}$-orbit of $c$ has to come to a local plaque about $a$. \begin{remark} We use $\tilde d^{su}$ for convenience. Somewhat messier estimates go through if one uses $d^{su}$ directly. \end{remark} To estimate the denominator we split the orbit $\{c, f^{-1}(c),\ldots, f^{-n_0N}(c)\}$ into two segments of lengths $N_1$ and $N_2$, $N_1+N_2=n_0N$. Choose $N_1$ so that $d(f^{-N_1}(b),f^{-N_1}(c))$ is still small enough to provide the estimate on the strong unstable derivative $$ D_f^{su}(f^{-i}(c))\le(1+\delta)\lambda^{su}(b), \;i=1,\ldots, N_1+1. $$ The remaining derivatives will be estimated crudely by $$ D_f^{su}(\cdot)\le\lambda_+. $$ Since $b$ and $\tilde b$ are exponentially close --- $d^s(b,\tilde b)\le C_1/D^\alpha\le const\cdot\mu_-^{-n_0N}$ --- we see that there exists $\beta=\beta(\alpha,\nu_-,\mu_-)$ which is independent of $N$ such that $N_1>\beta N_2$. Proposition~\ref{prop_slow_to_slow} implies that the ratio $\tilde d^{su}(a,\tilde a)/\tilde d^{su}(c,\tilde c)$ is bounded independently of $D$ (and $N$) by a constant $C_2$. We are ready to proceed with the main estimate. \begin{multline*} \frac{\tilde d^{su}(a,f^{-n_0N}(\tilde a))}{\tilde d^{su}(f^{-n_0N}(c),f^{-n_0N}(\tilde c))}= \prod_{i=1}^{n_0N+1}\frac{D_f^{su}(f^i(c))}{(\lambda^{su})^{n_0N}}\cdot\frac{\tilde d^{su}(a,\tilde a)}{\tilde d^{su}(c,\tilde c)}\\ \le(\lambda^{su}(a))^{-n_0N}(1+\delta)^{N_1}(\lambda^{su}(b))^{N_1}(1+\delta)^{N_2}(\lambda^{su}_+)^{N_2}C_2\\ \le(1+\delta)^{N_1+N_2}\left(\frac{\lambda^{su}(b)}{\lambda^{su}(a)}\right)^{N_1}\left(\frac{\lambda^{su}_+}{\lambda^{su}(a)}\right)^{N_2}C_2\\ \le(1+\delta)^{N_1+2N_2}\left(\frac{\lambda^{su}(b)}{\lambda^{su}(a)}\right)^{\gamma(N_1+2N_2)}C_2 \le\left(\frac{1}{1+\delta}\right)^{n_0N+N_2}C_2, \end{multline*} where $\gamma=\beta/(\beta+2)$ so that $N_1\ge\gamma(N_1+2N_2)$. The last expression goes to zero as $D\to\infty$, $N\to\infty$. Thus $\mathcal W^{wu}$ is not Lipschitz. \end{proof} \begin{proof}[Proof of Lemma~\ref{lemma_Lip_AC}] Obviously $\mathcal W^{wu}$ being Lipschitz implies the transverse absolute continuity property and hence absolute continuity. We have to establish the other implication. Assume that $\mathcal W^{wu}$ is absolutely continuous in the sense of Definition~\ref{def_ac}. A priori, the conditional densities are only $L^1$-functions. Our goal is to show that the densities are continuous. Moreover, for $m$ almost every $x$ the density $\rho_x(y)$ on a plaque $\EuScript P^{wu}$ satisfies the equation \begin{equation} \label{product_formula} \frac{\rho_x(y)}{\rho_x(x)}=\prod_{n\ge1}\frac{D_f^{wu}(f^{-n}(x))}{D_f^{wu}(f^{-n}(y))}, \end{equation} where $D_f^{wu}(z)=\|Df(z)\big|_{E_f^{wu}(z)}\|$. The expression on the right hand side of the formula is a positive continuous function in $y$.
Consider a full volume set where the positive ergodic averages of all continuous functions coincide with the corresponding space averages. By absolute continuity this set must intersect some plaque $\EuScript P^{wu}$ in a set $Y$ of positive leaf-volume $m_{wu}$. Denote by $m_Y$ the restriction of $m_{wu}$ to $Y$. For any $y\in Y$ consider the measures $$ \Delta_n(y)=\frac1n\sum_{i=0}^{n-1}\delta_{f^i(y)},\; \mu_n=\int_Y\Delta_n(y)dm_{wu}(y). $$ The sequences $\{\Delta_n(y)\}$, $y\in Y$, converge weakly to $m$. Hence $\mu_n$ converges to $m$ as well. Notice that $$ \mu_n=\frac1n\sum_{i=0}^{n-1}\int_Y\delta_{f^i(y)}dm_{wu}(y)=\frac1n\sum_{i=0}^{n-1}(f^i)_*(m_Y). $$ In the case when $Y$ is a plaque of $\mathcal W^{wu}$ the latter expression is known to converge to a measure with absolutely continuous conditional densities on $\mathcal W^{wu}$ that satisfy~(\ref{product_formula}). This was established in~\cite{PS} in the context of $u$-Gibbs measures, however the proof works equally well for any uniformly expanding foliation such as $\mathcal W^{wu}$. For arbitrary measurable $Y$ the same conclusion holds. One needs to use a Lebesgue density argument to reduce the problem to the case when $Y$ is a finite union of plaques. \begin{remark} The argument presented above can also be found in~\cite{BDV}, Section~11.2.2, in the context of $u$-Gibbs measures. \end{remark} Take an $m$-typical plaque $\EuScript P^u$ whose boundaries are leaves of $\mathcal W^{wu}$ and transversals $T_1$ and $T_2$ as shown in Figure~\ref{fig_lip}. Then the plaque $\EuScript P^u$ is foliated by the plaques $\EuScript P^{wu}(x)$, $x\in T_1$. As usual, denote by $p\colon T_1\to T_2$ the holonomy map. The Lipschitz property of $p$ will be established by comparing volumes of small rectangles $R_1$ and $R_2$ built on corresponding segments of $T_1$ and $T_2$. \begin{figure}\label{fig_lip} \end{figure} Denote by $\mu_{\EuScript P^u}$ the conditional measure on $\EuScript P^u$. The conditional densities $\rho_x(\cdot)$ of $m$ on the plaques $\EuScript P^{wu}(x), x\in T_1$, are the same as the conditional densities with respect to $\mu_{\EuScript P^u}$. Fix $x, y\in T_1$ and small $\varepsilon>0$, $\varepsilon\ll m_{T_1}([x,y])$. Build rectangles $R_1$ and $R_2$ on the segments $[x,y]$ and $[p(x),p(y)]$ so that $m_{\EuScript P^{wu}(z)}(R_1\cap\EuScript P^{wu}(z))=m_{\EuScript P^{wu}(z)}(R_2\cap\EuScript P^{wu}(z))=\varepsilon$ for every $z\in[x,y]$. Then $$ \mu_{\EuScript P^u}(R_i)=\int_{[x,y]}d\hat\mu(z)\int_{\EuScript P^{wu}(z)\cap R_i}\rho_z(t)dm_{\EuScript P^{wu}(z)}(t),\;\; i=1,2, $$ where $\hat\mu$ is the projection of $\mu_{\EuScript P^u}$ to $T_1$. These formulae, together with the uniform continuity of the conditional densities guaranteed by~(\ref{product_formula}), imply that the ratio $\mu_{\EuScript P^u}(R_1)/\mu_{\EuScript P^u}(R_2)$ is bounded away from zero and infinity uniformly in $x$ and $y$. Since $\mu_{\EuScript P^u}$ has a positive continuous density with respect to $m_{\EuScript P^u}$ the same conclusion holds for $m_{\EuScript P^u}(R_1)/m_{\EuScript P^u}(R_2)$ and therefore also for $m_{T_1}([x,y])/m_{T_2}([p(x),p(y)])$. \end{proof} \section{Appendix} The appendix is devoted to the proofs of Propositions~\ref{prop_slow_to_slow} and~\ref{prop1}. Both proofs rely on simple growth arguments and a result of Brin, Burago and Ivanov. We will work on the universal cover $\mathbb R^3$ and we will indicate this by using a tilde for lifted objects. For example, the lift of the foliation $\mathcal W^{su}_f$ to $\mathbb R^3$ is denoted by ${\widetilde{\mathcal W}}^{su}_f$.
The result of Brin, Burago and Ivanov~\cite{BBI} says that lifts of leaves of the strong unstable foliation are quasi-isometric. Namely, if $d$ is the usual distance then $$ \exists C>0:\;\forall x,y\;\mbox{with}\; y\in{\tilde {\mathcal W}}^{su}(x),\;\; d^{su}(x,y)\le Cd(x,y). $$ \begin{proof}[Proof of Proposition~\ref{prop_slow_to_slow}] We argue by contradiction. Assume that $\tilde h_f({\widetilde{\mathcal W}}^{wu}_f)\neq{\widetilde{\mathcal W}}^{wu}_L$. Then we can find points $a$, $b$ and $c$ with the following properties: $$ b\in{\widetilde{\mathcal W}}^{wu}_L(a),\quad c\notin{\widetilde{\mathcal W}}^{wu}_L(a),\quad \tilde h_f^{-1}(c)={\widetilde{\mathcal W}}^{wu}_f(\tilde h_f^{-1}(a))\cap{\widetilde{\mathcal W}}^{su}_f(\tilde h_f^{-1}(b)). $$ We iterate the automorphism $L$ and look at the asymptotic growth of the distance between these points. Obviously, the distance between the images of $a$ and $b$ grows as $\mu^n$, while the distance between the images of $a$ and $c$, and between the images of $b$ and $c$, grows as $\lambda^n$. Since the conjugacy $\tilde h_f$ is $C^0$-close to $Id$, we have the same growth rates for the triple $\tilde h_f^{-1}(a)$, $\tilde h_f^{-1}(b)$ and $\tilde h_f^{-1}(c)$ as we iterate the dynamics $\tilde f$. The points $\tilde h_f^{-1}(a)$ and $\tilde h_f^{-1}(c)$ lie on the same weak unstable manifold; therefore, the constant $\mu_+$ from Definition~\ref{def_phd_usual} is not less than $\lambda$. Then, obviously, $\lambda_->\lambda$. Since ${\widetilde{\mathcal W}}^{su}_f$ is quasi-isometric, $$ d(\tilde f^n(\tilde h_f^{-1}(c)),\tilde f^n(\tilde h_f^{-1}(b)))\approx d^{su}(\tilde f^n(\tilde h_f^{-1}(c)),\tilde f^n(\tilde h_f^{-1}(b)))\gtrsim\lambda_-^n,\; n\to\infty. $$ On the other hand, we have already established that the distance between the images of $\tilde h_f^{-1}(c)$ and $\tilde h_f^{-1}(b)$ grows as $\lambda^n$. This gives us a contradiction. \end{proof} \begin{proof}[Proof of Proposition~\ref{prop1}] We argue by contradiction. Assume that $f\in\mathcal U\backslash\mathcal V$. Then for every periodic point $x$, $\lambda^{su}(x)=\bar\lambda\neq\lambda$. First assume that $\bar\lambda<\lambda$. Then the constant $\lambda_+$ from Definition~\ref{def_phd_usual} can be taken equal to $\frac12(\lambda+\bar\lambda)$. Pick points $a$ and $b$, $b\in{\widetilde{\mathcal W}}^{su}(a)$. Then $$ d(\tilde f^n(a),\tilde f^n(b))\le d^{su}(\tilde f^n(a),\tilde f^n(b))\lesssim\lambda_+^n,\; n\to\infty. $$ By Proposition~\ref{prop_slow_to_slow}, $\tilde h_f(b)\notin{\widetilde{\mathcal W}}^{wu}(\tilde h_f(a))$. Therefore, $$ d(\tilde L^n(\tilde h_f(a)),\tilde L^n(\tilde h_f(b)))\gtrsim\lambda^n,\;n\to\infty. $$ On the other hand, $$ d(\tilde L^n(\tilde h_f(a)),\tilde L^n(\tilde h_f(b)))= d(\tilde h_f(\tilde f^n(a)),\tilde h_f(\tilde f^n(b)))\lesssim\lambda_+^n,\;n\to\infty, $$ since $\tilde h_f$ is $C^0$-close to $Id$. The last two asymptotic inequalities contradict each other. Now let us assume that $\bar\lambda>\lambda$. In this case we can take $\lambda_-$ from Definition~\ref{def_phd_usual} to be equal to $\frac12(\lambda+\bar\lambda)$. Take $a$ and $b$ as before. Since ${\widetilde{\mathcal W}}^{su}$ is quasi-isometric, $$ d(\tilde f^n(a),\tilde f^n(b))\gtrsim d^{su}(\tilde f^n(a),\tilde f^n(b))\gtrsim\lambda_-^n,\;n\to\infty. $$ On the other hand, $$ d(\tilde f^n(a),\tilde f^n(b))\approx d(\tilde h_f(\tilde f^n(a)),\tilde h_f(\tilde f^n(b)))=d(\tilde L^n(\tilde h_f(a)),\tilde L^n(\tilde h_f(b)))\lesssim\lambda^n,\;n\to\infty, $$ which gives us a contradiction in this case as well. \end{proof} \end{document}
\begin{document} \mainmatter \title{Towards the Formal Reliability Analysis of Oil and Gas Pipelines} \titlerunning{Lecture Notes in Computer Science: Authors' Instructions} \author{Waqar Ahmed$^1$ \and Osman Hasan$^1$ \and Sofiene Tahar$^2$ \and\\ Mohammad Salah Hamdi$^3$} \institute{$^1$School of Electrical Engineering and Computer Science (SEECS)\\ National University of Sciences and Technology (NUST)\\ Islamabad, Pakistan \\ \email{ \{12phdwahmad,osman.hasan\}@seecs.nust.edu.pk }\\ $^2$Electrical and Computer Engineering Department\\ Concordia University, Montreal, Canada \\ \email{[email protected]}\\ $^3$Information Systems Department \\ Ahmed Bin Mohammed Military College, Doha, Qatar \\ \email{[email protected]} } \maketitle \begin{abstract} It is customary to assess the reliability of underground oil and gas pipelines in the presence of excessive loading and corrosion effects to ensure a leak-free transport of hazardous materials. The main idea behind this reliability analysis is to model the given pipeline system as a Reliability Block Diagram (RBD) of segments such that the reliability of an individual pipeline segment can be represented by a random variable. Traditionally, computer simulation is used to perform this reliability analysis but it provides approximate results and requires an enormous amount of CPU time for attaining reasonable estimates. Due to its approximate nature, simulation is not very suitable for analyzing safety-critical systems like oil and gas pipelines, where even minor analysis flaws may result in catastrophic consequences. As an accurate alternative, we propose to use a higher-order-logic theorem prover (HOL) for the reliability analysis of pipelines. As a first step towards this idea, this paper provides a higher-order-logic formalization of reliability and the series RBD using the HOL theorem prover. For illustration, we present the formal analysis of a simple pipeline that can be modeled as a series RBD of segments with exponentially distributed failure times. \keywords{Reliability Block Diagrams, Formal Methods, Theorem Proving, Oil and Gas pipeline} \end{abstract} \section{Introduction} On April 20, 2010, methane gas leakage on the Deepwater Horizon oil rig operated by Transocean, a subcontractor of British Petroleum (BP), caused a big explosion \cite{bps_13b}. This leakage not only killed 11 workers instantly but destroyed and sank the rig, and caused millions of gallons of oil to pour into the Gulf of Mexico. The gushing well, about a mile under the sea, was finally brought under control after more than three months of frenetic attempts. The spill, which is considered to be the largest accidental marine oil spill in the history of the petroleum industry, caused extensive damage to marine and wildlife habitats as well as the Gulf's fishing and tourism industries and its impact still continues. Just like the BP pipeline, there are tens of thousands of miles long oil and gas pipelines around the world. All of these pipelines are aging and are becoming more and more susceptible to failures, which may lead to disasters like the BP one. Hence, it is very important to do rigorous reliability analysis of oil and gas pipelines to detect and rectify potential problems. 
The reliability analysis of a pipeline system involves a three-step process: (i) partitioning the given pipeline into segments and constructing its equivalent reliability block diagram (RBD), (ii) assessing the reliability of the individual segments and (iii) evaluating the reliability of the complete pipeline system based on the RBD and the reliability of its individual segments. The reliability of an individual segment is usually expressed in terms of its failure rate $\lambda$ and a random variable, such as an exponential \cite{Zhang_08} or Weibull \cite{Kolowrocki_09} random variable, which models the failure time. A single oil or gas pipeline can be simply modeled as a series RBD \cite{Zhang_08}. However, in many cases, these pipeline systems have either redundant (reserve) components or subsystems, and such pipeline systems exhibit a combination of series and parallel RBDs \cite{Soszynska_10}. The reliability analysis of oil and gas pipelines has predominantly been accomplished by first gathering data from in-line inspection tools to detect cracks, corrosion or damage \cite{pipe_integ_sol_13,pipe_check_13}. This information is then manipulated using paper-and-pencil analytical methods and computer simulations to deliver diagnostics and insightful pipeline integrity reports (e.g. \cite{panday_98,Zhang_08,Soszynska_10}). However, due to the complex nature of large pipeline system analysis, paper-and-pencil proof methods are error-prone, and the exhaustive testing of all possible system behaviors using simulation is almost impossible. Thus, these traditional analysis techniques cannot guarantee accurate results, which is a severe limitation in the case of oil and gas pipelines, as an uncaught system bug may endanger human and animal life or lead to a significant financial loss. The inaccuracy limitations of traditional analysis techniques can be overcome by using formal methods \cite{boca_09}, which use computerized mathematical reasoning to precisely model the system's intended behavior and to provide irrefutable proof that a system satisfies its requirements. Both model checking and theorem proving have been successfully used for the precise probabilistic analysis of a broad range of systems (e.g. \cite{hasan_ispass_08,KNP_10a,elleuch_11,hasan_jal_11,fruth_11}). However, to the best of our knowledge, no formal analysis approach has been used for the reliability analysis of oil and gas pipelines so far. The foremost requirement for conducting the formal reliability analysis of underground oil and gas pipelines is the ability to formalize recursively defined RBDs and continuous random variables. Model checking is a state-based formal verification technique. Its inherent limitations are the state-space explosion problem and the inability to model complex datatypes, such as trees, lists and recursive definitions \cite{kaufman2004some}. On the other hand, higher-order logic \cite{cebrown_07} is a system of deduction with a precise semantics and can be used to formally model any system that can be described mathematically, including recursive definitions, random variables, RBDs, and continuous components. Similarly, interactive theorem provers are computer-based formal reasoning tools that allow us to verify higher-order-logic properties under user guidance. Higher-order-logic theorem provers can be used to reason about recursive definitions using induction methods \cite{kapur1996lemma}. Thus, higher-order-logic theorem proving can be used to conduct the formal analysis of oil and gas pipelines.
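To fix ideas before introducing the HOL development, consider the quantity that has to be formalized: for a series RBD the system reliability is the product of the segment reliabilities. A conventional simulation-based analysis of the kind referred to above would estimate this quantity roughly as in the following sketch (Python, with purely illustrative failure rates and mission time that are not taken from any particular pipeline); the sketch is not part of the formal development and only serves to make the contrast concrete.

\begin{verbatim}
import numpy as np

# Illustrative failure rates for three pipeline segments (per unit time)
# and an illustrative mission time -- not data from any real pipeline.
rates = np.array([0.0025, 0.0023, 0.015])
t = 10.0
n_trials = 100_000

rng = np.random.default_rng(0)
# Sample a failure time for every segment in every trial.
failure_times = rng.exponential(scale=1.0 / rates, size=(n_trials, rates.size))

# A series system survives up to time t only if every segment does,
# i.e. if the earliest segment failure occurs after t.
simulated = np.mean(failure_times.min(axis=1) > t)

# Analytical series-RBD reliability: product of segment reliabilities,
# which for exponential failure times equals exp(-(sum of rates) * t).
analytical = np.exp(-rates.sum() * t)

print(f"simulated  R({t}) ~ {simulated:.4f}")
print(f"analytical R({t}) = {analytical:.4f}")
\end{verbatim}

Even with $10^5$ trials the estimate typically matches the analytical value only to two or three decimal places; it is this residual approximation error, together with the impossibility of exhaustively simulating all behaviors, that motivates the formal treatment developed in the following sections.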
A number of higher-order-logic formalizations of probability theory are available in higher-order logic (e.g. \cite{hurd_02,mhamdi_11,holzl_11}). Hurd's formalization of probability theory \cite{hurd_02} has been utilized to verify sampling algorithms of a number of commonly used discrete \cite{hurd_02} and continuous random variables \cite{hasan_cade_07} based on their probabilistic and statistical properties \cite{hasan_icnaam_07,hasan_fm_09}. Moreover, this formalization has been used to conduct the reliability analysis of a number of applications, such as memory arrays \cite{hasan_tc_10}, soft errors \cite{abbasi_13b} and electronic components \cite{abbasi_13}. However, Hurd's formalization of probability theory only supports having the whole universe as the probability space. This feature limits its scope and thus this probability theory cannot be used to formalize more than a single continuous random variable. Whereas, in the case of reliability analysis of pipelines, multiple continuous random variables are required. The recent formalizations of probability theory by Mhamdi \cite{mhamdi_11} and H\"{o}lzl~\cite{holzl_11} are based on extended real numbers (including $\pm\infty$) and provide the formalization of Lebesgue integral for reasoning about advanced statistical properties. These theories also allow using any arbitrary probability space that is a subset of the universe and thus are more flexible than Hurd's formalization. However, to the best of our knowledge, these foundational theories have not been used to formalize neither reliability and RBDs nor continuous random variables so far. In this paper, we use Mhamdi's formalization of probability theory \cite{mhamdi_11}, which is available in the HOL theorem prover \cite{norris_hol}, to formalize reliability and the commonly used series RBD, where its individual segments are modeled as random variables. Our formalization includes various formally verified properties of reliability and series RBD that facilitate formal reasoning about the reliability of some simple pipelines using a theorem prover. To analyze more realistic models of pipelines, it is required to formalize other RBDs, such as parallel, series-parallel and parallel-series \cite{Bilinton_1992}. In order to illustrate the utilization and effectiveness of the proposed idea, we utilize the above mentioned formalization to analyze a simple pipeline that can be modeled as a series RBD with an exponential failure time for individual segments. \section{Preliminaries} \label{sec_2} In this section, we give a brief introduction to theorem proving in general and the HOL theorem prover in particular. The intent is to introduce the main ideas behind this technique to facilitate the understanding of the paper for the reliability analysis community. We also summarize Mhamdi's formalization of probability theory \cite{mhamdi_11} in this section. \subsection{Theorem Proving} Theorem proving \cite{gordon_89} is a widely used formal verification technique. The system that needs to be analysed is mathematically modelled in an appropriate logic and the properties of interest are verified using computer based formal tools. The use of formal logics as a modelling medium makes theorem proving a very flexible verification technique as it is possible to formally verify any system that can be described mathematically. The core of theorem provers usually consists of some well-known axioms and primitive inference rules. 
Soundness is assured as every new theorem must be created from these basic or already proved axioms and primitive inference rules. The verification effort of a theorem in a theorem prover varies from trivial to complex depending on the underlying logic \cite{harrison_96a}. For instance, first-order logic \cite{fitting_96} utilizes the propositional calculus and terms (constants, function names and free variables) and is semi-decidable. A number of sound and complete first-order logic automated reasoners are available that enable completely automated proofs. More expressive logics, such as higher-order logic \cite{cebrown_07}, can be used to model a wider range of problems than first-order logic, but theorem proving for these logics cannot be fully automated and thus involves user interaction to guide the proof tools. For reliability analysis of pipelines, we need to formalize (mathematically model) random variables as functions and their distribution properties are verified by quantifying over random variable functions. Henceforth, first-order logic does not support such formalization and we need to use higher-order logic to formalize the foundations of reliability analysis of pipelines. \subsection{HOL Theorem Prover} HOL is an interactive theorem prover developed at the University of Cambridge, UK, for conducting proofs in higher-order logic. It utilizes the simple type theory of Church \cite{church_40} along with Hindley-Milner polymorphism \cite{milner_77} to implement higher-order logic. HOL has been successfully used as a verification framework for both software and hardware as well as a platform for the formalization of pure mathematics. The HOL core consists of only 5 basic axioms and 8 primitive inference rules, which are implemented as ML functions. Soundness is assured as every new theorem must be verified by applying these basic axioms and primitive inference rules or any other previously verified theorems/inference rules. We utilized the HOL theories of Booleans, lists, sets, positive integers, \emph{real} numbers, measure and probability in our work. In fact, one of the primary motivations of selecting the HOL theorem prover for our work was to benefit from these built-in mathematical theories. Table \ref{hol_basics} provides the mathematical interpretations of some frequently used HOL symbols and functions, which are inherited from existing HOL theories, in this paper. 
\begin{table}[!htb] \begin{center} \begin{tabular}{|c|c|c|} \hline {\bfseries HOL Symbol} & {\bfseries Standard Symbol} & {\bfseries Meaning} \\ \hline \hline $\mathtt{\wedge}$& $and$ & Logical $and$ \\ \hline $\mathtt{\vee}$ & $or$ & Logical $or$ \\ \hline $\mathtt{\neg}$ & $not$ & Logical $negation$ \\ \hline $\mathtt{::}$ & $cons$ & Adds a new element to a list \\ \hline $\mathtt{++}$ & $append$ & Joins two lists together \\ \hline $\mathtt{HD\ L}$ & $head$ & Head element of list $L$ \\ \hline $\mathtt{TL\ L}$ & $tail$ & Tail of list $L$\\ \hline $\mathtt{EL\ n\ L}$ & $element$ & $n^{th}$ element of list L \\ \hline $\mathtt{MEM\ a\ L}$ & $member$ & True if $a$ is a member of list $L$\\ \hline $\mathtt{\lambda x.t}$& $\lambda x.t$ & Function that maps $x$ to $t(x)$ \\ \hline $\mathtt{SUC\ n}$& $n + 1$ & Successor of a $num$ \\ \hline $\mathtt{lim(\lambda n.f(n))}$ & $\mathop {\lim }\limits_{n \to \infty } f(n)$ & Limit of a $real$ sequence $f$ \\ \hline \end{tabular} \caption{HOL Symbols and Functions} \label{hol_basics} \end{center} \end{table} \subsection{Probability Theory and Random Variables in HOL} Mathematically, a measure space is defined as a triple ($\Omega,\Sigma, \mu$), where $\Omega$ is a set, called the sample space, $\Sigma$ represents a $\sigma$-algebra of subsets of $\Omega$, where the subsets are usually referred to as measurable sets, and $\mu$ is a measure with domain $\Sigma$. A probability space is a measure space ($\Omega,\Sigma, Pr$), such that the measure, referred to as the probability and denoted by $Pr$, of the sample space is 1. In Mhamdi's formalization of probability theory \cite{mhamdi_11}, given a probability space $p$, the functions \texttt{space} and \texttt{subsets} return the corresponding $\Omega$ and $\Sigma$, respectively. This formalization also includes the formal verification of some of the most widely used probability axioms, which play a pivotal role in formal reasoning about reliability properties. Mathematically, a random variable is a measurable function between a probability space and a measurable space. A measurable space refers to a pair ($S,\mathcal{A}$), where $S$ denotes a set and $\mathcal{A}$ represents a nonempty collection of sub-sets of $S$. Now, if $S$ is a set with finite elements, then the corresponding random variable is termed as a discrete random variable and else it is called a continuous one. The probability that a random variable $X$ is less than or equal to some value $x$, $Pr(X \le x)$ is called the cumulative distribution function (CDF) and it characterizes the distribution of both discrete and continuous random variables. Mhamdi's formalization of probability theory \cite{mhamdi_11} also includes the formalization of random variables and the formal verification of some of their classical properties using the HOL theorem prover. \section{Reliability} \label{sec_3} In reliability theory \cite{Bilinton_1992}, reliability $R(t)$ of a system or component is defined as the probability that it performs its intended function until some time $t$. \begin{equation} \label{reliability_eq} R(t) = Pr (X > t) = 1 - Pr (X \le t) = 1 - F_X(t) \end{equation} where $F_X(t)$ is the CDF. The random variable $X$, in the above definition, models the time to failure of the system. Usually, this time to failure is modeled by the exponential random variable with parameter $\lambda$ that represents the failure rate of the system. 
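For instance, if the time to failure is exponentially distributed with failure rate $\lambda$, then $F_X(t) = 1 - e^{-\lambda t}$ for $t \ge 0$, and Equation \ref{reliability_eq} specializes to
\[
R(t) = 1 - F_X(t) = e^{-\lambda t}, \qquad t \ge 0,
\]
which is the closed form that reappears in Section \ref{sec_5}.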
Now, the CDF can be modeled in HOL as follows: \begin{flushleft} \texttt{\bf{Definition 1: }} \label{CDF_def} \emph{Cumulative Distributive Function} \\ \texttt{$\vdash$ $\forall$ p X x. CDF p X x = distribution p X \{y | y $\leq$ Normal x\} } \end{flushleft} \noindent where $p$ represents the probability space, $X$ is the random variable and $x$ represents a $real$ number. The function \texttt{Normal} converts a $real$ number to its corresponding value in the $extended-real$ data-type, i.e, the $real$ data-type including the positive and negative infinity. The function \texttt{distribution} accepts a probability space $p$, a random variable $X$ and a set and returns the probability of $X$ acquiring all the values of the given set in the probability space $p$. Now, Definition 1 can be used to formalize the reliability definition, given in Equation \ref{reliability_eq}, as follows: \begin{flushleft} \texttt{\bf{Definition 2: }} \label{Reliability_def} \emph{Reliability} \\ \texttt{$\vdash$ $\forall$ p X x. Reliability p X x = 1 - CDF p X x } \end{flushleft} We used the above mentioned formal definition of reliability to formal verify some of the classical properties of reliability in HOL. The first property in this regard relates to the fact that the reliability of a good component is 1, i.e., maximum, prior to its operation, i.e., at time 0. This property has been verified in HOL as the following theorem. \begin{flushleft} \texttt{\bf{Theorem 1: }} \label{Reliability_AT_ZERO} \emph{Maximum Reliability} \\ \texttt{$\vdash$ $\forall$ p X. prob\_space p $\wedge$ (events p = POW (p\_space p)) $\wedge$ \\ \ \ \ \ \ \ \ ($\forall$ y. X y $\neq$ NegInf $\wedge$ X y $\neq$ PosInf) $\wedge$ \\ \ \ \ \ \ \ \ ($\forall$ z. 0 $\le$ z $\Rightarrow$ ($\lambda$x. CDF p X x) contl z) $\wedge$\\ \ \ \ \ \ \ \ ($\forall$ x. Normal 0 $\le$ X x) $\Rightarrow$\\ \ \ \ \ \ \ \ (Reliability p X 0 = 1) } \end{flushleft} \noindent The first two assumptions of the above theorem ensure that the variable \emph{p} represents a valid probability space based on the formalization of Mhamdi's probability theory \cite{mhamdi_11}. The third assumption constraints the random variable to be well-defined, i.e., it cannot acquire negative or positive infinity values. The fourth assumption states that the CDF of the random variable $X$ is a continuous function, which means that $X$ is a continuous random variable. This assumption utilizes the HOL function \texttt{contl}, which accepts a lambda abstraction function and a real value and ensures that the function is continuous at the given value. The last assumption ensures that the random variable $X$ can acquire positive values only since in the case of reliability this random variable always models time, which cannot be negative. The conclusion of the theorem represents our desired property that reliability at \emph{time=0} is \emph{1}. The proof of the Theorem 1 exploits some basic probability theory axioms and the following property according to which the probability of a continous random variable at a point is zero. The second main characteristic of the reliability function is its decreasing monotonicity, which is verified as the following theorem in HOL: \begin{flushleft} \texttt{\bf{Theorem 2: }} \label{Reliability_MONOTONE} \emph{Reliability is a Monotone Function} \\ \texttt{$\vdash$ $\forall$ p X a b. prob\_space p $\wedge$ (events p = POW (p\_space p)) $\wedge$ \\ \ \ \ \ \ \ \ ($\forall$ y. 
X y $\neq$ NegInf $\wedge$ X y $\neq$ PosInf) $\wedge$ \\ \ \ \ \ \ \ \ ($\forall$ x. Normal 0 $\le$ X x) $\wedge$ a $\le$ b $\Rightarrow$\\ \ \ \ \ \ \ \ (Reliability p X (b)) $\leq$ (Reliability p X (a)) } \end{flushleft} \noindent The assumptions of this theorem are the same as the ones used for Theorem 1 except the last assumption, which describes the relationship between variables $a$ and $b$. The above property clearly indicates that the reliability cannot increase with the passage of time. The formal reasoning about the proof of Theorem 2 involves some basic axioms of probability theory and a property that the CDF is a monotonically increasing function. Finally, we verified that the reliability tends to 0 as the time approaches infinity. This property is verified under the same assumptions that are used for Theorem 1. \begin{flushleft} \texttt{\bf{Theorem 3: }} \label{Reliability_TENDS} \emph{Reliability Tends to Zero As Time Approaches Infinity} \\ \texttt{$\vdash$ $\forall$ p X. prob\_space p $\wedge$ (events p = POW (p\_space p)) $\wedge$ \\ \ \ \ ($\forall$ y. X y $\neq$ NegInf $\wedge$ X y $\neq$ PosInf) $\wedge$ ($\forall$ x. Normal 0 $\le$ X x) $\Rightarrow$\\ \ \ \ \ \ \ \ (lim ($\lambda$n. Reliability p X (\&n)) = 0) } \end{flushleft} \noindent The HOL function \texttt{lim} models the limit of a real sequence. The proof of Theorem 3 primarily uses the fact that the CDF approches to 1 as its argument approaches infinity. These three theorems completely characterize the behavior of the reliability function on the positive real axis as the argument of the reliability is time and thus cannot be negative. The formal verification of these properties based on our definition ensure its correctness. Moreover, these formally verified properties also facilitate formal reasoning about reliability of systems, as will be demonstrated in Section \ref{sec_5} of this paper. The proof details about these properties can be obtained from our proof script \cite{waqar_ftscs_13}. \section{Formalization of Series Reliability Block Diagram} \label{sec_4} In a serially connected system \cite{Bilinton_1992}, depicted in Figure 1, the reliability of the complete system mainly depends upon the failure of a single component that has the minimum reliability among all the components of the system. In other words, the system stops functioning if any one of its component fails. Thus, the operation of such a system is termed as reliable at any time $t$, if all of its components are functioning reliably at this time $t$. If the event $A_{i}(t)$ represents the reliable functioning of the $i^{th}$ component of a serially connected system with $N$ components at time $t$ then the overall reliability of the system can be mathematically expressed as \cite{Bilinton_1992}: \begin{equation}\label{eq2} R_{series}(t) = Pr (A_{1}(t) \cap A_{2}(t) \cap A_{3}(t) \cdots \cap A_{N}(t)) \end{equation} \begin{figure} \caption{System with a Series Connection of Components} \label{series_fig} \label{series_fig_caption} \end{figure} \noindent Using the assumption of mutual independence of individual reliability events of a series system \cite{Bilinton_1992}, the above equation can be simplified as: \begin{equation}\label{eq3} R_{series}(t) = \prod_{i=1}^{N}R_{i}(t) \end{equation} Moreover, an intrinsic property of a series system is that its overall reliability is always less than or equal to the reliability of the sub-component with the least reliability. 
\begin{equation}\label{eq4} R_{series}(t) \leq min(R_{i}(t)) \end{equation} We proceed with the formalization of the series RBD by first formalizing the notion of mutual independence of more than two random variables, which is one of the most essential prerequisites for reasoning about the simplified expressions for RBD. Two events $A$ and $B$ are termed as mutually independent iff $Pr(A\cap B) = Pr(A)Pr(B)$. All the events involved in reliability modeling are generally assumed to be mutually independent. Since we often tackle the reliability assessment of systems with more than two components, we formalize the mutual independence of a list of random variables in this paper as follows: \begin{flushleft} \texttt{\bf{Definition 3: }} \label{mutual_indep_def} \emph{Mutual Independence of Events } \\ \texttt{$\vdash$ $\forall$ p L. mutual\_indep p L = \\ \ \ $\forall$ L1 n. PERM L L1 $\wedge$ 2 $\leq$ n $\wedge$ n $\leq$ LENGTH L $\Rightarrow$ \\ \ \ \ \ \ \ prob p (inter\_set p (TAKE n L1)) = \\ \ \ \ \ \ \ list\_prod (list\_prob p (TAKE n L1)) } \end{flushleft} \noindent The function \texttt{mutual\_indep} takes a list of events or sets $L$ along with the probability space $p$ as input and returns True if the given list of events are mutually independent in $p$. The formal definitions for the HOL functions used in the above definition are given in Table 1. The predicate \texttt{PERM} ensures that its two list arguments form a permutation of one another, the function \texttt{LENGTH} returns the length of a list, the function \texttt{TAKE} returns a list that contains the first $n$ elements of its argument list, the function \texttt{inter\_set} performs the intersection of all the sets in a list of sets and returns the probability space in case of an empty list argument, the function \texttt{list\_prob} returns a list of probabilities associated with the given list of events in the given probability space and the function \texttt{list\_prod} recursively multiplies all the elements of its argument list of real numbers. Thus, using these functions the function \texttt{mutual\_indep} ensures that for any 2 or more elements $n$, taken in any order, of the given list of events $L$, the property $Pr(\bigcap_{i=0}^nL_i) = \prod_{i=0}^nPr(L_i)$ holds. \begin{table} \begin{tabular}[c]{|l |l|} \hline \texttt{\textbf{Function Name}} \ \ & \texttt{\textbf{HOL Definition}} \\ \hline \hline \texttt{PERM} & \texttt{$\vdash$ $\forall$ L1 L2. PERM L1 L2 =} \\& \ \ \ \texttt{$\forall$ x. FILTER (\$= x) L1 = FILTER (\$= x)L2 } \\ \hline \texttt{LENGTH } & \texttt{$\vdash$ (LENGTH [] = 0 )} $\wedge$ \\ & \ \ \ \texttt{ $\forall$ h t. LENGTH (h::t) = SUC (LENGTH t)} \\ \hline \texttt{TAKE } & \texttt{$\vdash$ ($\forall$ n. TAKE n [] = [])} $\wedge$ \\ & \ \ \ \texttt{$\forall$ n x xs. TAKE n (x::xs) = if n = 0 then [] else } \\ & \ \ \ \ \ \texttt{x::TAKE (n - 1) xs}\\ \hline \texttt{inter\_set } & \texttt{$\vdash$ ($\forall$ p. inter\_set p [] = p\_space p )} $\wedge$ \\ & \ \ \ \texttt{$\forall$ p h t. inter\_set p (h::t) = h $\cap$ inter\_set p t} \\ \hline \texttt{list\_prod } & \texttt{$\vdash$ ($\forall$ list\_prod [] = 1)} $\wedge$ \\ & \ \ \ \texttt{$\forall$ h t. list\_prod (h::t) = h * list\_prod t} \\ \hline \texttt{list\_prob } & \texttt{$\vdash$ ($\forall$ p. list\_prob p [] = [])} $\wedge$ \\ & \ \ \ \texttt{$\forall$ p h t. list\_prob p (h::t) =} \\ & \ \ \ \ \ \ \ \texttt{prob p (h $\cap$ p\_space p) * list\_prob p t} \\ \hline \texttt{min} & \texttt{$\vdash$ $\forall$ x y. 
min x y = if x $\leq$ y then x else y }\\ \hline \texttt{min\_rel } & \texttt{$\vdash$ ($\forall$ f. min\_rel f [] = 1)} $\wedge$ \\ & \ \ \ \texttt{$\forall$ f h t. min\_rel f (h::t) = min (f h) (min\_rel f t)} \\ \hline \end{tabular}\caption{HOL Functions used in Definition 3} \end{table} Next, we propose to formalize the RBDs in this paper by using a list of events, where each event models the proper functioning of a single component at a given time based on the corresponding random variable. This list of events can be modeled as follows: \begin{flushleft} \texttt{\bf{Definition 4: }} \label{list_RV_def} \emph{Reliability Event List} \\ \texttt{$\vdash$ $\forall$ p x. rel\_event\_list p [] x = [] $\wedge$ \\ \ \ $\forall$ p x h t. rel\_event\_list p (h::t) x = \\ PREIMAGE h \{y | Normal x < y\} $\cap$ p\_space p :: rel\_event\_list p t x } \end{flushleft} \noindent The function \texttt{rel\_event\_list} accepts a list of random variables, representing the time to failure of individual components of the system, and a $real$ number $x$, which represents the time index where the reliability is desired, and returns a list of sets corresponding to the events that the individual components are functioning properly at the given time $x$. This list of events can be manipulated, based on the structure of the complete system, to formalize various RBDs. Similarly, the individual reliabilities of a list of random variables can be modeled as the following recursive function: \begin{flushleft} \texttt{\bf{Definition 5: }} \label{list_reliability_function_def} \emph{Reliability of a List of Random Variables } \\ \texttt{$\vdash$ $\forall$ p x . rel\_list p [] x = [] $\wedge$\\ \ \ $\forall$ p h t x. rel\_list p (h::t) x = \\ \ \ Reliability p h x :: rel\_list p t x } \end{flushleft} \noindent The function \texttt{rel\_list} takes a list of random variables and a $real$ number $x$, which represents the time index where the reliability is desired, and returns a list of the corresponding reliabilities at the given time $x$. It is important to note that all the above mentioned definitions are generic enough to represent the behavior of any RBD, like series, parallel, series-parallel and parallel-series. Now, using Equation (\ref{eq2}), the reliability of a serially connected structure can be defined as: \begin{flushleft} \texttt{\bf{Definition 6: }} \label{series_def} \emph{System with a Series Connection of Components } \\ \texttt{$\vdash$ $\forall$ p L. rel\_series p L = prob p (inter\_set p L) } \end{flushleft} \noindent The function \texttt{rel\_series} takes a list of random variables $L$, representing the failure times of the individual components of the system, and a probability space $p$ as input and returns the intersection of all the events corresponding to the reliable functioning of these components using the function \texttt{inter\_set}, given in Table 2. Based on this definition, we formally verified the result of Equation (\ref{eq2}) as follows: \begin{flushleft} \texttt{\bf{Theorem 4: }} \label{series_connected_system_def} \emph{Reliability of a System with Series Connections} \\ \texttt{$\vdash$ $\forall$ p L x. 
prob\_space p $\wedge$ (events p = POW (p\_space p)) $\wedge$\\ \ \ \ \ \ 0 $\leq$ x $\wedge$ 2 $\leq$ LENGTH (rel\_event\_list p L x) $\wedge$ \\ \ \ \ mutual\_indep p (rel\_event\_list p L x) $\Rightarrow$ \\ \ (rel\_series p (rel\_event\_list p L x) = list\_prod (rel\_list p L x)) } \end{flushleft} \noindent The first two assumptions ensure that $p$ is a valid probability space based on Mhamdi's probability theory formalization \cite{mhamdi_11}. The next one ensures that the variable $x$, which models time, is always greater than or equal to 0. The next two assumptions of the above theorem guarantee that we have a list of at least two mutually independent random variables (or a system with two or more components). The conclusion of the theorem represents Equation (\ref{eq2}) using Definitions 4 and 6. The proof of Theorem 4 involves various probability theory axioms, the mutual independence of events and the fact that the probability of any event in the list returned by the function \texttt{rel\_event\_list} equals the corresponding reliability. More proof details can be obtained from our proof script \cite{waqar_ftscs_13}. Similarly, we verified Equation (4) as the following theorem in HOL: \begin{flushleft} \texttt{\bf{Theorem 5: }} \label{series_connected_system_min_reliability_def} \emph{Reliability of a System depends upon the minimum reliability of the connected components} \\ \texttt{$\vdash$ $\forall$ p L x. prob\_space p $\wedge$ (events p = POW (p\_space p)) $\wedge$ \\ \ \ \ \ \ 0 $\leq$ x $\wedge$ 2 $\leq$ LENGTH (rel\_event\_list p L x) $\wedge$ \\ \ \ \ \ \ mutual\_indep p (rel\_event\_list p L x) $\Rightarrow$ \\ \ \ \ \ \ \ (rel\_series p (rel\_event\_list p L x) $\leq$ \\ \ \ \ \ \ \ \ \ min\_rel ($\lambda$ L. Reliability p L x) L) } \end{flushleft} The proof of Theorem 5 uses several probability theory axioms and the fact that any subset of a mutually independent set is also mutually independent. The definitions presented in this section can be used to model the parallel RBD \cite{Bilinton_1992} and formally verify the corresponding simplified reliability relationships as well. The major difference would be the replacement of the function \texttt{inter\_set} in Definition 6 by a function that returns the union of a given list of events. \section{Reliability Analysis of a Pipeline System} \label{sec_5} A typical oil and gas pipeline can be partitioned into a series connection of $N$ segments, where these segments may be classified based on their individual failure times. For example, a 60-segment pipeline is analyzed in \cite{Zhang_08} under the assumption that the segments, which exhibit exponentially distributed failure times, can be sub-divided into 3 categories according to their failure rates ($\lambda$), i.e., 30 segments with $\lambda= 0.0025$, 20 segments with $\lambda= 0.0023$ and 10 segments with $\lambda= 0.015$. The proposed approach for the reliability analysis of pipelines allows us to formally verify generic expressions involving any number of segments and arbitrary failure rates. In this section, we formally verify the reliability of a simple pipeline, depicted in Figure 2, with $N$ segments having arbitrary exponentially distributed failure times. \begin{figure} \caption{A Simple Pipeline} \label{series_pipeline_fig} \label{series_pipeline_caption_fig} \end{figure} We proceed with the formal reliability analysis of the pipeline, shown in Figure 2, by formalizing the exponential random variable in HOL.
\begin{flushleft} \texttt{\bf{Definition 7: }} \label{Exponential_distribution_def} \emph{Exponential Distribution Function } \\ \texttt{$\vdash$ $\forall$ p X l. exp\_dist p X l = \\ \ \ \ $\forall$ x. (CDF p X x = if 0 $\leq$ x then 1 - exp (-l * x) else 0) } \end{flushleft} \noindent The predicate \texttt{exp\_dist} ensures that the random variable $X$ exhibits the CDF of an exponential random variable in probability space $p$ with failure rate $l$. We classify a list of exponentially distributed random variables based on this definition as follows: \begin{flushleft} \texttt{\bf{Definition 8: }} \label{list_of exponential_distribution_function_def} \emph{List of Exponential Distribution Functions} \\ \texttt{$\vdash$ $\forall$ p L. list\_exp p [] L = T $\wedge$\\ \ \ $\forall$ p h t L. list\_exp p (h::t) L = \\ \ \ exp\_dist p (HD L) h $\wedge$ list\_exp p t (TL L) } \end{flushleft} \noindent The \texttt{list\_exp} function accepts a list of failure rates, a list of random variables $L$ and a probability space $p$. It guarantees that all elements of the list $L$ are exponentially distributed with corresponding failure rates given in the other list within the probability space $p$. For this purpose, it utilizes the list functions \texttt{HD} and \texttt{TL}, which return the \emph{head} and \emph{tail} of a list, respectively. Next, we model the pipeline, shown in Figure 2, as a series RBD as follows: \begin{flushleft} \texttt{\bf{Definition 9: }} \label{Reliab_series_def} \emph{Reliability of Series Pipeline System } \\ \texttt{$\vdash$ $\forall$ p L . pipeline p L = rel\_series p L } \end{flushleft} \noindent Now, we can use Definition 8 to guarantee that the random variable list argument of the function \texttt{pipeline} contains exponential random variables only and thus verify the following simplified expression for the pipeline reliability. \begin{flushleft} \texttt{\bf{Theorem 6: }} \label{pipeline system} \emph{Series Pipeline System } \\ \texttt{$\vdash$ $\forall$ p L x C. prob\_space p $\wedge$ (events p = POW (p\_space p)) $\wedge$ \\ \ \ \ \ \ 0 $\leq$ x $\wedge$ 2 $\leq$ LENGTH (rel\_event\_list p L x) $\wedge$ \\ \ \ \ \ \ mutual\_indep p (rel\_event\_list p L x) $\wedge$ \\ \ \ \ \ \ \ list\_exp p C L $\wedge$ (LENGTH C = LENGTH L) $\Rightarrow$ \\ \ \ \ \ \ \ (pipeline p (rel\_event\_list p L x) = exp (-list\_sum C * x)) } \end{flushleft} \noindent The first five assumptions are the same as the ones used in Theorem 5. The sixth assumption \texttt{list\_exp p C L} ensures that the list of random variable $L$ contains all exponential random variables with corresponding failure rates given in list $C$. The next assumptions guarantees that the lengths of the two lists $L$ and $C$ are the same. While the conclusion of Theorem 6 represents desired reliability relationship for the given pipeline model. Here the function \texttt{list\_sum} recursively adds the elements of its list argument and is used to add the failure rates of all exponentially distributed random variables, which are in turn used to model the individual segments of the series RBD of the pipeline. The proof of Theorem 6 is based on Theorem 4 and some properties of the exponential function \texttt{exp}. The reasoning was very straightforward (about 100 lines of HOL code) compared to the reasoning for the verification of Theorem 4 \cite{waqar_ftscs_13}, which involved probability-theoretic guidance. This fact illustrates the usefulness of our core formalization for conducting the reliability analysis of pipelines. 
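As an informal sanity check, performed outside of HOL, the verified expression of Theorem 6 can be instantiated with the failure rates of the 60-segment pipeline from \cite{Zhang_08} quoted above; the short Python sketch below does this for an arbitrary illustrative mission time and confirms numerically that the product form of Equation (\ref{eq3}) and the closed form $e^{-\sum_i \lambda_i t}$ agree.

\begin{verbatim}
import math

# Failure rates of the 60-segment pipeline quoted in Section 5 above.
rates = [0.0025] * 30 + [0.0023] * 20 + [0.015] * 10

t = 1.0  # illustrative mission time (arbitrary choice)

# Equation (3): product of the individual segment reliabilities.
product_form = math.prod(math.exp(-lam * t) for lam in rates)

# Theorem 6: closed form exp(-(sum of failure rates) * t).
closed_form = math.exp(-sum(rates) * t)

print(product_form, closed_form)   # both are approximately 0.7626 for t = 1
\end{verbatim}

Such numeric instantiations are, of course, no substitute for the formal proof; they merely illustrate how the verified generic theorem specializes to concrete parameters.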
The distinguishing features of this formally verified result include its generic nature, i.e., all the variables are universally quantified and thus can be specialized to obtain the reliability of the given pipeline for any given parameters, and its guaranteed correctness due to the involvement of a sound theorem prover in its verification, which ensures that all the required assumptions for the validity of the result are accompanying the theorem. Another point worth mentioning is that the individual failure rates of the pipeline segments can be easily provided to the above theorem in the form of a list, i.e., $C$. The above mentioned benefits are not shared by any other computer based reliability analysis approach for oil and gas pipelines and thus clearly indicate the usefulness of the proposed approach. \section{Conclusions} \label{sec_6} Probabilistic analysis techniques have been widely utilized during the last two decades to assess the reliability of oil and gas pipelines. However, all of these probability theoretic approaches have been utilized using informal system analysis methods, like simulation or paper-and-pencil based analytical methods, and thus do not ensure accurate results. The precision of results is very important in the area of oil and gas pipeline condition assessment since even minor flaws in the analysis could result in the loss of human lives or heavy damages to the environment. In order to achieve this goal and overcome the inaccuracy limitation of the traditional probabilistic analysis techniques, we propose to build upon our proposed formalization of RBDs to formally reason about the reliability of oil and gas pipelines using higher-order-logic theorem proving. Building upon the results presented in this paper, the formalization of other commonly used RBDs, including parallel, series-parallel and parallel-series, and the Weibull random variable is underway. These advanced concepts are widely used in the reliability analysis of pipelines. However, their formalization requires some advanced properties of probability theory. For example, for formalizing the reliability block diagrams of the series-parallel and parallel-series structures, we need to first formally verify the principle of inclusion exclusion \cite{Trivedi_02}. We also plan to formalize the underlying theories to reason about more realistic series pipeline systems, such as multi-state variable piping systems, where each subcomponent of the pipeline system consists of many irreversible states from good to worst. We also plan to investigate artificial neural networks in conjunction with theorem proving to develop a hybrid semi-automatic pipeline reliability analysis framework. Besides the pipeline reliability analysis, the formalized reliability theory foundation presented in this paper, may be used for the reliability analysis of a number of other applications, including hardware and software systems. \section*{Acknowledgments} This publication was made possible by NPRP grant \# [5 - 813 - 1 134] from the Qatar National Research Fund (a member of Qatar Foundation). The statements made herein are solely the responsibility of the author[s]. \end{document}
\begin{document} \title{Data scraping, ingestation, and modeling: bringing data from cars.com into the intro stats class} \hypertarget{introduction}{ \subsection{Introduction}\label{introduction}} New tools have made it much easier for students to develop skills to work with interesting data sets as they begin to extract meaning from data. To fully appreciate the statistical analysis cycle, students benefit from repeated experiences collecting, ingesting, wrangling, analyzing data and communicating results. How can we bring such opportunities into the classroom? We describe a classroom activity, originally developed by Danny Kaplan (Macalester College), in which students can expand upon statistical problem solving by hand-scraping data from cars.com, ingesting these data into R, then carrying out analyses of the relationships between price, mileage, and model year for a selected type of car. Most students might be interested in car prices since many will be purchasing a car at some point in the near future. This activity can help them develop better understanding of factors associated with car prices. The revised GAISE (Guidelines for Assessment and Instruction in Statistics Education) College report (2016) notes the importance of multivariate thinking and the use of technology. Car prices, model year, and mileage are all factors to consider when purchasing or selling a car. Introductory statistics courses need to move beyond only addressing bivariate questions to be able to explore multivariate relationships. In an increasingly data-rich society, plenty of information is available to prospective car purchasers. Consumers can analyze and compare multiple cars to try to get the best deal. By gathering data by hand from cars.com then using this information to generate multivariable visualizations and model prices, students gain experience (1) working in groups, (2) practicing undertaking reproducible analyses, and (3) exploring a multivariate dataset. These key ideas of data generation, data ingestion, data visualization for multivariate analyses, and data modeling are reinforced throughout the activity. We begin by describing the activity, sharing examples of data, visualizations, and models, then suggesting possible extensions and providing concluding thoughts. Instructor materials and datasets associated with this activity can be found at \url{https://github.com/Amherst-Statistics/Cars-Scraping-Webinar}. \hypertarget{activity-class-one}{ \subsection{Activity: Class One}\label{activity-class-one}} Students work in pairs of two and use two computers to gather and hand-enter data concerning the cost of a specific model of a car, then analyze the variations in pricing, price associations with mileage and age, the rate at which cars depreciate, and the cost of driving one mile. One student reads off data from cars.com and the other enters the data into a spreadsheet. Each pair is assigned a different city. The first step of the activity involves gathering data from \href{https://www.cars.com}{\emph{cars.com}}. Using the `advanced filter' option, the model and make of the car are specified, along with the assigned location and restriction to recent years. Various components in price determination include the model, year, mileage, and location. 
\begin{figure} \caption{Cars.com example snapshot of used Toyota Prius models from Dallas} \end{figure} As an example, Figure 1 features a 2015 Toyota Prius from the Dallas area, priced at \$17,998 with 15,866 miles whereas the 2014 Toyota Prius is priced lower at \$10,995 but with a higher mileage of 81,076. Figure 2 illustrates data gathered and entered into an Excel sheet for a group assigned to find car prices in Dallas. \begin{figure} \caption{Student hand-scraped data for Dallas entered into an Excel spreadsheet} \end{figure} The data are entered into a spreadsheet (e.g., Excel, Open Office, or Google Spreadsheet) using a template \texttt{cars.csv} to ensure that the variable names are consistent between groups. Once the group has completed the hand-scraping of 30 or 35 cars, they will upload this spreadsheet into RStudio and run an instructor provided RMarkdown file (\texttt{cars.Rmd}). The RMarkdown file reads the data that they have uploaded to generate descriptive statistics, creates multivariate displays, and fits a multiple regression model. The students need to interpret the results and add their descriptions into the file. The scatterplot produced in Figure 3 uses student-gathered data for Toyota Prius to display the relationship between prices and mileage for Dallas cars. The scatterplot reflects how car prices depreciate as a function of mileage and model year. After the car's first year, the discrepancy in price based on mileage by year tends to diminish. The plot below displays a linear regression model for Prius prices in Dallas. \begin{figure} \caption{Toyota Prius prices in Dallas based on mileage} \end{figure} Here the ggformula interface to the ggplot2 graphics system is used because it provides a general modeling syntax similar to the `lm()' function in R. \begin{Shaded} \begin{Highlighting}[] \KeywordTok{library}\NormalTok{(ggformula)} \KeywordTok{gf_point}\NormalTok{(price }\OperatorTok{~}\StringTok{ }\NormalTok{mileage, }\DataTypeTok{color =} \OperatorTok{~}\StringTok{ }\NormalTok{year, }\DataTypeTok{data =}\NormalTok{ Dallas) }\OperatorTok{ \StringTok{ }\KeywordTok{gf_lm}\NormalTok{()} \end{Highlighting} \end{Shaded} \begin{table}[ht] \centering \begin{tabular}{rrrrr} \hline & Estimate & Std. Error & t value & Pr($>$$|$t$|$) \\ \hline (Intercept) & 19721.0125 & 706.8204 & 27.90 & 0.0000 \\ mileage & -0.1075 & 0.0135 & -7.96 & 0.0000 \\ \hline \end{tabular} \end{table} The students then edit the RMarkdown file to interpret their results based on the model and the graphical displays. For the Dallas group, the summary output of the model in the table suggests that for every mile driven, the car's predicted value (determined by price) will decrease on average by about eleven cents. Common errors that students experience include issues with formatting (e.g., if they included dollar signs in the column for price) or problems where they used different variable names than specified in the assignment. 
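Since formatting problems are the most common stumbling block, it can help to show students a few correctly formatted rows up front. The exact column headers are fixed by the instructor's \texttt{cars.csv} template and may include additional fields; based on the variables used later in the analysis (price, mileage, year, location), rows for the Dallas listings above would look roughly as follows (no dollar signs or commas in the numeric columns):

\begin{verbatim}
price,mileage,year,location
17998,15866,2015,Dallas
10995,81076,2014,Dallas
\end{verbatim}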
To obtain credit for the first part of the assignment, students must: \begin{enumerate} \def\arabic{enumi}){\arabic{enumi})} \tightlist \item post the formatted file to RPubs (to allow a brief discussion of student findings and interpretations) \item email the csv file to the instructor \end{enumerate} \hypertarget{activity-class-two}{ \subsection{Activity: Class Two}\label{activity-class-two}} Prior to the next class period, the instructor collates the data from each group (in csv files) to create graphical displays, multiple regression models, and interpretations from the data from all of the cities. These results can be referenced as part of a future class discussion. The collation process will identify issues (e.g., inconsistent formatting or variable naming) in the individual datasets, which also provide an opportunity for discussion. Figure 4 displays the scatterplot visualizing the relationship between the price and mileage, where an interaction is included between the mileage and (categorical) model year, using data scraped from all of the cities (n = 830). \begin{Shaded} \begin{Highlighting}[] \KeywordTok{library}\NormalTok{(mosaic)} \KeywordTok{tally}\NormalTok{(}\OperatorTok{~}\StringTok{ }\NormalTok{location, }\DataTypeTok{data =}\NormalTok{ ds)} \end{Highlighting} \end{Shaded} \begin{verbatim} ## location ## 40202 Atlanta Bangor, ME Baton Rouge Boston ## 40 40 40 40 40 ## Buffalo Chicago Cleveland Dallas Los Angeles ## 33 41 26 41 40 ## Minneapolis New Orleans NYC Phoenix Portland ## 59 33 40 39 40 ## Richmond Salt Lake City San Diego San Francisco Seattle ## 40 33 39 39 39 ## Tampa ## 40 \end{verbatim} We note that one group has included the zip code (needed to specify location in cars.com) instead of the city name. Also note that some groups only scraped 33 or 39 cars (to keep the class together on day one data scraping was cut off after a certain amount of time). \begin{figure} \caption{Price versus mileage stratified by year (n = 830)} \end{figure} \begin{Shaded} \begin{Highlighting}[] \KeywordTok{gf_point}\NormalTok{(price }\OperatorTok{~}\StringTok{ }\NormalTok{mileage, }\DataTypeTok{color =} \OperatorTok{~}\StringTok{ }\NormalTok{year, }\DataTypeTok{data =}\NormalTok{ ds) }\OperatorTok{ \StringTok{ }\KeywordTok{gf_lm}\NormalTok{() }\OperatorTok{ \StringTok{ }\KeywordTok{gf_labs}\NormalTok{(}\DataTypeTok{y =} \StringTok{"price (US $)"}\NormalTok{)} \end{Highlighting} \end{Shaded} \begin{table}[ht] \centering \begin{tabular}{rrrrr} \hline & Estimate & Std. 
Error & t value & Pr($>$$|$t$|$) \\ \hline (Intercept) & 17061.0694 & 868.8562 & 19.64 & 0.0000 \\ locationAtlanta & -1638.4149 & 462.5576 & -3.54 & 0.0004 \\ locationBangor, ME & -1689.6974 & 463.9047 & -3.64 & 0.0003 \\ locationBaton Rouge & -745.2125 & 474.3208 & -1.57 & 0.1166 \\ locationBoston & -563.6481 & 460.0693 & -1.23 & 0.2209 \\ locationBuffalo & -581.6074 & 484.2352 & -1.20 & 0.2301 \\ locationChicago & -2237.4990 & 456.4975 & -4.90 & 0.0000 \\ locationCleveland & -1491.5866 & 520.8768 & -2.86 & 0.0043 \\ locationDallas & -1078.1113 & 462.0475 & -2.33 & 0.0199 \\ locationLos Angeles & 2319.6793 & 460.0475 & 5.04 & 0.0000 \\ locationMinneapolis & -622.8958 & 423.7223 & -1.47 & 0.1419 \\ locationNew Orleans & -573.2974 & 498.8439 & -1.15 & 0.2508 \\ locationNYC & -594.5619 & 458.8934 & -1.30 & 0.1955 \\ locationPhoenix & -325.9632 & 463.8124 & -0.70 & 0.4824 \\ locationPortland & 65.2454 & 461.6683 & 0.14 & 0.8876 \\ locationRichmond & -744.3217 & 461.1860 & -1.61 & 0.1069 \\ locationSalt Lake City & -1954.0469 & 494.6800 & -3.95 & 0.0001 \\ locationSan Diego & 257.6979 & 461.9773 & 0.56 & 0.5771 \\ locationSan Francisco & 1578.2819 & 461.3929 & 3.42 & 0.0007 \\ locationSeattle & 2136.5419 & 463.0608 & 4.61 & 0.0000 \\ locationTampa & -2152.2974 & 462.1671 & -4.66 & 0.0000 \\ mileage & -0.0606 & 0.0095 & -6.38 & 0.0000 \\ year2012 & -251.3108 & 1135.1085 & -0.22 & 0.8248 \\ year2013 & 3237.2317 & 894.6854 & 3.62 & 0.0003 \\ year2014 & 3140.1907 & 888.3434 & 3.53 & 0.0004 \\ year2015 & 3252.5139 & 885.3063 & 3.67 & 0.0003 \\ year2016 & 8208.6105 & 874.4768 & 9.39 & 0.0000 \\ mileage:year2012 & 0.0171 & 0.0144 & 1.19 & 0.2345 \\ mileage:year2013 & -0.0180 & 0.0121 & -1.48 & 0.1394 \\ mileage:year2014 & -0.0034 & 0.0140 & -0.25 & 0.8060 \\ mileage:year2015 & -0.0099 & 0.0139 & -0.71 & 0.4778 \\ mileage:year2016 & -0.1819 & 0.0275 & -6.60 & 0.0000 \\ \hline \end{tabular} \end{table} The multiple regression output describes the relationship between the price based on location, mileage, year, and the interaction between mileage and year. This is a relatively sophisticated model, with 32 predictors. Example interpretations of this model are included below: LOCATION: After controlling for mileage and year, prices for a Toyota Prius in Boston are predicted to be \$564 less than in Louisville, Kentucky (the reference group). (Note the reference group is the first group in the data set, which by R's default is alphabetically. Here, it is Louisville, Kentucky as one group entered location as a zip code, 40202, rather than by name.) MILEAGE: Holding location constant, the predicted price of a Prius decreases on average by about six cents for an additional mile for Priuses of the model. INTERACTION: The interaction of mileage and year is more complicated to interpret, since it includes five regression coefficients. We would predict an additional average decrease in value of about eighteen cents per mile driven for 2016 models compared with 2011 models, after accounting for location. This is a great example of the \emph{new car effect}: there is a much higher rate of depreciation in value of newer cars in comparison to older models. Other aspects of the model lend themselves to discussion. There are two outliers (both from the same group) with very low prices. These are likely prices that were entered incorrectly. 
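Another aspect of the model worth unpacking with students is the year-specific mileage slope implied by the interaction terms. Using the coefficients reported in the table above, the fitted mileage slope for 2016 models is
\[
\hat\beta_{\text{mileage}} + \hat\beta_{\text{mileage:year2016}} \approx -0.0606 + (-0.1819) = -0.2425,
\]
that is, roughly 24 cents of predicted depreciation per mile for 2016 models, versus about 6 cents per mile for the 2011 reference year, after accounting for location.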
In addition, the functional form of the relationship between price and mileage (conditional on year) is not very linear (though the regression model is assuming linear relationships). We consider these as part of possible extensions of the activity. \hypertarget{extensions}{ \subsection{Extensions}\label{extensions}} In terms of introductory statistics, this activity works to develop students ability to undertake the entire data analysis cycle. They collect data by scraping information (by hand) from a website, then loading this into RStudio.\\ With the data set, students can practice interpreting interaction terms in the model. This practice will prove beneficial to students as data sets (and models) become increasingly complex in future statistics courses. In the model produced in Figure 4, two outliers are observed. The two points can be found in the data set by searching for Toyota Priuses priced well below the average. Both data points indicate a pricing at \$2,500 from Chicago, with one 2014 model and one 2015 model, and both of the same model type (four). The 2014 model has a mileage of 17,152 wherein the average price for a used car of similar mileage in Chicago is around \$15,550 and the 2015 model (with current mileage of 21,027) would be priced around \$16,000, according to the model. It appears that the large discrepancy between the price and mileage (well under the average predicted price by \$13,000) could be due to input error, such as a missing zero at the end of the value. Students should note these outliers and decide from inference whether or not to include them in the final model. We have introduced this activity early in the course so have not focused much on the functional form of the relationship between price and mileage (beyond noting that the relationship is not very linear, see Figure 5). Consideration of more flexible regression models could be undertaken to better reflect the underlying relationships. \begin{figure} \caption{Price versus mileage for 2013 vehicles (with superimposed smoother)} \end{figure} While students included additional information in their spreadsheets regarding trim models or add-on packages for the cars, this was not incorporated into the modeling. Additional data wrangling would be needed to bring this into the model as an additional predictor given the inconsistent and idiosyncratic ways that such information is made available by sellers in \emph{cars.com}. Potential pitfalls include that the predictions made from the linear models reflect only the cars in the data set and are not completely representative of all car prices and locations. The models produced also do not reflect consumer habits in its entirety as the data gathered only demonstrates cars that are for sale and not necessarily sales price: negotiation is important in determining sales price! Aspects of these biases and data limitations could form the basis of a discussion of design. \hypertarget{conclusions}{ \subsection{Conclusions}\label{conclusions}} This activity is intended to reinforce critical aspects outlined by the GAISE report, including teamwork, problem solving, and the use of data to make decisions. This activity encourages multivariate thinking through application facilitated by technology. The discovery of the \emph{new car effect} is not obvious in a bivariate analysis. Additional concepts such as data ingestion, regression modeling, and graphical visualizations are among the other key learning outcomes. 
Students are given the opportunity to gather data by hand and build models to extract meaningful inferences. The learning objectives of the cars.com activity carry over to other spheres of consumer behavior, and students gain independence in their ability to make sound consumer decisions. Financial literacy is an important capacity for students to develop. This activity may help prepare students to make better decisions when buying a car. A focus on conceptual understanding, integration of real data with a context and purpose, and a fostering of active learning are also critical to students' comprehension. The use of technology to explore concepts and analyze data, and assessments to improve and evaluate student learning, are additional goals of this activity.

\hypertarget{further-reading}{
\subsection{Further Reading}\label{further-reading}}

GAISE College Report ASA Revision Committee, \emph{Guidelines for Assessment and Instruction in Statistics Education College Report 2016,} \url{http://www.amstat.org/education/gaise}.

National Academies of Sciences, Engineering, and Medicine. 2018. \emph{Data Science for Undergraduates: Opportunities and Options.} Washington, DC: The National Academies Press. \url{https://doi.org/10.17226/25104}, \url{https://nas.edu/envisioningds}

Ben Baumer, Mine Cetinkaya-Rundel, Andrew Bray, Linda Loi, \& Nicholas J. Horton (2014). RMarkdown: Integrating A Reproducible Analysis Tool into Introductory Statistics. Technology Innovations in Statistics Education, 8(1). Retrieved from \url{https://escholarship.org/uc/item/90b2f5xh}

Randall Pruim, Daniel T. Kaplan, and Nicholas J. Horton. ``The Mosaic Package: Helping Students to Think with Data Using R.'' The R Journal, June 2017, \url{https://journal.r-project.org/archive/2017/RJ-2017-024/}.

Nicholas J. Horton, Benjamin S. Baumer, \& Hadley Wickham (2015) Taking a Chance in the Classroom: Setting the Stage for Data Science: Integration of Data Management Skills in Introductory and Second Courses in Statistics, CHANCE, 28:2, 40-50, DOI: 10.1080/09332480.2015.1042739

\hypertarget{biographies}{
\subsection{Biographies}\label{biographies}}

Sarah McDonald is a student at Amherst College, majoring in Statistics. Her areas of interest include applications of statistical analysis in consumer purchasing and behavioral habits. Her undergraduate research involves studying effective ways to integrate and facilitate computation in introductory statistics courses.

Nicholas J. Horton is Beitzel Professor of Technology and Society and Professor of Statistics and Data Science at Amherst College, with interests in longitudinal regression, missing data methods, statistical computing, and statistical education. He received his doctorate in biostatistics from the Harvard School of Public Health in 1999, and has co-authored a series of books on statistical computing and data science.

\end{document}
\begin{document} \renewcommand{References}{References} \thispagestyle{empty} \title[Expansion of Iterated Stratonovich Stochastic Integrals of Multiplicity 2] {Expansion of Iterated Stratonovich Stochastic Integrals of Multiplicity 2. Combined Approach Based on Generalized Multiple and Iterated Fourier Series} \author[D.F. Kuznetsov]{Dmitriy F. Kuznetsov} \address{Dmitriy Feliksovich Kuznetsov \newline\hphantom{iii} Peter the Great Saint-Petersburg Polytechnic University, \newline\hphantom{iii} Polytechnicheskaya ul., 29, \newline\hphantom{iii} 195251, Saint-Petersburg, Russia} \email{sde\[email protected]} \thanks{\sc Mathematics Subject Classification: 60H05, 60H10, 42B05, 42C10} \thanks{\sc Keywords: Iterated Stratonovich stochastic integral, Iterated Ito stochastic integral, Generalized multiple Fourier series, Generalized iterated Fourier series, Legendre polynomial, Trigonometric functions, Mean-square approximation, Expansion.} \maketitle {\small \begin{quote} \noindent{\sc Abstract.} The article is devoted to the expansion of iterated Stratonovich stochastic integrals of multiplicity 2 on the base of the combined approach of generalized multiple and iterated Fourier series. We consider two different parts of the expansion of iterated Stra\-to\-no\-vich stochastic integrals. The mean-square convergence of the first part is proved on the base of generalized multiple Fourier series converging in the sense of norm in Hilbert space $L_2([t, T]^2).$ The mean-square convergence of the second part is proved on the base of generalized iterated (double) Fourier series converging pointwise. At that, we prove the iterated limit transition for the second part of the expansion on the base of Lebesgue's Dominated Convergence Theorem. The results of the article can be applied to the numerical integration of Ito stochastic differential equations. \setlength{\baselineskip}{2.0em} \tableofcontents \setlength{\baselineskip}{1.2em} \end{quote} } \section{Introduction} Let $(\Omega,$ ${\rm F},$ ${\sf P})$ be a complete probability space, let $\{{\rm F}_t, t\in[0,T]\}$ be a nondecreasing right-continous family of $\sigma$-algebras of ${\rm F},$ and let ${\bf f}_t$ be a standard $m$-dimensional Wiener stochastic process, which is ${\rm F}_t$-measurable for any $t\in[0, T].$ We assume that the components ${\bf f}_{t}^{(i)}$ $(i=1,\ldots,m)$ of this process are independent. Let us consider the following collections of iterated Stratonovich and Ito stochastic integrals \begin{equation} \label{str} J^{*}[\psi^{(2)}]_{T,t}= \int\limits_t^{*T}\psi_2(t_2)\int\limits_t^{*t_{2}} \psi_1(t_1) d{\bf w}_{t_1}^{(i_1)}d{\bf w}_{t_2}^{(i_2)}, \end{equation} \begin{equation} \label{ito} J[\psi^{(2)}]_{T,t}=\int\limits_t^T\psi_2(t_2)\int\limits_t^{t_{2}} \psi_1(t_1) d{\bf w}_{t_1}^{(i_1)} d{\bf w}_{t_2}^{(i_2)}, \end{equation} \noindent where every $\psi_l(\tau)$ $(l=1,\ 2)$ is a nonrandom function at the interval $[t,T],$ ${\bf w}_{\tau}^{(i)}={\bf f}_{\tau}^{(i)}$ for $i=1,\ldots,m$ and ${\bf w}_{\tau}^{(0)}=\tau,$\ \ $i_1,\ldots,i_k = 0,\ 1,\ldots,m,$ $$ \int\limits^{*}\ \hbox{and}\ \int\limits $$ \noindent denote Stratonovich and Ito stochastic integrals, respectively (in this paper, we use the definition of the Stratonovich stochastic integral from \cite{KlPl2}). Further, we will denote as $\{\phi_j(x)\}_{j=0}^{\infty}$ the complete orthonormal systems of Legendre polynomials and trigonometric functions in the space $L_2([t, T])$. 
We will also pay special attention to the following well-known facts concerning these two systems of functions \cite{Gob}.

{\it Suppose that the function $f(x)$ is bounded at the interval $[t, T].$ Moreover, its derivative $f'(x)$ is a continuous function at the interval $[t, T]$ except possibly at a finite number of points of finite discontinuity. Then the Fourier series
$$
\sum\limits_{j=0}^{\infty} C_j\phi_j(x),\ \ \ C_j=\int\limits_t^{T}f(x)\phi_j(x)dx
$$
\noindent converges at any interior point $x$ of the interval $[t, T]$ to the value $\left(f(x+0)+f(x-0)\right)/2$ and converges uniformly to $f(x)$ on any closed interval of continuity of the function $f(x)$ lying inside $[t, T]$. At the same time, the Fourier--Legendre series converges at $x=t$ and $x=T$ to $f(t+0)$ and $f(T-0)$, respectively, while the trigonometric Fourier series converges at $x=t$ and $x=T$ to $\left(f(t+0)+f(T-0)\right)/2$ in the case of periodic continuation of the function $f(x)$.}

\section{Expansion of Iterated Stratonovich Stochastic Integrals of Multiplicity 2}

The use of generalized multiple and iterated Fourier series with respect to various complete orthonormal systems of functions in the space $L_2([t, T])$ for the expansion of iterated Ito and Stratonovich stochastic integrals is reflected in a number of works of the author \cite{2013}-\cite{01}. In these papers, several new approaches to the mean-square approximation of iterated stochastic integrals were proposed and developed. One of the mentioned approaches (the so-called combined approach) for the expansion of iterated Stratonovich stochastic integrals of multiplicities 1 to 4 based on generalized multiple and iterated Fourier series has been considered in \cite{2013}-\cite{2017-1xxyz}.

In this article, we consider the case of multiplicity 2 for iterated Stratonovich stochastic integrals. Moreover, we prove the mean-square convergence of the expansion of iterated Stratonovich stochastic integrals using a method different from that of \cite{2013}, \cite{2013a}.

{\bf Theorem 1}\ \cite{2013}-\cite{2017-1xxyz}. {\it Suppose that the following conditions are fulfilled{\rm :}

{\rm 1}. Every $\psi_l(\tau)$ $(l=1,\ 2)$ is a continuously differentiable nonrandom function at the interval $[t, T]$.

{\rm 2}. $\{\phi_j(x)\}_{j=0}^{\infty}$ is the complete orthonormal system of Legendre polynomials or trigonometric functions in the space $L_2([t, T])$.
Then the iterated Stratonovich stochastic integral of the second multiplicity $$ J^{*}[\psi^{(2)}]_{T,t}={\int\limits_t^{*}}^T\psi_2(t_2) {\int\limits_t^{*}}^{t_2}\psi_1(t_1)d{\bf w}_{t_1}^{(i_1)} d{\bf w}_{t_2}^{(i_2)}\ \ \ (i_1, i_2=0, 1,\ldots,m) $$ \noindent is expanded into the multiple series $$ J^{*}[\psi^{(2)}]_{T,t}= \hbox{\vtop{\offinterlineskip\halign{ \hfil#\hfil\cr {\rm l.i.m.}\cr $\stackrel{}{{}_{p_1, p_2\to \infty}}$\cr }} } \sum\limits_{j_1=0}^{p_1} \sum\limits_{j_2=0}^{p_2} C_{j_2j_1}\zeta_{j_1}^{(i_1)}\zeta_{j_2}^{(i_2)} $$ \noindent that converges in the mean-square sense, where {\rm l.i.m.} is a limit in the mean-square sense, $$ \zeta_{j}^{(i)}= \int\limits_t^T \phi_{j}(\tau) d{\bf w}_{\tau}^{(i)} $$ \noindent are independent standard Gaussian random variables for various $i$ or $j$ {\rm(}if $i\ne 0${\rm)}, $$ C_{j_2 j_1}= \int\limits_t^T \psi_2(t_2)\phi_{j_2}(t_2)\int\limits_t^{t_2} \psi_1(t_1)\phi_{j_1}(t_1)dt_1 dt_2 $$ \noindent is the Fourier coefficient.} {\bf Remark 1.}\ {\it It should be noted that Theorem {\rm 1} is proved in \cite{2017-1}, \cite{200}, and \cite{xxxxx} {\rm (}also see \cite{2017-1xx}-\cite{2017-1xxyz}{\rm )} by three different ways. The proof from \cite{2017-1} is based on double integration by parts while the proof from \cite{200} is based on the generalized multiple {\rm (}double{\rm )} Fourier series summarized by Pringsheim method in the square $[t, T]^2.$ At the same time, the generalized iterated {\rm (}double{\rm )} Fourier series have been applied in the proof from \cite{xxxxx}. Note that the double Fourier--Legendre series and double trigonometric Fourier series have been used in \cite{2017-1}, \cite{200}, and \cite{xxxxx} (also see \cite{2017-1xx}-\cite{2017-1xxyz}). Below we consider the fourth proof of Theorem {\rm 1}, which is simpler than the proofs from \cite{2017-1}, \cite{200}.} {\bf Proof.} Let us consider some auxiliary lemmas from \cite{2013} (also see \cite{2017}-\cite{14}, \cite{2006}-\cite{26a}). At that, we will consider the particular case of these lemmas for $k=2.$ Consider the partition $\{\tau_j\}_{j=0}^N$ of the interval $[t,T]$ such that \begin{equation} \label{1111} t=\tau_0<\ldots <\tau_N=T,\ \ \ \Delta_N= \hbox{\vtop{\offinterlineskip\halign{ \hfil#\hfil\cr {\rm max}\cr $\stackrel{}{{}_{0\le j\le N-1}}$\cr }} }\Delta\tau_j\to 0\ \ \hbox{if}\ \ N\to \infty,\ \ \ \Delta\tau_j=\tau_{j+1}-\tau_j. \end{equation} {\bf Lemma 1}\ \cite{2013} (also see \cite{2017}-\cite{14}, \cite{2006}-\cite{26a}). {\it Suppose that every $\psi_l(\tau)$ $(l=1,\ 2)$ is a continuous nonrandom function at the interval $[t, T]$. Then \begin{equation} \label{30.30} J[\psi^{(2)}]_{T,t}= \hbox{\vtop{\offinterlineskip\halign{ \hfil#\hfil\cr {\rm l.i.m.}\cr $\stackrel{}{{}_{N\to \infty}}$\cr }} } \sum_{j_2=0}^{N-1}\sum_{j_1=0}^{j_{2}-1} \psi_1(\tau_{j_1})\psi_2(\tau_{j_2}) \Delta{\bf w}_{\tau_{j_1}}^{(i_1)}\Delta{\bf w}_{\tau_{j_2}}^{(i_2)}\ \ \ \hbox{\rm w. p. 1}, \end{equation} \noindent where $J[\psi^{(2)}]_{T,t}$ is the stochastic integral {\rm (\ref{ito}),} $\Delta{\bf w}_{\tau_{j}}^{(i)}= {\bf w}_{\tau_{j+1}}^{(i)}-{\bf w}_{\tau_{j}}^{(i)}$ $(i=0, 1,\ldots,m)$, $\left\{\tau_{j}\right\}_{j=0}^{N}$ is the partition of the interval $[t,T]$ satisfying the condition {\rm (\ref{1111});} hereinafter w.~p.~{\rm 1} means with probability {\rm 1}. 
}

Let us define the following multiple stochastic integral
\begin{equation}
\label{30.34}
\hbox{\vtop{\offinterlineskip\halign{
\hfil#\hfil\cr
{\rm l.i.m.}\cr
$\stackrel{}{{}_{N\to \infty}}$\cr
}} }\sum_{j_1,j_2=0}^{N-1}
\Phi\left(\tau_{j_1},\tau_{j_2}\right)
\Delta{\bf w}_{\tau_{j_1}}^{(i_1)}\Delta{\bf w}_{\tau_{j_2}}^{(i_2)}
\stackrel{\rm def}{=}J[\Phi]_{T,t}^{(2)},
\end{equation}
\noindent where $\Phi(t_1,t_2):\ [t, T]^2\to\mathbb{R}$ is a nonrandom function (the properties of this function will be specified further), $\Delta{\bf w}_{\tau_{j}}^{(i)}= {\bf w}_{\tau_{j+1}}^{(i)}-{\bf w}_{\tau_{j}}^{(i)}$ $(i=0, 1,\ldots,m)$, $\left\{\tau_{j}\right\}_{j=0}^{N}$ is the partition of the interval $[t,T]$ satisfying the condition {\rm (\ref{1111})}.

Denote
\begin{equation}
\label{dom1}
D_2=\{(t_1,t_2):\ t\le t_1<t_2\le T\}.
\end{equation}

We will use the same symbol $D_2$ to denote the open and closed domains corresponding to the domain $D_2$ defined by (\ref{dom1}). However, we always specify which domain we consider (open or closed). Also, we will write $\Phi(t_1,t_2)\in C(D_2)$ if $\Phi(t_1,t_2)$ is a continuous nonrandom function of two variables in the closed domain $D_2$.

Let us consider the iterated Ito stochastic integral
$$
I[\Phi]_{T,t}^{(2)}\stackrel{\rm def}{=}
\int\limits_t^T\int\limits_t^{t_2}
\Phi(t_1,t_2)d{\bf w}_{t_1}^{(i_1)}d{\bf w}_{t_2}^{(i_2)},
$$
\noindent where $\Phi(t_1,t_2)\in C(D_2).$

{\bf Lemma 2}\ \cite{2013} (also see \cite{2017}-\cite{14}, \cite{2006}-\cite{26a}). {\it Suppose that $\Phi(t_1,t_2)\in C(D_2)$ or $\Phi(t_1,t_2)$ is a continuous nonrandom function in the open domain $D_2$ and bounded on its boundary. Then
\begin{equation}
\label{30.52}
I[\Phi]_{T,t}^{(2)}=\hbox{\vtop{\offinterlineskip\halign{
\hfil#\hfil\cr
{\rm l.i.m.}\cr
$\stackrel{}{{}_{N\to \infty}}$\cr
}} }
\sum_{j_2=0}^{N-1}
\sum_{j_1=0}^{j_{2}-1}
\Phi(\tau_{j_1},\tau_{j_2})
\Delta {\bf w}_{\tau_{j_1}}^{(i_1)}\Delta {\bf w}_{\tau_{j_2}}^{(i_2)}\ \ \ \hbox{{\rm w. p. 1}},
\end{equation}
\noindent where $\Delta{\bf w}_{\tau_{j}}^{(i)}= {\bf w}_{\tau_{j+1}}^{(i)}-{\bf w}_{\tau_{j}}^{(i)}$ $(i=0, 1,\ldots,m)$, $\left\{\tau_{j}\right\}_{j=0}^{N}$ is the partition of the interval $[t,T]$ satisfying the condition {\rm (\ref{1111})}.
}

{\bf Lemma 3}\ \cite{2013} (also see \cite{2017}-\cite{14}, \cite{2006}-\cite{26a}). {\it Suppose that every $\varphi_l(\tau)$ $(l=1,\ 2)$ is a continuous nonrandom function at the interval $[t, T]$. Then
\begin{equation}
\label{30.39}
J[\varphi_1]_{T,t}J[\varphi_2]_{T,t}=J[\Phi]_{T,t}^{(2)}\ \ \ \hbox{w. p. {\rm 1}},
\end{equation}
\noindent where
$$
\Phi(t_1,t_2)=\varphi_1(t_1)\varphi_2(t_2),\ \ \ \
J[\varphi_l]_{T,t} =\int\limits_t^T \varphi_l(\tau) d{\bf w}_{\tau}^{(i_l)}\ \ \ (l=1,\ 2)
$$
\noindent and the stochastic integral $J[\Phi]_{T,t}^{(2)}$ is defined by the equality {\rm (\ref{30.34}),}\ $i_1, i_2=0, 1,\ldots,m.$
}

In accordance with the standard relations between Stratonovich and Ito stochastic integrals, we have w.~p.~1 \cite{KlPl2}
\begin{equation}
\label{oop51}
J^{*}[\psi^{(2)}]_{T,t}= J[\psi^{(2)}]_{T,t}+
\frac{1}{2}{\bf 1}_{\{i_1=i_2\ne 0\}}
\int\limits_t^T\psi_1(t_1)\psi_2(t_1)dt_1,
\end{equation}
\noindent where ${\bf 1}_A$ is the indicator of the set $A$.
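Before proceeding, we give a purely numerical illustration of the relation (\ref{oop51}); it is not used in the proof below. Consider the simplest case $\psi_1(\tau),\psi_2(\tau)\equiv 1,$ $i_1=i_2,$ $t=0,$ $T=1,$ so that the correction term equals $1/2$ and $J^{*}[\psi^{(2)}]_{T,t}={\bf f}_T^{2}/2$ w.~p.~1. The following Python sketch (the numbers of steps and of simulated paths are our own choices) approximates the Ito integral by its left-point sums as in Lemma 1 and adds the correction term:

\begin{verbatim}
import numpy as np

# Illustration of (oop51) for psi_1 = psi_2 = 1, i_1 = i_2, t = 0, T = 1.
rng = np.random.default_rng(0)
n_paths, n_steps, T = 10000, 2000, 1.0
dt = T / n_steps
dW = rng.normal(0.0, np.sqrt(dt), size=(n_paths, n_steps))
W = np.cumsum(dW, axis=1)

# Ito iterated integral of multiplicity 2 via left-point sums (cf. Lemma 1).
W_left = np.hstack([np.zeros((n_paths, 1)), W[:, :-1]])
J_ito = np.sum(W_left * dW, axis=1)

# Stratonovich value according to (oop51); pathwise it should equal W_T^2 / 2.
J_strat = J_ito + 0.5 * T
print(np.mean(np.abs(J_strat - W[:, -1] ** 2 / 2.0)))  # small; -> 0 as n_steps grows
\end{verbatim}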
Let us define the function $K^{*}(t_1,t_2)$ at the square $[t,T]^2$ as follows \begin{equation} \label{1999.1} K^{*}(t_1,t_2)=\psi_1(t_1)\psi_2(t_2) \Biggl({\bf 1}_{\{t_1<t_{2}\}}+ \frac{1}{2}{\bf 1}_{\{t_1=t_{2}\}}\Biggr) =K(t_1,t_2)+\frac{1}{2}{\bf 1}_{\{t_1=t_{2}\}}\psi_1(t_1)\psi_2(t_2), \end{equation} \noindent where $$ K(t_1,t_2)= \begin{cases} \psi_1(t_1)\psi_2(t_2),\ &t_1<t_2\cr\cr 0,\ &\hbox{\rm otherwise} \end{cases},\ \ \ t_1, t_2\in[t, T] $$ \noindent and ${\bf 1}_A$ is the indicator of the set $A$. {\bf Lemma 4}\ \cite{2013}, \cite{2017}-\cite{14}, \cite{xxxxx}. {\it Under the conditions of Theorem {\rm 1} the following relation \begin{equation} \label{30.36} J[{K^{*}}]_{T,t}^{(2)}= J^{*}[\psi^{(2)}]_{T,t} \end{equation} \noindent is valid w.~p.~{\rm 1}, where $J[{K^{*}}]_{T,t}^{(2)}$ is defined by the equality {\rm (\ref{30.34})}.} {\bf Proof.} Substituting (\ref{1999.1}) into (\ref{30.34}) and using Lemmas 1 and 2, it is easy to see that \begin{equation} \label{30.37} J[{K^{*}}]_{T,t}^{(2)} = J[\psi^{(2)}]_{T,t}+ \frac{1}{2}{\bf 1}_{\{i_1=i_2\ne 0\}} \int\limits_t^T\psi_1(t_1)\psi_2(t_1)dt_1 =J^{*}[\psi^{(2)}]_{T,t}\ \ \ \hbox{w.\ p.\ 1}. \end{equation} Let us consider the following generalized double Fourier sum $$ \sum_{j_1=0}^{p_1}\sum_{j_2=0}^{p_2} C_{j_2j_1}\phi_{j_1}(t_1)\phi_{j_2}(t_2), $$ \noindent where $C_{j_2j_1}$ is the Fourier coefficient of the form \begin{equation} \label{1} C_{j_2j_1}=\int\limits_{[t,T]^2} K^{*}(t_1,t_2)\phi_{j_1}(t_1)\phi_{j_2}(t_2)dt_1dt_2. \end{equation} Substitute the relation $$ K^{*}(t_1,t_2)= \sum_{j_1=0}^{p_1}\sum_{j_2=0}^{p_2} C_{j_2j_1}\phi_{j_1}(t_1)\phi_{j_2}(t_2)+ K^{*}(t_1,t_2) -\sum_{j_1=0}^{p_1}\sum_{j_2=0}^{p_2} C_{j_2j_1}\phi_{j_1}(t_1)\phi_{j_2}(t_2) $$ \noindent with finite $p_1$ and $p_2$ into $J[{K^{*}}]_{T,t}^{(2)}.$ Then, using Lemma 3, we obtain \begin{equation} \label{proof1} J^{*}[\psi^{(2)}]_{T,t}= \sum_{j_1=0}^{p_1}\sum_{j_2=0}^{p_2} C_{j_2j_1} \zeta_{j_1}^{(i_1)}\zeta_{j_2}^{(i_2)}+ J[R_{p_1p_2}]_{T,t}^{(2)}\ \ \ \hbox{w. p. {\rm 1}}, \end{equation} \noindent where the stochastic integral $J[R_{p_1p_2}]_{T,t}^{(2)}$ is defined in accordance with (\ref{30.34}) and \begin{equation} \label{30.46} R_{p_1p_2}(t_1,t_2)= K^{*}(t_1,t_2)- \sum_{j_1=0}^{p_1}\sum_{j_2=0}^{p_2} C_{j_2j_1}\phi_{j_1}(t_1)\phi_{j_2}(t_2), \end{equation} $$ \zeta_{j}^{(i)}=\int\limits_t^T \phi_{j}(\tau) d{\bf w}_{\tau}^{(i)}, $$ $$ J[R_{p_1p_2}]_{T,t}^{(2)}=\int\limits_t^T\int\limits_t^{t_2} R_{p_1p_2}(t_1,t_2)d{\bf w}_{t_1}^{(i_1)}d{\bf w}_{t_2}^{(i_2)} +\int\limits_t^T\int\limits_t^{t_1} R_{p_1p_2}(t_1,t_2)d{\bf w}_{t_2}^{(i_2)}d{\bf w}_{t_1}^{(i_1)}+ $$ $$ +{\bf 1}_{\{i_1=i_2\ne 0\}} \int\limits_t^T R_{p_1p_2}(t_1,t_1)dt_1. $$ Let us consider the case $i_1, i_2\ne 0$ (another cases can be considered absolutely analogously). 
Using standard estimates for moments of stochastic integrals \cite{1}, we obtain $$ {\sf M}\left\{\left(J[R_{p_1p_2}]_{T,t}^{(2)}\right)^{2} \right\}= $$ $$ ={\sf M}\left\{\left(\int\limits_t^T\int\limits_t^{t_2} R_{p_1p_2}(t_1,t_2)d{\bf w}_{t_1}^{(i_1)}d{\bf w}_{t_2}^{(i_2)} +\int\limits_t^T\int\limits_t^{t_1} R_{p_1p_2}(t_1,t_2)d{\bf w}_{t_2}^{(i_2)}d{\bf w}_{t_1}^{(i_1)} \right)^2\right\}+ $$ $$ +{\bf 1}_{\{i_1=i_2\ne 0\}} \left(\int\limits_t^T R_{p_1p_2}(t_1,t_1)dt_1\right)^2\le $$ $$ \le 2\left(\int\limits_t^T\int\limits_t^{t_2} \left(R_{p_1p_2}(t_1,t_2)\right)^{2}dt_1 dt_2 + \int\limits_t^T\int\limits_t^{t_1} \left(R_{p_1p_2}(t_1,t_2)\right)^{2}dt_2 dt_1\right)+ $$ $$ + {\bf 1}_{\{i_1=i_2\ne 0\}} \left(\int\limits_t^T R_{p_1p_2}(t_1,t_1)dt_1\right)^2= $$ \begin{equation} \label{newbegin1} = 2\int\limits_{[t, T]^2} \left(R_{p_1p_2}(t_1,t_2)\right)^{2}dt_1 dt_2 + {\bf 1}_{\{i_1=i_2\ne 0\}} \left(\int\limits_t^T R_{p_1p_2}(t_1,t_1)dt_1\right)^2. \end{equation} We have $$ \int\limits_{[t, T]^2} \left(R_{p_1p_2}(t_1,t_2)\right)^{2}dt_1 dt_2= $$ $$ = \int\limits_{[t, T]^2} \Biggl( K^{*}(t_1,t_2)- \sum_{j_1=0}^{p_1}\sum_{j_2=0}^{p_2}C_{j_2 j_1} \phi_{j_1}(t_1)\phi_{j_2}(t_2)\Biggr)^2 dt_1 dt_2= $$ $$ =\int\limits_{[t, T]^2} \Biggl( K(t_1,t_2)- \sum_{j_1=0}^{p_1}\sum_{j_2=0}^{p_2}C_{j_2 j_1} \phi_{j_1}(t_1)\phi_{j_2}(t_2)\Biggr)^2 dt_1 dt_2. $$ The function $K(t_1,t_2)$ is piecewise continuous in the square $[t, T]^2$. At this situation it is well-known that the generalized multiple Fourier series of the function $K(t_1,t_2)\in L_2([t, T]^2)$ is converging to this function in the square $[t, T]^2$ in the mean-square sense, i.e. $$ \hbox{\vtop{\offinterlineskip\halign{ \hfil#\hfil\cr {\rm lim}\cr $\stackrel{}{{}_{p_1,p_2\to \infty}}$\cr }} }\Biggl\Vert K(t_1,t_2)- \sum_{j_1=0}^{p_1}\sum_{j_2=0}^{p_2} C_{j_2 j_1}\prod_{l=1}^{2} \phi_{j_l}(t_l)\Biggr\Vert_{L_2([t,T]^2)}=0, $$ \noindent where $$ \left\Vert f\right\Vert_{L_2([t,T]^2)}=\left(\int\limits_{[t,T]^2} f^2(t_1,t_2)dt_1dt_2\right)^{1/2}. $$ So, we obtain \begin{equation} \label{newbegin2} \hbox{\vtop{\offinterlineskip\halign{ \hfil#\hfil\cr {\rm lim}\cr $\stackrel{}{{}_{p_1,p_2\to \infty}}$\cr }} } \int\limits_{[t, T]^2} \left(R_{p_1p_2}(t_1,t_2)\right)^{2}dt_1 dt_2=0. \end{equation} Note that $$ \int\limits_t^T R_{p_1p_2}(t_1,t_1)dt_1= $$ $$ = \int\limits_t^T \left( \frac{1}{2}\psi_1(t_1)\psi_2(t_1) - \sum_{j_1=0}^{p_1}\sum_{j_2=0}^{p_2}C_{j_2 j_1} \phi_{j_1}(t_1)\phi_{j_2}(t_1)\right) dt_1= $$ $$ = \frac{1}{2}\int\limits_t^T \psi_1(t_1)\psi_2(t_1)dt_1 - \sum_{j_1=0}^{p_1}\sum_{j_2=0}^{p_2}C_{j_2 j_1} \int\limits_t^T\phi_{j_1}(t_1)\phi_{j_2}(t_1)dt_1= $$ $$ = \frac{1}{2}\int\limits_t^T \psi_1(t_1)\psi_2(t_1)dt_1 - \sum_{j_1=0}^{p_1}\sum_{j_2=0}^{p_2}C_{j_2 j_1} {\bf 1}_{\{j_1=j_2\}}= $$ \begin{equation} \label{newbegin3} = \frac{1}{2}\int\limits_t^T \psi_1(t_1)\psi_2(t_1)dt_1 - \sum_{j_1=0}^{{\rm min}\{p_1,p_2\}}C_{j_1 j_1}. 
\end{equation} From (\ref{newbegin3}) we obtain \begin{equation} \label{dds1} \lim\limits_{p_1\to\infty} \lim\limits_{p_2\to\infty}\int\limits_t^T R_{p_1p_2}(t_1,t_1)dt_1 = \lim\limits_{p_1\to\infty} \varlimsup\limits_{p_2\to\infty}\int\limits_t^T R_{p_1p_2}(t_1,t_1)dt_1= \end{equation} $$ =\frac{1}{2}\int\limits_t^T \psi_1(t_1)\psi_2(t_1)dt_1 - \lim\limits_{p_1\to\infty} \sum_{j_1=0}^{p_1}C_{j_1 j_1}= $$ $$ =\frac{1}{2}\int\limits_t^T \psi_1(t_1)\psi_2(t_1)dt_1 - \sum_{j_1=0}^{\infty}C_{j_1 j_1} = $$ \begin{equation} \label{s1} =\lim\limits_{p_1,p_2\to\infty}\int\limits_t^T R_{p_1p_2}(t_1,t_1)dt_1, \end{equation} \noindent where $\varlimsup$ means ${\rm lim\ sup}.$ Note that the existence of the limit $$ \lim\limits_{p_1\to\infty} \sum_{j_1=0}^{p_1}C_{j_1 j_1} $$ \noindent is proved in \cite{2017-1xx}-\cite{2017-1xxyz} (Sect.~2.1.2) for the polynomial and trigonometric cases. If we prove the following relation \begin{equation} \label{s11} \lim\limits_{p_1\to\infty} \lim\limits_{p_2\to\infty}\int\limits_t^T R_{p_1p_2}(t_1,t_1)dt_1=0, \end{equation} \noindent then from (\ref{s1}) we get \begin{equation} \label{44} \frac{1}{2}\int\limits_t^T \psi_1(t_1)\psi_2(t_1)dt_1 = \sum_{j_1=0}^{\infty}C_{j_1 j_1}, \end{equation} \begin{equation} \label{444} \lim\limits_{p_1,p_2\to\infty}\int\limits_t^T R_{p_1p_2}(t_1,t_1)dt_1=0. \end{equation} From (\ref{newbegin1}), (\ref{newbegin2}), and (\ref{444}) we obtain $$ \lim\limits_{p_1,p_2\to\infty} {\sf M}\left\{\left(J[R_{p_1p_2}]_{T,t}^{(2)}\right)^{2} \right\}=0 $$ \noindent and Theorem 1 will be proved. We have proved the equality (\ref{44}) in \cite{2017} (Theorem 3, p.~A.59), \cite{2017-1} (Theorem 5.3, p.~A.294) or \cite{200}, \cite{201} (also see \cite{2017-1xx}-\cite{2017-1xxyz}). However, here we consider another and more simple proof of the relation (\ref{44}). Let us expand the function $K^{*}(t_1,t_2)$ (see (\ref{1999.1})) using the variable $t_1$, when $t_2$ is fixed, into the generalized Fourier series at the interval $(t, T)$ \begin{equation} \label{leto8001yes1} K^{*}(t_1,t_2)= \sum_{j_1=0}^{\infty}C_{j_1}(t_2)\phi_{j_1}(t_1)\ \ \ (t_1\ne t, T), \end{equation} \noindent where $$ C_{j_1}(t_2)=\int\limits_t^T K^{*}(t_1,t_2)\phi_{j_1}(t_1)dt_1=\psi_2(t_2) \int\limits_t^{t_2}\psi_1(t_1)\phi_{j_1}(t_1)dt_1. $$ The equality (\ref{leto8001yes1}) is satisfied pointwise in each point of the interval $(t, T)$ with respect to the variable $t_1$, when $t_2\in [t, T]$ is fixed, due to a piecewise smoothness of the function $K^{*}(t_1,t_2)$ with respect to the variable $t_1\in [t, T]$ ($t_2$ is fixed). Note also that due to well-known properties of the Fourier--Legendre series and trigonometric Fourier series, the series (\ref{leto8001yes1}) converges when $t_1=t$ and $t_1=T$. Obtaining (\ref{leto8001yes1}), we also used the fact that the right-hand side of (\ref{leto8001yes1}) converges when $t_1=t_2$ (point of a finite discontinuity of the function $K(t_1,t_2)$) to the value $$ \frac{1}{2}\left(K(t_2-0,t_2)+K(t_2+0,t_2)\right)= \frac{1}{2}\psi_1(t_2)\psi_2(t_2)= K^{*}(t_2,t_2). $$ The function $C_{j_1}(t_2)$ is a continuously differentiable one at the interval $[t, T]$. 
Let us expand it into the generalized Fourier series at the interval $(t, T)$
\begin{equation}
\label{leto8002yes}
C_{j_1}(t_2)=
\sum_{j_2=0}^{\infty}C_{j_2 j_1}\phi_{j_2}(t_2)\ \ \ (t_2\ne t, T),
\end{equation}
\noindent where
$$
C_{j_2 j_1}=\int\limits_t^T C_{j_1}(t_2)\phi_{j_2}(t_2)dt_2=
\int\limits_t^T \psi_2(t_2)\phi_{j_2}(t_2)\int\limits_t^{t_2}
\psi_1(t_1)\phi_{j_1}(t_1)dt_1 dt_2
$$
\noindent and the equality (\ref{leto8002yes}) is satisfied pointwise at any point of the interval $(t, T)$ (the right-hand side of (\ref{leto8002yes}) converges when $t_2=t$ and $t_2=T$).

Let us substitute (\ref{leto8002yes}) into (\ref{leto8001yes1})
\begin{equation}
\label{leto8003yes}
K^{*}(t_1,t_2)=
\sum_{j_1=0}^{\infty}\sum_{j_2=0}^{\infty}C_{j_2 j_1}
\phi_{j_1}(t_1)\phi_{j_2}(t_2),\ \ \ (t_1, t_2)\in (t, T)^2.
\end{equation}

Furthermore, the series on the right-hand side of (\ref{leto8003yes}) converges at the boundary of the square $[t, T]^2$. From (\ref{30.46}) and (\ref{leto8003yes}) we obtain
\begin{equation}
\label{y1}
\lim\limits_{p_1\to\infty}
\lim\limits_{p_2\to\infty}R_{p_1p_2}(t_1,t_1)=0,\ \ \ t_1\in (t, T).
\end{equation}

Since the integral
$$
\int\limits_t^T R_{p_1p_2}(t_1,t_1)dt_1
$$
\noindent exists as a Riemann integral, it is equal to the corresponding Lebesgue integral. Moreover, the following equality
$$
\lim\limits_{p_1\to\infty}\lim\limits_{p_2\to\infty}
R_{p_1p_2}(t_1,t_1)=0\ \ \ \hbox{when}\ \ \ t_1\in [t, T]
$$
\noindent holds with accuracy up to sets of measure zero.

According to (\ref{30.46}), (\ref{leto8001yes1})--(\ref{leto8003yes}), we have
$$
R_{p_1p_2}(t_1,t_2)=\left(K^{*}(t_1,t_2)-\sum\limits_{j_1=0}^{p_1}
C_{j_1}(t_2)\phi_{j_1}(t_1)\right)+
$$
$$
+\left(
\sum\limits_{j_1=0}^{p_1}\left(C_{j_1}(t_2)-
\sum\limits_{j_2=0}^{p_2} C_{j_2j_1}\phi_{j_2}(t_2)\right)
\phi_{j_1}(t_1)\right).
$$

Then, applying twice (we mean here the iterated passage to the limit $\lim\limits_{p_1\to\infty}\varlimsup\limits_{p_2\to\infty}$) Lebesgue's Dominated Convergence Theorem, we obtain (see (\ref{dds1}))
$$
\lim\limits_{p_1\to\infty}\lim\limits_{p_2\to\infty}
\int\limits_t^T R_{p_1p_2}(t_1,t_1)dt_1=
\lim\limits_{p_1\to\infty}\varlimsup\limits_{p_2\to\infty}
\int\limits_t^T R_{p_1p_2}(t_1,t_1)dt_1=0.
$$

Note that the development of the approach from this article can be found in \cite{2017-1xx}-\cite{2017-1xxyz}, \cite{xxxxx} (also see the references in these papers).

\section{Some Recent Results on Expansion of Iterated Stratonovich Stochastic Integrals of Multiplicities 3 to 6}

Recently, a new approach to the expansion and mean-square approximation of iterated Stratonovich stochastic integrals has been obtained \cite{2017-1xx} (Sections~2.10--2.16), \cite{32} (Sections~13--19), \cite{15a} (Sections~5--11), \cite{arxiv-11} (Sections~7--13), \cite{new-art-1-xxy} (Sections~4--9). Let us formulate four theorems that were obtained using this approach.
{\bf Theorem 2}\ \cite{2017-1xx}, \cite{32}, \cite{15a}, \cite{arxiv-11}, \cite{new-art-1-xxy}.\ {\it Suppose that $\{\phi_j(x)\}_{j=0}^{\infty}$ is a complete orthonormal system of Legendre polynomials or trigonometric functions in the space $L_2([t, T]).$ Furthermore, let $\psi_1(\tau), \psi_2(\tau),$ $\psi_3(\tau)$ are continuously dif\-ferentiable nonrandom functions on $[t, T].$ Then, for the iterated Stra\-to\-no\-vich stochastic integral of third multiplicity $$ J^{*}[\psi^{(3)}]_{T,t}={\int\limits_t^{*}}^T\psi_3(t_3) {\int\limits_t^{*}}^{t_3}\psi_2(t_2) {\int\limits_t^{*}}^{t_2}\psi_1(t_1) d{\bf w}_{t_1}^{(i_1)} d{\bf w}_{t_2}^{(i_2)}d{\bf w}_{t_3}^{(i_3)}\ \ \ (i_1,i_2,i_3=0,1,\ldots,m) $$ \noindent the following relations \begin{equation} \label{fin1} J^{*}[\psi^{(3)}]_{T,t} =\hbox{\vtop{\offinterlineskip\halign{ \hfil#\hfil\cr {\rm l.i.m.}\cr $\stackrel{}{{}_{p\to \infty}}$\cr }} } \sum\limits_{j_1, j_2, j_3=0}^{p} C_{j_3 j_2 j_1}\zeta_{j_1}^{(i_1)}\zeta_{j_2}^{(i_2)}\zeta_{j_3}^{(i_3)}, \end{equation} \begin{equation} \label{fin2} {\sf M}\left\{\left( J^{*}[\psi^{(3)}]_{T,t}- \sum\limits_{j_1, j_2, j_3=0}^{p} C_{j_3 j_2 j_1}\zeta_{j_1}^{(i_1)}\zeta_{j_2}^{(i_2)}\zeta_{j_3}^{(i_3)}\right)^2\right\} \le \frac{C}{p} \end{equation} \noindent are fulfilled, where $i_1, i_2, i_3=0,1,\ldots,m$ in {\rm (\ref{fin1})} and $i_1, i_2, i_3=1,\ldots,m$ in {\rm (\ref{fin2})}, constant $C$ is independent of $p,$ $$ C_{j_3 j_2 j_1}=\int\limits_t^T\psi_3(t_3)\phi_{j_3}(t_3) \int\limits_t^{t_3}\psi_2(t_2)\phi_{j_2}(t_2) \int\limits_t^{t_2}\psi_1(t_1)\phi_{j_1}(t_1)dt_1dt_2dt_3 $$ \noindent and $$ \zeta_{j}^{(i)}= \int\limits_t^T \phi_{j}(\tau) d{\bf f}_{\tau}^{(i)} $$ \noindent are independent standard Gaussian random variables for various $i$ or $j$ {\rm (}in the case when $i\ne 0${\rm );} another notations are the same as in Theorem~{\rm 1}.} {\bf Theorem 3}\ \cite{2017-1xx}, \cite{32}, \cite{15a}, \cite{arxiv-11}, \cite{new-art-1-xxy}.\ {\it Let $\{\phi_j(x)\}_{j=0}^{\infty}$ be a complete orthonormal system of Legendre polynomials or trigonometric functions in the space $L_2([t, T]).$ Furthermore, let $\psi_1(\tau), \ldots,$ $\psi_4(\tau)$ be continuously dif\-ferentiable nonrandom functions on $[t, T].$ Then, for the iterated Stra\-to\-no\-vich stochastic integral of fourth multiplicity \begin{equation} \label{fin0} J^{*}[\psi^{(4)}]_{T,t}={\int\limits_t^{*}}^T\psi_4(t_4) {\int\limits_t^{*}}^{t_4}\psi_3(t_3) {\int\limits_t^{*}}^{t_3}\psi_2(t_2) {\int\limits_t^{*}}^{t_2}\psi_1(t_1) d{\bf w}_{t_1}^{(i_1)} d{\bf w}_{t_2}^{(i_2)}d{\bf w}_{t_3}^{(i_3)}d{\bf w}_{t_4}^{(i_4)} \end{equation} \noindent the following relations \begin{equation} \label{fin3} J^{*}[\psi^{(4)}]_{T,t} =\hbox{\vtop{\offinterlineskip\halign{ \hfil#\hfil\cr {\rm l.i.m.}\cr $\stackrel{}{{}_{p\to \infty}}$\cr }} } \sum\limits_{j_1, j_2, j_3,j_4=0}^{p} C_{j_4j_3 j_2 j_1}\zeta_{j_1}^{(i_1)}\zeta_{j_2}^{(i_2)}\zeta_{j_3}^{(i_3)}\zeta_{j_4}^{(i_4)}, \end{equation} \begin{equation} \label{fin4} {\sf M}\left\{\left( J^{*}[\psi^{(4)}]_{T,t}- \sum\limits_{j_1, j_2, j_3, j_4=0}^{p} C_{j_4 j_3 j_2 j_1}\zeta_{j_1}^{(i_1)}\zeta_{j_2}^{(i_2)}\zeta_{j_3}^{(i_3)} \zeta_{j_4}^{(i_4)} \right)^2\right\} \le \frac{C}{p^{1-\varepsilon}} \end{equation} \noindent are fulfilled, where $i_1, \ldots , i_4=0,1,\ldots,m$ in {\rm (\ref{fin0}),} {\rm (\ref{fin3})} and $i_1, \ldots, i_4=1,\ldots,m$ in {\rm (\ref{fin4}),} constant $C$ does not depend on $p,$ $\varepsilon$ is an arbitrary small positive real number for the case of complete orthonormal system 
of Legendre polynomials in the space $L_2([t, T])$ and $\varepsilon=0$ for the case of complete orthonormal system of trigonometric functions in the space $L_2([t, T]),$ $$ C_{j_4 j_3 j_2 j_1}= $$ $$ = \int\limits_t^T\psi_4(t_4)\phi_{j_4}(t_4) \int\limits_t^{t_4}\psi_3(t_3)\phi_{j_3}(t_3) \int\limits_t^{t_3}\psi_2(t_2)\phi_{j_2}(t_2) \int\limits_t^{t_2}\psi_1(t_1)\phi_{j_1}(t_1)dt_1dt_2dt_3dt_4; $$ \noindent another notations are the same as in Theorem~{\rm 2}.} {\bf Theorem 4}\ \cite{2017-1xx}, \cite{32}, \cite{15a}, \cite{arxiv-11}, \cite{new-art-1-xxy}.\ {\it Assume that $\{\phi_j(x)\}_{j=0}^{\infty}$ is a complete orthonormal system of Legendre polynomials or trigonometric functions in the space $L_2([t, T])$ and $\psi_1(\tau), \ldots,$ $\psi_5(\tau)$ are continuously dif\-ferentiable nonrandom functions on $[t, T].$ Then, for the iterated Stra\-to\-no\-vich stochastic integral of fifth multiplicity \begin{equation} \label{fin7} J^{*}[\psi^{(5)}]_{T,t}={\int\limits_t^{*}}^T\psi_5(t_5) \ldots {\int\limits_t^{*}}^{t_2}\psi_1(t_1) d{\bf w}_{t_1}^{(i_1)} \ldots d{\bf w}_{t_5}^{(i_5)} \end{equation} \noindent the following relations \begin{equation} \label{fin8} J^{*}[\psi^{(5)}]_{T,t} =\hbox{\vtop{\offinterlineskip\halign{ \hfil#\hfil\cr {\rm l.i.m.}\cr $\stackrel{}{{}_{p\to \infty}}$\cr }} } \sum\limits_{j_1,\ldots,j_5=0}^{p} C_{j_5 \ldots j_1}\zeta_{j_1}^{(i_1)}\ldots \zeta_{j_5}^{(i_5)}, \end{equation} \begin{equation} \label{fin9} {\sf M}\left\{\left( J^{*}[\psi^{(5)}]_{T,t}- \sum\limits_{j_1, \ldots, j_5=0}^{p} C_{j_5 \ldots j_1}\zeta_{j_1}^{(i_1)}\ldots \zeta_{j_5}^{(i_5)} \right)^2\right\} \le \frac{C}{p^{1-\varepsilon}} \end{equation} \noindent are fulfilled, where $i_1, \ldots , i_5=0,1,\ldots,m$ in {\rm (\ref{fin7}),} {\rm (\ref{fin8})} and $i_1, \ldots, i_5=1,\ldots,m$ in {\rm (\ref{fin9}),} constant $C$ is independent of $p,$ $\varepsilon$ is an arbitrary small positive real number for the case of complete orthonormal system of Legendre polynomials in the space $L_2([t, T])$ and $\varepsilon=0$ for the case of complete orthonormal system of trigonometric functions in the space $L_2([t, T]),$ $$ C_{j_5 \ldots j_1}= \int\limits_t^T\psi_5(t_5)\phi_{j_5}(t_5)\ldots \int\limits_t^{t_2}\psi_1(t_1)\phi_{j_1}(t_1)dt_1\ldots dt_5; $$ \noindent another notations are the same as in Theorems~{\rm 2, 3}.} {\bf Theorem 5}\ \cite{2017-1xx}, \cite{32}, \cite{15a}, \cite{arxiv-11}.\ {\it Suppose that $\{\phi_j(x)\}_{j=0}^{\infty}$ is a complete orthonormal system of Legendre polynomials or trigonometric functions in the space $L_2([t, T]).$ Then, for the iterated Stra\-to\-no\-vich stochastic integral of sixth multiplicity \begin{equation} \label{after10001qu1} J_{T,t}^{*(i_1\ldots i_6)}={\int\limits_t^{*}}^T \ldots {\int\limits_t^{*}}^{t_2} d{\bf w}_{t_1}^{(i_1)} \ldots d{\bf w}_{t_6}^{(i_6)} \end{equation} \noindent the following expansion $$ J_{T,t}^{*(i_1\ldots i_6)} =\hbox{\vtop{\offinterlineskip\halign{ \hfil#\hfil\cr {\rm l.i.m.}\cr $\stackrel{}{{}_{p\to \infty}}$\cr }} } \sum\limits_{j_1, \ldots, j_6=0}^{p} C_{j_6 \ldots j_1}\zeta_{j_1}^{(i_1)}\ldots \zeta_{j_6}^{(i_6)} $$ \noindent that converges in the mean-square sense is valid, where $i_1, \ldots, i_6=0, 1,\ldots,m,$ $$ C_{j_6 \ldots j_1}= \int\limits_t^T\phi_{j_6}(t_6)\ldots \int\limits_t^{t_2}\phi_{j_1}(t_1)dt_1\ldots dt_6; $$ \noindent another notations are the same as in Theorems~{\rm 2--4}.} \section{Theorems 1--5 from Point of View of the Wong--Zakai Approximation} The iterated Ito stochastic integrals and solutions of Ito 
SDEs are complex and important functionals of the independent components ${\bf f}_{s}^{(i)},$ $i=1,\ldots,m$ of the multidimensional Wiener process ${\bf f}_{s},$ $s\in[0, T].$ Let ${\bf f}_{s}^{(i)p},$ $p\in\mathbb{N}$ be some approximation of ${\bf f}_{s}^{(i)},$ $i=1,\ldots,m$. Suppose that ${\bf f}_{s}^{(i)p}$ converges to ${\bf f}_{s}^{(i)},$ $i=1,\ldots,m$ in some sense as $p\to\infty$ and has differentiable sample trajectories.

A natural question arises: if we replace ${\bf f}_{s}^{(i)}$ by ${\bf f}_{s}^{(i)p},$ $i=1,\ldots,m$ in the functionals mentioned above, will the resulting functionals converge to the original functionals of the components ${\bf f}_{s}^{(i)},$ $i=1,\ldots,m$ of the multidimensional Wiener process ${\bf f}_{s}$? The answer to this question is negative in the general case. However, in the pioneering works of E. Wong and M. Zakai \cite{W-Z-1}, \cite{W-Z-2}, it was shown that under special conditions and for some types of approximations of the Wiener process the answer is affirmative with one peculiarity: the convergence takes place to the iterated Stratonovich stochastic integrals and solutions of Stratonovich SDEs, and not to the iterated Ito stochastic integrals and solutions of Ito SDEs. The piecewise linear approximation as well as the regularization by convolution \cite{W-Z-1}-\cite{Watanabe} belong to the mentioned types of approximations of the Wiener process. The above approximation of stochastic integrals and solutions of SDEs is often called the Wong--Zakai approximation.

Let ${\bf f}_{s},$ $s\in[0, T]$ be an $m$-dimensional standard Wiener process with independent components ${\bf f}_{s}^{(i)},$ $i=1,\ldots,m.$ It is well known that the following representation takes place \cite{Lipt}, \cite{7e}
\begin{equation}
\label{um1x}
{\bf f}_{\tau}^{(i)}-{\bf f}_{t}^{(i)}=
\sum_{j=0}^{\infty}\int\limits_t^{\tau} \phi_j(s)ds\ \zeta_j^{(i)},\ \ \
\zeta_j^{(i)}=
\int\limits_t^T \phi_j(\tau)d{\bf f}_{\tau}^{(i)},
\end{equation}
\noindent where $\tau\in[t, T],$ $t\ge 0,$ $\{\phi_j(x)\}_{j=0}^{\infty}$ is an arbitrary complete orthonormal system of functions in the space $L_2([t, T]),$ and $\zeta_j^{(i)}$ are independent standard Gaussian random variables for various $i$ or $j.$ Moreover, the series (\ref{um1x}) converges for any $\tau\in [t, T]$ in the mean-square sense.

Let ${\bf f}_{\tau}^{(i)p}-{\bf f}_{t}^{(i)p}$ be the mean-square approximation of the process ${\bf f}_{\tau}^{(i)}-{\bf f}_{t}^{(i)},$ which has the following form
\begin{equation}
\label{um1xx}
{\bf f}_{\tau}^{(i)p}-{\bf f}_{t}^{(i)p}=
\sum_{j=0}^{p}\int\limits_t^{\tau} \phi_j(s)ds\ \zeta_j^{(i)}.
\end{equation}

From (\ref{um1xx}) we obtain
\begin{equation}
\label{um1xxx}
d{\bf f}_{\tau}^{(i)p}=
\sum_{j=0}^{p} \phi_j(\tau)\zeta_j^{(i)} d\tau.
\end{equation}

Consider the following iterated Riemann--Stieltjes integral
\begin{equation}
\label{um1xxxx}
\int\limits_t^T \psi_k(t_k)\ldots \int\limits_t^{t_2}\psi_1(t_1)
d{\bf w}_{t_1}^{(i_1)p_1}\ldots d{\bf w}_{t_k}^{(i_k)p_k},
\end{equation}
\noindent where $i_1,\ldots,i_k=0,1,\ldots,m,$\ \ $p_1,\ldots,p_k\in\mathbb{N},$
\begin{equation}
\label{um1xxx1}
d{\bf w}_{\tau}^{(i)p}=
\left\{\begin{matrix}
d{\bf f}_{\tau}^{(i)p}\ &\hbox{\rm for}\ \ \ i=1,\ldots,m\cr\cr\cr
d\tau\ &\hbox{\rm for}\ \ \ i=0
\end{matrix} ,\right.
\end{equation}
\noindent and $d{\bf f}_{\tau}^{(i)p}$ is defined by the relation (\ref{um1xxx}).
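Purely as an illustration of the approximation (\ref{um1xx}) (it is not used in the subsequent argument), one can simulate a Brownian path on $[0, T]$, compute the random variables $\zeta_j^{(i)}$ from this path, and compare the truncated expansion with the path itself. In the following Python sketch the Legendre system, the time grid, and the truncation level $p$ are our own choices:

\begin{verbatim}
import numpy as np
from numpy.polynomial import legendre

rng = np.random.default_rng(1)
T, n_steps, p = 1.0, 20000, 50
tau = np.linspace(0.0, T, n_steps + 1)
dW = rng.normal(0.0, np.sqrt(T / n_steps), size=n_steps)
W = np.concatenate([[0.0], np.cumsum(dW)])       # reference Brownian path

def phi(j, s):
    # Normalized Legendre polynomial phi_j on [0, T].
    c = np.zeros(j + 1); c[j] = 1.0
    return np.sqrt((2 * j + 1) / T) * legendre.legval(2.0 * s / T - 1.0, c)

# zeta_j = int_0^T phi_j(s) dW_s, approximated by left-point sums.
zeta = np.array([np.sum(phi(j, tau[:-1]) * dW) for j in range(p + 1)])

# Truncated expansion (um1xx): W^p_tau = sum_j (int_0^tau phi_j ds) zeta_j.
ds = T / n_steps
W_p = np.zeros_like(W)
for j in range(p + 1):
    Phi_j = np.concatenate([[0.0], np.cumsum(phi(j, tau[:-1]) * ds)])
    W_p += Phi_j * zeta[j]

print(np.mean((W_p - W) ** 2))   # tends to decrease as p grows
\end{verbatim}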
Let us substitute (\ref{um1xxx}) into (\ref{um1xxxx})
\begin{equation}
\label{um1xxxx1}
\int\limits_t^T \psi_k(t_k)\ldots \int\limits_t^{t_2}\psi_1(t_1)
d{\bf w}_{t_1}^{(i_1)p_1}\ldots d{\bf w}_{t_k}^{(i_k)p_k}=
\sum\limits_{j_1=0}^{p_1} \ldots \sum\limits_{j_k=0}^{p_k}
C_{j_k \ldots j_1}\prod\limits_{l=1}^k \zeta_{j_l}^{(i_l)},
\end{equation}
\noindent where
$$
\zeta_j^{(i)}=\int\limits_t^T \phi_j(\tau)d{\bf w}_{\tau}^{(i)}
$$
\noindent are independent standard Gaussian random variables for various $i$ or $j$ (in the case when $i\ne 0$), ${\bf w}_{s}^{(i)}={\bf f}_{s}^{(i)}$ for $i=1,\ldots,m$ and ${\bf w}_{s}^{(0)}=s,$
$$
C_{j_k \ldots j_1}=\int\limits_t^T\psi_k(t_k)\phi_{j_k}(t_k)\ldots
\int\limits_t^{t_2} \psi_1(t_1)\phi_{j_1}(t_1) dt_1\ldots dt_k
$$
\noindent is the Fourier coefficient.

Consider the following iterated Stratonovich stochastic integrals
\begin{equation}
\label{str1}
J^{*}[\psi^{(k)}]_{T,t}=
\int\limits_t^{*T}\psi_k(t_k)\ldots \int\limits_t^{*t_{2}} \psi_1(t_1)
d{\bf w}_{t_1}^{(i_1)}\ldots d{\bf w}_{t_k}^{(i_k)},
\end{equation}
\noindent where every $\psi_l(\tau)$ $(l=1,\ldots,k)$ is a continuously differentiable nonrandom function at the interval $[t,T];$ the other notations are the same as in (\ref{str}).

To the best of our knowledge \cite{W-Z-1}-\cite{Watanabe}, the approximations of the Wiener process in the Wong--Zakai approximation must satisfy fairly strong restrictions \cite{Watanabe} (see Definition 7.1, pp.~480--481). Moreover, approximations of the Wiener process that are similar to (\ref{um1xx}) were not considered in \cite{W-Z-1}, \cite{W-Z-2} (also see \cite{Watanabe}, Theorems 7.1, 7.2). Therefore, the proof of analogs of Theorems 7.1 and 7.2 \cite{Watanabe} for approximations of the Wiener process based on its series expansion (\ref{um1x}) should be carried out separately. Thus, the mean-square convergence of the right-hand side of (\ref{um1xxxx1}) to the appropriate iterated Stratonovich stochastic integral (\ref{str1}) does not follow from the results of the papers \cite{W-Z-1}, \cite{W-Z-2} (also see \cite{Watanabe}, Theorems 7.1, 7.2). However, in \cite{KlPl2} (Sect.~5.8, pp.~202--204), \cite{KPS} (pp.~82-84), \cite{KPW} (pp.~438-439), \cite{Zapad-9} (pp.~263-264) the authors use (without rigorous proof) the Wong--Zakai approximation \cite{W-Z-1}-\cite{Watanabe} within the framework of the method of approximation of iterated Stratonovich stochastic integrals based on the Karhunen--Loeve expansion of the Brownian bridge process \cite{Mi2}.

On the other hand, Theorems 1--5 of this paper can be considered as the proof of the Wong--Zakai approximation for the iterated Stratonovich stochastic integrals (\ref{str1}) of multiplicities 2 to 6 based on the approximation (\ref{um1xx}) of the Wiener process. Moreover, the Riemann--Stieltjes integrals (\ref{um1xxxx}) of multiplicities 2 to 6 converge in the mean-square sense to the appropriate Stratonovich stochastic integrals (\ref{str1}). Recall that $\{\phi_j(x)\}_{j=0}^{\infty}$ (see (\ref{um1x}), (\ref{um1xx}), and Theorems 1--5) is a complete orthonormal system of Legendre polynomials or trigonometric functions in the space $L_2([t, T])$.

To illustrate the above reasoning, consider two examples for the case $k=2,$ $\psi_1(\tau),$ $\psi_2(\tau)\equiv 1;$ $i_1, i_2=1,\ldots,m.$ The first example relates to the piecewise linear approximation of the multidimensional Wiener process (these approximations were considered in \cite{W-Z-1}-\cite{Watanabe}).
Let ${\bf b}_{\Delta}^{(i)}(t),$ $t\in[0, T]$ be the piecewise linear approximation of the $i$th component ${\bf f}_t^{(i)}$ of the multidimensional standard Wiener process ${\bf f}_t,$ $t\in [0, T]$ with independent components ${\bf f}_t^{(i)},$ $i=1,\ldots,m,$ i.e. $$ {\bf b}_{\Delta}^{(i)}(t)={\bf f}_{k\Delta}^{(i)}+ \frac{t-k\Delta}{\Delta}\Delta{\bf f}_{k\Delta}^{(i)}, $$ \noindent where $$ \Delta{\bf f}_{k\Delta}^{(i)}={\bf f}_{(k+1)\Delta}^{(i)}- {\bf f}_{k\Delta}^{(i)},\ \ \ t\in[k\Delta, (k+1)\Delta),\ \ \ k=0, 1,\ldots, N-1. $$ Note that w.~p.~1 \begin{equation} \label{pridum} \frac{d{\bf b}_{\Delta}^{(i)}}{dt}(t)= \frac{\Delta{\bf f}_{k\Delta}^{(i)}}{\Delta},\ \ \ t\in[k\Delta, (k+1)\Delta),\ \ \ k=0, 1,\ldots, N-1. \end{equation} Consider the following iterated Riemann--Stieltjes integral $$ \int\limits_0^T \int\limits_0^{s} d{\bf b}_{\Delta}^{(i_1)}(\tau)d{\bf b}_{\Delta}^{(i_2)}(s),\ \ \ i_1,i_2=1,\ldots,m. $$ Using (\ref{pridum}) and additive property of the Riemann--Stieltjes integral, we can write w.~p.~1 $$ \int\limits_0^T \int\limits_0^{s} d{\bf b}_{\Delta}^{(i_1)}(\tau)d{\bf b}_{\Delta}^{(i_2)}(s)= \int\limits_0^T \int\limits_0^{s} \frac{d{\bf b}_{\Delta}^{(i_1)}}{d\tau}(\tau)d\tau \frac{d {\bf b}_{\Delta}^{(i_2)}}{d s}(s) ds = $$ $$ = \sum\limits_{l=0}^{N-1}\int\limits_{l\Delta}^{(l+1)\Delta} \left( \sum\limits_{q=0}^{l-1}\int\limits_{q\Delta}^{(q+1)\Delta} \frac{\Delta{\bf f}_{q\Delta}^{(i_1)}}{\Delta}d\tau+ \int\limits_{l\Delta}^{s} \frac{\Delta{\bf f}_{l\Delta}^{(i_1)}}{\Delta}d\tau\right) \frac{\Delta{\bf f}_{l\Delta}^{(i_2)}}{\Delta}ds= $$ $$ =\sum\limits_{l=0}^{N-1}\sum\limits_{q=0}^{l-1} \Delta{\bf f}_{q\Delta}^{(i_1)} \Delta{\bf f}_{l\Delta}^{(i_2)}+ \frac{1}{\Delta^2}\sum\limits_{l=0}^{N-1} \Delta{\bf f}_{l\Delta}^{(i_1)} \Delta{\bf f}_{l\Delta}^{(i_2)} \int\limits_{l\Delta}^{(l+1)\Delta} \int\limits_{l\Delta}^{s}d\tau ds= $$ \begin{equation} \label{oh-ty} =\sum\limits_{l=0}^{N-1}\sum\limits_{q=0}^{l-1} \Delta{\bf f}_{q\Delta}^{(i_1)} \Delta{\bf f}_{l\Delta}^{(i_2)}+ \frac{1}{2}\sum\limits_{l=0}^{N-1} \Delta{\bf f}_{l\Delta}^{(i_1)} \Delta{\bf f}_{l\Delta}^{(i_2)}. \end{equation} Using (\ref{oh-ty}), it is not difficult to show that \begin{equation} \label{uh-111} \hbox{\vtop{\offinterlineskip\halign{ \hfil#\hfil\cr {\rm l.i.m.}\cr $\stackrel{}{{}_{N\to \infty}}$\cr }} } \int\limits_0^T \int\limits_0^{s} d{\bf b}_{\Delta}^{(i_1)}(\tau)d{\bf b}_{\Delta}^{(i_2)}(s)= \int\limits_0^T \int\limits_0^{s} d{\bf f}_{\tau}^{(i_1)}d{\bf f}_{s}^{(i_2)}+ \frac{1}{2}{\bf 1}_{\{i_1=i_2\}}\int\limits_0^T ds= \int\limits_0^{*T} \int\limits_0^{*s} d{\bf f}_{\tau}^{(i_1)}d{\bf f}_{s}^{(i_2)}, \end{equation} \noindent where $\Delta\to 0$ if $N\to\infty$ ($N\Delta=T$). Obviously, (\ref{uh-111}) agrees with Theorem 7.1 (see \cite{Watanabe}, p.~486). The next example relates to the approximation of the Wiener process based on its series expansion (\ref{um1x}) for $t=0$, where $\{\phi_j(x)\}_{j=0}^{\infty}$ is a complete orthonormal system of Legendre polynomials or trigonometric functions in the space $L_2([0, T])$. Consider the following iterated Riemann--Stieltjes integral \begin{equation} \label{abcd1} \int\limits_0^T \int\limits_0^{s} d{\bf f}_{\tau}^{(i_1)p}d{\bf f}_{s}^{(i_2)p},\ \ \ i_1,i_2=1,\ldots,m, \end{equation} \noindent where $d{\bf f}_{\tau}^{(i)p}$ is defined by the relation (\ref{um1xxx}). 
Let us substitute (\ref{um1xxx}) into (\ref{abcd1}) \begin{equation} \label{set18} \int\limits_0^T \int\limits_0^{s} d{\bf f}_{\tau}^{(i_1)p}d{\bf f}_{s}^{(i_2)p}= \sum\limits_{j_1,j_2=0}^p C_{j_2 j_1} \zeta_{j_1}^{(i_1)}\zeta_{j_2}^{(i_2)}, \end{equation} \noindent where $$ C_{j_2 j_1}= \int\limits_0^T \phi_{j_2}(s)\int\limits_0^s \phi_{j_1}(\tau)d\tau ds $$ \noindent is the Fourier coefficient; another notations are the same as in (\ref{um1xxxx1}). As we noted above, approximations of the Wiener process that are similar to (\ref{um1xx}) were not considered in \cite{W-Z-1}, \cite{W-Z-2} (also see Theorems 7.1, 7.2 in \cite{Watanabe}). Furthermore, the extension of the results of Theorems 7.1 and 7.2 \cite{Watanabe} to the case under consideration is not obvious. On the other hand, we can apply Theorem 1 from this paper and obtain from (\ref{set18}) the desired result \begin{equation} \label{umen-bl} \hbox{\vtop{\offinterlineskip\halign{ \hfil#\hfil\cr {\rm l.i.m.}\cr $\stackrel{}{{}_{p\to \infty}}$\cr }} } \int\limits_0^T \int\limits_0^{s} d{\bf f}_{\tau}^{(i_1)p}d{\bf f}_{s}^{(i_2)p}= \hbox{\vtop{\offinterlineskip\halign{ \hfil#\hfil\cr {\rm l.i.m.}\cr $\stackrel{}{{}_{p\to \infty}}$\cr }} } \sum\limits_{j_1,j_2=0}^p C_{j_2 j_1} \zeta_{j_1}^{(i_1)}\zeta_{j_2}^{(i_2)}= \int\limits_0^{*T} \int\limits_0^{*s} d{\bf f}_{\tau}^{(i_1)}d{\bf f}_{s}^{(i_2)}. \end{equation} \end{document}
\begin{document}

\title{{\sc Type-2 Fuzzy Initial Value Problems for Second-order T2FDEs}}

\markboth{SUSK}{Type-2 Fuzzy Initial Value Problems for Second-order T2FDEs}

\begin{abstract}
Type-2 fuzzy differential equations (T2FDEs) of order 1 are already known, and the solution method of type-2 fuzzy initial value problems (T2FIVPs) for them was given by M. Mazandarani and M. Najariyan \cite{MN} in 2014. We give the solution method of second-order T2FIVPs in this paper. Furthermore, we would like to propose new notations for type-2 fuzzy theory, where symbols tend to be complicated and misleading. In particular, the Hukuhara differential symbols introduced experimentally in this paper will give us clearer meanings and expressions.
\end{abstract}

{\small
{\bf Keywords}: Type-1 / type-2 fuzzy number, Type-1 / type-2 fuzzy-valued function, Type-1 / type-2 fuzzy H-derivative, Type-1 / type-2 fuzzy differential equation, Type-1 / type-2 fuzzy initial value problem.
}

{\small
{\bf 2010 Mathematics Subject Classification}: 03E72, 26E50, 34A07, 35E15, 65L05.
}

{\small
{\bf Corresponding Author}: Norihiro Someyama $<${\tt [email protected]}$>$
}

{\small
\tableofcontents
}

\section{Introduction}

In 1965, fuzzy set theory \cite{Z1} was introduced as the origin of the mathematical theory of ambiguity by L.A. Zadeh (1921-2017). A fuzzy set $A$ on the universal set $X$ is characterized via the membership function $\mu_A:X\to [0,1]$, and the membership function value $\mu_A(x)$ represents the grade of ambiguity for each $x\in X$. Any membership function, however, is basically formed from its individual function values determined by the subjectivity of the observer. So, the grades $\{\mu_A(x):x\in X\}$ may also contain ambiguity. Focusing on this, Zadeh \cite{Z2} introduced type-2 fuzzy set theory in 1975. Type-2 fuzzy sets are fuzzy sets whose grades are fuzzy. In other words, a type-2 fuzzy set includes not only the uncertainty of the data, but also that of the membership function which indicates the uncertainty. Then, we consider the membership function of a type-2 fuzzy set ${\cal A}$ as $\mu_{{\cal A}}:X\to [0,1]^{[0,1]}$.

The concept of fuzzy derivatives was proposed by Chang $\&$ Zadeh \cite{CZ}. In addition, fuzzy derivatives using the extension principle were proposed by Dubois $\&$ Prade \cite{DP}, and some other concepts related to fuzzy derivatives were discussed by Puri $\&$ Ralescu \cite{PR1}. Fuzzy initial value problems have been researched since Kaleva \cite{K} and Bede $\&$ Gal \cite{BG}, and several attempts have been proposed to define the differentiability of fuzzy functions. Among them, Hukuhara differentiability and strongly generalized differentiability \cite{BG,PR1} have attracted particular attention. For the sake of simplicity, `type-1 / type-2 fuzzy differential equations' and `type-1 / type-2 fuzzy initial value problems' are often abbreviated as `T1/T2FDEs' and `T1/T2FIVPs' respectively in this paper.

For example, suppose that a highly experienced expert and an inexperienced student measure the temperature of a certain substance, and that we ask them to indicate the membership function of the temperature (more specifically, see Section 5 of \cite{MN}). In this case, there is a possibility that different membership functions will be expressed, and the former and the latter can be recognized as the principle set and the foot-print set (see Definitions \ref{df:FP}-\ref{df:PS}), respectively.
Therefore, type-2 fuzzy sets are useful if the exact form of the membership function is not known, or if the grade of the membership function itself is ambiguous or inaccurate. Since there are so many problems where the exact form of the membership function cannot be determined, type-2 fuzzy sets are suitable for dealing with high levels of uncertainty, although they involve more complicated calculations. Moreover, the parameters and variables appearing in differential equations in real problems are usually very imprecise, but we may well be able to model them by type-2 fuzzy theory and hence by T2FDEs.

In 2014, Mazandarani $\&$ Najariyan \cite{MN} studied first-order T2FDEs and took up some concrete T2FIVPs for them. Related to this, we study, in this paper, second-order T2FDEs and T2FIVPs for them. We also present and prove some theorems that appear not to be known even in the first-order type-1 and type-2 cases. We treat the case of crisp coefficients in this paper. The physical phenomena of our world are generally represented by second-order differential equations, so we believe that our study will be necessary and useful in fields such as mathematical physics. In fact, it is appropriate to set initial values as fuzzy numbers if an expert measures the values. Furthermore, although the accuracy of experiments is improving remarkably in science, it is useful to discuss the accuracy of old experiments and that of present experiments as the foot-print set and the principle set, respectively.

Incidentally, it may be necessary to seek the best notation because type-2 fuzzy theory tends to be complicated in its notations. We would like to propose some new notations on a trial basis in this paper.

\section{Preparation}

We prepare the definitions of terms, notations and known results on type-2 fuzzy theory required in this paper. Knowledge of type-1 fuzzy theory will also be required, but we put it in the appendix because writing it in this section would make the section redundant. For convenience, we use a notation such as `the fuzzy set $A:X\to [0,1]$' and basically write $A(x)$ for the grade of $A$. `Crisp' expresses `non-fuzzy'.

\subsection{Type-2 Fuzzy Numbers}

We first introduce type-2 fuzzy sets. A type-2 fuzzy set is defined by its membership function with a fixed input order (see Remark \ref{rem:psmf}, 1) below for details).

\begin{df}[\cite{ML}]
\label{df:T2FS}
A type-2 fuzzy set ${\cal A}$ on $X$ is characterized by
\begin{align}
\label{eq:pvrep}
{\cal A}:=\{(x,u;\nu_{{\cal A}}(x,u)):x\in X,u\in R(\mu_{{\cal A}}(x))\subset [0,1]\}
\end{align}
where
\begin{itemize}
\item $\nu_{{\cal A}}$ is the membership function of ${\cal A}$ from the ordered pair $(x,u)$ to $[0,1]$,
\item $R(\mu_{{\cal A}}(x))$, for each $x\in X$, is the range of $\mu_{{\cal A}}:X\to [0,1]^{[0,1]}$ called the primary membership function of ${\cal A}$,
\item $x$ is called the primary variable of ${\cal A}$,
\item $u$ is called the secondary variable of ${\cal A}$.
\end{itemize}
\end{df}

{\small
\begin{rem}
\label{rem:psmf}
\quad
\begin{itemize}
\item[1)] $\nu_{{\cal A}}$ is not just a two-variable function $X\times R(\mu_{{\cal A}})\to [0,1]$. A conventional two-variable function has no restriction on the order in which its variables are input, but in the case of $\nu_{{\cal A}}$, $x$ is input first and then $u$ must be input. However, there is another definition of type-2 fuzzy sets; see the Conclusion of this paper.
\item[2)] (\ref{eq:pvrep}) is often called the point-valued representation of ${\cal A}$.
\item[3)] The above $\mu_{\cal A}(x)$ (resp. $u$) should be recognized as the fuzzy grade (resp. the grade of the fuzzy grade) of ${\cal A}$ at $x\in X$. \end{itemize} \end{rem} } \begin{df}[\cite{ZM}] Let ${\cal A}$ be a type-2 fuzzy set on $X$ and $x_0\in X$ a fixed point. We define the type-1 fuzzy set \begin{align*} \kappa_{{\cal A}}(x_0):=\int_{u\in R(\mu_{{\cal A}}(x_0))}\nu_{{\cal A}}^{x_0}(u)/u \end{align*} where $\nu_{{\cal A}}^{x_0}:=\nu_{{\cal A}}(x_0,\,\cdot\,):R(\mu_{{\cal A}}(x_0))\to [0,1]$ is called the secondary membership function of ${\cal A}$ at $x_0$. Moreover, $\nu_{{\cal A}}(x_0,u)$ is called the secondary grade of $x_0$. Here, similar to the well-known notation for type-1 fuzzy sets, the above integral symbol does not mean the conventional continuous sum, but just a continuous union. \end{df} \begin{df}[\cite{H}, Definition 2.8.1] Let ${\cal A}$ be a type-2 fuzzy set on $X$. The $\beta$-cut set of ${\cal A}$ is defined by \begin{align*} [{\cal A}]_{\beta}:=\left\langle \underline{A}_{\beta},\ \overline{A}_{\beta} \right\rangle :=\int_{x\in X}\int_{u\in R(\mu_{{\cal A}})}\{(x,u):\nu_{{\cal A}}^x(u)\ge \beta\} \end{align*} for any $\beta \in [0,1]$. Then, $\underline{A}_{\beta}$ and $\overline{A}_{\beta}$ are called the lower membership function and the upper membership function of ${\cal A}$ respectively. Moreover, the $\alpha$-cut set of $[{\cal A}]_{\beta}$ is defined by \begin{align} \label{eq:[A]ba} [{\cal A}]_{\beta}^{\alpha}:=\left\langle [\underline{A}_{\beta}]_{\alpha},\ [\overline{A}_{\beta}]_{\alpha} \right\rangle \end{align} for any $\alpha \in [0,1]$, where $\underline{A}_{\beta}$ and $\overline{A}_{\beta}$ are type-1 fuzzy numbers on $X$ that appear if ${\cal A}$ is cut by $\beta$. \end{df} {\small \begin{rem} The $\beta$-cut set of a type-2 fuzzy set is also called the $\beta$-plane of it. Strictly speaking, the $\alpha$-cut set of the $\beta$-cut set of a type-2 fuzzy set ${\cal A}$ should be represented as $[[{\cal A}]_{\beta}]_{\alpha}$, but we will write it like (\ref{eq:[A]ba}) since that is annoying. \end{rem} } The argument of type-2 fuzzy sets can be reduced to that of the level cut sets as with type-1 sets. In fact, it is known \cite{H} that the level cut sets make up the original type-2 fuzzy set ${\cal A}$: \begin{align*} {\cal A}=\int_{x\in X}\left(\int_{u\in R(\mu_{{\cal A}}(x))}\nu_{{\cal A}}^x(u)/u\right)\biggr/ x =\bigcup_{\beta\in [0,1]}\beta[{\cal A}]_{\beta} =\bigcup_{\beta\in [0,1]}\beta \bigcup_{\alpha\in [0,1]}\alpha[{\cal A}]_{\beta}^{\alpha} \end{align*} where $\alpha[{\cal A}]_{\beta}^{\alpha}:X\to \{0,\alpha\}$ is a type-1 fuzzy set. Thus, it is sufficient to consider and argue $\beta$-cut sets or these $\alpha$-cut sets for most problems. We hereinafter write $S_{{\cal A}}(x_0;\beta)$ for the $\beta$-cut set of the secondary membership function $\nu_{{\cal A}}^{x_0}$ of ${\cal A}$ and do the same for ${\cal B}$. We hereafter omit the description `$\alpha\in [0,1]$' and `$\beta\in [0,1]$' when we argue $(\alpha,\beta)$-cut sets. \begin{df} Let ${\cal A}$ and ${\cal B}$ be type-2 fuzzy sets on $X$. We denote the $\beta$-cut sets of them by \begin{align} \label{eq:ABbetac} [{\cal A}]_{\beta}:=\left\langle \underline{A}_{\beta},\ \overline{A}_{\beta} \right\rangle,\quad [{\cal B}]_{\beta}:=\left\langle \underline{B}_{\beta},\ \overline{B}_{\beta} \right\rangle. \end{align} Then, ${\cal A}={\cal B}$ if and only if $S_{{\cal A}}(x;\beta)=S_{{\cal B}}(x;\beta)$ for all $x\in X$ and any $\beta\in [0,1]$. 
\end{df} \begin{df}[\cite{H}] \label{df:+kop} Let ${\cal A}$ and ${\cal B}$ be type-2 fuzzy sets on $X$ and $k\in \mathbb R$. We denote the $\beta$-cut sets of ${\cal A}$ and ${\cal B}$ by (\ref{eq:ABbetac}). The sum ${\cal A}+{\cal B}$ of ${\cal A}$ and ${\cal B}$ is defined by \begin{align*} [{\cal A}+{\cal B}]_{\beta}:=\left\langle [\underline{A}_{\beta}+\underline{B}_{\beta}]_{\alpha},\ [\overline{A}_{\beta}+\overline{B}_{\beta}]_{\alpha}\right\rangle =\left\langle [\underline{A}_{\beta}]_{\alpha}+[\underline{B}_{\beta}]_{\alpha},\ [\overline{A}_{\beta}]_{\alpha}+[\overline{B}_{\beta}]_{\alpha}\right\rangle. \end{align*} Moreover, the scalar multiple $k{\cal A}$ of ${\cal A}$ is defined by \begin{align*} [k{\cal A}]_{\beta}:=\left\langle [k\underline{A}_{\beta}]_{\alpha},\ [k\overline{A}_{\beta}]_{\alpha}\right\rangle =\left\langle k[\underline{A}_{\beta}]_{\alpha},\ k[\overline{A}_{\beta}]_{\alpha}\right\rangle. \end{align*} \end{df} \begin{df} Let ${\cal A},{\cal B}$ be type-2 fuzzy sets on $X$. We denote the $\beta$-cut sets of them by (\ref{eq:ABbetac}). Then, the order relationship between them, ${\cal A}\le {\cal B}$, is defined as \begin{align*} [{\cal A}]_{\beta}\le [{\cal B}]_{\beta} \end{align*} for any $\beta \in [0,1]$. In particular, the non-negativity (resp. positivity) of a type-2 fuzzy set ${\cal A}$, ${\cal A}\ge 0$ (resp. ${\cal A}>0$), is defined by \begin{align*} [\underline{A}_{\beta}]_{\alpha}\ge 0\ {\rm and}\ [\overline{A}_{\beta}]_{\alpha}\ge 0\quad ({\rm resp.}\ [\underline{A}_{\beta}]_{\alpha}>0\ {\rm and}\ [\overline{A}_{\beta}]_{\alpha}>0) \end{align*} for any $\alpha,\beta \in [0,1]$. \end{df} The following concepts are important to consider the type-2 version of a type-1 triangular fuzzy number and it is effective when we actually solve concrete T2FIVPs in Section \ref{sec:ExT2FDE}. \begin{df}[\cite{Me}] \label{df:FP} Let ${\cal A}$ be a type-2 fuzzy set on $X$. The union of all secondary domains \begin{align*} {\rm FP}({\cal A}):=\bigcup_{x\in X}R(\mu_{{\cal A}}(x)) \end{align*} of ${\cal A}$ is called the foot-print set (or foot-print of uncertainty) of ${\cal A}$. \end{df} \begin{df}[\cite{H}, Definition 2.3.9] \label{df:PS} Let ${\cal A}$ be a type-2 fuzzy set on $X$. Suppose that there exists at least one $u\in R(\mu_{{\cal A}}(x))$ satisfying \begin{align*} \nu_{{\cal A}}^x(u)=\nu_{{\cal A}}(x,u)=1 \end{align*} for any $x\in X$. If we rewrite $u_x$ for each such point $u\in R(\mu_{{\cal A}}(x))$, every $u_x$ is equal to the membership function value of the type-1 fuzzy set which is uniquely determined. Then, that type-1 fuzzy set is called the principle set of ${\cal A}$ and is denoted by ${\rm P}({\cal A})$. \end{df} \begin{figure} \caption{Foot-print set} \end{figure} \begin{figure} \caption{Principle set} \end{figure} \begin{df}[\cite{H}, Section 3.4] Let ${\cal A}\in \mathscr{T}^2(\mathbb R)$. ${\cal A}$ is perfect if and only if \begin{itemize} \item[i)] the upper and lower membership functions of ${\rm FP}({\cal A})$ are equal as type-1 fuzzy numbers, and \item[ii)] the upper and lower membership functions of ${\rm P}({\cal A})$ are equal as type-1 fuzzy numbers. \end{itemize} Moreover, if a perfect ${\cal A}$ also satisfies that \begin{itemize} \item[iii)] ${\cal A}$ can be completely determined by using its ${\rm FP}({\cal A})$ and ${\rm P}({\cal A})$, \end{itemize} such a ${\cal A}$ is called the perfect quasi-type-2 fuzzy number on $\mathbb R$. The space of them is denoted by $\mathscr{QT}^2(\mathbb R)$. 
\end{df} {\small \begin{ex} Consider ${\cal A}\in \mathscr{T}^2(\mathbb R)$ such that \begin{itemize} \item Primary: $\mu_{{\cal A}}(x)=\max\{1-|x-2|,\ 0\}$, \item Secondary: $\begin{array}{rl}\nu_{{\cal A}}^{x_0}(u)&\hspace{-2.5mm}=\max\{1-10|u-\mu_{{\cal A}}(x_0)|,\ 0\} \\ &\hspace{-2.5mm}=\max\Bigl\{1-10\Bigl|u-\max\{1-|x_0-2|,0\}\Bigr|,\ 0\Bigr\}\quad (0\le u\le 1).\end{array}$ \end{itemize} The lower and upper membership functions of ${\rm FP}({\cal A})$ are obtained by solving \begin{align*} \max\Bigl\{1-10\Bigl|u-\max\{1-|x-2|,0\}\Bigr|,\ 0\Bigr\}=0. \end{align*} For instance, for $x\in [1,3]$, solving \begin{align*} \max\Bigl\{1-10\Bigl|u-1+|x-2|\Bigr|,\ 0\Bigr\}=0 \end{align*} yields \begin{align*} u=1-|x-2|\pm \frac{1}{10}. \end{align*} The lower membership function of ${\rm FP}({\cal A})$ on this interval is therefore \begin{align*} u=\frac{9}{10}-|x-2|<1. \end{align*} Hence it is not normal and thus not a type-1 fuzzy number. This implies that ${\cal A}$ is {\bf not} perfect. \end{ex} } \begin{figure} \caption{Perfect quasi-type-2 fuzzy number} \end{figure} It is pointed out in \cite{MN} that the `triangular fuzzy number' in the type-2 world is characterized as follows: ${\cal A}\in \mathscr{QT}^2(\mathbb R)$ is triangular if and only if $[{\cal A}]_{\beta}^{\alpha}$ has \begin{align} [\underline{A}_{\beta}]_{\alpha}&=\left[L_{\underline{A}_{\beta}}^{\alpha},\ R_{\underline{A}_{\beta}}^{\alpha}\right], \label{eq:uAba}\\ L_{\underline{A}_{\beta}}^{\alpha}&=X_{A_{1}}^{\alpha}-(1-\beta)\left(X_{A_{1}}^{\alpha}-L_{\underline{A}_{0}}^{\alpha}\right), \label{eq:LuAba}\\ R_{\underline{A}_{\beta}}^{\alpha}&=Y_{A_{1}}^{\alpha}+(1-\beta)\left(R_{\underline{A}_{0}}^{\alpha}-Y_{A_{1}}^{\alpha}\right), \label{eq:RuAba}\\ L_{\underline{A}_{0}}^{\alpha}&=C_{{\cal A}}-(1-\alpha)\left(C_{{\cal A}}-L_{\underline{A}_{0}}\right), \label{eq:LuA0a}\\ R_{\underline{A}_{0}}^{\alpha}&=C_{{\cal A}}+(1-\alpha)\left(R_{\underline{A}_{0}}-C_{{\cal A}}\right) \label{eq:RuA0a} \end{align} and \begin{align} [\overline{A}_{\beta}]_{\alpha}&=\left[L_{\overline{A}_{\beta}}^{\alpha},\ R_{\overline{A}_{\beta}}^{\alpha}\right], \label{eq:oAba}\\ L_{\overline{A}_{\beta}}^{\alpha}&=X_{A_{1}}^{\alpha}-(1-\beta)\left(X_{A_{1}}^{\alpha}-L_{\overline{A}_{0}}^{\alpha}\right), \label{eq:LoAba}\\ R_{\overline{A}_{\beta}}^{\alpha}&=Y_{A_{1}}^{\alpha}+(1-\beta)\left(R_{\overline{A}_{0}}^{\alpha}-Y_{A_{1}}^{\alpha}\right), \label{eq:RoAba}\\ L_{\overline{A}_{0}}^{\alpha}&=C_{{\cal A}}-(1-\alpha)\left(C_{{\cal A}}-L_{\overline{A}_{0}}\right), \label{eq:LoA0a}\\ R_{\overline{A}_{0}}^{\alpha}&=C_{{\cal A}}+(1-\alpha)\left(R_{\overline{A}_{0}}-C_{{\cal A}}\right) \label{eq:RoA0a} \end{align} where \begin{align} X_{A_1}^{\alpha}&=C_{{\cal A}}-(1-\alpha)(C_{{\cal A}}-X_{A_1}), \label{eq:XA1a}\\ Y_{A_1}^{\alpha}&=C_{{\cal A}}+(1-\alpha)(Y_{A_1}-C_{{\cal A}}) \label{eq:YA1a} \end{align} and where $X_{A_1}$ and $Y_{A_1}$ are, in this paper, called the {\it left principle number} and the {\it right principle number} of ${\cal A}$ respectively, and $C_{{\cal A}}$ stands for the core of ${\cal A}$, that is, the crisp value $[{\cal A}]_{1}^{1}$. These quantities satisfy \begin{align*} L_{\overline{A}_{0}}^{\alpha} \le X_{A_{1}}^{\alpha} \le L_{\underline{A}_{0}}^{\alpha} \le C_{{\cal A}} \le R_{\underline{A}_{0}}^{\alpha} \le Y_{A_{1}}^{\alpha} \le R_{\overline{A}_{0}}^{\alpha}. \end{align*} (See Figure \ref{fig:LXLCRYR} later.)
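The parametrization (\ref{eq:uAba})--(\ref{eq:YA1a}) is straightforward to evaluate on a computer. The following Python sketch is only an illustration of these formulas (the function and argument names are ours and do not come from the cited literature); it takes the seven determining values of a triangular ${\cal A}\in\mathscr{QT}^2(\mathbb R)$ introduced below and returns the pair of intervals $[\underline{A}_{\beta}]_{\alpha}$ and $[\overline{A}_{\beta}]_{\alpha}$.
\begin{verbatim}
def cut_tpqt2(L_bar0, X1, L_und0, C, R_und0, Y1, R_bar0, alpha, beta):
    # (alpha, beta)-cut of the triangular perfect quasi-type-2 fuzzy number
    # <<L_bar0, X1, L_und0; C; R_und0, Y1, R_bar0>>, following (eq:uAba)-(eq:YA1a)
    X_a = C - (1 - alpha) * (C - X1)           # alpha-cut endpoints of P(A)
    Y_a = C + (1 - alpha) * (Y1 - C)
    L_und0_a = C - (1 - alpha) * (C - L_und0)  # alpha-cuts of the lower/upper
    R_und0_a = C + (1 - alpha) * (R_und0 - C)  # membership functions of FP(A)
    L_bar0_a = C - (1 - alpha) * (C - L_bar0)
    R_bar0_a = C + (1 - alpha) * (R_bar0 - C)
    # beta-interpolation between the principle set and the foot-print set
    lower = (X_a - (1 - beta) * (X_a - L_und0_a),
             Y_a + (1 - beta) * (R_und0_a - Y_a))
    upper = (X_a - (1 - beta) * (X_a - L_bar0_a),
             Y_a + (1 - beta) * (R_bar0_a - Y_a))
    return lower, upper

# the type-2 number "5" = <<3.5, 4, 4.5; 5; 5.5, 6, 6.5>> used later,
# evaluated at (alpha, beta) = (1/3, 1/2)
print(cut_tpqt2(3.5, 4, 4.5, 5, 5.5, 6, 6.5, 1/3, 1/2))
\end{verbatim}
The printed intervals can be checked against the parametric forms (\ref{eq:u5ba}) and (\ref{eq:o5ba}) computed by hand in Section \ref{sec:ExT2FDE}.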
In particular, the supports of ${\cal A}$, \begin{align*} [\underline{A}_{0}]_{\alpha}=\left[L_{\underline{A}_{0}}^{\alpha},R_{\underline{A}_{0}}^{\alpha}\right] \quad {\rm and}\quad [\overline{A}_{0}]_{\alpha}=\left[L_{\overline{A}_{0}}^{\alpha},R_{\overline{A}_{0}}^{\alpha}\right], \end{align*} represent the $\alpha$-cut sets of the lower and upper membership functions of ${\rm FP}({\cal A})$ respectively. Also, \begin{align*} [A_{1}]_{\alpha} = \left[X_{A_{1}}^{\alpha},Y_{A_{1}}^{\alpha}\right] \end{align*} is the $\alpha$-cut set of ${\rm P}({\cal A})$. A triangular type-1 fuzzy number $u$ is determined by its left end $l$, core $c$ and right end $r$: \begin{align*} u=\langle\!\langle l,c,r \rangle\!\rangle, \end{align*} whereas a triangular perfect quasi-type-2 fuzzy number ${\cal A}$ is determined by its upper left end $L_{\overline{A}_0}$, left principle number $X_{A_1}$, lower left end $L_{\underline{A}_0}$, core $C_{{\cal A}}$, lower right end $R_{\underline{A}_0}$, right principle number $Y_{A_1}$ and upper right end $R_{\overline{A}_0}$: \begin{align*} {\cal A}=\langle\!\langle L_{\overline{A}_0},X_{A_1}, L_{\underline{A}_0};C_{{\cal A}};R_{\underline{A}_0},Y_{A_1},R_{\overline{A}_0} \rangle\!\rangle. \end{align*} \begin{figure} \caption{A view of ${\cal A}$} \label{fig:LXLCRYR} \end{figure} We here reconfirm the significance of type-2 fuzzy numbers. For example, let us denote \[ \underset{\sim}{3}=\mbox{`about $3$'} \] by a triangular fuzzy number. Then, the core of $\underset{\sim}{3}$ is of course $3$, but how should we determine its left and right ends? The simplest representation of $\underset{\sim}{3}$ is an isosceles triangular fuzzy number with some $\delta>0$: \[ \underset{\sim}{3}=\langle\!\langle 3-\delta,3,3+\delta \rangle\!\rangle. \] It is therefore important to determine this $\delta$ appropriately, but it is generally difficult to do so objectively. By taking $3-\delta$ (resp. $3+\delta$) as the left (resp. right) principle number and reinterpreting `about $3$' as a triangular perfect quasi-type-2 fuzzy number, the subjective choice of $\delta$ is absorbed into (\ref{eq:uAba})-(\ref{eq:RuAba}) and (\ref{eq:oAba})-(\ref{eq:RoAba}). Herein lies the necessity and usefulness of the type-2 fuzzy notion. \subsection{Type-2 Fuzzy Number-valued Functions} We introduce the following Hung-Yang distance so as to consider a topology on type-2 fuzzy numbers. \begin{df}[\cite{HY}] Let ${\cal A},{\cal B}$ be type-2 fuzzy sets on $X$. A distance between ${\cal A}$ and ${\cal B}$ is defined as \begin{align} d_{{\rm HY}}({\cal A},{\cal B}) :=\int_a^bH_{{\rm f}}(\kappa_{{\cal A}}(x),\kappa_{{\cal B}}(x))\,dx \end{align} where \begin{align*} H_{{\rm f}}(\kappa_{{\cal A}}(x),\kappa_{{\cal B}}(x))&:=\frac{\int_0^1\beta d_{{\rm H}}(S_{{\cal A}}(x;\beta),S_{{\cal B}}(x;\beta))\,d\beta}{\int_0^1\beta \,d\beta} \\ &\hspace{1mm} =2\int_0^1\beta d_{{\rm H}}(S_{{\cal A}}(x;\beta),S_{{\cal B}}(x;\beta))\,d\beta \end{align*} and the above integrals are defined in the sense of Riemann. We denote the space of type-2 fuzzy numbers on $X$ equipped with the $d_{{\rm HY}}$-topology by $\mathscr{T}^2(X)$. \end{df} {\small \begin{rem} See Theorem 2.3 of \cite{MN} to make sure that $\mathscr{T}^2(X)$ is a crisp metric space, that is, that $d_{{\rm HY}}$ satisfies the metric axioms. \end{rem} } \begin{df}[\cite{MN}, Definition 4.1] Let ${\cal A},{\cal B}\in \mathscr{T}^2(X)$.
If there exists some ${\cal C}\in \mathscr{T}^2(X)$ such that \begin{align*} {\cal A}={\cal B}+{\cal C}, \end{align*} we call ${\cal C}$ the T2-Hukuhara difference of ${\cal A}$ and ${\cal B}$. We then write ${\cal C}$ as ${\cal A}-{\cal B}$, as in the type-1 case. \end{df} \begin{thm}[\cite{MN}, Theorem 4.1] Let ${\cal A},{\cal B}\in \mathscr{T}^2(X)$. We denote the $\beta$-cut sets of them by (\ref{eq:ABbetac}). Then, the $\beta$-cut set of the T2-Hukuhara difference of ${\cal A}$ and ${\cal B}$ is given by the T1-Hukuhara differences of the upper and lower membership functions of ${\cal A}$ and ${\cal B}$: \begin{align*} [{\cal A}-{\cal B}]_{\beta} =\left\langle \underline{(A-B)}_{\beta},\ \overline{(A-B)}_{\beta} \right\rangle =\left\langle \underline{A}_{\beta}-\underline{B}_{\beta},\ \overline{A}_{\beta}-\overline{B}_{\beta} \right\rangle. \end{align*} \end{thm} As in the type-1 case, Zadeh's extension principle yields a type-2 fuzzy number-valued function ${\cal F}:\mathscr{T}^2(I)\to \mathscr{T}^2(\mathbb R)$ from a crisp function $f:I\to \mathbb R$. We consider, in this paper, the case of $\mathscr{T}^2(I)=I$. Also, type-2 fuzzy number-valued functions are simply called type-2 fuzzy functions. If the $\beta$-cut set of ${\cal F}:I\to \mathscr{T}^2(\mathbb R)$ is represented by \begin{align*} [{\cal F}(x)]_{\beta}:=\left\langle [\underline{F}_{\beta}(x)]_{\alpha},\ [\overline{F}_{\beta}(x)]_{\alpha} \right\rangle, \end{align*} we write \begin{align*} [\underline{F}_{\beta}(x)]_{\alpha}&:=[\underline{F}_{\beta,-,\alpha}(x),\ \underline{F}_{\beta,+,\alpha}(x)], \\ [\overline{F}_{\beta}(x)]_{\alpha}&:=[\overline{F}_{\beta,-,\alpha}(x),\ \overline{F}_{\beta,+,\alpha}(x)] \end{align*} for all $x\in I$ and any $\alpha,\beta\in [0,1]$. We use the daggers $\dag,\ddag$ introduced in Appendix \ref{app:T1FNVF} as the symbols for type-2 fuzzy derivatives. \begin{df}[\cite{MN}, Definition 4.4] \label{df:T2Hd} Let ${\cal F}:I\to \mathscr{T}^2(\mathbb R)$ and $h>0$ be a crisp number. ${\cal F}$ is T2-differentiable in the first form at some $x_0\in I$ if and only if there exist ${\cal F}(x_0+h)-{\cal F}(x_0)$ and ${\cal F}(x_0)-{\cal F}(x_0-h)$ such that the fuzzy limit \begin{align} \label{eq:1T2d} {\cal F}^\dag(x_0) :=\lim_{h\downarrow 0}\frac{{\cal F}(x_0+h)-{\cal F}(x_0)}{h} =\lim_{h\downarrow 0}\frac{{\cal F}(x_0)-{\cal F}(x_0-h)}{h} \end{align} exists. Moreover, ${\cal F}$ is T2-differentiable in the second form at some $x_0\in I$ if and only if there exist ${\cal F}(x_0)-{\cal F}(x_0+h)$ and ${\cal F}(x_0-h)-{\cal F}(x_0)$ such that the fuzzy limit \begin{align} \label{eq:2T2d} {\cal F}^\ddag(x_0) :=\lim_{h\downarrow 0}\frac{{\cal F}(x_0)-{\cal F}(x_0+h)}{-h} =\lim_{h\downarrow 0}\frac{{\cal F}(x_0-h)-{\cal F}(x_0)}{-h} \end{align} exists. Here the above differences are understood in the T2-Hukuhara sense and the limits in the sense of $d_{{\rm HY}}$. If ${\cal F}$ is T2-differentiable in both senses at any $x\in I$, ${\cal F}^\dag$ and ${\cal F}^\ddag$ are called the (1)-T2-derivative and the (2)-T2-derivative of ${\cal F}$, respectively. \end{df} {\small \begin{rem} \quad \begin{itemize} \item[1)] As in Remark \ref{rem:thirdfourth}, we shall ignore T2-derivatives in the third and fourth forms. The limits in both forms become crisp numbers, as in the type-1 case; this has already been mentioned in Note 4.1 of \cite{MN}. \item[2)] As with type-1, second-order T2-derivatives are obtained by applying first-order T2-derivatives to (\ref{eq:1T2d}) and (\ref{eq:2T2d}).
\end{itemize} \end{rem} } In what follows, ${\cal F}^{\dag\dag}$ is called the (1,1)-T2-derivative of ${\cal F}$, and the other cases are similar. \begin{thm}[\cite{MN}, Theorem 4.2] \label{thm:MNpara} Let ${\cal F}:I\to \mathscr{T}^2(\mathbb R)$ be T2-differentiable on $I$. Then, the parametric forms of its T2-derivatives are given by \begin{itemize} \item[1)] the (1)-parametric form: \begin{align*} [{\cal F}^\dag(x)]_{\beta}^{\alpha}&=\left\langle[\underline{F}^\dag_{\beta}(x)]_{\alpha},\ [\overline{F}^\dag_{\beta}(x)]_{\alpha}\right\rangle \\ &=\left\langle[\underline{F}'_{\beta,-,\alpha}(x),\underline{F}'_{\beta,+,\alpha}(x)],\ [\overline{F}'_{\beta,-,\alpha}(x),\overline{F}'_{\beta,+,\alpha}(x)]\right\rangle, \end{align*} \item[2)] the (2)-parametric form: \begin{align*} [{\cal F}^\ddag(x)]_{\beta}^{\alpha}&=\left\langle[\underline{F}^\ddag_{\beta}(x)]_{\alpha},\ [\overline{F}^\ddag_{\beta}(x)]_{\alpha}\right\rangle \\ &=\left\langle[\underline{F}'_{\beta,+,\alpha}(x),\underline{F}'_{\beta,-,\alpha}(x)],\ [\overline{F}'_{\beta,+,\alpha}(x),\overline{F}'_{\beta,-,\alpha}(x)]\right\rangle. \end{align*} \end{itemize} \end{thm} \begin{thm}[\cite{MN}, Corollary 4.1] \label{thm:MNtpq} Let ${\cal F}:I\to \mathscr{QT}^2(\mathbb R)$ be triangular, that is, \begin{align*} {\cal F}(x)=\langle\!\langle L_{\overline{F}_0(x)},X_{F_1(x)},L_{\underline{F}_0(x)};C_{{\cal F}(x)};R_{\underline{F}_0(x)},Y_{F_1(x)},R_{\overline{F}_0(x)} \rangle\!\rangle. \end{align*} \begin{itemize} \item[1)] If ${\cal F}$ is (1)-T2-differentiable on $I$, then \begin{align*} {\cal F}^{\dag}(x)=\langle\!\langle L_{\overline{F}_0^{\dag}(x)},X_{F_1^{\dag}(x)},L_{\underline{F}_0^{\dag}(x)};C_{{\cal F}^{\dag}(x)};R_{\underline{F}_0^{\dag}(x)},Y_{F_1^{\dag}(x)},R_{\overline{F}_0^{\dag}(x)} \rangle\!\rangle. \end{align*} \item[2)] If ${\cal F}$ is (2)-T2-differentiable on $I$, then \begin{align*} {\cal F}^{\ddag}(x)=\langle\!\langle R_{\overline{F}_0^{\ddag}(x)},Y_{F_1^{\ddag}(x)},R_{\underline{F}_0^{\ddag}(x)};C_{{\cal F}^{\ddag}(x)};L_{\underline{F}_0^{\ddag}(x)},X_{F_1^{\ddag}(x)},L_{\overline{F}_0^{\ddag}(x)} \rangle\!\rangle. \end{align*} \end{itemize} \end{thm} \section{Main Theorems and the Proofs} \subsection{Second-order Differentiation and Continuity of Type-2 Fuzzy Number-valued Functions} \begin{thm} \label{thm:FT2sdba} Let ${\cal F}:I\to \mathscr{T}^2(\mathbb R)$ be second-order T2-differentiable on $I$. 
Then, the parametric forms of its second-order T2-derivatives are given by \begin{itemize} \item[1)] the (1,1)-parametric form: \begin{align*} [{\cal F}^{\dag\dag}(x)]_{\beta}^{\alpha}&=\left\langle[\underline{F}^{\dag\dag}_{\beta}(x)]_{\alpha},\ [\overline{F}^{\dag\dag}_{\beta}(x)]_{\alpha}\right\rangle \\ &=\left\langle[\underline{F}''_{\beta,-,\alpha}(x),\underline{F}''_{\beta,+,\alpha}(x)],\ [\overline{F}''_{\beta,-,\alpha}(x),\overline{F}''_{\beta,+,\alpha}(x)]\right\rangle, \end{align*} \item[2)] the (1,2)-parametric form: \begin{align*} [{\cal F}^{\dag\ddag}(x)]_{\beta}^{\alpha}&=\left\langle[\underline{F}^{\dag\ddag}_{\beta}(x)]_{\alpha},\ [\overline{F}^{\dag\ddag}_{\beta}(x)]_{\alpha}\right\rangle \\ &=\left\langle[\underline{F}''_{\beta,+,\alpha}(x),\underline{F}''_{\beta,-,\alpha}(x)],\ [\overline{F}''_{\beta,+,\alpha}(x),\overline{F}''_{\beta,-,\alpha}(x)]\right\rangle, \end{align*} \item[3)] the (2,1)-parametric form: \begin{align*} [{\cal F}^{\ddag\dag}(x)]_{\beta}^{\alpha}&=\left\langle[\underline{F}^{\ddag\dag}_{\beta}(x)]_{\alpha},\ [\overline{F}^{\ddag\dag}_{\beta}(x)]_{\alpha}\right\rangle \\ &=\left\langle[\underline{F}''_{\beta,+,\alpha}(x),\underline{F}''_{\beta,-,\alpha}(x)],\ [\overline{F}''_{\beta,+,\alpha}(x),\overline{F}''_{\beta,-,\alpha}(x)]\right\rangle, \end{align*} \item[4)] the (2,2)-parametric form: \begin{align*} [{\cal F}^{\ddag\ddag}(x)]_{\beta}^{\alpha}&=\left\langle[\underline{F}^{\ddag\ddag}_{\beta}(x)]_{\alpha},\ [\overline{F}^{\ddag\ddag}_{\beta}(x)]_{\alpha}\right\rangle \\ &=\left\langle[\underline{F}''_{\beta,-,\alpha}(x),\underline{F}''_{\beta,+,\alpha}(x)],\ [\overline{F}''_{\beta,-,\alpha}(x),\overline{F}''_{\beta,+,\alpha}(x)]\right\rangle. \end{align*} \end{itemize} \end{thm} \begin{proof} The proof can be obtained by substituting ${\cal F}^{\dag}$ or ${\cal F}^{\ddag}$ for ${\cal F}$ in Theorem \ref{thm:MNpara}. \end{proof} \begin{thm} Let ${\cal F}:I\to \mathscr{QT}^2(\mathbb R)$ be triangular, that is, \begin{align*} {\cal F}(x)=\langle\!\langle L_{\overline{F}_0(x)},X_{F_1(x)},L_{\underline{F}_0(x)};C_{{\cal F}(x)};R_{\underline{F}_0(x)},Y_{F_1(x)},R_{\overline{F}_0(x)} \rangle\!\rangle. \end{align*} \begin{itemize} \item[1)] If ${\cal F}$ is (1,1)-T2-differentiable on $I$, then \begin{align*} {\cal F}^{\dag\dag}(x)=\langle\!\langle L_{\overline{F}_0^{\dag\dag}(x)},X_{F_1^{\dag\dag}(x)},L_{\underline{F}_0^{\dag\dag}(x)};C_{{\cal F}^{\dag\dag}(x)};R_{\underline{F}_0^{\dag\dag}(x)},Y_{F_1^{\dag\dag}(x)},R_{\overline{F}_0^{\dag\dag}(x)} \rangle\!\rangle. \end{align*} \item[2)] If ${\cal F}$ is (1,2)-T2-differentiable on $I$, then \begin{align*} {\cal F}^{\dag\ddag}(x)=\langle\!\langle R_{\overline{F}_0^{\dag\ddag}(x)},Y_{F_1^{\dag\ddag}(x)},R_{\underline{F}_0^{\dag\ddag}(x)};C_{{\cal F}^{\dag\ddag}(x)};L_{\underline{F}_0^{\dag\ddag}(x)},X_{F_1^{\dag\ddag}(x)},L_{\overline{F}_0^{\dag\ddag}(x)} \rangle\!\rangle. \end{align*} \item[3)] If ${\cal F}$ is (2,1)-T2-differentiable on $I$, then \begin{align*} {\cal F}^{\ddag\dag}(x)=\langle\!\langle R_{\overline{F}_0^{\ddag\dag}(x)},Y_{F_1^{\ddag\dag}(x)},R_{\underline{F}_0^{\ddag\dag}(x)};C_{{\cal F}^{\ddag\dag}(x)};L_{\underline{F}_0^{\ddag\dag}(x)},X_{F_1^{\ddag\dag}(x)},L_{\overline{F}_0^{\ddag\dag}(x)} \rangle\!\rangle. 
\end{align*} \item[4)] If ${\cal F}$ is (2,2)-T2-differentiable on $I$, then \begin{align*} {\cal F}^{\ddag\ddag}(x)=\langle\!\langle L_{\overline{F}_0^{\ddag\ddag}(x)},X_{F_1^{\ddag\ddag}(x)},L_{\underline{F}_0^{\ddag\ddag}(x)};C_{{\cal F}^{\ddag\ddag}(x)};R_{\underline{F}_0^{\ddag\ddag}(x)},Y_{F_1^{\ddag\ddag}(x)},R_{\overline{F}_0^{\ddag\ddag}(x)} \rangle\!\rangle. \end{align*} \end{itemize} \end{thm} \begin{proof} The proof can be obtained by substituting ${\cal F}^{\dag}$ or ${\cal F}^{\ddag}$ for ${\cal F}$ in Theorem \ref{thm:MNtpq}. \end{proof} \begin{df} Let ${\cal F}:I\to \mathscr{T}^2(\mathbb R)$ and $h>0$ be a crisp number. ${\cal F}$ is T2-continuous on $I$ if and only if there exists the limit in $d_{{\rm HY}}$: \begin{align} \label{eq:conti} \lim_{h\downarrow 0}\{{\cal F}(x+h)-{\cal F}(x)\} =\lim_{h\downarrow 0}\{{\cal F}(x)-{\cal F}(x-h)\} =0 \end{align} for any $x\in I$. We write ${\cal C}(I;\mathscr{T}^2(\mathbb R))$ for the space of T2-continuous fuzzy functions. \end{df} \begin{thm} \label{thm:difconti} If ${\cal F}:I\to \mathscr{T}^2(\mathbb R)$ is T2-differentiable on $I$, then ${\cal F}$ is T2-continuous on $I$. \end{thm} \begin{proof} ${\cal F}$ is T2-differentiable on $I$ by the assumption, so the limit \begin{align*} \lim_{h\downarrow 0}\frac{{\cal F}(x+h)-{\cal F}(x)}{h}=\lim_{h\downarrow 0}\frac{{\cal F}(x)-{\cal F}(x-h)}{h} \end{align*} exists. Also, we can denote \begin{align} {\cal F}(x+h)-{\cal F}(x)&=\frac{{\cal F}(x+h)-{\cal F}(x)}{h}h \label{eq:diffFx+h-Fx}\\ {\cal F}(x)-{\cal F}(x-h)&=\frac{{\cal F}(x)-{\cal F}(x-h)}{h}h \label{eq:diffFx-Fx-h} \end{align} for any $x\in I$. Hence, we have (\ref{eq:conti}) by letting $h\downarrow 0$ in both sides of (\ref{eq:diffFx+h-Fx}) and (\ref{eq:diffFx-Fx-h}). \end{proof} \subsection{Differentiation of Four Rules of Type-2 Fuzzy Number-valued Functions} We say in this paper that ${\cal F}:I\to \mathscr{T}^2(\mathbb R)$ is second-order T2-differentiable on $I$ in the same case of differentiability, if ${\cal F}^{\dag}$ and ${\cal F}^{\ddag}$ are (1)-T2-differentiable and (2)-T2-differentiable on $I$, respectively. \begin{thm} \label{thm:F+GsHdd} Let ${\cal F},{\cal G}:I\to \mathscr{T}^2(\mathbb R)$ be second-order T2-differentiable on $I$ in the same case of differentiability. Then, ${\cal F}+{\cal G}:I\to \mathscr{T}^2(\mathbb R)$ is second-order T2-differentiable on $I$ and \begin{align} ({\cal F}+{\cal G})^{\dag}(x)&={\cal F}^{\dag}(x)+{\cal G}^{\dag}(x), \label{eq:F+Gd}\\ ({\cal F}+{\cal G})^{\ddag}(x)&={\cal F}^{\ddag}(x)+{\cal G}^{\ddag}(x), \\ ({\cal F}+{\cal G})^{\dag\dag}(x)&={\cal F}^{\dag\dag}(x)+{\cal G}^{\dag\dag}(x), \\ ({\cal F}+{\cal G})^{\ddag\ddag}(x)&={\cal F}^{\ddag\ddag}(x)+{\cal G}^{\ddag\ddag}(x) \label{eq:F+Gdddd} \end{align} for $x\in I$. Moreover, if there exists the T2-Hukuhara difference ${\cal F}-{\cal G}$, then all of (\ref{eq:F+Gd})-(\ref{eq:F+Gdddd}) hold even if $+$ is replaced with the T2-Hukuhara difference $-$. \end{thm} \begin{proof} It is obvious for sums from Definition \ref{df:T2Hd}. We prove only \begin{align} ({\cal F}-{\cal G})^{\dag}(x)&={\cal F}^{\dag}(x)-{\cal G}^{\dag}(x), \label{eq:HF-Gd}\\ ({\cal F}-{\cal G})^{\dag\dag}(x)&={\cal F}^{\dag\dag}(x)-{\cal G}^{\dag\dag}(x) \label{eq:HF-Gdd} \end{align} for $x\in I$. The other two cases can be shown in the same way. 
Since ${\cal F}-{\cal G}$ exists, there is some type-2 fuzzy function ${\cal W}:I\to \mathscr{T}^2(\mathbb R)$ such that \begin{align*} {\cal F}(x)&={\cal G}(x)+{\cal W}(x), \\ {\cal F}(x\pm h)&={\cal G}(x\pm h)+{\cal W}(x\pm h) \end{align*} where $h>0$ is a crisp number. Then, we have \begin{align} \label{eq:Fx+h-Fx} \begin{aligned} {\cal F}(x+h)-{\cal F}(x)&=({\cal G}(x+h)+{\cal W}(x+h))-({\cal G}(x)+{\cal W}(x)) \\ &=({\cal G}(x+h)-{\cal G}(x))+({\cal W}(x+h)-{\cal W}(x)) \end{aligned} \end{align} by virtue of Lemma \ref{lem:Hddis} mentioned later. Similarly, we also have \begin{align} \label{eq:Fx-Fx-h} {\cal F}(x)-{\cal F}(x-h) =({\cal G}(x)-{\cal G}(x-h))+({\cal W}(x)-{\cal W}(x-h)). \end{align} ${\cal F}$ and ${\cal G}$ are T2-differentiable on $I$, so ${\cal F}(x+h)-{\cal F}(x)$, ${\cal F}(x)-{\cal F}(x-h)$, ${\cal G}(x+h)-{\cal G}(x)$ and ${\cal G}(x)-{\cal G}(x-h)$ exist for any $x\in I$. Thus, the following derivative in $d_{{\rm HY}}$ exists: \begin{align*} ({\cal F}-{\cal G})^{\dag}(x)=\lim_{h\downarrow 0}\frac{{\cal W}(x+h)-{\cal W}(x)}{h}=\lim_{h\downarrow 0}\frac{{\cal W}(x)-{\cal W}(x-h)}{h},\quad x\in I. \end{align*} This limit is ${\cal F}^{\dag}(x)-{\cal G}^{\dag}(x)$, $x\in I$, from (\ref{eq:Fx+h-Fx}) and (\ref{eq:Fx-Fx-h}). Hence we have gained (\ref{eq:HF-Gd}). (\ref{eq:HF-Gdd}) can be obtained by repeating the above discussion for $({\cal F}-{\cal G})^{\dag}$. This completes the proof. \end{proof} \begin{lem} \label{lem:Hddis} Let $u_j,v_j\in \mathscr{T}^1(\mathbb R)\ (j=1,2)$. Suppose that $(u_1+v_1)-(u_2+v_2)$, $u_1-u_2$ and $v_1-v_2$ exist. Then, the distributive law for T1-Hukuhara differences in the following sense hold: \begin{align*} (u_1+v_1)-(u_2+v_2)=(u_1-u_2)+(v_1-v_2). \end{align*} Moreover, the same result holds for type-2 fuzzy numbers. \end{lem} \begin{proof} We prove only for type-1 fuzzy numbers, since the definition of T2-Hukuhara differences for type-2 fuzzy numbers is essentially equal to that for type-1 fuzzy numbers. The T1-Hukuhara difference is the difference between the left ends and the right ends of two intervals. Let the $\alpha$-cuts of $u_j,v_j\ (j=1,2)$ be represented as \begin{align*} [u_j]_{\alpha}:=[u_{j,-}(\alpha),u_{j,+}(\alpha)],\quad [v_j]_{\alpha}:=[v_{j,-}(\alpha),v_{j,+}(\alpha)]. \end{align*} Then, we have the following and finish the proof: \begin{align*} &[(u_1+v_1)-(u_2+v_2)]_{\alpha} \\ &=[u_1+v_1]_{\alpha}-[u_2+v_2]_{\alpha} \\ &=([u_1]_{\alpha}+[v_1]_{\alpha})-([u_2]_{\alpha}+[v_2]_{\alpha}) \\ &=([u_{1,-}(\alpha),u_{1,+}(\alpha)]+[v_{1,-}(\alpha),v_{1,+}(\alpha)])-([u_{2,-}(\alpha),u_{2,+}(\alpha)]+[v_{2,-}(\alpha),v_{2,+}(\alpha)]) \\ &=[u_{1,-}(\alpha)+v_{1,-}(\alpha),\ u_{1,+}(\alpha)+v_{1,+}(\alpha)]-[u_{2,-}(\alpha)+v_{2,-}(\alpha),\ u_{2,+}(\alpha)+v_{2,+}(\alpha)] \\ &=[(u_{1,-}(\alpha)-u_{2,-}(\alpha))+(v_{1,-}(\alpha)-v_{2,-}(\alpha)),\ (u_{1,+}(\alpha)-u_{2,+}(\alpha))+(v_{1,+}(\alpha)-v_{2,+}(\alpha))] \\ &=[u_{1,-}(\alpha)-u_{2,-}(\alpha),\ u_{1,+}(\alpha)-u_{2,+}(\alpha)]+[v_{1,-}(\alpha)-v_{2,-}(\alpha),\ v_{1,+}(\alpha)-v_{2,+}(\alpha)] \\ &=[u_1-u_2]_{\alpha}+[v_1-v_2]_{\alpha} \\ &=[(u_1-u_2)+(v_1-v_2)]_{\alpha}. \end{align*} \end{proof} {\small \begin{rem} We can generally have the similar results to Theorem \ref{thm:F+GsHdd} for any order $N\in \mathbb N\cup \{0\}$. 
\end{rem} } \begin{thm} \label{thm:F-GT2d} Let ${\cal F},{\cal G}:I\to \mathscr{T}^2(\mathbb R)$ be second-order T2-differentiable on $I$ such that \begin{itemize} \item if ${\cal F}$ is (1,1)-T2-differentiable then ${\cal G}$ is (2,1)-T2-differentiable, or \item if ${\cal F}$ is (1,2)-T2-differentiable then ${\cal G}$ is (2,2)-T2-differentiable, or \item if ${\cal F}$ is (2,1)-T2-differentiable then ${\cal G}$ is (1,1)-T2-differentiable, or \item if ${\cal F}$ is (2,2)-T2-differentiable then ${\cal G}$ is (1,2)-T2-differentiable. \end{itemize} Suppose that there exists the T2-Hukuhara difference ${\cal F}(x)-{\cal G}(x)$ for any $x\in I$. Then, ${\cal F}-{\cal G}:I\to \mathscr{T}^2(\mathbb R)$ is second-order T2-differentiable on $I$ and \begin{align} ({\cal F}-{\cal G})^{\dag}(x)&={\cal F}^{\dag}(x)+(-1){\cal G}^{\ddag}(x), \label{eq:F-Gd}\\ ({\cal F}-{\cal G})^{\ddag}(x)&={\cal F}^{\ddag}(x)+(-1){\cal G}^{\dag}(x), \\ ({\cal F}-{\cal G})^{\dag\dag}(x)&={\cal F}^{\dag\dag}(x)+(-1){\cal G}^{\ddag\dag}(x), \label{eq:F-Gdd}\\ ({\cal F}-{\cal G})^{\dag\ddag}(x)&={\cal F}^{\dag\ddag}(x)+(-1){\cal G}^{\ddag\ddag}(x), \\ ({\cal F}-{\cal G})^{\ddag\dag}(x)&={\cal F}^{\ddag\dag}(x)+(-1){\cal G}^{\dag\dag}(x), \\ ({\cal F}-{\cal G})^{\ddag\ddag}(x)&={\cal F}^{\ddag\ddag}(x)+(-1){\cal G}^{\dag\ddag}(x) \end{align} for $x\in I$. \end{thm} \begin{proof} We prove only (\ref{eq:F-Gd}) and (\ref{eq:F-Gdd}) in the first case. The other cases and equations can be similarly proved. Let us first prove (\ref{eq:F-Gd}) in the first case. Since, on $I$, ${\cal F}$ is (1)-T2-differentiable and ${\cal G}$ is (2)-T2-differentiable, ${\cal F}(x+h)-{\cal F}(x)$, ${\cal F}(x)-{\cal F}(x-h)$, ${\cal G}(x)-{\cal G}(x+h)$ and ${\cal G}(x-h)-{\cal G}(x)$ exist for any $x\in I$. Moreover, there exists a type-2 fuzzy number-valued function ${\cal W}_1$ such that \begin{align*} {\cal F}(x+h)&={\cal F}(x)+{\cal W}_1(x,h), \\ {\cal F}(x)&={\cal F}(x-h)+{\cal W}_2(x,h), \end{align*} for $h>0$, with \begin{align*} \lim_{h\downarrow 0}\frac{{\cal W}_1(x,h)}{h} =\lim_{h\downarrow 0}\frac{{\cal W}_2(x,h)}{h} ={\cal F}^{\dag}(x) \end{align*} for $x\in I$ and \begin{align*} {\cal G}(x)&={\cal G}(x+h)+{\cal W}_3(x,h), \\ {\cal G}(x-h)&={\cal G}(x)+{\cal W}_4(x,h), \end{align*} for $h>0$, with \begin{align*} \lim_{h\downarrow 0}\frac{{\cal W}_3(x,h)}{h} =\lim_{h\downarrow 0}\frac{{\cal W}_4(x,h)}{h} =(-1){\cal G}^{\ddag}(x) \end{align*} for $x\in I$. Thus, we have \begin{align*} {\cal F}(x+h)+{\cal G}(x)={\cal F}(x)+{\cal G}(x+h)+{\cal W}_1(x,h)+{\cal W}_3(x,h), \end{align*} but we obtain \begin{align*} {\cal F}(x+h)-{\cal G}(x+h)={\cal F}(x)-{\cal G}(x)+{\cal W}_1(x,h)+{\cal W}_3(x,h) \end{align*} since ${\cal F}(x)-{\cal G}(x)$ exists for any $x\in I$ by the assumption. This implies that $\{{\cal F}(x+h)-{\cal G}(x+h)\}-\{{\cal F}(x)-{\cal G}(x)\}$ exists and \begin{align} \label{eq:fracFh-Gh} \frac{\{{\cal F}(x+h)-{\cal G}(x+h)\}-\{{\cal F}(x)-{\cal G}(x)\}}{h}=\frac{{\cal W}_1(x,h)}{h}+\frac{{\cal W}_3(x,h)}{h} \end{align} for any $x\in I$. Similarly, we can find the existence of $\{{\cal F}(x)-{\cal G}(x)\}-\{{\cal F}(x-h)-{\cal G}(x-h)\}$ and obtain \begin{align} \label{eq:fracF-G} \frac{\{{\cal F}(x)-{\cal G}(x)\}-\{{\cal F}(x-h)-{\cal G}(x-h)\}}{h}=\frac{{\cal W}_2(x,h)}{h}+\frac{{\cal W}_4(x,h)}{h} \end{align} for any $x\in I$. Both the left-hand sides of (\ref{eq:fracFh-Gh}) and (\ref{eq:fracF-G}) have the common limit $({\cal F}-{\cal G})^{\dag}$ as $h\downarrow 0$. 
Similarly, both the right-hand sides of (\ref{eq:fracFh-Gh}) and (\ref{eq:fracF-G}) have the common limit ${\cal F}^{\dag}+(-1){\cal G}^{\ddag}$ as $h\downarrow 0$. Hence, we have obtained (\ref{eq:F-Gd}). Let us next prove (\ref{eq:F-Gdd}) in the first case. We can however derive it from (\ref{eq:F-Gd}) and Theorem \ref{thm:F+GsHdd} immediately. This completes the proof. \end{proof} \begin{thm} \label{thm:cftimesff} Let $f:I\to \mathbb R$ be second-order differentiable and ${\cal G}:I\to \mathscr{T}^2(\mathbb R)$ be second-order T2-differentiable on $I$. Then, \begin{itemize} \item[1)] if $f(x)f'(x)>0$ and ${\cal G}$ is (1)-T2-differentiable, then $f{\cal G}$ is (1)-T2-differentiable and \begin{align} (f{\cal G})^{\dag}(x) =f'(x){\cal G}(x)+f(x){\cal G}^{\dag}(x) \end{align} for $x\in I$; \item[2)] if $f(x)f'(x)<0$ and ${\cal G}$ is (2)-T2-differentiable, then $f{\cal G}$ is (2)-T2-differentiable and \begin{align} \label{eq:f.Gdd} (f{\cal G})^{\ddag}(x) =f'(x){\cal G}(x)+f(x){\cal G}^{\ddag}(x) \end{align} for $x\in I$; \item[3)] if $f(x)f'(x)>0$, $f'(x)f''(x)>0$ and ${\cal G}$ is (1,1)-T2-differentiable, then $f{\cal G}$ is (1,1)-T2-differentiable and \begin{align} (f{\cal G})^{\dag\dag}(x) =f''(x){\cal G}(x)+2f'(x){\cal G}^{\dag}(x)+f(x){\cal G}^{\dag\dag}(x) \end{align} for $x\in I$; \item[4)] if $f(x)f'(x)<0$, $f'(x)f''(x)<0$ and ${\cal G}$ is (2,2)-T2-differentiable, then $f{\cal G}$ is (2,2)-T2-differentiable and \begin{align} \label{eq:f.Gdddd} (f{\cal G})^{\ddag\ddag}(x) =f''(x){\cal G}(x)+2f'(x){\cal G}^{\ddag}(x)+f(x){\cal G}^{\ddag\ddag}(x) \end{align} for $x\in I$. \end{itemize} \end{thm} \begin{proof} We prove only 2) and 4) because the other two cases are proved in the same way. \begin{itemize} \item[2)] We suppose that $f(x)>0$ and $f'(x)<0$. Let $h>0$ be a crisp number. Since $f$ is differentiable, there exist $\varepsilon_j(x,h)\ (j=1,2)$ such that \begin{align} f(x)&=f(x+h)+\varepsilon_1(x,h), \label{eq:1approx}\\ f(x-h)&=f(x)+\varepsilon_2(x,h). \label{eq:2approx} \end{align} (Recall the first-order approximation of the differentiable function.) Remark that $\varepsilon_1(x,h)>0$ since $\varepsilon_1(x,h)=f(x)-f(x+h)$ and $f$ is monotone decreasing. The same applies to $\varepsilon_2(x,h)$. Since ${\cal G}$ is (2)-T2-differentiable on $I$, ${\cal G}(x)-{\cal G}(x+h)$ and ${\cal G}(x-h)-{\cal G}(x)$ exist for any $x\in I$, that is, there exist type-2 fuzzy functions ${\cal W}_j\ (j=1,2)$ such that \begin{align*} {\cal G}(x)&={\cal G}(x+h)+{\cal W}_1(x,h), \\ {\cal G}(x-h)&={\cal G}(x)+{\cal W}_2(x,h). \end{align*} We thus have \begin{align} {\cal G}(x)&={\cal G}(x+h)+{\cal W}_1(x,h), \label{eq:GGx+h1}\\ {\cal G}(x-h)&={\cal G}(x)+{\cal W}_2(x,h). \label{eq:GGx+h2} \end{align} One has \begin{align*} f(x){\cal G}(x)&=\{f(x+h)+\varepsilon_1(x,h)\}\{{\cal G}(x+h)+{\cal W}_1(x,h)\} \\ &=f(x+h){\cal G}(x+h)+f(x+h){\cal W}_1(x,h)+\varepsilon_1(x,h){\cal G}(x+h)+\varepsilon_1(x,h){\cal W}_1(x,h) \end{align*} by virtue of (\ref{eq:1approx}) and (\ref{eq:GGx+h1}), so $f(x){\cal G}(x)-f(x+h){\cal G}(x+h)$ exists and \begin{align} \label{eq:fGx-fGx+h} \begin{aligned} &\frac{f(x){\cal G}(x)-f(x+h){\cal G}(x+h)}{-h} \\ &\qquad =f(x+h)\frac{{\cal W}_1(x,h)}{-h}+\frac{\varepsilon_1(x,h)}{-h}{\cal G}(x+h)+\frac{\varepsilon_1(x,h)}{-h}{\cal W}_1(x,h) \end{aligned} \end{align} for any $x\in I$. 
Similarly, from (\ref{eq:2approx}) and (\ref{eq:GGx+h2}), $f(x-h){\cal G}(x-h)-f(x){\cal G}(x)$ exists and we also have \begin{align} \label{eq:fGx-h-fGx} \begin{aligned} &\frac{f(x-h){\cal G}(x-h)-f(x){\cal G}(x)}{-h} \\ &\qquad =f(x-h)\frac{{\cal W}_2(x,h)}{-h}+\frac{\varepsilon_2(x,h)}{-h}{\cal G}(x-h)+\frac{\varepsilon_2(x,h)}{-h}{\cal W}_2(x,h) \end{aligned} \end{align} for any $x\in I$. Theorem \ref{thm:difconti} implies ${\cal G}\in {\cal C}(I;\mathscr{T}^2(\mathbb R))$, so ${\cal W}_j(x,h)\to 0\ (j=1,2)$ as $h\downarrow 0$. Therefore (\ref{eq:f.Gdd}) is obtained by letting $h\downarrow 0$ in both sides of (\ref{eq:fGx-fGx+h}) and (\ref{eq:fGx-h-fGx}). We can also gain the same result in the case that $f(x)<0$ and $f'(x)>0$. Hence, (\ref{eq:f.Gdd}) has been derived. \item[4)] (\ref{eq:f.Gdddd}) is immediately proved by repeating (\ref{eq:f.Gdd}). \end{itemize} This completes the proof. \end{proof} \subsection{Differentiation of Composite Type-2 Fuzzy Number-valued Functions} We finally deal with the composition of crisp and type-1 / type-2 fuzzy functions and its derivative. \begin{thm} \label{thm:compo} Let $f:I\to \mathbb R$ be differentiable and $G:\mathbb R\to \mathscr{T}^1(\mathbb R)$ T1-differentiable. We consider the type-1 fuzzy composite function $G\circ f:I\to \mathscr{T}^1(\mathbb R)$. Suppose that T1-Hukuhara differences \begin{align*} (G\circ f)(x+h)-(G\circ f)(x)\quad {\rm and}\quad (G\circ f)(x)-(G\circ f)(x-h) \end{align*} exist for any $x\in I$ and $h>0$ sufficiently small. Then, $G\circ f$ is T1-differentiable on $I$ and \begin{align} (G\circ f)^{\dag}&=G^{\dag}(f(x))f'(x), \label{eq:compo1}\\ (G\circ f)^{\ddag}&=G^{\ddag}(f(x))f'(x). \label{eq:compo2} \end{align} Moreover, the same result holds for ${\cal G}:\mathbb R\to \mathscr{T}^2(\mathbb R)$. \end{thm} \begin{proof} We prove only for type-1 $G$ because we can prove for type-2 ${\cal G}$ in the same way. Let $h>0$ be a crisp number. The $\alpha$-cut set of the right difference quotient of $(G\circ f)$ is \begin{align*} &\left[\frac{(G\circ f)(x+h)-(G\circ f)(x)}{h}\right]_{\alpha} \\ &=\left[\frac{(G\circ f)_{-,\alpha}(x+h)-(G\circ f)_{-,\alpha}(x)}{h},\ \frac{(G\circ f)_{+,\alpha}(x+h)-(G\circ f)_{+,\alpha}(x)}{h}\right] \\ &=\left[\frac{G_{-,\alpha}(f(x+h))-G_{-,\alpha}(f(x))}{h},\ \frac{G_{+,\alpha}(f(x+h))-G_{+,\alpha}(f(x))}{h}\right] \\ &=\left[\frac{G_{-,\alpha}(f(x+h))-G_{-,\alpha}(f(x))}{f(x+h)-f(x)}\frac{f(x+h)-f(x)}{h},\ \frac{G_{+,\alpha}(f(x+h))-G_{+,\alpha}(f(x))}{f(x+h)-f(x)}\frac{f(x+h)-f(x)}{h}\right]. \end{align*} Similarly, the $\alpha$-cut set of the left difference quotient of $(G\circ f)$ is \begin{align*} &\left[\frac{(G\circ f)(x)-(G\circ f)(x-h)}{h}\right]_{\alpha} \\ &=\left[\frac{G_{-,\alpha}(f(x))-G_{-,\alpha}(f(x-h))}{f(x)-f(x-h)}\frac{f(x)-f(x-h)}{h},\ \frac{G_{+,\alpha}(f(x))-G_{+,\alpha}(f(x-h))}{f(x)-f(x-h)}\frac{f(x)-f(x-h)}{h}\right]. \end{align*} The rightmost-hand sides of the above two equations converge as $h\downarrow 0$, since $f(x\pm h)\to f(x)$ as $h\downarrow 0$ by virtue of the continuity of $f$. Thus (\ref{eq:compo1}) can be obtained. (\ref{eq:compo2}) is similar. Hence, this completes the proof. \end{proof} \section{Type-2 Fuzzy Initial Value Problems for Second-order T2FDEs \label{sec:ExT2FDE}} We actually solve, in this section, some concrete type-2 fuzzy initial value problems for second-order T2FDEs. We write all first-order Hukuhara derivatives $^{\dag}$ and $^{\ddag}$ together as ${\rm D}$. 
That is, ${\rm D}^2$ denotes all second-order Hukuhara derivatives $^{\dag\dag}$, $^{\dag\ddag}$, $^{\ddag\dag}$ and $^{\ddag\ddag}$ together. Italic numbers \begin{align*} {\it 0},{\it 1},{\it 2},{\it 3},{\it 4},{\it 5},{\it 6},{\it 7},{\it 8},{\it 9} \end{align*} stand for concrete type-2 fuzzy numbers. We abbreviate the type-2 fuzzy initial value problem (resp. condition) as T2FIVP (resp. T2FIVC) in this subsection. We consider T2FIVPs on $I=[0,r]$ for some $r>0$ or $I=[0,+\infty)$: \begin{align} \label{eq:gT2FDE1} \begin{cases} {\rm D}^2{\cal Y}(x)+a{\rm D}{\cal Y}(x)+b{\cal Y}(x)=0, \\ {\cal Y}(0)={\cal U}\in \mathscr{T}^2(\mathbb R), \\ {\rm D}{\cal Y}(0)={\cal V}\in \mathscr{T}^2(\mathbb R). \end{cases} \end{align} \begin{df} Let ${\cal Y}:I\to \mathscr{T}^2(\mathbb R)$ be second-order T2-differentiable. We denote the $\beta$-cut set of ${\cal Y}={\cal Y}(x)$ by \begin{align*} [{\cal Y}(x)]_{\beta}=\left\langle \underline{Y}_{\beta}(x),\ \overline{Y}_{\beta}(x)\right\rangle. \end{align*} Then, ${\cal Y}$ is the $(i,j)$-type-2 fuzzy solution of (\ref{eq:gT2FDE1}), $(i,j)\in \{1,2\}^2$, if and only if, for each $\beta\in [0,1]$, \begin{itemize} \item[i)] ${\rm D}_{i}\underline{Y}_{\beta}$, ${\rm D}_{i}\overline{Y}_{\beta}$, ${\rm D}^2_{i,j}\underline{Y}_{\beta}$, ${\rm D}^2_{i,j}\overline{Y}_{\beta}$ exist on $I$, and \item[ii)] $\underline{Y}_{\beta}$ and $\overline{Y}_{\beta}:I\to \mathscr{T}^1(\mathbb R)$ satisfy \begin{align*} \begin{cases} [{\rm D}^2_{i,j}\underline{Y}_{\beta}(x)]_{\alpha}+[a{\rm D}_{i}\underline{Y}_{\beta}(x)]_{\alpha}+[b\underline{Y}_{\beta}(x)]_{\alpha}=0,\quad x\in I, \\ \underline{Y}_{\beta}(0)=\underline{u}_{\beta}\in \mathscr{T}^1(\mathbb R), \\ {\rm D}_{i}\underline{Y}_{\beta}(0)=\underline{v}_{\beta}\in \mathscr{T}^1(\mathbb R) \end{cases} \end{align*} and \begin{align*} \begin{cases} [{\rm D}^2_{i,j}\overline{Y}_{\beta}(x)]_{\alpha}+[a{\rm D}_{i}\overline{Y}_{\beta}(x)]_{\alpha}+[b\overline{Y}_{\beta}(x)]_{\alpha}=0,\quad x\in I, \\ \overline{Y}_{\beta}(0)=\overline{u}_{\beta}\in \mathscr{T}^1(\mathbb R), \\ {\rm D}_{i}\overline{Y}_{\beta}(0)=\overline{v}_{\beta}\in \mathscr{T}^1(\mathbb R) \end{cases} \end{align*} for any $\alpha\in [0,1]$, respectively. \end{itemize} \end{df} {\small \begin{rem} The type-2 fuzzy solution generally becomes the type-1 fuzzy solution if $\beta=1$; it becomes the crisp solution if $\alpha=\beta=1$. \end{rem} } We solve problems in the case of crisp coefficients. We have four candidate solutions for type-1 fuzzy differential equations of order 2, but we have eight of them for type-2 ones of order 2. Recall Definition \ref{df:+kop1} and \ref{df:+kop} for operations. `({\tt A.C.})' represents `ANSWER COMPLETED'. \subsection{Case of Positive Coefficients} We begin with easy problems, that is, T2FIVPs of order 2 in the case of crisp coefficients. \begin{prob} \label{prob:P1} Let ${\cal Y}:[0,1]\to \mathscr{T}^2(\mathbb R)$ be a type-2 fuzzy function. Then, solve T2FIVPs: \begin{numcases} {} {\rm D}^2{\cal Y}(x)+3{\rm D}{\cal Y}(x)=0, \label{eq:T2FDE1}\\ {\cal Y}(0)={\it 5}\in \mathscr{QT}^2(\mathbb R), \\ {\rm D}{\cal Y}(0)={\it 1}\in \mathscr{QT}^2(\mathbb R). \end{numcases} \end{prob} \begin{sol} We consider the $(\alpha,\beta)$-cut set of (\ref{eq:T2FDE1}): \begin{align*} \left\langle[\underline{Y}^{\dag\dag}_{\beta}(x)]_{\alpha}+3[\underline{Y}^{\dag}_{\beta}(x)]_{\alpha},\ [\overline{Y}^{\dag\dag}_{\beta}(x)]_{\alpha}+3[\overline{Y}^{\dag}_{\beta}(x)]_{\alpha}\right\rangle=0, \quad \alpha\in [0,1]. 
\end{align*} This can be solved by (1,1) or (2,2)-T1-differentiation, so we have the following two T1FIVPs: \begin{numcases} {} [\underline{Y}^{\dag\dag}_{\beta}(x)]_{\alpha}+3[\underline{Y}^{\dag}_{\beta}(x)]_{\alpha}=[\underline{Y}''_{\beta,-,\alpha}(x)+3\underline{Y}'_{\beta,-,\alpha}(x),\ \underline{Y}''_{\beta,+,\alpha}(x)+3\underline{Y}'_{\beta,+,\alpha}(x)]=0, \label{eq:udd3d}\\ \underline{Y}_{\beta}(0)=\underline{5}_{\beta}\in \mathscr{T}^1(\mathbb R), \\ \underline{Y}_{\beta}^{\dag}(0)=\underline{1}_{\beta}\in \mathscr{T}^1(\mathbb R), \end{numcases} and \begin{numcases} {} [\overline{Y}^{\dag\dag}_{\beta}(x)]_{\alpha}+3[\overline{Y}^{\dag}_{\beta}(x)]_{\alpha}=[\overline{Y}''_{\beta,-,\alpha}(x)+3\overline{Y}'_{\beta,-,\alpha}(x),\ \overline{Y}''_{\beta,+,\alpha}(x)+3\overline{Y}'_{\beta,+,\alpha}(x)]=0, \label{eq:odd3d}\\ \overline{Y}_{\beta}(0)=\overline{5}_{\beta}\in \mathscr{T}^1(\mathbb R), \\ \overline{Y}_{\beta}^{\dag}(0)=\overline{1}_{\beta}\in \mathscr{T}^1(\mathbb R). \end{numcases} We can thus solve the given T2FIVP because the solution method of type-1 fuzzy differential equations is well known. (\ref{eq:udd3d}) can be solved in the form of \begin{align} \label{eq:uYbpma} \underline{Y}_{\beta,\pm,\alpha}(x)=\underline{C}_{1,\pm}e^{-3x}+\underline{C}_{2,\pm}, \end{align} thus, \begin{align} \label{eq:uYdbpma} \underline{Y}_{\beta,\pm,\alpha}'(x)=-3\underline{C}_{1,\pm}e^{-3x}, \end{align} where these equations represent two equations, one for the upper sign and the other for the lower sign. Similarly, (\ref{eq:odd3d}) can be solved in the form of \begin{align} \label{eq:oYbpma} \overline{Y}_{\beta,\pm,\alpha}(x)=\overline{C}_{1,\pm}e^{-3x}+\overline{C}_{2,\pm}, \end{align} thus, \begin{align} \label{eq:oYdbpma} \overline{Y}_{\beta,\pm,\alpha}'(x)=-3\overline{C}_{1,\pm}e^{-3x}, \end{align} where these equations represent two equations, one for the upper sign and the other for the lower sign. Let us determine type-2 fuzzy initial value ${\it 5}$ by setting them as the triangular quasi-type-2 fuzzy number \begin{align*} {\it 5}=\langle\!\langle 3.5,\ 4,\ 4.5\ ;\ 5\ ;\ 5.5,\ 6,\ 6.5\rangle\!\rangle. 
\end{align*} Since (\ref{eq:LoA0a}), (\ref{eq:LuA0a}), (\ref{eq:RuA0a}), (\ref{eq:RoA0a}), (\ref{eq:XA1a}) and (\ref{eq:YA1a}) imply that \begin{align*} L_{\overline{5}_{0}}^{\alpha}&=5-(1-\alpha)(5-3.5)=\frac{3}{2}\alpha+\frac{7}{2}, \\ L_{\underline{5}_{0}}^{\alpha}&=5-(1-\alpha)(5-4.5)=\frac{1}{2}\alpha+\frac{9}{2}, \\ R_{\underline{5}_{0}}^{\alpha}&=5+(1-\alpha)(5.5-5)=-\frac{1}{2}\alpha+\frac{11}{2}, \\ R_{\overline{5}_{0}}^{\alpha}&=5+(1-\alpha)(6.5-5)=-\frac{3}{2}\alpha+\frac{13}{2}, \\ X_{5_1}^{\alpha}&=5-(1-\alpha)(5-4)=\alpha+4, \\ Y_{5_1}^{\alpha}&=5+(1-\alpha)(6-5)=-\alpha+6, \end{align*} we have \begin{align*} L_{\underline{5}_{\beta}}^{\alpha}&=(\alpha+4)-(1-\beta)\left\{(\alpha+4)-\left(\frac{1}{2}\alpha+\frac{9}{2}\right)\right\}=\frac{1}{2}\alpha+\frac{1}{2}\alpha\beta-\frac{1}{2}\beta+\frac{9}{2}, \\ R_{\underline{5}_{\beta}}^{\alpha}&=(-\alpha+6)+(1-\beta)\left\{\left(-\frac{1}{2}\alpha+\frac{11}{2}\right)-(-\alpha+6)\right\}=-\frac{1}{2}\alpha-\frac{1}{2}\alpha\beta+\frac{1}{2}\beta+\frac{11}{2} \end{align*} and \begin{align*} L_{\overline{5}_{\beta}}^{\alpha}&=(\alpha+4)-(1-\beta)\left\{(\alpha+4)-\left(\frac{3}{2}\alpha+\frac{7}{2}\right)\right\}=\frac{3}{2}\alpha-\frac{1}{2}\alpha\beta+\frac{1}{2}\beta+\frac{7}{2}, \\ R_{\overline{5}_{\beta}}^{\alpha}&=(-\alpha+6)+(1-\beta)\left\{\left(-\frac{3}{2}\alpha+\frac{13}{2}\right)-(-\alpha+6)\right\}=-\frac{3}{2}\alpha+\frac{1}{2}\alpha\beta-\frac{1}{2}\beta+\frac{13}{2} \end{align*} from (\ref{eq:LuAba})-(\ref{eq:RuAba}) and (\ref{eq:LoAba})-(\ref{eq:RoAba}). Therefore, it follows that \begin{align} [\underline{Y}_{\beta}(0)]_{\alpha}=[\underline{5}_{\beta}]_{\alpha}&=\left[\frac{1}{2}\alpha+\frac{1}{2}\alpha\beta-\frac{1}{2}\beta+\frac{9}{2},\ -\frac{1}{2}\alpha-\frac{1}{2}\alpha\beta+\frac{1}{2}\beta+\frac{11}{2}\right], \label{eq:u5ba}\\ [\overline{Y}_{\beta}(0)]_{\alpha}=[\overline{5}_{\beta}]_{\alpha}&=\left[\frac{3}{2}\alpha-\frac{1}{2}\alpha\beta+\frac{1}{2}\beta+\frac{7}{2},\ -\frac{3}{2}\alpha+\frac{1}{2}\alpha\beta-\frac{1}{2}\beta+\frac{13}{2}\right] \label{eq:o5ba} \end{align} from (\ref{eq:uAba}) and (\ref{eq:oAba}). Next, let us determine type-2 fuzzy initial value ${\it 1}$ by setting them as the triangular quasi-type-2 fuzzy number \begin{align*} {\it 1}=\langle\!\langle -0.5,\ 0,\ 0.5\ ;\ 1\ ;\ 1.5,\ 2,\ 2.5\rangle\!\rangle. \end{align*} Therefore it follows that, in the same way as above, \begin{align} [\underline{Y}_{\beta}^{\dag}(0)]_{\alpha}=[\underline{1}_{\beta}]_{\alpha}&=\left[\frac{1}{2}\alpha+\frac{1}{2}\alpha\beta-\frac{1}{2}\beta+\frac{1}{2},\ -\frac{1}{2}\alpha-\frac{1}{2}\alpha\beta+\frac{1}{2}\beta+\frac{3}{2}\right], \label{eq:u1ba}\\ [\overline{Y}_{\beta}^{\dag}(0)]_{\alpha}=[\overline{1}_{\beta}]_{\alpha}&=\left[\frac{3}{2}\alpha-\frac{1}{2}\alpha\beta+\frac{1}{2}\beta-\frac{1}{2},\ -\frac{3}{2}\alpha+\frac{1}{2}\alpha\beta-\frac{1}{2}\beta+\frac{5}{2}\right]. \label{eq:o1ba} \end{align} Substituting (\ref{eq:u5ba}) and (\ref{eq:u1ba}) for (\ref{eq:uYbpma}), we have \begin{align} \underline{Y}_{\beta,-,\alpha}(x)&=\left(-\frac{1}{6}\alpha-\frac{1}{6}\alpha\beta+\frac{1}{6}\beta-\frac{1}{6}\right)e^{-3x}+\left(\frac{2}{3}\alpha+\frac{2}{3}\alpha\beta-\frac{2}{3}\beta+\frac{14}{3}\right), \label{eq:uYb-a}\\ \underline{Y}_{\beta,+,\alpha}(x)&=\left(\frac{1}{6}\alpha+\frac{1}{6}\alpha\beta-\frac{1}{6}\beta-\frac{1}{2}\right)e^{-3x}+\left(-\frac{2}{3}\alpha-\frac{2}{3}\alpha\beta+\frac{2}{3}\beta+6\right). 
\end{align} Similarly, substituting (\ref{eq:o5ba}) and (\ref{eq:o1ba}) for (\ref{eq:oYbpma}), we have \begin{align} \overline{Y}_{\beta,-,\alpha}(x)&=\left(-\frac{1}{2}\alpha+\frac{1}{6}\alpha\beta-\frac{1}{6}\beta+\frac{1}{6}\right)e^{-3x}+\left(2\alpha-\frac{2}{3}\alpha\beta+\frac{2}{3}\beta+\frac{10}{3}\right), \\ \overline{Y}_{\beta,+,\alpha}(x)&=\left(\frac{1}{2}\alpha-\frac{1}{6}\alpha\beta+\frac{1}{6}\beta-\frac{5}{6}\right)e^{-3x}+\left(-2\alpha+\frac{2}{3}\alpha\beta-\frac{2}{3}\beta+\frac{22}{3}\right). \label{eq:oYb+a} \end{align} Hence, the desired solution is composed of the $(\alpha,\beta)$-cut set \begin{align*} [{\cal Y}(x)]_{\beta}^{\alpha}=\left\langle\left[\underline{Y}_{\beta,-,\alpha}(x),\underline{Y}_{\beta,+,\alpha}(x)\right],\ \left[\overline{Y}_{\beta,-,\alpha}(x),\overline{Y}_{\beta,+,\alpha}(x)\right]\right\rangle \end{align*} with (\ref{eq:uYb-a})-(\ref{eq:oYb+a}). The same result is obtained by solving the given problem in the (1,2)-case. ({\tt A.C.}) \end{sol} \begin{figure} \caption{The crisp and fuzzy solution of Problem \ref{prob:P1}} \label{fig:P1a1/3b1/2} \end{figure} {\small \begin{rem} Letting $\alpha=\beta=1$ in (\ref{eq:uYb-a})-(\ref{eq:oYb+a}), we have the crisp solution \[ \textcolor{magenta}{y(x)=-\frac{1}{3}e^{-3x}+\frac{16}{3}} \] to the crisp IVP: \[ y''(x)+3y'(x)=0,\quad y(0)=5,\quad y'(0)=1. \] We can thus see that type-2 (or type-1) fuzzy differential equation theory is an extension of the crisp one. Also, the solution for $(\alpha,\beta)=(1/3,1/2)$, with the parametric forms \begin{align*} \textcolor{teal}{[\underline{Y}_{1/2}(x)]_{1/3}}&\textcolor{teal}{=\left[-\frac{1}{6}e^{-3x}+\frac{14}{3},\ -\frac{1}{2}e^{-3x}+6\right]}, \\ \textcolor{orange}{[\overline{Y}_{1/2}(x)]_{1/3}}&\textcolor{orange}{=\left[-\frac{1}{18}e^{-3x}+\frac{38}{9},\ -\frac{11}{18}e^{-3x}+\frac{58}{9}\right]} \end{align*} is as shown in Figure \ref{fig:P1a1/3b1/2}. \end{rem} } \subsection{Case of Negative Coefficients in the Sense of Hukuhara Differences} \begin{prob} \label{prob:P2} Let ${\cal Y}:[0,1]\to \mathscr{T}^2(\mathbb R)$ be a type-2 fuzzy function. Then, solve T2FIVPs: \begin{numcases} {} {\rm D}^2{\cal Y}(x)-{\cal Y}(x)=0, \label{eq:P2d2-}\\ {\cal Y}(0)={\it 5}\in \mathscr{QT}^2(\mathbb R), \\ {\rm D}{\cal Y}(0)={\it 1}\in \mathscr{QT}^2(\mathbb R). \end{numcases} \end{prob} \begin{sol} We consider the $(\alpha,\beta)$-cut set of (\ref{eq:P2d2-}): \begin{align*} \left\langle[\underline{Y}^{\dag\dag}_{\beta}(x)]_{\alpha}-[\underline{Y}_{\beta}(x)]_{\alpha},\ [\overline{Y}^{\dag\dag}_{\beta}(x)]_{\alpha}-[\overline{Y}_{\beta}(x)]_{\alpha}\right\rangle=0, \quad \alpha\in [0,1]. \end{align*} This can be solved by (1,1) or (2,2)-T1-differentiation.
So, considering (1,1)-case, we have the following two T1FIVPs: \begin{numcases} {} [\underline{Y}^{\dag\dag}_{\beta}(x)]_{\alpha}-[\underline{Y}_{\beta}(x)]_{\alpha}=[\underline{Y}''_{\beta,-,\alpha}(x)-\underline{Y}_{\beta,-,\alpha}(x),\ \underline{Y}''_{\beta,+,\alpha}(x)-\underline{Y}_{\beta,+,\alpha}(x)]=0, \label{eq:P2-1u}\\ \underline{Y}_{\beta}(0)=\underline{5}_{\beta}\in \mathscr{T}^1(\mathbb R), \\ \underline{Y}_{\beta}^{\dag}(0)=\underline{1}_{\beta}\in \mathscr{T}^1(\mathbb R), \label{eq:P2uYbd0} \end{numcases} and \begin{numcases} {} [\overline{Y}^{\dag\dag}_{\beta}(x)]_{\alpha}-[\overline{Y}_{\beta}(x)]_{\alpha}=[\overline{Y}''_{\beta,-,\alpha}(x)-\overline{Y}_{\beta,-,\alpha}(x),\ \overline{Y}''_{\beta,+,\alpha}(x)-\overline{Y}_{\beta,+,\alpha}(x)]=0, \label{eq:dd2H-1a}\\ \overline{Y}_{\beta}(0)=\overline{5}_{\beta}\in \mathscr{T}^1(\mathbb R), \\ \overline{Y}_{\beta}^{\dag}(0)=\overline{1}_{\beta}\in \mathscr{T}^1(\mathbb R). \label{eq:P2oYbd0} \end{numcases} That is, rewriting problems (\ref{eq:P2-1u})-(\ref{eq:P2uYbd0}) and (\ref{eq:dd2H-1a})-(\ref{eq:P2oYbd0}), we have \begin{align} \underline{Y}_{\beta,-,\alpha}(x)&=\underline{C}_{-}e^{-x}+\underline{D}_{-}e^{x}, \label{eq:P2first}\\ \underline{Y}_{\beta,-,\alpha}(0)&=\frac{1}{2}\alpha+\frac{1}{2}\alpha\beta-\frac{1}{2}\beta+\frac{9}{2}, \\ \underline{Y}_{\beta,-,\alpha}^{\dag}(0)&=\frac{1}{2}\alpha+\frac{1}{2}\alpha\beta-\frac{1}{2}\beta+\frac{1}{2}; \end{align} \begin{align} \underline{Y}_{\beta,+,\alpha}(x)&=\underline{C}_{+}e^{-x}+\underline{D}_{+}e^{x}, \\ \underline{Y}_{\beta,+,\alpha}(0)&=-\frac{1}{2}\alpha-\frac{1}{2}\alpha\beta+\frac{1}{2}\beta+\frac{11}{2}, \\ \underline{Y}_{\beta,+,\alpha}^{\dag}(0)&=-\frac{1}{2}\alpha-\frac{1}{2}\alpha\beta+\frac{1}{2}\beta+\frac{3}{2}; \end{align} \begin{align} \overline{Y}_{\beta,-,\alpha}(x)&=\overline{C}_{-}e^{-x}+\overline{D}_{-}e^{x}, \\ \overline{Y}_{\beta,-,\alpha}(0)&=\frac{3}{2}\alpha-\frac{1}{2}\alpha\beta+\frac{1}{2}\beta+\frac{7}{2}, \\ \overline{Y}_{\beta,-,\alpha}^{\dag}(0)&=\frac{3}{2}\alpha-\frac{1}{2}\alpha\beta+\frac{1}{2}\beta-\frac{1}{2}; \end{align} \begin{align} \overline{Y}_{\beta,+,\alpha}(x)&=\overline{C}_{+}e^{-x}+\overline{D}_{+}e^{x}, \\ \overline{Y}_{\beta,+,\alpha}(0)&=-\frac{3}{2}\alpha+\frac{1}{2}\alpha\beta-\frac{1}{2}\beta+\frac{13}{2}, \\ \overline{Y}_{\beta,+,\alpha}^{\dag}(0)&=-\frac{3}{2}\alpha+\frac{1}{2}\alpha\beta-\frac{1}{2}\beta+\frac{5}{2}. \label{eq:P2last} \end{align} We can obtain $\underline{Y}_{\beta,\pm,\alpha}(x)$ and $\overline{Y}_{\beta,\pm,\alpha}(x)$ by solving these: \begin{align} [\underline{Y}_{\beta}(x)]_{\alpha}&=[\underline{Y}_{\beta,-,\alpha}(x),\ \underline{Y}_{\beta,+,\alpha}(x)] \nonumber\\ &=\left[2e^{-x}+\frac{1}{2}(\alpha+\alpha\beta-\beta+5)e^x,\ 2e^{-x}+\frac{1}{2}(-\alpha-\alpha\beta+\beta+7)e^x\right], \label{eq:P2[uYb]a}\\ [\overline{Y}_{\beta}(x)]_{\alpha}&=[\overline{Y}_{\beta,-,\alpha}(x),\ \overline{Y}_{\beta,+,\alpha}(x)] \nonumber\\ &=\left[2e^{-x}+\frac{1}{2}(3\alpha-\alpha\beta+\beta+3)e^x,\ 2e^{-x}+\frac{1}{2}(-3\alpha+\alpha\beta-\beta+9)e^x\right]. \label{eq:P2[oYb]a} \end{align} Hence, the desired solution is composed of the $(\alpha,\beta)$-cut \begin{align*} [{\cal Y}(x)]_{\beta}^{\alpha}=\left\langle\left[\underline{Y}_{\beta,-,\alpha}(x),\underline{Y}_{\beta,+,\alpha}(x)\right],\ \left[\overline{Y}_{\beta,-,\alpha}(x),\overline{Y}_{\beta,+,\alpha}(x)\right]\right\rangle. \end{align*} with (\ref{eq:P2[uYb]a}) and (\ref{eq:P2[oYb]a}). 
The same result is obtained by solving the given problem in the (2,2)-case. ({\tt A.C.}) \end{sol} \begin{figure} \caption{The crisp and fuzzy solution of Problem \ref{prob:P2}} \label{fig:P2a1/3b1/2} \end{figure} {\small \begin{rem} Letting $\alpha=\beta=1$ in (\ref{eq:P2[uYb]a}) and (\ref{eq:P2[oYb]a}), we have the crisp solution \[ \textcolor{magenta}{y(x)=2e^{-x}+3e^x} \] to the crisp IVP: \[ y''(x)-y(x)=0,\quad y(0)=5,\quad y'(0)=1. \] We can thus see that type-2 (or type-1) fuzzy differential equation theory is an extension of the crisp one. Also, the solution for $(\alpha,\beta)=(1/3,1/2)$, with the parametric forms \begin{align*} \textcolor{teal}{[\underline{Y}_{1/2}(x)]_{1/3}}&\textcolor{teal}{=\left[2e^{-x}+\frac{5}{2}e^x,\ 2e^{-x}+\frac{7}{2}e^x\right]}, \\ \textcolor{orange}{[\overline{Y}_{1/2}(x)]_{1/3}}&\textcolor{orange}{=\left[2e^{-x}+\frac{13}{6}e^x,\ 2e^{-x}+\frac{23}{6}e^x\right]} \end{align*} is as shown in Figure \ref{fig:P2a1/3b1/2}. \end{rem} } \subsection{Case of Negative Coefficients in the Usual Sense} \begin{prob} \label{prob:P3} Let ${\cal Y}:[0,1]\to \mathscr{T}^2(\mathbb R)$ be a type-2 fuzzy function. Then, solve T2FIVPs: \begin{numcases} {} {\rm D}^2{\cal Y}(x)+(-1){\cal Y}(x)=0, \label{eq:d2B3}\\ {\cal Y}(0)={\it 5}\in \mathscr{QT}^2(\mathbb R), \\ {\rm D}{\cal Y}(0)={\it 1}\in \mathscr{QT}^2(\mathbb R). \end{numcases} \end{prob} \begin{sol} We consider the $(\alpha,\beta)$-cut set of (\ref{eq:d2B3}): \begin{align*} \left\langle[\underline{Y}^{\dag\dag}_{\beta}(x)]_{\alpha}+(-1)[\underline{Y}_{\beta}(x)]_{\alpha},\ [\overline{Y}^{\dag\dag}_{\beta}(x)]_{\alpha}+(-1)[\overline{Y}_{\beta}(x)]_{\alpha}\right\rangle=0, \quad \alpha\in [0,1]. \end{align*} This can be solved by (1,2) or (2,1)-T1-differentiation. So, considering the (1,2)-case, we have the following two T1FIVPs: \begin{numcases} {} [\underline{Y}^{\dag\dag}_{\beta}(x)]_{\alpha}+(-1)[\underline{Y}_{\beta}(x)]_{\alpha}=[\underline{Y}''_{\beta,+,\alpha}(x)-\underline{Y}_{\beta,+,\alpha}(x),\ \underline{Y}''_{\beta,-,\alpha}(x)-\underline{Y}_{\beta,-,\alpha}(x)]=0, \label{eq:P3-1u}\\ \underline{Y}_{\beta}(0)=\underline{5}_{\beta}\in \mathscr{T}^1(\mathbb R), \\ \underline{Y}_{\beta}^{\dag}(0)=\underline{1}_{\beta}\in \mathscr{T}^1(\mathbb R), \label{eq:P3uYbd0} \end{numcases} and \begin{numcases} {} [\overline{Y}^{\dag\dag}_{\beta}(x)]_{\alpha}+(-1)[\overline{Y}_{\beta}(x)]_{\alpha}=[\overline{Y}''_{\beta,+,\alpha}(x)-\overline{Y}_{\beta,+,\alpha}(x),\ \overline{Y}''_{\beta,-,\alpha}(x)-\overline{Y}_{\beta,-,\alpha}(x)]=0, \label{eq:dd2-1a}\\ \overline{Y}_{\beta}(0)=\overline{5}_{\beta}\in \mathscr{T}^1(\mathbb R), \\ \overline{Y}_{\beta}^{\dag}(0)=\overline{1}_{\beta}\in \mathscr{T}^1(\mathbb R). \label{eq:P3oYbd0} \end{numcases} We thus obtain the same lower and upper equations (\ref{eq:P2first})-(\ref{eq:P2last}) as in Problem \ref{prob:P2}. By solving problems (\ref{eq:P3-1u})-(\ref{eq:P3uYbd0}) and (\ref{eq:dd2-1a})-(\ref{eq:P3oYbd0}), we can obtain $\underline{Y}_{\beta,\pm,\alpha}(x)$ and $\overline{Y}_{\beta,\pm,\alpha}(x)$ respectively. Hence, the desired solution is the same as that of Problem \ref{prob:P2} and is composed of the $(\alpha,\beta)$-cut \begin{align*} [{\cal Y}(x)]_{\beta}^{\alpha}=\left\langle\left[\underline{Y}_{\beta,-,\alpha}(x),\underline{Y}_{\beta,+,\alpha}(x)\right],\ \left[\overline{Y}_{\beta,-,\alpha}(x),\overline{Y}_{\beta,+,\alpha}(x)\right]\right\rangle.
\end{align*} with (\ref{eq:P2[uYb]a}) and (\ref{eq:P2[oYb]a}). The same result is obtained by solving the given problem in the (2,1)-case. ({\tt A.C.}) \end{sol} {\small \begin{rem} Problems \ref{prob:P2} and \ref{prob:P3} imply that Hukuhara differentiation settles the question of how negative coefficients of T1/T2FDEs should be treated. As a result, negative coefficients may be unified into Hukuhara differences. \end{rem} } \section{Conclusion} In this paper we have obtained some theorems on type-2 fuzzy calculus and concrete solution methods of T2FIVPs for second-order T2FDEs with constant coefficients. T2FDEs with fuzzy coefficients can also be considered; the solution method is exactly the same, although the computations become more involved than in the constant-coefficient case. In the end, type-2 fuzzy theory almost amounts to considering two (i.e. left and right) type-1 fuzzy sets. At first glance, it may thus seem that type-2 discussions are unnecessary and type-1 discussions suffice. For example, the problem of measurement by a highly experienced expert and an inexperienced student, explained in the Introduction, can indeed be discussed by comparing their type-1 membership functions. However, the concepts of the foot-print set and the principle set make it possible to capture the difference between the two as integrated information. The same is true when the foot-print set (resp. principle set) is regarded as an old (resp. present) experiment in the case where the parameters of second-order differential equations are determined by experimental rules. We believe that herein lies the importance and usefulness of type-2 fuzzy theory and T2FDEs. When we want to judge whether a certain value is close to $3$, for example, a type-2 fuzzy number is used if the grade of $3$ is `about $0.7$'. In this paper, we adopted Definition \ref{df:T2FS} to describe that situation. There are other definitions of a type-2 fuzzy set. For example, we can also define a type-2 fuzzy set ${\cal A}$ by \begin{align*} &{\cal A}:=\{((x,\mu_0(x)),\ \mu_{{\cal A}}(\mu_0(x),u)):x\in X\}, \\ &\mu_0:X\to [0,1], \\ &\mu_{{\cal A}}:X\times R(\mu_0)\to [0,1], \end{align*} which differs from our Definition \ref{df:T2FS}. Here $R(\mu_0)$ denotes the range of $\mu_0$, which is called the primary membership function. This definition regards the membership function $\mu_{{\cal A}}$ as a two-variable function that implicitly contains the type-1 membership function $\mu_0$. In particular, a `triangular' type-2 fuzzy number ${\cal U}$ can also be defined as \begin{align*} \mu_{{\cal U}}(\mu_0(x),u):= \begin{cases} \displaystyle 1-\frac{|u-\mu_0(x)|}{\min\{\mu_0(x),1-\mu_0(x)\}}, & \mu_0(x)\notin \{0,1\}; \\ \begin{cases} 1, & u=\mu_0(x); \\ 0, & {\rm otherwise}; \end{cases} & \mu_0(x)\in \{0,1\}. \end{cases} \end{align*} The primary membership function $\mu_0$ should then be chosen subjectively by us. For example, if we set $\mu_0(x)=\langle\!\langle 1,2,3\rangle\!\rangle$, the graph of ${\cal U}$ is as shown in Figure \ref{fig:TTFN}. \begin{figure} \caption{`Triangular' type-2 fuzzy number with $\mu_0(x)=\langle\!\langle 1,2,3\rangle\!\rangle$} \label{fig:TTFN} \end{figure} This definition fits our intuition better, but the calculations become more complicated. In fact, it can be seen that the sides are curved, unlike those of perfect quasi-type-2 fuzzy numbers. We would like to study differential equations for type-2 fuzzy numbers under this definition as a future task.
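As a small, non-authoritative illustration of the alternative definition above, the following Python sketch evaluates the secondary grade $\mu_{{\cal U}}(\mu_0(x),u)$ for the choice $\mu_0=\langle\!\langle 1,2,3\rangle\!\rangle$; clipping the grade to $[0,1]$ is our own assumption, since the displayed formula can become negative when $u$ is far from $\mu_0(x)$.
\begin{verbatim}
def mu0(x):
    # primary membership function: the triangular type-1 number <<1, 2, 3>>
    return max(1.0 - abs(x - 2.0), 0.0)

def secondary_grade(x, u):
    # 'triangular' type-2 secondary grade from the Conclusion; the clip to
    # [0, 1] is our assumption, the formula itself is used as displayed above
    m = mu0(x)
    if m in (0.0, 1.0):
        return 1.0 if u == m else 0.0
    g = 1.0 - abs(u - m) / min(m, 1.0 - m)
    return min(max(g, 0.0), 1.0)

# sample the surface on a coarse grid, e.g. to sketch a picture like fig:TTFN
for x in [1.0, 1.5, 2.0, 2.5, 3.0]:
    print(x, [round(secondary_grade(x, u / 10), 2) for u in range(11)])
\end{verbatim}
Plotting these sampled values reproduces a picture of the kind shown in Figure \ref{fig:TTFN}.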
\appendix \section{Appendix} In this appendix, we collect the notation, definitions and theorems on type-1 fuzzy numbers that are used in the body of the paper. \subsection{Type-1 Fuzzy Numbers} \begin{df} A fuzzy set $u:\mathbb R\to [0,1]$ is a type-1 fuzzy number if and only if \begin{itemize} \item[i)] $u$ is normal, that is, there exists an $x_0\in \mathbb R$ such that $u(x_0)=1$; \item[ii)] $u$ is fuzzy convex, that is, $u(tx+(1-t)y)\ge \min\{u(x),u(y)\}$ for any $x,y\in \mathbb R$ and $t\in [0,1]$; \item[iii)] $u$ is upper semi-continuous; \item[iv)] the support ${\rm supp}(u):={\rm cl}(\{x\in \mathbb R:u(x)>0\})$ of $u$ is compact, where ${\rm cl}(S)$ stands for the closure of the crisp set $S$. \end{itemize} \end{df} It is well known in fuzzy theory that arguments about fuzzy sets can be reduced to arguments about their level cut sets. The $\alpha$-cut set $[A]_{\alpha}$ of a type-1 fuzzy set $A$ on $X$ is defined by \begin{align*} [A]_{\alpha}&:=\{x\in X:A(x)\ge \alpha\},\quad \alpha\in (0,1]; \\ [A]_0&:={\rm supp}(A), \hspace{2.1cm} \alpha=0, \end{align*} and these level cut sets make up the original type-1 fuzzy set $A$: \begin{align*} A=\bigcup_{\alpha\in [0,1]}\alpha[A]_{\alpha} \end{align*} where $\alpha[A]_{\alpha}:X\to \{0,\alpha\}$ is a type-1 fuzzy set. Thus, for most problems it suffices to argue in terms of $\alpha$-cut sets. In particular, a type-1 fuzzy number is a fuzzy set every $\alpha$-cut set of which is a bounded closed interval, so we introduce the following notation. \begin{df} We denote the $\alpha$-cut set of a type-1 fuzzy number $u:\mathbb R\to [0,1]$ by \begin{align} \label{eq:para} [u]_{\alpha}=[u_{-,\alpha},u_{+,\alpha}] \end{align} for any $\alpha\in [0,1]$. (\ref{eq:para}) is called the parametric form of $u$. \end{df} {\small \begin{rem} We write $0$ for crisp zero in this paper and its $\alpha$-cut set is represented as the closed interval $[0,0]$. In the same way, we regard a crisp real number $r$ as the closed interval $[r,r]$ and thereby treat crisp numbers as fuzzy numbers. \end{rem} } \begin{rem} Each end of $[u]_{\alpha}=[u_{-,\alpha},u_{+,\alpha}]$ should satisfy the following: \begin{itemize} \item[1)] $u_{-,\alpha}$ is bounded, monotone increasing, left-continuous with respect to $\alpha\in (0,1]$ and right-continuous on $\alpha=0$; \item[2)] $u_{+,\alpha}$ is bounded, monotone decreasing, left-continuous with respect to $\alpha\in (0,1]$ and right-continuous on $\alpha=0$; \item[3)] $u_{-,\alpha}\le u_{+,\alpha}$ for any $\alpha\in [0,1]$. \end{itemize} In short, a family $[u]_{\alpha}$ that fails any of the above properties cannot be the parametric form of a fuzzy number. \end{rem} We hereafter omit the description `$\alpha\in [0,1]$' when we argue $\alpha$-cut sets or parametric forms. \begin{df} Let $u,v$ be type-1 fuzzy numbers on $\mathbb R$. We denote the $\alpha$-cut sets of $u,v$ by \begin{align} \label{eq:uvalpha} [u]_{\alpha}=[u_{-,\alpha},u_{+,\alpha}],\quad [v]_{\alpha}=[v_{-,\alpha},v_{+,\alpha}] \end{align} respectively. Then, $u=v$ if and only if $[u]_{\alpha}=[v]_{\alpha}$, i.e. \begin{align*} u_{-,\alpha}=v_{-,\alpha}\quad {\rm and}\quad u_{+,\alpha}=v_{+,\alpha} \end{align*} for any $\alpha\in [0,1]$. \end{df} \begin{df} \label{df:+kop1} Let $u,v$ be type-1 fuzzy numbers on $\mathbb R$ and $k\in \mathbb R$. We denote the $\alpha$-cut sets of them by (\ref{eq:uvalpha}). The sum $u+v$ of $u$ and $v$ is defined by \begin{align*} [u+v]_{\alpha}:=[u]_{\alpha}+[v]_{\alpha}, \end{align*} i.e.
\begin{align*} [u_{-,\alpha},u_{+,\alpha}]+[v_{-,\alpha},v_{+,\alpha}] =[u_{-,\alpha}+v_{-,\alpha},\ u_{+,\alpha}+v_{+,\alpha}]. \end{align*} Moreover, the scalar multiple $ku$ of $u$ is defined by \begin{align*} [ku]_{\alpha}:=k[u]_{\alpha} = \begin{cases} [ku_{-,\alpha},ku_{+,\alpha}], & k\ge 0; \\ [ku_{+,\alpha},ku_{-,\alpha}], & k<0. \end{cases} \end{align*} \end{df} \begin{df} Let $u,v$ be type-1 fuzzy numbers on $\mathbb R$. We denote the $\alpha$-cut sets of them by (\ref{eq:uvalpha}). The product $uv$ of $u$ and $v$ is defined by \begin{align*} [uv]_{\alpha}=[u]_{\alpha}[v]_{\alpha} =\left[\min_{i,j\in \{-,+\}}u_{i,\alpha}v_{j,\alpha},\ \max_{i,j\in \{-,+\}}u_{i,\alpha}v_{j,\alpha}\right] \end{align*} for any $\alpha\in [0,1]$. \end{df} \begin{df} Let $u,v$ be type-1 fuzzy numbers on $\mathbb R$. We denote the $\alpha$-cut sets of them by (\ref{eq:uvalpha}). Then, the order relationship between them, $u\le v$, is defined as \begin{align*} [u]_{\alpha}\le [v]_{\alpha}, \quad {\rm i.e.}\quad u_{-,\alpha}\le v_{-,\alpha}\ {\rm and}\ u_{+,\alpha}\le v_{+,\alpha} \end{align*} for any $\alpha \in [0,1]$. In particular, the non-negativity (resp. positivity) of a type-1 fuzzy number $u$, $u\ge 0$ (resp. $u>0$), is defined by \begin{align*} u_{-,\alpha}\ge 0\quad ({\rm resp.}\ u_{-,\alpha}>0) \end{align*} for any $\alpha \in [0,1]$. \end{df} \begin{df} Let $u,v$ be type-1 fuzzy numbers on $\mathbb R$. We denote the $\alpha$-cut sets of them by (\ref{eq:uvalpha}). The fuzzy Hausdorff distance $d_{{\rm H}}$ between $u$ and $v$ is defined by \begin{align*} d_{{\rm H}}(u,v):=\sup_{\alpha\in [0,1]}\max\{|u_{-,\alpha}-v_{-,\alpha}|,|u_{+,\alpha}-v_{+,\alpha}|\}. \end{align*} We write $\mathscr{T}^1(\mathbb R)$ for the type-1 fuzzy number space equipped with the $d_{{\rm H}}$-topology. \end{df} \subsection{Type-1 Fuzzy Number-valued Functions \label{app:T1FNVF}} Let $I$ be an interval which is a proper subset of $\mathbb R$ or let $I=\mathbb R$. We can consider the type-1 fuzzy function $F:\mathscr{T}^1(I)\to \mathscr{T}^1(\mathbb R)$ by using Zadeh's extension principle for a crisp function $f:I\to \mathbb R$. We set $\mathscr{T}^1(I)=I$ in this paper, that is, we consider crisp-variable type-1 fuzzy number-valued functions exclusively. The $\alpha$-cut set of $F:I\to \mathscr{T}^1(\mathbb R)$ is represented by \begin{align*} [F(x)]_{\alpha}:=[F_{-,\alpha}(x),\ F_{+,\alpha}(x)] \end{align*} for all $x\in I$ and any $\alpha\in [0,1]$. We define the difference of type-1 fuzzy numbers in the following sense, so that we can consider the difference quotient of $F$. \begin{df} Let $u,v\in \mathscr{T}^1(\mathbb R)$. If there exists some $w\in \mathscr{T}^1(\mathbb R)$ such that $u=v+w$, we write $w=u-v$ and call it the T1-Hukuhara difference of $u$ and $v$. \end{df} {\small \begin{rem} For any $u\in \mathscr{T}^1(\mathbb R)$, $-u$ stands for $0-u$, i.e. \begin{align*} [-u]_{\alpha}=[-u_{-,\alpha},-u_{+,\alpha}], \end{align*} whereas \begin{align*} [(-1)u]_{\alpha}=[-u_{+,\alpha},-u_{-,\alpha}]. \end{align*} We should remark that, in general, \begin{align*} u+(-1)v\neq u-v \end{align*} for any $u,v\in \mathscr{T}^1(\mathbb R)$. \end{rem} } We adopt, in this paper, dagger ${\dag}$ and double dagger ${\ddag}$ to denote fuzzy derivatives in the sense of Hukuhara. We use prime $'$ as the crisp derivative notation. \begin{df} Let $F:I\to \mathscr{T}^1(\mathbb R)$ and let $h>0$ be a crisp number.
$F$ is T1-differentiable in the first form at some $x_0\in I$ if and only if there exist $F(x_0+h)-F(x_0)$ and $F(x_0)-F(x_0-h)$ such that the fuzzy limit \begin{align*} F^\dag(x_0) :=\lim_{h\downarrow 0}\frac{F(x_0+h)-F(x_0)}{h} =\lim_{h\downarrow 0}\frac{F(x_0)-F(x_0-h)}{h} \end{align*} exists. Moreover, $F$ is T1-differentiable in the second form at some $x_0\in I$ if and only if there exist $F(x_0)-F(x_0+h)$ and $F(x_0-h)-F(x_0)$ such that the fuzzy limit \begin{align*} F^\ddag(x_0) :=\lim_{h\downarrow 0}\frac{F(x_0)-F(x_0+h)}{-h} =\lim_{h\downarrow 0}\frac{F(x_0-h)-F(x_0)}{-h} \end{align*} exists. Here the above differences (resp. limits) are taken in the sense of the T1-Hukuhara difference (resp. the metric $d_{{\rm H}}$). If $F$ is T1-differentiable in both senses at any $x\in I$, $F^\dag$ and $F^\ddag$ are called the (1)-T1-derivative and (2)-T1-derivative of $F$, respectively. \end{df} {\small \begin{rem} \label{rem:thirdfourth} In addition to the above two forms, it is possible to consider the following two forms, that is, \begin{itemize} \item[i)] the third form: \begin{align} \label{eq:thirdf} \lim_{h\downarrow 0}\frac{F(x_0+h)-F(x_0)}{h} =\lim_{h\downarrow 0}\frac{F(x_0-h)-F(x_0)}{-h}, \end{align} \item[ii)] the fourth form: \begin{align} \label{eq:fourthf} \lim_{h\downarrow 0}\frac{F(x_0)-F(x_0+h)}{-h} =\lim_{h\downarrow 0}\frac{F(x_0)-F(x_0-h)}{h}. \end{align} \end{itemize} However, it is known (\cite{BG}, Theorem 7) that (\ref{eq:thirdf}) becomes a crisp number if there exist $F(x_0+h)-F(x_0)$ and $F(x_0-h)-F(x_0)$. Likewise, (\ref{eq:fourthf}) becomes crisp if there exist $F(x_0)-F(x_0+h)$ and $F(x_0)-F(x_0-h)$. We shall thus ignore the third and fourth forms. \end{rem} } {\small \begin{rem} We comment on the validity of using the dagger symbols $\dag,\ddag$ to denote fuzzy derivatives. First of all, we can avoid confusion between the crisp derivative and the fuzzy derivative by using $\dag$ and $\ddag$ (see e.g. Theorems \ref{thm:Ffda}, \ref{thm:Ff2da}, \ref{thm:cftimesff} and \ref{thm:compo} later). Secondly, if we keep the order of differentiation to 2 or less, we can use $\dag,\ddag$ in the same sense as the prime $'$ while clarifying what kind of fuzzy derivative is meant (in particular, see Theorems \ref{thm:MNpara}, \ref{thm:FT2sdba}, \ref{thm:F+GsHdd} and \ref{thm:F-GT2d} later). Moreover, the initial letter of a word is often used to represent a mathematical concept or quantity, such as $\Delta$ ({\bf D}elta) for differences or $D$ for derivatives. From this point of view, `dagger' has the appropriate initial letter {\bf D} to represent derivatives. \end{rem} } By using this definition repeatedly, we find that the $n$th-order T1-derivative of $F$ has $2^n$ forms. In particular, when applied to fuzzy differential equations, we need to choose the most appropriate solution from these $2^n$ candidate solutions. \begin{thm} \label{thm:Ffda} Let $F:I\to \mathscr{T}^1(\mathbb R)$ be T1-differentiable on $I$. Then, the parametric forms of its T1-derivatives are given by \begin{itemize} \item[1)] the first parametric form: \begin{align*} [F^\dag(x)]_{\alpha}=[F_{-,\alpha}'(x),F_{+,\alpha}'(x)], \end{align*} \item[2)] the second parametric form: \begin{align*} [F^\ddag(x)]_{\alpha}=[F_{+,\alpha}'(x),F_{-,\alpha}'(x)]. \end{align*} \end{itemize} \end{thm} \begin{thm}[\cite{K}, Theorem 5.2; \cite{CCRF}, Theorem 5] \label{thm:Ff2da} Let $F:I\to \mathscr{T}^1(\mathbb R)$ be second-order T1-differentiable on $I$.
Then, the parametric forms of the second-order T1-derivatives are given by \begin{itemize} \item[1)] the first and first parametric form: \begin{align*} [F^{\dag\dag}(x)]_{\alpha}=[F_{-,\alpha}''(x),F_{+,\alpha}''(x)], \end{align*} \item[2)] the first and second parametric form: \begin{align*} [F^{\dag\ddag}(x)]_{\alpha}=[F_{+,\alpha}''(x),F_{-,\alpha}''(x)], \end{align*} \item[3)] the second and first parametric form: \begin{align*} [F^{\ddag\dag}(x)]_{\alpha}=[F_{+,\alpha}''(x),F_{-,\alpha}''(x)], \end{align*} \item[4)] the second and second parametric form: \begin{align*} [F^{\ddag\ddag}(x)]_{\alpha}=[F_{-,\alpha}''(x),F_{+,\alpha}''(x)]. \end{align*} \end{itemize} \end{thm} {\small \begin{center} {\bf Acknowledgement} \end{center} The authors are deeply grateful to Dr. Jiro Inaida for his valuable and heartfelt advice on fuzzy number theory. } {\small \noindent {\bf The Authors}: \noindent {\sc Norihiro Someyama}; \quad Completed Ph.D. program without a Ph.D. degree, Major in Mathematics and Mathematical Physics \noindent {\sc Hiroaki Uesu};\quad Ph.D., Major in Mathematics and Information Theory \noindent {\sc Kimiaki Shinkai};\quad Ph.D., Major in Mathematics and Mathematical Education \noindent {\sc Shuya Kanagawa};\quad Ph.D., Major in Mathematics and Statistics } \end{document}
\begin{document} \title{Quantum decoherence in the rotation of small molecules} \author{A.~Adelsw\"ard and S.~Wallentowitz\footnote[7]{email: [email protected]}} \address{Emmy--Noether Nachwuchsgruppe ``Kollektive Quantenmessung und R\"uckkopplung an Atomen und Molek\"ulen'', Fachbereich Physik, Universit\"at Rostock, Universit\"atsplatz 3, D-18051 Rostock, Germany} \begin{abstract} The dynamics of non-polar diatomic molecules interacting with a far-detuned narrow-band laser field, that only may drive rotational transitions, is studied. The rotation of the molecule is considered both classically and quantum mechanically, providing links to features known from the heavy symmetric top. In particular, quantum decoherence in the molecular rotation, being induced by spontaneous Raman processes, is addressed. It is shown how this decoherence modifies the rotational dynamics in phase space. \end{abstract} \pacs{33.80.-b, 03.65.Yz, 42.50.Ct} \section{Introduction} \label{sec:1} Molecules interacting with electromagnetic fields foster a variety of dynamical phenomena. Specific wave packets of the internuclear vibration have been excited by shaped fs laser pulses~\cite{fs-pulse}. Such vibrational quantum states have then been reconstructed by molecular emission tomography~\cite{met1,met2}. Furthermore, control of the internal molecular quantum state could be achieved~\cite{ctrl1,ctrl2,ctrl3,ctrl4,ctrl5}, for eventually enhancing chemical reactions. Besides these applications of pulsed fields, also interactions with cw laser fields or static fields reveal interesting effects. For instance, the axis of paramagnetic molecules can be aligned in pendular states by applying magnetic fields~\cite{herschbach-B1,herschbach-B2}. Furthermore, it has been shown how electric fields align polar molecules~\cite{herschbach-E}. Non-polar molecules, on the other hand, seem to be rather isolated from the influence of electric fields. They are infrared inactive and thus their rotation is essentially undamped and isolated from the electromagnetic vacuum. Clearly this does not hold for the internuclear vibration and, in addition, the rotational statistics determines via ro-vibrational coupling its relaxation and decoherence properties~\cite{wal-mol1,wal-mol2}. Nevertheless, a permanent dipole moment being absent, non-polar molecules still can be polarised by electrical fields and thus feel an effective alignment force when an anisotropic polarisability prevails. The quantum-mechanical eigenstates and energies have been discussed in Ref.~\cite{herschbach-pol}, where it has been shown that the dependence of the eigenenergies on the field intensity can be used for providing a dipole trapping force. Recent experiments have implemented such a dipole trap for molecules~\cite{PA-trap-dipole}. In general the topic of producing~\cite{mol-PA1,mol-PA2,mol-PA3,mol-PA-BEC1,mol-PA-BEC2,mol-PA-BEC3}, cooling~\cite{buffergas,e-field}, and trapping of ultra-cold molecules~\cite{PA-trap-dipole,mol-trap-magn} became a major focus of research in recent years. Given the ultra-low temperatures of a condensed atomic or possibly molecular gas, the molecular rotation, representing the lowest energy scale apart from centre-of-mass motion, then should play an important role. For sufficiently cold molecules one might expect decoherence first to appear on the rotational energy scale before affecting the energetically higher degrees of freedom, such as the internuclear vibration. 
Moreover, the molecular rotation provides a toolbox for implementing various textbook examples of mechanics, such as spherical pendula and the motion of rigid bodies. In particular, we will focus here on homonuclear, diatomic molecules in a far-detuned linear-polarised laser field. This is a setup relevant also for trapping of photo-associated ultra-cold molecules in a dipole trap~\cite{PA-trap-dipole}. In Sec.~\ref{sec:2} a brief review is given on the classical description of the motion of the molecular axis. A link to a quantum description in terms of stimulated two-photon Raman processes is then presented in Sec.~\ref{sec:3}. Moreover, the theory is extended there by including spontaneous Raman processes in form of a quantum master equation. The effects due to these spontaneous processes on the molecular rotation are evaluated in Sec.~\ref{sec:4} where we provide a phase-space picture by using a Wigner function for the molecular rotation. \section{Classical mechanical analogue} \label{sec:2} \subsection{Classical dynamics} \label{sec:2.1} Consider a diatomic molecule in an electric field of amplitude $E_0$ pointing towards the $z$ direction. The potential energy in such a field is given by \begin{equation} \label{eq:pot-erg} V(\vartheta) = -d E_0 \cos\vartheta - {\textstyle\frac{1}{2}} E_0^2 \Delta \alpha \cos^2\!\vartheta , \end{equation} where $d$ is the permanent dipole moment of the molecule and $\Delta\alpha \!=\! \alpha_\parallel \!-\! \alpha_\perp$ denotes the anisotropy of the polarisability with respect to fields parallel and perpendicular to the molecular axis. The direction of this axis with respect to the electric field is specified by the enclosing angle $\vartheta$, cf.~Fig.~\ref{fig:geometry}. \begin{figure} \caption{Diatomic molecule in an electric field pointing in $z$ direction. The angles $\vartheta$ and $\varphi$ specify the orientation of the molecular axis with respect to the laboratory frame.} \label{fig:geometry} \end{figure} The molecular axis specifies the $z'$-axis of a body-fixed coordinate system, whose orientation with respect to the laboratory frame is specified by Euler angles. From the Lagrange equations one obtains the equations of motion for these as \begin{eqnarray} \label{eq:motion-euler} \Theta \big( \ddot{\vartheta} - \dot{\varphi}^2 \sin\vartheta \cos\vartheta \big) + \frac{\partial V(\vartheta)}{\partial \vartheta} = 0 , \\ \label{eq:Lz-const} \dot{\varphi} \, \sin^2\!\vartheta = {\rm const} , \end{eqnarray} where $\Theta$ is the moment of inertia perpendicular to the molecular axis. Moreover, the conservation of energy $E$ reads \begin{equation} \label{eq:E-const} \Theta \big( \dot{\vartheta}^2 + \dot{\varphi}^2 \sin^2\!\vartheta \big) = 2 \left[ E \!-\! V(\vartheta) \right] . \end{equation} Note that for our case the relevant Euler angles $\vartheta$ and $\varphi$ coincide with the spherical angles as depicted in Fig.~\ref{fig:geometry}. The angular-momentum $\mathbf{L}$ can be expressed in the laboratory frame by the angles and their time derivatives as \begin{equation} \label{eq:L-vec} \mathbf{L} = \Theta \left( \begin{array}{c} \dot{\vartheta} \, \cos\varphi - \dot{\varphi} \, \sin\vartheta \cos\vartheta \sin\varphi \\ \dot{\vartheta} \, \sin\varphi + \dot{\varphi} \, \sin\vartheta \cos\vartheta \cos\varphi \\ \dot{\varphi} \, \sin^2\!\vartheta \end{array} \right) . 
\end{equation} From Eqs~(\ref{eq:Lz-const}) and (\ref{eq:L-vec}) it is seen that the component of the angular momentum in direction of the applied field is a constant of motion: $L_z \!=\! {\rm const}$. Thus the tip of the angular-momentum moves in a plane normal to the electric-field direction. Using the constant of motion $L_z$, Eq.~(\ref{eq:E-const}) can be rewritten as \begin{equation} \frac{\Theta \dot{\vartheta}^2}{2} = E - U(\vartheta) , \end{equation} where the new potential reveals a centrifugal-type barrier \begin{equation} U(\vartheta) = \frac{T_z}{ \sin^2\!\vartheta} - U_d \cos\vartheta - U_\alpha \cos^2\!\vartheta . \end{equation} For later purposes we have introduced here the potential depths $U_d \!=\! dE_0$, $U_\alpha \!=\! \Delta \alpha E_0^2/ 2$ and the $z$-part of the kinetic energy $T_z \!=\! L_z^2 / (2\Theta)$. A molecule with permanent dipole moment, $d \!\neq\! 0$, omitting its anisotropic polarisability, $\alpha_\parallel \!\approx\! \alpha_\perp$, reveals a potential of a spherical pendulum, as shown in Fig.~\ref{fig:d-potential}. \begin{figure} \caption{Nutation potential $U(\vartheta)$ for a polar molecule scaled by the potential depth $U_d$. Solid (dashed) curve corresponds to $T_z / U_d \!=\! 0$ $(0.1)$.} \label{fig:d-potential} \end{figure} The motion in $\varphi$ with constant angular momentum $L_z$ is superimposed by a nutation in $\vartheta$, which is equivalently described by a heavy symmetrical top with vanishing moment of inertia along the (molecular) body axis. The harmonically approximated nutation frequency is in this case $\omega \!=\! \sqrt{U_d/ \Theta}$. \begin{figure} \caption{Nutation potential $U(\vartheta)$ for a nonpolar, polarisable molecule scaled by $U_\alpha$. Solid (dashed) curve corresponds to $\kappa_z \!=\! T_z / U_\alpha \!=\! 0$ $(0.1)$.} \label{fig:alpha-potential} \end{figure} However, for the case of non-polar ($d \!=\! 0$) but polarisable molecules ($\alpha_\parallel \!>\! \alpha_\perp$), which is the focus of this paper, the anisotropic polarisability produces a $\cos^2\!\vartheta$ potential, cf.~Fig.~\ref{fig:alpha-potential}, which for $L_z \!\neq\! 0$ cannot be cast into the form of a spherical pendulum. Nevertheless, a spherical-pendulum-type motion can be expected, since also for this case the solution $\vartheta(t)$ can be obtained in terms of elliptic integrals. Expressed in terms of Jacobian elliptic functions~\cite{as} it reads, \begin{equation} \label{eq:theta-solution} \fl \cos\vartheta(t) = \frac{ \cos \vartheta_0 \, {\rm cn}( \omega t, m ) - \sqrt{(\cos^2\!\vartheta_0 \!-\! c_{l}) (c_{u} \!-\! \cos^2 \! \vartheta_0) \, m / c_u } \, {\rm sn}( \omega t, m ) \, {\rm dn}(\omega t, m)}{1 - (c_u \!-\! \cos^2 \! \vartheta_0) \, {\rm sn}^2 (\omega t, m) \, m/ c_u} , \end{equation} where $\vartheta_0$ is the initial angle at time $t \!=\! 0$. The constants appearing in Eq.~(\ref{eq:theta-solution}) are given as \begin{eqnarray} \label{eq:alpha} m & = & \frac{1}{\epsilon \!+\! 1} , \\ c_{l,u} & = & \frac{m \!-\! \frac{1}{2}}{m} \mp \sqrt{ \frac{1}{4m^2} - \kappa_z} , \\ \omega & = & \sqrt{\frac{2U_\alpha}{\Theta} \frac{c_u}{m}} , \end{eqnarray} where the scaled energies $\epsilon \!=\! E/U_\alpha$ and $\kappa_z \!=\! T_z/U_\alpha$ have been used. \subsection{Classical types of motion} \label{sec:2.2} In the asymptotic limit of large energies $\epsilon \!\gg\! 1$, ${\rm sn}, {\rm cn} \!\to\! \sin, \cos$ and ${\rm dn} \!\to\! 1$ so that $\cos\vartheta(t) \!=\! \cos(\vartheta_0 \!+\! 
\omega t)$ results, which corresponds to the free rotator case with angular velocity $\omega \!\to\! \sqrt{2 E / \Theta}$. In this limit all three vector components of the angular momentum are conserved quantities. For lower energies the potential barriers lead to modifications, supporting several types of motion. For $\kappa_z \!=\! 0$ unbound motion occurs for $\epsilon \!>\! 0$ where the amplitude of the oscillation of $\vartheta$ attains the full $\pi$ range, see solid curve in Fig.~\ref{fig:kappaz=0}. \begin{figure} \caption{Nutation angle $\vartheta$ over scaled time $\omega t$ for $\kappa_z \!=\! 0$, $\vartheta_0 \!=\! 0$, $L_y(0)\!=\! 0$ and $2 \Theta U_\alpha / \hbar^2 \!=\! 0.025$. Scaled energies and $x$-components of angular momentum are: $\epsilon \!=\! 0.024$, $L_x(0) / \hbar \!=\! 0.16$ (solid curve); $\epsilon \!=\! -0.1$, $L_x(0) / \hbar \!=\! 0.15$ (dashed curve).} \label{fig:kappaz=0} \end{figure} Given an initial angular momentum only in $x$ direction, the molecular axis then continuously rotates around the $x$ axis so that while $L_x$ is modulated due to the potential barriers, it does not change its sign, see solid curve in Fig.~\ref{fig:Ly-kappaz=0}. On the other hand, for the same case of $\kappa_z \!=\! 0$, for energies $\epsilon \!<\! 0$ the angle $\vartheta$ is subject to a reflection at the light-induced potential barrier. Thus the range of $\vartheta$ values is restricted, as seen for the dashed line in Fig.~\ref{fig:kappaz=0}. In addition, since $\dot\vartheta$ changes its sign at the reflecting barrier, the angular momentum $L_x$ correspondingly crosses zero and thus rotations with positive and negative orientations subsequently interchange, cf. dashed line in Fig.~\ref{fig:Ly-kappaz=0}. Note that for $\kappa_z \!>\! 0$ the motion of $\vartheta$ is additionally bound by reflections at the centrifugal barriers near $\vartheta \!=\! 0$ and $\pi$. \begin{figure} \caption{$L_x / \hbar$ over scaled time $\omega t$ for the same parameters as in Fig.~4.} \label{fig:Ly-kappaz=0} \end{figure} \section{Quantum dynamics of the molecular rotation} \label{sec:3} \subsection{Implementation via two-photon Raman transitions} For implementing the dynamics described so far, not only static electric fields can be used. Also linear polarised cw optical fields, that are far detuned from electronic resonances, have been shown to produce a similar interaction~\cite{herschbach-pol}. In this way one can take advantage of large polarisabilities of non-polar molecules to attain deep potential wells for the nutation angle. The specific excitation scheme for a dimer, depicting only the two lowest electronic potential surfaces that provide a $\leftexp{1}{\Sigma}\!\leftrightarrow\! \leftexp{1}\Sigma$ dipole transition, is shown in Fig.~\ref{fig:level-scheme}. \begin{figure} \caption{Excitation scheme for implementing rotational Raman transitions.} \label{fig:level-scheme} \end{figure} In order to avoid resonant vibronic transitions and to be left with the sought two-photon processes, a sufficiently large detuning $\Delta$ from the bare electronic resonance frequency is required. Given an optical field of spectral width much smaller than vibrational frequencies $\omega_\nu$, stimulated Raman transitions occur that only affect the rotational degree of freedom. However, the relative rate of spontaneous vs stimulated Raman transitions, $\Gamma/\Delta$, with $\Gamma$ being the bare electronic linewidth of the dipole transition, shows that even for large detuning, $\Delta \!\gg\! 
\Gamma$, a certain number of spontaneous processes occur. These processes not only lead to an incoherent effect on the molecular rotation, such as relaxation and quantum decoherence, but also excite the internuclear vibration via Franck--Condon transitions. Nevertheless, as long as the laser remains detuned from resonant vibronic transitions, the electronic and vibrational degrees of freedom can be eliminated to obtain equations of motion for the molecular rotation alone. This is achieved by consistently eliminating the electronic coherences and electronic excited-state populations, keeping only terms up to second order in the molecule-light interaction. In Franck--Condon approximation the vibrational degree of freedom can then be traced over~\cite{mol-rot}. The requirement of avoiding resonant vibronic transitions leads to a maximum interaction time on which the molecule stays vibrationally sufficiently cold, which can be estimated as \begin{equation} t \!\ll\! \Delta^2 / (\eta^2 \Gamma \omega_\nu \Omega_{\rm R}) . \end{equation} Here $\Omega_{\rm R} \!=\! |d_{\rm ge}E_0|^2 / (4 \hbar^2 \Delta)$ is the Raman Rabi frequency with $d_{\rm ge}$ being the electric-dipole matrix element between electronic ground and excited states. The linear polarised electric field is specified by $E(t) \!=\! \hat{n}_z \, E_0 \cos(\omega_0 t)$ with $\omega_0$ being an optical frequency far-detuned from vibronic resonance and $\hat{n}_z$ being the unit vector in $z$ direction. Using these parameters the light-induced potential depth results as $U_\alpha \!=\! \hbar\Omega_{\rm R}$. The parameter $\eta$ is the ratio of the difference of internuclear equilibrium distances in ground and excited electronic states to the spatial extent of the vibrational ground-state wave packet. Since $\eta \!<\! 10$ for alkali dimers, in the weak-field regime [$\Omega_R \!<\! \hbar /(2\Theta)$] this time corresponds to a very large number of free rotational periods. Under these conditions the equations of motion for the orientation of the molecular axis in the far detuned optical field can be cast into a master equation of Lindblad form~\cite{mol-rot}: \begin{eqnarray} \label{eq:master} \dot{\hat{\sigma}} = - \frac{i}{\hbar} [ \hat{T} \!+\! \hat{V} , \hat{\sigma} ] + \sum_{i=x,y,z} \left( \hat{S}_i \, \hat{\sigma} \, \hat{S}^\dagger_i - {\textstyle\frac{1}{2}} \, \hat{S}^\dagger_i \hat{S}_i \, \hat{\sigma} - {\textstyle\frac{1}{2}} \, \hat{\sigma} \, \hat{S}^\dagger_i \hat{S}_i \right) . \end{eqnarray} Here $\hat{\sigma}$ is the reduced rotational density operator of the molecule and the free kinetic energy is given by~\cite{LJ-remark} \begin{equation} \label{eq:ham-mol} \hat{T} = \hat{\bf J}^2 / (2\Theta) . \end{equation} The stimulated rotational Raman transitions then provide the sought potential in the form \begin{equation} \label{eq:raman-ham} \hat{V} = - \hbar\Omega_{\rm R} \, \hat{n}_z^2 , \end{equation} where in terms of spherical angles the unit-vector operator $\hat{n}_z$ reads $\langle \vartheta, \varphi| \hat{n}_z \!=\! \cos\vartheta \, \langle \vartheta, \varphi|$, so that \begin{equation} \label{eq:raman-ham2} \langle \vartheta, \varphi | \, \hat{V} = - U_\alpha \cos^2(\vartheta) \, \langle \vartheta, \varphi| \end{equation} recovers the potential as discussed for the classical treatment. The spontaneous rotational Raman transitions are given in terms of the operators $\hat{S}_i$, with $i \!=\! x,y,z$ denoting the polarisation of the spontaneously emitted photon. 
They read \begin{equation} \label{eq:def-S} \hat{S}_i = \sqrt{\frac{\Gamma \Omega_{\rm R}}{\Delta}} \; \hat{n}_i \, \hat{n}_z , \end{equation} with $\hat{n}_i$ ($i \!=\! x,y,z$) being unit vectors pointing into the corresponding directions. In the representation of spherical angles these jump operators can be cast in vector form to: \begin{equation} \label{eq:def-S2} \langle \vartheta, \varphi| \, \hat{\mathbf{S}} = \sqrt{\frac{\Gamma \Omega_{\rm R}}{\Delta}} \left[ \begin{array}{c} \cos\varphi \sin\vartheta \cos\vartheta \\ \sin\varphi \sin\vartheta \cos\vartheta \\ \cos^2\vartheta \end{array} \right] \langle \vartheta, \varphi| . \end{equation} \subsection{Stimulated Raman processes} For vanishing bare electronic linewidth, $\Gamma \!=\! 0$, a unitary evolution is recovered from the master equation~(\ref{eq:master}) that can be reformulated in terms of the Schr\"odinger equation \begin{equation} \label{eq:schroedinger} i \hbar \, \partial_t \, |\Psi\rangle = (\hat{T} \!+\! \hat{V} ) \, |\Psi\rangle , \end{equation} with the Hamiltonian given by Eqs~(\ref{eq:ham-mol}) and (\ref{eq:raman-ham}). In the $|\vartheta, \varphi\rangle$ representation the evolution of the wavefunction $\Psi(\vartheta,\varphi) \!=\! \langle \vartheta, \varphi| \Psi\rangle$ reads \begin{equation} \label{eq:schroedinger2} \fl i \hbar \, \dot{\Psi}(\vartheta,\varphi) = - \left\{ \frac{\hbar^2}{2\Theta} \left[ \frac{1}{\sin\vartheta} \partial_\vartheta \left( \sin\vartheta \, \partial_\vartheta \right) + \frac{1}{\sin^2\vartheta} \, \partial_\varphi^2 \right] + U_\alpha \cos^2\vartheta \right\} \, \Psi(\vartheta, \varphi) . \end{equation} Using the ansatz $\Psi(\vartheta, \varphi) \!=\! S_{lm}(z) \exp(im\varphi)$, with $\hbar m \!=\! J_z$ being a constant of motion, and $z \!=\! \cos\vartheta$, the energy eigenstates are obtained from the angular oblate spheroidal wave equation~\cite{as} \begin{equation} \label{eq:schroedinger2} \left\{ \frac{\hbar^2}{2\Theta} \left[ \partial_z \left[ (1 \!-\! z^2) \partial_z \right]- \frac{m^2}{1 \!-\! z^2} \right] + U_\alpha z^2 + E_{lm} \right\} S_{lm}(z) = 0 . \end{equation} The corresponding eigenenergies $E_{lm}$ can be expanded in powers of the interaction potential $U_\alpha$ as \begin{equation} \label{eq:energies} E_{lm} = \frac{\hbar^2}{2\Theta} \, l(l \!+\! 1) - \frac{U_\alpha}{2} \left[ 1 + \frac{1 \!-\! 4m^2}{(2l \!-\! 1) (2l \!+\! 3)} \right] + \ldots \, . \end{equation} They manifest a partial removal of the degeneracy with respect to $m$, as shown in Fig.~\ref{fig:E-levels}. The properties of these eigenstates and their angular confinement have been discussed in detail in Ref.~\cite{herschbach-E}. \begin{figure} \caption{Change of energy eigenvalues due to the light interaction for the value $2\Theta U_\alpha / \hbar^2 \!=\! 1$.} \label{fig:E-levels} \end{figure} The quantum dynamics of a system prepared quite analogously as discussed for the classical treatment, i.e. with an initial average angular momentum pointing only in $x$ direction, is shown in Fig.~\ref{fig:Lx-dynamics}. Here an initial coherent angular-momentum state~\cite{Arecchi} of the form \begin{equation} \label{eq:coh-state} \fl | j, \alpha, \beta \rangle = \sum_{m=-j}^j \sqrt{ 2j \choose j \!+\! m } \big[\sin (\alpha/2) \big]^{j+m} \big[\cos (\alpha/2) \big]^{j-m} \, e^{-i(j + m)\beta} |jm\rangle , \end{equation} for $j \!=\! 2$, $\alpha \!=\! \pi/2$, and $\beta \!=\! 0$ has been used. Classically that would correspond to the case $\kappa_z \!=\! 
0$ where reflections in the motion of $\vartheta$ can only occur due to the light-induced potential. But from Eq.~(\ref{eq:coh-state}) it is clear that we deal with an initial state containing states $|j,m\rangle$ with all possible values for $m$: \begin{eqnarray} \label{eq:coh-state2} |\Psi_1\rangle & = & |j\!=\! 2 ,\alpha \!=\! \pi/2, \beta \!=\!0\rangle \nonumber \\ & = & {\textstyle\frac{1}{4}} \, |2,-2\rangle + {\textstyle\frac{1}{2}} \, |2,-1\rangle + {\textstyle\sqrt{\frac{3}{8}}} \, |2,0\rangle + {\textstyle\frac{1}{2}} \, |2,1\rangle + {\textstyle\frac{1}{4}} \, |2,2\rangle . \end{eqnarray} Thus part of the wave packet in $\vartheta$ will also be reflected by centrifugal barriers. In Fig.~\ref{fig:Lx-dynamics} an oscillation of $\langle \hat{J}_x \rangle$ between positive and negative values is observed that is characteristic of reflections of the wavefunction at potential barriers. However, the transient behaviour at the crossing of zero indicates that part of the wave packet in $\vartheta$ is transmitted through the barrier, and that reflected and transmitted parts interfere for the short time where they spatially overlap. \begin{figure} \caption{Dynamics of $\langle \hat{J}_x \rangle$.} \label{fig:Lx-dynamics} \end{figure} \subsection{Spontaneous processes} When spontaneous processes are included in the dynamics, i.e.\ for $\Gamma \!>\! 0$, the full master equation~(\ref{eq:master}) must be solved. Due to the scaling of complexity of this numerical problem with the number of required rotational levels, it is advantageous to employ a quantum trajectory method. Then individual realisations of state vectors can be calculated and finally an ensemble average over the set of realisations reproduces the sought density matrix. Individual trajectories consist of non-unitary evolutions with the effective Hamiltonian \begin{equation} \label{eq:Heff} \hat{H}_{\rm eff} = \hat{T} + \hat{V} \left( 1 + \frac{i \Gamma}{2\Delta} \right) , \end{equation} describing the conditioned evolution when no spontaneous processes occur, interrupted by spontaneous Raman processes described by the application of the jump operators $\hat{S}_i$. As can be seen from their definition~(\ref{eq:def-S2}), these operators may excite the motion in the angle $\varphi$, by producing via $\hat{S}_x$ and $\hat{S}_y$ a weighting of the corresponding probability amplitude $\Psi(\vartheta,\varphi)$ in $\varphi$. Thus motion in $\varphi$ and consequently the angular momentum component $\hat{J}_z$ become excited by spontaneous Raman processes. This can be observed in Fig.~\ref{fig:Lz2}, where the variance of $\hat{J}_z$ is plotted over time. For $\Gamma \!>\! 0$ a monotonic increase of the variance is observed, indicating that the kinetic energy is subject to heating. Of course the spontaneous Raman scattering will also lead to a suppression of coherent processes and the oscillations of the average angular momentum will be damped, cf. solid curve in Fig.~\ref{fig:Lx-dynamics}. \begin{figure} \caption{Variance of $\hat{J}_z$.} \label{fig:Lz2} \end{figure} \section{Decohering dynamics in phase space} \label{sec:4} \subsection{Spherical Wigner functions} In the same way as the angular-momentum coherent states are a generalisation of the familiar harmonic-oscillator coherent states, the concept of phase-space quasi-probability distributions can also be defined on the angular-momentum phase space~\cite{Agarwal}. Here we employ a Wigner function, which in the $(2j \!+\!
1)$-dimensional Hilbert space of angular momentum $j$, is defined in terms of the density operator $\hat{\sigma}$ and the spherical harmonics, $Y_{sm}(\theta, \varphi)$, as defined in Refs~\cite{defn-w:Schleich, defn-w:Benedict} \begin{eqnarray} \label{eq:W-fn} W_j(\vartheta, \varphi) & = & \sqrt{\frac{2j\!+\!1}{4\pi}} \sum_{s=0}^{2j} \sum_{m=-s}^s Y_{sm}(\vartheta, \varphi) \, {\rm Tr}(\hat{T}_{j,sm}^\dagger \hat{\sigma} ) . \end{eqnarray} The multipole operators are defined by \begin{eqnarray} \label{eq:multipole} \hat{T}_{j,sm} & = & \sqrt{2s \!+\! 1} \sum_{m'm''} (-1)^{j-m'} \left( \begin{array}{ccc} j & s & j \\ -m' & m & m'' \end{array} \right) |j m' \rangle \langle j m''| , \end{eqnarray} with ${j_1 \, j_2 \, j_3 \choose m_1 \, m_2 \, m_3}$ being the Wigner 3j symbol. The factor in front of Eq.~(\ref{eq:W-fn}) ensures normalisation to unity. As the laser interaction will result in a change of the rotational quantum number $j$ of the molecule we consider a sum of Wigner functions over all possible values of $j$: \begin{equation} \label{eq:wigner} W(\vartheta, \varphi) = \sum_{j=0}^\infty W_j(\vartheta, \varphi) . \end{equation} Albeit the sum over $j$, this phase-space distribution is not a complete description of the rotational quantum state. However, it proves useful for illustrating the laser-induced dynamics. For alternative approaches defining phase-space distributions independent of the particular value of $j$, see Refs~\cite{foeldi,brif,klimov}. Moreover, it should be noted that even though the angular-momentum coherent states are analogous to the harmonic-oscillator ones in many respects, the Wigner function of angular-momentum coherent states is weakly negative. This negativity decreases with increasing value for $j$ and reaches zero only in the (classical) limit $j\!\to\!\infty$. \subsection{Time evolution of the Wigner function} Figure~\ref{fig:W-fn_coh-state} shows the time evolution of the system starting initially from the state $|\Psi_1\rangle$, cf.~Eq.~(\ref{eq:coh-state2}). The phase-space distribution is shown for a scaled time $\tau \!=\! t\hbar/(2 \Theta) \!=\! 660$ after the first full period of oscillation of $\langle \hat{J}_x \rangle$ (upper left plot), and then at later times in steps of quarter periods. One can see that the system almost returns to its initial coherent state after one period, when the spontaneous emission is neglected (left column). The peak in phase space has then returned to its initial position at $\vartheta \!=\! \pi/2$ and $\varphi \!=\! 0$. In the course of time the peak splits and after a half period, at $\tau=990$, two peaks appear at $\vartheta \!=\! \pi/2$, $\varphi \!=\! 0,\pi$. That corresponds to an average angular momentum pointing now in negative $x$ direction. Note that between half and full cycles, at $\tau \!=\! 830$ and $\tau \!=\! 1165$, the phase-space distribution develops several smaller peaks and also negative regions of the Wigner function appear. Including the spontaneous Raman processes, the structures in phase-space generally are smoothed and negativities are suppressed. This can be seen from the right column of plots in Fig.~\ref{fig:W-fn_coh-state}. \begin{figure} \caption{Time evolution of the coherent state $|\Psi_1\rangle$. From top to bottom the figures show the Wigner function at scaled times $\tau \!=\! 660$, $\tau \!=\! 830$, $\tau \!=\! 990$, and $\tau \!=\! 1165$. 
Parameters are the same as in Fig.~\ref{fig:Lx-dynamics}.} \label{fig:W-fn_coh-state} \end{figure} As another example, in Fig.~\ref{fig:W-fn_sup-state} a superposition of two coherent angular-momentum states, \begin{equation} \label{eq:sup-state} |\Psi_2 \rangle = \sqrt{\frac{2}{5}} \Big( \, | j \!=\! 2, \alpha \!=\! \pi/2, \beta \!=\! \pi/4 \rangle + | j \!=\! 2, \alpha \!=\! \pi/2, \beta \!=\! -\pi/4 \rangle \Big) , \end{equation} has been taken as the initial quantum state. This state has a strong negative region of the Wigner function centred at $\vartheta \!=\! \pi/2$, $\varphi \!=\! 0$. The plots of the Wigner function for this initial state show the same oscillatory behaviour as Fig.~\ref{fig:W-fn_coh-state}, with the negative peak being shifted to $\varphi \!=\! 0,\pi$ at half periods. \begin{figure} \caption{Time evolution of the initial superposition state $|\Psi_2\rangle$. Parameters are the same as in Fig.~\ref{fig:W-fn_coh-state}.} \label{fig:W-fn_sup-state} \end{figure} However, comparing the time evolution of these two cases one can see that the difference between the unitary and non-unitary evolution is much more pronounced in the case of the coherent state. Especially at the times between completed half and full cycles (at $\tau \!=\! 830$ and $\tau \!=\! 1165$), when also the Wigner function of the time-evolved coherent state shows relatively strong negative regions, one observes that these negativities have become weaker in the presence of spontaneous emission. The populations in the $j$-level manifolds for the final interaction time of Figs~\ref{fig:W-fn_coh-state} and \ref{fig:W-fn_sup-state} are shown in Fig.~\ref{fig:jpops}. It can be seen that for both the coherent state and the superposition of coherent states the populations in $j \!=\! 0,4$ have increased by a few percent at the expense of the population in the initial manifold $j \!=\! 2$. \begin{figure} \caption{Populations $p_j$ at the interaction time $\tau \!=\! 1165$ for the coherent angular-momentum state (light bars) and the superposition of coherent angular-momentum states (dark bars). Parameters are the same as for Figs~\ref{fig:W-fn_coh-state} and \ref{fig:W-fn_sup-state}.} \label{fig:jpops} \end{figure} That the coherent state in this example actually is much more sensitive to decoherence may seem counter-intuitive at first sight, but one should keep in mind that the angular-momentum coherent state is not classical in the usual sense of a harmonic-oscillator coherent state. That is, as has been seen above, its Wigner function is in general not positive everywhere. Furthermore, angular-momentum coherent states apparently are not in general the most robust states against decoherence, as are harmonic oscillator coherent states for linearly coupled reservoirs. The substantial difference in how these two cases are affected by decoherence due to spontaneous processes can also be observed in Fig.~\ref{fig:purity}, where the time evolution of the purity of the density operator is shown. It can be seen that the superposition state~(\ref{eq:sup-state}) develops into a mixed state much more slowly than the coherent state. \begin{figure} \caption{Purity for the initial coherent state (solid curve) and initial superposition state (dashed curve). Parameters are the same as in Fig.~\ref{fig:Lx-dynamics}.} \label{fig:purity} \end{figure} The type of decoherence mechanism is in general determined by the form of the operators $\hat{S}_i$, see Eqs~(\ref{eq:def-S}) and (\ref{eq:def-S2}).
In our case these operators differ from raising and lowering operators $\hat{J}_\pm \!=\! \hat{J}_x \!\pm\! i \hat{J}_y$, and thus our decoherence mechanism itself is different from that discussed in Refs~\cite{braun,foeldi2}, though for special choices of initial conditions particular features may be similar. \section{Summary and conclusions} We have considered a diatomic homonuclear molecule in a far-detuned, linear-polarised light field. First the classical motion of the molecule was studied. For the case of a molecule with a permanent dipole, the potential was seen to be that of a spherical pendulum. For non-polar molecules, however, the potential is of a different form, but the solution for the nutation angle still could be found in terms of elliptic integrals. The motion was then treated quantum mechanically, where stimulated and spontaneous Raman processes were taken into account by a master equation. Solving this equation both for an angular-momentum coherent state and a superposition state, the effects of the decoherence induced by the spontaneous processes where compared. It was seen that, in this particular example, the superposition state was more robust to decoherence than the coherent state. \section*{References} \end{document}
\begin{document} \thanks{\hglue-4.5mm\fontsize{9.6}{9.6}\selectfont\copyright\,2009 by Omar Boukhadra. Provided for non-commercial research and education use. Not for reproduction, distribution or commercial use. \\\cour{$^*$E-mail address~: [email protected]}} \maketitle \centerline{\textit{Centre de Math\'ematiques et Informatique (CMI),}} \centerline{\textit{Universit\'e de Provence};} \centerline{\textit{D\'epartement de Math\'ematiques, Universit\'e de Constantine}} \begin{abstract} We study models of discrete-time, symmetric, $\Z^{d}$-valued random walks in random environments, driven by a field of i.i.d. random nearest-neighbor conductances $\omega_{xy}\in[0,1]$, with polynomial tail near 0 of exponent $\gamma>0$. We first prove for all $d\geq5$ that the return probability shows an anomalous decay (non-Gaussian) that approaches (up to sub-polynomial terms) a random constant times $n^{-2}$ when we push the power $\gamma$ to zero. In contrast, we prove that the heat-kernel decay is as close as we want, in a logarithmic sense, to the standard decay $n^{-d/2}$ for large values of the parameter $\gamma$. \newline\\ \noindent \textit{\textbf{keywords}}~: Random walk, Random environments, Markov chains, Random conductances, Percolation. \newline \noindent \textit{\textbf{MSC}}~: 60G50; 60J10; 60K37. \end{abstract} \secdef\sct\sect{\textbf{Introduction and results}} \label{} The main purpose of this work is the derivation of heat-kernel bounds for random walks $(X_n)_{n\in\N}$ among polynomial lower tail random conductances with exponent $\gamma>0$, on $\Z^d, d>4$. We show that the heat-kernel exhibits opposite behaviors, anomalous and standard, for small and large values of $\gamma$. Random walks in reversible random environments are driven by the transition matrix \begin{equation} \label{protra} P_{\omega}(x,y)=\frac{\omega_{xy}}{\pi_{\omega}(x)}, \end{equation} where $(\omega_{xy})$ is a family of random (non-negative) conductances subject to the symmetry condition~$\omega_{xy}=\omega_{yx}$. The sum $\pi_\omega(x)=\sum_y\omega_{xy}$ defines an invariant, reversible measure for the corresponding discrete-time Markov chain. In most situations~$\omega_{xy}$ are non-zero only for nearest neighbors on~$\Z^d$ and are sampled from a shift-invariant, ergodic or even i.i.d.\ measure~$\Q$. One general class of results is available for such random walks under the additional assumptions of uniform ellipticity, \begin{displaymath} \exists\alpha>0:\quad \Q(\alpha<\omega_{b}<1/\alpha)=1 \end{displaymath} and the boundedness of the jump distribution, \begin{displaymath} \exists R<\infty:\, \vert x\vert\geq R\, \Rightarrow \, P_{\omega}(0,x)=0,\quad \Q-a.s. \end{displaymath} One then has the standard local-CLT-like decay of the heat-kernel ($c_1,c_2$ are absolute constants), as proved by Delmotte~\cite{del}: \begin{equation} \label{heat-kernel} P^{n}_{\omega}(x,y)\leq\frac{c_{1}}{n^{d/2}}\exp\left \{-c_{2}\frac{\vert x-y\vert^{2}}{n}\right\}. \end{equation} Once the assumption of uniform ellipticity is relaxed, matters get more complicated. The most intensely studied example is the simple random walk on the infinite cluster of supercritical bond percolation on~$\Z^d$, $d\ge2$. This corresponds to~$\omega_{xy}\in\{0,1\}$ i.i.d. with~$\Q(\omega_b=1)>p_c(d)$ where~$p_c(d)$ is the percolation threshold (cf. \cite{G}). Here an annealed invariance principle has been obtained by De Masi, Ferrari, Goldstein and Wick~\twocite{demas1}{demas2} in the late 1980s.
More recently, Mathieu and R\'emy~\cite{Mathieu-Remy} proved the on-diagonal (i.e., $x=y$) version of the heat-kernel upper bound \eqref{heat-kernel}---a slightly weaker version of which was also obtained by Heicklen and Hoffman~\cite{Heicklen-Hoffman}---and, soon afterwards, Barlow~\cite{Barlow} proved the full upper and lower bounds on $P_\omega^n(x,y)$ of the form \eqref{heat-kernel}. (Both these results hold for $n$ exceeding some random time defined relative to the environment in the vicinity of~$x$ and~$y$.) Heat-kernel upper bounds were then used in the proofs of quenched invariance principles by Sidoravicius and Sznitman~\cite{Sidoravicius-Sznitman} for $d\ge4$, and for all $d\ge2$ by Berger and Biskup~\cite{BB} and Mathieu and Piatnitski~\cite{Mathieu-Piatnitski}. We consider in our case a family of symmetric, irreducible, nearest-neighbor Markov chains on~$\Z^d$, $d\ge5$, driven by a field of i.i.d. bounded random conductances $\omega_{xy}\in[0,1]$ and subject to the symmetry condition~$\omega_{xy}=\omega_{yx}$. These are constructed as follows. Let $\Omega$ be the set of functions $\omega:\Z^d\times\Z^d \rightarrow \R_{+}$ such that $\omega_{xy}>0$ iff $x \sim y$, and $\omega_{xy}=\omega_{yx}$ ( $x \sim y$ means that $x$ and $y$ are nearest neighbors). We call elements of $\Omega$ environments. We choose the family $\{ \omega_{b}, b=(x,y),x \sim y, b\in\Z^d\times\Z^d\}$ i.i.d according to a law $\Q$ on $(R^{\ast}_{+})^{\Z^d}$ such that \begin{equation} \label{1} \begin{array}{ll} \omega_{b}\leq 1 & \text{for all } b;\\ \Q(\omega_{b} \leq a)\sim a^{\gamma} & \text{when } a\downarrow 0, \end{array} \end{equation} where $\gamma>0$ is a parameter. Therefore, the conductances are $\Q$-a.s. positive. In a recent paper, Fontes and Mathieu~\cite{Fontes-Mathieu} studied continuous-time random walks on~$\Z^d$ which are defined by generators~$\LL_\omega$ of the form \begin{displaymath} (\mathcal{L}_{\omega}f)(x)=\sum_{y\sim x}\omega_{xy}[f(y)-f(x)], \end{displaymath} with conductances given by \begin{displaymath} \omega_{xy}=\omega(x)\wedge \omega(y) \end{displaymath} for i.i.d.\ random variables~$\omega(x)>0$ satisfying \eqref{1}. For these cases, it was found that the annealed heat-kernel, $\int \text{d}\Q(\omega) P^{\omega}_{0}(X_{t}=0)$, exhibits an \emph{anomalous decay}, for $\gamma< d/2$. Explicitly, from \cite{Fontes-Mathieu}, Theorem 4.3, we have \begin{equation} \label{fms} \int \text{d}\Q(\omega) P^{\omega}_{0}(X_{t}=0)=t^{-(\gamma\wedge\frac{d}{2})+o(1)}, \quad t\rightarrow\infty. \end{equation} In addition, in a more recent paper, Berger, Biskup, Hoffman and Kozma \cite{berger}, provided universal upper bounds on the quenched heat-kernel by considering the nearest-neighbor simple random walk on~$\Z^d$, $d\ge2$, driven by a field of i.i.d. bounded random conductances $\omega_{xy}\in[0,1]$. The conductance law is i.i.d.\ subject to the condition that the probability of $\omega_{xy}>0$ exceeds the threshold $p_c(d)$ for bond percolation on~$\Z^d$. For environments in which the origin is connected to infinity by bonds with positive conductances, they studied the decay of the $2n$-step return probability $P_\omega^{2n}(0,0)$. They have proved that $P_\omega^{2n}(0,0)$ is bounded by a random constant times $n^{-d/2}$ in $d=2,3$, while it is $o(n^{-2})$ in~$d\ge5$ and $O(n^{-2}\log n)$ in $d=4$. 
More precisely, from \cite{berger}, Theorem 2.1, we have for almost every $\omega\in\{0\in \mathcal{C}_{\infty}\}$ ($\mathcal{C}_{\infty}$ represents the set of sites that have a path to infinity along bonds with positive conductances), and for all $n\geq1$, \begin{equation} \label{trans} P_\omega^n(0,0)\le C(\omega)\, \begin{cases} n^{-d/2},\qquad&d=2,3, \\ n^{-2}\log n,\qquad&d=4, \\ n^{-2},\qquad&d\ge5,\end{cases} \end{equation} where $C(\omega)$ is a random positive variable.\\ On the other hand, to show that those general upper bounds (cf. \eqref{trans}) in $d\geq5$ represent a real phenomenon, they produced examples with anomalous heat-kernel decay approaching $1/n^2$, for i.i.d. laws $\Q$ on bounded nearest-neighbor conductances with \textit{lower tail much heavier than polynomial} and with~$\Q(\omega_b>0)>p_c(d)$. We quote Theorem 2.2 from \cite{berger}~: \begin{theorem} \label{thm2} (1) Let~$d\ge5$ and $\kappa>1/d$. There exists an i.i.d.\ law~$\Q$ on bounded, nearest-neighbor conductances with~$\Q(\omega_b>0)>p_c(d)$ and a random variable~$C=C(\omega)$ such that for almost every~$\omega\in\{0\in\mathcal{C}_\infty\}$, \begin{equation} \label{lower-bd} P_\omega^{2n}(0,0)\ge\ C(\omega)\frac{\text e^{-(\log n)^\kappa}}{n^2}, \qquad n\ge1. \end{equation} \noindent (2) Let $d\ge5$. For every increasing sequence $\{\lambda_n\}_{n=1}^\infty$, $\lambda_n\to\infty$, there exists an i.i.d.\ law $\Q$ on bounded, nearest-neighbor conductances with~$\Q(\omega_b>0)>p_c(d)$ and an a.s.\ positive random variable~$C=C(\omega)$ such that for almost every~$\omega\in\{0\in\mathcal{C}_\infty\}$, \begin{equation} \label{2.4} P_\omega^n(0,0)\ge \frac{C(\omega)}{\lambda_nn^2} \end{equation} along a subsequence that does not depend on~$\omega$. \end{theorem} The distributions that they use in part~(1) of Theorem~\ref{thm2} have a tail near zero of the general form \begin{equation} \Q(\omega_{xy}<s) \approx |\log(s)|^{-\theta} \end{equation} with~$\theta>0$. Berger, Biskup, Hoffman and Kozma \cite{berger} called attention to the fact that the construction of an estimate of the anomalous heat-kernel decay for random walk among polynomial lower tail random conductances on $\Z^d$ seems to require subtle control of heat-kernel \emph{lower} bounds which go beyond the estimates that can be easily pulled out from the literature. In the present paper, we give a response to this question and show that every distribution with an appropriate power-law decay near zero can serve as such an example. Moreover, when we push the power to zero, the lower bound obtained for the return probability approaches (up to sub-polynomial terms) the upper bound supplied by \cite{berger}, and this for all $d\geq5$. Here is our first main result, whose proof is given in section~\ref{ahkd}~: \begin{theorem} \label{th} Let $d\geq5$. There exists a positive constant $\delta(\gamma)$ depending only on $d$ and $\gamma$ such that $\Q$-a.s., there exists~$C=C(\omega)<\infty$ and for all $n\geq1$ \begin{equation} \label{min} P^{2n}_{\omega}(0,0)\geq \frac{C}{n^{2+\delta(\gamma)}}\quad \text{and}\quad \delta(\gamma)\xrightarrow[\gamma \to 0]{}0. \end{equation} \end{theorem} \begin{remark} \label{rem} \begin{enumerate} \item The proof tells us in fact, with \eqref{trans}, that for $d\geq5$ we have almost surely \begin{equation} \label{esup} \begin{split} & -2[1+d(2d-1)\gamma]\leq \liminf_{n} \frac{\log P^{2n}_{\omega}(0,0)}{\log n}\\ &\qquad\qquad\qquad\qquad\qquad\qquad\leq \limsup_{n} \frac{\log P^{2n}_{\omega}(0,0)}{\log n}\leq -2.
\end{split} \end{equation} \item As we were reminded by M. Biskup and T.M. Prescott, the invariance principle (CLT) (cf. Theorem 2.1 in \cite{BP} and Theorem 1.3 in \cite{QIP}) automatically implies the ``usual'' lower bound on the heat-kernel under weaker conditions on the conductances. Indeed, the Markov property and reversibility of~$X$ yield $$ P^{\omega}_{0}(X_{2n}=0)\geq \frac{\pi_\omega(0)}{2d}\sum_{x\in \mathcal{C}_{\infty}\atop \vert x\vert\leq \sqrt{n}}P^{\omega}_{0}(X_{n}=x)^{2}. $$ Cauchy-Schwarz then gives $$ P^{\omega}_{0}(X_{2n}=0)\geq P^{\omega}_{0}(\vert X_{n}\vert \leq \sqrt{n})^{2}\frac{\pi_\omega(0)/2d}{\vert \mathcal{C}_{\infty}\cap [-\sqrt{n},+\sqrt{n}]^{d}\vert}. $$ Now the invariance principle implies that $P^{\omega}_{0}(\vert X_{n}\vert \leq \sqrt{n})^{2}$ has a positive limit as~$n\to\infty$ and the Spatial Ergodic Theorem shows that $\vert \mathcal{C}_{\infty}\cap [-\sqrt{n},+\sqrt{n}]^{d}\vert$ grows proportionally to~$n^{d/2}$. Hence we get $$ P^{\omega}_{0}(X_{2n}=0)\geq \frac{C(\omega)}{n^{d/2}}, \quad n\geq 1, $$ with~$C(\omega)>0$ a.s. on the set~$\{0\in \mathcal{C}_{\infty}\}$. Note that, in $d=2,3$, this complements nicely the ``universal'' upper bounds derived in~\cite{berger}. In $d=4$, the decay is at most $n^{-2}\log n$ and at least $n^{-2}$. \end{enumerate} \end{remark} The result of Fontes and Mathieu \eqref{fms} (cf. \cite{Fontes-Mathieu}, Theorem 4.3) encourages us to believe that the quenched heat-kernel has a standard decay when $\gamma\geq d/2$, but the construction seems to require subtle control of heat-kernel upper bounds. In the second result of this paper, whose proof is given in section~\ref{shd}, we prove, for all $d\geq5$, that the heat-kernel decay is as close as we want, in a logarithmic sense, to the standard decay $n^{-d/2}$ for large values of the parameter $\gamma$. For the cases where $d=2,3$, we have a standard decay of the quenched return probability under weaker conditions on the conductances (see Remark \ref{rem}). \begin{theorem} \label{thm} Let $d\geq5$. There exists a positive constant $\delta(\gamma)$ depending only on $d$ and $\gamma$ such that $\Q$-a.s., \begin{equation} \label{min} \limsup_{n\rightarrow+\infty}\sup_{x\in\Z^d}\frac{\log P^{n}_{\omega}(0,x)}{\log n}\leq -\frac{d}{2}+\delta(\gamma)\quad \text{and}\quad \delta(\gamma)\xrightarrow[\gamma \to +\infty]{}0 . \end{equation} \end{theorem} In what follows, we refer to $P^{\omega}_{x}(\cdot)$ as the \textit{quenched} law of the random walk $X=(X_{n})_{n\geq 0}$ on $((\Z^d)^{\N}, \mathcal{G})$ with transitions given in \eqref{protra} in the environment~$\omega$, where $\mathcal{G}$ is the $\sigma$-algebra generated by cylinder functions, and let $\mathbb{P}:=\Q\otimes P^\omega_0$ be the so-called \textit{annealed} semi-direct product measure law defined by $$ \Prob(F\times G)=\int_F \Q(\text{d}\omega)P^\omega_0(G), \quad F\in \mathcal{F}, G\in \mathcal{G}, $$ where $\mathcal{F}$ denotes the Borel $\sigma$-algebra on $\Omega$ (which is the same as the $\sigma$-algebra generated by cylinder functions). \secdef\sct\sect{\textbf{Anomalous heat-kernel decay}} \label{ahkd} In this section we provide the proof of Theorem \ref{th}. We consider a family of bounded nearest-neighbor conductances~$(\omega_b)\in\Omega=[0,1]^{\B^d}$ where~$b$ ranges over the set~$\B^d$ of unordered pairs of nearest neighbors in~$\Z^d$.
The law $\Q$ of the~$\omega$'s will be i.i.d.\ subject to the conditions given in \eqref{1}.\\ We prove this lower bound by following a different approach from the one adopted by Berger, Biskup, Hoffman and Kozma \cite{berger} to prove \twoeqref{lower-bd}{2.4}. In fact, they prove that in a box of side length~$\ell_n$ there exists a configuration where a strong bond with conductance of order 1 is separated from other sites by bonds of strength~$1/n$, and (at least) one of these ``weak'' bonds is connected to the origin by a ``strong'' path not leaving the box. Then the probability that the walk is back to the origin at time~$n$ is bounded below by the probability that the walk goes directly towards the above pattern (this costs $ e^{O(\ell_n)}$ of probability) then crosses the weak bond (which costs~$1/n$), spends time $n-2\ell_n$ on the strong bond (which costs only $O(1)$ of probability), then crosses a weak bond again (another factor of~$1/n$) and then heads towards the origin to get there on time (another $ e^{O(\ell_n)}$ term). The cost of this strategy is $O(1) e^{O(\ell_n)}n^{-2}$ so if $\ell_n=o(\log n)$ then we get leading order~$n^{-2}$.\\ Our method for proving Theorem \ref{th} is, in fact, simple: we note that, due to the reversibility of the walk and a good use of the Cauchy-Schwarz inequality, one does not need to condition on the exact path of the walk, but rather show that the walker has a relatively large probability of staying within a small box around the origin. Our objective will consist in showing that, for almost every~$\omega$, the probability that the random walk, when started at the origin, is at time~$n$ inside the box~$B_{n^{\delta}}=[-3n^{\delta},3n^{\delta}]^{d}$ is greater than~$c/n$ (where $c$ is a constant and $\delta=\delta(\gamma)\downarrow 0$). Hence we will get $P^{2n}_{\omega}(0,0)/\pi_\omega(0) \geq c/n^{2+\delta d}$ by virtue of the following inequality which, for almost every environment~$\omega$, derives from the reversibility of $X$, the Cauchy-Schwarz inequality and \eqref{1}~: \begin{eqnarray} \label{minun} \frac{P^{2n}_{\omega}(0,0) }{\pi_\omega(0)} &\geq& \sum_{y\in B_{n^{\delta}}}\frac{P^{n}_{\omega}(0,y)^{2}}{\pi_\omega(y)} \nonumber\\ &\geq& \left(\sum_{y\in B_{n^{\delta}}}P^{n}_{\omega}(0,y)\right)^{2} \frac{1}{\pi_\omega(B_{n^{\delta}})} \nonumber\\ &\geq& \frac{P^{\omega}_{0}(X_{n}\in B_{n^{\delta}})^{2}}{\# B_{n^{\delta}}}. \end{eqnarray} In order to do this, our strategy is to show that the random walk meets a \textit{trap}, with positive probability, before getting out from $[-3n^{\delta},3n^{\delta}]^{d}$, where, by definition, a trap is an edge of conductance of order $1$ that can be reached only by crossing an edge of order $1/n$. The random walk, being imprisoned in the trap inside the box $[-3n^{\delta},3n^{\delta}]^{d}$, will then, with positive probability, not get out of this box before time~$n$. Then the Markov property yields $P^{\omega}_{0}(X_n\in [-3n^{\delta},3n^{\delta}]^{d})\geq c/n$. Thus, we will be led to follow the walk until it finds a specific configuration in the environment. First, we will need to prove one lemma. Let $B_{N}=[-3N,3N]^{d}$ be the box centered at the origin and of radius $3N$ and define $\partial B_{N}$ to be its inner boundary, that is, the set of vertices in $B_N$ which are adjacent to some vertex not in $B_N$. We have $\#B_N\leq (7N)^{d}$. Let~$H_{0}=0$ and define $H_{N}$, $N\geq1$, to be the hitting time of $\partial B_{N}$, i.e. \begin{displaymath} H_{N}=\inf \{n\geq0:X_{n}\in \partial B_{N}\}.
\end{displaymath} The box~$B_{N}$ being finite for $N$ fixed, we have then $H_{N}<\infty$ a.s., \mbox{$\forall N\geq 1$.} Let $\hat{e}_{i}, \, i=1,\ldots, d$, denote the canonical unit vectors in $\Z^{d}$, and let $x\in \Z^{d}$, with $x:=(x_{1},\ldots,x_{d})$. Define $i_{0}:=\max\{i:\vert x_{i}\vert\geq\vert x_{j}\vert, \forall j\neq i\}$ and let $\epsilon (x): \Z^{d}\rightarrow \{-1,1\}$ be the function such that \begin{displaymath} \epsilon (x)= \begin{cases} +1 & \text{if } x_{i_{0}}\geq 0 \\ -1 & \text{if } x_{i_{0}}<0 \end{cases} \end{displaymath} Now, let $\alpha, \xi$ be positive constants such that $\Q(\omega_{b}\geq\xi)>0$. Define $\AAA_{N}(x)$ to be the event that the configuration near $x, y=x+\epsilon(x)\hat{e}_{i_{0}}$ and $z=x+2\epsilon(x)\hat{e}_{i_{0}}$ is as follows: \begin{enumerate} \item $\frac{1}{2} N^{-\alpha}< \omega_{xy}\leq N^{-\alpha}$. \item $\omega_{yz}\geq\xi$. \item every other bond emanating out of $y$ or $z$ has $\omega_{b}\leq N^{-\alpha}$. \end{enumerate} The event~$\AAA_{N}(x)$ so constructed involves a collection of $4d-1$ bonds that will be denoted by $\CC(x)$, i.e. \begin{eqnarray*} \begin{split} & \CC(x):=\{[x,y],[y,z],[y,y^i],[z,z^i],[z,z^i_0]; y=x+\epsilon(x)\hat{e}_{i_{0}},z=x+2\epsilon(x)\hat{e}_{i_{0}},\\ & \qquad \qquad \qquad \qquad\qquad \qquad y^i=y\pm\hat{e}_{i}, z^i=z\pm\hat{e}_{i},\forall i\neq i_0,z^i_{0}=z+\epsilon(x)\hat{e}_{i_0} \} \end{split} \end{eqnarray*} Let us note that if $x\in \partial B_N$, for some $N\geq 1$, the collection $\CC(x)$ is outside the box~$B_N$ and if $y\in \partial B_K$, for $K\neq N$, we have $\CC(x)\cap \CC(y)=\emptyset$. \\ If the bonds of the collection $\CC(x)$ satisfy the conditions of the event $\AAA_{N}(x)$, we agree to call it a \textit{trap} that we will denote by $\mathfrak{P}_{N}$. The lemma says then that~: \begin{lemma} \label{I} The family $\{\AAA^{k}_{N}=\AAA_{N}(X_{H_{k}})\}^{N-1}_{k=0}$ is $\mathbb{P}$-independent for each $N$. \end{lemma} \begin{proof} The occurrence of the event $\AAA_{N}(X_{H_{k}})$ means that the random walk $X$ has met a trap~$\mathfrak{P}_{N}$ situated outside of the box~$B_{k}$ when it has hit for the first time the boundary of the box~$B_{k}$. Let $q_{N}$ be the $\Q$-probability of having the configuration of the trap $\mathfrak{P}_{N}$. We have $q_{N}=\Q(\AAA_{N}(x))=\mathbb{P}[\AAA_{N}(X_{H_{k}})],\, \forall x\in \partial B_{k}$ and $\forall k\leq N-1$. Indeed, by virtue of the i.i.d. character of the conductances and the Markov property, when the random walk hits the boundary of $B_{k}$ for the first time at some element~$x$, the probability that the collection $\CC(x)$ constitutes a trap, i.e., satisfies the conditions of the event $\AAA_N(x)$, depends only on the edges of the collection $\CC(x)$, which have not been visited before. \\ Let $k_{1}< k_{2}\leq N-1$ and $x\in \partial B_{k_{2}}$, we have then \begin{eqnarray*} \Prob\left[\AAA^{k_{1}}_{N}, X_{H_{k_{2}}}=x,\AAA^{k_{2}}_{N}\right] &=& \Prob\left[\left\{\AAA^{k_{1}}_{N}, X_{H_{k_{2}}}=x\right\}\cap\AAA_N(x)\right]\\ &=& \Prob\left[\AAA^{k_{1}}_{N}, X_{H_{k_{2}}}=x\right]\Prob\left[\AAA_N(x)\right]\\ &=& q_N\Prob\left[\AAA^{k_{1}}_{N}, X_{H_{k_{2}}}=x\right], \end{eqnarray*} since the events $\{\AAA^{k_{1}}_{N}, X_{H_{k_{2}}}=x\}$ and $\AAA_{N}(x)$ depend respectively on the conductances of the bonds of $B_{k_{2}}$ and the conductances of the bonds of the collection $\CC(x)$ which is situated outside the box $B_{k_{2}}$ when $x\in \partial B_{k_{2}}$. 
Thus \begin{eqnarray*} \mathbb{P}\left[\AAA^{k_{1}}_{N}\AAA^{k_{2}}_{N}\right] &=& \sum_{x\in \partial B_{k_{2}}}\Prob\left[\AAA^{k_{1}}_{N}, X_{H_{k_{2}}}=x,\AAA^{k_{2}}_{N}\right] \\ &=& q_{N}\sum_{x\in \partial B_{k_{2}}}\Prob\left[\AAA^{k_{1}}_{N}, X_{H_{k_{2}}}=x\right] \\ &=& q_{N}\mathbb{P}\left[\AAA^{k_{1}}_{N}\right]=q^{2}_{N}. \end{eqnarray*} With some adaptations, this reasoning remains true in the case of more than two events $\AAA^{k}_{N}$. \end{proof} We come now to the proof of Theorem~\ref{th}.\\ \begin{proofsect}{Proof of Theorem~\ref{th}} Let $d\geq5$ and $\gamma>0$. Set $\alpha=\frac{1-\epsilon}{(4d-2)\gamma}$ for arbitrary positive constant $\epsilon<1$ (the constant $\alpha$ is the same used in the definition of the event $\AAA_ N(x)$). As seen before (cf. \eqref{minun}), for almost every environment~$\omega$, the reversibility of $X$, Cauchy-Schwarz inequality and \eqref{1} give \begin{equation} \label{minun2} \frac{P^{2n}_{\omega}(0,0) }{\pi_\omega(0)} \geq \frac{P^{\omega}_{0}(X_{n}\in B_{n^{1/\alpha}})^{2}}{\# B_{n^{1/\alpha}}}, \end{equation} By the assumption \eqref{1} on the conductances and the definition of the event $\AAA_N (x)$, the probability of having the configuration of the trap $\mathfrak{P}_{N}$ is greater than $cN^{-(1-\epsilon)}$ (where $c$ is a constant that we use henceforth as a generic constant). Indeed, when~$N$ is large enough, we have \begin{eqnarray*} q_{N} &=& \Q\left(\frac{1}{2} N^{-\alpha}< \omega_{xy}\leq N^{-\alpha}\right) \Q(\omega_{yz}\geq\xi) \left[\Q(\omega_{b}\leq N^{-\alpha})\right]^{4d-3} \geq \frac{c}{N^{1-\epsilon}}. \end{eqnarray*} Consider now the following event $$ \Lambda_{N}:=\bigcup^{N-1}_{k=0}\AAA^{k}_{N}. $$ The event~$\Lambda_{N}$ so defined may be interpreted as follows~: \textit{at least, one among the $N$ disjoint collections $\CC(X_{H_{k}}),\, k\leq N-1$, constitutes a trap $\mathfrak{P}_{N}$}. The events $\AAA^{k}_{N}$ being independent by lemma \ref{I}, we have \begin{eqnarray} \label{7} \mathbb{P}[\Lambda^{c}_{N}] &\leq& \left(1-cN^{\epsilon-1}\right)^{N} \nonumber \\ &\leq& \exp\left\{N\log\left(1-cN^{\epsilon-1}\right)\right\}\nonumber \\ &\leq& \exp\left\{-cN^{\epsilon}\right\}. \end{eqnarray} Chebychev inequality and \eqref{7} then give \begin{equation} \label{cantelli} \sum^{\infty}_{N=1}\Q\left\{\omega: P^{\omega}_{0}(\Lambda^{c}_{N})\geq 1/2\right\} \leq 2\sum^{\infty}_{N=1}\mathbb{P}[\Lambda^{c}_{N}]<+\infty. \end{equation} It results by Borel-Cantelli lemma that for almost every $\omega$, there exists $N_{0}\geq1$ such that for each $N\geq N_{0}$, the event $\AAA_{N}(x)$ occurs inside the box $B_{N}$ with positive probability (greater than~$1/2$) on the path of $X$, for some $x\in B_{N-1}$. For almost every~$\omega$, one may say that $X$ meets with positive probability a trap $\mathfrak{P}_{N}$ at some site $x\in B_{N-1}$ before getting outside of $B_{N}$. Suppose that~$N\ge N_0$ and let~$n$ be such that~$N^{\alpha}\leq n<(N+1)^{\alpha}$. Define $$ D_{N}:= \left\{ \begin{array}{ll} \inf\{k\leq N-1: \AAA^{k}_{N}\,\text{occurs}\} & \text{if} \quad \Lambda_{N}\,\text{occurs}\\ +\infty & \text{otherwise}, \end{array} \right. $$ to be the rank of the first among the~$N$ collections $\CC(X_{H_{k}}),\, k\leq N-1$, that constitutes a trap $\mathfrak{P}_{N}$. If $D_{N}=k$, the random variable~$D_{N}$ so defined depends only on the steps of $X$ up to time~$H_{k}$. Thus, if $D_{N}=k$, we have $X_{H_{k}}\in B_{N-1}$ and $\CC(X_{H_{k}})$ constitutes a trap $\mathfrak{P}_{N}$. 
So, if we set $X_{H_{k}}=x$, the bond~$[x,y]$ (of the trap $\mathfrak{P}_{N}$) will have then a conductance of order $N^{-\alpha}$. In this case, the probability for the random walk, when started at~$X_{H_{k}}=x$, to cross the bond $[x,y]$ is by the property (1) of the definition of the event~$\AAA_N(x)$ above greater than \begin{equation} \label{b1} \frac{(1/2)N^{-\alpha}}{\pi_{\omega}(x)}\geq \frac{1/2}{2dN^{\alpha}}= \frac{1}{4dN^{\alpha}}. \end{equation} Here we use the fact that $\pi_{\omega}(x)\leq 2d$ by virtue of \eqref{1}. This implies by the Markov property and by \eqref{b1} that \begin{equation} \label{if} \begin{split} & P^{\omega}_{0}(X_{n}\in B_{N}|D_{N}\leq N-1)\\ & \qquad =\sum^{N-1}_{k=0}\sum_{x\in B_{k}}\frac{P^{\omega}_{0}(X_{n}\in B_{N},D_{N}=k, X_{H_{k}}=x)}{P^{\omega}_{0}(D_{N}\leq N-1)} \\ &\qquad\geq \sum^{N-1}_{k=0}\sum_{x\in B_{k}}\frac{P^{\omega}_{0}(H_{N}\geq n, D_{N}=k, X_{H_{k}}=x)}{P^{\omega}_{0}(D_{N}\leq N-1)} \\ & \qquad \geq \sum^{N-1}_{k=0}\sum_{x\in B_{k}}\frac{P^{\omega}_{0}(D_{N}=k, X_{H_{k}}=x)}{P^{\omega}_{0}(D_{N}\leq N-1)} P^{\omega}_{x}(H_{N}\geq n)\\ & \qquad \geq \sum^{N-1}_{k=0}\sum_{x\in B_{k}}\frac{P^{\omega}_{0}(D_{N}=k, X_{H_{k}}=x)}{P^{\omega}_{0}(D_{N}\leq N-1)} P^{\omega}_{y}(H_{N}\geq n)P^{\omega}_{x}(X_{1}=y)\\ &\qquad \geq \frac{1}{4dN^{a}}\sum^{N-1}_{k=0}\sum_{x\in B_{k}}\frac{P^{\omega}_{0}(D_{N}=k, X_{H_{k}}=x)}{P^{\omega}_{0}(D_{N}\leq N-1)}P^{\omega}_{y}(H_{N}\geq n) \\ & \qquad \geq \frac{1}{4dn}\sum^{N-1}_{k=0}\sum_{x\in B_{k}}\frac{P^{\omega}_{0}(D_{N}=k, X_{H_{k}}=x)}{P^{\omega}_{0}(D_{N}\leq N-1)}P^{\omega}_{y}(H_{N}\geq n). \end{split} \end{equation} If the trap $\mathfrak{P}_{N}$ retains enough the random walk~$X$, we will have $ H_{N}\geq n$, when it starts at $y$ (always the same $y=x+\epsilon(x)\hat{e}_{i_{0}}$ of the collection $\CC(x)$). Let \begin{displaymath} E_N:=\bigcup^{n-1}_{j=0}\left\{X_{j}\, \text{\textit{steps outside of the trap}} \,\mathfrak{P}_{N}\right\} \end{displaymath} and we say ``\textit{$X_{j}$ steps outside of the trap $\mathfrak{P}_{N}$ }", when $X_{j+1}$ is on a site of the border of the trap $\mathfrak{P}_{N}$, i.e. $X_{j+1}=y\pm\hat{e}_{i}$, $\forall i\neq i_0$, or $X_{j+1}=x$ (resp. $X_{j+1}=z\pm\hat{e}_{i}$, $\forall i\neq i_0$, or $X_{j+1}=z+\epsilon (z)\hat{e}_{i_0}$) if $X_{j}=y$ (resp. if $X_j=z$). The complement of $E_N$ is in fact the event that $X$ does not leave the trap during its first $n$ jumps, i.e. $X$ jumps $n$ times, starting at $y$, in turn on $z$ and $y$, which, according to the configuration of the trap, costs for each jump a probability greater than $$ \frac{\xi}{\xi+(2d-1)N^{-\alpha}}. $$ Then, we have by the Markov property $$ P^{\omega}_{y}(H_{N}\geq n)\geq P^{\omega}_{y}(E^c_N)\geq \left(\frac{\xi}{\xi+(2d-1)N^{-\alpha}}\right)^n, $$ and since by the choice of $N^{\alpha}\leq n<(N+1)^{\alpha}$ $$ \left(\frac{\xi}{\xi+(2d-1)N^{-\alpha}}\right)^n \xrightarrow[n \to +\infty]{} e^{-(2d-1)/\xi}, $$ it follows for all~$N$ large enough that \begin{equation} P^{\omega}_{y}(H_{N}\geq n)\geq\frac{ e^{-(2d-1)/\xi}}{2}. \end{equation} So, putting this in \eqref{if}, we obtain \begin{eqnarray*} P^{\omega}_{0}(X_{n}\in B_{N}|D_{N}\leq N-1) &\geq & \frac{e^{-(2d-1)/\xi}}{8dn}\sum^{N-1}_{k=0}\sum_{x\in B_{N-1}}\frac{P^{\omega}_{0}(D_{N}=k, X_{H_{k}}=x)}{P^{\omega}_{0}(D_{N}\leq N-1)} \\ &\geq& \frac{e^{-(2d-1)/\xi}}{8d n}. \end{eqnarray*} Now, according to \eqref{cantelli}, we have $P^{\omega}_{0}(D_{N}\leq N-1)\geq \ffrac{1}{2}$. 
Then we deduce $$ P^{\omega}_{0}(X_{n}\in B_{N})\geq P^{\omega}_{0}(X_{n}\in B_{N}|D_{N}\leq N-1)P^{\omega}_{0}(D_{N}\leq N-1)\geq \frac{e^{-(2d-1)/\xi}}{16d n}. $$ A fortiori, we have $$ P^{\omega}_{0}(X_{n}\in B_{n^{1/\alpha}})\geq P^{\omega}_{0}(X_{n}\in B_N)\geq \frac{e^{-(2d-1)/\xi}}{16 d n}. $$ Thus, for all $N\geq N_{0}$, by replacing the last inequality in \eqref{minun2}, we obtain $$ P^{2n}_{\omega}(0,0)\geq \frac{\pi(0)\left(e^{-(2d-1)/\xi}/16d\right)^{2}7^{-d}}{n^{2+\delta(\gamma)}}. $$ where $\delta(\gamma):=d(4d-2)\gamma/(1-\epsilon)$. When we let $\epsilon\longrightarrow 0$, we get \eqref{esup}. \end{proofsect} \secdef\sct\sect{\textbf{Standard heat-kernel decay}} \label{shd} We give here the proof of Theorem~\ref{thm}. Let us first give some definitions and fix some notations besides those seen before. Consider a Markov chain on a countable state-space~$V$ with transition probability denoted by $\cmss P(x,y)$ and invariant measure denoted by~$\pi$. Define~$\cmss Q(x,y)=\pi(x)\cmss P(x,y)$ and for each~$S_1,S_2\subset V$, let \begin{equation} \label{QSS} \cmss Q(S_1,S_2)=\sum_{x\in S_1}\sum_{y\in S_2}\cmss Q(x,y). \end{equation} For each~$S\subset V$ with~$\pi(S)\in(0,\infty)$ we define \begin{equation} \label{PhiS} \Phi_S=\frac{\cmss Q(S,S^c)}{\pi(S)} \end{equation} and use it to define the isoperimetric profile \begin{equation} \label{Phi-inf} \Phi(r)=\inf\bigl\{\Phi_S\colon \pi(S)\le r\bigr\}. \end{equation} (Here~$\pi(S)$ is the measure of~$S$.) It is easy to check that we may restrict the infimum to sets~$S$ that are connected in the graph structure induced on~$V$ by $\cmss P$. To prove Theorem \ref{thm}, we combine basically two facts. On the one hand, we use Theorem~2 of Morris and Peres~\cite{MP} that we summarize here~: Suppose that~$\cmss P(x,x)\ge\sigma$ for some~$\sigma\in(0,1/2]$ and all~$x\in V$. Let~$\epsilon>0$ and~$x,y\in V$. Then \begin{equation} \label{MP-bound} \cmss P^n(x,y)\le\epsilon\pi(y) \end{equation} for all~$n$ such that \begin{equation} \label{LK-bound} n\ge 1+\frac{(1-\sigma)^2}{\sigma^2}\int_{4[\pi(x)\wedge\pi(y)]}^{4/\epsilon}\frac4{u\Phi(u)^2}\,\text d u. \end{equation} Let $B_{N+1}=[-(N+1),N+1]^d$ and $\BB_{N+1}$ denote the set of nearest-neighbor bonds of $B_{N+1}$, i.e., $\BB_{N+1}=\{b=(x,y): x,y\in B_{N+1}, x\sim y\}$. Call $\Z^d_e$ the set of even points of $\Z^d$, i.e., the points $x:=(x_1,\ldots,x_d)$ such that $\vert\sum^{d}_{i=1}x_i\vert=2k$, with $k\in\N$ ($0\in \N$), and equip it with the graph structure defined by~: two points $x,y\in \Z^d_e\subset\Z^d$ are neighbors when they are separated in $\Z^d$ by two steps, i.e. $$ \sum^{d}_{i=1}\vert x_i-y_i\vert=2. $$ We operate the following modification on the environment~$\omega$ by defining $\tilde{\omega}_b=1$ on every bond $b\notin\BB_{N+1}$ and $\tilde{\omega}_b=\omega_b$ otherwise. Then, we will adapt the machinery above to the following setting \begin{equation} V=\Z^d_e,\quad\cmss P= P^2_{\tilde{\omega}}\quad\text{and}\quad\pi=\pi_{\tilde{\omega}}, \end{equation} with the objects in \twoeqref{QSS}{Phi-inf} denoted by~$\cmss Q_{\tilde{\omega}}$, $\Phi_S^{({\tilde{\omega}})}$ and~$\Phi_{\tilde{\omega}}(r)$. So, the random walk associated with $P^2_{\tilde{\omega}}$ moves on the even points. On the other hand, we need to know the following standard fact that gives a lower bound of the conductances of the box $B_{N}$. For a proof, see \cite{Fontes-Mathieu}, Lemma~3.6. 
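To make the above machinery concrete, the following toy sketch evaluates the quantities \twoeqref{QSS}{Phi-inf} and the resulting bound \eqref{LK-bound} for a lazy random walk on a small weighted cycle. The graph, the laziness $\sigma=1/2$ and all numerical values are illustrative choices only and have nothing to do with the environment $\tilde{\omega}$; the point is merely to show how the isoperimetric profile enters the estimate of Morris and Peres.
\begin{verbatim}
import itertools
import numpy as np

rng = np.random.default_rng(1)
V = 10                                   # a weighted cycle as a toy state space
w = rng.uniform(0.1, 1.0, size=V)        # conductance of the edge (i, i+1 mod V)

C = np.zeros((V, V))
for i in range(V):
    C[i, (i + 1) % V] = C[(i + 1) % V, i] = w[i]
pi = C.sum(axis=1)
P = 0.5 * (np.eye(V) + C / pi[:, None])  # lazy walk, so P(x, x) >= sigma = 1/2
Q = pi[:, None] * P                      # Q(x, y) = pi(x) P(x, y), cf. (QSS)

subsets = [S for k in range(1, V) for S in itertools.combinations(range(V), k)]
piS, phiS = [], []
for S in subsets:
    Sc = [x for x in range(V) if x not in S]
    piS.append(pi[list(S)].sum())
    phiS.append(Q[np.ix_(list(S), Sc)].sum() / pi[list(S)].sum())   # Phi_S of (PhiS)
piS, phiS = np.array(piS), np.array(phiS)

def Phi(r):                              # isoperimetric profile (Phi-inf)
    m = piS <= r
    return phiS[m].min() if m.any() else np.inf

eps, sigma = 0.05, 0.5
u = np.linspace(4 * pi.min(), 4 / eps, 4000)        # lower limit: smallest 4*pi(x)
f = 4.0 / (u * np.array([Phi(t) for t in u]) ** 2)
integral = np.sum(0.5 * (f[1:] + f[:-1]) * np.diff(u))
n_star = 1 + ((1 - sigma) / sigma) ** 2 * integral  # right-hand side of (LK-bound)
print("P^n(x, y) <= eps * pi(y) once n >=", n_star)
\end{verbatim}
In the proof of Theorem~\ref{thm} this brute-force minimization is of course replaced by the isoperimetric bound of Lemma~\ref{lemma-adapt}.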
\begin{lemma} \label{L} Under assumption~\eqref{1}, \begin{equation} \label{LL} \lim_{N\rightarrow+\infty}\frac{\log\inf_{b\in\BB_{N}}\omega_b}{\log N}=-\frac{d}{\gamma},\qquad \Q-a.s. \end{equation} \end{lemma} Thus, for arbitrary $\mu>0$, we can write $\Q-$a.s., for all $N$ large enough \begin{equation} \label{mu} \inf_{b\in\BB_{N+1}}\omega_b\geq N^{-(\frac{d}{\gamma}+\mu)}. \end{equation} Our next step involves extraction of appropriate bounds on surface and volume terms. \begin{lemma} \label{lemma-adapt} Let~$d\ge2$ and set $\alpha(N):=N^{-(\frac{d}{\gamma}+\mu)}$, for arbitrary $\mu>0$. Then, for a.e. $\omega$, there exists a constant~$c>0$ such that the following holds: For $N$ large enough and any finite connected~$\Lambda\subset \Z^d_e$, we have \begin{equation} \label{Q-actual} \cmss Q_{\tilde{\omega}}(\Lambda,\Z^d_e\setminus\Lambda)\ge c \alpha(N)^2\pi_{\tilde{\omega}}(\Lambda)^{\frac{d-1}d}. \end{equation} \end{lemma} The proof of lemma \ref{lemma-adapt} will be a consequence of the following well-known fact of isoperimetric inequalities on $\Z^d$ (see \cite{Woess}, Chapter I, \S~4). For any connected~$\Lambda\subset\Z^d$, let~$\partial\Lambda$ denote the set of edges between~$\Lambda$ and~$\Z^d\setminus\Lambda$. Then, there exists a constant $\kappa$ such that \begin{equation} \label{ii} |\partial\Lambda|\ge \kappa|\Lambda|^{\frac{d-1}{d}} \end{equation} for every finite connected $\Lambda\subset\Z^d$. This remains true for $\Z^d_e$. \begin{proofsect}{Proof of lemma~\ref{lemma-adapt}} For some arbitrary $\mu>0$, set $\alpha:=\alpha(N)=N^{-(\frac{d}{\gamma}+\mu)}$ and let~$N\gg1$. For any finite connected~$\Lambda\subset \Z^d_e$, we claim that \begin{equation} \label{Q-bd} {\cmss Q}_{\tilde{\omega}}(\Lambda,\Z^d_e\setminus\Lambda)\ge \frac{\alpha^2}{2d}\,| \partial\Lambda| \end{equation} and \begin{equation} \label{vol-bd} \pi_{\tilde{\omega}}(\Lambda)\le 2d|\Lambda|. \end{equation} Then, Lemma~\ref{L} gives a.s. $\inf_{b\in \BB_N}\omega(b)>\alpha$ and by virtue of \eqref{ii}, we have $|\partial\Lambda|\ge \kappa|\Lambda|^{\frac{d-1}{d}}$, then~\eqref{Q-actual} will follow from \twoeqref{Q-bd}{vol-bd}. It remains to prove \twoeqref{Q-bd}{vol-bd}. The bound \eqref{vol-bd} is implied by~$\pi_{\tilde{\omega}}(x)\le2d$. For \eqref{Q-bd}, since~$P^2_\omega$ represents two steps of a random walk, we get a lower bound on~$\cmss Q_\omega(\Lambda,\Z^d_e\setminus\Lambda)$ by picking a site~$x\in\Lambda$ which has a neighbor~$y\in\Z^d$ that has a neighbor~$z\in\Z^d_e$ on the outer boundary of~$\Lambda$. By Lemma~\ref{L}, if $x$ or $z\in B_{N+1}$, the relevant contribution is bounded by \begin{equation} \label{aa} \pi_{\tilde{\omega}}(x) P^2_{\tilde{\omega}}(x,z)\ge\pi_{\tilde{\omega}}(x)\frac{\tilde{\omega}_{xy}}{\pi_{\tilde{\omega}}(x)}\frac{\tilde{\omega}_{yz}}{\pi_{\tilde{\omega}}(y)}\ge\frac{\alpha^2}{2d}. \end{equation} For the case where $x,z\notin\Z^d_e\cap B_{N+1}$, clearly the left-hand side of \eqref{aa} is bounded by $1/(2d)>\alpha^{2}/(2d)$. Once~$\Lambda$ has at least two elements, we can do this for~$(y,z)$ ranging over all bonds in~$\partial\Lambda$, so summing over $(y,z)$ we get~\eqref{Q-bd}. \end{proofsect} Now we get what we need to estimate the decay of $P^{2n}_\omega(0,0)$. \begin{proofsect}{Proof of Theorem~\ref{thm}} Let $d\geq5$, $\gamma>8d$ and choose $\mu>0$ such that $$ \mu<\frac{1}{8}-\frac{d}{\gamma}. $$ Let $n=\lfloor N/2\rfloor$, $N\gg1$, and consider the random walk on $\tilde{\omega}$. 
We will derive a bound on~$\Phi_\Lambda^{({\tilde{\omega}})}$ for connected~$\Lambda\subset \Z^d_e$. Henceforth~$c$ denotes a generic constant. Observe that \eqref{Q-actual} implies \begin{equation} \Phi_\Lambda^{({\tilde{\omega}})}\ge c\alpha^2\pi_{\tilde{\omega}}(\Lambda)^{-1/d}. \end{equation} Then, we conclude that \begin{equation} \Phi_{\tilde{\omega}}(r)\ge c \alpha^2r^{-1/d} \end{equation} The relevant integral is thus bounded by \begin{eqnarray} \frac{(1-\sigma)^2}{\sigma^2}\int_{4[\pi(0)\wedge \pi(x)]}^{4/\epsilon}\frac{4}{u\Phi_{\tilde{\omega}}(u)^2}\,\text d u &\le& c\alpha^{-4}\sigma^{-2}\epsilon^{-2/d} \end{eqnarray} for some constant~$c>0$. Setting~$\epsilon$ proportional to $n^{\frac{4d^2}{\gamma}+4\mu d-\frac{d}{2}}$, and noting \mbox{$\sigma\ge\alpha^2/(2d)$}, the right-hand side is less than~$n$ and by setting $\delta(\gamma)=4d^2/\gamma$, we will get \begin{equation} \label{bb} P^{2n}_{\tilde{\omega}}(0,x)\leq \frac{c}{n^{\frac{d}{2}-\delta(\gamma)-4\mu d}},\qquad \forall x\in \Z^d_e. \end{equation} As the random walk will not leave the box $B_N$ by time $2n$, we can replace ${\tilde{\omega}}$ by $\omega$ in \eqref{bb}, and since $P^{2n}_\omega(0,x)=0$ for each $x\notin B_N$, then after letting $\mu\rightarrow0$, we get $$ \limsup_{n\rightarrow+\infty}\sup_{x\in\Z^d}\frac{\log P^{2n}_{\omega}(0,x)}{\log n}\leq -\frac{d}{2}+\delta(\gamma). $$ This proves the claim for even~$n$; for odd~$n$ we just concatenate this with a single step of the random walk. \end{proofsect} \secdef\sct\sect*{Acknowledgments} \noindent I express my gratitude to my father Youcef Bey. I wish to thank my Ph.D. advisor, Pierre Mathieu for suggesting and discussions on this problem, and Abdelatif Bencherif-Madani for his support. I also would like to thank the referees for their careful reading and comments that led to an improvement of the paper. \end{document}
\begin{document} \title{Bound Genuine Multisite Entanglement: Detector of Gapless-Gapped Quantum Transitions in Frustrated Systems} \author{Aditi Sen(De) and Ujjwal Sen} \affiliation{Harish-Chandra Research Institute, Chhatnag Road, Jhunsi, Allahabad 211 019, India} \begin{abstract} We define a multiparty entanglement measure, called generalized geometric measure, that can detect and quantify genuine multiparty entanglement for any number of parties. The quantum phase transitions in exactly solvable models like the anisotropic XY model can be detected by this measure. We find that the multisite measure can be a useful tool to detect quantum phenomena in more complex systems like quasi 2D and 2D frustrated Heisenberg antiferromagnets. We propose an order parameter, called bound generalized geometric measure, in the spirit of bound quantum states, that can recognize the gapless and gapped phases of the frustrated models by its sign. The vanishing of the order parameter therefore signals the transition between such phases. \end{abstract} \maketitle \section{Introduction and Main Results} The rapid development of the theory of entanglement over the last decade or so \cite{HHHH-RMP}, and its usefulness in communication systems and computational devices, as well as the experimental observations of entangled states in a variety of distinct physical systems \cite{ref-boi}, have attracted a lot of attention from different branches of physics, including condensed matter and ultra-cold gases \cite{ref-reviews1, ref-reviews2}. It has been argued that entanglement can be used as a ``universal detector'' of quantum phase transitions, with most of the studies being on the behavior of \emph{bipartite} entanglement \cite{Wootters, logneg}. A more natural way to study the many-body systems would be to consider multipartite entanglement, as almost all naturally occurring multisite quantum states are genuinely multi-party entangled. Such an enterprise is however limited by the intricate nature of entanglement theory in the multisite scenario. In particular, only a few multisite entanglement measures are known, and moreover their computation are difficult \cite{HHHH-RMP}. Multipartite states can have different hierarchies according to their entanglement quality and quantity. The simplest example is for three-particle states, where there are fully separable, biseparable, and genuine multipartite entangled states. A measure of genuine multiparty entanglement, quantifies, so to say, the ``purest'' form multiparty entanglement. In this paper, we define an entanglement measure, called generalized geometric measure (GGM), that can detect and quantify \emph{genuine} multiparticle entanglement. Interestingly, the measure is computable for arbitrary pure states of multiparty systems in arbitrary dimensions and arbitrary number of parties, and therefore can turn out to be a useful tool to detect quantum many-body phenomena, like quantum phase transitions. In this respect, GGM has the potential of gaining the same status in applications of multiparty entanglement theory, as that of logarithmic negativity \cite{logneg} in the bipartite domain. As an initial testing ground, we use the GGM to successfully detect quantum phase transitions in the anistropic XY model on a chain of spin-1/2 particles \cite{Barouch-McCoy}. Our main aim however is to apply the measure to states of frustrated spin systems, for which the phase diagrams are not exactly known. 
Frustrated many-body systems are a center of interest in condensed matter physics due to the typically rich and novel phase diagrams in such systems. Moreover, experimental realizations of many metal oxides, including those exhibiting high-Tc superconductivity, typically have frustrated interactions in their Hamiltonians \cite{snajh-er-jor,5mlParacetamol-diyechhi}. As paradigmatic representatives of such systems, we consider (i) the quasi 2D antiferromagnetic \(J_1-J_2\) Heisenberg model with nearest neighbor couplings, \(J_1\), and next-nearest neighbor couplings, \(J_2\) \cite{Majumdar-Ghosh, White-Affleck, Mikeska}, and (ii) the frustrated \(J_1-J_2\) model on a square lattice \cite{snajh-er-jor} (see Fig. 1). \begin{figure} \caption{(Color online.) Two-dimensional \(J_1-J_2\) model, with vertical and horizontal couplings, \(J_1\), and diagonal couplings, \(J_2\). The predicted phase diagram is also schematically shown.} \label{fig-chhobi} \end{figure} For studying such systems, we introduce an order parameter which is the difference between the GGM (\({\cal E}\)) and its second derivative with respect to the system parameter, \(\mu\), that drives the transitions in the system. We call this quantity the ``bound GGM''; it is given by \begin{equation} {\cal E}_B \equiv {\cal E} - \frac{d^2 {\cal E}}{d\mu^2}. \nonumber \end{equation} The ground state manifold of the quasi 2D \(J_1-J_2\) system is not known exactly, except at the Majumdar-Ghosh point \cite{Majumdar-Ghosh}, i.e. for \(\alpha = J_2/J_1 = 0.5 \), where the system is highly frustrated, and presents two dimer states as its ground states. However, exact diagonalization and group theoretical studies show that the system is gapless, and hence critical, in the weakly frustrated regime, namely \(0 \leq \alpha \lesssim 0.24\) \cite{Majumdar-Ghosh, White-Affleck}. For higher coupling ratio \(\alpha\), the system enters a dimerized regime, and is gapped \cite{Majumdar-Ghosh, eita-AKLT}. We study the GGM for this system by exact diagonalization, and show that the bound GGM vanishes at the fluid-dimer transition point \(\alpha \approx 0.24\). Note here that it is known that bipartite entanglement cannot detect the gapless phase \cite{bipartite_MG} (cf. \cite{Chhajlany}). We find that the bound GGM is positive in the gapless phase while it becomes negative in the gapped one. The Majumdar-Ghosh point can also be detected by the GGM. Finally, we apply our measure of genuine multisite entanglement to the ground state of the 2D Heisenberg system. As depicted in Fig. 1, the N{\' e}el and collinear ordered phases (the gapless phases) are separated by a phase having a finite gap between the singlet ground state and the excited states. We show that the bound GGM can detect both quantum phase transitions -- from the N{\' e}el phase to the dimerized one at \(\alpha \approx 0.38\), as well as from the dimer to the collinear phase at \(\alpha \approx 0.69\) -- as predicted, even for relatively small system sizes. As in the \(J_1-J_2\) ring, the positivity (negativity) of the bound GGM indicates the gapless (gapped) phase. Armed with these findings, we propose that the bound GGM can potentially be used for detecting gapped/gapless phases in many-body systems: \begin{eqnarray} \label{onek-deri-hoye-gyalo} {\cal E}_B > 0 \Rightarrow \mbox{gapless}, \quad {\cal E}_B < 0 \Rightarrow \mbox{gapped}. \end{eqnarray} This leads to an analogy with the thermodynamics of bound entanglement \cite{wwwww, HHHH-RMP}.
Analogous to the first law of thermodynamics, internal energy = free energy + work done, a thermodynamic equation of entanglement was written: Entanglement cost = distillable entanglement + bound entanglement, where the bound entanglement is the amount of entanglement necessary to keep the transition (under local quantum operations and classical communication) from becoming irreversible. As another face of this entanglement-energy analogy, a negative value of \({\cal E}_B\), assuming the thesis in Eq. (\ref{onek-deri-hoye-gyalo}), indicates that the system needs a nonzero amount of energy to free itself from its ground state. We hope that this can help us in a quantification of the first law of the emerging entanglement thermodynamics \cite{thermodynamics, HHHH-RMP}. This is the reason for calling \({\cal E}_B\) as \emph{bound} GGM \cite{qqqqq}. \section{Generalized Geometric Measure} Let us begin by defining the generalized geometric measure. As mentioned above, GGM will quantify the genuineness of multiparty entanglement. An \(N\)-party pure quantum state is said to be genuinely \(N\)-party entangled, if it is not a product across any bipartite partition. The simplest examples of genuine tripartite entangled states are the Greenberger-Horne-Zeilinger \cite{GHZ} and W \cite{W-state} states. The GGM of an \(N\)-party pure quantum state \(|\psi\rangle\) is defined as \begin{equation} {\cal E} ( |\psi\rangle ) = 1 - \Lambda^2_{max} (|\psi\rangle ), \end{equation} where \(\Lambda_{max} (|\psi\rangle ) = \max | \langle \phi|\psi\rangle |\), with the maximization being over all pure states \(|\phi\rangle\) that are not genuinely \(N\)-party entangled. Note that the maximization performed in GGM is different from the maximization in the geometric measure of Ref. \cite{GM} (cf. \cite{hierarchy}). \subsection{Properties} Clearly, \({\cal E}\) is vanishing for all pure multiparty states that are not genuine multiparty entangled, and non-vanishing for others. We considered this quantity for four-party states in Ref. \cite{amadertele}, and showed it to be a mono- tonically decreasing quantity under local quantum operations and classical communication (LOCC). Applications of GGM to quantum many-body systems requires us to find its properties for an arbitrary number of parties. Let \(|\psi\rangle\) be an \(N\)-party pure quantum state in the tensor product Hilbert space \({\cal H}_{A_1} \otimes {\cal H}_{A_2} \otimes \ldots \otimes {\cal H}_{A_N}\). Therefore, the maximization in \begin{equation}\Lambda_{\max}(|\psi\rangle_{A_1 A_2 \ldots A_N}) = \max_{|\phi\rangle_{A_1 A_2 \ldots A_N}} |\langle \phi | \psi \rangle| \end{equation} is over all pure quantum states \(|\phi\rangle_{A_1 A_2 \ldots A_N}\), in \({\cal H}_{A_1} \otimes {\cal H}_{A_2} \otimes \ldots \otimes {\cal H}_{A_N}\), that are not genuinely multiparty entangled, which is a rather large class of states. Note however, that the square of \(\Lambda_{\max}(|\psi\rangle_{A_1 A_2 \ldots A_N})\) can be interpreted as the Born probability of some outcome in a quantum measurement on the state \(|\psi\rangle\). Now, entangled measurements cannot be worse than the product ones for any set of subsystems. Therefore, in the maximization, we do not need to consider the \(|\phi\rangle_{A_1 A_2 \ldots A_N}\) that are product in a partition of \(A_1, A_2, \ldots, A_N\) into three, four, ... sets. The only \(|\phi\rangle_{A_1 A_2 \ldots A_N}\) that are to be considered are the ones that are a product in a \emph{bi}-partition of \(A_1, A_2, \ldots, A_N\). 
This greatly reduces the class over which the maximization is carried out. Let \({\cal A}: {\cal B}\) be such a bi-partition. Then, \(\max |\langle \phi | \psi \rangle|\), where the maximization is carried over the \(|\phi\rangle\) that are product across \({\cal A}: {\cal B}\), is the maximal Schmidt coefficient, \(\lambda_{{\cal A}: {\cal B}}\), of the state \(|\psi\rangle_{A_1 A_2 \ldots A_N}\) in the \({\cal A}: {\cal B}\) bipartite split. \(\Lambda_{\max}(|\psi\rangle_{A_1 A_2 \ldots A_N})\) is therefore the maximum of all such maximal Schmidt coefficients in bipartite splits. Note that the \(\lambda\)'s involved in this closed form for \(\Lambda_{max}\) are all increasing under LOCC \cite{Vidal-Nielsen}. We have therefore proven the following theorem.\\ \noindent \textbf{Theorem.} \emph{The generalized geometric measure of \(|\psi\rangle_{A_1 A_2 \ldots A_N}\) is given by \begin{equation} {\cal E}(|\psi\rangle) = 1 - \max \{\lambda^2_{{\cal A}: {\cal B}} | {\cal A} \cup {\cal B} = \{1,2,\ldots, N\}, {\cal A} \cap {\cal B} = \emptyset\}. \end{equation} It is computable for a multiparty pure state of an arbitrary number of parties, and of arbitrary dimensions. Also, it is monotonically decreasing under LOCC. } \section{Anisotropic XY model} The one-dimensional XY model with \(N\) lattice sites is described by the Hamiltonian \begin{equation} \label{eq_XY_H} H_{XY} = \frac{J}{2} \left(\sum_{i=1}^{N} (1 + \gamma) \sigma^x_i \sigma^x_{i+1} + (1 - \gamma) \sigma^y_i \sigma^y_{i+1}\right) + h \sum_{i=1}^{N} \sigma_i^z, \end{equation} where \(J\) is the coupling constant, \(\gamma \in [0,1] \) is the anistropy parameter, \(\sigma\)'s are the Pauli matrices, and \(h\) represents the magnetic field in the transverse direction. The quantum transverse Ising and the transverse XX models correspond to two extreme values of \(\gamma\), which are resepectively \(\gamma =1\) and \(\gamma =0\). This model can be diagonalized by the Jordan-Wigner transformation \cite{Barouch-McCoy}. Apart from its other interests, it is the simplest model which shows a \emph{quantum} phase transition, driven by the magnetic field, at zero temperature. It is known to be detectable by using bipartite entanglement measures \cite{fazio-Nielsen}, like concurrence \cite{Wootters}. However, evaluating GGM will additionally quantify the nature of genuine multiparty entanglement of the ground state in this model, especially as it crosses the transition point. The diagonalization of this model can be achieved by introducing the Majorana fermions \begin{equation} c_{2l -1} = (\Pi_{i=1}^{l-1} \sigma_i^z)\sigma^x_l; \quad c_{2l} = (\Pi_{i=1}^{l-1} \sigma_i^z)\sigma^y_l. \end{equation} The Hamiltonian in Eq. (\ref{eq_XY_H}) thereby reduces to a quadratic fermionic Hamiltonian \cite{Barouch-McCoy}. 
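Before continuing with the XY chain, let us note that the closed form in the Theorem above makes the GGM straightforward to evaluate numerically. The following minimal sketch (restricted to qubit parties for simplicity; the test states are illustrative) scans all bipartitions \({\cal A}:{\cal B}\), takes the largest singular value of the corresponding matricization of \(|\psi\rangle\), and returns \(1-\Lambda^2_{\max}\).
\begin{verbatim}
import itertools
import numpy as np

def ggm(psi, n):
    # GGM of an n-qubit pure state psi (vector of length 2**n):
    # 1 - max over bipartitions A:B of the largest squared Schmidt coefficient.
    t = np.asarray(psi, dtype=complex).reshape([2] * n)
    lam2_max = 0.0
    for k in range(1, n // 2 + 1):               # it suffices to scan the smaller side A
        for A in itertools.combinations(range(n), k):
            B = [s for s in range(n) if s not in A]
            M = np.transpose(t, list(A) + B).reshape(2 ** k, -1)
            lam2_max = max(lam2_max, np.linalg.svd(M, compute_uv=False)[0] ** 2)
    return 1.0 - lam2_max

# three-qubit examples: GHZ and W states
ghz = np.zeros(8); ghz[0] = ghz[7] = 1 / np.sqrt(2)
w = np.zeros(8); w[[1, 2, 4]] = 1 / np.sqrt(3)
print(ggm(ghz, 3), ggm(w, 3))                    # 0.5 and 1 - 2/3 = 0.333...
\end{verbatim}
For the GHZ and W states \cite{GHZ, W-state} this gives \(1/2\) and \(1/3\), respectively, in agreement with the largest Schmidt coefficients across single-site cuts.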
The eigenvalues of the reduced density matrix of \(L\) sites of the ground state of this system can be obtained by using the above formalism \cite{ref-reviews2}, and are given by \begin{equation} e_{x_1 x_2 \ldots x_L} = \prod_{i=1}^{L} \frac{1 + (-1)^{x_i} \nu_i}{2}, \quad x_i = 0, 1 \phantom{,} \forall i, \end{equation} where the \(\nu_i\)'s are the eigenvalues of \(G_L\), which is related to \(B_L\) through \(B_L = G_L \otimes \left[\begin{array}{cc} 0 & 1 \\ -1 & 0 \\ \end{array}\right]\), with \begin{equation} \label{beRal-er-talobya-sho} G_L=\left[ \begin{array}{cccc} g_0 & \cdot & \cdot & g_{L-1} \\ \cdot & \cdot & \cdot & \cdot\\ - g_{L-1} & \cdot & \cdot & g_{0} \\ \end{array}\right], B_L = \left[ \begin{array}{cccc} \Pi_0 & \cdot & \cdot & \Pi_{L-1} \\ \cdot & \cdot & \cdot & \cdot\\ - \Pi_{L-1} & \cdot & \cdot & \Pi_{0} \\ \end{array}\right]. \nonumber \end{equation} Here, \(\Pi_l = \left[ \begin{array}{cc} 0 & g_l \\ -g_{-l} & 0 \\ \end{array}\right]\), and the real coefficients, \(g_l\), are given by \begin{equation} g_l = \frac{1}{2 \pi} \int_{0}^{2 \pi} d\phi e^{- i l \phi} \frac{\cos \phi - \lambda - i \gamma \sin \phi}{|\cos \phi - \lambda - i \gamma \sin \phi|}, \end{equation} where \(\lambda = J/h\). The derivative of the GGM of the ground state, for different anisotropy parameters \(\gamma\), clearly shows a logarithmic divergence at the transverse field given by \(\lambda =1\), as seen in Fig. 2. Note also that the ground state of the transverse Ising model (\(\gamma =1\)) has higher genuine multipartite entanglement as compared to the ground states for other values of \(\gamma\). This result may help us to understand the success of the dynamical states of the transverse Ising model as a substrate for efficient quantum computation \cite{raus-briegel}. \begin{figure} \caption{(Color online.) GGM of the transverse XY model. The derivative of the GGM (\({\cal E}\)) with respect to \(\lambda\), for different anisotropy parameters \(\gamma\), shows a logarithmic divergence at \(\lambda=1\).} \label{fig-chhobi-ek} \end{figure} \section{Quasi 2D Frustrated \(J_1-J_2\) Model} We will now consider the frustrated quasi two-dimensional \(J_1-J_2\) Heisenberg model, in the case when both the nearest neighbor couplings, \(J_1\), and the next-nearest neighbor couplings, \(J_2\), are antiferromagnetic. Apart from its intrinsic theoretical interest, the motivation for studying this model lies in the fact that it is similar to real systems, like \(\mbox{SrCuO}_{2}\) \cite{experiment}. The Hamiltonian of this model, with \(N\) lattice sites on a chain, is \begin{equation} H_{1D} = J_1 \sum_{i=1}^{N} \vec{\sigma}_i \cdot \vec{\sigma}_{i+1} + J_2 \sum_{i=1}^N \vec{\sigma}_i \cdot \vec{\sigma}_{i+2}, \end{equation} where \(J_1\) and \(J_2\) are both positive, and where periodic boundary conditions are assumed. The ground state and the energy gap of this model were studied by using exact diagonalization, the density matrix renormalization group method, bosonization techniques, etc.\ \cite{Mikeska}. For an even number of sites, the ground state at the Majumdar-Ghosh point (\(\alpha = J_2/J_1 = 0.5\)) is doubly degenerate, and the ground state manifold is spanned by the two dimers \(|\psi_{MG}^{\pm}\rangle = \Pi_{i=1}^{N/2} (|0\rangle_{2i} |1\rangle_{2i \pm 1} - |1\rangle_{2i} |0\rangle_{2i \pm 1})\), and the model is gapped at this point \cite{Majumdar-Ghosh}. For \(\alpha=0\), the Hamiltonian reduces to the \(s=1/2\) Heisenberg antiferromagnet and hence the ground state, which is a spin fluid state having gapless excitations \cite{Griffiths-Yang}, can be obtained by the Bethe ansatz.
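For small chains, the ground state of \(H_{1D}\), its GGM, and the bound GGM can be obtained directly by dense exact diagonalization. The sketch below only indicates how data of the type shown in Fig. 3 can be generated; the chain length \(N=8\), the \(\alpha\) grid, and the crude central-difference estimate of \(d^2{\cal E}/d\alpha^2\) are illustrative choices, not those of the actual computation.
\begin{verbatim}
import itertools
import numpy as np

N = 8                                              # chain length, periodic boundary conditions
sx = np.array([[0, 1], [1, 0]], dtype=complex)
sy = np.array([[0, -1j], [1j, 0]])
sz = np.array([[1, 0], [0, -1]], dtype=complex)

def site_op(o, i):                                 # o on site i, identity elsewhere
    m = np.eye(1, dtype=complex)
    for j in range(N):
        m = np.kron(m, o if j == i else np.eye(2))
    return m

def heisenberg(i, j):                              # \vec{sigma}_i . \vec{sigma}_j
    return sum(site_op(o, i) @ site_op(o, j) for o in (sx, sy, sz))

nn  = sum(heisenberg(i, (i + 1) % N) for i in range(N))   # J_1 bonds
nnn = sum(heisenberg(i, (i + 2) % N) for i in range(N))   # J_2 bonds

def ggm(psi):                                      # closed form of the Theorem above
    t = psi.reshape([2] * N)
    best = 0.0
    for k in range(1, N // 2 + 1):
        for A in itertools.combinations(range(N), k):
            B = [s for s in range(N) if s not in A]
            M = np.transpose(t, list(A) + B).reshape(2 ** k, -1)
            best = max(best, np.linalg.svd(M, compute_uv=False)[0] ** 2)
    return 1.0 - best

alphas = np.linspace(0.05, 0.60, 23)
E = np.array([ggm(np.linalg.eigh(nn + a * nnn)[1][:, 0]) for a in alphas])
EB = E - np.gradient(np.gradient(E, alphas), alphas)      # bound GGM, with mu = alpha
for a, e in zip(alphas, EB):
    print(f"alpha = {a:.3f}   E_B = {e:+.3f}")
\end{verbatim}
Note that at \(\alpha = 0.5\) the ground state is doubly degenerate, so the eigensolver returns an arbitrary state from the two-dimensional ground space; scans of this kind are what is reported below to change sign near \(\alpha \approx 0.24\) for \(8\) and \(10\) spins.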
It is known that at \(\alpha \approx 0.2411\), a phase transition from the fluid to the dimerized phase occurs \cite{gap_transition}. The genuine multipartite entanglement measure clearly signals the Majumdar-Ghosh point (see Fig. 3). The fluid-dimer transition at \(\alpha \approx 0.24\) can also be detected by the vanishing of the bound GGM as the order parameter (for \(\mu = \alpha\)) (see Fig. 3). Moreover, \({\cal E}_B >0\) signals the gapless phase, while \({\cal E}_B <0\) indicates the gapped phase. \begin{figure} \caption{ (Color online.) GGM and bound GGM for the quasi-2D frustrated antiferromagnet. The left figure is for the GGM on the vertical axis against \(\alpha\) on the horizontal. The Majumdar-Ghosh point at \(\alpha = 0.5\) is clearly signaled. The figure on the right is for the bound GGM on the vertical axis, against \(\alpha\) on the horizontal, and the fluid-dimer transition is signaled by the vanishing of the bound GGM. The two curves are for \(8\) (red circles) and \(10\) (blue squares) spins in both the figures. The GGM, bound GGM, and \(\alpha\) are all dimensionless. } \label{fig-chhobi-dui} \end{figure} \section{2D Frustrated \(J_1-J_2\) Model} We now consider spin-1/2 particles on a square lattice, where nearest neighbor spins (both vertical and horizontal) on the lattice are coupled by Heisenberg interactions, with coupling strengths \(J_1\), and where all diagonal spins are coupled by Heisenberg interactions, with coupling strengths \(J_2\) (see Fig. 1). This 2D model has attracted a lot of interest \cite{many_theory} due to its connection with the high \(T_c\)-superconductors and its similarity with magnetic materials like \(\mbox{Li}_2 \mbox{VOSiO}_4\) and \(\mbox{Li}_{2}\mbox{VOGeO}_4\) \cite{synthesis}. Although the different phases of the ground state of this model are well studied, there are reasons to believe that further features remain hidden. The Hamiltonian of the system is given by \begin{equation} H_{2D} = J_1 \sum_{\langle i,j \rangle} \vec{\sigma}_i \cdot \vec{\sigma}_{j} + J_2 \sum_{i,j \in {\cal D}} \vec{\sigma}_i \cdot \vec{\sigma}_{j}. \end{equation} Both \(J_1\) and \(J_2\) are antiferromagnetic (\(>0\)). In the classical limit, the model exhibits only a first-order phase transition from N{\' e}el to collinear order at \(\alpha = J_2/J_1 = 0.5\). The phase diagram changes its nature when quantum fluctuations are present, and in this case the exact phase boundaries are not known. It is expected that two long range ordered (LRO) ground state phases are separated by quantum paramagnetic phases without LRO. Different methods, like exact diagonalization, series expansion methods, field-theory methods \cite{Richter10-Kim-Singh}, etc., applied to this model, predict that the first transition, from N{\' e}el to dimer order, occurs at \(\alpha \approx 0.38\), while the other occurs at \(\alpha \approx 0.66\). Recent experimental observations and proposals for detecting such phases in the laboratory demand a precise quantification of the phase diagram of this model at low temperature. Towards this aim, we show that even for relatively small system sizes, the order parameter based on the genuine multipartite entanglement measure (the \({\cal E}_B\) introduced above) can detect and quantify the phase diagram accurately. We perform exact diagonalization to find the ground state of the model, and we show that both the transitions -- N{\' e}el to dimer and dimer to collinear -- can be signaled by the bound GGM. A synopsis of these facts is given in Fig. 4.
Precisely, we have found that \({\cal E}_B\) vanishes at \(\alpha \approx 0.38\) and again at \(\alpha \approx 0.69\). As observed in the case of the quasi 2D \(J_1-J_2\) model, \({\cal E}_B\) is positive in the gapless phases while it is negative in the intermediate gapped phase. \begin{figure} \caption{ (Color online.) GGM and bound GGM for the 2D frustrated antiferromagnet. The horizontal axes in both the figures represent \(\alpha\). The left figure is for the GGM on the vertical axis while the right one is for the bound GGM. The vanishing of the bound GGM signals both the N{\' e}el--dimer and the dimer--collinear transitions. } \label{fig-chhobi-tin} \end{figure} \section{Conclusions} Multipartite entangled states can be classified according to their separability in different partitions. Due to the complex classification, it is hard to obtain a unique multipartite entanglement measure. Instead of quantifying all the classes of multipartite states, we define an entanglement measure, called the generalized geometric measure, that quantifies the ``purest'' form of multiparty entanglement, the genuine multipartite entanglement. This is akin to the situation in bipartite pure states, where there is essentially a unique entanglement measure, while mixed bipartite states allow a number of such measures \cite{HHHH-RMP}. In the case of multiparty states, we find ``pure'' and ``non-pure'' forms of entanglement, even within the class of pure states, where the ``pure'' part can be quantified by the generalized geometric measure defined here. Moreover, we found that the measure can be reduced to a simplified closed form, and hence is computable for arbitrary dimensions and an arbitrary number of parties. We then applied this measure to detect phase diagrams in quantum many-body systems. After successfully verifying that the measure can detect quantum fluctuation driven phase transitions in exactly solvable models like the XY Hamiltonian, we applied the generalized geometric measure to frustrated models like the quasi two dimensional and two dimensional antiferromagnetic \(J_1-J_2\) models. In the latter case, the phase diagram is not known exactly, although there have been several predictions by different methods. In this paper, we show that an order parameter, called the bound GGM, based on the multi-site entanglement measure defined here, can signal the phase boundaries in both the models. Moreover, we found that the order parameter is positive when the system is gapless and negative in the gapped phase. We propose that the sign of the bound GGM can indicate whether a many-body system is gapped or gapless, and point to its implication for the first law of entanglement thermodynamics. \acknowledgments We acknowledge partial support from the Spanish MEC (TOQATA (FIS2008-00784)). \end{document}
\begin{document} \title[] {Null $\varphi $--Slant Curves in a Main Class of\\3-Dimensional Normal Almost Contact \\ B-Metric Manifolds} \author[G. Nakova]{Galia Nakova} \address{ University of Veliko Tarnovo "St. Cyril and St. Methodius" \\ Faculty of Mathematics and Informatics \\ 2 Teodosii Tarnovski Str., Veliko Tarnovo 5003, Bulgaria} \email{[email protected]} \keywords{Almost contact B-metric manifolds, Slant curves, Null curves, $\varphi $-slant null curves} \subjclass{53C15, 53C50} \begin{abstract} We introduce a new type of slant curves in almost contact B-metric manifolds, called $\varphi $-slant curves, by an additional condition which is specific for these manifolds. In this paper we study $\varphi $-slant null curves in a class of 3-dimensional normal almost contact B-metric manifolds and prove that for non-geodesic of them there exists a unique Frenet frame for which the original parameter is distinguished. We investigate some of $\varphi $-slant null curves and with respect to the associated B-metric on the manifold and find relationships between the corresponding Frenet frames and curvatures. We construct the examined curves in a 3-dimensional Lie group and give their matrix representation. \end{abstract} \newcommand{i.\,e. }{i.\,e. } \newcommand{\mathfrak{g}}{\mathfrak{g}} \newcommand{\mathcal{D}}{\mathcal{D}} \newcommand{\mathcal{F}}{\mathcal{F}} \newcommand{\mathrm{diag}}{\mathrm{diag}} \newcommand{\mathrm{End}}{\mathrm{End}} \newcommand{\mathrm{Im}}{\mathrm{Im}} \newcommand{\mathrm{id}}{\mathrm{id}} \newcommand{\mathrm{Hom}}{\mathrm{Hom}} \newcommand{\mathrm{Rad}}{\mathrm{Rad}} \newcommand{\mathrm{rank}}{\mathrm{rank}} \newcommand{\mathrm{const}}{\mathrm{const}} \newcommand{{\rm tr}}{{\rm tr}} \newcommand{\mathrm{ltr}}{\mathrm{ltr}} \newcommand{\mathrm{codim}}{\mathrm{codim}} \newcommand{\mathrm{Ker}}{\mathrm{Ker}} \newcommand{\mathbb{R}}{\mathbb{R}} \newcommand{\mathbb{K}}{\mathbb{K}} \newcommand{\thmref}[1]{Theorem~\ref{#1}} \newcommand{\propref}[1]{Proposition~\ref{#1}} \newcommand{\corref}[1]{Corollary~\ref{#1}} \newcommand{\secref}[1]{\S\ref{#1}} \newcommand{\lemref}[1]{Lemma~\ref{#1}} \newcommand{\dfnref}[1]{Definition~\ref{#1}} \newcommand{\end{equation}}{\end{equation}} \newcommand{\be}[1]{\begin{equation}\label{#1}} \maketitle \section{Introduction}\label{sec-1} In the Lorentzian geometry there exist three types of curves according to the causal character of their tangent vector -- spacelike, timelike and null (lightlike) curves. Studying the geometry of null curves is of special interest since they have very different properties compared to spacelike and timelike curves. The general theory of null curves is developed in \cite{D-B, D-J}, where there are established important applications of these curves in general relativity. Let $\bf{F}$ be a Frenet frame along a null curve $C$ on a Lorentzian manifold. According to \cite{D-B}, $\bf{F}$ and the Frenet equations with respect to $\bf{F}$ depend on both the parametrization of $C$ and the choice of a screen vector bundle. However, if a non-geodesic null curve $C$ is properly parameterized, then there exists only one Frenet frame, called a Cartan Frenet frame, for which the corresponding Frenet equations of $C$, called Cartan Frenet equations, have minimum number of curvature functions (\cite{D-J}). In this paper we consider 3-dimensional almost contact B-metric manifolds $(M,\varphi,\xi,\eta,g)$, which are Lorentzian manifolds equipped with an almost contact B-metric structure. 
We study $\varphi $-slant null curves in considered manifolds belonging to the class $\mathcal{F}_4$ of the Ganchev-Mihova-Gribachev classification given in \cite{GaMGri}. The class $\mathcal{F}_4$ consists of normal almost contact B-metric manifolds and it is analogous to the class of $\alpha $-Sasakian manifolds in the theory of almost contact metric manifolds. A slant curve $C(t)$ on $(M,\varphi,\xi,\eta,g)$, defined by the condition $g(\dot C,\xi )=a={\rm const}$ for the tangent vector $\dot C$, is a natural generalization of a cylindrical helix in an Euclidean space. Slant curves and in particular Legendre curves (which are slant curves with $a=0$) in almost contact metric and almost paracontact metric manifolds have been investigated intensively by many authors \cite{I-L, W} and the references therein. In the present work we introduce a new type of slant curves in almost contact B-metric manifolds, called $\varphi $-slant curves, by the additional condition $g(\dot C,\varphi \dot C)=b={\rm const}$. For these manifolds, in contrast to the almost contact metric and almost paracontact metric manifolds, $b$ is a non-zero function in general. The paper is organized as follows. Section 2 is a brief review of almost contact B-metric manifolds and geometry of null curves in a 3-dimensional Lorentzian manifold. First in Section 3 we show that in a 3-dimensional almost contact B-metric manifold there exist no $\varphi $-slant null curves such that $a=b=0$. Then we prove that for a non-geodesic $\varphi $-slant null curve $C(t)$ in a 3-dimensional $\mathcal{F}_4$-manifold there exists a unique Frenet frame ${\bf F}_1$ for which the original parameter $t$ is distinguished, as well as we express ${\bf F}_1$ in terms of the almost contact B-metric structure. Also, we find the curvatures $k_1(t)$ and $k_2(t)$ with respect to ${\bf F}_1$. On an almost contact B-metric manifold there exist two B-metrics $g$ and $\widetilde g$. For that reason in Section 4 we consider some $\varphi $-slant null curves with respect to $g$ in a 3-dimensional $\mathcal{F}_4$-manifold and prove that with respect to $\widetilde g$ these curves are $\varphi $-slant non-null curves. Moreover, we obtain relationships between the Frenet frames and the corresponding curvatures with respect to $g$ and $\widetilde g$. In the last Section 4 we construct $\varphi $-slant null curves in a 3-dimensional Lie group endowed with an almost contact B-metric structure of an $\mathcal{F}_4$-manifold. We find a matrix representation of considered curves. \section{Preliminaries}\label{sec-2} A $(2n+1)$-dimensional smooth manifold $(M,\varphi,\xi ,\eta ,g)$ is called an almost contact manifold with B-metric (or {\it an almost contact B-metric manifold}) \cite{GaMGri} if it is endowed with an almost contact structure $(\varphi ,\xi ,\eta )$ consisting of an endomorphism $\varphi $ of the tangent bundle, a Reeb vector field $\xi $ and its dual 1-form $\eta $, satisfying the following relations: \begin{align*} \varphi^2X=-X+\eta(X)\xi, \qquad \quad \eta(\xi )=1. \end{align*} Also, $M$ is equipped with a semi-Riemannian metric $g$, called {\it a B-metric} \cite{GaMGri}, determined by \[ g(\varphi X,\varphi Y)=-g(X,Y)+\eta (X)\eta (Y). \] Here and further $X$, $Y$, $Z$ are tangent vector fields on $M$, i.\,e. $X,Y,Z \in TM$. Immediate consequences of the above conditions are: \begin{align*} \eta \circ \varphi =0, \quad \varphi \xi =0, \quad {\rm rank}(\varphi)=2n, \quad \eta (X)= g(X,\xi ), \quad g(\xi,\xi )=1. 
\end{align*} The distribution $\mathbb {D}: x \in M \longrightarrow \mathbb {D}_x\subset T_xM$, where \[ \mathbb D_x=Ker \eta=\{X_x\in T_xM: \eta (X_x)=0\} \] is called {\it a contact distribution} generated by $\eta $. Then the tangent space $T_xM$ at each $x\in M$ is the following orthogonal direct sum \[ T_xM=\mathbb D_x\oplus span_\mathbb R\{\xi _x\} . \] The tensor field $\varphi $ induces an almost complex structure on each fibre on $\mathbb D$. Since $g$ is non-degenerate metric on $M$ and $\xi $ is non-isotropic, the contact distribution $\mathbb D$ is non-degenerate and the restriction $g_{\vert \mathbb D}$ of the metric $g$ on $\mathbb D$ is of signature $(n,n)$. \\ The tensor field ${\widetilde g}$ of type $(0,2)$ given by ${\widetilde g}(X,Y)=g(X,\varphi Y)+\eta (X)\eta (Y)$ is a B-metric, called {\it an associated metric} to $g$. Both metrics $g$ and ${\widetilde g}$ are necessarily of signature $(n+1,n)$ $(+\ldots + -\ldots -)$. \\ Let $\nabla$ be the Levi-Civita connection of $g$. The tensor field $F$ of type $(0,3)$ on $M$ is defined by $F(X,Y,Z)=g((\nabla_X\varphi)Y,Z)$ and it has the following properties: \[ F(X,Y,Z)=F(X,Z,Y)=F(X,\varphi Y,\varphi Z)+\eta (Y)F(X,\xi,Z)+\eta (Z)F(X,Y,\xi ). \] Moreover, we have \begin{align}\label{2.1} F(X,\varphi Y,\xi )=(\nabla _X\eta )Y=g(\nabla _X\xi,Y). \end{align} The following 1-forms, called \emph{Lee forms}, are associated with $F$: \[ \theta (X)=g^{ij}F(e_i,e_j,X), \quad \theta ^*(X)=g^{ij}F(e_i,\varphi e_j,X), \quad \omega (X)=F(\xi,\xi,X), \] where $\{e_i\}, \, i=\{1,\ldots,2n+1\}$ is a basis of $T_xM$, $x\in M$, and $(g^{ij})$ is the inverse matrix of $(g_{ij})$. \\ A classification of the almost contact B-metric manifolds with respect to $F$ is given in \cite{GaMGri} and eleven basic classes $\mathcal{F}_i$ $(i=1,2,\dots,11)$ are obtained. If $(M,\varphi,\xi,\eta,g)$ belongs to the class $\mathcal{F}_i$ then it is called an \emph{$\mathcal{F}_i$-manifold}. \\ The special class $\mathcal{F}_0$ is the intersection of all basic classes. It is known as the class of the {\it cosymplectic B-metric manifolds}, i.\,e. the class of the considered manifolds with parallel structure tensors with respect to $\nabla$, namely $\nabla \varphi =\nabla \xi =\nabla \eta =\nabla g=\nabla {\widetilde g}=0$ and consequently $F=0$. \\ The lowest possible dimension of the considered manifolds is three. The class of the 3-dimensional almost contact B-metric manifolds is $\mathcal{F}_1\oplus \mathcal{F}_4\oplus \mathcal{F}_5\oplus \mathcal{F}_8\oplus \mathcal{F}_9\oplus \mathcal{F}_{10}\oplus \mathcal{F}_{11}$ \cite{GaMGri}. According to \cite{ManIv13}, the class of the normal almost contact B-metric manifolds is $\mathcal{F}_1\oplus\mathcal{F}_2\oplus\mathcal{F}_4\oplus\mathcal{F}_5\oplus\mathcal{F}_6$, since the Nijenhuis tensor of almost contact structure vanishes there. Hence, the class of the 3-dimensional normal almost contact B-metric manifolds is $\mathcal{F}_1\oplus\mathcal{F}_4\oplus\mathcal{F}_5$. \\ Let us remark that only in the classes $\mathcal{F}_1$, $\mathcal{F}_4$, $\mathcal{F}_5$ and $\mathcal{F}_{11}$ the structure tensor $F$ is expressed explicitly by the 1-forms $\theta $, $\theta ^*$, $\omega $ and the basic tensors of type $(0,2)$ $g$, $\widetilde g$, $\eta \otimes \eta $ of the manifold. In this reason, the classes $\mathcal{F}_1$, $\mathcal{F}_4$, $\mathcal{F}_5$, $\mathcal{F}_{11}$ are called {\it main classes}. 
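The compatibility conditions above are easily checked on the simplest pointwise matrix model of an almost contact B-metric structure. In the following sketch the basis $\{e_1,e_2,\xi \}$ with $\varphi e_1=e_2$, $\varphi e_2=-e_1$, $\varphi \xi =0$ and $g=\mathrm{diag}(1,-1,1)$ is only an illustrative model of a single tangent space, not one of the manifolds considered in this paper; it verifies $\varphi ^2X=-X+\eta (X)\xi $, $g(\varphi X,\varphi Y)=-g(X,Y)+\eta (X)\eta (Y)$, and that the associated B-metric ${\widetilde g}$ is symmetric and, like $g$, of signature $(2,1)$.
\begin{verbatim}
import numpy as np

# pointwise model in the basis {e1, e2, xi}: phi e1 = e2, phi e2 = -e1, phi xi = 0
phi = np.array([[0., -1., 0.],
                [1.,  0., 0.],
                [0.,  0., 0.]])
xi  = np.array([0., 0., 1.])
eta = np.array([0., 0., 1.])                      # eta(X) = g(X, xi)
g   = np.diag([1., -1., 1.])                      # B-metric, signature (2,1)

# phi^2 X = -X + eta(X) xi
assert np.allclose(phi @ phi, -np.eye(3) + np.outer(xi, eta))
# g(phi X, phi Y) = -g(X, Y) + eta(X) eta(Y)
assert np.allclose(phi.T @ g @ phi, -g + np.outer(eta, eta))

# associated B-metric: gt(X, Y) = g(X, phi Y) + eta(X) eta(Y)
gt = g @ phi + np.outer(eta, eta)
assert np.allclose(gt, gt.T)                      # gt is symmetric
print(np.linalg.eigvalsh(g), np.linalg.eigvalsh(gt))   # both of signature (2,1)
\end{verbatim}
In this model the contact distribution is $\mathbb D=\mathrm{span}\{e_1,e_2\}$ and the restriction $g_{\vert \mathbb D}$ has signature $(1,1)$, which is the fact used in Section 3 below.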
In this paper we consider 3-dimensional almost contact B-metric manifolds $(M,\varphi,\xi ,\eta ,g)$ belonging to the class $\mathcal{F}_4$, which is determined by (see \cite{GaMGri}) \begin{align}\label{2.2} \begin{array}{ll} \mathcal{F}_4 : F(X,Y,Z)=-\frac{\theta (\xi )}{2}\{g(\varphi X,\varphi Y)\eta (Z)+g(\varphi X,\varphi Z)\eta (Y)\}. \end{array} \end{align} Taking into account \eqref{2.1} and \eqref{2.2} we have \begin{align}\label{2.3} \nabla _X\xi= \frac{\theta (\xi )}{2}\varphi X . \end{align} The equality \eqref{2.3} shows that the class $\mathcal{F}_4$ is similar to the class of $\alpha $-Sasakian manifolds in the theory of almost contact metric manifolds.\\ Let ${\widetilde \nabla }$ be the Levi-Civita connection of ${\widetilde g}$. We consider the symmetric tensor field $\Phi $ of type $(1,2)$ defined by $\Phi (X,Y)={\widetilde \nabla }_XY-\nabla _XY$. For a 3-dimensional $\mathcal{F}_4$-manifold the following equality holds \cite{MM}: \begin{align}\label{2.4} {\widetilde \nabla }_XY-\nabla _XY=\frac{\theta (\xi )}{2}\{g(X,\varphi Y)-g(\varphi X,\varphi Y)\}\xi . \end{align} Let us remark that on a 3-dimensional almost contact B-metric manifold $(M,\varphi,\xi,\eta,g)$ the metric $g$ has signature $(2,1)$, i.\,e. $(M,g)$ is a 3-dimensional Lorentzian manifold. In the remainder of this section we briefly recall the main notions about null curves in a 3-dimensional Lorentzian manifold $M$ for which we refer to \cite{D-B, D-J}. Let $C: I\longrightarrow M$ be a smooth curve in $M$ given locally by \[ x_i=x_i(t), \quad t\in I\subseteq {\mathbb{R}}, \quad i\in \{1,2,3\} \] for a coordinate neighborhood $U$ of $C$. The tangent vector field is given by \[ \frac{{\rm d}}{{\rm d}t}=(\dot {x}_1, \dot {x}_2, \dot {x}_3)=\dot {C}, \] where we denote $\frac{{\rm d}x_i}{{\rm d}t}$ by $\dot {x}_i$ for $i\in \{1,2,3\}$. The curve $C$ is called a {\it regular curve} if $\dot {C}\neq 0$ holds everywhere. Let a regular curve $C$ be {\it a null (lightlike) curve} in $(M, g)$, i.\,e. at each point $x$ of $C$ we have \begin{align}\label{2'} g(\dot {C},\dot {C})=0,\qquad \dot {C}\neq 0. \end{align} A general Frenet frame on $M$ along $C$ is denoted by ${\bf F}=\{\dot {C}, N, W\}$ and the vector fields in ${\bf F}$ are determined by \begin{align}\label{3'} g(\dot {C},N)=g(W,W)=1, \quad g(N,N)=g(N,W)=g(\dot {C},W)=0. \end{align} In \cite[Theorem 1.1, p. 53]{D-B} it was proved that for a given $W$ there exists a unique $N$ satisfying the corresponding equalities in \eqref{3'}. The following general Frenet equations with respect to ${\bf F}$ and $\nabla $ of $(M, g)$ are known from \cite{D-B, D-J} \begin{align}\label{general Frenet eq} \begin{array}{lll} \nabla _{\dot {C}}\dot {C}=h\dot {C}+k_1W \\ \nabla _{\dot {C}}N=-hN+k_2W \\ \nabla _{\dot {C}}W=-k_2\dot {C}-k_1N, \end{array} \end{align} where $h$, $k_1$ and $k_2$ are smooth functions on $U$. The functions $k_1$ and $k_2$ are called {\it curvature functions} of $C$. The general Frenet frame ${\bf F}$ and its general Frenet equations \eqref{general Frenet eq} are not unique as they depend on the parameter and the choice of the screen vector bundle $S(TC^\bot )={\rm span}W$ of $C$ (for details see \cite[pp. 56-58]{D-B}, \cite[pp. 25-29]{D-J}). It is known \cite[p. 58]{D-B} that there exists a parameter $p$ called a {\it distinguished parameter}, for which the function $h$ vanishes in \eqref{general Frenet eq}. 
The pair $(C(p), {\bf F})$, where ${\bf F}$ is a Frenet frame along $C$ with respect to a distinguished parameter $p$, is called a {\it framed null curve} (see \cite{D-J}). In general, $(C(p), {\bf F})$ is not unique since it depends on both $p$ and the screen distribution. A Frenet frame with the minimum number of curvature functions is called {\it Cartan Frenet frame} of a null curve $C$. In \cite{D-J} it is proved that if the null curve $C(p)$ is non-geodesic such that for $\ddot{C}=\frac{{\rm d}}{{\rm d}p}\dot{C}$ the condition $g(\ddot {C},\ddot {C})=k_1=1$ holds, then there exists only one Cartan Frenet frame ${\bf F}$ with the following Frenet equations \begin{align}\label{Cartan Frenet eq} \begin{array}{lll} \nabla _{\dot {C}}\dot {C}=W, \quad \nabla _{\dot {C}}N=\tau W, \quad \nabla _{\dot {C}}W=-\tau \dot {C}-N. \end{array} \end{align} The latter equations are called the {\it Cartan Frenet equations} of $C(p)$ whereas $\tau $ is called a \emph{torsion function} and it is invariant upto a sign under Lorentzian transformations. A null curve together with its Cartan Frenet frame is called a {\it Cartan framed null curve}. Note that some authors \cite{H-I} term a framed null curve $(C(p), {\bf F})$ Cartan framed null curve and a Frenet frame ${\bf F}$ along $C$ with respect to a distinguished parameter $p$ -- Cartan Frenet frame. \section{Framed $\varphi $-slant null curves with respect to the original parameter in 3-dimensional ${\mathcal{F}}_4$-manifolds}\label{sec-3} Let us consider a smooth curve $C$ in an almost contact B-metric manifold $(M,\varphi,\xi,\eta,g)$. We say that $C$ is a {\it slant curve} on $M$ if $g(\dot {C},\xi )=\eta (\dot {C})=a$ and $a$ is a real constant. The curve $C$ is called a {\it Legendre curve} if $a=0$.\\ A distinguishing feature of the almost contact B-metric manifolds from the almost contact metric and almost paracontact metric manifolds is that $g(X,\varphi X)$ is not zero in general. Motivated by this fact we define a new type slant curves. \begin{defn}\label{Definition 3.1} We say that a smooth curve $C(t)$ in an almost contact B-metric manifold $(M,\varphi,\xi,\eta,g)$ is {\it $\varphi $-slant} if \begin{align}\label{3.1} g(\dot {C}(t),\xi )=\eta (\dot {C}(t))=a \quad \text{and}\quad g(\dot {C}(t),\varphi \dot {C}(t))=b, \end{align} where $a$ and $b$ are real constants. \end{defn} \begin{rem}\label{Remark 3.1} Let $C(t)$ be a slant or a $\varphi $-slant curve. If we change the parameter $t$ of $C(t)$ with another parameter $p$, then we have $\dot C(p)=\dot C(t)\frac{{\rm d}t}{{\rm d}p}$. Hence \eqref{3.1} becomes \begin{align*} \begin{array}{ll} g(\dot {C}(p),\xi )=\eta (\dot {C}(p))=\frac{{\rm d}t}{{\rm d}p}\eta (\dot {C}(t))=\frac{{\rm d}t}{{\rm d}p}a \quad \text{and} \\ \\ g(\dot {C}(p),\varphi \dot {C}(p))={\left(\frac{{\rm d}t}{{\rm d}p}\right)}^2g(\dot {C}(t),\varphi \dot {C}(t))={\left(\frac{{\rm d}t}{{\rm d}p}\right)}^2b . \end{array} \end{align*} Therefore $g(\dot C(p),\xi )$ and $g(\dot {C}(p),\varphi \dot {C}(p))$ are constant if and only if $t=\alpha p+\beta $, where $\alpha, \, \beta $ are constant, i.e in general slant and $\varphi $-slant curves are not invariant under a reparameterization. Our aim in the present paper is to study $\varphi $-slant null curves with respect to its original parameter. \end{rem} \begin{prop}\label{Proposition 3.2} In a 3-dimensional almost contact B-metric manifold \\ $(M,\varphi,\xi,\eta,g)$ there exist no $\varphi $-slant null curves such that $a=b=0$. 
\end{prop}
\begin{proof}
Let us assume that there exists a $\varphi $-slant null curve $C$ in $M$ such that $a=b=0$. Then we have $g(\varphi \dot {C},\varphi \dot {C})=-g(\dot {C}, \dot {C})+ \eta (\dot {C})\eta (\dot {C})=0$. From $\eta (\dot {C})=\eta (\varphi \dot {C})=0$ it follows that $\dot {C}$ and $\varphi \dot {C}$ belong to the contact distribution $\mathbb {D}$ of $M$ along $C$. Since $\dot {C}$ and $\varphi \dot {C}$ are linearly independent, they form a basis of $\mathbb {D}$ at each point $x$ of $C$. Hence, for an arbitrary vector field $X \in \mathbb {D}_{\vert C}$ we have $X=u\dot {C}+v\varphi \dot {C}$ for some functions $u$ and $v$. By using $g(\dot {C},\dot {C})=g(\varphi \dot {C},\varphi \dot {C})=0$ and the second equality in \eqref{3.1} we obtain $g(X,X)=0$. This is a contradiction, since $g_{\vert \mathbb D}$ is of signature $(1,1)$, which confirms our assertion.
\end{proof}
Now, taking into account Proposition \ref{Proposition 3.2}, it is easy to see that the triad of vector fields $\{\dot {C}, \xi, \varphi \dot {C}\}$ is a basis of $T_xM$ at each point $x$ of a $\varphi $-slant null curve in a 3-dimensional almost contact B-metric manifold $(M,\varphi,\xi,\eta,g)$. By using this basis, in \cite{M-N} H. Manev and the author of this paper obtained the following result for a slant null curve $C$ in $(M,\varphi,\xi,\eta,g)$ satisfying the conditions \eqref{3.1}, where $(a,b)\neq (0,0)$ and $b$ is a function:\\
If ${\bf F}=\{\dot {C}, N, W\}$ is a general Frenet frame on $M$ along $C$ which has the same positive orientation as the basis $\{\dot {C}, \xi, \varphi \dot {C}\}$ at each $x\in C$, then
\begin{align}\label{3.21}
W=\alpha \xi +\beta \dot {C}+\gamma \varphi \dot {C} ,
\end{align}
\begin{align}\label{3.22}
N=\lambda \xi+\mu \dot {C}+\nu \varphi \dot {C},
\end{align}
where $\beta $ is an arbitrary function and $\alpha, \gamma, \lambda, \mu, \nu $ are the following functions
\begin{align}\label{3.3}
\begin{split}
\alpha ={-\frac{b}{\sqrt{a^4+b^2}}},\qquad \gamma ={\frac{a}{\sqrt{a^4+b^2}}},
\end{split}
\end{align}
\begin{align}\label{3.4}
\begin{split}
\lambda &=\frac{a^3+\beta b\sqrt{a^4+b^2}} {a^4+b^2}, \qquad \mu =-\frac{a^2+\beta ^2\left(a^4+b^2\right)}{2\left(a^4+b^2\right)}, \\[4pt]
\nu &=\frac{b-\beta a \sqrt{a^4+b^2}}{a^4+b^2}.
\end{split}
\end{align}
Moreover, the functions $h$ and $k_1$ in \eqref{general Frenet eq} with respect to ${\bf F}$ are given by
\begin{align}\label{3.5}
\begin{array}{ll}
h=-\lambda g(\dot {C},\nabla _{\dot {C}}\xi )+\frac{\nu }{2}\left[\dot {C}\left(b\right)-F(\dot {C},\dot {C},\dot {C})\right],\\ \\
k_1=-\alpha g(\dot {C},\nabla _{\dot {C}}\xi )+\frac{\gamma }{2}\left[\dot {C}\left(b\right)-F(\dot {C},\dot {C},\dot {C})\right].
\end{array}
\end{align}
\begin{rem}\label{Remark 3.2}
The equalities \eqref{3.21}, \eqref{3.22}, \eqref{3.3}, \eqref{3.4} and \eqref{3.5} also hold in the case of a $\varphi $-slant null curve in a 3-dimensional $(M,\varphi,\xi,\eta,g)$, i.e. when $b$ is a constant.
\end{rem}
\begin{prop}\label{Proposition 3.3}
Let $C$ be a $\varphi $-slant null curve in a 3-dimensional $\mathcal{F}_4$-manifold $(M,\varphi,\xi,\eta,g)$. If ${\bf F}=\{\dot {C}, N, W\}$ is a general Frenet frame along $C$, then for the functions $k_1$ and $h$ in \eqref{general Frenet eq} we have
\begin{align}\label{3.61}
h=-\beta \frac{\theta (\xi )\sqrt{a^4+b^2}}{2} ,
\end{align}
\begin{align}\label{3.62}
k_1=\frac{\theta (\xi )\sqrt{a^4+b^2}}{2}.
\end{align}
\end{prop}
\begin{proof}
By using \eqref{2.2} and \eqref{2.3} we find
\begin{align}\label{3.66}
F(\dot {C},\dot {C},\dot {C})=-\theta (\xi )a^3\quad \text{and} \quad g(\dot {C},\nabla _{\dot {C}}\xi )=\frac{\theta (\xi )b}{2} .
\end{align}
Substituting the above equalities and $\dot {C}\left(b\right)=0$ in \eqref{3.5}, we get
\begin{align}\label{3.7}
h=\frac{\theta (\xi )}{2}(-\lambda b+\nu a^3), \qquad k_1=\frac{\theta (\xi )}{2}(-\alpha b+\gamma a^3).
\end{align}
By virtue of \eqref{3.3} and \eqref{3.4} we obtain
\[
-\lambda b+\nu a^3=-\beta \sqrt{a^4+b^2}, \qquad -\alpha b+\gamma a^3=\sqrt{a^4+b^2}.
\]
The latter equalities and \eqref{3.7} imply \eqref{3.61} and \eqref{3.62}.
\end{proof}
\begin{cor}\label{Corollary 3.4}
A $\varphi $-slant null curve $C$ in a 3-dimensional $\mathcal{F}_4$-manifold $M$ is geodesic if and only if $M$ is an $\mathcal{F}_0$-manifold.
\end{cor}
\begin{proof}
It is known \cite{D-B} that a null curve is geodesic if and only if $k_1$ vanishes. From \eqref{3.62} and $a^4+b^2\neq 0$ it follows that $k_1=0$ if and only if $\theta (\xi )=0$, that is, $M\in \mathcal{F}_0$.
\end{proof}
\begin{thm}\label{Theorem 3.5}
Let $C(t)$ be a non-geodesic $\varphi $-slant null curve in a 3-dimensional ${\mathcal{F}}_4$-manifold $(M,\varphi,\xi,\eta,g)$. Then there exists a unique Frenet frame ${\bf F_1}=\{\dot {C}, N_1, W_1\}$ for which the original parameter $t$ of $C(t)$ is distinguished and the vector fields $W_1$, $N_1$ are given by
\begin{align}\label{3.8}
\begin{array}{lll}
\displaystyle W_1=\alpha \xi +\gamma \varphi \dot {C}=-\frac{b}{\sqrt{a^4+b^2}}\xi +\frac{a}{\sqrt{a^4+b^2}}\varphi \dot {C} , \\ \\
\displaystyle N_1=\lambda _1\xi +\mu _1\dot {C}+\nu _1\varphi \dot {C}\\
\quad \, \, \, =\displaystyle \frac{a^3}{a^4+b^2}\xi -\frac{a^2}{2(a^4+b^2)}\dot {C}+\frac{b}{a^4+b^2}\varphi \dot {C}.
\end{array}
\end{align}
The function $k_2$ with respect to ${\bf F_1}$ is given by
\begin{align}\label{3.9}
k_2=\frac{a^2\theta (\xi )}{4\sqrt{a^4+b^2}} .
\end{align}
\end{thm}
\begin{proof}
By using \eqref{3.61} and \eqref{3.62} we have $h=-\beta k_1$. Then, having in mind \eqref{3.21}, the first equality in \eqref{general Frenet eq} becomes
\begin{align}\label{3.99}
\nabla _{\dot C}\dot C=h\dot C+k_1W=-\beta k_1\dot C+k_1(\alpha \xi +\beta \dot {C}+\gamma \varphi \dot {C})=k_1(\alpha \xi +\gamma \varphi \dot {C}).
\end{align}
The vector field $W_1=\alpha \xi +\gamma \varphi \dot {C}=\displaystyle-\frac{b}{\sqrt{a^4+b^2}}\xi +\frac{a}{\sqrt{a^4+b^2}}\varphi \dot {C}$ is obtained from \eqref{3.21} for $\beta =0$ and therefore $g(W_1,W_1)=1$. Replacing $\beta $ with $0$ in \eqref{3.4} we get
\begin{align}\label{3.11}
\lambda _1=\frac{a^3}{a^4+b^2}, \quad \mu _1=-\frac{a^2}{2(a^4+b^2)}, \quad \nu _1=\frac{b}{a^4+b^2}.
\end{align}
Hence, the unique vector field $N_1$ corresponding to $W_1$ is given by $N_1=\lambda _1\xi +\mu _1\dot {C}+\nu _1\varphi \dot {C}$. Comparing \eqref{3.99} with the first equality in \eqref{general Frenet eq} we infer that $h=0$ with respect to the Frenet frame ${\bf F_1}=\{\dot {C}, N_1, W_1\}$, where $W_1$ and $N_1$ are determined by \eqref{3.8}. Thus, the original parameter $t$ of $C(t)$ is distinguished with respect to ${\bf F_1}$. Now, let us take another Frenet frame ${\bf F^*}=\{\dot {C}, N^*, W^*\}$ along $C$ with respect to $t$ and $W^*$. Since for a given $C$ the vector field $W$ depends only on $\beta $, we have $W^*=\alpha \xi +\beta ^*\dot {C}+\gamma \varphi \dot {C}= W_1+\beta ^*\dot {C}$.
The unique vector field $N^*$ corresponding to $W^*$ is given by $N^*=\lambda ^*\xi +\mu ^*\dot {C}+\nu ^*\varphi \dot {C}$, where $\lambda ^*$, $\mu ^*$ and $\nu ^*$ are obtained by replacing $\beta $ with $\beta ^*$ in \eqref{3.4}. For the first equality in \eqref{general Frenet eq} with respect to ${\bf F^*}$ we have
\begin{align*}
\nabla _{\dot C}\dot C=h^*\dot C+k_1^*W^* ,
\end{align*}
where $h^*=-\beta ^*k_1^*$. From the above equality we find $k_1^*=g(\nabla _{\dot C}\dot C, W^*)=g(\nabla _{\dot C}\dot C,W_1)=k_1$. Hence $h^*=-\beta ^*k_1$. The parameter $t$ is distinguished with respect to ${\bf F^*}$ if and only if $h^*$ vanishes. Since $C(t)$ is non-geodesic, it follows that $h^*=0$ if and only if $\beta ^*=0$. Thus, ${\bf F^*}={\bf F_1}$.\\
From the second equality in \eqref{general Frenet eq} with respect to ${\bf F_1}$ we derive
\[
k_2=g(\nabla _{\dot {C}}N_1,W_1).
\]
Taking into account \eqref{3.8}, the latter equality becomes
\begin{align}\label{3.12}
\begin{array}{ll}
k_2=\alpha \left(\lambda _1g(\nabla _{\dot {C}}\xi ,\xi )+\mu _1g(\nabla _{\dot {C}}\dot {C},\xi )+ \nu _1g(\nabla _{\dot {C}}\varphi \dot {C},\xi )\right)\\
\qquad +\gamma \left(\lambda _1g(\nabla _{\dot {C}}\xi ,\varphi \dot {C})+\mu _1g(\nabla _{\dot {C}}\dot {C},\varphi \dot {C})+ \nu _1g(\nabla _{\dot {C}}\varphi \dot {C},\varphi \dot {C})\right).
\end{array}
\end{align}
The equalities $g(\xi ,\xi )=1$ and $g(\varphi \dot {C},\varphi \dot {C})=a^2$ imply
\begin{align}\label{3.13}
g(\nabla _{\dot {C}}\xi ,\xi )=g(\nabla _{\dot {C}}\varphi \dot {C},\varphi \dot {C})=0 .
\end{align}
By using $g(\dot {C} ,\xi )=a$, $g(\varphi \dot {C} ,\xi )=0$ and \eqref{2.3} we obtain
\begin{align}\label{3.14}
\begin{array}{ll}
g(\nabla _{\dot {C}}\dot {C},\xi )=-g(\dot {C},\nabla _{\dot {C}}\xi )=\displaystyle -\frac{\theta (\xi )b}{2} ,\\
g(\nabla _{\dot {C}}\varphi \dot {C},\xi )=-g(\nabla _{\dot {C}}\xi ,\varphi \dot {C})=\displaystyle -\frac{\theta (\xi )a^2}{2} .
\end{array}
\end{align}
With the help of the following expressions
\begin{align*}
\begin{array}{ll}
F(\dot {C},\dot {C},\dot {C})=g(\nabla _{\dot {C}}\varphi \dot {C},\dot {C})-g(\varphi (\nabla _{\dot {C}}\dot {C}),\dot {C}) ,\\ \\
0=\dot {C}(b)=g(\nabla _{\dot {C}}\dot {C},\varphi \dot {C})+g(\dot {C},\nabla _{\dot {C}}\varphi \dot {C})
\end{array}
\end{align*}
and the first equality in \eqref{3.66} we find
\begin{align}\label{3.15}
g(\nabla _{\dot {C}}\dot {C},\varphi \dot {C})=-\frac{1}{2}F(\dot {C},\dot {C},\dot {C})= \frac{1}{2}\theta (\xi )a^3 .
\end{align}
Substituting \eqref{3.13}, \eqref{3.14} and \eqref{3.15} in \eqref{3.12} we obtain
\begin{align}\label{3.16}
k_2=-\frac{\alpha \theta (\xi )}{2}(\mu _1b+\nu _1a^2)+\frac{\gamma \theta (\xi )a^2}{2}(\lambda _1+\mu _1a) .
\end{align}
Finally, substituting \eqref{3.3} and \eqref{3.11} in \eqref{3.16} we get \eqref{3.9}.
\end{proof}
From now on in this paper, we deal with the pair $(C(t), {\bf F_1})$, where $C(t)$ is a $\varphi $-slant null curve in a 3-dimensional $\mathcal{F}_4$-manifold for which the original parameter is distinguished and ${\bf F_1}$ is the unique Frenet frame of $C(t)$ from \thmref{Theorem 3.5}. The Frenet equations of $(C(t), {\bf F_1})$ are
\begin{align}\label{3.17}
\begin{array}{lll}
\nabla _{\dot {C}}\dot {C}=k_1W_1 \\
\nabla _{\dot {C}}N_1=k_2W_1 \\
\nabla _{\dot {C}}W_1=-k_2\dot {C}-k_1N_1,
\end{array}
\end{align}
where $k_1$ and $k_2$ are given by \eqref{3.62} and \eqref{3.9}, respectively.
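For the reader's convenience we spell out the final substitution behind \eqref{3.9}. By \eqref{3.3} and \eqref{3.11} we have
\begin{align*}
\mu _1b+\nu _1a^2=\frac{a^2b}{2(a^4+b^2)}, \qquad \lambda _1+\mu _1a=\frac{a^3}{2(a^4+b^2)},
\end{align*}
so \eqref{3.16} yields
\begin{align*}
k_2=\frac{\theta (\xi )}{4(a^4+b^2)^{3/2}}\left(a^2b^2+a^6\right)=\frac{a^2\theta (\xi )}{4\sqrt{a^4+b^2}},
\end{align*}
which is exactly \eqref{3.9}.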
\begin{defn}
A framed null curve with $k_2=0$ is called {\it a generalized null cubic}.
\end{defn}
Substituting $a=0$ in \eqref{3.62}, \eqref{3.8} and \eqref{3.9}, we state
\begin{prop}\label{Proposition 3.6}
Let $(C(t), {\bf F_1})$ be a Legendre $\varphi $-slant null curve in a 3-dimensional $\mathcal{F}_4$-manifold $(M,\varphi,\xi,\eta,g)$. Then we have
\par
(i) $k_1=\displaystyle\frac{\vert b\vert\theta (\xi )}{2}$.
\par
(ii) The vector fields $N_1$, $W_1$ from ${\bf F_1}=\{\dot {C}, N_1, W_1\}$ are given by $N_1=\frac{1}{b}\varphi \dot {C}$, $W_1=-\epsilon \xi $, where $\epsilon =\{{\rm sign} \, b\}=\{\pm 1\}$.
\par
(iii) $(C(t), {\bf F_1})$ is a generalized null cubic.
\end{prop}
The notion of an $F$-geodesic in a manifold $M$ endowed with a $(1,1)$-tensor field $F$ and a linear connection $\nabla $ was introduced in \cite{Bejan} as a generalization of magnetic curves.
\begin{defn}\label{Definition 3.2}\cite{Bejan}
A smooth curve $\gamma : I\longrightarrow M$ in a manifold $(M,F,\nabla )$ is an $F$-geodesic if $\gamma (t)$ satisfies $\nabla _{\dot {\gamma }(t)}\dot {\gamma }(t)=F\dot {\gamma }(t)$.
\end{defn}
Note that an $F$-geodesic is not invariant under a reparameterization.\\
Using \eqref{3.62} and \eqref{3.8}, the first equality in \eqref{3.17} becomes
\begin{align}\label{3.18}
\nabla _{\dot {C}}\dot {C}=\displaystyle-\frac{b\theta (\xi )}{2}\xi + \displaystyle\frac{a\theta (\xi )}{2}\varphi \dot {C} .
\end{align}
By virtue of \eqref{3.18} we establish the truth of the following
\begin{prop}\label{Proposition 3.7}
A $\varphi $-slant null curve $(C(t), {\bf F_1})$ in a 3-dimensional $\mathcal{F}_4$-manifold $(M,\varphi,\xi,\eta,g)$ is a $\varphi $-geodesic if and only if $b=0$ and $\theta (\xi )=\frac{2}{a}$.
\end{prop}
\section{Some non-null $\varphi $-slant curves in a 3-dimensional $\mathcal{F}_4$-manifold induced from $\varphi $-slant null curves}\label{sec-4}
A curve $\gamma : I\longrightarrow M$ in a 3-dimensional Lorentzian manifold $(M,g)$ is said to be {\it a unit speed curve} (or $\gamma $ is parameterized by arc length $s$) if $g(\gamma ^\prime ,\gamma ^\prime)=\epsilon _1=\pm 1$, where $\gamma ^\prime =\frac{{\rm d}\gamma }{{\rm d}s}$ is the velocity vector field. A unit speed curve $\gamma $ is said to be {\it spacelike} or {\it timelike} if $\epsilon _1=1$ or $\epsilon _1=-1$, respectively. A unit speed curve $\gamma $ is said to be {\it a Frenet curve} if one of the following three cases holds \cite{W}:
\begin{itemize}
\item $\gamma $ is of osculating order 1, that is, $\nabla _{\gamma ^\prime }\gamma ^\prime =0$, i.e. $\gamma $ is a geodesic;
\item $\gamma $ is of osculating order 2, i.e. there exist two orthonormal vector fields $E_1$, $E_2$ and a positive function $k$ (the curvature) along $\gamma $ such that $E_1=\gamma ^\prime $, $g(E_2,E_2)=\epsilon _2=\pm 1$ and
\[
\nabla _{\gamma ^\prime }E_1=\epsilon _2kE_2 , \quad \nabla _{\gamma ^\prime }E_2=-\epsilon _1kE_1;
\]
\item $\gamma $ is of osculating order 3, i.e.
there exist three orthonormal vector fields $E_1$, $E_2$, $E_3$ and two positive functions $k$ (the curvature) and $\tau $ (the torsion) along $\gamma $ such that $E_1=\gamma ^\prime $, $g(E_2,E_2)=\epsilon _2=\pm 1$, $g(E_3,E_3)=\epsilon _3=\pm 1$, $\epsilon _3=-\epsilon _1\epsilon _2$ and
\[
\nabla _{\gamma ^\prime }E_1=\epsilon _2kE_2 , \quad \nabla _{\gamma ^\prime }E_2=-\epsilon _1kE_1+\epsilon _3\tau E_3 , \quad \nabla _{\gamma ^\prime }E_3=-\epsilon _2\tau E_2.
\]
\end{itemize}
As in the case of Riemannian geometry, a Frenet curve in a 3-dimensional Lorentzian manifold is a geodesic if and only if its curvature $k$ vanishes. Also, a curve $\gamma $ with a curvature $k$ and a torsion $\tau $ is called \cite{I}:
\begin{itemize}
\item {\it a pseudo-circle} if $k=const$ and $\tau =0$;
\item {\it a helix} if $k=const$ and $\tau =const$;
\item {\it a proper helix} if $\gamma $ is a helix which is not a pseudo-circle;
\item {\it a generalized helix} if $\displaystyle\frac{k}{\tau}=const$ but $k$ and $\tau $ are not constant.
\end{itemize}
Taking into account Remark \ref{Remark 3.1} we state:\\
A Frenet curve $\gamma (s)$ in an almost contact B-metric manifold $(M,\varphi,\xi,\eta,g)$ is said to be slant if $\eta (\gamma ^\prime (s))=a=const$ (see \cite{W}).\\
We say that a Frenet curve $\gamma (s)$ in an almost contact B-metric manifold $(M,\varphi,\xi,\eta,g)$ is $\varphi $-slant if
\begin{align*}
\eta (\gamma ^\prime (s))=a=const \quad \text{and}\quad g(\gamma ^\prime (s),\varphi \gamma ^\prime (s))=b=const.
\end{align*}
Since there exist two B-metrics $g$ and $\widetilde g$ on an almost contact B-metric manifold $M$, we can consider a curve $\gamma $ in $M$ with respect to both $g$ and $\widetilde g$. In this section we investigate non-null curves with respect to $\widetilde g$ induced from two types of $\varphi $-slant null curves with respect to $g$ in a 3-dimensional $\mathcal{F}_4$-manifold.
\begin{thm}\label{Theorem 4.1}
Let $(C(t), {\bf F_1})$ be a Legendre $\varphi $-slant null curve with respect to $g$ in a 3-dimensional $\mathcal{F}_4$-manifold. The curve $C$ with respect to $\widetilde g$ is
\par
(i) spacelike if $b>0$ or timelike if $b<0$;
\par
(ii) a Legendre $\varphi $-slant curve;
\par
(iii) a geodesic.
\end{thm}
\begin{proof}
(i) Since $(C(t), {\bf F_1})$ is a Legendre $\varphi $-slant null curve, from Proposition \ref{Proposition 3.2} it follows that $b\neq 0$. Thus $\widetilde g(\dot C,\dot C)=b\neq 0$. Now, we parameterize $C(t)$ by its arc length parameter $\widetilde s$ with respect to $\widetilde g$ given by
\[
\widetilde s=\int^{t}_{0}\sqrt{\vert \widetilde g(\dot C,\dot C)\vert }{\rm d}u=\int^{t}_{0}\sqrt{\vert b\vert }{\rm d}u =\sqrt{\vert b\vert }t .
\]
Then for the tangent vector $C^\prime (\widetilde s)=\displaystyle\dot C(t)\frac{{\rm d}t}{{\rm d}\widetilde s}=\frac{\dot C(t)}{\sqrt{\vert b\vert }}$ of the curve $C(\widetilde s)$ we have $\widetilde g(C^\prime ,C^\prime )=\displaystyle\frac{b}{\vert b\vert }=\pm 1$, which confirms assertion (i).
\par
(ii) By direct calculations we find
\begin{align}\label{4.1}
\begin{array}{ll}
\widetilde \eta (C^\prime )=\widetilde g(C^\prime ,\xi )=\displaystyle\eta (C^\prime )=\frac{1}{\sqrt{\vert b\vert }}\eta (\dot C)=0 , \\ \\
\displaystyle\widetilde g(C^\prime ,\varphi C^\prime )=\frac{1}{\vert b\vert }\widetilde g(\dot C,\varphi \dot C)=0 .
\end{array}
\end{align}
The equalities \eqref{4.1} show that $C(\widetilde s)$ is a Legendre $\varphi $-slant curve.\\
(iii) By virtue of \eqref{2.4} we get
\begin{align}\label{4.2}
\widetilde \nabla _{C^\prime }C^\prime =\frac{1}{\vert b\vert }\widetilde \nabla _{\dot C}\dot C= \frac{1}{\vert b\vert }\left(\nabla _{\dot C}\dot C+\frac{b\theta (\xi )}{2}\xi \right).
\end{align}
Now, we find $\nabla _{\dot C}\dot C$ with the help of Proposition \ref{Proposition 3.6}
\begin{align*}
\nabla _{\dot C}\dot C=k_1W_1=\frac{\vert b\vert \theta (\xi )}{2}(-\epsilon \xi )=-\frac{b\theta (\xi )}{2}\xi .
\end{align*}
The latter equality and \eqref{4.2} imply $\widetilde \nabla _{C^\prime }C^\prime =0$, i.e. $C(\widetilde s)$ is a geodesic.
\end{proof}
\begin{thm}\label{Theorem 4.2}
Let $(C(t), {\bf F_1})$ be a $\varphi $-slant null curve with respect to $g$ in a 3-dimensional $\mathcal{F}_4$-manifold $M$ and $b=0$. The curve $C$ with respect to $\widetilde g$ is a $\varphi $-slant spacelike curve in $M$ such that:
\par
(i) If $(C(t), {\bf F_1})$ is non-geodesic, then $C$ is a Frenet curve of osculating order 3. The orthonormal vector fields $E_1$, $E_2$, $E_3$ with respect to $\widetilde g$, the curvature $\widetilde k$ and the torsion $\widetilde \tau $ along $C$ are given as follows:
\begin{align}\label{4.3}
E_1(\widetilde s)=C^\prime (\widetilde s)=\frac{\dot C}{\vert a \vert}, \quad \widetilde g(E_1,E_1)=\epsilon _1=1 ,
\end{align}
where $\widetilde s$ is the arc length parameter of $C(t)$ with respect to $\widetilde g$;
\begin{align}\label{4.4}
E_2(\widetilde s)=\widetilde \epsilon \left(\frac{1}{a} \varphi \dot C-\xi \right)=\widetilde \epsilon \left(-\frac{1}{2a}\dot C-aN_1+W_1\right), \, \widetilde g(E_2,E_2)=\epsilon _2=1,
\end{align}
where $\widetilde \epsilon =\{{\rm sign} \, k_1(t)\}=\{{\rm sign}\, \theta (\xi )(t)\}=\{\pm 1\}$, $\epsilon =\{{\rm sign} \, a\}=\{\pm 1\}$ and $ k_1(t)$ is the curvature of $(C(t), {\bf F_1})$;
\begin{align}\label{4.5}
E_3(\widetilde s)=\frac{1}{\vert a\vert}(\varphi \dot C-\dot C)=\epsilon \left(-\frac{1}{a}\dot C+W_1\right), \quad \widetilde g(E_3,E_3)=\epsilon _3=-1;
\end{align}
\begin{align}\label{4.6}
\widetilde k(\widetilde s)=\frac{\vert k_1(t)\vert }{a^2}=\frac{\vert \theta (\xi )(t)\vert }{2}, \quad \widetilde \tau (\widetilde s)=\widetilde k(\widetilde s).
\end{align}
(ii) If $(C(t), {\bf F_1})$ is a geodesic, then $C(\widetilde s)$ is also a geodesic.
\end{thm}
\begin{proof}
Since $b=0$ for $(C(t), {\bf F_1})$, from Proposition \ref{Proposition 3.2} it follows that $a\neq 0$. First, for further use we compute:
\begin{align}\label{4.7}
\begin{array}{lll}
\widetilde g(\dot C,\dot C)=g(\dot C,\varphi \dot C)+(\eta (\dot C))^2=a^2 ,\, \, \widetilde g(\dot C,\varphi \dot C)=g(\varphi \dot C,\varphi \dot C)=a^2,\\
\widetilde g(\varphi \dot C,\varphi \dot C)=-\widetilde g(\dot C,\dot C)+(\eta (\dot C))^2=0.
\end{array}
\end{align}
The curvature $k_1(t)$ and the vector fields $W_1$, $N_1$ from the frame ${\bf F_1}$ along $C(t)$ are obtained by substituting $b=0$ in \eqref{3.62} and \eqref{3.8}. Thus we have
\begin{align}\label{4.8}
k_1(t)=\frac{a^2\theta (\xi )(t)}{2} ,
\end{align}
\begin{align}\label{4.9}
W_1=\frac{1}{a}\varphi \dot C ,
\end{align}
\begin{align}\label{4.10}
N_1=\frac{1}{a}\xi -\frac{1}{2a^2}\dot C .
\end{align}
Since $\widetilde g(\dot C,\dot C)=a^2\neq 0$, analogously to the proof of Theorem \ref{Theorem 4.1} we parameterize $C(t)$ with respect to its arc length parameter $\widetilde s=\vert a\vert t$.
Then it is easy to see that for the vector field $E_1(\widetilde s)=C^\prime (\widetilde s)$ the equality $\widetilde g(E_1,E_1)=1$ holds. Hence $C(\widetilde s)$ is a spacelike curve with respect to $\widetilde g$. By straightforward calculations we obtain \begin{align*} \begin{array}{ll} \widetilde \eta (C^\prime )=\widetilde g(C^\prime ,\xi )=\displaystyle\eta (C^\prime )=\frac{1}{\vert a\vert }\eta (\dot C)=\frac{a}{\vert a\vert }=\epsilon , \\ \\ \displaystyle\widetilde g(C^\prime ,\varphi C^\prime )=\frac{1}{a^2}\widetilde g(\dot C,\varphi \dot C)=\frac{1}{a^2}a^2=1 . \end{array} \end{align*} From the above equalities it is clear that the spacelike curve $C(\widetilde s)$ is a $\varphi $-slant (non-Legendre) curve in $M$.\\ (i) By virtue of \eqref{2.4} we find \begin{align}\label{4.11} \widetilde \nabla _{C^\prime }C^\prime =\frac{1}{a^2}\widetilde \nabla _{\dot C}\dot C= \frac{1}{a^2}\left(\nabla _{\dot C}\dot C-\frac{a^2\theta (\xi )}{2}\xi \right). \end{align} From the first equality in \eqref{3.17} and \eqref{4.9} we get \begin{align*} \nabla _{\dot C}\dot C=k_1W_1=k_1\frac{1}{a}\varphi \dot C . \end{align*} We substitute the latter equality in \eqref{4.11}. Then having in mind \eqref{4.8} we obtain \begin{align*} \widetilde \nabla _{C^\prime }C^\prime =\frac{k_1}{a^2}\left(\frac{1}{a}\varphi \dot C-\xi \right) . \end{align*} We rewrite the above equality in the following equivalent form \begin{align}\label{4.12} \widetilde \nabla _{C^\prime }C^\prime =\frac{\widetilde \epsilon k_1}{a^2}\widetilde \epsilon \left(\frac{1}{a}\varphi \dot C-\xi \right)=\frac{\widetilde \epsilon k_1}{a^2}E_2(\widetilde s), \end{align} where we put $E_2(\widetilde s)=\widetilde \epsilon \left(\frac{1}{a}\varphi \dot C-\xi \right)$ and $\widetilde \epsilon =\{{\rm sign} \, k_1(t)\}=\{{\rm sign}\, \theta (\xi )(t)\}=\{\pm 1\}$. By direct calculations we check that $\widetilde g(E_2,E_2)=1$ and $\widetilde g(E_1,E_2)=0$. From \eqref{4.10} we derive $\xi =aN_1+\frac{1}{2a}\dot C$ and hence $E_2=\widetilde \epsilon \left(-\frac{1}{2a}\dot C-aN_1+W_1\right)$. With the help of \eqref{4.12} and \eqref{4.8} we find \begin{align*} \widetilde k(\widetilde s)=\vert \widetilde \nabla _{C^\prime }C^\prime \vert =\sqrt{\displaystyle\vert \widetilde g(\widetilde \nabla _{C^\prime }C^\prime ,\widetilde \nabla _{C^\prime }C^\prime )\vert }=\frac{\vert k_1(t)\vert }{a^2}=\frac{\vert \theta (\xi )(t)\vert }{2}. \end{align*} Thus we establish the truth of the first equality in \eqref{4.6} and \eqref{4.12} becomes \begin{align}\label{4.13} \widetilde \nabla _{C^\prime }C^\prime =\widetilde kE_2 . \end{align} Since $(C(t), {\bf F_1})$ is non-geodesic, from Corollary \ref{Corollary 3.4} it follows that $\theta (\xi )\neq 0$ along $C$. Hence $C(\widetilde s)$ is also non-geodesic.\\ Now, we compute \begin{align}\label{4.14} \widetilde \nabla _{C^\prime }E_2=\widetilde \nabla _{C^\prime }\widetilde \epsilon\left(\frac{1}{a}\varphi \dot C-\xi \right)=\frac{1}{\vert a\vert }\left(\frac{\widetilde \epsilon}{a}\widetilde \nabla _{\dot C}\varphi \dot C-\widetilde \epsilon \widetilde \nabla _{\dot C}\xi \right). \end{align} Further, by using \eqref{2.4} we get \begin{align}\label{4.15} \widetilde \nabla _{\dot C}\varphi \dot C=\nabla _{\dot C}\varphi \dot C+ \frac{a^2\theta (\xi )}{2}\xi . 
\end{align}
From the well-known formula $(\nabla _{\dot C}\varphi )\dot C=\nabla _{\dot C}\varphi \dot C- \varphi (\nabla _{\dot C}\dot C)$ we express
\begin{align}\label{4.16}
\nabla _{\dot C}\varphi \dot C=(\nabla _{\dot C}\varphi )\dot C+\varphi (\nabla _{\dot C}\dot C) .
\end{align}
By virtue of \eqref{2.2} we find
\begin{align*}
(\nabla _{\dot C}\varphi )\dot C=-\frac{a\theta (\xi )}{2}(a\xi +\varphi ^2\dot C) .
\end{align*}
Taking into account \eqref{4.8} and \eqref{4.9} we have
\begin{align*}
\varphi (\nabla _{\dot C}\dot C)=\varphi (k_1W_1)=\frac{a^2\theta (\xi )}{2}\varphi ^2\dot C .
\end{align*}
Substituting the latter two equalities in \eqref{4.16} we obtain
\begin{align}\label{4.17}
\nabla _{\dot C}\varphi \dot C=-\frac{a^2\theta (\xi )}{2}\xi .
\end{align}
From \eqref{4.15} and \eqref{4.17} it follows
\begin{align}\label{4.18}
\widetilde \nabla _{\dot C}\varphi \dot C=0 .
\end{align}
By using \eqref{2.4} and \eqref{2.3} we get
\begin{align}\label{4.19}
\widetilde \nabla _{\dot C}\xi =\nabla _{\dot C}\xi =\frac{\theta (\xi )}{2}\varphi \dot C .
\end{align}
Substituting \eqref{4.18} and \eqref{4.19} in \eqref{4.14} we obtain $\widetilde \nabla _{C^\prime }E_2=-\widetilde k\frac{1}{\vert a\vert }\varphi \dot C$. We rewrite the last equality in the following equivalent form
\begin{align*}
\widetilde \nabla _{C^\prime }E_2=-\widetilde kE_1+\widetilde kE_1-\widetilde k\frac{1}{\vert a\vert }\varphi \dot C=-\widetilde kE_1-\widetilde k\left(\frac{1}{\vert a\vert }\varphi \dot C-E_1\right)
\end{align*}
and put $E_3(\widetilde s)=\frac{1}{\vert a\vert }\varphi \dot C-E_1=\frac{1}{\vert a\vert }(\varphi \dot C-\dot C)=\epsilon \left(-\frac{1}{a}\dot C+W_1\right)$. Immediately we verify that $\widetilde g(E_3,E_3)=-1$, $\widetilde g(E_1,E_3)=\widetilde g(E_2,E_3)=0$. Now, we obtain
\begin{align}\label{4.20}
\widetilde \nabla _{C^\prime }E_2=-\widetilde kE_1-\widetilde \tau E_3 ,
\end{align}
where $\widetilde \tau =\widetilde k$. Finally, we have
\begin{align*}
\widetilde \nabla _{C^\prime }E_3=\frac{1}{\vert a\vert }\widetilde \nabla _{\dot C}\frac{1}{\vert a\vert }(\varphi \dot C-\dot C)=\frac{1}{a^2}\left(\widetilde \nabla _{\dot C}\varphi \dot C-\widetilde \nabla _{\dot C}\dot C\right).
\end{align*}
Taking into account \eqref{4.13} and \eqref{4.18}, we infer
\begin{align}\label{4.21}
\widetilde \nabla _{C^\prime }E_3=-\widetilde \tau E_2 .
\end{align}
The equalities \eqref{4.13}, \eqref{4.20} and \eqref{4.21} show that $C(\widetilde s)$ is a Frenet curve of osculating order 3. Note that in our case $\epsilon _1=\epsilon _2=-\epsilon _3=1$.\\
(ii) The assertion follows from Corollary \ref{Corollary 3.4} and \eqref{4.6}.
\end{proof}
As an immediate consequence of Proposition \ref{Proposition 3.7} and Theorem \ref{Theorem 4.2} we obtain
\begin{cor}
Let $(C(t), {\bf F_1})$ and $C(\widetilde s)$ be the curves from Theorem \ref{Theorem 4.2}. Then
\par
(i) $C(\widetilde s)$ is a generalized helix.
\par
(ii) If $(C(t), {\bf F_1})$ is a $\varphi $-geodesic, then $C(\widetilde s)$ is a proper helix.
\end{cor}
\section{Null $\varphi $-slant curves in a Lie group as a 3-dimensional $\mathcal{F}_4$-manifold and their matrix representation}
Let $G$ be a 3-dimensional real connected Lie group and $\mathfrak {g}$ be its Lie algebra with a basis $\{E_1, E_2, E_3\}$ of left invariant vector fields.
We define an almost contact structure $(\varphi, \xi, \eta )$ and a left invariant B-metric $g$ as follows:
\begin{align*}
\begin{array}{llll}
\varphi E_1=E_2, \, \, \varphi E_2=-E_1, \, \, \varphi E_3=0, \quad \xi =E_3, \, \, \eta (E_3)=1, \, \eta (E_1)=\eta (E_2)=0,\\
g(E_1,E_1)=-g(E_2,E_2)=g(E_3,E_3)=1, \quad g(E_i,E_j)=0, \, i\neq j \in\{1,2,3\}.
\end{array}
\end{align*}
Let $(G,\varphi,\xi,\eta,g)$ be a 3-dimensional almost contact B-metric manifold such that the Lie algebra $\mathfrak {g}$ of $G$ is determined by the following commutators:
\begin{align}\label{5.1}
[E_1,E_3]=\alpha E_2 , \quad [E_2,E_3]=-\alpha E_1, \quad [E_1,E_2]=0, \,\, \alpha \in \mathbb{R}, \, \, \alpha \neq 0.
\end{align}
Further we will show that if \eqref{5.1} holds, then $(G,\varphi,\xi,\eta,g)$ is an $\mathcal{F}_4$-manifold. By using the Koszul formula
\begin{align}\label{5.2}
2g(\nabla _{E_i}E_j,E_k)=g([E_i,E_j],E_k)+g([E_k,E_i],E_j)+g([E_k,E_j],E_i)
\end{align}
we obtain the following equality for the components $F_{ijk}=F(E_i,E_j,E_k)$,\\ $i,j,k \in \{1,2,3\}$ of the tensor $F$:
\begin{align}\label{5.3}
\begin{array}{ll}
2F_{ijk}=g([E_i,\varphi E_j]-\varphi [E_i,E_j],E_k)+g(\varphi [E_k,E_i]-[\varphi E_k,E_i],E_j)\\
\quad\quad\,\,\,+g([E_k,\varphi E_j]-[\varphi E_k,E_j],E_i).
\end{array}
\end{align}
By virtue of \eqref{5.1} and \eqref{5.3} we obtain that the non-zero components $F_{ijk}$ are
\begin{align}\label{5.4}
F_{113}=F_{131}=\alpha , \quad F_{223}=F_{232}=-\alpha .
\end{align}
For the tensor $F$ of a 3-dimensional $\mathcal{F}_4$-manifold, using \eqref{2.2}, we get
\begin{align*}
F(X,Y,Z)=\frac{\theta (\xi )}{2}\{(X^1Y^1-X^2Y^2)Z^3+(X^1Z^1-X^2Z^2)Y^3\},
\end{align*}
where $X=X^iE_i$, $Y=Y^iE_i$, $Z=Z^iE_i$ are arbitrary vector fields. The latter equality and \eqref{5.4} imply that $(G,\varphi,\xi,\eta,g)$ is a 3-dimensional $\mathcal{F}_4$-manifold and $\alpha =\frac{\theta (\xi )}{2}$. With the help of \eqref{5.1} and \eqref{5.2} we find the components of the Levi-Civita connection $\nabla $. The non-zero ones are
\begin{align}\label{5.5}
\nabla _{E_1}E_2=\nabla _{E_2}E_1=\alpha \xi , \quad \nabla _{E_1}\xi =\alpha E_2, \quad \nabla _{E_2}\xi =-\alpha E_1.
\end{align}
Consider the curve $C(t)=e^{tX}$ on $G$, where $t\in {\mathbb{R}}$ and $X\in \mathfrak {g}$. Then the tangent vector to $C(t)$ at the identity element $e$ of $G$ is $\dot {C}(0)=X$. Let the coordinates $(p,q,r)$ of $\dot C$ with respect to the basis $\{E_1, E_2, E_3\}$ be given by
\begin{align}\label{5.6}
p=-\epsilon \sqrt{\frac{\sqrt{a^4+b^2}-a^2}{2}}, \quad q=\sqrt{\frac{\sqrt{a^4+b^2}+a^2}{2}}, \quad r=a,
\end{align}
where $a, b\in \mathbb{R}$, $(a,b)\neq (0,0)$ and $\epsilon =\{{\rm sign}\, b\}=\{\pm 1\}$. It is easy to see that $g(\dot {C},\dot {C})=0$ and $\eta (\dot C)=a$. Also, having in mind that $\varphi \dot C=(-q,p,0)$, we have $g(\dot C,\varphi \dot C)=b$. Hence, $C(t)$ is a $\varphi $-slant null curve in $(G,\varphi,\xi,\eta,g)$. Furthermore, using \eqref{5.5}, one obtains
\begin{align}\label{5.7}
\begin{array}{lll}
\nabla _{\dot C}{\dot C}=\alpha (-aqE_1+apE_2-b\xi )\\
\qquad \, \, \, =\displaystyle\alpha \sqrt{a^4+b^2}\left(\frac{-aq}{\sqrt{a^4+b^2}}E_1+\frac{ap}{\sqrt{a^4+b^2}}E_2-\frac{b}{\sqrt{a^4+b^2}}E_3\right)\\
\qquad \, \, \, =\alpha \sqrt{a^4+b^2}W_1,
\end{array}
\end{align}
where the vector field
\begin{align}\label{W_1}
W_1=\left(\frac{-aq}{\sqrt{a^4+b^2}},\frac{ap}{\sqrt{a^4+b^2}},-\frac{b}{\sqrt{a^4+b^2}}\right)
\end{align}
is a spacelike unit vector.
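Let us record the straightforward verification of the above claims. From \eqref{5.6} we have $p^2-q^2=-a^2$ and $2pq=-\epsilon \vert b\vert =-b$, while $\eta (\dot C)=r=a$ is immediate; hence
\begin{align*}
g(\dot C,\dot C)=p^2-q^2+a^2=0, \qquad g(\dot C,\varphi \dot C)=-2pq=b,
\end{align*}
and, for the vector field \eqref{W_1},
\begin{align*}
g(W_1,W_1)=\frac{a^2q^2-a^2p^2+b^2}{a^4+b^2}=\frac{a^4+b^2}{a^4+b^2}=1 .
\end{align*}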
Then the unique $N_1$ corresponding to $W_1$ is given by
\begin{align}\label{N_1}
N_1=\left(-\frac{a^2p+2bq}{2(a^4+b^2)},\frac{-a^2q+2bp}{2(a^4+b^2)},\frac{a^3}{2(a^4+b^2)}\right).
\end{align}
Thus, by using \eqref{5.5}, we obtain
\begin{align}\label{5.8}
\begin{array}{ll}
\nabla_{\dot C}N_1=\displaystyle\frac{\alpha a^2}{2\sqrt{a^4+b^2}}W_1, \\
\nabla_{\dot C}W_1=-\displaystyle\frac{\alpha a^2}{2\sqrt{a^4+b^2}}\dot C- \alpha \sqrt{a^4+b^2}N_1 .
\end{array}
\end{align}
Comparing the equations \eqref{5.7} and \eqref{5.8} with \eqref{general Frenet eq} we get
\begin{align*}
h=0 , \qquad k_1=\alpha \sqrt{a^4+b^2}, \qquad k_2=\displaystyle\frac{\alpha a^2} {2\sqrt{a^4+b^2}}
\end{align*}
with respect to the Frenet frame ${\bf F_1}=\{\dot C, N_1, W_1\}$.\\
Further, we find the matrix representation of $C(t)$ and ${\bf F}_1$. Let us recall that the adjoint representation ${\rm Ad}$ of $G$ is the following Lie group homomorphism
\[
{\rm {Ad}} : G \longrightarrow {\rm Aut}({\mathfrak{g}}).
\]
For $X\in {\mathfrak{g}}$, the map ${\rm {ad}}_X : {\mathfrak{g}}\longrightarrow {\mathfrak{g}}$ is defined by ${\rm {ad}}_X(Y)=[X,Y]$, where ${\rm ad}_X$ denotes ${\rm {ad}}(X)$. Due to the Jacobi identity, the map
\[
{\rm {ad}} : {\mathfrak{g}} \longrightarrow {\rm End}({\mathfrak{g}}) : X\longmapsto {\rm ad}_X
\]
is a Lie algebra homomorphism, which is called the adjoint representation of ${\mathfrak{g}}$. Since the set ${\rm End}({\mathfrak{g}})$ of all ${\mathbb{K}}$-linear maps from ${\mathfrak{g}}$ to ${\mathfrak{g}}$ is isomorphic to the set of all $(n\times n)$ matrices ${\rm M}(n,{\mathbb{K}})$ with entries in ${\mathbb{K}}$, ${\rm {ad}}$ is a matrix representation of ${\mathfrak{g}}$. We denote by $M_i$ the matrices of ${\rm ad}_{E_i}$ $(i=1,2,3)$ with respect to the basis $\{E_1,E_2,E_3\}$ of ${\mathfrak{g}}$. Then for an arbitrary $X=x_1E_1+x_2E_2+x_3E_3$ ($x_1, x_2, x_3 \in {\mathbb{R}}$) in ${\mathfrak{g}}$ the matrix $A$ of ${\rm ad}_X$ is $A=x_1M_1+x_2M_2+x_3M_3$. Hence, by virtue of the well-known identity $e^A={\rm {Ad}}\left(e^X\right)$, we find the matrix representation of the Lie group $G$. By using \eqref{5.1} we obtain $M_1$, $M_2$, $M_3$ and then $A$:
\[
M_1=\left(\begin{array}{lll} 0 & 0 & 0 \cr 0 & 0 & \alpha \cr 0 & 0 & 0 \end{array}\right), \quad M_2=\left(\begin{array}{lcr} 0 & 0 & -\alpha \cr 0 & 0 & 0 \cr 0 & 0 & 0 \end{array}\right), \quad M_3=\left(\begin{array}{rll} 0 & \alpha & 0 \cr -\alpha & 0 & 0 \cr 0 & 0 & 0 \end{array}\right),
\]
\begin{align}\label{5.9}
A=\left(\begin{array}{ccc} 0 & x_3\alpha & -x_2\alpha \cr -x_3\alpha & 0 & x_1\alpha \cr 0 & 0 & 0 \end{array}\right).
\end{align}
The characteristic polynomial of $A$ is
\[
P_A(\lambda )=-\lambda(\lambda ^2+x_3^2\alpha ^2) .
\]
Hence for the eigenvalues $\lambda _i \, (i = 1, 2, 3)$ of $A$ we have
\[
\lambda _1=0 , \quad \lambda _2=ix_3\alpha , \quad \lambda _3=-ix_3\alpha , \quad i^2=-1.
\]
Assuming that $x_3\neq 0$, the eigenvectors
\[
p_1=(x_1,x_2,x_3), \quad p_2=(1,i,0),\quad p_3=(i,1,0)
\]
corresponding to $\lambda _1, \lambda _2, \lambda _3$, respectively, are linearly independent for arbitrary $x_1$ and $x_2$. For the change of basis matrix $P$ and its inverse matrix $P^{-1}$ we get
\[
P=\left(\begin{array}{rll} x_1 & 1 & i \cr x_2 & i & 1 \cr x_3 & 0 & 0 \end{array}\right) , \quad P^{-1}=\frac{1}{2x_3}\left(\begin{array}{ccc} 0 & 0 & 2 \cr x_3 & -ix_3 & -x_1+ix_2 \cr -ix_3 & x_3 & -x_2+ix_1 \end{array}\right) .
\]
By using that $e^A = Pe^JP^{-1}$, where $J$ is the diagonal matrix with elements $J_{ii} =\lambda _i$, we obtain the matrix representation of the Lie group $G$ in the case $x_3\neq 0$:
\begin{align}\label{5.10}
\small
G=\left\{e^A= \left(\begin{array}{ccc} \cos \alpha x_3 & \sin \alpha x_3 & \frac{x_1}{x_3}(1-\cos \alpha x_3)-\frac{x_2}{x_3}\sin \alpha x_3 \cr \cr -\sin \alpha x_3 & \cos \alpha x_3 & \frac{x_2}{x_3}(1-\cos \alpha x_3)+\frac{x_1}{x_3}\sin \alpha x_3 \cr \cr 0 & 0 & 1 \end{array}\right) \right\} .
\end{align}
The coordinates of the vector field $t\dot C\in {\mathfrak{g}}$, $t\in \mathbb{R}$, are $(tp,tq,ta)$, where $p,q$ are given by \eqref{5.6} and $a\neq 0$. Since ${\rm Ad}(C(t))={\rm Ad}\left(e^{t\dot C}\right)$, we find ${\rm Ad}(C(t))$ by replacing $x_1$, $x_2$ and $x_3$ in \eqref{5.10} with $tp$, $tq$ and $ta$, respectively. Thus, for the matrix representation of a $\varphi $-slant null curve $C(t)$, which is not a Legendre curve, we have
\begin{align*}
{\rm Ad}(C(t))= \left(\begin{array}{ccc} \cos \alpha at & \sin \alpha at & \frac{p}{a}(1-\cos \alpha at)-\frac{q}{a}\sin \alpha at \cr \cr -\sin \alpha at & \cos \alpha at & \frac{q}{a}(1-\cos \alpha at)+\frac{p}{a}\sin \alpha at \cr \cr 0 & 0 & 1 \end{array}\right) .
\end{align*}
Finally, we may obtain the matrix representations of $\dot C$, $W_1$ and $N_1$ by replacing $x_1$, $x_2$ and $x_3$ in \eqref{5.9} with their coordinates, determined by \eqref{5.6}, \eqref{W_1} and \eqref{N_1}, respectively.
\end{document}
\begin{document}
\title{Semi-regular varieties and variational Hodge conjecture}
\selectlanguage{french}
\begin{abstract}
D'apr\`{e}s \cite{b1,fl} nous savons que les sous-vari\'{e}t\'{e}s semi-r\'{e}guli\`{e}res satisfont la conjecture de Hodge variationnelle, c'est-\`{a}-dire, \'{e}tant donn\'{e} une famille de vari\'{e}t\'{e}s projectives lisses $ \pi: \mc{X} \to B $, une fibre sp\'{e}ciale $ \mc{X}_o$ et une sous-vari\'{e}t\'{e} semi-r\'{e}guli\`{e}re $ Z \subset \mc{X}_o$, la classe de cohomologie correspondant \`{a} $ Z $ reste une classe de Hodge (lorsque $ \mc{X}_o $ se d\'{e}forme le long de $B$) si et seulement si $ Z $ reste un cycle alg\'{e}brique. Dans cet article, nous \'{e}tudions des exemples de telles sous-vari\'{e}t\'{e}s. En particulier, nous prouvons que toute vari\'{e}t\'{e} projective lisse $ Z $ de dimension $ n $ est une sous-vari\'{e}t\'{e} semi-r\'{e}guli\`{e}re d'une hypersurface projective lisse dans $ \mb{P}^{2n + 1} $ de degr\'{e} suffisamment grand.
\end{abstract}
\selectlanguage{english}
\begin{abstract}
Following \cite{b1, fl} we know that semi-regular sub-varieties satisfy the variational Hodge conjecture i.e., given a family of smooth projective varieties $\pi:\mc{X} \to B$, a special fiber $\mc{X}_o$ and a semi-regular subvariety $Z \subset \mc{X}_o$, the cohomology class corresponding to $Z$ remains a Hodge class (as $\mc{X}_o$ deforms along $B$) if and only if $Z$ remains an algebraic cycle. In this article, we investigate examples of such sub-varieties. In particular, we prove that any smooth projective variety $Z$ of dimension $n$ is a semi-regular sub-variety of a smooth projective hypersurface in $\mb{P}^{2n+1}$ of large enough degree.
\end{abstract}
\section{Introduction}
The aim of this article is to study examples of semi-regular varieties. The semi-regularity for a curve on a surface was first introduced in \cite{sev}. This was later generalized to arbitrary divisors on a complex manifold by Kodaira and Spencer in \cite{kod1}. In \cite{b1}, Bloch extended the notion to cycles corresponding to local complete intersection subschemes. This was further generalized by Buchweitz and Flenner in \cite{fl}. One of the motivations for the study of semi-regular varieties comes from the variational Hodge conjecture, namely that such varieties satisfy it. In particular, Bloch in \cite{b1} and Buchweitz and Flenner in \cite{fl} noticed that for a smooth projective variety $X$ and a semi-regular local complete intersection subscheme $Z$ in $X$, any infinitesimal deformation of $X$ lifts the cohomology class of $Z$ (which is a Hodge class) to a Hodge class if and only if $Z$ lifts to a local complete intersection subscheme (in the deformed scheme). In the case of a smooth hypersurface $X$ in $\p3$, an effective divisor $C$ in $X$ is said to be \emph{semi-regular} if $h^1(\mo_X(C))=0$. If $C$ is smooth and $\deg(X)>\deg(C)+4$, then Serre duality implies that $h^1(\mo_X(C))=h^1(\mo_X(-C)(d-4))$, where $d=\deg(X)$, which is equal to zero because the Castelnuovo-Mumford regularity of $C$ is at most $\deg(C)$. But the description of the semi-regularity for subschemes which are not divisors is more complicated, as we see below in \S \ref{sem4}. The main result of this article generalizes the above case of divisors to higher codimension subvarieties (see \S \ref{sem7}). In particular, we prove:
\begin{thm}\label{sem1}
Let $Z$ be a smooth subscheme in $\pn$ of codimension $n+1$.
Then for $d \gg 0$, there exists a smooth degree $d$ hypersurface $X$ in $\pn$ containing $Z$ such that $Z$ is semi-regular in $X$.
\end{thm}
We finally observe in Remark \ref{sem3} that for such a choice of $Z$ and $X$, the cohomology class of $Z$ in $H^{n,n}(X,\mb{Z})$ satisfies the variational Hodge conjecture for a family of degree $d$ hypersurfaces in $\pn$ with a special fiber $X$.
\section{Bloch's Semi-regularity map}\label{sem4}
\begin{para}\label{ph08}
In \cite{b1}, Bloch generalizes the above definition of semi-regularity for divisors to any local complete intersection subscheme in a smooth projective variety over an algebraically closed field. We briefly recall the definition. Let $X$ be a smooth projective variety of dimension $n$ and $Z$ be a local complete intersection subscheme in $X$ of codimension $q$. Consider the composition morphism
\[\Omega_X^{n-q+1} \times \bigwedge^{q-1} \N_{Z|X}^\vee \xrightarrow{1 \times \bigwedge^{q-1}\bar{d}} \Omega_X^{n-q+1} \times \Omega_X^{q-1} \otimes \mo_Z \xrightarrow{\bigwedge} K_X \otimes \mo_Z\]
where
\[\bar{d}:\N_{Z|X}^\vee \cong \I_{Z|X}/\I_{Z|X}^2 \to \Omega^1_X \otimes \mo_Z\]
is the map induced by the differential $d:\I_{Z|X} \to \Omega^1_X$, with $\I_{Z|X}$ denoting the ideal sheaf of $Z$ in $X$. By adjunction, this induces a map,
\[\Omega_X^{n-q+1} \to \bigwedge^{q-1}\N_{Z|X} \otimes K_X \cong \N_{Z|X}^\vee \otimes K_Z^0,\]
where $K_Z^0:=\bigwedge^{q}\N_{Z|X} \otimes K_X$ is the \emph{dualizing sheaf}. Dualizing the induced map in cohomology,
\[H^{n-q-1}(X,\Omega^{n-q+1}) \to H^{n-q-1}(Z,\N_{Z|X}^\vee \otimes K_Z^0), \mbox{ gives us } \pi:H^1(\N_{Z|X}) \to H^{q+1}(X,\Omega_X^{q-1}).\]
\end{para}
\begin{defi}
The map $\pi$ is called the \emph{semi-regularity map} and if it is injective we say that $Z$ is \emph{semi-regular}.
\end{defi}
\section{Proof of Theorem \ref{sem1} and an application}\label{sem7}
\begin{para}
Before we come to the final result of this article, we recall a result by Kleiman and Altman which tells us that, given a smooth subscheme in $\pn$ of codimension $n+1$, there exists a \emph{smooth} hypersurface in $\pn$ containing it.
\end{para}
\begin{note}
Let $Z$ be a projective subscheme in $\pn$. Denote by
\[Z_e:=\{z \in Z| \dim \Omega^1_{Z,z}=e\}.\]
\begin{thm}[{\cite[Theorem $7$]{kleim}}]\label{exi1}
If for any $e>0$ such that $Z_e \not= \emptyset$ we have that $\dim Z_e+e$ is less than $2n+1$, then there exists a smooth hypersurface in $\pn$ containing $Z$. Moreover, if $Z$ is $(d-1)$-regular (in the sense of Castelnuovo-Mumford), then there exists such a smooth hypersurface of degree $d$ containing $Z$.
\end{thm}
\end{note}
We need the following proposition:
\begin{prop}\label{sem2}
Let $Z$ be a smooth subscheme in $\pn$ of codimension $n+1$ and $X$ be a smooth degree $d$ hypersurface in $\pn$ containing $Z$ for some $d \gg 0$. Then, for any integer $2 \le i <n$, $h^n\left(\bigwedge^{i-1}\T_Z \otimes \bigwedge^{n-i}\N_{Z|X}(d-4)\right)=0$.
\end{prop}
\begin{proof}
Since $X$ is a hypersurface in $\pn$, $\N_{X|\pn}$ is isomorphic to $\mo_X(d)$.
Under this identification, we get the following normal short exact sequence,
\[0 \to \N_{Z|X} \to \N_{Z|\pn} \to \mo_Z(d) \to 0.\]
This gives rise to the following short exact sequence for $0 \le i \le n$:
\[0 \to \bigwedge^{n-i}\N_{Z|X} \to \bigwedge^{n-i}\N_{Z|\pn} \to \left( \bigwedge^{n-i-1}\N_{Z|X} \right) \otimes \mo_Z(d) \to 0.\]
Denote by $\mc{F}_{j,k}:=\bigwedge^{j}\T_Z \otimes \mo_X(k)$ for some $j,k \in \mb{Z}_{\ge 0}$. Since $Z$ and $X$ are smooth, $\mc{F}_{j,k}$ is $\mo_Z$-locally free and hence $\mo_Z$-flat. Tensoring the previous short exact sequence by $\mc{F}_{j,k}$ then gives us the following short exact sequence,
\[0 \to \mc{F}_{j,k} \otimes \bigwedge^{n-i}\N_{Z|X} \to \mc{F}_{j,k} \otimes \bigwedge^{n-i}\N_{Z|\pn} \to \mc{F}_{j,k} \otimes \bigwedge^{n-i-1}\N_{Z|X}(d) \to 0.\]
By Serre's vanishing theorem, for $d \gg 0, l>0$ and $m \ge 1$, $H^m\left(\mc{F}_{j,ld-4} \otimes \bigwedge^{n-i}\N_{Z|\pn}\right)=0$, hence
\begin{equation}\label{sem5}
H^m\left(\mc{F}_{j,ld-4} \otimes \bigwedge^{n-i-1}\N_{Z|X}(d)\right) \cong H^{m+1}\left(\mc{F}_{j,ld-4} \otimes \bigwedge^{n-i}\N_{Z|X}\right).
\end{equation}
Using Serre's vanishing theorem again, for $d \gg 0$ and $i \ge 1$, $h^i\left(\bigwedge^{i-1}\T_Z((n-i+1)d-4)\right)=0$. Hence, using the isomorphism (\ref{sem5}) recursively, we get for $j=i-1$,
\[h^n\left(\bigwedge^{i-1} \T_Z \otimes \bigwedge^{n-i}\N_{Z|X}(d-4)\right)=h^{n-1}\left(\bigwedge^{i-1} \T_Z \otimes \bigwedge^{n-i-1}\N_{Z|X}(2d-4)\right)=...\]\[...=h^i\left(\bigwedge^{i-1} \T_Z((n-i+1)d-4)\right)=0.\]
This proves the proposition.
\end{proof}
\begin{proof}[Proof of Theorem \ref{sem1}]
The existence of a smooth hypersurface in $\pn$ containing $Z$ for $d \gg 0$ follows from Theorem \ref{exi1}. It suffices to prove that there exists a hypersurface $X$ in $\pn$ of degree $d \gg 0$ containing $Z$ such that the morphism from $H^{n-1}(\Omega_X^{n+1} \otimes \mo_Z)$ to $H^{n-1}(\N_{Z|X}^{\vee} \otimes \bigwedge^n \N_{Z|X} \otimes K_X)$, which is the dual to the semi-regularity map $\pi$ (see \ref{ph08}), is surjective. Consider the short exact sequence,
\[0 \to \T_Z \to \T_X \otimes \mo_Z \to \N_{Z|X} \to 0.\]
Consider the associated filtration,
\[0=F^n \subset F^{n-1} \subset ... \subset F^0=\bigwedge^{n-1}(\T_X \otimes \mo_Z) \mbox{ satisfying } F^p/F^{p+1} \cong \bigwedge^p \T_Z \otimes \bigwedge^{n-1-p} \N_{Z|X} \]
for all $p$. Taking $p=0$ we get the following short exact sequence
\[0 \to F^1 \to \bigwedge^{n-1}(\T_X \otimes \mo_Z) \to \bigwedge^{n-1}\N_{Z|X} \to 0.\]
Tensoring this by $K_X$ and looking at the associated long exact sequence, we get
\[ ...\to H^{n-1}(\Omega_X^{n+1} \otimes \mo_Z) \to H^{n-1}(\N_{Z|X}^{\vee} \otimes \bigwedge^n \N_{Z|X} \otimes K_X) \to H^n(F^1(d-4)) \to ...\]
It therefore suffices to prove that $h^n(F^1(d-4))=0$. We claim that it is sufficient to prove $h^n(F^{n-1}(d-4))=0$. Indeed, suppose $h^n(F^{n-1}(d-4))=0$. By Proposition \ref{sem2}, for any integer $2 \le i \le n-1$, we have
\[ h^n\left(\bigwedge^{i-1} \T_Z \otimes \bigwedge^{n-i} \N_{Z|X}(d-4)\right)=0.\]
Consider the following short exact sequence, where $2 \le p \le n-1$,
\begin{equation}\label{sem8}
0 \to F^p \to F^{p-1} \to \bigwedge^{p-1} \T_Z \otimes \bigwedge^{n-p} \N_{Z|X} \to 0
\end{equation}
Tensoring (\ref{sem8}) by $K_X \cong \mo_X(d-4)$ and considering the corresponding long exact sequence, we can conclude $h^n(F^{n-2}(d-4))=0$ (substitute $p=n-1$).
Recursively substituting $p=n-2, n-3,...,2$ in (\ref{sem8}), we observe that $h^n(F^i(d-4))=0$ for $i=1,...,n-2$. In particular, $h^n(F^1(d-4))=0$. Hence, it suffices to prove $h^n(F^{n-1}(d-4))=0$. Note that $F^{n-1} \cong \bigwedge^{n-1}\T_Z$ does not depend on the choice of $X$, hence is independent of $d$. Therefore, by Serre's vanishing theorem, $h^n(F^{n-1}(d-4))=0$ for $d \gg 0$. This completes the proof of the theorem.
\end{proof}
\begin{rem}\label{sem3}
Notations as in Theorem \ref{sem1}. We now note that the theorem implies a very special case of the variational Hodge conjecture. Indeed, consider a family $\pi:\mc{X} \to S$ of smooth degree $d$ hypersurfaces in $\mb{P}^{2n+1}$ with $X$ as a special fiber. Denote by $\gamma$ the cohomology class of $Z$ in $X$. Then, using \cite[Theorem $7.1$]{b1}, we notice that $\gamma$ remains a Hodge class if and only if $Z$ remains an algebraic variety as $X$ deforms along $S$.
\end{rem}
Humboldt Universit\"{a}t zu Berlin, Institut f\"{u}r Mathematik, Unter den Linden $6$, Berlin $10099$, Germany,\\
E-mail address: [email protected]\\
Freie Universit\"{a}t Berlin, FB Mathematik und Informatik, Arnimallee 3, 14195 Berlin, Germany.\\
E-mail address: [email protected]
\end{document}
\begin{document}
\title{{ {Bilateral Shorted Operators and Parallel Sums}} \footnote{{\bf Keywords:} Schur complements, shorted operators, parallel sum, parallel subtraction and minus order.} \footnote{{\bf 2000 AMS Subject Classification:} 47A64. } }
\author {Jorge Antezana, Gustavo Corach and Demetrio Stojanoff \thanks{Partially supported by UBACYT I030, ANPCYT PICT 03-09521, PIP 2188/00 and UNLP 11/X350.} \footnote{E-mail addresses: [email protected], [email protected] and [email protected]}}
\maketitle
\vglue.3truecm
\fontsize {10}{8}\selectfont
\centerline{ {\bf Jorge Antezana and Demetrio Stojanoff }}
\centerline{ Depto. de Matem\'atica, FCE-UNLP, La Plata, Argentina and IAM-CONICET. }
\centerline{{\bf Gustavo Corach }}
\centerline{ Depto. de Matem\'atica, FI-UBA and IAM-CONICET,}
\centerline{ Saavedra 15, Piso 3 (1083), Buenos Aires, Argentina.}
\fontsize {12}{14}\selectfont
\vglue.5truecm
\begin{abstract}
In this paper we study shorted operators relative to two different subspaces, for bounded operators on infinite dimensional Hilbert spaces. We define two notions of ``complementability" in the sense of Ando for operators, and study the properties of the shorted operators when they can be defined. We use these facts in order to define and study the notions of parallel sum and subtraction in this Hilbertian context.
\end{abstract}
\section{Introduction}
This paper is devoted to the generalization of two operations coming from electrical network theory: the parallel sum of matrices and the shorting of matrices. In \cite{andersonduf}, W. N. Anderson Jr. and R. J. Duffin defined, for positive (semidefinite) matrices $A$ and $B$, the parallel sum $A\sump B= A(A+B)^\dagger B$. The motivation for studying this operation, and its name, come from the following fact: if two resistive $n$-port networks, with impedance matrices $A$ and $B$, are connected in parallel, then $A\sump B$ is the impedance matrix of the parallel connection. It should be mentioned that the impedance matrix of a resistive $n$-port network is a positive (semidefinite) $n\times n$ matrix. On the other hand, in \cite{anderson} Anderson defined, for a positive $n\times n$ matrix $A$ and a subspace $\mathcal{S}$ of $\mathbb{C}^n$, the shorted matrix of $A$ by $\mathcal{S}$. Just to give an idea about $A_{/\mathcal{S}}$, suppose that $A$ has the block form $\begin{pmatrix}A_{11}& A_{12}\\ A_{21} & A_{22}\end{pmatrix}$ where $A_{11}$ is a $k\times k$ block and $A_{22}$ is an $(n-k)\times(n-k)$ block. If $\mathcal{S}$ is the subspace spanned by the first $k$ canonical vectors, then
\[
A_{/\mathcal{S}}=\begin{pmatrix} A_{11}-A_{12} A_{22}^{\dagger} A_{21} & 0\\ 0 & 0 \end{pmatrix}
\]
where ${\dagger}$ denotes the Moore-Penrose inverse. (Some authors define $A_{/\mathcal{S}}$ as a linear transformation $\mathcal{S}\to\mathcal{S}$, avoiding the zeroes above). The name shorted comes from the fact that it gives the joint impedance of a resistive $n$-port, some of whose parts have been short-circuited. Here $A$ is the impedance matrix of the original network and $A_{/\mathcal{S}}$ is the impedance matrix of the network after the short circuits. Both operations have been studied in the Hilbert space context (see the historical notes below). One of the goals of this paper is to extend the shorting operation to bounded linear operators between two different Hilbert spaces, given a closed subspace on each one.
The solution we get, which we call the bilateral shorted operator, comes from a notion of weak complementability, which is a refinement of a finite dimensional notion due to T. Ando \cite{Ando} and generalized by D. Carlson and E. V. Haynsworth \cite{[CaHayn]}. The bilateral shorted operator has been studied in finite dimensions by S. K. Mitra and M. L. Puri \cite{[MP]} (see also the papers by H. Goller \cite{Goller} and Mitra and Prasad \cite{[Mitra1]}, who refined some results of \cite{[MP]}). However, their methods strongly depend on the existence of generalized inverses, so they cannot be used for operators with non-closed range. The second goal is to extend parallel summability to two bounded linear operators between different Hilbert spaces. It should be mentioned that C. R. Rao and S. K. Mitra \cite{[MR]}, and Mitra and K. M. Prasad \cite{[Mitra1]}, have studied this extension in finite dimensional spaces. Again, generalized inverses are the main tool they use. In order to avoid generalized inverses, we frequently use what we call hereafter Douglas theorem, an extremely useful result due to R. G. Douglas \cite{douglas}, which we describe after fixing some notation. In these notes, $\mathcal{H}_1$ and $\mathcal{H}_2$ denote Hilbert spaces, $L(\mathcal{H}_1,\mathcal{H}_2)$ is the space of all bounded linear operators between $\mathcal{H}_1$ and $\mathcal{H}_2$, we write $L(\mathcal{H}_i)=L(\mathcal{H}_i,\mathcal{H}_i)$, and $L(\mathcal{H}_1)^+$ (resp. $L(\mathcal{H}_2)^+$) denotes the cone of all positive operators on $\mathcal{H}_1$ (resp. $\mathcal{H}_2$). Recall that $C\in L(\mathcal{H})$ is called positive if $\pint{Cx,\ x}\geq 0$ for every $ x\in\mathcal{H}$. For every $C\in L(\mathcal{H}_1,\mathcal{H}_2)$ its range is denoted by $R(C)$ and its nullspace by $N(C)$. Given two selfadjoint operators $A,B \in L(\mathcal{H})$, $A\leq B$ means that $B-A\in L(\mathcal{H})^+$ (this is called the usual or L\"owner order). A projection is an idempotent (bounded linear) operator. Given a closed subspace $\mathcal{S}\subseteq\mathcal{H}_1$, we denote by $P_\mathcal{S} \in L(\mathcal{H}_1)$ the orthogonal projection onto $\mathcal{S}$. Douglas theorem states that given $A\in L(\mathcal{H}_1,\mathcal{H}_2)$ and $B \in L( \mathcal{H}_3 ,\mathcal{H}_2 )$, the following conditions are equivalent:
$$ \mbox{1. $R(B) \subseteq R(A)$,}\ \ \mbox{2. $\exists\ \lambda\geq 0:$ $BB^* \leq \lambda \ AA^*$}\ \ \mbox{and 3. $\exists\ D \in L( \mathcal{H}_3 ,\mathcal{H}_1 ):$ $B = AD$.} $$
With the additional condition $ R(D) \subseteq N(A)^\perp$, $D$ is unique and it is called the \textbf{reduced solution} of the equation $AX=B$; it holds that $\|D\|^2 = \inf \big\{\lambda \in \mathbb {R}\ : \ BB^* \le \lambda \ AA^* \big\} $ and $N(D) = N(B)$.
\noindent We shall use the fact that each pair of closed subspaces $\mathcal{S}\subseteq\mathcal{H}_1$ and $\mathcal{T}\subseteq\mathcal{H}_2$ induces a representation of elements of $L(\mathcal{H}_1,\mathcal{H}_2)$ by $2\times 2$ block matrices.
In this sense, we identify each $A\in L(\mathcal{H}_1,\mathcal{H}_2)$ with a $2\times 2$ matrix, let us say
\begin{equation}\label{matrix}
A = \begin{array}{c} \mathcal{T} \\ \mathcal{T}^\bot \end{array} \begin{pmatrix} A_{11} & A_{12} \\ A_{21} & A_{22} \end{pmatrix} \begin{array}{c} \mathcal{S} \\ \mathcal{S}^\bot \end{array},
\end{equation}
where $A_{11}=\left.P_\mathcal{T} A\right|_\mathcal{S} \in L(\mathcal{S}, \mathcal{T})$, $A_{12}=\left.P_\mathcal{T} A\right|_{\mathcal{S} ^\bot} $, $A_{21}=\left.P_{\mathcal{T}^\bot} A\right|_\mathcal{S}\ $ and $\ A_{22}=\left.P_{\mathcal{T}^\bot} A\right|_{\mathcal{S}^\bot}$.
\noindent {\bf Historical survey:} In 1947, M. G. Krein \cite{[K]} proved the existence of a maximum (with respect to the usual order) of the set
$ \mathcal{M}(A,\mathcal{S})=\{C\in L(\mathcal{H})^+: C\leq A\ , \ R(C)\subseteq \mathcal{S}\}. $
Krein used this extremal operator in his theory of extension of symmetric operators. See the paper by Yu. L. Smul'jian \cite{Smul} for more results in similar directions. Many years later, W. N. Anderson Jr. \cite{anderson} rediscovered, for finite dimensional spaces, the existence of the maximum, which will be denoted $A_{/\mathcal{S}}$ and called the shorted operator of $A$ by $\mathcal{S}$. Some time before, W. N. Anderson and R. J. Duffin \cite{andersonduf} had developed the binary matrix operation called parallel sum: if $A, B\in L(\mathbb{C}^n)^+$ the parallel sum $A\sump B$ is defined by the formula
\[
A\sump B=A(A+B)^\dagger B.
\]
P. Fillmore and J. P. Williams \cite{fill} defined the parallel sum of positive (bounded linear) operators on a Hilbert space $\mathcal{H}$ and extended many of Anderson-Duffin's results. It should be mentioned that their definition of parallel sum is based on certain Douglas reduced solutions. Anderson and G. E. Trapp \cite{andtrapp} defined $A_{/\mathcal{S}}$ for a positive operator $A$ on $\mathcal{H}$ and a closed subspace $\mathcal{S}$ of $\mathcal{H}$, and proved that $A_{/\mathcal{S}}$ can be defined by means of parallel sums, and conversely: if $P$ is the orthogonal projection onto $\mathcal{S}$, then $A\sump nP$ converges to $A_{/\mathcal{S}}$ in the operator uniform norm; and for $A,B\in L(\mathcal{H})^+$, $A\sump B$ can be defined as the shorted operator of $\begin{pmatrix}A&A\\ A&A+B\end{pmatrix}\in L(\mathcal{H}\oplus \mathcal{H})^+$ by the subspace $\mathcal{H}\oplus \{0\}$. This is the approach we shall use here. The shorting of an operator is one of the manifestations of the Schur complement: if $M$ is a square matrix with block form
$$ M=\begin{pmatrix} A&B\\ C&D\end{pmatrix}, $$
where $A$ and $D$ are also square blocks and $D$ is invertible, the classical Schur complement of $D$ in $M$ is $A-BD^{-1}C$ (see \cite{[Ca]}, \cite{Co} and \cite{[omelet]} for many results, applications and generalizations of this notion). T. Ando \cite{Ando} proposed a generalization of Schur complements which is closer to the idea of the shorted operators. If $A$ is an $n\times n$ complex matrix and $\mathcal{S}$ is a subspace of $\mathbb{C}^n$, $A$ is called \textbf{$\mathcal{S}$-complementable} if there are matrices $M_r$ and $M_l$ such that $PM_r=M_r$, $M_lP=M_l$, $PAM_r=PA$ and $M_lAP=AP$. (Here $P$ is the orthogonal projection onto $\mathcal{S}$).
It holds that $AM_r=M_l A M_r= M_l A$ and that $AM_r$ does not depend on the particular choice of $M_r$ and $M_l$; Ando calls $A_{\mathcal{S}}=AM_r$ the Schur compression and $A_{/\mathcal{S}}=A-A_{\mathcal{S}}=A-AM_r$ the Schur complement of $A$ with respect to $\mathcal{S}$. He observes that, if $A$ is a positive $n\times n$ matrix and $\mathcal{S}$ is the subspace generated by the last $n-k$ canonical vectors, then $A_{/\mathcal{S}}$ has the block form
$$ \begin{pmatrix} A-BD^\dagger C &0\\ 0&0\end{pmatrix}, $$
and therefore his definition extends the classical Schur complement. D. Carlson and E. V. Haynsworth \cite{[CaHayn]} observed that a similar construction can be done starting with $A\in \mathbb{C}^{n\times m}$ and subspaces $\mathcal{S}\subseteq\mathbb{C}^n$ and $\mathcal{T}\subseteq\mathbb{C}^m$. They defined and studied the notion of operators which are complementable with respect to a pair $(\mathcal{S},\mathcal{T})$. As Anderson and Duffin remarked in \cite{andersonduf}, the impedance matrix is positive only for resistive networks. In order to study networks with reactive elements, parallel summation and shorting must be extended to not necessarily positive matrices and operators. C. R. Rao and S. K. Mitra \cite{[MR]} defined and studied parallel sums of $m\times n$ matrices, and Mitra \cite{[MP]} used their results to define a sort of bilateral shorted operator by two subspaces, one in $\mathbb{C}^n$ and the other in $\mathbb{C}^m$. A common feature of both extensions is the use of generalized inverses. It should be mentioned that these constructions can be applied to linear regression problems, as in \cite{[MP]}, \cite{[MPut]} and \cite[Appendix]{[Mitra1]}.
\noindent We summarize the contents of this paper. In section 3 we study the notion of {\it complementability\rm} in infinite dimensional Hilbert spaces and we define the concept of {\it weak complementability\rm} (see Definition \ref{definicion de la debil}). We also prove in this section the basic properties of (weakly or not) complementable triples and we give some criteria for each kind of complementability. In section 4, under some compatibility conditions between the operator $A$ and the subspaces $\mathcal{S}$ and $\mathcal{T}$, we define a bilateral shorted operator $\short{A}{\mathcal{S}}{\mathcal{T}}\in L(\mathcal{H}_1,\mathcal{H}_2)$, and we study the usual properties of a shorting operation. As Mitra \cite{[Mitraminus]} proved for finite dimensional spaces, we show that $\short{A}{\mathcal{S}}{\mathcal{T}}$ is the maximum of a certain set for a suitable order (the so-called minus order) in $L(\mathcal{H}_1,\mathcal{H}_2)$. The rest of the paper is devoted to the notions of parallel addition and subtraction of operators and their relationship with the shorted operator. The parallel addition is defined by means of the following device, due to Anderson and Trapp: given $A,B\in L(\mathcal{H}_1,\mathcal{H}_2)$, we say that $A$ and $B$ are \textbf{weakly parallel summable (resp. parallel summable)} if the triple formed by $\begin{pmatrix} A & A \\ A & A+B \end{pmatrix} \in L\big(\mathcal{H}_1\oplus \mathcal{H}_1 , \mathcal{H}_2 \oplus \mathcal{H}_2 \big)$, $\mathcal{H}_1\oplus \{0\}$ and $\mathcal{H}_2\oplus \{0\}$ is weakly complementable (resp. complementable).
In this case we define the \textbf{parallel sum} of $A$ and $B$, denoted by $A\sump B\in L(\mathcal{H}_1,\mathcal{H}_2)$, as follows:
\[ \begin{pmatrix} A\sump B& 0 \\ 0 & 0 \end{pmatrix}=\left. \begin{pmatrix} A & A \\ A & A+B \end{pmatrix}\right/ \begin{array} {r} \\ {(\mathcal{H}_1\oplus \{0\},\mathcal{H}_2\oplus \{0\})} \end{array} . \]
We study the properties of this operator. Again, under the hypothesis of summability, all the properties of the finite dimensional case are recovered in our context. In section 5 we define the notion of parallel subtraction, we give some conditions which assure its existence, and we prove some of its properties. In section 6 we extend to the bilateral case some well known formulae for the shorted operator in terms of parallel sums and subtractions, showing that, as for positive operators, the parallel and shorting operations can be defined one in terms of the other.
\section{Preliminaries}
We need the following two definitions of angles between subspaces of a Hilbert space; they are due, respectively, to Friedrichs and Dixmier (see \cite{[Di]} and \cite{[Fr]}, and the excellent survey by Deutsch \cite{[De]}).
\begin{fed}\rm Given two closed subspaces $\mathcal{M}$ and $\mathcal{N}$, the {\it Friedrichs angle} between $\mathcal{M}$ and $\mathcal{N}$ is the angle in $[0,\pi/2]$ whose cosine is defined by
\[ \angf{\mathcal{M}}{\mathcal{N}}=\sup\Big\{\,|\pint{x , \, y}|:\; x \in \mathcal{M}\ominus (\mathcal{M}\cap \mathcal{N}), \; y \in \mathcal{N}\ominus (\mathcal{M}\cap \mathcal{N})\;\mbox{and}\;\|x\|=\|y\|=1 \Big\}. \]
The \textit{Dixmier angle} between $\mathcal{M}$ and $\mathcal{N}$ is the angle in $[0,\pi/2]$ whose cosine is defined by
\[ \angd{\mathcal{M}}{\mathcal{N}}=\sup\Big\{\,|\pint{x , \, y}|: \; x\in \mathcal{M}, \;y\in \mathcal{N}\;\mbox{and}\;\|x\|=\|y\|=1 \Big\}. \]
\end{fed}
\noindent The next proposition collects the results on angles which are relevant to our work.
\begin{pro}\label{propiedades elementales de los angulos}
\begin{enumerate}
\item [\rm 1. ] Let $\mathcal{M}$ and $\mathcal{N}$ be two closed subspaces of $\mathcal{H}$. Then
\begin{enumerate}
\item [\rm a. ] $\angf{\mathcal{M}}{\mathcal{N}}= \angf{\mathcal{M}^\bot}{\mathcal{N}^\bot}$;
\item [\rm b. ] $\angf{\mathcal{M}}{\mathcal{N}}<1$ if and only if $\mathcal{M}+\mathcal{N}$ is closed;
\item [\rm c. ] $\mathcal{H}=\mathcal{M}^\bot+\mathcal{N}^\bot$ if and only if $\angd{\mathcal{M}}{\mathcal{N}}<1$.
\end{enumerate}
\item [\rm 2. ] (Bouldin \cite{[Bo]}) Given $B\in L(\mathcal{H}_1,\mathcal{H}_2)$ and $A\in L(\mathcal{H}_2 , \mathcal{H}_3 )$ with closed range, then $R(AB)$ is closed if and only if $\angf{R(B)}{N(A)}<1$.
\end{enumerate}
\end{pro}
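\begin{rem}\rm
The difference between these two angles can already be seen in the following elementary example, which is not needed later. If $\mathcal{M}=\mathcal{N}$ is a nonzero closed subspace of $\mathcal{H}$, then $\mathcal{M}\ominus(\mathcal{M}\cap\mathcal{N})=\{0\}$, so $\angf{\mathcal{M}}{\mathcal{N}}=0$, while clearly $\angd{\mathcal{M}}{\mathcal{N}}=1$. On the other hand, if $\mathcal{M}$ and $\mathcal{N}$ are two distinct lines through the origin in $\mathbb{R}^2$ forming a geometric angle $\theta\in(0,\pi/2]$, then $\mathcal{M}\cap\mathcal{N}=\{0\}$ and both cosines coincide and equal $\cos\theta$. ${\blacktriangle}$
\end{rem}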
\section{Complementable operators}
In this section we study complementable operators. We recall different characterizations of this notion, their extensions to infinite dimensional Hilbert spaces, and the relationships among them. The next definition, due to Carlson and Haynsworth \cite{[CaHayn]}, is an extension of Ando's generalized Schur complement \cite{Ando}.
\begin{fed}\label{definicion de complementable}\rm Given two projections $P_r\in L(\mathcal{H}_1)$ and $P_l\in L(\mathcal{H}_2)$, an operator $A\in L(\mathcal{H}_1,\mathcal{H}_2)$ is called $(P_r,P_l)$-complementable if there exist operators $M_r\in L(\mathcal{H}_1)$ and $M_l\in L(\mathcal{H}_2)$ such that
\begin{enumerate}
\item $(I-P_r)M_r=M_r\,$,
\item $(I-P_l)AM_r=(I-P_l)A$,
\item $(I-P_l)M_l=M_l$,
\item $M_lA(I-P_r)=A(I-P_r)$.
\end{enumerate}
\end{fed}
\noindent We shall prove later that this notion only depends on the ranges of $P_r$ and $P_l$. As in the finite dimensional case, we have the following alternative characterization of complementability. We use freely matrix decompositions like \eqref{matrix}.
\begin{pro}\label{las tres equivalencias} Let $P_r\in L(\mathcal{H}_1)$ and $P_l\in L(\mathcal{H}_2)$ be two projections whose ranges are $\mathcal{S}$ and $\mathcal{T}$, respectively. Given $A\in L(\mathcal{H}_1,\mathcal{H}_2)$, the following statements are equivalent:
\begin{enumerate}
\item [\rm 1.] $A$ is $(P_r,P_l)$-complementable.
\item [\rm 2.] $R(A_{21})\subseteq R(A_{22})$ and $R(A_{12}^*)\subseteq R(A_{22}^*)$.
\item [\rm 3.] There exist two projections $\widehat{P}\in L(\mathcal{H}_1)$ and $\widehat{Q}\in L(\mathcal{H}_2)$ such that:
\begin{align}\label{ayb} R(\widehat{P}^{\,*})=\mathcal{S}&& R(\widehat{Q})=\mathcal{T}&& R(A\widehat{P})\subseteq \mathcal{T}&& \mbox{and} && R((\widehat{Q}A)^*)\subseteq \mathcal{S}. \end{align}
\end{enumerate}
\end{pro}
\proof $1\Rightarrow 2$: By Definition \ref{definicion de complementable} it holds that $ M_r= \begin{array}{c} \mathcal{S} \\ \mathcal{S}^\bot \end{array} \begin{pmatrix} \ 0 & 0 \ \\ \ C & D \ \end{pmatrix} \begin{array}{c} \mathcal{S} \\ \mathcal{S}^\bot \end{array}, $ and $A_{21}=A_{22}C$. Hence $R(A_{21})\subseteq R(A_{22})\,$. Similar arguments show that $R(A_{12}^*)\subseteq R(A_{22}^*)$.
\noindent $2\Rightarrow 3$: Let $E$ and $F$ be the reduced solutions of $A_{21}=A_{22}X$ and $A_{12}^*=A_{22}^*X$, respectively. Note that $E \in L(\mathcal{S} , \mathcal{S} ^\perp )$ and $F \in L(\mathcal{T} , \mathcal{T}^\perp )$. If
\begin{align*} \widehat{P}=\begin{pmatrix} I & 0 \\ -E & 0 \end{pmatrix}\begin{array}{c} \mathcal{S} \\ \mathcal{S}^\bot \end{array} \in L(\mathcal{H}_1) && \mbox{and} && \widehat{Q}=\begin{pmatrix} I & -F^* \\ 0 & 0 \end{pmatrix}\begin{array}{c} \mathcal{T} \\ \mathcal{T}^\bot \end{array} \in L(\mathcal{H}_2) \ , \end{align*}
easy computations show that these projections satisfy \ecua{ayb}.
\noindent $3\Rightarrow 1$: Define $M_r=I-\widehat{P}$ and $M_l=I-\widehat{Q}^*$. Then $R(M_r)=\mathcal{S}^\bot$ and $R(M_l)=\mathcal{T}^\bot$, so conditions 1. and 3. of Definition \ref{definicion de complementable} are satisfied. On the other hand,
\begin{align*} (I-P_l)AM_r=(I-P_l)A(I-\widehat{P})=(I-P_l)A-(I-P_l)A\widehat{P}=(I-P_l)A, \peso{and} \end{align*}
\begin{align*} M_lA(I-P_r)=(I-\widehat{Q}^*)A(I-P_r)=A(I-P_r)-\widehat{Q}^*A(I-P_r)=A(I-P_r). \end{align*}
This shows that conditions 2. and 4. of Definition \ref{definicion de complementable} also hold. ${\blacksquare}$
\noindent The next characterization has been considered in \cite{CMS2} for selfadjoint operators in a Hilbert space. We prove an extension to our general setting.
\begin{pro}\label{caracterizacion de la fuerte con subespacios} Let $P_r\in L(\mathcal{H}_1)$ and $P_l\in L(\mathcal{H}_2)$ be two projections with ranges $\mathcal{S}$ and $\mathcal{T}$, respectively, and let $A\in L(\mathcal{H}_1,\mathcal{H}_2)$. Then the following statements are equivalent:
\begin{enumerate}
\item [\rm 1.] $A$ is $(P_r,P_l)$-complementable.
\item [\rm 2.] $\mathcal{H}_1=\mathcal{S}^\bot+A^{-1}(\mathcal{T})$ and $\mathcal{H}_2=\mathcal{T}^\bot+A^{*-1}(\mathcal{S})$.
\item [\rm 3.] $\angd{\mathcal{S}}{\overline{A^*(\mathcal{T}^\bot)}}<1$ and $\angd{\mathcal{T}}{\overline{A(\mathcal{S}^\bot)}}<1$.
\end{enumerate}
\end{pro}
\proof $1\Longleftrightarrow 2$: Suppose that $A$ is $(P_r,P_l)$-complementable. By Proposition \ref{las tres equivalencias}, there exists a projection $P$ such that $R(P^*)=\mathcal{S}$ and $R(AP)\subseteq \mathcal{T}$. Then $N(P)=\mathcal{S}^\bot$ and $R(P)\subseteq A^{-1}(\mathcal{T})$. Hence $\mathcal{H}_1=\mathcal{S}^\bot+A^{-1}(\mathcal{T})$.
\noindent Conversely, suppose that $\mathcal{H}_1=\mathcal{S}^\bot+A^{-1}(\mathcal{T})$ and define $\mathcal{N}=\mathcal{S}^\bot\cap A^{-1}(\mathcal{T})$. Then $\mathcal{H}_1=\mathcal{S}^\bot\oplus \big( A^{-1}(\mathcal{T})\ominus \mathcal{N}\big)$. Let $\widehat{P}$ be the oblique projection onto $A^{-1}(\mathcal{T})\ominus \mathcal{N}$ parallel to $\mathcal{S}^\bot$. Then $ R(\widehat{P}^{\,*})=N(\widehat{P})^\bot=R(I-\widehat{P})^\bot=\mathcal{S} $, and $R(A\widehat{P})\subseteq \mathcal{T}$ because $R(\widehat{P})=A^{-1}(\mathcal{T})\ominus \mathcal{N}$. Similar arguments show that the existence of a projection $Q$ such that $R(Q)=\mathcal{T}$ and $R((QA)^*)\subseteq\mathcal{S}$ is equivalent to the identity $\mathcal{H}_2=\mathcal{T}^\bot+A^{*-1}(\mathcal{S})$.
\noindent $2\Longleftrightarrow 3$: It follows from Proposition \ref{propiedades elementales de los angulos} (item 1.c) and the equality $\big(A^*(\mathcal{T}^\bot)\big)^\bot = A^{-1}(\mathcal{T} )$. ${\blacksquare}$
\begin{remdef}\rm Proposition \ref{caracterizacion de la fuerte con subespacios}, as well as Proposition \ref{las tres equivalencias}, shows that the notion of $(P_r,P_l)$-complementable operators only depends on $R(P_l)$ and $R(P_r)$. Hence, from now on we shall say that an operator $A\in L(\mathcal{H}_1,\mathcal{H}_2)$ is \textbf{$(\ese,\ete)$-complementable} instead of $(P_r,P_l)$-complementable. ${\blacktriangle}$
\end{remdef}
\noindent In finite dimensional spaces, given a fixed subspace $\mathcal{S}$, every positive operator $A$ is $(\mathcal{S},\, \mathcal{S})$-complementable. Indeed, if $ A=\begin{pmatrix} A_{11} & A_{12} \\ A_{21} & A_{22} \end{pmatrix} \begin{array}{c} \mathcal{S} \\ \mathcal{S}^\bot \end{array}, $ then the inclusion $R(A_{21})\subseteq R(A_{22})$ always holds (see \cite{Smul} for details). However, in infinite dimensional Hilbert spaces, only the inclusion $R(A_{21})\subseteq R(A_{22}^{1/2})$ holds in general. Since $R(A_{22})= R(A_{22}^{1/2})$ if and only if $A_{22}$ has closed range (which is always the case in finite dimensional spaces), it is not difficult to find examples of positive operators which are not $(\mathcal{S},\, \mathcal{S})$-complementable (see, e.g., Example 5.5 of \cite{CMS2}). For this reason we consider the following weaker notion of complementability:
\begin{fed}\label{definicion de la debil}\rm Let $\mathcal{S}\subseteq\mathcal{H}_1$ and $\mathcal{T}\subseteq\mathcal{H}_2$ be closed subspaces.
An operator $A\in L(\mathcal{H}_1,\mathcal{H}_2)$ is called {\bf $(\ese,\ete)$-weakly complementable} if
$$ R(A_{21})\subseteq R(|A_{22}^*|^{1/2}) \peso{ and } R(A_{12}^*)\subseteq R(|A_{22}|^{1/2})\ ,$$
according to the matrix decomposition of $A$ given in equation \eqref{matrix}.
\end{fed}
\begin{rem} Observe that, by Douglas theorem, $R(|A_{22}^*|) = R(A_{22})$ and $R(|A_{22}|) = R(A_{22}^*)$. Therefore this notion is, indeed, weaker than the previously defined notion of complementability. However, if $R(A_{22})$ is closed, then $R(|A_{22}^*|)$ is also closed and $R(A_{22})=R(|A_{22}^*|)=R(|A_{22}^*|^{1/2})$; thus, in this case, both notions of complementability coincide. ${\blacktriangle}$
\end{rem}
\noindent As an easy consequence of Douglas theorem, we get the following alternative characterizations of $(\ese,\ete)$-weakly complementable operators.
\begin{pro}\label{caracterizaciones de la debil} Given $A\in L(\mathcal{H}_1,\mathcal{H}_2)$ and closed subspaces $\mathcal{S}\subseteq\mathcal{H}_1$, $\mathcal{T}\subseteq\mathcal{H}_2$, the following statements are equivalent:
\begin{enumerate}
\item[\rm 1.] $A$ is $(\ese,\ete)$-weakly complementable.
\item[\rm 2.] If $A_{22}=U|A_{22}|$ is the polar decomposition of $A_{22}$, then the equations $A_{21}=|A_{22}^*|^{1/2}U X$ and $A_{12}^*=|A_{22}|^{1/2}Y$ have solutions.
\item[\rm 3.] $\displaystyle \sup_{x\in\mathcal{T}^\perp}\frac{\|A_{21}^*x\|^2}{\pint{|A_{22}^*|x,\,x}}<\infty\;$ and $\displaystyle \;\sup_{y\in\mathcal{S}^\perp}\frac{\|A_{12}y\|^2}{\pint{|A_{22}|y,\,y}}<\infty$.
\end{enumerate}
\end{pro}
\section{Shorted Operators}
Recall that, in the classical case, i.e., if $\mathcal{H}_1 = \mathcal{H}_2 = \mathcal{H}$, $\mathcal{S} = \mathcal{T}$ and $A \in L(\mathcal{H})^+$, Anderson and Trapp \cite{andtrapp} proved that $A_{/\mathcal{S}}=\left(\begin{matrix} A_{11}-C^*C & 0\\ 0 & 0 \end{matrix}\right)$, where $C$ is the reduced solution of $A_{22}^{1/2}X=A_{21}$. Following this approach, we shall extend the notion of shorted operators to operators between Hilbert spaces $\mathcal{H}_1$ and $\mathcal{H}_2\,$. Throughout this section, $\mathcal{S} \subseteq \mathcal{H}_1$ and $\mathcal{T}\subseteq\mathcal{H}_2$ are closed subspaces and each operator $A\in L(\mathcal{H}_1,\mathcal{H}_2)$ is identified with a $2\times 2$ matrix induced by these subspaces, as in \eqref{matrix}.
\begin{fed}\rm Let $A\in L(\mathcal{H}_1,\mathcal{H}_2)$ be $(\ese,\ete)$-weakly complementable, and let $E$ and $F$ be the reduced solutions of the equations $A_{21}=|A_{22}^*|^{1/2}U X$ and $A_{12}^*=|A_{22}|^{1/2}X$, respectively, where $U$ is the partial isometry of the polar decomposition of $A_{22}$. The \textbf{bilateral shorted operator} of $A$ to the subspaces $\mathcal{S}$ and $\mathcal{T}$ is
\[ \short{A}{\mathcal{S}}{\mathcal{T}}=\begin{pmatrix} A_{11}-F^*E & 0\\ 0 & 0 \end{pmatrix}. \]
\end{fed}
\begin{rem} If $A_{22}$ has closed range, then $\short{A}{\mathcal{S}}{\mathcal{T}}=\begin{pmatrix} A_{11}-A_{12}A_{22}^{\dagger}A_{21} & 0\\ 0 & 0 \end{pmatrix}$. ${\blacktriangle}$
\end{rem}
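\begin{rem}\rm
The following completely elementary example, included only for illustration, may help to visualize the definition. Take $\mathcal{H}_1=\mathcal{H}_2=\mathbb{C}^2$, $\mathcal{S}=\mathcal{T}=\mathbb{C}\times\{0\}$ and $A=\begin{pmatrix} 2 & 1\\ 1 & 1\end{pmatrix}$. Here $A_{22}=1$ has closed range, so the previous remark applies and
\[ \short{A}{\mathcal{S}}{\mathcal{T}}=\begin{pmatrix} 2-1\cdot 1^{-1}\cdot 1 & 0\\ 0 & 0\end{pmatrix}=\begin{pmatrix} 1 & 0\\ 0 & 0\end{pmatrix}, \]
which is exactly the classical Schur complement of $A_{22}$ in $A$, completed with zeros. ${\blacktriangle}$
\end{rem}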
\noindent In the following proposition we collect some basic properties of the shorted operator; the proof is straightforward.
\begin{pro}\label{propiedades directas} Let $A\in L(\mathcal{H}_1,\mathcal{H}_2)$ be $(\ese,\ete)$-weakly complementable. Then
\begin{enumerate}
\item[\rm 1.] for every $\alpha\in\mathbb{C}$, $\alpha A$ is $(\ese,\ete)$-weakly complementable, and $\short{(\alpha A)}{\mathcal{S}}{\mathcal{T}}=\alpha(\short{A}{\mathcal{S}}{\mathcal{T}})$;
\item[\rm 2.] $A^*$ is $(\mathcal{T},\mathcal{S})$-weakly complementable, and $(\short{A}{\mathcal{S}}{\mathcal{T}})^*=\short{(A^*)}{\mathcal{T}}{\mathcal{S}}$;
\item[\rm 3.] $\short{A}{\mathcal{S}}{\mathcal{T}}$ is $(\ese,\ete)$-weakly complementable and $\short{(\short{A}{\mathcal{S}}{\mathcal{T}})}{\mathcal{S}}{\mathcal{T}}=\short{A}{\mathcal{S}}{\mathcal{T}}$;
\item[\rm 4.] if $A=A^*$ and $\mathcal{S}=\mathcal{T}$, then $\short{A}{\mathcal{S}}{\mathcal{S}}$ is selfadjoint.
\end{enumerate}
\end{pro}
\noindent The next proposition is similar to Theorem 1 of Butler and Morley \cite{butmor}. For the reader's convenience, we include a proof adapted to our setting. First we need a lemma.
\begin{lem}\label{el lema ese} Let $A,B\in L(\mathcal{H}_1,\mathcal{H}_2)$ be such that $R(A^*)\subseteq R(|B|^{1/2})$. Suppose that there exist a sequence $\{y_n\}$ in $\mathcal{H}_1$, a vector $d\in\mathcal{H}_2$ and a positive number $M$ satisfying
$$ A y_n\xrightarrow[n\rightarrow\infty]{}d\ ,\quad B y_n\xrightarrow[n\rightarrow\infty]{}0\ ,\peso{and}\pint{|B|y_n,y_n}\leq M. $$
Then $d=0$.
\end{lem}
\begin{proof} Since $\||B|^{1/2}y_n\|^2=\pint{|B|y_n,y_n}\leq M$, we can suppose, after passing to a subsequence, that there exists $z\in\mathcal{H}_1$ such that $|B|^{1/2}y_n\xrightarrow[n\rightarrow\infty]{} z$ weakly. As $By_n\xrightarrow[n\rightarrow\infty]{} 0$, it holds that $z\in N(|B|^{1/2})$. Let $C$ be the reduced solution of $A^*=|B|^{1/2}X$. Since $Ay_n\xrightarrow[n\rightarrow\infty]{} d$, we get $C^*z=d$, and $d=0$ because $N(|B|^{1/2})\subseteq N(C^*)$.
\end{proof}
\begin{pro}\label{teorema uno} Let $A\in L(\mathcal{H}_1,\mathcal{H}_2)$ be $(\ese,\ete)$-weakly complementable. Then, given $x\in\mathcal{S}$, there exist a sequence $\{y_n\}\subseteq \mathcal{S}^\bot$ and a positive number $M$ such that
$$ A\begin{pmatrix} x\\ y_n \end{pmatrix}\xrightarrow[n\rightarrow\infty]{} \short{A}{\mathcal{S}}{\mathcal{T}} \begin{pmatrix} x\\ 0 \end{pmatrix} \ ,\peso{and}\pint{|A_{22}|y_n,\;y_n}\leq M \ , \ \ n\in \mathbb{N} . $$
Conversely, if there exist a sequence $\{z_n\}$ in $\mathcal{S}^\bot$, a vector $d\in\mathcal{T}$ and a positive number $M$ such that
\begin{equation}\label{nuevazo2} A\begin{pmatrix} x\\ z_n \end{pmatrix}\xrightarrow[n\rightarrow\infty]{} \begin{pmatrix} d\\ 0 \end{pmatrix} \ ,\peso{and}\pint{|A_{22}|z_n,\;z_n}\leq M, \end{equation}
\noindent then $\begin{pmatrix} d\\ 0 \end{pmatrix} =\short{A}{\mathcal{S}}{\mathcal{T}}\begin{pmatrix} x\\ 0 \end{pmatrix}$.
\end{pro}
\begin{proof} Let $E$ and $F$ be the reduced solutions of $A_{21}=|A_{22}^*|^{1/2}UX$ and $A_{12}^*=|A_{22}|^{1/2}X$, respectively. As $ R(E)\subseteq \overline{R(U^*|A_{22}^*|^{1/2})} =\overline{R(|A_{22}|^{1/2})}$, given $x\in\mathcal{S}$ there is a sequence $\{y_n\}$ in $\mathcal{S}^\bot$ such that $|A_{22}|^{1/2}y_n\xrightarrow[n\rightarrow\infty]{} -Ex$. Then
\begin{eqnarray*} A_{21}x+A_{22} y_n&=& A_{21}x+U\,|A_{22}|^{1/2}|A_{22}|^{1/2}y_n \\ & = & A_{21}x+|A_{22}^*|^{1/2}U(|A_{22}|^{1/2}y_n) \xrightarrow[n\rightarrow\infty]{}0, \peso{and}\\ A_{11}x+A_{12} y_n&=& A_{11}x+F^*(|A_{22}|^{1/2}y_n)\xrightarrow[n\rightarrow\infty]{} A_{11}x-F^*Ex .
\end{eqnarray*}
\noindent Note that $A_{11}x-F^*Ex$ is the first coordinate of $\short{A}{\mathcal{S}}{\mathcal{T}}\begin{pmatrix} x\\ 0 \end{pmatrix}$. Finally, since the sequence $\{|A_{22}|^{1/2}y_n\}$ converges, $\sup_{n\in\mathbb{N}}\pint{|A_{22}|y_n,\,y_n} <\infty$. Conversely, suppose that there exists another sequence $\{z_n\}$ in $\mathcal{S}^\bot$ which satisfies \eqref{nuevazo2}. If $w_n=y_n-z_n$, then $\pint{|A_{22}|w_n,\,w_n}\leq K$ for some constant $K$. On the other hand, $A_{11}\, x+A_{12} \, y_n \xrightarrow[n\rightarrow\infty]{} \short{A}{\mathcal{S}}{\mathcal{T}}(x)$ and $A_{11}\, x+A_{12} \, z_n \xrightarrow[n\rightarrow\infty]{} d$. Therefore $A_{12}\, w_n \xrightarrow[n\rightarrow\infty]{} \short{A}{\mathcal{S}}{\mathcal{T}}(x)-d$. In a similar way, we obtain that $A_{22}\, w_n \xrightarrow[n\rightarrow\infty]{} 0$. Therefore, by Lemma \ref{el lema ese} (applied to $A_{12}$ and $A_{22}$), we get that $d=\short{A}{\mathcal{S}}{\mathcal{T}}(x)$.
\end{proof}
\begin{cor}\label{rango en el caso weak} Let $A\in L(\mathcal{H}_1,\mathcal{H}_2)$ be $(\ese,\ete)$-weakly complementable. Then
\begin{eqnarray} R(A)\cap\mathcal{T}\subseteq &R(\short{A}{\mathcal{S}}{\mathcal{T}})&\subseteq \overline{R(A)}\cap\mathcal{T} \label{inclusion uno}\\ R(A^*)\cap\mathcal{S}\subseteq &R((\short{A}{\mathcal{S}}{\mathcal{T}})^*)&\subseteq \overline{R(A^*)}\cap\mathcal{S} \label{inclusion dos} \end{eqnarray}
In particular, $R(\short{A}{\mathcal{S}}{\mathcal{T}})=R(A)\cap \mathcal{T}$ and $R((\short{A}{\mathcal{S}}{\mathcal{T}})^*)=R(A^*)\cap\mathcal{S}$ if $R(A)$ is closed.
\end{cor}
\begin{proof} Firstly, we shall prove that $R(\short{A}{\mathcal{S}}{\mathcal{T}})\subseteq \overline{R(A)}\cap\mathcal{T}$. Clearly, by definition, $R(\short{A}{\mathcal{S}}{\mathcal{T}})\subseteq \mathcal{T}$. On the other hand, let $P$ denote the orthogonal projection onto $\mathcal{S}$. Given $x\in\mathcal{H}_1$, by Proposition \ref{teorema uno} there exists a sequence $\{y_n\}$ in $\mathcal{S}^\bot$ such that $ A\begin{pmatrix} Px \\ y_n \end{pmatrix}\xrightarrow[n\rightarrow\infty]{} \short{A}{\mathcal{S}}{\mathcal{T}} \begin{pmatrix} Px \\ 0 \end{pmatrix}. $ Thus $R(\short{A}{\mathcal{S}}{\mathcal{T}})\subseteq \overline{R(A)}$.
\noindent In order to prove the first inclusion in \eqref{inclusion uno}, take $x\in R(A)\cap\mathcal{T}$ and let $z\in\mathcal{H}_1$ be such that $Az=x$. Then $ A \begin{pmatrix} Pz \\ z-Pz \end{pmatrix}=\begin{pmatrix} x \\ 0 \end{pmatrix}, $ and, by Proposition \ref{teorema uno}, we get $\short{A}{\mathcal{S}}{\mathcal{T}}(Pz)=x$. The other inclusions follow in the same way.
\end{proof}
\noindent Next, we shall study the shorting operation on $(\ese,\ete)$-complementable operators.
\begin{pro}\label{teorema uno para compatibles} Let $A\in L(\mathcal{H}_1,\mathcal{H}_2)$ be $(\ese,\ete)$-complementable. For every $x\in\mathcal{S}$ there exists $y \in\mathcal{S}^\bot$ such that
$$ A\begin{pmatrix} x \\ y \end{pmatrix}=\short{A}{\mathcal{S}}{\mathcal{T}}\begin{pmatrix} x \\ 0 \end{pmatrix}. $$
Moreover, there exist projections $P\in L(\mathcal{H}_1)$ and $Q\in L(\mathcal{H}_2)$ such that
\begin{equation}\label{QA=AP} R(P^*)=\mathcal{S} \ , \quad R(Q)=\mathcal{T} \peso{ and } QA=AP=\short{A}{\mathcal{S}}{\mathcal{T}} \ . \end{equation}
\end{pro}
\begin{proof} By Proposition \ref{las tres equivalencias}, there exists a projection $P\in L(\mathcal{H}_1)$ such that $R(P^*)=\mathcal{S}$ and $R(AP)\subseteq \mathcal{T}$.
The matrix decomposition of $P$ with respect to $\mathcal{S}$ is $\begin{pmatrix} I & 0 \\ E & 0 \end{pmatrix}$, where $I$ is the identity operator of $\mathcal{S}$ and $E\in L(\mathcal{S},\mathcal{S}^\bot)$. If $x\in \mathcal{S}$ and $y=Ex$, then $A\begin{pmatrix} x \\ y \end{pmatrix}=AP\begin{pmatrix} x \\ 0 \end{pmatrix}\in\mathcal{T}$. If $z_n=y$ for every $n\in\mathbb{N}$, the (constant) sequence $\{z_n\}$ satisfies \eqref{nuevazo2}. Hence, by Proposition \ref{teorema uno}, $A\begin{pmatrix} x \\ y \end{pmatrix}=\short{A}{\mathcal{S}}{\mathcal{T}}\begin{pmatrix} x \\ 0 \end{pmatrix}$. Therefore $AP=\short{A}{\mathcal{S}}{\mathcal{T}}$. In a similar way it can be proved that there exists $Q\in L(\mathcal{H}_2)$ with $R(Q)=\mathcal{T}$ such that $QA=\short{A}{\mathcal{S}}{\mathcal{T}}$.
\end{proof}
\begin{rem}\label{teorema dos para compatibles} Note that we actually proved that, if there exists a projection $P$ such that $R(P^*)=\mathcal{S}$ and $R(AP)\subseteq \mathcal{T}$, then, by Proposition \ref{teorema uno}, $AP=\short{A}{\mathcal{S}}{\mathcal{T}}$. This result, for positive operators, appeared in \cite{CMS2}, where the role of $P$ is played by a so-called $A$-selfadjoint projection, i.e., a projection which is selfadjoint with respect to the sesquilinear form $\pint{x,\ y}_A=\pint{Ax,\ y}$. The reader is referred to \cite{CMS2} and \cite{[HN]} for more information about $A$-selfadjoint projections. ${\blacktriangle}$
\end{rem}
\begin{cor}\label{Rango del shorted cuando es compatible.} Let $A\in L(\mathcal{H}_1,\mathcal{H}_2)$ be $(\ese,\ete)$-complementable. Then
$$ R(\short{A}{\mathcal{S}}{\mathcal{T}}) =R(A)\cap\mathcal{T} \quad \text{and} \quad N(\short{A}{\mathcal{S}}{\mathcal{T}})=\mathcal{S}^\bot+N(A) . $$
\end{cor}
\begin{proof} By Corollary \ref{rango en el caso weak}, it holds that $R(A)\cap\mathcal{T} \subseteq R(\short{A}{\mathcal{S}}{\mathcal{T}})$ and
$$ \mathcal{S}^\bot+N(A)\subseteq \big(\mathcal{S} \cap \overline{R(A^*)}\ \big) ^\perp \subseteq R\big((\short{A}{\mathcal{S}}{\mathcal{T}})^*\big)^\perp = N(\short{A}{\mathcal{S}}{\mathcal{T}}) . $$
On the other hand, by Proposition \ref{teorema uno para compatibles}, there exist two projections $P\in L(\mathcal{H}_1)$ and $Q\in L(\mathcal{H}_2)$ which satisfy \ecua{QA=AP}. Hence $R(\short{A}{\mathcal{S}}{\mathcal{T}}) = R(AP) \subseteq R(A)$, and
\[ N(\short{A}{\mathcal{S}}{\mathcal{T}})=N(AP)=N(P)\oplus \Big( R(P)\cap N(A) \Big) \subseteq \mathcal{S} ^\perp+N(A), \]
because $N(P)=R(P^*)^\bot=\mathcal{S}^\bot$.
\end{proof}
\begin{rem} If $A\in L(\mathcal{H}_1,\mathcal{H}_2)$ is $(\ese,\ete)$-complementable then, by Corollary \ref{Rango del shorted cuando es compatible.}, the subspaces $\mathcal{S}^\bot+N(A)$, $\ \mathcal{S}+N(A)^\bot$, $\ \mathcal{T}^\bot + R(A)^\perp$ and $\ \mathcal{T} + \overline{R(A)}$ must be closed. Moreover, if $R(A)$ is closed then, by Proposition \ref{propiedades elementales de los angulos}, $A( \mathcal{S}^\perp ) $, $A^*(\mathcal{T}^\perp ) $ and $R(A_{22})$ are also closed. Hence, in this case, generalized inverse methods can be used. Nevertheless, by using the approach developed in this work, one can obtain almost all the known properties of Schur complements in finite dimensional spaces for complementable operators in general Hilbert spaces, including those operators whose ranges are not closed.
${\blacktriangle}$
\end{rem}
\noindent {\bf The minus partial order.} In \cite{[Mitraminus]}, Mitra proved (for matrices in $\mathbb{C}^{m\times n}$) that $\short{A}{\mathcal{S}}{\mathcal{T}}$ is the unique maximum of the set
\[ \mathcal{M}^-(A,\mathcal{S},\mathcal{T})=\Big\{C\in\mathbb{C}^{m\times n}: \ C\leq^-A,\ \ R(C)\subseteq \mathcal{T} \peso{and} R(C^*)\subseteq \mathcal{S}\Big\}, \]
where the partial ordering is the so-called minus order: $C\leq^- A$ if
$$ R(C)\cap R(A-C)=\{0\}\peso{and} R(C^*)\cap R(A^*-C^*)=\{0\}. $$
A similar result can be obtained in our setting with suitable changes. Firstly, we need to extend the minus order to infinite dimensional Hilbert spaces:
\begin{fed}\label{minus order} \rm Given $A, B\in L(\mathcal{H}_1,\mathcal{H}_2)$, we write $A\leq^- B$ if:
\begin{align*} \mbox{\text{(a) }}\ \, \angd{\overline{R(A)}}{\overline{R(B-A)}}<1&& \mbox{and} && \mbox{\text{(b) }}\ \, \angd{\overline{R(A^*)}}{\overline{R(B^*-A^*)}}<1 \ . \end{align*}
\end{fed}
\begin{rem} In the finite dimensional case, condition (a) is equivalent to $R(A)\cap R(B-A)=\{0\}$ and condition (b) is equivalent to $R(A^*)\cap R(B^*-A^*)=\{0\}$. So Definition \ref{minus order} extends the (finite dimensional) minus order. Notice also that $A\leq^- B$ if and only if $A^*\leq^- B^*$, by the symmetry of conditions (a) and (b). ${\blacktriangle}$
\end{rem}
\noindent The next proposition provides conditions equivalent to condition (a) in Definition \ref{minus order} which are simpler to handle. A similar result for condition (b) can be obtained by taking adjoints.
\begin{pro}\label{equivalencias para el minus} Given $A,B\in L(\mathcal{H}_1,\mathcal{H}_2)$, the following statements are equivalent:
\begin{enumerate}
\item $\angd{\overline{R(A)}}{\overline{R(B-A)}}<1$.
\item There exists a projection $Q\in L(\mathcal{H}_2)$ such that $R(Q)=\overline{R(A)}$ and $A=QB$.
\item There exists a projection $Q\in L(\mathcal{H}_2)$ such that $A=QB$.
\end{enumerate}
\end{pro}
\proof $1\Longrightarrow 2:\ $ Let $\mathcal{L}=\overline{R(A)}\oplus\overline{R(B-A)}$, which is closed by Proposition \ref{propiedades elementales de los angulos}. Let $Q \in L(\mathcal{H}_2)$ be the projection with $R(Q)= \overline{R(A)}$ and $N(Q) = \overline{R(B-A)}\oplus \mathcal{L}^\perp$. Then $QB=Q\Big((B-A)+A\Big)=QA=A$.
\noindent $2\Longrightarrow 3:\ $ This is immediate.
\noindent $3\Longrightarrow 1:\ $ Since $A=QB$ and $B-A=(I-Q)B$, it holds that $R(A)\subseteq R(Q)$ and $R(B-A)\subseteq R(I-Q)=N(Q)$. Hence $\angd{\overline{R(A)}}{\overline{R(B-A)}}\le \angd{R(Q)}{N(Q)}<1$. ${\blacksquare}$
\begin{cor}\label{ring} Let $A, B \in L(\mathcal{H}_1,\mathcal{H}_2)$.
\begin{enumerate}
\item If $A\leq^- B$, then $R(A) \subseteq R(B)$ and $R(A^*) \subseteq R(B^*)$.
\item The relation $\leq^-$ is a partial order (i.e., it is reflexive, antisymmetric and transitive).
\item If $A\leq^-B$ and $B$ is a projection, then $A$ is also a projection.
\end{enumerate}
\end{cor}
\begin{proof} The first two statements follow easily from Proposition \ref{equivalencias para el minus}. If $A\leq^-B$ and $B^2 = B$, then by Proposition \ref{equivalencias para el minus}, applied to $A$ and $B$ (resp. $A^*$ and $B^*$), there exist projections $P$ and $Q$ such that $R(P^*)=\overline{R(A^*)}$, $R(Q)=\overline{R(A)}$ and $A=QB=BP$. Then $A^2=(QB)(BP)=QBP=A$.
\end{proof}
\begin{teo}\label{mitraing} Let $A\in L(\mathcal{H}_1,\mathcal{H}_2)$ be $(\ese,\ete)$-complementable, and let
\[ \mathcal{M}^-(A,\mathcal{S},\mathcal{T})=\Big\{C\in L(\mathcal{H}_1,\mathcal{H}_2): \ C\leq^-A,\ \ R(C)\subseteq \mathcal{T} \peso{and} R(C^*)\subseteq \mathcal{S}\Big\}. \]
Then $\displaystyle \short{A}{\mathcal{S}}{\mathcal{T}}=\max_{\leq^-}\ \mathcal{M}^-(A,\mathcal{S},\mathcal{T})$.
\end{teo}
\begin{proof} By Propositions \ref{teorema uno para compatibles} and \ref{equivalencias para el minus}, we know that $\short{A}{\mathcal{S}}{\mathcal{T}}\leq^- A$. Moreover, by Corollary \ref{Rango del shorted cuando es compatible.}, $R(\short{A}{\mathcal{S}}{\mathcal{T}})\subseteq \mathcal{T}$ and $R((\short{A}{\mathcal{S}}{\mathcal{T}})^*)\subseteq \mathcal{S}$. Hence $\short{A}{\mathcal{S}}{\mathcal{T}}\in \mathcal{M}^-(A,\mathcal{S},\mathcal{T})$. On the other hand, given $C\in \mathcal{M}^-(A,\mathcal{S},\mathcal{T})$, there exists a projection $E\in L(\mathcal{H}_2)$ such that $C=EA$. Let $P\in L(\mathcal{H}_1)$ be a projection as in Proposition \ref{teorema uno para compatibles}, i.e., such that $R(P^*)=\mathcal{S}$ and $\short{A}{\mathcal{S}}{\mathcal{T}}=AP$. The inclusion $R(C^*)\subseteq \mathcal{S}$ implies that $P^*C^*=C^*$, that is, $C=CP$. Therefore
\[ C=CP=EAP=E\,\short{A}{\mathcal{S}}{\mathcal{T}}. \]
In a similar way, there exists a projection $F$ such that $C^*=F(\short{A}{\mathcal{S}}{\mathcal{T}})^*$. So, by Proposition \ref{equivalencias para el minus}, $C\leq^- \short{A}{\mathcal{S}}{\mathcal{T}}$.
\end{proof}
\begin{cor} Let $A \in L(\mathcal{H})$ be a projection. If $\mathcal{S}, \mathcal{T} \subseteq \mathcal{H}$ are closed subspaces such that $A$ is $(\ese,\ete)$-complementable, then $N(A) + \mathcal{S}^\perp$ is closed,
$$ \mathcal{H} = \Big(R(A) \cap \mathcal{T}\Big) \oplus \Big(N(A) + \mathcal{S}^\perp \Big) \ , $$
and $\short{A}{\mathcal{S}}{\mathcal{T}}$ is the projection given by this decomposition.
\end{cor}
\begin{proof} By Theorem \ref{mitraing}, $\short{A}{\mathcal{S}}{\mathcal{T}} \le^- A$. Hence it must be a projection by Corollary \ref{ring}. The rest of the statement follows from Corollary \ref{Rango del shorted cuando es compatible.}.
\end{proof}
\noindent Next, we shall study the effect of shorting a shorted operator. The following result was proved for selfadjoint operators by Ando (see \cite{Ando}).
\begin{cor}\label{Teorema sobre el shorted de un shorted} Let $A\in L(\mathcal{H}_1,\mathcal{H}_2)$, and consider closed subspaces $\mathcal{S},\widehat{\mathcal{S}}$ of $\mathcal{H}_1$ and $\mathcal{T},\widehat{\mathcal{T}}$ of $\mathcal{H}_2$. If every operator involved is complementable with respect to the corresponding pair of subspaces, then
\begin{equation}\label{shorteds iterados} \short{\left(\short{A}{\mathcal{S}}{\mathcal{T}}\right)}{\widehat{\mathcal{S}}}{\widehat{\mathcal{T}}}= \short{A}{\mathcal{S}\cap\widehat{\mathcal{S}}}{\mathcal{T}\cap\widehat{\mathcal{T}}}. \end{equation}
\end{cor}
\begin{proof} Straightforward calculations show that $\mathcal{M}^-(\short{A}{\mathcal{S}}{\mathcal{T}},\widehat{\mathcal{S}},\widehat{\mathcal{T}})=\mathcal{M}^-(A,\mathcal{S}\cap\widehat{\mathcal{S}},\mathcal{T}\cap\widehat{\mathcal{T}})$. Then apply Theorem \ref{mitraing}.
\end{proof}
\begin{rem} Actually, the last result holds under weaker hypotheses; in fact, it is only needed that any two of the three shorted operators involved exist.
The reader is referred to \cite{Ando} for the proofs of these facts. Ando's proof, valid for a single subspace ($\mathcal{S}=\mathcal{T}$), can be easily extended to our setting. ${\blacktriangle}$
\end{rem}
\section{Parallel sum and parallel subtraction}
The device of the parallel sum of matrices was developed by Anderson and Duffin in \cite{andersonduf}. The extension to general Hilbert spaces is due to Anderson and Trapp \cite{andtrapp} (see also \cite{[Mitra1]} and \cite{[Mitra2]}). The key idea was to relate parallel sums and shorted operators. In this section we shall define the parallel sum of operators following the ideas of Anderson and Trapp (see, in particular, \cite{andtrapp}, Section 4). Even in the scalar case, not every pair of operators is summable, so we first need to define the concept of summable operators.
\begin{fed}\rm Let $A,B\in L(\mathcal{H}_1,\mathcal{H}_2)$. We say that $A$ and $B$ are \textbf{weakly parallel summable} if the following range inclusions hold:
\begin{enumerate}
\item [1. ] $R(A)\subseteq R(|A^*+B^*|^{1/2})$ and $R(B)\subseteq R(|A^*+B^*|^{1/2})$;
\item [2. ] $R(A^*)\subseteq R(|A+B|^{1/2})$ and $R(B^*)\subseteq R(|A+B|^{1/2})$.
\end{enumerate}
In this case, the \textbf{parallel sum} of $A$ and $B$, denoted by $A\sump B\in L(\mathcal{H}_1,\mathcal{H}_2)$, is:
\[ \begin{pmatrix} A\sump B& 0 \\ 0 & 0 \end{pmatrix}=\left.\begin{pmatrix} A & A \\ A & A+B \end{pmatrix}\right/ \begin{array} {r} \\ {(\mathcal{H}_1\oplus \{0\},\mathcal{H}_2\oplus \{0\})} \end{array} . \]
\end{fed}
\begin{rem}\label{comentario1} Note that the pair $(A,B)$ is weakly parallel summable if and only if the operator matrix $\begin{pmatrix} A & A \\ A & A+B \end{pmatrix}$ is $(\mathcal{H}_1\oplus \{0\},\mathcal{H}_2\oplus \{0\})$-weakly complementable. Hence, the parallel sum is well defined. ${\blacktriangle}$
\end{rem}
\begin{pro}\label{filmore williams} Let $A,B\in L(\mathcal{H}_1,\mathcal{H}_2)$ be weakly parallel summable operators and let $E_A$, $E_B$, $F_A$ and $F_B$ be, respectively, the reduced solutions of the equations
\begin{eqnarray} A\ =&|A^*+B^*|^{1/2}UX \ , \quad &B\ =|A^*+B^*|^{1/2}UX\label{dos}, \\ A^*=&|A+B|^{1/2}X\ , \quad &B^*=|A+B|^{1/2}X, \label{cuatro} \end{eqnarray}
where $U$ is the partial isometry of the polar decomposition of $A+B$. Then
\begin{equation}\label{filwill} A\sump B=F_A^*E_B=F_B^*E_A. \end{equation}
\end{pro}
\begin{proof} Note that $|A^*+B^*|^{1/2}U = U|A+B|^{1/2}$. Then, adding the equations in \eqref{dos} and in \eqref{cuatro}, we get
$$ |A+B|^{1/2} =E_A+E_B \peso{and} |A^*+B^*|^{1/2}U = F_A^*+F_B^* \ , $$
by the uniqueness of the reduced solution. By its definition, $A\sump B= A-F_A^*E_A$. Then, since $A=F_A^*\,|A+B|^{1/2}$,
\begin{align*} A\sump B&=A-F^*_AE_A=F^*_A(|A+B|^{1/2}-E_A)=F_A^*E_B \ . \end{align*}
The other equality follows in a similar way.
\end{proof}
\begin{cor} Let $A,B\in L(\mathcal{H}_1,\mathcal{H}_2)$ be weakly parallel summable. Then $A\sump B=B\sump A$.
\end{cor}
\begin{cor} Let $A,B\in L(\mathcal{H}_1,\mathcal{H}_2)$ be weakly parallel summable and suppose that the operator $A+B$ has closed range. Then $ A\sump B= A - A(A+B)^{\dagger}A=A(A+B)^{\dagger}B . $
\end{cor}
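\begin{rem}\rm
As an elementary illustration (not needed in what follows), if $\mathcal{H}_1=\mathcal{H}_2=\mathbb{C}$ and $a,b>0$, then $a\sump b=\dfrac{ab}{a+b}$, which is the joint resistance of two resistors $a$ and $b$ connected in parallel; this is the origin of the name. In $\mathbb{C}^2$, taking $A=\begin{pmatrix}1&0\\0&0\end{pmatrix}$ and $B=\begin{pmatrix}1&0\\0&1\end{pmatrix}$, the operator $A+B$ is invertible and the previous corollary gives $A\sump B=A(A+B)^{-1}B=\begin{pmatrix}1/2&0\\0&0\end{pmatrix}$; in particular, in this example $R(A\sump B)=R(A)\cap R(B)$. ${\blacktriangle}$
\end{rem}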
\noindent Using Proposition \ref{teorema uno} we obtain the following analogous result for the parallel sum.
\begin{pro}\label{sucesion para suma paralela.} Let $A,B\in L(\mathcal{H}_1,\mathcal{H}_2)$ be weakly parallel summable and let $x\in\mathcal{H}_1$. Then there exist a sequence $\{y_n\}$ in $\mathcal{H}_1$ and $M >0$ such that
$$ A(x+y_n) \xrightarrow[n\rightarrow\infty]{} A\sump B(x) \ , \quad B(y_n) \xrightarrow[n\rightarrow\infty]{} -A\sump B(x) \ , $$
and $\pint{|A+B|y_n,y_n} \leq M \ .$ Conversely, if there exist $d \in \mathcal{H}_2\,$, a sequence $\{y_n\}$ in $\mathcal{H}_1$ and a real number $M$ such that
$$ A(x+y_n) \xrightarrow[n\rightarrow\infty]{} d \ , \quad B(y_n) \xrightarrow[n\rightarrow\infty]{} -d \ , \peso{and} \pint{|A+B|y_n,y_n} \leq M \ , $$
then $A\sump B(x) = d$.
\end{pro}
\begin{cor} Let $A,B\in L(\mathcal{H}_1,\mathcal{H}_2)$ be weakly parallel summable. Then
\[ R(A)\cap R(B)\subseteq R(A\sump B)\subseteq \overline{R(A)}\cap \overline{R(B)} . \]
\end{cor}
\begin{proof} Given $x\in R(A)\cap R(B)$, let $y,z\in\mathcal{H}_1$ be such that $Ay=Bz=x$. Then $A\big((y+z)-z\big)=x$ and $B(-z)=-x$. In consequence, taking $w=y+z$ and $y_n=-z$ for every $n\in\mathbb{N}$, Proposition \ref{sucesion para suma paralela.} shows that $A\sump B(w)=x$, which proves the first inclusion. The second inclusion follows immediately from Proposition \ref{sucesion para suma paralela.}.
\end{proof}
\subsubsection*{Parallel summable operators}
Let $A,B\in L(\mathcal{H}_1,\mathcal{H}_2)$. As we have already pointed out in Remark \ref{comentario1}, the pair $(A,B)$ is weakly parallel summable if and only if the block matrix
\[ M = \begin{pmatrix} A & A \\ A & A+B \end{pmatrix} \]
is $(\mathcal{H}_1\oplus\{0\},\mathcal{H}_2\oplus\{0\})$-weakly complementable. From this point of view, it is natural to consider pairs of operators $(A,B)$ such that $M$ is $(\mathcal{H}_1\oplus\{0\},\mathcal{H}_2\oplus\{0\})$-complementable. In this section we shall study such pairs of operators.
\begin{fed}\rm \label{sumable} Let $A,B\in L(\mathcal{H}_1,\mathcal{H}_2)$. We say that $A$ and $B$ are \textbf{parallel summable} if
$$ R(A)\subseteq R(A+B) \peso{and} R(A^*)\subseteq R(A^*+B^{*}) \ . $$
Note that these conditions imply that $R(B)\subseteq R(A+B)$ and $R(B^*)\subseteq R(A^*+B^{*})$.
\end{fed}
\begin{rem} This notion is indeed stronger than weak parallel summability. For example, take $A,D\in L(\mathcal{H})^+$ such that $A\leq D$ but $R(A)\nsubseteq R(D)$, and put $B=D-A\in L(\mathcal{H})^+$. By Douglas theorem, $R(A)\subseteq R(A^{1/2} )\subseteq R(D^{1/2} ) = R((A+B)^{1/2})$. Similarly, since $B \le D$, also $R(B)\subseteq R(D^{1/2} ) =R((A+B)^{1/2})$. However, by hypothesis, the pair $(A,B)$ cannot be parallel summable, because $R(A)\nsubseteq R(A+B)=R(D)$.
\noindent Both notions coincide, for instance, if $R(A+B)$ is closed. In fact, in this case $R(A+B)=R(|(A+B)^*|)=R(|(A+B)^*|^{1/2})$ and $R((A+B)^*)=R(|A+B|)=R(|A+B|^{1/2})$. ${\blacktriangle}$
\end{rem}
\noindent Clearly, for parallel summable operators, some of the properties already proved can be improved. Let us mention, for instance, the following ones.
\begin{pro}\label{sucesion para la strong suma paralela.} Let $A,B\in L(\mathcal{H}_1,\mathcal{H}_2)$ be parallel summable and let $x\in\mathcal{H}_1$. Then there exists $y\in\mathcal{H}_1$ such that $ A(x+y) = A\sump B(x)$ and $ By =-A\sump B(x)$.
Moreover, there are projections $P\in L(\mathcal{H}_1\oplus\mathcal{H}_1)$ and $Q\in L(\mathcal{H}_2\oplus\mathcal{H}_2)$ such that $R(P^*)=\mathcal{H}_1\oplus\{0\}$, $R(Q)=\mathcal{H}_2\oplus\{0\}$ and
\[ Q\begin{pmatrix} A & A \\ A & A+B \end{pmatrix} =\begin{pmatrix} A & A \\ A & A+B \end{pmatrix} P=\begin{pmatrix} A\sump B & 0 \\ 0 & 0 \end{pmatrix}. \]
\end{pro}
\begin{proof} It follows immediately from Proposition \ref{teorema uno para compatibles}.
\end{proof}
\begin{cor}\label{rango de la strong suma paralela} If $A,B\in L(\mathcal{H}_1,\mathcal{H}_2)$ are parallel summable, then $R(A\sump B )=R(A)\cap R(B)$.
\end{cor}
\subsubsection*{Parallel subtraction}
Given two operators $A,C\in L(\mathcal{H}_1,\mathcal{H}_2)$, it seems natural to study the existence of a solution of the equation $A\sump X=C$, that is, to ask whether there exists an operator $B\in L(\mathcal{H}_1,\mathcal{H}_2)$, parallel summable with $A$, such that $A\sump B=C$. For positive operators this question has been studied, for example, in \cite{andersonduftrapp}, \cite{PS}, \cite{andersonmortrapp} and \cite{PEKA}. Clearly, the equation $A\sump X=C$ may have no solutions for some pairs of operators $(A,C)$. Indeed, Corollary \ref{rango de la strong suma paralela} implies that, if the equation $A\sump X=C$ has a solution, then $R(C)\subseteq R(A)$ and $R(C^*)\subseteq R(A^*)$ or, equivalently, $R(C-A)\subseteq R(A)$ and $R((C-A)^*)\subseteq R(A^*)$. In this section we shall prove that, if $R(C-A)=R(A)$ and $R((C-A)^*)= R(A^*)$, then the equation $A\sump X=C$ has a solution. Moreover, we shall single out a distinguished solution, the \textit{parallel subtraction} of the operators $C$ and $A$. Given $A\in L(\mathcal{H}_1,\mathcal{H}_2)$, let $\mathcal{D}_A$ be the set of operators defined by
\[ \mathcal{D}_A:=\{C\in L(\mathcal{H}_1,\mathcal{H}_2):\;R(C-A)=R(A)\;\;\mbox{and}\;\; R((C-A)^*)= R(A^*)\}. \]
\begin{pro}\label{ida y vuelta} Let $A\in L(\mathcal{H}_1,\mathcal{H}_2)$. Then the map $C\mapsto C\sump (-A)$ is a bijection between the sets $\mathcal{D}_A$ and $\mathcal{D}_{-A}$, with inverse $D\mapsto D\sump A$.
\end{pro}
\begin{proof} By the definition of parallel summability, it is clear that $C$ and $-A$ are parallel summable for every $C \in \mathcal{D}_A$. Let $E$ be the reduced solution of $C-A=AX$ and let $Q$ be a projection onto $\mathcal{H}_2\oplus\{0\}$ such that $ Q\begin{pmatrix} -A & -A \\ -A & C-A \end{pmatrix}= \begin{pmatrix} C\sump (-A) & 0 \\ 0 & 0 \end{pmatrix}. $ Since
$ \begin{pmatrix} C\sump (-A)+A & 0 \\ 0 & 0 \end{pmatrix} = Q\begin{pmatrix} 0 & -A \\ -A & C-A \end{pmatrix} \ \ \mbox{and}\ \ \begin{pmatrix} 0 & -A \\ -A & C-A \end{pmatrix}\begin{pmatrix} -E & 0 \\ -I & 0 \end{pmatrix}=\begin{pmatrix} A & 0 \\ 0 & 0 \end{pmatrix},$
we get that
$$ \begin{pmatrix} C\sump (-A)+A & 0 \\ 0 & 0 \end{pmatrix}\begin{pmatrix} -E & 0 \\ -I & 0 \end{pmatrix}= Q \begin{pmatrix} A & 0 \\ 0 & 0 \end{pmatrix} =\begin{pmatrix} A & 0 \\ 0 & 0 \end{pmatrix}. $$
\noindent This implies that $R(A)\subseteq R(C\sump (-A)+A)$. As the other inclusion always holds, we get $R(A)=R(C\sump (-A)+A)$. In a similar way we can prove that $R(A^*)=R\big((C\sump (-A)+A)^*\big)$. Thus, the map $\Phi:\mathcal{D}_A\to \mathcal{D}_{-A}$ given by $\Phi(C)=C\sump (-A)$ is well defined. To prove that $\Phi^{-1}(D)=D\sump A$, take $C\in\mathcal{D}_A$ and $x\in\mathcal{H}_1$.
Then there exist $y, z \in\mathcal{H}_1$ such that
\begin{align*} C\sump (-A)(x+y)=\big(C\sump (-A)\big)\sump A(x)\ ,\ \ \ \ Ay=-(C\sump (-A))\sump A(x)\ , \end{align*}
\begin{align*} C(x+y+z)= C\sump (-A)(x+y) \ , \ \ \mbox{and} \ \ (-A)z=-\,C\sump (-A)(x+y). \end{align*}
So $A(y+z)=0$, which implies that $C(y+z)=0$ because $N(A)=N(C-A)$. Hence
\[ Cx=C(x+y+z)=C\sump (-A)(x+y)=(C\sump (-A))\sump A(x)\ , \]
and the proof is complete.
\end{proof}
\begin{cor}\label{unica solucion} Let $A\in L(\mathcal{H}_1,\mathcal{H}_2)$. For every $C\in\mathcal{D}_A$, the equation
\[ A\sump X=C \]
has a solution. Moreover, $C\sump (-A)$ is the unique solution $X$ which also satisfies
$$ R(A+X)=R(A) \peso { and } R((A+X)^*)=R(A^*) \ . $$ ${\blacksquare}$
\end{cor}
\begin{fed}\label{definicion de diferencia paralela}\rm Given $A\in L(\mathcal{H}_1,\mathcal{H}_2)$ and $C\in\mathcal{D}_A$, the \textbf{parallel subtraction} of the operators $C$ and $A$, denoted by $C\difp A$, is defined as the unique solution of the equation $A\sump X=C$ guaranteed by Corollary \ref{unica solucion}.
\end{fed}
\begin{rem} Note that, according to our definition, it holds that $C\difp A=C\sump (-A)$; in particular, several properties of the parallel sum are inherited by the parallel subtraction. ${\blacktriangle}$
\end{rem}
\section{Shorted formulas using parallel sums} \label{seccion cinco}
\noindent In this section we shall prove some formulas for the shorted operator using parallel sums and subtractions. Throughout this section, $\mathcal{S}$ and $\mathcal{T}$ will be two fixed closed subspaces of $\mathcal{H}_1$ and $\mathcal{H}_2$, respectively. The following lemma was proved in \cite{PS} for pairs of positive operators.
\begin{lem}\label{suma paralela con shorted} Let $A\in L(\mathcal{H}_1,\mathcal{H}_2)$ be $(\ese,\ete)$-complementable. Let $B\in L(\mathcal{H}_1,\mathcal{H}_2)$ be such that $(A,B)$ and $(\short{A}{\mathcal{S}}{\mathcal{T}},B)$ are parallel summable and $A\sump B$ is $(\ese,\ete)$-complementable. Then
\[ \short{A}{\mathcal{S}}{\mathcal{T}}\sump B=\short{\big(A\sump B\big)}{\mathcal{S}}{\mathcal{T}} \ . \]
\end{lem}
\begin{proof} Let $x\in \mathcal{S}$. By Proposition \ref{sucesion para la strong suma paralela.}, there exists $y\in\mathcal{H}_1$ such that
$$ \short{A}{\mathcal{S}}{\mathcal{T}}(x+y)= \short{A}{\mathcal{S}}{\mathcal{T}}\sump B(x) \peso{and} By=-\short{A}{\mathcal{S}}{\mathcal{T}}\sump B(x). $$
Let $z\in\mathcal{S}^\bot$ be such that $A(x+y+z)=\short{A}{\mathcal{S}}{\mathcal{T}}(x+y)$. Then $ A(x+y+z)= \short{A}{\mathcal{S}}{\mathcal{T}}\sump B(x)$ and $By=-\short{A}{\mathcal{S}}{\mathcal{T}}\sump B(x)$. So $A\sump B\begin{pmatrix} x \\ z \end{pmatrix}=\short{A}{\mathcal{S}}{\mathcal{T}} \sump B(x)$, which implies, by Proposition \ref{teorema uno}, that $\short{(A\sump B)}{\mathcal{S}}{\mathcal{T}}\,x=\short{A}{\mathcal{S}}{\mathcal{T}}\sump B(x)$.
\end{proof}
\noindent The next technical result will be useful throughout this section.
\begin{pro}\label{lema3 para Demetrios} Let $A\in L(\mathcal{H}_1,\mathcal{H}_2)$ be $(\ese,\ete)$-complementable. If $B\in L(\mathcal{H}_1,\mathcal{H}_2)$ satisfies $R(B)=\mathcal{T}$ and $R(B^*)=\mathcal{S}$, then there exists $n_0 \in \mathbb{N}$ such that, for every $n\geq n_0$, $A$ and $nB$ are parallel summable.
\end{pro}
\noindent We need the following lemma.
\begin{lem}\label{lema1 para Demetrios} Let $A,B\in L(\mathcal{H}_1,\mathcal{H}_2)$ be such that $R(A)\subseteq\mathcal{T}$, $R(A^*)\subseteq\mathcal{S}$, $R(B)=\mathcal{T}$ and $R(B^*)=\mathcal{S}$. Then there exists $n_0\in\mathbb{N}$ such that, for every $n\geq n_0$, $R(A+nB)=\mathcal{T}$ and $R((A+nB)^*)=\mathcal{S}$.
\end{lem}
\begin{proof} It suffices to prove that $\mathcal{T}\subseteq R(A+nB)$ and $\mathcal{S}\subseteq R((A+nB)^*)$, because the reverse inclusions hold by hypothesis. Since $R(B^*) = \mathcal{S}$, Douglas theorem assures that there exists $\alpha>0$ such that $B^*B\geq \alpha P_\mathcal{S}$. Then
\begin{align*} |A+nB|^2&=A^*A+n^2\,B^*B+n\,(A^*B+B^*A) \geq \Big(\alpha n^2-n\,\|A^*B+B^*A\|\Big)P_\mathcal{S}. \end{align*}
Take $n_1\in \mathbb{N}$ such that $\alpha n^2 > n\,\|A^*B+B^*A\|$ for every $n\geq n_1\,$. By Douglas theorem, $\mathcal{S} \subseteq R(|A+nB|)=R((A+nB)^*)$ for every $n\geq n_1\,$. In a similar way, we can prove that there exists $n_2\in\mathbb{N}$ such that $\mathcal{T} \subseteq R(A+nB)$ for every $n\geq n_2$. Hence, the statement is proved by taking $n_0=\max\{n_1,n_2\}$.
\end{proof}
\begin{proof}[Proof of Proposition \ref{lema3 para Demetrios}] Take, as in Proposition \ref{teorema uno para compatibles}, a projection $P \in L(\mathcal{H}_1)$ such that $AP=\short{A}{\mathcal{S}}{\mathcal{T}}$ and $R(P^*)=\mathcal{S}$. Since $N(B)=\mathcal{S}^\bot = R(I-P)$, it holds that $B(I-P)=0$ and $BP=B$. By Lemma \ref{lema1 para Demetrios} there exists $n_1\in \mathbb{N}$ such that $R(\short{A}{\mathcal{S}}{\mathcal{T}}+nB)=\mathcal{T}$ and $R\big((\short{A}{\mathcal{S}}{\mathcal{T}}+nB)^*\big)=\mathcal{S}$ for every $n\geq n_1$. Fix $n \ge n_1$. Given $x\in\mathcal{H}_1\,$, there exists $y\in \mathcal{S}$ such that $\short{A}{\mathcal{S}}{\mathcal{T}}x= (\short{A}{\mathcal{S}}{\mathcal{T}}+nB)y$. If $z=Py+(I-P)x \in \mathcal{H}_1\,$, then
\begin{eqnarray*} Ax&=& A\big(Px+(I-P)x\big) = \short{A}{\mathcal{S}}{\mathcal{T}}x + A(I-P)x \\&=& \big(\short{A}{\mathcal{S}}{\mathcal{T}}+nB\big)y+(A+nB)(I-P)x \\&=&(A+nB)Py +(A+nB)(I-P)x = (A+nB)z \ . \end{eqnarray*}
This shows that $R(A)\subseteq R(A+nB)$. Following the same lines, it can be shown that there exists $n_2\in \mathbb{N}$ such that $R(A^*)\subseteq R\big((A+nB)^*\big)$ for every $n \ge n_2\,$. Thus, $A$ and $nB$ are parallel summable for every $n\geq \max\{n_1,\ n_2\}$.
\end{proof}
\noindent The parallel sum was defined in terms of shorted operators; the next proposition shows a converse relation.
\begin{pro}\label{Demetrios} Let $A\in L(\mathcal{H}_1,\mathcal{H}_2)$ be $(\ese,\ete)$-complementable. If $B\in L(\mathcal{H}_1,\mathcal{H}_2)$ satisfies $R(B)=\mathcal{T}$ and $R(B^*)=\mathcal{S}$, then there exists $n_0 \in \mathbb{N}$ such that:
\begin{enumerate}
\item [\rm 1. ] the pair $(A, \ nB)$ is parallel summable for every $n\geq n_0$, and
\item [\rm 2. ] $\displaystyle \short{A}{\mathcal{S}}{\mathcal{T}}=\lim_{n\to\infty} A\sump (nB)$ (in the norm topology).
\end{enumerate}
\end{pro}
\noindent Firstly, we shall prove Proposition \ref{Demetrios} in the following particular case.
\begin{lem}\label{lema2 para Demetrios} Let $A,B\in L(\mathcal{H}_1,\mathcal{H}_2)$ be such that $R(A)\subseteq\mathcal{T}$, $R(A^*)\subseteq\mathcal{S}$, $R(B)=\mathcal{T}$ and $R(B^*)=\mathcal{S}$. Then $A\sump (nB)\xrightarrow[n\rightarrow\infty]{} A$ in the operator norm.
\end{lem}
\begin{proof} Lemma \ref{lema1 para Demetrios} implies that there exists $n_0\geq 1$ such that, for every $n\geq n_0$, $A$ and $nB$ are parallel summable. Fix $n\geq n_0$.
By definition, $ A\sump (nB)=A-F_n^*E_n$, where $F_n$ and $E_n$ are, respectively, the reduced solutions of $A^*=|A+nB|^{1/2}X$ and $A=|(A+nB)^*|^{1/2}U_nX$, and $U_n$ is the partial isometry of the polar decomposition of $A+nB$. We shall show that $\|E_n\|\xrightarrow[n\rightarrow\infty]{} 0$ and $\|F_n\|\xrightarrow[n\rightarrow\infty]{} 0$, which clearly implies the desired norm convergence. By Douglas theorem,
\begin{align}\label{esn} \|F_n\|^2&=\inf\big\{\lambda\in \mathbb{R} \ : \ A^*A\leq \lambda\, |A+nB|\, \big\} \ , \quad n \in \mathbb{N} \ , \end{align}
and there exist $\alpha,\beta>0$ such that $A^*A\leq \beta P_\mathcal{S}$ and $B^*B\geq \alpha P_\mathcal{S}$. Then $(A^*A)^2 \le \beta^2 P_\mathcal{S}$, and
\begin{align*} |A+nB|^2&=A^*A+n^2\,B^*B+n\,(A^*B+B^*A)\\ &\geq \big(\alpha n^2-n\,\|A^*B+B^*A\|\big)P_\mathcal{S} \geq \frac{\alpha n^2-n\,\|A^*B+B^*A\|}{\beta^2} \ (A^*A)^2 \ . \end{align*}
Recall that L\"owner's theorem states that, for every $r\in (0,1]$, the function $f(x)=x^r$ is operator monotone, i.e., if $0\leq A\leq B$ then $A^r\leq B^r$. Therefore, if $n$ is large enough,
$$\displaystyle A^*A \le \frac{\beta}{\big(\alpha n^2-n\,\|A^*B+B^*A\|\big)^{1/2}} \ \ |A+nB| \ . $$
Hence, \eqref{esn} implies that $\|F_n\| \xrightarrow[n\rightarrow\infty]{} 0$. Analogously, we get that $\|E_n\|\xrightarrow[n\rightarrow\infty]{} 0\,$.
\end{proof}
\begin{proof}[Proof of Proposition \ref{Demetrios}.] By Proposition \ref{lema3 para Demetrios}, there exists $n_0$ such that, for every $n\geq n_0$, the pairs $(A,nB)$ and $(\short{A}{\mathcal{S}}{\mathcal{T}},nB)$ are parallel summable. Since the hypotheses of Lemma \ref{suma paralela con shorted} are satisfied for every $n\geq n_0\,$, it holds that
\[ A\sump nB=\short{(A\sump nB)}{\mathcal{S}}{\mathcal{T}}=\short{A}{\mathcal{S}}{\mathcal{T}}\sump nB \ . \]
Then, by Lemma \ref{lema2 para Demetrios}, with $\short{A}{\mathcal{S}}{\mathcal{T}}$ playing the role of $A$, we get $A\sump nB\xrightarrow[n\rightarrow\infty]{}\short{A}{\mathcal{S}}{\mathcal{T}}$.
\end{proof}
\noindent Our last result relates parallel sums, parallel subtractions and shorted operators.
\begin{pro}\label{shorted con suma y resta} Let $A\in L(\mathcal{H}_1,\mathcal{H}_2)$ be $(\ese,\ete)$-complementable, and let $L\in L(\mathcal{H}_1,\mathcal{H}_2)$ be such that $R(L)=\mathcal{T}$ and $R(L^*)=\mathcal{S}$. Then there exists $n\in \mathbb{N}$ such that
\begin{enumerate}
\item [\rm 1.] $A$ and $nL$ are parallel summable;
\item [\rm 2.] $\short{A}{\mathcal{S}}{\mathcal{T}}\in\mathcal{D}_{-nL}$;
\item [\rm 3.] $(A\sump nL)\difp nL=\short{A}{\mathcal{S}}{\mathcal{T}}$.
\end{enumerate}
\end{pro}
\begin{proof} The first two assertions follow from Proposition \ref{lema3 para Demetrios} and Lemma \ref{lema1 para Demetrios}, respectively. Since $R\big((A\sump nL)\difp nL\big)\subseteq \mathcal{T}$ and $R\big(((A\sump nL)\difp nL)^*\big)\subseteq \mathcal{S}$, then, by Lemma \ref{suma paralela con shorted},
\[ (A\sump nL)\difp nL=\short{\big((A\sump nL)\difp nL\big)}{\mathcal{S}}{\mathcal{T}}= (\short{A}{\mathcal{S}}{\mathcal{T}}\sump nL)\difp nL. \]
Finally, since $\short{A}{\mathcal{S}}{\mathcal{T}}\in\mathcal{D}_{-nL}$, Proposition \ref{ida y vuelta} gives $ (\short{A}{\mathcal{S}}{\mathcal{T}}\sump nL)\difp nL=(\short{A}{\mathcal{S}}{\mathcal{T}}\sump nL)\sump (-nL)=\short{A}{\mathcal{S}}{\mathcal{T}}\ , $ and the proof is complete.
\end{proof}
\begin{rem} Proposition \ref{Demetrios} was proved for positive operators by Anderson and Trapp in \cite{andtrapp} and by Pekarev and Smul'jian in \cite{PS}.
It was also considered by Mitra and Puri, who proved the formula $\displaystyle \short{A}{\mathcal{S}}{\mathcal{T}}=\lim_{n\to\infty} A\sump (nB)$ of Proposition \ref{Demetrios} for rectangular matrices (see \cite{[MP]}). However, their proof cannot be extended to infinite dimensional Hilbert spaces because it involves generalized inverses which, in our setting, only exist for closed range operators. Finally, the reader will find a generalization of Proposition \ref{shorted con suma y resta} for positive operators in \cite{PS}. ${\blacktriangle}$
\end{rem}
\end{document}
\begin{document} \title{Online Sparse Linear Regression} \begin{abstract} We consider the online sparse linear regression problem, which is the problem of sequentially making predictions observing only a limited number of features in each round, to minimize regret with respect to the best sparse linear regressor, where prediction accuracy is measured by square loss. We give an {\em inefficient} algorithm that obtains regret bounded by $\tilde{O}(\sqrt{T})$ after $T$ prediction rounds. We complement this result by showing that no algorithm running in polynomial time per iteration can achieve regret bounded by $O(T^{1-\delta})$ for any constant $\delta > 0$ unless $\textsf{NP} \subseteq \textsf{BPP}$. This computational hardness result resolves an open problem presented in COLT 2014~\citep{open-problem} and also posed by \citet{andras}. This hardness result holds even if the algorithm is allowed to access more features than the best sparse linear regressor up to a logarithmic factor in the dimension. \end{abstract} \section{Introduction} In various real-world scenarios, features for examples are constructed by running some computationally expensive algorithms. With resource constraints, it is essential to be able to make predictions with only a limited number of features computed per example. One example of this scenario, from \citep{CBSS}, is medical diagnosis of a disease, in which each feature corresponds to a medical test that the patient in question can undergo. Evidently, it is undesirable to subject a patient to a battery of medical tests, for medical as well as cost reasons. Another example from the same paper is a search engine, where a ranking of web pages must be generated for each incoming user query and the limited amount of time allowed to answer a query imposes restrictions on the number of attributes that can be evaluated in the process. In both of these problems, predictions need to be made sequentially as patients or search queries arrive online, learning a good model in the process. In this paper, we model the problem of prediction with limited access to features in the most natural and basic manner as an online sparse linear regression problem. In this problem, an online learner makes real-valued predictions for the labels of examples arriving sequentially over a number of rounds. Each example has $d$ features that can be potentially accessed by the learner. However, in each round, the learner is restricted to choosing an arbitrary subset of features of size at most $k$, a budget parameter. The learner then acquires the values of the subset of features, and then makes its prediction, at which point the true label of the example is revealed to the learner. The learner suffers a loss for making an incorrect prediction (for simplicity, we use square loss in this paper). The goal of the learner is to make predictions with total loss comparable to the loss of the best sparse linear regressor with a bounded norm, where the term {\em sparse} refers to the fact that the linear regressor has nonzero weights on at most $k$ features. To measure the performance of the online learner, we use the standard notion of {\em regret}, which is the difference between the total loss of the online learner and the total loss of the best sparse linear regressor. While regret is the primary performance metric, we are also interested in {\em efficiency} of the online learner.
Ideally, we desire an online learning algorithm that minimizes regret while making predictions {\em efficiently}, i.e., in polynomial time (as a function of $d$ and $T$). In this paper, we prove that this goal is impossible unless there is a randomized polynomial-time algorithm for deciding satisfiability of \textsf{3CNF}\xspace formulas, the canonical $\mathbb{N}P$-hard problem. This computational hardness result resolves open problems from \citep{open-problem} and \citep{andras}. In fact, the computational hardness persists even if the online learner is given the additional flexibility of choosing $k' = D \log(d) k$ features for any constant $D > 0$. In light of this result, in this paper we also give an {\em inefficient} algorithm for the problem which queries $k' \geq k + 2$ features in each round, that runs in $O({d \choose k} k')$ time per round, and that obtains regret bounded by $O(\tfrac{d^2}{(k' - k)^2}\sqrt{k \log(d)T})$. \section{Related Work and Known Results} A related setting is attribute-efficient learning \citep{CBSS,HK,KS}. This is a batch learning problem in which the examples are generated i.i.d., and the goal is to simply output a linear regressor using only a limited number of features per example with bounded excess risk compared to the optimal linear regressor, when given {\em full access} to the features at test time. While the aforementioned papers give efficient, near-optimal algorithms for this problem, these algorithms do not work in the online sparse regression setting in which we are interested because here we are required to make predictions only using a limited number of features. In \citep{open-problem}, a simple algorithm has been suggested, which is based on running a bandit algorithm in which the actions correspond to selecting one of ${d \choose k}$ subsets of coordinates of size $k$ at regular intervals, and within each interval, running an online regression algorithm (such as the Online Newton-Step algorithm of \citet{HKKA}) over the $k$ coordinates chosen by the bandit algorithm. This algorithm, with the right choice of interval lengths, has a regret bound of $O(k^2d^{k/3}T^{2/3}\log(T/d))$. The algorithm has exponential dependence on $k$ both in running time and the regret. Also, \citet{open-problem} sketches a different algorithm with performance guarantees similar to the algorithm presented in this paper; our work builds upon that sketch and gives tighter regret bounds. \citet{andras} consider a very closely related setting (called {\em online probing}) in which features and labels may be obtained by the learner at some cost (which may be different for different features), and this cost is factored into the loss of the learner. In the special case of their setting corresponding to the problem considered here, they give an algorithm, \textsc{LQDExp3}, which relies on discretizing all $k$-sparse weight vectors and running an exponential-weights experts algorithm on the resulting set with stochastic loss estimators, obtaining a $O(\sqrt{dT})$ regret bound. However, the running time of their algorithm is prohibitive: $O((dT)^{O(k)})$ time per iteration. In the same paper, they pose the open problem of finding a computationally efficient no-regret algorithm for the problem. The hardness result in this paper resolves this open problem. On the computational hardness side, it is known that it is $\mathbb{N}P$-hard to compute the optimal sparse linear regressor \citep{FKT, natarajan}.
The hardness result in this paper is in fact inspired by the work of \citet{FKT}, who proved that it is computationally hard to find even an approximately optimal sparse linear regressor for an ordinary least squares regression problem given a batch of labeled data. While these results imply that it is hard to {\em properly}\footnote{{\em Proper} learning means finding the optimal sparse linear regressor, whereas {\em improper} learning means finding an arbitrary predictor with performance comparable to that of the optimal sparse linear regressor.} solve the offline problem, in the online setting we allow {\em improper} learning, and hence these prior results do not yield hardness results for the online problem considered in this paper. \section{Notation and Setup} We use the notation $[d] = \{1, 2, \ldots, d\}$ to refer to the coordinates. All vectors in this paper are in $\mathbb{R}^d$, and all matrices in $\mathbb{R}^{d \times d}$. For a subset $S$ of $[d]$, and a vector $\mathbf{x}$, we use the notation $\mathbf{x}(S)$ to denote the projection of $\mathbf{x}$ on the coordinates indexed by $S$. We also use the notation $\mathbf{I}_S$ to denote the diagonal matrix which has ones in the coordinates indexed by $S$ and zeros elsewhere: this is the identity matrix on the subspace of $\mathbb{R}^d$ induced by the coordinates in $S$, as well as the projection matrix for this subspace. We use the notation $\|\cdot\|$ to denote the $\ell_2$ norm in $\mathbb{R}^d$ and $\|\cdot\|_0$ to denote the zero ``norm,'' i.e., the number of nonzero coordinates. We consider a prediction problem in which the examples are vectors in $\mathbb{R}^d$ with $\ell_2$ norm bounded by $1$, and labels are in the range $[-1, 1]$. We use square loss to measure the accuracy of a prediction: i.e., for a labeled example $(\mathbf{x}, y) \in \mathbb{R}^d \times [-1, 1]$, the loss of a prediction $\hat{y}$ is $(\hat{y} - y)^2$. The learner's task is to make predictions online as examples arrive one by one based on observing only $k$ out of $d$ features of the learner's choosing on any example (the learner is allowed to choose different subsets of features to observe in each round). The learner's goal is to minimize regret relative to the best $k$-sparse linear regressor whose $\ell_2$ norm is bounded by $1$. Formally, for $t = 1, 2, \ldots, T$, the learner: \begin{enumerate} \item selects a subset $S_t \subseteq [d]$ of size at most $k$, \item observes $\mathbf{x}_t(S_t)$, i.e., the values of the features of $\mathbf{x}_t$ restricted to the subset $S_t$, \item makes a prediction $\hat{y}_t \in [-1, 1]$, \item observes the true label $y_t$, and suffers loss $(\hat{y}_t - y_t)^2$. \end{enumerate} Define the regret of the learner as \[\text{Regret}\ :=\ \sum_{t=1}^T (\hat{y}_t - y_t)^2 - \min_{\mathbf{w}:\ \|\mathbf{w}\|_0 \leq k,\ \|\mathbf{w}\| \leq 1} \sum_{t=1}^T (\mathbf{w} \cdot \mathbf{x}_t - y_t)^2. \] In case $\hat{y}_t$ is chosen using randomization, we consider expected regret instead. Given the $\mathbb{N}P$-hardness of computing the optimal $k$-sparse linear regressor \citep{FKT, natarajan}, we also consider a variant of the problem which gives the learner more flexibility than the comparator: the learner is allowed to choose $k' \geq k$ coordinates to query in each round. The definition of the regret remains the same. We call this the {\em $(k, k', d)$-online sparse regression problem}.
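To fix ideas, the following is a small schematic sketch (ours, not part of the formal development) of how a $(k,k',d)$-online sparse regression algorithm is driven and scored; the \texttt{learner} and \texttt{environment} objects and their method names are hypothetical placeholders rather than any actual interface.
\begin{verbatim}
# Schematic Python sketch of the (k, k', d)-online sparse regression protocol.
# `learner` and `environment` are hypothetical placeholder objects.
import numpy as np

def run_protocol(learner, environment, T, k_prime):
    """Play T rounds and return the learner's total square loss."""
    total_loss = 0.0
    for t in range(T):
        x, y = environment.draw_example()     # x in R^d, |y| <= 1 (both hidden)
        S = learner.choose_coordinates()      # subset of [d] of size <= k'
        assert len(S) <= k_prime
        y_hat = learner.predict(x[list(S)])   # learner sees only x restricted to S
        learner.observe_label(y)              # true label revealed after predicting
        total_loss += (y_hat - y) ** 2
    return total_loss

def comparator_loss(X, y, w):
    """Total loss of a fixed k-sparse regressor w with ||w|| <= 1."""
    return float(np.sum((X @ w - y) ** 2))
\end{verbatim}
The regret is then the difference between the total loss returned by \texttt{run\_protocol} and the minimum of \texttt{comparator\_loss} over all $k$-sparse $\mathbf{w}$ with $\|\mathbf{w}\| \leq 1$.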
We are interested in the following two goals\footnote{In this paper, we use the $\poly(\cdot)$ notation to denote a polynomially-bounded function of its arguments.}: \begin{enumerate} \item (No Regret) Make predictions $\hat{y}_t$ so that regret is bounded by $\poly(d) T^{1-\delta}$ for some $\delta > 0$. \item (Efficiency) Make these predictions efficiently, i.e., in $\poly(d, T)$ time per iteration. \end{enumerate} In this paper, we show it is possible to get an {\em inefficient} no-regret algorithm for the online sparse regression problem. Complementing this result, we also show that an {\em efficient} no-regret algorithm cannot exist, assuming the standard hardness assumption that $\mathbb{N}P \not\subseteq \mathcal{B}PP$. \section{Upper bound} In this section we give an {\em inefficient} algorithm for the $(k, k', d)$-online sparse regression problem which obtains an expected regret of $O(\frac{d^2}{(k'-k)^2}\sqrt{k \log(d)T})$. The algorithm needs $k'$ to be at least $k + 2$. It is inefficient because it maintains statistics for every subset of $[d]$ of size $k$, of which there are ${d \choose k}$. At a high level, the algorithm runs an experts algorithm (specifically, Hedge) treating all such subsets as experts. Each expert internally runs stochastic gradient descent only on the coordinates specified by the corresponding subset, ensuring low regret to any bounded norm parameter vector that is nonzero only on those coordinates. The Hedge algorithm ensures low regret to the best subset of coordinates, and thus the overall algorithm achieves low regret with respect to any $k$-sparse parameter vector. The algorithm needs $k' \geq k + 2$ features because it uses the additional $k' - k$ features to generate unbiased estimators for $\mathbf{x}_t\mathbf{x}_t^\top$ and $y_t \mathbf{x}_t$ in each round, which are needed to generate stochastic gradients for all the experts. These estimators have large variance unless $k'-k$ is large. The pseudocode is given in Algorithm~\ref{algorithm:osr}. In the algorithm, in round $t$, the algorithm generates a distribution $D_t$ over the subsets of $[d]$ of size $k$; for any such subset $S$, we use the notation $D_t(S)$ to denote the probability of choosing the set $S$ in this distribution. We also define the function $\Pi$ on $\mathbb{R}^d$ to be the projection onto the unit ball, i.e., for $\mathbf{w} \in \mathbb{R}^d$, $\Pi(\mathbf{w}) = \mathbf{w}$ if $\|\mathbf{w}\| \leq 1$, and $\Pi(\mathbf{w}) = \frac{1}{\|\mathbf{w}\|}\mathbf{w}$ otherwise. \begin{algorithm}[h] \caption{\textsc{Algorithm for Online Sparse Regression} \label{algorithm:osr}} \begin{algorithmic}[1] \STATE Define the parameters $p = \frac{k'-k}{d}$, $q = \frac{(k'-k)(k'-k-1)}{d(d-1)}$, $\eta_\textsc{Hedge} = q \sqrt{\frac{\ln(d)}{T}}$, and $\eta_\textsc{SGD} = q\sqrt{\frac{1}{T}}$. \STATE Let $D_1$ be the uniform distribution over all subsets of $[d]$ of size $k$. \STATE For every subset $S$ of $[d]$ of size $k$, let $\mathbf{w}_{S, 1} = \mathbf{0}$, the all-zeros vector in $\mathbb{R}^d$. \FOR{$t=1, 2, \ldots, T$} \STATE Sample a subset $\hat{S}_t$ of $[d]$ of size $k$ from $D_t$, and a subset $R_t$ of $[d]$ of size $k' - k$ drawn uniformly at random, independently of $\hat{S}_t$. \STATE Acquire $\mathbf{x}_t(S_t)$ for $S_t:=\hat{S}_t \cup R_t$. \STATE Make the prediction $\hat{y}_t = \mathbf{w}_{\hat{S}_t, t} \cdot \mathbf{x}_t$ and obtain the true label $y_t$.
\STATE Compute the matrix $\mathbf{X}_t \in \mathbb{R}^{d \times d}$ and the vector $\mathbf{z}_t \in \mathbb{R}^d$ defined as follows: \[ \mathbf{X}_t(i,j) = \begin{cases} \frac{\mathbf{x}_t(i)^2}{p} & \text{ if } i = j \text{ and } i \in R_t \\ \frac{\mathbf{x}_t(i)\mathbf{x}_t(j)}{q} & \text{ if } i \neq j \text{ and } i, j \in R_t\\ 0 & \text{ otherwise,} \end{cases} \quad \text{ and } \quad \mathbf{z}_t(i) = \begin{cases} \frac{y_t\mathbf{x}_t(i)}{p} & \text{ if } i\in R_t\\ 0 & \text{ otherwise.} \end{cases}\] \STATE Update the distribution over the subsets: for all subsets $S$ of $[d]$ of size $k$, let \[ D_{t+1}(S)\ =\ D_t(S) \exp(-\eta_\textsc{Hedge}(\mathbf{w}_{S, t}^\top \mathbf{X}_t \mathbf{w}_{S, t} - 2\mathbf{z}_t^\top \mathbf{w}_{S, t} + y_t^2))/Z_t, \] where $Z_t$ is the normalization factor to make $D_{t+1}$ a distribution. \STATE For each subset $S$ of $[d]$ of size $k$, let \[ \mathbf{w}_{S, t+1}\ =\ \Pi(\mathbf{w}_{S, t} - 2\eta_\textsc{SGD} \mathbf{I}_S(\mathbf{X}_t \mathbf{w}_{S, t} - \mathbf{z}_t)).\] \ENDFOR \end{algorithmic} \end{algorithm} \begin{theorem} \label{thm:regret-bound} There is an algorithm for the online sparse regression problem with any given parameters $(k, k', d)$ such that $k' \geq k + 2$ running in $O({d \choose k} \cdot k')$ time per iteration with $O(\frac{d^2}{(k' - k)^2}\sqrt{k \log(d)T})$ expected regret. \end{theorem} \begin{proof} The algorithm is given in Algorithm~\ref{algorithm:osr}. Since the algorithm maintains a parameter vector (supported on the corresponding $k$ coordinates) for each subset of $[d]$ of size $k$, the running time is dominated by the time to sample from $D_t$ and update it, and the time to update the parameter vectors. The updates can be implemented in $O(k')$ time, so overall each round can be implemented in $O({d \choose k} \cdot k')$ time. We now analyze the regret of the algorithm. Let $\ensuremath{\mathbb{E}}_t[\cdot]$ denote the expectation conditioned on all the randomness prior to round $t$. Then, it is easy to check, using the fact that $k'-k\ge 2$, that the construction of $\mathbf{X}_t$ and $\mathbf{z}_t$ in Step 8 of the algorithm has the following property: \begin{equation} \label{eq:unbiased} \ensuremath{\mathbb{E}}_t[\mathbf{X}_t] = \mathbf{x}_t\mathbf{x}_t^\top \text{ and } \ensuremath{\mathbb{E}}_t[\mathbf{z}_t] = y_t\mathbf{x}_t. \end{equation} Next, notice that in Step 9, the algorithm runs the standard Hedge-algorithm update (see, for example, Section 2.1 in \citep{mwsurvey}) on ${d \choose k}$ experts, one for each subset of $[d]$ of size $k$, where, in round $t$, the cost of the expert corresponding to subset $S$ is defined to be\footnote{Recall that the costs in Hedge may be chosen adaptively.} \begin{equation} \label{eq:cost} \mathbf{w}_{S, t}^\top \mathbf{X}_t \mathbf{w}_{S, t} - 2\mathbf{z}_t^\top \mathbf{w}_{S, t} + y_t^2. \end{equation} It is easy to check, using the facts that $\|\mathbf{x}_t\| \leq 1$, $\|\mathbf{w}_{S, t}\| \leq 1$ and $p \geq q$, that the cost (\ref{eq:cost}) is bounded deterministically in absolute value by $O(\frac{1}{q}) = O(\frac{d^2}{(k' - k)^2})$. Let $\ensuremath{\mathbb{E}}_{D_t}[\cdot]$ denote the expectation over the random choice of $\hat{S}_t$ from the distribution $D_t$ conditioned on all other randomness up to and including round $t$.
Since there are ${d \choose k}\le d^k$ experts in the Hedge algorithm here, the standard regret bound for Hedge~\citep[Theorem 2.3]{mwsurvey} with the specified value of $\eta_\textsc{Hedge}$ implies that for any subset $S$ of $[d]$ of size $k$, using $\ln {d \choose k}\le k \ln d$, we have \begin{equation} \label{eq:hedge-regret} \sum_{t=1}^T \ensuremath{\mathbb{E}}_{D_t}[\mathbf{w}_{\hat{S}_t, t}^\top \mathbf{X}_t \mathbf{w}_{\hat{S}_t, t} - 2\mathbf{z}_t^\top \mathbf{w}_{\hat{S}_t, t} + y_t^2]\ \leq\ \sum_{t=1}^T (\mathbf{w}_{S, t}^\top \mathbf{X}_t \mathbf{w}_{S, t} - 2\mathbf{z}_t^\top \mathbf{w}_{S, t} + y_t^2) + O(\tfrac{d^2}{(k' - k)^2}\sqrt{k\ln(d)T}). \end{equation} Next, we note, using \eqref{eq:unbiased} and the fact that conditioned on the randomness prior to round $t$, $\mathbf{w}_{S, t}$ is completely determined, that (for any $S$) \begin{equation} \label{eq:hedge-unbiased} \ensuremath{\mathbb{E}}_t[\mathbf{w}_{S, t}^\top \mathbf{X}_t \mathbf{w}_{S, t} - 2\mathbf{z}_t^\top \mathbf{w}_{S, t} + y_t^2]\ =\ \mathbf{w}_{S, t}^\top \mathbf{x}_t\mathbf{x}_t^\top \mathbf{w}_{S, t} - 2y_t\mathbf{x}_t^\top \mathbf{w}_{S, t} + y_t^2\ =\ (\mathbf{w}_{S, t} \cdot \mathbf{x}_t - y_t)^2. \end{equation} Taking expectations on both sides of \eqref{eq:hedge-regret} over all the randomness in the algorithm, and using \eqref{eq:hedge-unbiased}, we get that for any subset $S$ of $[d]$ of size $k$, we have \begin{equation} \label{eq:experts-regret} \sum_{t=1}^T \ensuremath{\mathbb{E}}[(\mathbf{w}_{\hat{S}_t, t} \cdot \mathbf{x}_t - y_t)^2]\ \leq\ \sum_{t=1}^T \ensuremath{\mathbb{E}}[(\mathbf{w}_{S, t} \cdot \mathbf{x}_t - y_t)^2] + O(\tfrac{d^2}{(k' - k)^2}\sqrt{k\log(d)T}). \end{equation} The left-hand side of (\ref{eq:experts-regret}) equals $\sum_{t=1}^T \ensuremath{\mathbb{E}}[(\hat{y}_t - y_t)^2]$. We now analyze the right-hand side. For any given subset $S$ of $[d]$ of size $k$, we claim that in Step 10 of the algorithm, the parameter vector $\mathbf{w}_{S, t}$ is updated using stochastic gradient descent with the loss function $\ell_t(\mathbf{w}) := (\mathbf{x}_t^\top \mathbf{I}_S \mathbf{w} - y_t)^2$ over the set $\{\mathbf{w}\ |\ \|\mathbf{w}\| \le 1\}$, only on the coordinates in $S$, while the coordinates not in $S$ are fixed to $0$. To prove this claim, first, we note that the premultiplication by $\mathbf{I}_S$ in the update in Step 10 ensures that in the parameter vector $\mathbf{w}_{S, t+1}$ all coordinates that are not in $S$ are set to $0$, assuming that coordinates of $\mathbf{w}_{S,t}$ not in $S$ were 0. Next, at time $t$, consider the loss function $\ell_t(\mathbf{w}) = (\mathbf{x}_t^\top \mathbf{I}_S \mathbf{w} - y_t)^2$. The gradient of this loss function at $\mathbf{w}_{S, t}$ is \[\nabla \ell_t(\mathbf{w}_{S, t})\ =\ 2(\mathbf{x}_t^\top \mathbf{I}_S \mathbf{w}_{S, t} - y_t) \mathbf{I}_S \mathbf{x}_t\ =\ 2\mathbf{I}_S(\mathbf{x}_t\mathbf{x}_t^\top \mathbf{w}_{S, t} - y_t \mathbf{x}_t),\] where we use the fact that $\mathbf{I}_S \mathbf{w}_{S, t} = \mathbf{w}_{S, t}$ since $\mathbf{w}_{S, t}$ has zeros in coordinates not in $S$. Now, by \eqref{eq:unbiased}, we have \[ \ensuremath{\mathbb{E}}_t[2\mathbf{I}_S(\mathbf{X}_t \mathbf{w}_{S, t} - \mathbf{z}_t)]\ =\ 2\mathbf{I}_S(\mathbf{x}_t\mathbf{x}_t^\top \mathbf{w}_{S, t} - y_t \mathbf{x}_t),\] and thus, Step 10 of the algorithm is a stochastic gradient descent update as claimed.
Furthermore, a calculation similar to the one for bounding the loss of the experts in the Hedge algorithm shows that the norm of the stochastic gradient is bounded deterministically by $O(\frac{1}{q})$, which is $O(\frac{d^2}{(k' - k)^2})$. Using a standard regret bound for stochastic gradient descent (see, for example, Lemma 3.1 in \citep{FKM}) with the specified value of $\eta_\textsc{SGD}$, we conclude that for any fixed vector $\mathbf{w}$ of $\ell_2$ norm at most $1$, we have \[ \sum_{t=1}^T \ensuremath{\mathbb{E}}[(\mathbf{x}_t^\top \mathbf{I}_S \mathbf{w}_{S, t} - y_t)^2]\ \leq\ \sum_{t=1}^T (\mathbf{x}_t^\top \mathbf{I}_S \mathbf{w} - y_t)^2 + O(\tfrac{d^2}{(k' - k)^2}\sqrt{T}).\] Since $\mathbf{I}_S \mathbf{w}_{S, t} = \mathbf{w}_{S, t}$, the left-hand side of the above inequality equals $\sum_{t=1}^T \ensuremath{\mathbb{E}}[(\mathbf{w}_{S, t} \cdot \mathbf{x}_t - y_t)^2]$. Finally, let $\mathbf{w}$ be an arbitrary $k$-sparse vector of $\ell_2$ norm at most $1$. Let $S = \{i\ |\ w_i \neq 0\}$. Note that $|S| \leq k$, and $\mathbf{I}_S(\mathbf{w})=\mathbf{w}$. Applying the above bound for this set $S$, we get \begin{equation} \label{eq:sgd-regret} \sum_{t=1}^T \ensuremath{\mathbb{E}}[(\mathbf{w}_{S, t} \cdot \mathbf{x}_t - y_t)^2]\ \leq\ \sum_{t=1}^T (\mathbf{w} \cdot \mathbf{x}_t - y_t)^2 + O(\tfrac{d^2}{(k' - k)^2}\sqrt{T}). \end{equation} Combining the inequality (\ref{eq:sgd-regret}) with inequality (\ref{eq:experts-regret}), we conclude that \[ \sum_{t=1}^T \ensuremath{\mathbb{E}}[(\hat{y}_t - y_t)^2]\ \leq\ \sum_{t=1}^T (\mathbf{w} \cdot \mathbf{x}_t - y_t)^2 + O(\tfrac{d^2}{(k' - k)^2}\sqrt{k \log(d)T}). \] This gives us the required regret bound. \end{proof} \section{Computational lower bound} In this section we show that there cannot exist an efficient no-regret algorithm for the online sparse regression problem unless $\mathbb{N}P \subseteq \mathcal{B}PP$. This hardness result follows from the hardness of approximating the \textsf{Set Cover}\xspace problem. We give a reduction showing that if there were an efficient no-regret algorithm $\textsf{Alg}_\textsf{OSR}$ for the online sparse regression problem, then we could distinguish, in randomized polynomial time, between two instances of the \textsf{Set Cover}\xspace problem: in one of which there is a small set cover, and in the other of which any set cover is large. This task is known to be $\mathbb{N}P$-hard for specific parameter values. Specifically, our reduction has the following properties: \begin{enumerate} \item If there is a small set cover, then in the induced online sparse regression problem there is a $k$-sparse parameter vector (of $\ell_2$ norm at most 1) giving $0$ loss, and thus the algorithm $\textsf{Alg}_\textsf{OSR}$ must have small total loss (equal to the regret) as well. \item If there is no small set cover, then the prediction made by $\textsf{Alg}_\textsf{OSR}$ in any round has at least a constant loss in expectation, which implies that its total (expected) loss must be large, in fact, linear in $T$. \end{enumerate} By measuring the total loss of the algorithm, we can distinguish between the two instances of the \textsf{Set Cover}\xspace problem mentioned above with probability at least $3/4$, thus yielding a $\mathcal{B}PP$ algorithm for an $\mathbb{N}P$-hard problem.
The starting point for our reduction is the work of \citet{DinurSteurer} who give a polynomial-time reduction of deciding satisfiability of \textsf{3CNF}\xspace formulas to distinguishing instances of \textsf{Set Cover}\xspace with certain useful combinatorial properties. We denote the satisfiability problem of \textsf{3CNF}\xspace formulas by \textsf{SAT}\xspace. \begin{reduction} \label{reduction:dinur-steurer} For any given constant $D > 0$, there is a constant $c_D \in (0, 1)$ and a $\poly(n^D)$-time algorithm that takes a $\textsf{3CNF}\xspace$ formula $\phi$ of size $n$ as input and constructs a \textsf{Set Cover}\xspace instance over a ground set of size $m = \poly(n^D)$ with $d = \poly(n)$ sets, with the following properties: \begin{enumerate} \item if $\phi \in \textsf{SAT}\xspace$, then there is a collection of $k = O(d^{c_D})$ sets, which covers each element {\em exactly once}, and \item if $\phi \notin \textsf{SAT}\xspace$, then no collection of $k' = \lfloor D\ln(d) k\rfloor $ sets covers all elements; i.e., at least one element is left uncovered. \end{enumerate} The \textsf{Set Cover}\xspace instance generated from $\phi$ can be encoded as a binary matrix $\mathbf{M}_\phi \in \{0, 1\}^{m \times d}$ with the rows corresponding to the elements of the ground set, and the columns corresponding to the sets, such that each column is the indicator vector of the corresponding set. \end{reduction} Using this reduction, we now show how an efficient, no-regret algorithm for online sparse regression can be used to give a $\mathcal{B}PP$ algorithm for \textsf{SAT}\xspace. \begin{algorithm}[t] \caption{\textsc{Algorithm $\textsf{Alg}_\textsf{SAT}$ for deciding satisfiability of \textsf{3CNF}\xspace formulas} \label{algorithm:sat-decider}} \begin{algorithmic}[1] \REQUIRE A constant $D > 0$, and an algorithm $\textsf{Alg}_\textsf{OSR}$ for the $(k, k', d)$-online sparse regression problem with $k = O(d^{c_D})$, where $c_D$ is the constant from Reduction~\ref{reduction:dinur-steurer}, and $k' = \lfloor D \ln(d) k \rfloor$, that runs in $\poly(d, T)$ time per iteration with regret bounded by $p(d) \cdot T^{1-\delta}$ with probability at least $3/4$. \REQUIRE A \textsf{3CNF}\xspace formula $\phi$. \STATE Compute the matrix $\mathbf{M}_\phi$ and the associated parameters $k, k', d, m$ from Reduction~\ref{reduction:dinur-steurer}. \STATE Run $\textsf{Alg}_\textsf{OSR}$ with the parameters $k, k', d$ for $T := \lceil \max\{(2p(d) mdk)^{1/\delta}, 256m^2d^2k^2\}\rceil$ iterations. \FOR{$t=1, 2, \ldots, T$} \STATE Sample a row of $\mathbf{M}_\phi$ uniformly at random; call it $\mathbf{\hat{x}}_t$. \STATE Sample $\sigma_t \in \{-1, 1\}$ uniformly at random independently of $\mathbf{\hat{x}}_t$. \STATE Set $\mathbf{x}_t = \frac{\sigma_t}{\sqrt{d}} \mathbf{\hat{x}}_t$ and $y_t = \frac{\sigma_t}{\sqrt{dk}}$. \STATE Obtain a set of coordinates $S_t$ of size at most $k'$ by running $\textsf{Alg}_\textsf{OSR}$, and provide it the coordinates $\mathbf{x}_t(S_t)$. \STATE Obtain the prediction $\hat{y}_t$ from $\textsf{Alg}_\textsf{OSR}$, and provide it the true label $y_t$. \ENDFOR \IF{$\sum_{t=1}^T (y_t - \hat{y}_t)^2 \leq \frac{T}{2mdk}$} \STATE Output ``satisfiable''. \ELSE \STATE Output ``unsatisfiable''. \ENDIF \end{algorithmic} \end{algorithm} \begin{theorem} \label{thm:hardness} Let $D > 0$ be any given constant.
Suppose there is an algorithm, $\textsf{Alg}_\textsf{OSR}$, for the $(k, k', d)$-online sparse regression problem with $k = O(d^{c_D})$, where $c_D$ is the constant from Reduction~\ref{reduction:dinur-steurer}, and $k' = \lfloor D \ln(d) k\rfloor $, that runs in $\poly(d, T)$ time per iteration and has expected regret bounded by $\poly(d) T^{1-\delta}$ for some constant $\delta > 0$. Then $\mathbb{N}P \subseteq \mathcal{B}PP$. \end{theorem} \begin{proof} Since the expected regret of $\textsf{Alg}_\textsf{OSR}$ is bounded by $p(d) T^{1-\delta}$ (where $p(d)$ is a polynomial function of $d$), by Markov's inequality we conclude that with probability at least $3/4$, the regret of $\textsf{Alg}_\textsf{OSR}$ is bounded by $p(d) \cdot T^{1-\delta}$. Algorithm~\ref{algorithm:sat-decider} gives a randomized algorithm, $\textsf{Alg}_\textsf{SAT}$, for deciding satisfiability of a given \textsf{3CNF}\xspace formula $\phi$ using the algorithm $\textsf{Alg}_\textsf{OSR}$. Note that the feature vectors (i.e., the $\mathbf{x}_t$ vectors) generated by $\textsf{Alg}_\textsf{SAT}$ are bounded in $\ell_2$ norm by $1$, as required. It is clear that $\textsf{Alg}_\textsf{SAT}$ is a polynomial-time algorithm since $T$ is a polynomial function of $n$ (since $m, k, d, p(d)$ are polynomial functions of $n$), and $\textsf{Alg}_\textsf{OSR}$ runs in $\poly(d, T)$ time per iteration. We now claim that this algorithm correctly decides satisfiability of $\phi$ with probability at least $3/4$, and is hence a $\mathcal{B}PP$ algorithm for $\textsf{SAT}\xspace$. To prove this, suppose $\phi \in \textsf{SAT}\xspace$. Then, there are $k$ sets in the \textsf{Set Cover}\xspace instance which cover all elements with each element being covered exactly once. Consider the $k$-sparse parameter vector $\mathbf{w}$ which has $\frac{1}{\sqrt{k}}$ in the positions corresponding to these $k$ sets and $0$ elsewhere. Note that $\|\mathbf{w}\| \leq 1$, as required. Note that since this collection of $k$ sets covers each element exactly once, we have $\mathbf{M}_\phi \mathbf{w} = \frac{1}{\sqrt{k}}\mathbf{1}$, where $\mathbf{1}$ is the all-1's vector. In particular, since $\mathbf{\hat{x}}_t$ is a row of $\mathbf{M}_\phi$, we have \[\mathbf{w}\cdot \mathbf{x}_t\ =\ \mathbf{w} \cdot \left(\frac{\sigma_t}{\sqrt{d}} \mathbf{\hat{x}}_t\right)\ =\ \frac{\sigma_t}{\sqrt{dk}}\ =\ y_t.\] Thus, $(\mathbf{w} \cdot \mathbf{x}_t - y_t)^2 = 0$ for all rounds $t$. Since algorithm $\textsf{Alg}_\textsf{OSR}$ has regret at most $p(d) \cdot T^{1-\delta}$ with probability at least $3/4$, its total loss $\sum_{t=1}^T (\hat{y}_t - y_t)^2$ is bounded by $p(d) \cdot T^{1-\delta} \leq \frac{T}{2mdk}$ (since $T \geq (2p(d) mdk)^{1/\delta}$) with probability at least $3/4$. Thus, in this case algorithm $\textsf{Alg}_\textsf{SAT}$ correctly outputs ``satisfiable'' with probability at least $3/4$. Next, suppose $\phi \notin \textsf{SAT}\xspace$. Fix any round $t$ and let $S_t$ be the set of coordinates of size at most $k'$ selected by algorithm $\textsf{Alg}_\textsf{OSR}$ to query. This set $S_t$ corresponds to $k'$ sets in the \textsf{Set Cover}\xspace instance. Since $\phi \notin \textsf{SAT}\xspace$, there is at least one element in the ground set that is not covered by any set among these $k'$ sets. This implies that there is at least one row of $\mathbf{M}_\phi$ that is $0$ in all the coordinates in $S_t$.
Since $\mathbf{\hat{x}}_t$ is a uniformly random row of $\mathbf{M}_\phi$ chosen independently of $S_t$, we have \[ \Pr[\mathbf{x}_t(S_t) = \mathbf{0}]\ =\ \Pr[\mathbf{\hat{x}}_t(S_t) = \mathbf{0}]\ \geq\ \frac{1}{m}.\] Here, $\mathbf{0}$ denotes the all-zeros vector of size $k'$. Now, we claim that $\ensuremath{\mathbb{E}}[y_t \hat y_t\ |\ \mathbf{x}_t(S_t) = \mathbf{0}] = 0$. This is because the condition that $\mathbf{x}_t(S_t) = \mathbf{0}$ is equivalent to the condition that $\mathbf{\hat{x}}_t (S_t) = \mathbf{0}$. Since $y_t$ is chosen from $\{-\frac{1}{\sqrt{dk}}, \frac{1}{\sqrt{dk}}\}$ uniformly at random independently of $\mathbf{\hat{x}}_t$ and $\hat{y}_t$, the claim follows. The expected loss of the online algorithm in round $t$ can now be bounded as follows: \[\ensuremath{\mathbb{E}}[\left . (\hat{y}_t - y_t)^2\ \right |\ \mathbf{x}_t(S_t) = \mathbf{0}]\ =\ \ensuremath{\mathbb{E}}\left[\left . \hat{y}_t^2 + \frac{1}{dk} - 2y_t\hat y_t\ \right |\ \mathbf{x}_t(S_t) = \mathbf{0}\right]\] \[ =\ \ensuremath{\mathbb{E}}\left[\left . \hat y_t^2 + \frac{1}{dk}\ \right |\ \mathbf{x}_t(S_t) = \mathbf{0}\right]\ \geq\ \frac{1}{dk},\] and hence \[\ensuremath{\mathbb{E}}[(y_t - \hat{y}_t)^2]\ \geq\ \ensuremath{\mathbb{E}}[(y_t - \hat{y}_t)^2\ |\ \mathbf{x}_t(S_t) = \mathbf{0}]\cdot \Pr[\mathbf{x}_t(S_t) = \mathbf{0}]\ \geq\ \frac{1}{dk}\cdot \frac{1}{m}\ =\ \frac{1}{mdk}.\] Let $\ensuremath{\mathbb{E}}_t[\cdot]$ denote expectation of a random variable conditioned on all randomness prior to round $t$. Since the choices of $\mathbf{x}_t$ and $y_t$ are independent of previous choices in each round, the same argument also implies that $\ensuremath{\mathbb{E}}_t[(y_t - \hat{y}_t)^2]\ \geq\ \frac{1}{mdk}$. Applying Azuma's inequality (see Theorem 7.2.1 in \citep{AlonSpencer}) to the martingale difference sequence $\ensuremath{\mathbb{E}}_t[(y_t - \hat{y}_t)^2] - (y_t - \hat{y}_t)^2$ for $t = 1, 2, \ldots, T$, since each term is bounded in absolute value by $4$, we get \[ \Pr\left[\sum_{t=1}^T \ensuremath{\mathbb{E}}_t[(y_t - \hat{y}_t)^2] -(y_t - \hat{y}_t)^2 \geq 8\sqrt{T}\right]\ \leq\ \exp\left (-\tfrac{64T}{2\cdot 16T}\right )\ \leq\ \frac{1}{4}. \] Thus, with probability at least $3/4$, the total loss $\sum_{t=1}^T (\hat{y}_t - y_t)^2$ is greater than $\sum_{t=1}^T \ensuremath{\mathbb{E}}_t[(y_t - \hat{y}_t)^2] - 8\sqrt{T} \geq \frac{1}{mdk}T - 8\sqrt{T} \geq \frac{T}{2mdk}$ (since $T \geq 256m^2d^2k^2$). Thus in this case the algorithm correctly outputs ``unsatisfiable'' with probability at least $3/4$. \end{proof} \paragraph{Parameter settings for hard instances.} Theorem~\ref{thm:hardness} implies that for any given constant $D > 0$, there is a constant $c_D$ such that the parameter settings $k = O(d^{c_D})$ and $k' = \lfloor D \ln(d) k \rfloor$ yield hard instances for the online sparse regression problem. The reduction of \citet{DinurSteurer} can be ``tweaked''\footnote{This is accomplished by simply replacing the \textsf{Label Cover} instance they construct with polynomially many disjoint copies of the same instance.} so that $c_D$ is arbitrarily close to $1$ for any constant $D$.
We can now extend the hardness results to the parameter settings $k = O(d^\varepsilon)$ for any $\varepsilon \in (0, 1)$ and $k' = \lfloor D \ln(d) k \rfloor$ either by tweaking the reduction of \citet{DinurSteurer} so it yields $c_D = \varepsilon$ if $\varepsilon$ is close enough to $1$, or if $\varepsilon$ is small, by adding $O(d^{1/\varepsilon})$ all-zeros columns to the matrix $\mathbf{M}_\phi$. The two combinatorial properties of $\mathbf{M}_\phi$ in Reduction~\ref{reduction:dinur-steurer} are clearly still satisfied, and the proof of Theorem~\ref{thm:hardness} goes through. \section{Conclusions} In this paper, we prove that minimizing regret in the online sparse regression problem is computationally hard even if the learner is allowed access to many more features than the comparator, a sparse linear regressor. We complement this result by giving an inefficient no-regret algorithm. The main open question remaining from this work is what extra assumptions one can make on the examples arriving online to make the problem tractable. Note that the sequence of examples constructed in the lower bound proof is i.i.d., so clearly stronger assumptions than that are necessary to obtain any efficient algorithms. \end{document}
\begin{document} \begin{center} \vskip 1cm{\LARGE\bf Constant coefficient Laurent biorthogonal polynomials, Riordan arrays and moment sequences} \vskip 1cm \large Paul Barry\\ School of Science\\ Waterford Institute of Technology\\ Ireland\\ \href{mailto:[email protected]}{\tt [email protected]} \end{center} \vskip .2 in \begin{abstract} We study properties of constant coefficient Laurent biorthogonal polynomials using Riordan arrays. We give details of related orthogonal polynomials, and we explore relationships between the moments of these orthogonal polynomials, the moments of the defining Laurent biorthogonal polynomials, and the expansions of $T$-fractions. Closed form expressions are given for the polynomials and their moments. \end{abstract} \section{Introduction} Let $b_{n+1}$ and $c_n$ for $n \in \mathbb{N}$ be arbitrary nonzero constants. The monic \emph{Laurent biorthogonal polynomials} (LBPs) $P_n(x)$ \cite{PB, Kam, Zhedanov} defined by the sequences $b_n$ and $c_n$ form the sequence of polynomials determined by the recurrence $$P_n(x)=(x-c_{n-1})P_{n-1}(x)-b_{n-1}xP_{n-2}(x), \quad \text{for}\quad n\ge 1,$$ with $P_0(x)=1, P_1(x)=x-c_0$. The LBP $P_n(x)$ is a monic polynomial in $x$ of exact degree $n$ whose constant term does not vanish. This note will be concerned with the constant coefficient case, that is, we assume that $c_n=c$ and $b_n=b$. We let $P_n(x)=\sum_{k=0}^n a_{n,k}x^k$, and we call the lower triangular matrix $(a_{n,k})_{n,k \ge 0}$ the coefficient array of the family of LBPs $\{P_n(x)\}$. In the case that the defining sequences are constant ($b_n=b$ and $c_n=c$) we have the following result \cite{PB}. \begin{proposition} The coefficient array of the family of LBPs $\{P_n(x)\}$ defined by $$P_n(x)=(x-c)P_{n-1}(x)-bxP_{n-2}(x), \quad \text{for}\quad n\ge 1,$$ with $P_0(x)=1, P_1(x)=x-c$ is given by the Riordan array \cite{SGWW} $$\left(\frac{1}{1+ct}, \frac{t(1-bt)}{1+ct}\right).$$ \end{proposition} The Riordan array $\left(\frac{1}{1+ct}, \frac{t(1-bt)}{1+ct}\right)$ has been called a generalized Delannoy matrix \cite{Yang}. We recall that the Riordan array $(g(t), f(t))$, where $$g(t)=g_0 + g_1 t + g_2 t^2+ \ldots,$$ and $$f(t)=f_1 t + f_2 t^2+ f_3 t^3+ \ldots,$$ is the matrix with general $(n,k)$-th term given by $$a_{n,k}=[t^n]g(t)f(t)^k.$$ Here, $[t^n]$ is the functional which extracts the coefficient of $t^n$ \cite{MC}. From the above proposition, we have the following results. \begin{corollary} We have the following generating function for the family $\{P_n(x)\}$ in the case of constant coefficients. $$\sum_{n=0}^{\infty} P_n(x)t^n=\frac{1}{1+ct+xt(bt-1)}.$$ \end{corollary} \begin{proof} The result follows since the bivariate generating function of the Riordan array $\left(\frac{1}{1+ct}, \frac{t(1-bt)}{1+ct}\right)$ is given by $$\frac{\frac{1}{1+ct}}{1-x\frac{t(1-bt)}{1+ct}}=\frac{1}{1+ct+xt(bt-1)}.$$ \end{proof} \begin{corollary} We have $$P_n(x)=\sum_{k=0}^n \left(\sum_{j=0}^k \binom{k}{j}\binom{n-j}{n-k-j}(-b)^j(-c)^{n-k-j}\right)x^k.$$ \end{corollary} \begin{proof} We have $P_n(x)=\sum_{k=0}^n a_{n,k}x^k$, where $a_{n,k}$ is given by $$a_{n,k}=[t^n] \frac{1}{1+c t} \left(\frac{t(1-bt)}{1+ct}\right)^k.$$ Expanding this by the method of coefficients \cite{MC} leads to the desired expression. \end{proof} It is appropriate to call the inverse of the matrix $(a_{n,k})$ the \emph{moment matrix} of the LBPs $\{P_n(x)\}$, and to designate the elements $\mu_n$ of its first column as the \emph{moments}.
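As a quick illustration of these definitions, the following short \texttt{sympy} script (ours, not taken from the literature; the truncation order \texttt{N} is an arbitrary choice) builds the coefficient array $a_{n,k}=[t^n]g(t)f(t)^k$ for $g(t)=\frac{1}{1+ct}$ and $f(t)=\frac{t(1-bt)}{1+ct}$, inverts it, and reads the moments off the first column of the inverse.
\begin{verbatim}
# Sketch: coefficient array and moments of the constant coefficient LBPs.
import sympy as sp

t, b, c = sp.symbols('t b c')
N = 6                                    # truncation order (arbitrary)
g = 1/(1 + c*t)
f = t*(1 - b*t)/(1 + c*t)

def coeff(expr, n):
    """Return [t^n] expr via a truncated series expansion."""
    return sp.series(expr, t, 0, n + 1).removeO().coeff(t, n)

# coefficient array a_{n,k} = [t^n] g(t) f(t)^k of the LBPs P_n(x)
A = sp.Matrix(N, N, lambda n, k: sp.expand(coeff(g*f**k, n)))

# moment matrix = inverse of the coefficient array; moments = first column
M = A.inv()
moments = [sp.factor(M[n, 0]) for n in range(N)]
print(moments)
# expected (up to ordering of factors):
# [1, c, c*(b + c), c*(b + c)*(2*b + c), c*(b + c)*(5*b**2 + 5*b*c + c**2), ...]
\end{verbatim}
The printed list agrees with the moment sequence derived in the following proposition and corollary.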
We then have \begin{proposition} The moments $\mu_n$ of the LBPs $\{P_n(x)\}$ have generating function \begin{align*}\mu(t)&=\frac{c+2b-c^2t-c \sqrt{1-2(2b+c)t+c^2t^2}}{2b}\\ &=1+\frac{ct}{1-ct}C\left(\frac{bt}{(1-ct)^2}\right),\end{align*} where $$C(t)=\frac{1-\sqrt{1-4t}}{2t}$$ is the generating function of the Catalan numbers \seqnum{A000108}. \end{proposition} \begin{proof} Indeed, the inverse of the Riordan array $\left(\frac{1}{1+ct}, \frac{t(1-bt)}{1+ct}\right)$ is given by $$\left(\frac{1}{1+ct}, \frac{t(1-bt)}{1+ct}\right)^{-1}=\left(1+\frac{ct}{1-ct}C\left(\frac{bt}{(1-ct)^2}\right), \frac{t}{1-ct}C\left(\frac{bt}{(1-ct)^2}\right)\right).$$ \end{proof} \begin{corollary} We have $$\mu_n= \sum_{k=0}^n \binom{2n-k-1}{2n-2k}C_{n-k} b^{n-k}c^k=0^n+c \sum_{k=0}^{n-1}\binom{n+k-1}{2k}C_k c^{n-k-1}b^k,$$ where $C_n=\frac{1}{n+1}\binom{2n}{n}$ is the $n$-th Catalan number. \end{corollary} The moments begin $$1, c, c(b + c), c(b + c)(2b + c), c(b + c)(5b^2 + 5bc + c^2),\ldots.$$ \begin{corollary} We have $$\mu(t)= \cfrac{1} {1-\cfrac{ct} {1-\cfrac{bt} {1-\cfrac{(b+c)t} {1-\cfrac{bt} {1-\cfrac{(b+c)t} {1-\cdots}}}}}}.$$ \end{corollary} \begin{proof} We solve for $u=u(t)$ where $$u=\cfrac{1}{1-\cfrac{bt}{1-(b+c)tu}}.$$ Then we have $\mu(t)=\frac{1}{1-ctu}$. \end{proof} \begin{corollary} We have $$\mu(t)= \cfrac{1}{1-ct- \cfrac{bct^2}{1-(2b+c)t- \cfrac{b(b+c)t^2}{1-(2b+c)t- \cfrac{b(b+c)t^2}{1-(2b+c)t-\cdots}}}}.$$ \end{corollary} \begin{corollary} The Hankel transform $h_n=|\mu_{i+j}|_{0 \le i,j \le n}$ of the moments $\mu_n$ is given by $$h_n= (bc)^n (b(b+c))^{\binom{n}{2}}.$$ \end{corollary} \begin{proof} This follows from Heilermann's formula \cite{Kratt}, since the coefficients of $t^2$ in the $J$-fraction above are $$bc, b(b+c), b(b+c), b(b+c),\ldots.$$ \end{proof} \begin{corollary} The moments $\mu_n$ of the LBPs $\{P_n(x)\}$ defined by $$P_n(x)=(x-c)P_{n-1}(x)-bxP_{n-2}(x)$$ with $P_0(x)=1, P_1(x)=x-c$, are also the moments for the family of orthogonal polynomials $Q_n(x)$ whose coefficient array is given by the Riordan array $$\left(\frac{(1+bt)^2}{1+(2b+c)t+b(b+c)t^2}, \frac{t}{1+(2b+c)t+b(b+c)t^2}\right).$$ \end{corollary} \begin{proof} Calculation shows that the inverse of the above Riordan array is $$\left(\mu(t), \frac{1-(2b+c)t-\sqrt{1-2(2b+c)t+c^2t^2}}{2b(b+c)t}\right).$$ \end{proof} The orthogonal polynomials $Q_n(x)$ satisfy, for $n \ge 3$, $$Q_n(x)=(x-(2b+c))Q_{n-1}(x)-b(b+c)Q_{n-2}(x),$$ with $$Q_0(x)=1, Q_1(x)=x-c, Q_2(x)=x^2-2x(b+c)+c(b+c).$$ For an exposition of the links between Riordan arrays and orthogonal polynomials, see \cite{Classical}. We finish this section by noting that we can use Lagrange inversion \cite{LI} to determine the elements of the inverse coefficient matrix $(a_{n,k})^{-1}=\left(\frac{1}{1+ct}, \frac{t(1-bt)}{1+ct}\right)^{-1}$. We obtain the following result. \begin{proposition} The $(n,k)$-th element of the inverse of the coefficient matrix $(a_{n,k})$ is given by $$\frac{k}{n}\sum_{j=0}^n \binom{n}{j}\binom{2n-k-j-1}{n-k-j}c^jb^{n-k-j}+\frac{c(k+1)}{n}\sum_{j=0}^n \binom{n}{j}\binom{2n-k-j-2}{n-k-j-1}c^jb^{n-k-j-1}.$$ \end{proposition} Setting $k=0$ in this expression (which gives us the first column), we obtain that $\mu_0=1$ and $$\mu_n=\frac{c}{n}\sum_{j=0}^n \binom{n}{j}\binom{2n-j-2}{n-j-1}c^jb^{n-j-1}$$ for $n \ge 1$.
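The Hankel transform evaluation in the corollary above is easy to check symbolically. The following \texttt{sympy} snippet (ours; the truncation \texttt{N} is an arbitrary choice) expands the generating function $\mu(t)$ and compares each Hankel determinant $h_n=|\mu_{i+j}|_{0\le i,j\le n}$ with $(bc)^n\,(b(b+c))^{\binom{n}{2}}$.
\begin{verbatim}
# Sketch: check h_n = (bc)^n * (b(b+c))^binom(n,2) for the first few n.
import sympy as sp

t, b, c = sp.symbols('t b c')
N = 5
mu_gf = (c + 2*b - c**2*t - c*sp.sqrt(1 - 2*(2*b + c)*t + c**2*t**2))/(2*b)
ser = sp.series(mu_gf, t, 0, 2*N).removeO()
mu = [sp.expand(ser.coeff(t, n)) for n in range(2*N)]   # moments mu_0 .. mu_{2N-1}

for n in range(N):
    H = sp.Matrix(n + 1, n + 1, lambda i, j: mu[i + j])   # Hankel matrix
    h = H.det()
    predicted = (b*c)**n * (b*(b + c))**sp.binomial(n, 2)
    print(n, sp.expand(h - predicted) == 0)               # should print True
\end{verbatim}
For $n$ up to the chosen truncation the comparison returns \texttt{True}, in line with the formula $h_n=(bc)^n(b(b+c))^{\binom{n}{2}}$.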
\section{T-fractions and Toeplitz determinants} We begin this section by considering the $T$-fraction \cite{Jones} $$\tilde{\mu}(t)=\cfrac{1}{1-ct- \cfrac{bt}{1-ct- \cfrac{bt}{1-ct-\cdots}}}.$$ Solving the equation $$u = \frac{1}{1-ct-btu}$$ for $u=u(t)$, we see that $$\tilde{\mu}(t)=u(t)=\frac{1-ct-\sqrt{1-2(2b+c)t+c^2t^2}}{2bt}.$$ This expands to give the sequence $\tilde{\mu}_n$ that begins $$1, b + c, (b + c)(2b + c), (b + c)(5b^2 + 5bc + c^2),\ldots.$$ We have $$\mu(t)=1+ct \tilde{\mu}(t).$$ \begin{proposition} The generating function $\tilde{\mu}(t)$ is the generating function of the moments of the family of orthogonal polynomials $\tilde{Q}_n(x)$ whose coefficient array is given by the Riordan array $$\left(\frac{1+bt}{1+(2b+c)t+b(b+c)t^2},\frac{t}{1+(2b+c)t+b(b+c)t^2}\right).$$ \end{proposition} \begin{proof} By the theory of Riordan arrays, the generating function of the first column of the inverse matrix is equal to $\tilde{\mu}(t)$. \end{proof} The orthogonal polynomials $\tilde{Q}_n(x)$ satisfy the recurrence $$\tilde{Q}_n(x)=(x-(2b+c))\tilde{Q}_{n-1}(x)-b(b+c)\tilde{Q}_{n-2}(x),$$ with $$\tilde{Q}_0(x)=1, \tilde{Q}_1(x)=x-(b+c).$$ \begin{proposition} We have $$\tilde{\mu}_n=\sum_{k=0}^n \binom{n+k}{2k}c^{n-k}b^k C_k.$$ \end{proposition} \begin{proof} The matrix with general $(n,k)$-th term $\binom{n+k}{2k}c^{n-k}$ is the Riordan array $\left(\frac{1}{1-ct}, \frac{t}{(1-ct)^2}\right)$. Applying this Riordan array to the generating function $$\cfrac{1}{1- \cfrac{bt}{1- \cfrac{bt}{1-\cdots}}}$$ of the sequence $b^n C_n$, we obtain the generating function $$\tilde{\mu}(t)=\cfrac{1}{1-ct- \cfrac{bt}{1-ct- \cfrac{bt}{1-ct-\cdots}}}$$ of $\tilde{\mu}_n$. \end{proof} \begin{corollary} We have $$\mu_n=0^n+c\sum_{k=0}^{n-1} \binom{n+k-1}{2k}c^{n-k-1}b^k C_k.$$ \end{corollary} The theory of LBPs is tied to that of $T$-fractions and Toeplitz determinants. In order to define the relevant Toeplitz matrices and determinants in our case, we must extend the moments $(\mu_n)_{n \ge 0}$ to a bi-infinite sequence $(\mu_n)_{n \in \mathbb{Z}}$. We do this as follows. We extend $\mu_n$ to $n < 0$ by setting $$\mu_n = \frac{\mu_{1-n}}{c^{1-2n}},\quad\text{for}\quad n<0.$$ We let $$t_n=|\mu_{-j+k}|_{j,k=0\cdots n}$$ and $$t'_n=|\mu_{1-j+k}|_{j,k=0 \cdots n}.$$ The theory of LBPs now gives us the following results. \begin{proposition} We have $$b=-\frac{t_{n-1}t'_{n+1}}{t_n t'_n},$$ $$c=\frac{t_n t'_{n+1}}{t_{n+1} t'_n}.$$ The Toeplitz transform $t_n$ of the moments $\mu_n$ of the LBPs $\{P_n(x)\}$ is given by $$t_n = \left(-\frac{b}{c}\right)^{\binom{n+1}{2}}.$$ The polynomials $P_n(x)$ are given by $$P_n(x)=\frac{1}{t_n} \left| \begin{array}{ccccc} \mu_0 & \mu_1 & \cdots & \mu_{n-1} & \mu_n \\ \mu_{-1} & \mu_0 & \cdots & \mu_{n-2} & \mu_{n-1} \\ \vdots & \vdots & \ddots & \vdots & \vdots \\ \mu_{-n+1} & \mu_{-n+2} & \cdots & \mu_0 & \mu_1 \\ 1 & x & \cdots & x^{n-1} & x^n \\ \end{array} \right|.$$ \end{proposition} \section{Examples} \begin{example} In this example, we consider the case where $b=1$. By solving the equations $$u = \frac{1}{1-c t -t u}$$ and $$v=\frac{1}{1-\frac{(c+1)t}{1-tv}}$$ and comparing $u$ and $v$, we see that the two continued fractions $$u(t)=\cfrac{1}{1-ct- \cfrac{t}{1-ct- \cfrac{t}{1-ct-\ldots}}}$$ and $$v(t)=\cfrac{1}{1- \cfrac{(c+1)t}{1- \cfrac{t}{1- \cfrac{(c+1)t}{1- \cfrac{t}{1-\cdots}}}}}$$ are equal.
Their common expansion $\tilde{\mu}_n$ begins $$1, c + 1, (c + 1)(c + 2), (c + 1)(c^2 + 5c + 5), (c + 1)(c^3 + 9c^2 + 21c + 14), \ldots.$$ As a sequence of polynomials in $c$, the corresponding coefficient array begins $$\left( \begin{array}{ccccccc} 1 & 0 & 0 & 0 & 0 & 0 & 0 \\ 1 & 1 & 0 & 0 & 0 & 0 & 0 \\ 2 & 3 & 1 & 0 & 0 & 0 & 0 \\ 5 & 10 & 6 & 1 & 0 & 0 & 0 \\ 14 & 35 & 30 & 10 & 1 & 0 & 0 \\ 42 & 126 & 140 & 70 & 15 & 1 & 0 \\ 132 & 462 & 630 & 420 & 140 & 21 & 1 \\ \end{array} \right).$$ This is triangle \seqnum{A060693}, with general term $\binom{2n-k}{k}C_{n-k}$, which counts the number of Schroeder paths from $(0,0)$ to $(2n,0)$ with $k$ peaks. We have in this case $$\tilde{\mu}_n = \sum_{k=0}^n \binom{2n-k}{k}C_{n-k}c^k.$$ The above theory tells us that the sequence $\tilde{\mu}_n$ is the moment sequence for the orthogonal polynomials with coefficient array given by the Riordan array $$\left(\frac{1+t}{1+(c+2)t+(c+1)t^2}, \frac{t}{1+(c+2)t+(c+1)t^2}\right).$$ The corresponding LBP moment sequence $\mu_n$, which begins $$1, c, c(c + 1), c(c + 1)(c + 2), c(c + 1)(c^2 + 5c + 5), c(c + 1)(c^3 + 9c^2 + 21c + 14), \ldots,$$ is given by the first column of the inverse of the Riordan array $$\left(\frac{1}{1+ct}, \frac{t(1-t)}{1+ct}\right).$$ It is also given by the first column of the inverse of $$\left(\frac{(1+t)^2}{1+(c+2)t+(c+1)t^2}, \frac{t}{1+(c+2)t+(c+1)t^2}\right).$$ The link to Schroeder numbers is evident by taking $c=1$. In that case, we have $$\tilde{\mu}_n = \sum_{k=0}^n \binom{2n-k}{k}C_{n-k}=\sum_{k=0}^n \binom{n+k}{2k}C_k=S_n,$$ the $n$-th large Schroeder number \seqnum{A006318}. In this case the numbers $\tilde{\mu}_n$ begin $$1, 2, 6, 22, 90, 394, 1806, 8558, 41586,\ldots,$$ while the LBP sequence $\mu_n$ begins $$1,1, 2, 6, 22, 90, 394, 1806, 8558, 41586,\ldots.$$ By varying the parameter $c$, we find interpretations in terms of Schroeder paths where the level steps can have $c$ colors. \end{example} \begin{example} Given a Riordan array $A$, the matrix $P_A=A^{-1}\bar{A}$ is called its production matrix, where $\bar{A}$ denotes the matrix $A$ with its top row removed. For the LBP array $\left(\frac{1}{1+ct}, \frac{t(1-bt)}{1+ct}\right)$, its production matrix begins $$\left( \begin{array}{cccccc} c & 1 & 0 & 0 & 0 & 0 \\ b c & b+c & 1 & 0 & 0 & 0 \\ b^2 c & b (b+c) & b+c & 1 & 0 & 0 \\ b^3 c & b^2 (b+c) & b (b+c) & b+c & 1 & 0 \\ b^4 c & b^3 (b+c) & b^2 (b+c) & b (b+c) & b+c & 1 \\ b^5 c & b^4 (b+c) & b^3 (b+c) & b^2 (b+c) & b (b+c) & b+c \\ \end{array} \right).$$ This displays a structural property of Riordan arrays: after the first column, all columns have the same elements, apart from the descending zeros. We now take an example with non-constant $b_n$. Thus we let $b_n$ be the sequence that begins $$1,2,1,2,1,2,1,2,1,\ldots,$$ while we take $c_n=c=1$. 
Then the moment matrix corresponding to the LBPs given by $$P_n(x)=(x-1)P_{n-1}(x)- b_{n-1}xP_{n-2}(x),$$ with $P_0(x)=1, P_1(x)=x-1$, begins $$\left( \begin{array}{cccccccc} 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\ 1 & 1 & 0 & 0 & 0 & 0 & 0 & 0 \\ 3 & 4 & 1 & 0 & 0 & 0 & 0 & 0 \\ 13 & 18 & 6 & 1 & 0 & 0 & 0 & 0 \\ 65 & 91 & 34 & 9 & 1 & 0 & 0 & 0 \\ 355 & 500 & 199 & 64 & 11 & 1 & 0 & 0 \\ 2061 & 2914 & 1206 & 430 & 90 & 14 & 1 & 0 \\ 12501 & 17721 & 7526 & 2856 & 670 & 135 & 16 & 1 \\ \end{array} \right).$$ This array has a production matrix which begins $$\left( \begin{array}{ccccccc} 1 & 1 & 0 & 0 & 0 & 0 & 0 \\ 2 & 3 & 1 & 0 & 0 & 0 & 0 \\ 2 & 3 & 2 & 1 & 0 & 0 & 0 \\ 4 & 6 & 4 & 3 & 1 & 0 & 0 \\ 4 & 6 & 4 & 3 & 2 & 1 & 0 \\ 8 & 12 & 8 & 6 & 4 & 3 & 1 \\ 8 & 12 & 8 & 6 & 4 & 3 & 2 \\ \end{array} \right).$$ This illustrates that the moment array is not a Riordan array; nevertheless, the production matrix does reflect the periodicity in the defining $b_n$ parameters. The sequence \seqnum{A155867}, which begins $$1,3,13,65,355,2061,\ldots$$ is given by $$\sum_{k=0}^n \binom{n+k}{2k} S_k.$$ \end{example} \begin{example} In this example, we consider the moments in the case where $b=c-1$. Thus we look at the first column of the matrix $\left(\frac{1}{1+ct}, \frac{t(1-(c-1)t)}{1+ct}\right)^{-1}$. The moments begin $$1, c, c(2c - 1), c(2c - 1)(3c - 2), c(2c - 1)(11c^2 - 15c + 5), c(2c - 1)(45c^3 - 93c^2 + 63c - 14),\ldots.$$ This sequence of polynomials in $c$ has a coefficient array that begins $$\left( \begin{array}{cccccc} 1 & 0 & 0 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 & 0 & 0 \\ 0 & -1 & 2 & 0 & 0 & 0 \\ 0 & 2 & -7 & 6 & 0 & 0 \\ 0 & -5 & 25 & -41 & 22 & 0 \\ 0 & 14 & -91 & 219 & -231 & 90 \\ \end{array} \right).$$ The second column is composed of the alternating sign Catalan numbers, while the diagonal is given by the augmented large Schroeder numbers. The general $(n,k)$-th element of this matrix is given by $$(-1)^{n-k}\sum_{j=0}^n \binom{n+j-1}{2j}\binom{j}{n-k}C_j,$$ so that the moments in this case are give by $$\mu_n=\sum_{k=0}^n (-1)^{n-k}\sum_{j=0}^n \binom{n+j-1}{2j}\binom{j}{n-k}C_j c^k.$$ The row sums of the matrix above are all equal to $1$. In this case, the Riordan array becomes $\left(\frac{1}{1+t}, \frac{t}{1+t}\right)^{-1}=\left(\frac{1}{1-t}, \frac{t}{1-t}\right)$, which is the binomial matrix with an all $1$s first column. The unsigned matrix $$\left( \begin{array}{cccccc} 1 & 0 & 0 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 & 0 & 0 \\ 0 & 1 & 2 & 0 & 0 & 0 \\ 0 & 2 & 7 & 6 & 0 & 0 \\ 0 & 5 & 25 & 41 & 22 & 0 \\ 0 & 14 & 91 & 219 & 231 & 90 \\ \end{array} \right)$$ corresponds to the case $b=c+1$. Its row sums begin $$1, 1, 3, 15, 93, 645,\ldots.$$ The sequence $0,1,3,15,93,\ldots$ is the expansion of the reversion of $\frac{x(1-2x)}{1+x}$, \seqnum{A103210}. \end{example} \begin{example} When $b=c$, we are dealing with the matrix $\left(\frac{1}{1+cx}, \frac{x(1-cx)}{1+cx}\right)$, which begins $$\left( \begin{array}{cccccc} 1 & 0 & 0 & 0 & 0 & 0 \\ -c & 1 & 0 & 0 & 0 & 0 \\ c^2 & -3 c & 1 & 0 & 0 & 0 \\ -c^3 & 5 c^2 & -5 c & 1 & 0 & 0 \\ c^4 & -7 c^3 & 13 c^2 & -7 c & 1 & 0 \\ -c^5 & 9 c^4 & -25 c^3 & 25 c^2 & -9 c & 1 \\ \end{array} \right).$$ The coefficients are those of the signed Delannoy triangle \seqnum{A008288}. 
In this case, the inverse or moment matrix begins $$\left( \begin{array}{cccccc} 1 & 0 & 0 & 0 & 0 & 0 \\ c & 1 & 0 & 0 & 0 & 0 \\ 2 c^2 & 3 c & 1 & 0 & 0 & 0 \\ 6 c^3 & 10 c^2 & 5 c & 1 & 0 & 0 \\ 22 c^4 & 38 c^3 & 22 c^2 & 7 c & 1 & 0 \\ 90 c^5 & 158 c^4 & 98 c^3 & 38 c^2 & 9 c & 1 \\ \end{array} \right).$$ In this case, the moments are the scaled large Schroeder numbers $\mu_n=c^n S_n$. \end{example} \section{Relations between the Riordan arrays} We let $$L=\left(\frac{1}{1+c t}, \frac{t(1-bt)}{1+ct}\right)$$ be the coefficient matrix of the LBPs $\{P_n(x)\}$. We recall that we have $$P_n(x)=(x-c)P_{n-1}(x)-bxP_{n-2}(x),$$ with $P_0(x)=1, P_1(x)=x-c$. The LBP moments $\mu_n$ are then the elements of the first column of $L^{-1}$. We let $$O=\left(\frac{(1+bt)^2}{1+(2b+c)t+b(b+c)t^2}, \frac{t}{1+(2b+c)t+b(b+c)t^2}\right).$$ This is the coefficient array of the family of orthogonal polynomials $Q_n(x)$ which, for $n \ge 3$, satisfies the recurrence $$Q_n(x)=(x-(2b+c))Q_{n-1}(x)-b(b+c)Q_{n-2}(x),$$ with $Q_0(x)=1, Q_1(x)=x-c, Q_2(x)=x^2-2x(b+c)+c(b+c)$. The LBP moments $\mu_n$ are given by the elements of the first column of $O^{-1}$. We also let $$\tilde{O}=\left(\frac{1+bt}{1+(2b+c)t+b(b+c)t^2}, \frac{t}{1+(2b+c)t+b(b+c)t^2}\right).$$ This is the coefficient matrix of the family of orthogonal polynomials $\tilde{Q}_n(x)$ that satisfy the recurrence $$\tilde{Q}_n(x)=(x-(2b+c))\tilde{Q}_{n-1}(x)-b(b+c)\tilde{Q}_{n-2}(x),$$ with $\tilde{Q}_0(x)=1, \tilde{Q}_1(x)=x-(b+c)$. The moments $\tilde{\mu}_n$ are given by the elements of the first column of $\tilde{O}^{-1}$. Finally, we let $$B(b)=\left(\frac{1}{1-bt}, \frac{t}{1-bt}\right)$$ be the generalized binomial matrix with general $(n,k)$-th element $\binom{n}{k}b^{n-k}$. We have $B(b)^{-1}=B(-b)$. \begin{proposition} We have $$L=\left(1, \frac{t}{1-bt}\right)\cdot O.$$ \end{proposition} \begin{proof} We have $O=(g(t), f(t))$ where $g(t)=\frac{(1+bt)^2}{1+(2b+c)t+b(b+c)t^2}$. The first element of the product above is then given by $$1\cdot g\left(\frac{t}{1-bt}\right)=\frac{1}{1+ct}.$$ Similarly the second element is given by $$f\left(\frac{t}{1-bt}\right)=\frac{t(1-bt)}{1+ct}.$$ \end{proof} \begin{corollary} We have $$P_n(x)=\sum_{k=0}^n \binom{n-1}{n-k}b^{n-k} Q_k(x).$$ \end{corollary} \begin{proof} The general element of the Riordan array $\left(1, \frac{t}{1-bt}\right)$ is $\binom{n-1}{n-k}b^{n-k}$. \end{proof} This provides a Riordan array interpretation of the links between the constant coefficient Laurent biorthogonal polynomials and the orthogonal polynomials defined by the coefficient matrix $O$. \begin{proposition} We have $$L=B(b)\cdot \tilde{O}.$$ \end{proposition} \begin{corollary} We have $$P_n(x)=\sum_{k=0}^n \binom{n}{k}b^{n-k} \tilde{Q}_k(x).$$ \end{corollary} We can thus say that the LBP polynomials are the $b$-th binomial transform of the orthogonal polynomials $\tilde{Q}_n(x)$. The relationship between the polynomials $Q_n(x)$ and $\tilde{Q}_n(x)$ is governed by the following result. \begin{proposition} We have $$O=(1+bt,t)\cdot \tilde{O}.$$ \end{proposition} We can find a relationship between the LBP matrix $\left(\frac{1}{1+ct}, \frac{t(1-bt)}{1+ct}\right)$ and the simpler orthogonal polynomial coefficient array $\left(\frac{1}{1+(2b+c)t+b(b+c)t^2}, \frac{t}{1+(2b+c)t+b(b+c)t^2}\right)$. This is the array of the family of orthogonal polynomials $\hat{Q}_n(x)$ that satisfy $$\hat{Q}_n(x)=(x-(2b+c))\hat{Q}_{n-1}(x)-b(b+c)\hat{Q}_{n-2}(x)$$ with $\hat{Q}_0(x)=1, \hat{Q}_1(x)=x-(2b+c)$.
\begin{proposition} We have $$L=\left(\frac{1}{(1-bt)^2}, \frac{t}{1-bt}\right)\cdot \left(\frac{1}{1+(2b+c)t+b(b+c)t^2}, \frac{t}{1+(2b+c)t+b(b+c)t^2}\right).$$ \end{proposition} We conclude from this that $$P_n(x)=\sum_{k=0}^n \binom{n+1}{k+1}b^{n-k}\hat{Q}_k(x).$$ \section{Conclusions} The structure of the Laurent biorthogonal polynomials defined by constant coefficients is fully determined by the properties of the generalized Delannoy matrix given by the Riordan array $\left(\frac{1}{1+ct}, \frac{t(1-bt)}{1+ct}\right)$. By means of Riordan array analysis, we can show them to be the binomial transform of a related family of orthogonal polynomials. These constant coefficient Laurent biorthogonal polynomials can be defined by $T$-fractions which are related to colored Schroeder paths. In particular, the moments of these LBP polynomials count Schroeder paths with colored horizontal and down steps (where the colors are the same for each level). \hrule \noindent 2010 {\it Mathematics Subject Classification}: Primary 42C05; Secondary 11B83, 11C20, 15B05, 15B36, 33C45. \noindent \emph{Keywords:} Laurent biorthogonal polynomials, orthogonal polynomials, moments, Toeplitz determinant, Hankel determinant, Riordan array. \hrule \noindent (Concerned with sequences \seqnum{A000108}, \seqnum{A006318}, \seqnum{A060693}, \seqnum{A103210}, and \seqnum{A155867}.) \end{document}
\begin{document} \title{On slow minimal reals I} \author{Mohammad Golshani} \address{Mohammad Golshani, School of Mathematics, Institute for Research in Fundamental Sciences (IPM), P.O.\ Box: 19395--5746, Tehran, Iran.} \email{[email protected]} \thanks{The first author's research has been supported by a grant from IPM (No. 99030417). He also thanks Heike Mildenberger for her discussions on Shelah's creature forcing and the results of this paper.} \author{Saharon Shelah} \address{Einstein Institute of Mathematics, The Hebrew University of Jerusalem, Jerusalem, 91904, Israel, and Department of Mathematics, Rutgers University, New Brunswick, NJ 08854, USA.} \email{[email protected]} \thanks{ The second author's research has been partially supported by the European Research Council grant 338821. This is publication 1198 of the second author.} \thanks{The authors thank the referee of the paper for his comments on early versions of the paper.} \subjclass[2020]{Primary 03E35} \date{} \keywords{Minimal real, creature forcing} \begin{abstract} Answering a question of Harrington, we show that there exists a proper forcing notion which adds a minimal real $\eta \in \prod_{i<\omega} n^*_i$ that is eventually different from any old real in $\prod_{i<\omega} n^*_i$, where the sequence $\langle n^*_i \mid i<\omega \rangle$ grows slowly. \end{abstract} \maketitle \section{Introduction} The method of creature forcing was introduced by Shelah for solving problems related to cardinal invariants like the unbounded number or the splitting number, as well as questions of the existence of special kinds of $P$-points. We refer to \cite{r-shelah} for a more complete history of the development of the subject and its wide applications. Let us call a real $\mathbf{r}: \omega \rightarrow \omega$ slow if for each $n > 0$, $\mathbf{r}(n) \leq n^{g(n)}$, where $g: \omega \rightarrow \omega$ is non-decreasing with $\lim_{n \rightarrow \infty}g(n)=\infty$ and $\lim_{n \rightarrow \infty} \frac{g(n)}{\log_2(n)}=0$. In this paper, we use the method of tree creature forcing to answer a question of Leo Harrington\footnote{Harrington asked the second author the question in personal communication.}. There are several examples of forcing notions like Silver-Prikry forcing \cite{Grigorieff}, Laver forcing \cite{groszek}, Jensen's minimal forcing \cite{jensen}, rational perfect set forcing \cite{miller}, Sacks forcing \cite{sacks}, splitting forcing \cite{schilhan} and so on, which add a minimal real over the ground model. In these examples, the added real is fast growing and cannot be dominated by the ground model reals. On the other hand, given a sequence $\langle n^*_i \mid i<\omega \rangle$ of natural numbers which grows very fast, one can define a forcing notion which adds a real $\eta \in \prod_{i<\omega} n^*_i$ which is minimal and eventually different from any old real in $\prod_{i<\omega} n^*_i$; see for example \cite{carl} and \cite{judah-shelah}. Motivated by these results, Harrington asked the second author whether there is a proper forcing notion which adds such a minimal real for a sequence $\langle n^*_i \mid i<\omega \rangle$ which grows slowly. In this paper we give abstract conditions, called the local minimality condition and the global minimality condition, such that if a tree creature forcing notion satisfies them, then it adds a slow minimal real.
In a further paper we prove similar results for measured creature forcing. The paper is organized as follows. In Section \ref{Creatures and tree creatures}, we introduce some preliminary results on creature forcing. Given a sequence $\langle n^*_i \mid i<\omega \rangle$ of natural numbers which grows slowly\footnote{See Section \ref{example for minimality} for some examples of such sequences.}, we define, in Section \ref{Minimality with creatures}, a class of creature forcing notions, each of which adds a real $\eta \in \prod_{i<\omega} n^*_i$ which is minimal and eventually different from any old real in $\prod_{i<\omega} n^*_i$. A very special case of the results of this section is proved in \cite{ci-shelah}. In Sections \ref{Sufficient conditions for global minimality condition} and \ref{example for minimality}, we use probabilistic arguments to show that the class of such forcing notions is non-empty. We assume no familiarity with creature forcing, and present all required preliminaries, to make the paper as self-contained as possible. \section{Creatures and tree creatures} \label{Creatures and tree creatures} In this section, we briefly review some concepts from Shelah's creature forcing that will be used in the rest of the paper. Our presentation follows \cite{mildenberger} and \cite{r-shelah}. \begin{definition} \begin{enumerate} \item [$(a)$] A quasi-tree $(T, \lhd)$ over $X$ is a set $T \subseteq X^{<\omega}$ with the initial segment relation, such that $T$ has a $\lhd$-minimal element, called the root of $T$, $\hbox{rt}(T).$ It is called a tree if it is closed under initial segments of length $\geq \len(\hbox{rt}(T))$. \item [$(b)$] For $\eta \in T$, $\Suc_T(\eta)$ is the set of all immediate successors of $\eta$ in $T$: \[ \Suc_T(\eta)=\{ \nu \in T: \eta \lhd \nu \text{~and~} \neg \exists \rho \in T (\eta \lhd \rho \lhd \nu) \}. \] \item [$(c)$] $\max(T)$ is the set of all maximal nodes (if there are any) in $T$: \[ \max(T)=\{\eta \in T: \neg \exists \rho \in T, \eta \lhd \rho \}. \] \item [$(d)$] $\alepht{T}=T \setminus \max(T)$. \item [$(e)$] For $\eta \in T$, $T^{[\eta]}=\{\nu \in T: \nu$ and $\eta$ are comparable $\}$. \item [$(f)$] $\hbox{Lim }(T)$ is the set of all cofinal branches through $T$: \[ \hbox{Lim }(T)=\{\eta \in X^\omega: \exists^{\infty}n,~ \eta \restriction n \in T \}. \] \item [$(g)$] $T$ is well-founded if it has no cofinal branches. \item [$(h)$] A subset $J \subseteq T$ is a front of $T$ if $J$ consists of $\lhd$-incomparable elements and for any branch $\eta$ of $T$, $\eta \restriction n \in J$ for some $n.$ \end{enumerate} \end{definition} Let $\chi$ be a sufficiently large regular cardinal. Let also $\mathbf{H}: \omega \to V \setminus \{\emptyset\}$ be a function such that for each $i<\omega,$ $|\mathbf{H}(i)| \geq 2$. We call $\mathbf{H}(i)$ the reservoir at $i$. The forcing notions we define aim to add a function $g \in \prod_{i<\omega} \mathbf{H}(i).$ \begin{definition} \begin{enumerate} \item [$(a)$] A (weak) creature for $\mathbf{H}$ is a tuple $t=(\mathbf{nor}[t], \mathbf{val}[t], \mathbf{dis}[t])$, where \begin{itemize} \item [(1)] $\mathbf{nor}[t] \in \mathbb{R}^{\geq 0}$. \item [(2)] $\mathbf{val}[t] \subseteq \bigcup_{m_0 < m_1 < \omega} \{ (w, u) \in \prod_{i< m_0}\mathbf{H}(i) \times \prod_{i<m_1}\mathbf{H}(i): w \lhd u \}$.
\item [(3)] $\mathbf{dis}[t] \in H(\chi).$ \end{itemize} The family of all creatures for $\mathbf{H}$ is denoted by $\text{CR}[\mathbf{H}]$. \item [$(b)$] If $t \in \text{CR}[\mathbf{H}],$ then $\text{basis}(t)= \dom(\mathbf{val}[t])$ and $\text{pos}(t)=\range(\mathbf{val}[t])$. \item [$(c)$] A creature $t$ is a tree-creature if $\text{basis}(t)=\{\eta\}$ is a singleton and no distinct elements of $\text{pos}(t)$ are $\lhd$-comparable. The family of all tree-creatures for $\mathbf{H}$ is denoted by $\text{TCR}[\mathbf{H}]$. We also set \[ \text{TCR}_{\eta}[\mathbf{H}] =\{ t \in \text{TCR}[\mathbf{H}]: \text{basis}(t)=\{\eta\} \}. \] \end{enumerate} \end{definition} \begin{definition} \begin{enumerate} \item [$(a)$] Let $K \subseteq \text{CR}[\mathbf{H}]$. A function $\Sigma: [K]^{\leq \omega} \to \mathcal{P}(K)$ is called a sub-composition operation on $K$ if \begin{itemize} \item [(1)] $\Sigma(\emptyset)=\emptyset$, and for each $s \in K, s \in \Sigma(s).$ \item [(2)] (transitivity) If $\mathcal{S} \in [K]^{\leq \omega}$ and for each $s \in \mathcal{S}$, $\mathcal{S}_s \in [K]^{\leq \omega}$ is such that $s \in \Sigma(\mathcal{S}_s)$, then $\Sigma(\mathcal{S}) \subseteq \Sigma(\bigcup_{s \in \mathcal{S}}\mathcal{S}_s)$. \end{itemize} \item [$(b)$] A pair $(K, \Sigma)$ is called a creating pair for $\mathbf{H}$ if $K \subseteq \text{CR}[\mathbf{H}]$ and $\Sigma$ is a sub-composition operation on $K$. \item [$(c)$] A sub-composition operation $\Sigma$ on $K \subseteq \text{TCR}[\mathbf{H}]$ is called a tree-composition operation on $K$. Such a pair $(K, \Sigma)$ is called a tree-creating pair for $\mathbf{H}$ if in addition: \begin{itemize} \item [(1)] If $\mathcal{S} \in [K]^{\leq \omega}$ and $\Sigma(\mathcal{S}) \neq \emptyset,$ then there exists a well-founded quasi-tree $T \subseteq \bigcup_{n<\omega} \prod_{i<n}\mathbf{H}(i)$ and a sequence $\langle s_\nu: \nu \in \hat{T} \rangle \subseteq K$ such that $\mathcal{S}=\{ s_\nu: \nu \in \hat{T} \}$, and for each $\nu \in \hat{T}$, $s_\nu \in \text{TCR}_{\nu}[\mathbf{H}]$ and $\text{pos}(s_\nu)=\Suc_T(\nu)$. \item [(2)] If $t \in \Sigma(s_\nu: \nu \in \hat{T}),$ then $t \in \text{TCR}_{\hbox{rt}(T)}[\mathbf{H}]$ and $\text{pos}(t) \subseteq \max(T)$ (where $T$ is as in (1)). \end{itemize} \end{enumerate} \end{definition} To each tree-creating pair $(K, \Sigma)$, we assign the forcing notion ${\mathbb{Q}}^{\text{tree}}(K, \Sigma)$ as follows: \begin{definition} Assume $(K, \Sigma)$ is a tree-creating pair for $\mathbf{H}$. Then ${\mathbb{Q}}^{\text{tree}}(K, \Sigma)$ consists of all sequences $p= \langle \mathfrak{c}^p_\eta: \eta \in T^p \rangle$, where \begin{enumerate} \item $T^p \subseteq \bigcup_{n<\omega} \prod_{i<n}\mathbf{H}(i)$ is a tree with no maximal nodes, i.e., $\max(T^p)=\emptyset.$ \item $\mathfrak{c}^p_\eta \in \text{TCR}_{\eta}[\mathbf{H}] \cap K$ and $\text{pos}(\mathfrak{c}^p_\eta)=\Suc_{T^p}(\eta)$.
\end{enumerate} Given $p, q \in {\mathbb{Q}}^{\text{tree}}(K, \Sigma)$, we set $p \leq q$ ($p$ is an extension of $q$) if $T^p \subseteq T^q$ and for each $\eta \in T^p,$ there exists a well-founded quasi-tree $T \subseteq (T^p)^{[\eta]}$ such that $\mathfrak{c}^p_\eta \in \Sigma(\mathfrak{c}^q_\nu: \nu \in \hat{T}).$ \end{definition} The forcing notion ${\mathbb{Q}}^{\text{tree}}(K, \Sigma)$ is maximal among all forcing notions that we assign to $(K, \Sigma)$, in the sense that all other forcing notions that we define are subsets of ${\mathbb{Q}}^{\text{tree}}(K, \Sigma)$. \section{Minimality with creatures} \label{Minimality with creatures} In this section, we define a class of creature forcing notions which add a minimal real with slow splitting. \begin{definition} Let $(K, \Sigma)$ be a tree-creating pair for $\mathbf{H}$. The forcing notion ${\mathbb{Q}}^*(K, \Sigma)$ consists of all conditions $p= \langle \mathfrak{c}^p_\eta: \eta \in T^p \rangle \in {\mathbb{Q}}^{\text{tree}}(K, \Sigma)$ such that \begin{enumerate} \item $T^p \subseteq \omega^{<\omega}$ is a finitely branching tree with $\max(T^p)=\emptyset.$ \item Each $\mathfrak{c}^p_\eta$ is finitary, i.e., $|\mathbf{val}[\mathfrak{c}^p_\eta]|$ is finite. \item $\eta \in \hbox{Lim }(T^p) \implies \liminf (\mathbf{nor}[\mathfrak{c}^p_{\eta \restriction n}]: n \geq \len(\hbox{rt}(T^p)))=\infty.$ \item Each $\mathfrak{c}^p_\eta$ is $2$-big, i.e., if $\mathbf{nor}[\mathfrak{c}^p_\eta] \geq 1$ and $\text{pos}(\mathfrak{c}^p_\eta)= u_0 \cup u_1$ is a partition of $\text{pos}(\mathfrak{c}^p_\eta)$, then for some $\mathfrak{d} \in \Sigma(\mathfrak{c}^p_\eta)$, $\mathbf{nor}[\mathfrak{d}] \geq \mathbf{nor}[\mathfrak{c}^p_\eta] - 1$ and for some $l<2$, $\text{pos}(\mathfrak{d}) \subseteq u_l.$ \end{enumerate} Given $p, q \in {\mathbb{Q}}^*(K, \Sigma),$ $p \leq q$ iff \begin{enumerate} \item $T^p \subseteq T^q.$ \item For each $\eta \in T^p$, $\mathfrak{c}^p_\eta \in \Sigma(\mathfrak{c}^q_\eta)$. \end{enumerate} \end{definition} A proof of the next lemma can be found in \cite{mildenberger} and \cite{r-shelah}; it is essentially based on the fact that all creatures involved in the forcing are $2$-big. \begin{lemma} \label{lem:basic properties} \begin{enumerate} \item [(a)] $({\mathbb{Q}}^*(K, \Sigma), \leq)$ is proper. \item [(b)] (continuous reading of names) For any condition $p \in {\mathbb{Q}}^*(K, \Sigma),$ any $k<\omega$ and any ${\mathbb{Q}}^*(K, \Sigma)$-name $\lusim{f}: \omega \to \check{V}$, there exists $q$ such that \begin{itemize} \item [$(\alpha)$] $q \leq_k p,$ i.e., $q \leq p$ and if $\eta \in T^q$ and $\len(\eta) \leq k \vee \mathbf{nor}[\mathfrak{c}^q_\eta] \leq k$ then $\mathfrak{c}^q_\eta=\mathfrak{c}^p_\eta$, so $\Suc_{T^q}(\eta)=\Suc_{T^p}(\eta)$. \item [$(\beta)$] For every $n$, there exists a front $J_n$ of $T^q$ such that \[ \eta \in J_n \implies \exists a \in V, q^{[\eta]} \Vdash \text{``}\lusim{f}(n)= a\text{''}. \] \end{itemize} \end{enumerate} \end{lemma} We now state and prove the main result of this section, which gives sufficient conditions for the forcing notion ${\mathbb{Q}}^*(K, \Sigma)$ to add a minimal real.
\begin{theorem} \label{minimal real} Assume ${\mathbb{Q}}^*(K, \Sigma)$ satisfies the following conditions: \begin{enumerate} \item [(a)] (the local minimality condition) If $\mathbf{nor}[\mathfrak{c}^p_\eta] \geq 1$ and $E$ is an equivalence relation on $\text{pos}(\mathfrak{c}^p_\eta)$, then for some $\mathfrak{d} \in \Sigma(\mathfrak{c}^p_\eta)$ we have \begin{itemize} \item [$(\alpha)$] $\mathbf{nor}[\mathfrak{d}] \geq \mathbf{nor}[\mathfrak{c}^p_\eta]-1.$ \item [$(\beta)$] $E \restriction \text{pos}(\mathfrak{d})$ is trivial: either every equivalence class is a singleton or there is exactly one equivalence class. \end{itemize} \item [(b)] (the global minimality condition) For every $p \in {\mathbb{Q}}^*(K, \Sigma)$ and $k<\omega$ there are $q \in {\mathbb{Q}}^*(K, \Sigma)$ and $m > k, \len(\hbox{rt}(T^p))$ such that \begin{itemize} \item [$(\alpha)$] $q \leq_k p.$ \item [$(\beta)$] If $J$ is a front of $T^q$ consisting of sequences of length $> m,$ $S$ is a finite set and $\langle f_\rho: \rho\in J \rangle$ is a sequence of one-to-one functions from $\Suc_{T^q}(\rho)$ into $S$, then there exists a partition $S=S_0 \cup S_1$ of $S$ such that for every $\eta \in T^q \cap \omega^m,$ there are $r_0, r_1$ such that each $r_l \leq_{k}q^{[\eta]}$ and \[ \eta \lhd \rho \in J \cap T^{r_l} \implies f_\rho``[\Suc_{T^{r_l}}(\rho)] \subseteq S_l. \] \end{itemize} \end{enumerate} Then $\Vdash_{{\mathbb{Q}}^*(K, \Sigma)}$``$\lusim{\eta}=\bigcup \{\hbox{rt}(T^p): p \in \dot{G}_{{\mathbb{Q}}^*(K, \Sigma)} \}$ is a minimal real''. \end{theorem} \begin{proof} Assume $p \in {\mathbb{Q}}^*(K, \Sigma)$ and $p \Vdash_{{\mathbb{Q}}^*(K, \Sigma)}$``$\lusim{f}$ is a new real''. Without loss of generality, we may assume that $p \Vdash_{{\mathbb{Q}}^*(K, \Sigma)}$``$\lusim{f} \in {}^{\omega}2$''. We show that there is $q \leq p$ such that $q \Vdash_{{\mathbb{Q}}^*(K, \Sigma)}$``$\lusim{\eta} \in V[\lusim{f}]$''. By Lemma \ref{lem:basic properties}(b) and \cite{r-shelah}, extending $p$ if necessary, we can assume that there exists a sequence $\langle (J_n, H_n): n < \omega \rangle$ such that: \begin{enumerate} \item $J_n$ is a front of $T^p$. \item $J_{n+1}$ is above $J_n$, i.e., $\forall \nu \in J_{n+1} \exists l<\len(\nu),~ \nu \restriction l \in J_n.$ \item $H_n: J_n \to 2$ is such that for each $\nu \in J_n$, $p^{[\nu]} \Vdash_{{\mathbb{Q}}^*(K, \Sigma)}$``$\lusim{f}(n)=H_n(\nu)$''. \end{enumerate} \begin{claim} \label{3.5A} For every $k<\omega$ and $p' \leq p$ there are $q, \bar{J}, \bar{f}$ and $\bar{n}$ such that: \begin{enumerate} \item[(a)] $q \leq_k p'$, \item[(b)] $\bar{J}=\langle J_l: l<\omega \rangle$, where each $J_l$ is a front of $T^q$, \item[(c)] $J_{l+1}$ is above $J_l$, \item[(d)] $\bar{n}=\langle n(l): l<\omega \rangle$, \item[(e)] $\bar{f}=\langle f_\nu: \nu \in \bigcup_{l<\omega} J_l \rangle$, \item[(f)] If $\nu \in J_l,$ then $f_\nu: \Suc_{T^q}(\nu) \to {}^{n(l)}2$ is such that: \begin{itemize} \item $f_\nu$ is one-to-one, \item if $\rho \in \Suc_{T^q}(\nu)$, $\nu \in J_l$, then \[ q^{[\rho]} \Vdash_{{\mathbb{Q}}^*(K, \Sigma)}\text{``}\lusim{f}\restriction n(l) = f_\nu(\rho)\text{''}. \] \end{itemize} \end{enumerate} \end{claim} \begin{proof} To start, let $q' \leq_k p'$ and $m > k, \len(\hbox{rt}(T^{p'}))$ witness the global minimality condition (b)$(\beta)$ applied to $p'$ and $k$.
Let $n_0$ be such that \begin{itemize} \item $\nu \in T^{q'} \wedge \len(\nu) \geq n_0 \implies \mathbf{nor}[\mathfrak{c}^{q'}_\nu] > k +|T^{q'} \cap \omega^{m}|.$ \item $n_0 > m.$ \item $J_{n_0}$ is above $T^{q'} \cap \omega^{m}$. \end{itemize} For $n>n_0$, let $\Lambda_n$ denote the set of all $\nu \in T^{q'}$ which lie in or below the front $J_n$, i.e., $\nu \unlhd \rho$ for some $\rho \in J_n$. For every $n> n_0,$ we define a function $H^+_n$, with domain a subset of $\Lambda_n$ and values in ${}^{n}2$, and a sequence $ \langle \mathfrak{d}^n_\nu: \nu \in \Lambda_n \rangle$, by downward induction on $\len(\nu)$ as follows: \underline{Case 1. $\nu \in J_n:$} Let $\rho=H^+_n(\nu) \in {}^{n}2$ be such that $$l<n \wedge \eta \in J_l \wedge \eta \unlhd \nu \implies \rho(l)=H_l(\eta)$$ and set $\mathfrak{d}^n_\nu=\mathfrak{c}^{q'}_\nu$. \underline{Case 2. $\Suc_{T^{q'}}(\nu) \subseteq \Lambda_n$ and $\langle H^+_n(\rho), \mathfrak{d}^n_\rho: \rho \in \Suc_{T^{q'}}(\nu) \rangle$ is defined:} Define an equivalence relation $E_\nu$ on $\Suc_{T^{q'}}(\nu)$ by \[ (\rho_1, \rho_2) \in E_\nu \iff H^+_n(\rho_1) = H^+_n(\rho_2). \] By the local minimality condition (a), there exists $\mathfrak{d}^n_\nu \in \Sigma(\mathfrak{c}^{q'}_\nu)$ such that $\mathbf{nor}[\mathfrak{d}^n_\nu] \geq \mathbf{nor}[\mathfrak{c}^{q'}_\nu]-1$ and $H^+_n \restriction \text{pos}(\mathfrak{d}^n_\nu)$ is constant or one-to-one. If $H^+_n \restriction \text{pos}(\mathfrak{d}^n_\nu)$ is constant, then choose such a $\mathfrak{d}^n_\nu$ and let $H^+_n(\nu)$ be that constant value. Otherwise let $H^+_n(\nu)$ be undefined. So $\dom(H^+_n)$ is an upward closed subset of $\Lambda_n$ which includes $J_n$. Next we show that there exists $n<\omega$ such that there is no $\eta \in T^{q'} \cap \omega^{n_0}$ with $\eta \in \dom(H^+_n).$ Suppose otherwise. Then, as $T^{q'} \cap \omega^{n_0}$ is finite, there is $\eta \in T^{q'} \cap \omega^{n_0}$ such that $\exists^{\infty} n > n_0,~ \eta \in \dom(H^+_n).$ Let $D$ be an ultrafilter on $\omega$ such that $\{n: \eta \in \dom(H^+_n) \} \in D.$ Let \begin{center} $T=\{\rho \in T^{q'}: \eta \lhd \rho$ and $\forall l (\len(\eta) \leq l < \len(\rho) \implies \{ n: \rho \restriction (l+1) \in \text{pos}(\mathfrak{d}^n_{\rho \restriction l}) \} \in D) \}.$ \end{center} Let $q'' \leq q'$ be such that $T^{q''}=T$ and for every $\nu \in T,$ $\mathfrak{c}^{q''}_\nu = \mathfrak{d}^n_\nu$. Then $q'' \Vdash$``$\lusim{f} \in V$''. To see this, let $\rho \in \hbox{Lim }(T)$. Then \begin{center} $q'' \Vdash$``$\lusim{f} \restriction (n_0, \omega) = \{(n, H^+_n(\rho \restriction n)): n > n_0 \}$'', \end{center} from which the result follows. This contradicts the fact that $\lusim{f}$ is forced to be a new real. So we can find $n_1> m$ such that $\dom(H^+_{n_1}) \cap (T^{q'} \cap \omega^{n_0})=\emptyset.$ By extending $q'$, let us assume that for each $n \geq n_1$ and $\nu \in \dom(H^+_n)\setminus J_n$, $\Suc_{T^{q'}}(\nu)=\text{pos}(\mathfrak{d}_\nu^n).$ It then follows that, for each $n \geq n_1,$ \[ \nu \in T^{q'} \cap \dom(H^+_n) ~\&~ \nu \lhd \nu' \in T^{q'} \cap \Lambda_n \implies H^+_n(\nu)=H^+_n(\nu'). \] For $l<\omega$ set $n(l)=n_1+l$ and \[ J_l=\{\rho \in \Lambda_{n(l)}: \rho \notin \dom(H^+_{n(l)}) \text{~but~} \Suc_{T^{q'}}(\rho) \subseteq \dom(H^+_{n(l)}) \}. \] Then $J_l$ is a front of $T^{q'}$ above $T^{q'} \cap \omega^{n_0}$, and by our construction, for every $\rho \in J_l,$ $H^+_{n(l)} \restriction \text{pos}(\mathfrak{d}^{n(l)}_\rho)$ is one-to-one.
For $l<\omega$ and $\nu \in J_l$ set $f_\nu=H^+_{n(l)} \restriction \text{pos}(\mathfrak{d}^{n(l)}_\nu)$. Then $f_\nu: \Suc_{T^{q'}}(\nu) \to {}^{n(l)}2$ is a one-to-one function. Finally let $q \leq_k p'$ be such that for each $l<\omega$ and $\rho \in J_{l},$ $\Suc_{T^{q}}(\rho)=\text{pos}(\mathfrak{d}^{n(l)}_\rho)$. \end{proof} \begin{claim} \label{3.5B} Suppose $p, \lusim{f}$ are as above, $k, i< \omega$ and $p' \leq p.$ Then there are $r, \Psi, n_\ast$ and $n_\bullet$ such that: \begin{enumerate} \item[(a)] $r \leq_k p'$, \item[(b)] $\Psi: \prod_{l< n_\bullet} \mathbf{H}(l) \cap T^r \to \{0, 1\}$, \item[(c)] If $\nu \in T^r$, $\len(\nu)=n_\ast$, then \[ r^{[\nu]} \Vdash_{{\mathbb{Q}}^*(K, \Sigma)} \text{``} \lusim{\eta}(i)=\Psi(\lusim{f} \restriction n_\bullet)\text{''}. \] \end{enumerate} \end{claim} \begin{proof} Let $p', k, i$ be given as above. Without loss of generality we may assume that $k>i$. By the global minimality condition \ref{minimal real}(b), applied to $p'$ and $k+1$, we can find $q \leq_{k+1} p'$ and $m> k+1, \len(\hbox{rt}(T^{p'}))$ satisfying clauses ($\alpha$) and ($\beta$) there. Let $\bar{J}$ and $\bar{f}$ be as in the conclusion of Claim \ref{3.5A}. So for some $l$, $J=T^q \cap J_l$ is a front of $T^q$ above level $m$. Let $S={}^{n(l)}2$. Hence we can apply \ref{minimal real}(b)($\beta$) and get $(S_0, S_1, \langle r_{\nu, j}: \nu \in T^q$ with $\len(\nu)=m,~ j<2 \rangle)$ as there. Let $r$ be a $k$-extension of $p'$ such that \[ T^r=\bigcup\{ T^{r_{\nu, j}}: \nu \in T^q \cap~^{m}\omega \text{~and~} j=\nu(i) \}. \] Set also $n_\ast=\max\{\len(\nu): \nu \in J \}$ and $n_\bullet=m.$ Finally define $\Psi: \prod_{l< n_\bullet} \mathbf{H}(l) \cap T^r \to \{0, 1\}$ by \[ \Psi(t)=j \iff j=\nu(i) \text{~where~}\nu \text{~is such that~} t \in T^{r_{\nu, j}}. \] It is easily seen that $r, \Psi, n_\ast$ and $n_\bullet$ are as required. \end{proof} \begin{claim} \label{3.5C} For every $k$ and $p' \leq_k p$, there are $q, \bar{n}$ and $\bar{\Psi}$ such that \begin{enumerate} \item[(a)] $q \leq_k p',$ \item[(b)] $\bar{n}=\langle n(l): l<\omega \rangle$, \item[(c)] $\bar{\Psi}=\langle \Psi_l: l<\omega \rangle$, \item[(d)] $\Psi_l$ is a function from $T^q \cap {}^{n(l)}\omega$ to $2,$ \item[(e)] If $\nu \in T^q \cap {}^{n(l)}\omega$, then \[ q^{[\nu]} \Vdash_{{\mathbb{Q}}^*(K, \Sigma)}\text{``} \lusim{\eta}(l)=\Psi_l(\lusim{f} \restriction n(l))\text{''}. \] \end{enumerate} \end{claim} \begin{proof} By Claim \ref{3.5B} we can find $\bar{r}, \bar{\Psi}, \bar{n}_\ast$ and $\bar{n}_\bullet$ such that: \begin{itemize} \item $\bar{r}=\langle r_l: l<\omega \rangle$ is such that $r_0 \leq_k p'$ and for each $l<\omega,$ $r_{l+1} \leq_{k+l} r_l$, \item $\bar{n}_\ast= \langle n_{\ast, l}: l<\omega \rangle$ and $\bar{n}_\bullet= \langle n_{\bullet, l}: l<\omega \rangle$ are increasing sequences of natural numbers, \item $\bar{\Psi}= \langle \Psi_l: l<\omega \rangle$, where $\Psi_l: \prod_{j< n_{\bullet, l}} \mathbf{H}(j) \cap T^{r_l} \to \{0, 1\}$, \item If $\nu \in T^{r_l}$, $\len(\nu)=n_{\ast, l}$, then \[ r_l^{[\nu]} \Vdash_{{\mathbb{Q}}^*(K, \Sigma)} \text{``} \lusim{\eta}(l)=\Psi_l(\lusim{f} \restriction n_{\bullet, l})\text{''}.
\] \end{itemize} For $l<\omega$ set $n(l)=n_{\ast, l}$ and let $q=\lim_{l \to \infty}r_l$ be the natural condition obtained by the fusion argument applied to the sequence $\bar{r}.$ \end{proof} Finally we are ready to complete the proof of Theorem \ref{minimal real}. Suppose that $p \Vdash_{{\mathbb{Q}}^*(K, \Sigma)}$``$\lusim{f} \in {}^{\omega}2$''. If $p \nVdash_{{\mathbb{Q}}^*(K, \Sigma)}$``$\lusim{f}\notin V$'', then for some $q \leq p$, $q \Vdash$``$\lusim{f}\in \check{V}$'' and we are done. So suppose otherwise. By Claim \ref{3.5C}, there are $q \leq p$, $\bar{n}$ and $\bar{\Psi}$ as there. So for every $l<\omega,$ \[ q \Vdash_{{\mathbb{Q}}^*(K, \Sigma)}\text{``}\lusim{\eta}(l)=\Psi_l(\lusim{f}\restriction n(l))\text{''}. \] It follows that $q \Vdash_{{\mathbb{Q}}^*(K, \Sigma)}$``$\lusim{\eta} \in V[\lusim{f}]$'', and we are done. \end{proof} \section{Sufficient conditions for the global minimality condition} \label{Sufficient conditions for global minimality condition} In this section we give some sufficient conditions which guarantee the global minimality condition of Theorem \ref{minimal real}. \begin{definition} Given a finite set $S$, we define a probability space $(\Omega, \mathcal{F}, \text{Prob})$ as follows: \begin{itemize} \item $\Omega=\{(S_0, S_1): S_0 \cap S_1 = \emptyset$ and $S_0 \cup S_1 = S \}$, i.e., the set of ordered partitions of $S$ into two (possibly empty) pieces. \item $\mathcal{F}=\mathcal{P}(\Omega)$. \item For $A \subseteq \Omega,$ $\text{Prob}(A)= \dfrac{|A|}{|\Omega|}$. \end{itemize} \end{definition} \begin{lemma} \label{conditions for global minimality} Assume that for every $p=\langle \mathfrak{c}^p_\eta: \eta \in T^p \rangle \in {\mathbb{Q}}^*(K, \Sigma)$ and every $k<\omega$ there are $m> k, \len(\hbox{rt}(T^p))$ and $q$ such that: \begin{itemize} \item [(a)] $q \leq_k p.$ \item [(b)] If $\eta \in T^q$ and $\len(\eta)\geq m,$ then $\mathbf{nor}[\mathfrak{c}^q_\eta] > k+ |T^q \cap \omega^m|$. \item [(c)] If $J$ is a front of $T^q$ such that $\len(\eta) \geq m$ for all $\eta \in J$, $S$ is a finite set and $\langle f_\rho: \rho\in J \rangle$ is a sequence of one-to-one functions from $\Suc_{T^q}(\rho)$ into $S$, then we can find a sequence $\langle a_\eta: \eta \in \Lambda \rangle$, where \[ \Lambda = \{ \eta \in T^q: \len(\eta) \geq m \text{~and~} \eta \text{~is below~}J \text{~or is in~}J \}, \] such that: \begin{enumerate} \item [$(\alpha)$] $a_\eta \in (0, 1)_{\mathbb{R}}$. \item [$(\beta)$] If $\eta \in J,$ then $a_\eta \geq \text{Prob} ($for some $l \in \{0, 1 \},$ there is no $\mathfrak{d} \in \Sigma(\mathfrak{c}^p_\eta)$ with $\mathbf{nor}[\mathfrak{d}] \geq \mathbf{nor}[\mathfrak{c}^p_\eta]-1$ and $f_\eta``[\text{pos}(\mathfrak{d})] \subseteq S_l).$ \item [$(\gamma)$] If $\eta \in \Lambda \setminus J,$ then $a_\eta \geq \text{Prob} ($there is no $\mathfrak{d} \in \Sigma(\mathfrak{c}^p_\eta)$ with $\mathbf{nor}[\mathfrak{d}] \geq \mathbf{nor}[\mathfrak{c}^p_\eta]-1$ such that $\nu \in \text{pos}(\mathfrak{d}) \implies E_\nu$ does not occur), whenever $E_\nu$ are events for $\nu \in \text{pos}(\mathfrak{c}^q_\eta)$ each of probability $\leq a_\nu.$ \item [$(\delta)$] $1 > \Sigma\{a_\eta: \eta \in T^q \cap \omega^m \}$. \end{enumerate} \end{itemize} Then the global minimality condition of Theorem \ref{minimal real} holds.
\end{lemma} \begin{proof} For $\eta \in \Lambda,$ let $\text{Good}_\eta$ be the event: ``For $l<2,$ there is $r_l \in {\mathbb{Q}}^*(K, \Sigma)$ such that $\hbox{rt}(T^{r_l})=\eta$, $r_l \leq_k q^{[\eta]}$ and $\forall \nu \in T^{r_l} \cap J,~ f_\nu``[\Suc_{T^{r_l}}(\nu)] \subseteq S_l$''. \begin{claim} For $\eta \in \Lambda$, $\text{Prob}(\text{Good}_\eta) \geq 1 - a_\eta.$ \end{claim} \begin{proof} We prove the claim by downward induction on $\len(\eta)$. Suppose $\eta \in J.$ Let $\Theta_\eta$ denote the statement ``for some $l \in \{0, 1 \},$ there is no $\mathfrak{d} \in \Sigma(\mathfrak{c}^p_\eta)$ with $\mathbf{nor}[\mathfrak{d}] \geq \mathbf{nor}[\mathfrak{c}^p_\eta]-1$ and $f_\eta``[\text{pos}(\mathfrak{d})] \subseteq S_l$''. So by clause $(c)(\beta),$ $a_\eta \geq \text{Prob} (\Theta_\eta)$. This implies \[ 1 - a_\eta \leq 1- \text{Prob}(\Theta_\eta) \leq \text{Prob}(\text{Good}_\eta). \] Otherwise, $\eta \in \Lambda \setminus J$ and $\eta$ is not $\lhd$-maximal in $T^q.$ For $\nu \in \text{pos}(\mathfrak{c}^q_\eta)$ let $E_\nu$ be the event ``$\text{Good}_\nu$ fails'', which by the induction hypothesis has probability $\leq a_\nu$. Let $\Phi_\eta$ denote the statement ``there is no $\mathfrak{d} \in \Sigma(\mathfrak{c}^p_\eta)$ with $\mathbf{nor}[\mathfrak{d}] \geq \mathbf{nor}[\mathfrak{c}^p_\eta]-1$ such that $\nu \in \text{pos}(\mathfrak{d}) \implies E_\nu$ does not occur''. By clause $(c)(\gamma),$ $a_\eta \geq \text{Prob} (\Phi_\eta)$, so \[ 1 - a_\eta \leq 1- \text{Prob}(\Phi_\eta) \leq \text{Prob}(\text{Good}_\eta). \] \end{proof} Since $1 > \Sigma\{a_\eta: \eta \in T^q \cap \omega^m \}$, the probability that $\text{Good}_\eta$ fails for some $\eta \in T^q \cap \omega^m$ is less than $1$; hence we can find a pair $(S_0, S_1) \in \Omega$ such that for every $\eta \in T^q \cap \omega^m$, $\text{Good}_\eta$ occurs. Choosing $r_0, r_1$ as guaranteed by $\text{Good}_\eta$, we are done. \end{proof} \begin{lemma} \label{corollary to conditions for global minimality} Assume for a dense set of conditions $q \in {\mathbb{Q}}^*(K, \Sigma)$ there exists a sequence $\bar a = \langle a_\eta: \eta \in T^q \rangle$ such that: \begin{itemize} \item [(a)] Each $a_\eta \in (0, 1]_{{\mathbb{R}}}$. \item [(b)] If $\eta \in \hbox{Lim }(T^q),$ then $\lim_n \langle a_{\eta \restriction n}: n \geq \len(\hbox{rt}(T^q)) \rangle=0.$ \item [(c)] For every large enough $m$, we have \begin{enumerate} \item [$(\alpha)$] $1 > \Sigma\{a_\eta: \eta \in T^q \cap \omega^m \}$. \item [$(\beta)$] For any $\eta \in T^q \cap \omega^m$, if $\langle E_\nu: \nu \in \Suc_{T^q}(\eta) \rangle$ is a sequence of events each of probability $\leq a_\nu,$ then we have $a_\eta \geq \text{Prob} ($there is no $\mathfrak{d} \in \Sigma(\mathfrak{c}^p_\eta)$ with $\mathbf{nor}[\mathfrak{d}] \geq \mathbf{nor}[\mathfrak{c}^p_\eta]-1$ such that $\nu \in \text{pos}(\mathfrak{d}) \implies E_\nu$ does not occur). \end{enumerate} \end{itemize} Then the global minimality condition of Theorem \ref{minimal real} holds. \end{lemma} \begin{proof} By Lemma \ref{conditions for global minimality}. \end{proof} \section{A forcing notion satisfying the conditions of Theorem \ref{minimal real}} \label{example for minimality} In this section we introduce a forcing notion which satisfies the local and global minimality conditions, and hence adds a minimal real. Let $h: \omega \to \omega$ be a non-decreasing function with $\lim h(n)=\infty$ (e.g.
$n \mapsto \log_2(\log_2 (n))$ or $n \mapsto \log_*(n)$, where $\log_*(n)$ is defined by recursion as $\log_*(0)=\log_*(1)=1$, $\log_*(\beth_{m+1}(2))=\log_*(\beth_m(2))+1$, for $m<\omega,$\footnote{Where $\beth_m(2)$ is defined inductively as $\beth_0(2)=1$ and $\beth_{m+1}(2)=2^{\beth_m(2)}$.} and for each $n$ with $\beth_m(2) \leq n < \beth_{m+1}(2)$, $\log_*(n)=\log_*(\beth_m(2))$). Let $\mathbf{H}:\omega \to V$ be defined by $$\mathbf{H}(n)=(\max\{1, n\})^{h(n)}.$$ Let $$K=\{ t\in \text{TCR}[\mathbf{H}]: \mathbf{nor}[t]= \log_2(\log_2(|\text{pos}(t)|)) \}.$$ Also let $\Sigma: [K]^{\leq \omega} \to \mathcal{P}(K)$ be such that $\Sigma(\mathcal{S})$ is non-empty only when $\mathcal{S}$ is a singleton, and for every $t \in K,$ $$\Sigma(t) =\{ s \in K: \mathbf{val}[s] \subseteq \mathbf{val}[t] \}.$$ Clearly $(K, \Sigma)$ forms a tree-creating pair. Let ${\mathbb{Q}}_h$ be defined as follows: \begin{itemize} \item [(A)] $p \in {\mathbb{Q}}_h$ if \begin{enumerate} \item $p= \langle \mathfrak{c}^p_\eta: \eta \in T^p \rangle \in {\mathbb{Q}}^*(K, \Sigma)$. \item $T^p \subseteq \bigcup_{n<\omega} \prod_{i<n} \mathbf{H}(i)$ is a tree such that for each non-maximal node $\eta \in T^p$, $|\Suc_{T^p}(\eta)| \geq 4$. \item For $\eta \in T^p$, $\mathbf{nor}[\mathfrak{c}^p_\eta]=\mathbf{nor}(\Suc_{T^p}(\eta))= \log_2(\log_2(|\Suc_{T^p}(\eta)|))$. \item If $\eta \in \hbox{Lim }(T^p)$ and $i<\omega,$ then $\lim_n \dfrac{\mathbf{nor}(\Suc_{T^p}(\eta \restriction n))}{n^i}=\infty.$ \end{enumerate} \item [(B)] $p \leq q$ iff $p \leq_{{\mathbb{Q}}^*(K, \Sigma)} q.$ \end{itemize} \begin{lemma} The forcing notion ${\mathbb{Q}}_h$ satisfies the local and global minimality conditions of Theorem \ref{minimal real}. \end{lemma} \begin{proof} \underline{${\mathbb{Q}}_h$ satisfies the local minimality condition:} Suppose $\mathbf{nor}[\mathfrak{c}^p_\eta] \geq 1$ and $E$ is an equivalence relation on $\text{pos}(\mathfrak{c}^p_\eta)$. Consider the set $X=\{[\rho]_E: \rho \in \text{pos}(\mathfrak{c}^p_\eta) \}$ of equivalence classes. If there exists $\rho_* \in \text{pos}(\mathfrak{c}^p_\eta)$ such that $[\rho_*]_E$ has size $\geq \sqrt{|\text{pos}(\mathfrak{c}^p_\eta)|}$, then let $\mathfrak{d}$ be the creature with \[ \mathbf{val}[\mathfrak{d}] = \{ (\eta, \nu): \nu \in [\rho_*]_E \}. \] Clearly, $\mathbf{val}[\mathfrak{d}] \subseteq \mathbf{val}[\mathfrak{c}^p_\eta]$ and so $\mathfrak{d} \in \Sigma(\mathfrak{c}^p_\eta)$. Also, $E \restriction \text{pos}(\mathfrak{d})$ is trivial. We also have \begin{align*} \mathbf{nor}[\mathfrak{d}] &= \log_2(\log_2(|\text{pos}(\mathfrak{d})|)) \\ &\geq \log_2(\log_2(\sqrt{|\text{pos}(\mathfrak{c}^p_\eta)|})) \\ &= \log_2(\log_2(|\text{pos}(\mathfrak{c}^p_\eta)|)) - \log_2 2 \\ &= \log_2(\log_2(|\text{pos}(\mathfrak{c}^p_\eta)|)) -1 \\ &= \mathbf{nor}[\mathfrak{c}^p_\eta]-1, \end{align*} so we are done. Otherwise, each equivalence class $[\rho]_E$ has size less than $\sqrt{|\text{pos}(\mathfrak{c}^p_\eta)|}$. But then $|X| \geq \sqrt{|\text{pos}(\mathfrak{c}^p_\eta)|}$, so pick a representative $t_C \in C$ for each class $C \in X$ and let $\mathfrak{d}$ be the creature with \[ \mathbf{val}[\mathfrak{d}] = \{ (\eta, t_C): C \in X \}. \] Again, $\mathbf{val}[\mathfrak{d}] \subseteq \mathbf{val}[\mathfrak{c}^p_\eta]$ and so $\mathfrak{d} \in \Sigma(\mathfrak{c}^p_\eta)$. Also, $E \restriction \text{pos}(\mathfrak{d})$ is trivial.
As before, \begin{center} $\mathbf{nor}[\mathfrak{d}] = \log_2(\log_2(|\text{pos}(\mathfrak{d})|)) \geq \mathbf{nor}[\mathfrak{c}^p_\eta]-1$, \end{center} and so we are done. \underline{${\mathbb{Q}}_h$ satisfies the global minimality condition:} We check the conditions of Lemma \ref{corollary to conditions for global minimality}. Let $p \in {\mathbb{Q}}_h$ and $k<\omega.$ Then we can find $q \in {\mathbb{Q}}_h$, $m_* > k, \len(\hbox{rt}(T^p))$ and $m_0$ such that: \begin{itemize} \item $q \leq_k p.$ \item $k < m_0 < m_*.$ \item $\eta \in T^q \wedge \mathbf{nor}[\mathfrak{c}^q_\eta] \leq k \implies \len(\eta) < m_0.$ \item If $n \geq m_0,$ then $\langle |\Suc_{T^q}(\eta)|: \eta \in T^q \cap \omega^n \rangle$ is constant. Moreover, for some non-decreasing function $h_1 \leq h$, $h_1: [m_0, \omega) \to \omega \setminus \{0\},$ we have $ |\Suc_{T^q}(\eta)|=\len(\eta)^{2^{h_1(\len(\eta))}}$. \item $h_1(m_*) > k+ |T^q \cap \omega^{m_0}|$. \end{itemize} For $\eta \in T^q$, we define $b_\eta$ by \begin{center} $b_\eta=\text{Prob}$(for a random partition $S_0 \cup S_1$ of a set of size $m_\eta=|\text{pos}(\mathfrak{c}^q_\eta)|$, we have $\bigvee_{l<2} |S_l| < \sqrt{m_\eta}).$ \end{center} Then it is clear that \begin{center} $b_\eta \leq \dfrac{2\cdot \Sigma\{(m_\eta)^i: i < \sqrt{m_\eta} \}}{2^{m_\eta}}$ \end{center} (the factor $2$ accounts for $\bigvee_{l<2}$, and for each $i<\sqrt{m_\eta}$ there are at most $(m_\eta)^i$ partitions with $|S_l|=i$). So \[ b_\eta \leq \dfrac{(m_\eta)^{\sqrt{m_\eta}}}{2^{m_\eta}} = \dfrac{2^{\sqrt{m_\eta}\cdot \log_2(m_\eta)}}{ 2^{m_\eta}}. \] Thus for some $n_*,$ \[ \eta \in T^q, \len(\eta)=n \geq n_* \implies b_\eta \leq 2^{\sqrt{n^{2^{h_1(n)}}}\cdot 2^{h_1(n)}\cdot \log_2(n)}\cdot 2^{-(n^{2^{h_1(n)}})} \leq 2^{-(n^{2^{h_1(n)}/2^4})}. \] For $\eta \in T^q$, $\len(\eta) \geq n_*$, let \[ a_\eta = \Sigma\{ 2^{-(n^{2^{h_1(n)}}/3)}: n \geq \len(\eta) \}. \] Then, increasing $n_*$ if necessary, $a_\eta \leq 2^{-(n^{2^{h_1(n)}}/4)}$, where $n=\len(\eta)$, because \[ n^{2^{h_1(n)}} +1 \leq (n+1)^{2^{h_1(n)}} \leq (n+1)^{2^{h_1(n+1)}}, \] so the summands above decrease at least geometrically. We show that the $a_\eta$'s, $\eta \in T^q$, are as required. It is clear that each $a_\eta \in (0,1)_{{\mathbb{R}}}$. Also, if $\eta \in \hbox{Lim }(T^q),$ then clearly $\lim_n \langle a_{\eta \restriction n}: n \geq \len(\hbox{rt}(T^q)) \rangle = 0$ (as $a_\eta \leq 2^{-(n^{2^{h_1(n)}}/4)}$). Also, for $n \geq n_*$, we have \[ \Sigma\{a_\eta: \eta \in T^q \cap \omega^n \} \leq \Sigma\{2^{-(n^{2^{h_1(n)}}/4)}: \eta \in T^q \cap \omega^n \} = |T^q \cap \omega^n|\cdot 2^{-(n^{2^{h_1(n)}}/4)} < 1. \] Finally, clause (c)$(\beta)$ of Lemma \ref{corollary to conditions for global minimality} holds by the choice of the $b_\eta$'s and $a_\eta$'s. \end{proof} \begin{remark} We could also use the function $h(n)=\log_*(\log_*(n))$ and the norm $\mathbf{nor}[t]= \log_*(\log_*(|\text{pos}(t)|))$. In this case, there is no need to require that the tree $T^p$ satisfy the extra property ``for each non-maximal node $\eta \in T^p$, $|\Suc_{T^p}(\eta)| \geq 4$''. \end{remark} \begin{thebibliography}{99} \bibitem{carl} Carl, Merlin; Schlicht, Philipp; Infinite computations with random oracles. Notre Dame J. Form. Log. 58 (2017), no. 2, 249--270. \bibitem{ci-shelah} Ciesielski, Krzysztof; Shelah, Saharon; A model with no magic set. J. Symbolic Logic 64 (1999), no. 4, 1467--1490.
\bibitem{Grigorieff} Grigorieff, Serge; Combinatorics on ideals and forcing. Ann. Math. Logic 3 (1971), no. 4, 363--394. \bibitem{groszek} Groszek, Marcia J.; Combinatorics on ideals and forcing with trees. J. Symbolic Logic 52 (1987), no. 3, 582--593. \bibitem{jensen} Jensen, Ronald; Definable sets of minimal degree. Mathematical logic and foundations of set theory (Proc. Internat. Colloq., Jerusalem, 1968), pp. 122--128. North-Holland, Amsterdam, 1970. \bibitem{judah-shelah} Judah, Haim; Shelah, Saharon; Forcing minimal degree of constructibility. J. Symbolic Logic 56 (1991), no. 3, 769--782. \bibitem{mildenberger} Mildenberger, Heike; An introduction to forcing with creatures. A short course given at IPM, 2017. \bibitem{miller} Miller, Arnold W.; Rational perfect set forcing. Axiomatic set theory (Boulder, Colo., 1983), 143--159, Contemp. Math., 31, Amer. Math. Soc., Providence, RI, 1984. \bibitem{r-shelah} Roslanowski, Andrzej; Shelah, Saharon; Norms on possibilities. I. Forcing with trees and creatures. Mem. Amer. Math. Soc. 141 (1999), no. 671, xii+167 pp. \bibitem{sacks} Sacks, Gerald E.; Forcing with perfect closed sets. 1971 Axiomatic Set Theory (Proc. Sympos. Pure Math., Vol. XIII, Part I, Univ. California, Los Angeles, Calif., 1967), pp. 331--355, Amer. Math. Soc., Providence, R.I. \bibitem{schilhan} Schilhan, Jonathan; Tree forcing and definable maximal independent sets in hypergraphs, preprint. \end{thebibliography} \end{document}
\begin{document} \title{Alternating Iteratively Reweighted Minimization Algorithms for Low-Rank Matrix Factorization} \author{Paris V. Giampouras, Athanasios A. Rontogiannis and Konstantinos D. Koutroumbas \IEEEcompsocitemizethanks{\IEEEcompsocthanksitem The authors are with the Institute for Astronomy, Astrophysics, Space Applications and Remote Sensing at the National Observatory of Athens, Penteli, 15236, Greece.\protect} } \markboth{Journal of \LaTeX\ Class Files,~Vol.~14, No.~8, August~2015} {Giampouras \MakeLowercase{\textit{et al.}}: Alternating Iteratively Reweighted Minimization Algorithms for Low-Rank Matrix Factorization} \IEEEtitleabstractindextext{ \begin{abstract} Nowadays, the availability of large-scale data in disparate application domains urges the deployment of sophisticated tools for extracting valuable knowledge out of this huge bulk of information. In that vein, low-rank representations (LRRs), which seek low-dimensional embeddings of data, have naturally appeared. In an effort to reduce computational complexity and improve estimation performance, LRR has been viewed via a matrix factorization (MF) perspective. Recently, low-rank MF (LRMF) approaches have been proposed for tackling the inherent weakness of MF, i.e., the unawareness of the dimension of the low-dimensional space where the data reside. Herein, inspired by the merits of iterative reweighted schemes for rank minimization, we come up with a generic low-rank promoting regularization function. Then, focusing on a specific instance of it, we propose a regularizer that imposes column-sparsity jointly on the two matrix factors that result from MF, thus promoting low-rankness on the optimization problem. The problems of denoising, matrix completion and non-negative matrix factorization (NMF) are redefined according to the new LRMF formulation and solved via efficient Newton-type algorithms with proven theoretical guarantees as to their convergence and rates of convergence to stationary points. The effectiveness of the proposed algorithms is verified in diverse simulated and real data experiments. \end{abstract} \begin{IEEEkeywords} matrix factorization, low-rank, iteratively reweighted, alternating minimization, matrix completion, NMF. \end{IEEEkeywords}} \maketitle \IEEEdisplaynontitleabstractindextext \IEEEpeerreviewmaketitle \IEEEraisesectionheading{\section{Introduction}\label{sec:introduction}} Low-rank representation (LRR) of data has recently attracted great interest, since it appears in a wide spectrum of research fields and applications, such as signal processing, machine learning, quantum tomography, etc., \cite{theodoridis2015machine}. LRR shares similar characteristics with sparse representation and hence is in principle formulated as an NP-hard problem, \cite{fazel2002}. Convex relaxations have played a remarkable role in the course of making the problem tractable. In that respect, the nuclear norm has been extensively applied, offering favorable results, optimal recovery performance, as well as a solid theoretical understanding, \cite{recht}. However, in the case of high-dimensional and large-scale datasets, conventional convex LRR approaches are confronted with inherent limitations related to their high computational complexity, \cite{hastie2015matrix}. To overcome these limitations, matrix factorization (MF) methods have been introduced lately. MF gives rise to non-convex optimization problems and hence its theoretical understanding is a much more challenging task.
Notably, a great effort has been recently devoted towards deriving a comprehensive theoretical framework of MF with the goal to reach optimal recovery guarantees, \cite{Sun,ge2017,zhu}. MF presents significant computational merits by reducing the size of the emerging optimization problems. Thus, it leads to optimization algorithms of lower computational complexity as compared to relevant convex approaches. In addition, MF lies at the heart of a variety of problems dealing with the task of finding low-rank embeddings. In that respect, ubiquitous problems such as clustering, \cite{pompili}, blind source separation, matrix completion, \cite{wen}, etc., have been seen in the literature through the lens of MF. MF entails the use of two matrix factors with a fixed number of columns, which, in the most favorable case, coincides with the rank of the sought matrix. However, the rank of the matrix, which is usually much less than its dimensions, is unknown a priori. In light of this, a widespread approach is based on the following premise: overstate the number of columns of the matrix factors and then penalize their rank by using appropriate low-rank promoting regularizers. Along those lines, various regularizers have been recently proposed. Amongst them the most popular one is the variational characterization of the nuclear norm (proven to be a tight upper-bound of it), defined as the sum of the squared Frobenius norms of the factors \cite{srebro2005rank}. More recently, generalized versions of this approach have come to the scene. In that respect, in \cite{shang}, tight upper-bounds of the low-rank promoting Schatten-$p$ norms were presented under a general framework. In \cite{haeffele2014structured}, an alternative approach for promoting low-rankness via non-convex MF was described. The novelty of that approach comes from the incorporation of additional constraints on the matrix factors, giving thus rise to an interesting low-rank structured MF framework. In \cite{hastie2015matrix}, a fast algorithm based on the above-mentioned variational characterization of the nuclear norm is presented. The derived algorithm is amenable to handling incomplete big data, contrary to conventional convex and other non-MF based approaches. It should be noted that a common characteristic of all state-of-the-art methods is the following: although the rank of the product of the matrix factors may decrease as a result of the penalization process, {\it the number of columns of the matrix factors (which has initially been overstated) remains fixed throughout the execution of the minimization algorithms}. Hence, the per iteration complexity remains unaltered, even though the rank of the matrix factors may decrease considerably as the algorithms evolve. With the current work we address the latter (possibly undesirable in large-scale data applications) issue and propose a novel generic formulation for non-convex low-rank MF. To this end, recent ideas stemming from iterative reweighted approaches for low-rank matrix estimation, proposed in \cite{fornasier2011low,mohan2012iterative} as efficient alternatives to nuclear norm minimization, are now extended to the MF framework. This way, we come up with a novel alternating reweighted scheme for low-rank promotion in MF problems. As is shown, the recent low-rank MF schemes proposed in \cite{shang} can be cast as special cases of the proposed formulation by suitably selecting the reweighting matrices applied on the matrix factors.
Going one step further, we propose the selection of a common reweighting matrix that couples the matrix factors and leads to a joint column sparsity promoting regularization term, \cite{ssp2016,spars2017}. In doing so, {\it low-rank promotion now reduces to the task of jointly annihilating columns of the matrix factors}. Interestingly, {\it this way the computational complexity of the derived algorithms decreases progressively, since the size of the estimated matrix factors is reduced as the algorithms evolve}. In an effort to better highlight the efficiency and ubiquity of the proposed low-rank MF formulation, we address three popular problems in the machine learning literature, namely denoising, matrix completion and non-negative matrix factorization. These problems are accordingly formulated in Section 2. By exploiting novel optimization concepts, \cite{hong2016unified}, we appropriately minimize the arising non-smooth and non-separable cost functions. In this vein, novel second-order Newton-type algorithms are devised in Section 3, with the goal to effectively exploit inherent characteristics of the emerging optimization problems. Convergence analysis of the algorithms at stationary points and their rates of convergence are given in Section 4. In Section 5, the merits of the resulting algorithms in terms of estimation performance and computational complexity, compared to relevant state-of-the-art algorithms, are illustrated on simulated and real data experiments. In order to test the effectiveness of the proposed algorithms on real applications involving large-scale data, the problems of hyperspectral image denoising, matrix completion in movie recommender systems and music signal decomposition are employed. \section{Low-rank matrix factorization}\label{sec:lrmf} Low-rank matrix estimation per se has been addressed by a wealth of different approaches, lending itself to disparate applications. Focusing on the task of recovering low-rank matrices from linear measurements, we come up with the ubiquitous affine rank minimization problem, \cite{recht}, which is formulated as follows, \begin{align} \mathrm{min}\left[ \mathrm{rank}(\mathbf{X}) \right] \;\;\; \mathrm{s.t.} \;\;\; \mathbf{\mathcal{A}}(\mathbf{X})= \mathbf{b}, \label{pb_1:af_rank_minim} \end{align} where $\mathbf{\mathcal{A}}$ denotes the linear operator that maps $\mathbf{X}\in\mathcal{R}^{m\times n}$ to $\mathbf{b}\in\mathcal{R}^l$. Problem (\ref{pb_1:af_rank_minim}) is tantamount to solving the $\ell_0$ minimization problem on the singular values of $\mathbf{X}$ and hence is NP-hard. To this end, various relaxation schemes have come to the scene in the literature, many of which are based on the Schatten-$p$ norm \cite{Nie,Lu2014}. The Schatten-$p$ norm is defined as, \begin{align} \|\mathbf{X}\|_{\mathcal{S}_p} = \|\boldsymbol{\sigma}(\mathbf{X})\|_p, \end{align} where $\boldsymbol{\sigma}(\mathbf{X})$ denotes the vector of singular values of matrix $\mathbf{X}$ and $\|\cdot\|_p$ is the $\ell_p$ (quasi-)norm with $p\in[0,1]$. As is known, for $p=1$, the Schatten-$p$ norm reduces to the well-known nuclear norm $\|\mathbf{X}\|_{*}$, which has been proven to be the convex envelope of the rank, \cite{fazel2002}. Schatten-$p$ norms have played a significant role in numerous cases involving the rank minimization problem of (\ref{pb_1:af_rank_minim}), reformulating it as \begin{align} \mathrm{min} \|\mathbf{X}\|^p_{\mathcal{S}_p} \;\;\; \mathrm{s.t.} \;\;\; \mathbf{\mathcal{A}}(\mathbf{X})= \mathbf{b} \label{pb_1:schatten_rank_minim}.
\end{align} Nowadays, Schatten-$p$ norm based minimization has been seen via a more intriguing perspective, i.e., using an iterative reweighting approach. In this vein, inspired by iteratively reweighted least squares (LS) schemes used in place of $\ell_1$ norm minimization for imposing sparsity, \cite{Daubechies}, the authors of \cite{mohan2012iterative,fornasier2011low} propose to minimize a reweighted Frobenius norm, i.e., $\|\mathbf{X}\mathbf{W}^{\frac{1}{2}}\|^2_F$. The equivalence of the Schatten-$p$ norm and the ones minimized in \cite{mohan2012iterative,fornasier2011low} is mathematically expressed as follows, \begin{align} \|\mathbf{X}\|^p_{\mathcal{S}_p} & = \mathrm{tr}\{\left(\mathbf{X}^T\mathbf{X}\right)^{\frac{p}{2}}\} = \mathrm{tr}\{\left(\mathbf{X}^T\mathbf{X}\right)\left(\mathbf{X}^T\mathbf{X}\right)^{\frac{p-2}{2}}\} \nonumber \\ & = \mathrm{tr}\{\left(\mathbf{X}^T\mathbf{X}\right)\mathbf{W}\} = \|\mathbf{X}\mathbf{W}^{\frac{1}{2}}\|^2_F,\label{schafro} \end{align} where $\mathbf{W}$ is the symmetric weight matrix $\left(\mathbf{X}^T\mathbf{X}\right)^{\frac{p-2}{2}}$. This iterative reweighting scheme has been shown to offer significant merits in terms of the computational complexity of the derived algorithms, the estimation performance, as well as the rate of convergence. Recently, low-rank matrix estimation has been effectively tackled using a {\it matrix factorization} approach. The crux of the relevant methods is that a low-rank matrix can be well represented by a matrix product, i.e., $\mathbf{X}=\mathbf{U}\mathbf{V}^T$, with the inner dimension $r$ of the involved matrices much smaller than the outer dimensions, i.e., $r \ll \mathrm{min}(m,n)$. Needless to say, those ideas offer significant advantages when it comes to the processing of large-scale and high-dimensional datasets (where both $m$ and $n$ are huge), by reducing the size of the involved variables, thus decreasing both the required storage space, from $\mathcal{O}(mn)$ to $\mathcal{O}\left((m+n)r\right)$, and the computational complexity of the algorithms used. However, a downside of this approach is that an additional variable is brought up, i.e., the inner dimension $r$ of the factorization. The task of finding the actual $r$ (which coincides with the rank of matrix $\mathbf{X}$) is relevant to the rank minimization problem and is also referred to in the literature as dimensionality reduction, model order selection, etc. The latter has given rise to methods that select $r$ based on the minimization of various criteria such as the Akaike information criterion (AIC), the Bayesian information criterion (BIC), the minimum description length (MDL), \cite{squires2017rank}, etc. However, these methods can be computationally expensive, especially on large-scale datasets, since they require multiple runs of the algorithms. Modern approaches termed low-rank matrix factorization (LRMF) techniques, \cite{haeffele2014structured}, hinge on the following philosophy: a) overstate the rank $r$ of the product by using $d\geq r$ columns and then b) impose low-rankness thereof by utilizing appropriate norms. This rationale has given rise to LRMF techniques that solve the following, \begin{align} \mathrm{min}\left[ \mathrm{rank}(\mathbf{U}\mathbf{V}^T) \right] \;\;\; \mathrm{s.t.} \;\;\; \mathbf{\mathcal{A}}(\mathbf{UV}^T)= \mathbf{b}. \label{LRMF} \end{align} Problem (\ref{LRMF}) has been addressed in different ways in the literature.
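Before proceeding, a minimal numerical sketch of the identity (\ref{schafro}) may be helpful. The snippet below (written in Python/NumPy, with illustrative variable names of our choosing; it is an illustration only, not part of the algorithms developed in the sequel) checks that $\|\mathbf{X}\|^p_{\mathcal{S}_p}$ and $\|\mathbf{X}\mathbf{W}^{\frac{1}{2}}\|^2_F$, with $\mathbf{W}=(\mathbf{X}^T\mathbf{X})^{\frac{p-2}{2}}$, coincide for a randomly drawn full column rank matrix:
\begin{verbatim}
import numpy as np

def schatten_p(X, p):
    # ||X||_{S_p}^p : sum of the p-th powers of the singular values of X
    return np.sum(np.linalg.svd(X, compute_uv=False) ** p)

def reweighted_frobenius(X, p):
    # W = (X^T X)^{(p-2)/2}, formed via the eigendecomposition of X^T X;
    # X is assumed to have full column rank so that the power is well defined
    lam, Q = np.linalg.eigh(X.T @ X)
    W = Q @ np.diag(lam ** ((p - 2) / 2)) @ Q.T
    # ||X W^{1/2}||_F^2 = tr(X^T X W)
    return np.trace(X.T @ X @ W)

rng = np.random.default_rng(0)
X = rng.standard_normal((8, 5))     # full column rank with probability one
for p in (1.0, 0.5):
    print(p, schatten_p(X, p), reweighted_frobenius(X, p))
# the two printed values agree (up to numerical precision) for each p
\end{verbatim}
Note that the computation of $\mathbf{W}$ above assumes that $\mathbf{X}^T\mathbf{X}$ is invertible; for rank-deficient $\mathbf{X}$ and $p<2$ the matrix power must be treated with care, which is why practical reweighting schemes typically include a small smoothing term.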
Among other approaches, the tight upper-bound of the nuclear norm, defined as \begin{align} \|\mathbf{U}\mathbf{V}^T\|_{\ast} & = \underset{\mathbf{U}\in \mathcal{R}^{m\times d},\mathbf{V}\in\mathcal{R}^{n\times d} }{\mathrm{min}} \|\mathbf{U}\|_F\|\mathbf{V}\|_F \nonumber \\ & = \underset{\mathbf{U}\in \mathcal{R}^{m\times d},\mathbf{V}\in\mathcal{R}^{n\times d} }{\mathrm{min}} \frac{1}{2}\left(\|\mathbf{U}\|^2_F + \|\mathbf{V}\|^2_F \right), \label{upper_bound_nuclear} \end{align} is the most popular, \cite{srebro2005rank}. In fact, minimization of (\ref{upper_bound_nuclear}) favors low-rankness on $\mathbf{U}$ and $\mathbf{V}$ by inducing smoothness on these matrices. In \cite{shang,shang2016unified}, the authors derive the tight upper-bounds for all Schatten-$p$ norms with $p\in[0,1]$ (Theorem 1, \cite{shang2016unified}), i.e., \begin{align} \|\mathbf{UV}^T\|_{\mathcal{S}_p}^p& = \underset{\mathbf{U}\in\mathcal{R}^{m\times d},\mathbf{V}\in\mathcal{R}^{n\times d}}{\mathrm{min}} \|\mathbf{U}\|_{\mathcal{S}_{2p}}^{p} \|\mathbf{V}\|_{\mathcal{S}_{2p}}^{p} \nonumber \\ & = \underset{\mathbf{U}\in\mathcal{R}^{m\times d},\mathbf{V}\in\mathcal{R}^{n\times d}}{\mathrm{min}}\frac{1}{2}\left( \|\mathbf{U}\|_{\mathcal{S}_{2p}}^{2p} + \|\mathbf{V}\|_{\mathcal{S}_{2p}}^{2p} \right). \label{schatten_p_bounds} \end{align} A common denominator of the aforementioned low-rank matrix factorization approaches is their direct connection with the low-rank imposing Schatten-$p$ norms, since they provide tight upper-bounds thereof. In this work we aspire to apply ideas stemming from {\it iterative reweighting methods for low-rank matrix recovery} to this challenging low-rank matrix factorization scenario. Therefore, generalizing the above-described low-rank promoting norm upper bounds, we propose to minimize the sum of reweighted (as in (\ref{schafro})) Frobenius norms of the individual factors $\mathbf{U}$ and $\mathbf{V}$. Hence, the newly introduced low-rank inducing function is defined as follows, \begin{align} h(\mathbf{U},\mathbf{V}) = \frac{1}{2}\left(\|\mathbf{U}\mathbf{W}_{\mathbf{U}}^{\frac{1}{2}}\|^2_F + \|\mathbf{V}\mathbf{W}_{\mathbf{V}}^{\frac{1}{2}}\|_F^2\right), \label{proposed_lr_term} \end{align} where the weight matrices $\mathbf{W}_{\mathbf{U}}$ and $\mathbf{W}_{\mathbf{V}}$ are appropriately selected. In the sequel, we adhere to a special instance of (\ref{proposed_lr_term}) which arises by setting $\mathbf{W}_{\mathbf{U}} = \mathbf{W}_{\mathbf{V}}=\mathbf{W}$ with \begin{align} \mathbf{W} & = \mathrm{diag}\Big(\left(\|\boldsymbol{\mathit{u}}_1\|^2_2 + \|\boldsymbol{\mathit{v}}_1\|^2_2\right)^{p-1},\left(\|\boldsymbol{\mathit{u}}_2\|^2_2 + \|\boldsymbol{\mathit{v}}_2\|^2_2\right)^{p-1}, \nonumber \\ & \dots,\left(\|\boldsymbol{\mathit{u}}_d\|^2_2 + \|\boldsymbol{\mathit{v}}_d\|^2_2\right)^{p-1} \Big), \label{weight_matrix} \end{align} where $\mathit{\boldsymbol{u}}_i$ and $\mathit{\boldsymbol{v}}_i$ are the $i$th columns of $\mathbf{U}$ and $\mathbf{V}$, respectively\footnote{If $\mathbf{U},\mathbf{V}$ had orthogonal columns, $\mathbf{W}$ in (\ref{weight_matrix}) would be equal to $(\mathbf{U}^T\mathbf{U}+\mathbf{V}^T\mathbf{V})^{p-1}$.}. It can be easily observed that, by selecting a common $\mathbf{W}$ for $\mathbf{U}$ and $\mathbf{V}$ as defined in (\ref{weight_matrix}), matrices $\mathbf{U}$ and $\mathbf{V}$ are implicitly coupled w.r.t. their columns.
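As a small illustration of this coupling (again only a sketch, with variable names of our choosing), the following Python fragment forms $\mathbf{W}$ from the columns of $\mathbf{U}$ and $\mathbf{V}$ as in (\ref{weight_matrix}) and evaluates $h(\mathbf{U},\mathbf{V})$ in (\ref{proposed_lr_term}) with $\mathbf{W}_{\mathbf{U}}=\mathbf{W}_{\mathbf{V}}=\mathbf{W}$; for $p=\frac{1}{2}$ the value equals half the sum of the joint column norms, in agreement with the expression derived next:
\begin{verbatim}
import numpy as np

def coupled_weight_matrix(U, V, p):
    # diagonal entries (||u_i||^2 + ||v_i||^2)^(p-1), one per column pair
    col_energy = np.sum(U ** 2, axis=0) + np.sum(V ** 2, axis=0)
    return np.diag(col_energy ** (p - 1))

def h(U, V, p):
    # h(U, V) = 0.5 * (||U W^{1/2}||_F^2 + ||V W^{1/2}||_F^2), common W
    W = coupled_weight_matrix(U, V, p)
    return 0.5 * (np.trace(U @ W @ U.T) + np.trace(V @ W @ V.T))

rng = np.random.default_rng(1)
U, V = rng.standard_normal((6, 4)), rng.standard_normal((5, 4))
joint_norms = np.sqrt(np.sum(U ** 2, axis=0) + np.sum(V ** 2, axis=0))
print(h(U, V, p=0.5), 0.5 * np.sum(joint_norms))  # equal for p = 1/2
\end{verbatim}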
Setting now $p=\frac{1}{2}$ and substituting (\ref{weight_matrix}) in (\ref{proposed_lr_term}) yields \begin{align} h(\mathbf{U},\mathbf{V}) = \frac{1}{2}\sum^d_{i=1} \sqrt{\|\mathit{\boldsymbol{u}}_i\|^2_2 + \|\mathit{\boldsymbol{v}}_i\|^2_2}. \label{proposed_lrt} \end{align} Surprisingly, the resulting expression coincides with the (scaled by $1/2$) group sparsity inducing $\ell_1/\ell_2$ norm of the concatenated matrix $[\begin{smallmatrix} \mathbf{U} \\ \mathbf{V} \end{smallmatrix} ]$. Intuitively, the low-rank inducing properties of the joint column sparsity promoting term proposed in (\ref{proposed_lrt}) can be explained as follows. Let us consider the rank-one decomposition of the matrix product $\mathbf{U}\mathbf{V}^T$, \begin{align} \mathbf{U}\mathbf{V}^T = \sum^d_{i=1} \mathit{\boldsymbol{u}}_i\mathit{\boldsymbol{v}}_i^T. \label{rank_one_decomp} \end{align} Clearly, due to the subadditivity property of the rank, eliminating rank-one terms of the summation on the right-hand side of (\ref{rank_one_decomp}) results in a corresponding decrease of the rank of the product $\mathbf{U}\mathbf{V}^T$. Hence, capitalizing on (\ref{proposed_lrt}), we are led to LRMF optimization problems having the form, \small \begin{align} \underset{\mathbf{U}\in\mathcal{R}^{m\times d},\mathbf{V}\in\mathcal{R}^{n\times d}}{\mathrm{min}} \sum^d_{i=1} \sqrt{\|\mathit{\boldsymbol{u}}_i\|^2_2 + \|\mathit{\boldsymbol{v}}_i\|^2_2} \;\ \mathrm{s.t.} \;\ \mathbf{\mathcal{A}}(\mathbf{UV}^T)= \mathbf{b}. \label{proposed_optim_problem} \end{align} \normalsize It should be noted that the idea of imposing joint column sparsity first appeared in \cite{tan2009automatic}, albeit in a Bayesian framework tailored to the NMF problem. In \cite{tan2013automatic}, the optimization problem emerging via the maximum a posteriori probability (MAP) approach boils down to the minimization of the column sparsity promoting concave logarithm function. On the other hand, the proposed approach is related to the convex $\ell_1/\ell_2$ norm. The relevance of the proposed formulation to that of the Bayesian schemes proposed in \cite{tan2013automatic} is further highlighted in the next subsection, which describes an instance of problem (\ref{proposed_optim_problem}), as well as two other relevant problems. {\it Remark 1: The generic nature of the proposed low-rank promoting function defined in (\ref{proposed_lr_term}) is justified as it includes the previously mentioned MF-based low-rank promoting terms as special cases. Indeed, according to (\ref{schafro}) and by setting $\mathbf{W}_{\mathbf{U}}= (\mathbf{U}^T\mathbf{U})^{p-1}$ and $\mathbf{W}_{\mathbf{V}}= (\mathbf{V}^T\mathbf{V})^{p-1}$ in (\ref{proposed_lr_term}), we get the upper-bound of the Schatten-$p$ norm given in (\ref{schatten_p_bounds}), while for $p=1$, i.e., $\mathbf{W}_{\mathbf{U}} = \mathbf{W}_{\mathbf{V}} = \mathbf{I}_{d}$, we get the variational form of the nuclear norm defined in (\ref{upper_bound_nuclear}).} \subsection{Denoising, matrix completion and low-rank non-negative matrix factorization} {\textbf{Denoising}}. By assuming that a) the linear operator $\mathcal{A}$ reduces to the identity operator and b) our measurements $\mathbf{Y} \in \mathcal{R}^{m\times n}$ are corrupted by i.i.d. Gaussian noise, we come up with the following optimization problem, \small \begin{align} \underset{\mathbf{U},\mathbf{V}}{\mathrm{min}} \sum^d_{i=1} \sqrt{\|\mathit{\boldsymbol{u}}_i\|^2_2 + \|\mathit{\boldsymbol{v}}_i\|^2_2} \;\;\; \mathrm{s.t.} \;\;\; \|\mathbf{Y} - \mathbf{UV}^T\|^2_F \leq \epsilon.
\label{denoising} \end{align} \normalsize where $\epsilon$ is a small positive constant. By the Lagrange multiplier theorem, we know that (\ref{denoising}) can be equivalently written in the following form, \footnotesize \begin{align} \{\hat{\mathbf{U}},\hat{\mathbf{V}}\} = \underset{\mathbf{U},\mathbf{V}}{\mathrm{arg min}}\frac{1}{2}\|\mathbf{Y} - \mathbf{UV}^T\|^2_F + {\lambda} \sum^d_{i=1} \sqrt{\|\mathit{\boldsymbol{u}}_i\|^2_2 + \|\mathit{\boldsymbol{v}}_i\|^2_2} \label{proposed_denoising} \end{align} \normalsize where $\lambda$ denotes the Lagrange multiplier. {\it Proposition 1: The optimization problem (\ref{proposed_denoising}) is equivalent to the MAP minimization scheme arising by placing a Gaussian likelihood on $\mathbf{Y}$ and common, hierarchically formulated, group sparsity promoting Laplace priors on the columns of $\mathbf{U}$ and $\mathbf{V}$. } Proposition 1 can be proved following the same steps as those described in the Appendix of \cite{onlineVB2017}. We should point out that in the MAP-based schemes of \cite{tan2013automatic}, the prior of $\mathbf{U}$ and $\mathbf{V}$ is the Student-t distribution. For this reason, the corresponding MAP optimization problems involve the concave logarithm function defined on the norms of the columns of $\mathbf{U}$ and $\mathbf{V}$. On the contrary, in our case we come up with the $\ell_1/\ell_2$ norm of the matrix resulting from the concatenation of $\mathbf{U}$ and $\mathbf{V}$. As shown later, the simplicity and convexity of the proposed regularizer facilitate not only the derivation of new optimization algorithms, but also the theoretical analysis of their convergence behavior. \noindent{\textbf{Matrix completion}}. Another popular problem that follows the general model described by (\ref{proposed_optim_problem}) is matrix completion, as it is widely addressed via low-rank minimization. The main premise here lies in recovering the missing entries of a matrix $\mathbf{Y}$ assuming high coherence among its elements, which gives rise to a low-rank structured matrix $\mathbf{X}$. The problem is thus set up as, \begin{align} \mathrm{min}\left[ \mathrm{rank}(\mathbf{X})\right] \;\;\; \mathrm{s.t.} \;\;\; \mathcal{P}_{\Omega}(\mathbf{Y}) = \mathcal{P}_{\Omega}(\mathbf{X}), \end{align} where $\mathcal{P}_{\Omega}$ denotes the sampling operator on the set $\Omega$ of indices of matrix $\mathbf{Y}$ where information is present. In the matrix factorization setting, the incomplete matrix $\mathbf{Y}$ is approximated by a matrix $\mathbf{X}$ expressed as $\mathbf{X}=\mathbf{U}\mathbf{V}^T$. As mentioned above, the rank $r$ of the reconstructed matrix $\mathbf{X}$ is generally unknown and hence it is overstated with $d\geq r$. This necessitates the penalization of the rank of the product $\mathbf{U}\mathbf{V}^T$, which in our case takes place with the proposed low-rank promoting term, giving rise to the optimization problem, \begin{align} \underset{\mathbf{U},\mathbf{V}}{\mathrm{min}} \sum^d_{i=1} \sqrt{\|\mathit{\boldsymbol{u}}_i\|^2_2 + \|\mathit{\boldsymbol{v}}_i\|^2_2} \;\;\; \mathrm{s.t.} \;\;\; \mathcal{P}_{\Omega}(\mathbf{Y}) = \mathcal{P}_{\Omega}(\mathbf{UV}^T) \label{matrix_completion}. \end{align} Considering further the existence of additive i.i.d.
Gaussian noise in $\mathbf{Y}$, we get, \begin{align} \{\hat{\mathbf{U}},\hat{\mathbf{V}}\} = & \underset{\mathbf{U},\mathbf{V}}{\mathrm{arg min}}\frac{1}{2}\|\mathcal{P}_{\Omega}(\mathbf{Y}) - \mathcal{P}_{\Omega}(\mathbf{UV}^T)\|^2_F \nonumber \\ & + \lambda \sum^d_{i=1} \sqrt{\|\mathit{\boldsymbol{u}}_i\|^2_2 + \|\mathit{\boldsymbol{v}}_i\|^2_2}. \label{proposed_mc} \end{align} \noindent{\textbf{Low-rank NMF}}. Finally, we formulate the relevant low-rank constrained non-negative matrix factorization (NMF) problem. Low-rank NMF differs from classical NMF in the inclusion of the low-rank constraint on the factors $\mathbf{U}$ and $\mathbf{V}$, thus accounting for the fact that the true rank is unknown. As is shown in Section \ref{sec:experiments}, this is crucial in a class of applications such as music signal decomposition. The emerging optimization problem is given below, \begin{align} \{\hat{\mathbf{U}},\hat{\mathbf{V}}\} = \underset{\mathbf{U}\geq \mathbf{0},\mathbf{V}\geq \mathbf{0}}{\mathrm{arg min}}& \frac{1}{2}\|\mathbf{Y} - \mathbf{UV}^T\|^2_F \nonumber \\ &+ \lambda \sum^d_{i=1} \sqrt{\|\mathit{\boldsymbol{u}}_i\|^2_2 + \|\mathit{\boldsymbol{v}}_i\|^2_2} \label{proposed_low-rank_nmf} \end{align} where $\mathbf{U}\geq \mathbf{0}$ and $\mathbf{V}\geq \mathbf{0}$ stand for elementwise non-negativity of $\mathbf{U}$ and $\mathbf{V}$, respectively. Problem (\ref{proposed_low-rank_nmf}) deviates from the denoising one of (\ref{proposed_denoising}) in the incorporation of an additional constraint, i.e., the non-negativity of $\mathbf{U},\mathbf{V}$. In the next section three different algorithms, each one solving one of the problems of denoising, matrix completion and low-rank NMF, are developed and theoretically analyzed. \section{Minimization algorithms}\label{sec:algorithms} Herein, we present three new efficient block coordinate minimization (BCM) algorithms for denoising, matrix completion and low-rank NMF, respectively. The alternating minimization of the proposed low-rank promoting function defined in (\ref{proposed_lrt}) w.r.t. the ``blocks'' $\mathbf{U}$ and $\mathbf{V}$ lies at the heart of those algorithms. {\it Remark 2: The proposed low-rank promoting regularizer is a) non-smooth and b) non-separable w.r.t. $\mathbf{U}$ and $\mathbf{V}$.} Both of the above-mentioned properties, i.e., non-smoothness and non-separability, induce severe difficulties in the optimization task that call for appropriate handling. More specifically, as has been shown in \cite{tseng2001convergence}, BCM schemes might be led to irregular points, i.e., coordinate-wise minima that are not necessarily stationary points of the minimized cost function. In light of this, we follow a simple smoothing approach by including a small positive constant $\eta$ in the proposed regularizer, which becomes, \begin{align} \hat{h}(\mathbf{U},\mathbf{V}) = \sum^d_{i=1} \sqrt{\|\mathit{\boldsymbol{u}}_i\|^2_2 + \|\mathit{\boldsymbol{v}}_i\|^2_2 + \eta^2}. \label{proposed_low_rank_term} \end{align} This way we alleviate singular points, i.e., points where the regularizer is not differentiable, and the resulting optimization problems become smooth. On the other hand, non-separability poses obstacles in getting closed-form expressions for the optimization variables $\mathbf{U}$ and $\mathbf{V}$. For this reason, each of the associated optimization problems is reformulated using appropriate relaxation schemes. By working in an alternating fashion, each of these schemes results in closed-form expressions.
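To make the above formulations concrete, a minimal sketch of evaluating the (smoothed) matrix completion objective corresponding to (\ref{proposed_mc}) and (\ref{proposed_low_rank_term}) is given below; the boolean-mask implementation of $\mathcal{P}_{\Omega}$, the variable names and the value of $\eta$ are illustrative choices and do not reproduce the algorithms of the following subsections:
\begin{verbatim}
import numpy as np

def smoothed_regularizer(U, V, eta):
    # \hat{h}(U, V) = sum_i sqrt(||u_i||^2 + ||v_i||^2 + eta^2)
    return np.sum(np.sqrt(np.sum(U ** 2, axis=0)
                          + np.sum(V ** 2, axis=0) + eta ** 2))

def completion_objective(Y, mask, U, V, lam, eta=1e-8):
    # 0.5 * ||P_Omega(Y) - P_Omega(U V^T)||_F^2 + lam * \hat{h}(U, V)
    residual = mask * (Y - U @ V.T)   # P_Omega acts as an entrywise mask
    return 0.5 * np.sum(residual ** 2) + lam * smoothed_regularizer(U, V, eta)

rng = np.random.default_rng(2)
m, n, d = 30, 20, 8
Y = rng.standard_normal((m, 3)) @ rng.standard_normal((3, n))  # rank-3 data
mask = rng.random((m, n)) < 0.5                                # observed set
U, V = rng.standard_normal((m, d)), rng.standard_normal((n, d))
print(completion_objective(Y, mask, U, V, lam=0.1))
\end{verbatim}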
Next, the proposed algorithms that solve denoising, matrix completion and non-negative matrix factorization are analytically described. \subsection{Denoising} In this section, we present a new algorithm designed for solving the denoising problem given in (\ref{proposed_denoising}). To this end, let us first define the respective cost function as, \small \begin{align} f(\mathbf{U},\mathbf{V}) = \frac{1}{2}\|\mathbf{Y} - \mathbf{U}\mathbf{V}^T\|_F^2 + \lambda \sum^d_{i=1} \sqrt{\|\mathit{\boldsymbol{u}}_i\|^2_2 + \|\mathit{\boldsymbol{v}}_i\|^2_2 + \eta^2}. \label{cost_function_denoising} \end{align} \normalsize Minimizing (\ref{cost_function_denoising}) alternately w.r.t. $\mathbf{U}$ and $\mathbf{V}$ in closed form is infeasible, since exact analytical expressions cannot be obtained as a result of the non-separable nature of the square root. Instead, at each iteration $k+1$ we solve two distinct subproblems: a) given the latest available update $\mathbf{V}_k$ of $\mathbf{V}$, we minimize an approximate cost function w.r.t. $\mathbf{U}$ to get $\mathbf{U}_{k+1}$, and b) we use $\mathbf{U}_{k+1}$ in order to minimize another approximate cost function w.r.t. the second block variable of our problem, i.e., matrix $\mathbf{V}$. Following the block successive upper-bound minimization (BSUM) philosophy, \cite{razaviyayn2013unified,hong2016unified}, we minimize at each iteration local tight upper bounds of the respective cost functions. That said, $\mathbf{U}$ is updated by minimizing an approximate second-order Taylor expansion of $f(\mathbf{U},\mathbf{V}_k)$ around the point $(\mathbf{U}_k,\mathbf{V}_k)$. Likewise, an approximate second-order Taylor expansion of $f(\mathbf{U}_{k+1},\mathbf{V})$ around $(\mathbf{U}_{k+1},\mathbf{V}_k)$ is utilized for obtaining $\mathbf{V}_{k+1}$. More specifically, $\mathbf{U}_{k+1}$ is computed by \begin{align} \mathbf{U}_{k+1} = \underset{\mathbf{U}}{\mathrm{argmin}}\; l(\mathbf{U}|\mathbf{U}_k,\mathbf{V}_k), \label{minUk} \end{align} where, \small \begin{align} l(\mathbf{U}|\mathbf{U}_k,\mathbf{V}_k) &= f(\mathbf{U}_k,\mathbf{V}_k) + \mathrm{tr}\{(\mathbf{U}-\mathbf{U}_k)^T\nabla_{\mathbf{U}}f(\mathbf{U}_k,\mathbf{V}_k)\} + \nonumber \\ &\frac{1}{2}\mathrm{vec}(\mathbf{U}-\mathbf{U}_k)^T\bar{\mathbf{H}}_{\mathbf{U}_k}\mathrm{vec}(\mathbf{U}-\mathbf{U}_k) \label{eq:upper_bound_l} \end{align} \normalsize and $\mathrm{vec}(\cdot)$ denotes the row vectorization operator. In (\ref{eq:upper_bound_l}), the true Hessian $\mathbf{H}_{\mathbf{U}_k}$ of $f(\mathbf{U},\mathbf{V}_k)$ at $\mathbf{U}_k$ has been approximated by the $md \times md$ positive-definite block diagonal matrix $\bar{\mathbf{H}}_{\mathbf{U}_k}$, which is expressed as \begin{equation} \bar{\mathbf{H}}_{\mathbf{U}_k} = \left[ \begin{array}{c c c c} \tilde{\mathbf{H}}_{\mathbf{U}_k} & \mathbf{0} & \dots & \mathbf{0} \\ \mathbf{0} & \tilde{\mathbf{H}}_{\mathbf{U}_k} & \ddots & \vdots \\ \vdots & \ddots & \ddots & \mathbf{0} \\ \mathbf{0} & \dots & \mathbf{0} & \tilde{\mathbf{H}}_{\mathbf{U}_k} \end{array} \right].
\label{hessian_bd} \end{equation} In the case of denoising (for reasons that will be explained later) the $d\times d$ diagonal block $\tilde{\mathbf{H}}_{\mathbf{U}_k}$ is defined as \begin{align} \tilde{\mathbf{H}}_{\mathbf{U}_k} = \mathbf{V}^T_k\mathbf{V}_k + \lambda\mathbf{D}_{(\mathbf{U}_k,\mathbf{V}_k)} \label{huk} \end{align} with \begin{align} \mathbf{D}_{(\mathbf{U},\mathbf{V})} = \mathrm{diag}\Big(\frac{1}{\sqrt{\|\boldsymbol{\mathit{u}}_1\|^2_2 + \|\boldsymbol{\mathit{v}}_1\|^2_2 + \eta^2}}, \nonumber \\ \frac{1}{\sqrt{\|\boldsymbol{\mathit{u}}_2\|^2_2 + \|\boldsymbol{\mathit{v}}_2\|^2_2 + \eta^2}},\dots,\frac{1}{\sqrt{\|\boldsymbol{\mathit{u}}_d\|^2_2 + \|\boldsymbol{\mathit{v}}_d\|^2_2 + \eta^2}}\Big). \label{definition_D} \end{align} As is shown in the next section, due to the form of $\bar{\mathbf{H}}_{\mathbf{U}_k}$ in (\ref{hessian_bd}) and (\ref{huk}) and its relation to the exact Hessian $\mathbf{H}_{\mathbf{U}_k}$ of $f(\mathbf{U},\mathbf{V}_k)$ at $\mathbf{U}_k$, $l(\mathbf{U}|\mathbf{U}_k,\mathbf{V}_k)$ bounds $f(\mathbf{U},\mathbf{V}_k)$ from above and hence the conditions set by the BSUM framework are satisfied. Actually, the approximation of the exact Hessian by (\ref{hessian_bd}) leads to a closed-form expression for updating $\mathbf{U}$ and a dramatic decrease of the required computational complexity, as will be further explained below. Following a similar path as above, we come up with appropriate upper-bound functions for updating $\mathbf{V}$, i.e., \begin{align} \mathbf{V}_{k+1} = \underset{\mathbf{V}}{\mathrm{argmin}}\; g(\mathbf{V}|\mathbf{U}_{k+1},\mathbf{V}_k) \label{minVk} \end{align} with \footnotesize \begin{align} g(\mathbf{V}|\mathbf{U}_{k+1},\mathbf{V}_k) &= f(\mathbf{U}_{k+1},\mathbf{V}_k) + \mathrm{tr}\{(\mathbf{V}-\mathbf{V}_k)^T\nabla_{\mathbf{V}}f(\mathbf{U}_{k+1},\mathbf{V}_k)\} + \nonumber \\ &\frac{1}{2}\mathrm{vec}(\mathbf{V}-\mathbf{V}_k)^T\bar{\mathbf{H}}_{\mathbf{V}_k}\mathrm{vec}(\mathbf{V}-\mathbf{V}_k) \label{eq:upper_bound_g} \end{align} \normalsize and $\bar{\mathbf{H}}_{\mathbf{V}_k}$ being a block diagonal $nd \times nd$ matrix (similar in structure to $\bar{\mathbf{H}}_{\mathbf{U}_k}$) whose $d\times d$ diagonal blocks $\tilde{\mathbf{H}}_{\mathbf{V}_k}$ are defined as \begin{align} \tilde{\mathbf{H}}_{\mathbf{V}_k} = \mathbf{U}^T_{k+1}\mathbf{U}_{k+1} + \lambda\mathbf{D}_{(\mathbf{U}_{k+1},\mathbf{V}_{k})}. \label{hvk} \end{align} By solving (\ref{minUk}) and (\ref{minVk}) we obtain analytical expressions for $\mathbf{U}_{k+1}$ and $\mathbf{V}_{k+1}$ that constitute the main steps of the proposed denoising algorithm given in Algorithm 1. {\it Remark 3: Interestingly, the update formulas for $\mathbf{U}$ and $\mathbf{V}$ derived above could have also been obtained from iteratively reweighted least squares (IRLS) minimization schemes \cite{beck2015convergence}. Indeed, the IRLS algorithm solves (\ref{minUk}) with $l(\mathbf{U}|\mathbf{U}_k,\mathbf{V}_k)$ defined as, \small \begin{align} l(\mathbf{U}|\mathbf{U}_k,\mathbf{V}_k) = \frac{1}{2} \|\mathbf{Y} - \mathbf{UV}_k^T\|^2_F + \frac{\lambda}{2} \sum^d_{i=1}\frac{\|\mathit{\boldsymbol{u}}_i\|^2_2 + \|\mathit{\boldsymbol{v}}^k_i\|^2_2 + \eta^2}{\sqrt{\|\mathit{\boldsymbol{u}}^k_i\|^2_2 + \|\mathit{\boldsymbol{v}}^k_i\|^2_2 + \eta^2}} \nonumber \end{align} \normalsize and (\ref{minVk}) with a similar definition for $g(\mathbf{V}|\mathbf{U}_{k+1},\mathbf{V}_k)$.
It can be shown that, by solving these two new optimization problems, we get exactly the same closed-form expressions for $\mathbf{U}_{k+1}$ and $\mathbf{V}_{k+1}$ as previously. } {\it Remark 4: For $\lambda>0$, the approximate Hessian matrices $\bar{\mathbf{H}}_{\mathbf{U}_k}$ and $\bar{\mathbf{H}}_{\mathbf{V}_k}$ are always positive definite and hence invertible. In other words, both $l(\mathbf{U}|\mathbf{U}_k,\mathbf{V}_k)$ and $g(\mathbf{V}|\mathbf{U}_{k+1},\mathbf{V}_k)$ are strictly convex and hence have unique minimizers. In addition, since approximations of the exact Hessians are used in the two block problems, we end up with quasi-Newton type update formulas for $\mathbf{U}$ and $\mathbf{V}$.} \begin{table} \centering \title{Algorithm 1: Alternating iteratively reweighted least squares (AIRLS) denoising algorithm} \begin{tabular}{|l|} \hline \\ Algorithm 1: Alternating iteratively reweighted least \\ squares (AIRLS) denoising algorithm\\ \hline Input: $\mathbf{Y},\lambda>0$ \\ Initialize: $k=0, \mathbf{V}_0,\mathbf{U}_0, \mathbf{D}_{(\mathbf{U}_0,\mathbf{V}_0)}$ \\ \bf{repeat}\\ \hspace{0.5cm} $\mathbf{U}_{k+1} = \mathbf{Y}\mathbf{V}_{k}\left(\mathbf{V}^T_{{k}}\mathbf{V}_{k} + \lambda \mathbf{D}_{(\mathbf{U}_{k},\mathbf{V}_{k})}\right)^{-1}$ \\ \hspace{0.5cm} $\mathbf{V}_{k+1} = \mathbf{Y}^T\mathbf{U}_{k+1}\left(\mathbf{U}^T_{k+1}\mathbf{U}_{k+1} + \lambda \mathbf{D}_{(\mathbf{U}_{k+1},\mathbf{V}_{k})}\right)^{-1} $\\ \hspace{0.5cm}$k=k+1$\\ \bf{until} {\it convergence} \\ Output: $\hat{\mathbf{U}} = \mathbf{U}_{k+1}, \hat{\mathbf{V}} = \mathbf{V}_{k+1}$ \\ \hline \end{tabular} \end{table} \normalsize \subsection{Matrix completion} Next, the matrix completion problem, under the matrix factorization setting stated in (\ref{proposed_mc}), is addressed. As mentioned earlier, matrix factorization offers scalability, making the derived algorithms amenable to processing big and high-dimensional data. It should be emphasized that in the proposed formulation of the problem (\ref{proposed_mc}), the impediments arising from the low-rank promoting term (Remark 2) are now complemented by the difficulty of obtaining computationally efficient matrix-wise updates for $\mathbf{U}$ and $\mathbf{V}$, due to the presence of the sampling operator $\mathcal{P}_{\Omega}$ in the data fitting term. That said, the cost function is now modified as \small \begin{align} f(\mathbf{U},\mathbf{V}) = \frac{1}{2}\|\mathcal{P}_{\Omega}\left(\mathbf{Y} - \mathbf{U}\mathbf{V}^T\right)\|_F^2 + \lambda \sum^d_{i=1} \sqrt{\|\mathit{\boldsymbol{u}}_i\|^2_2 + \|\mathit{\boldsymbol{v}}_i\|^2_2 + \eta^2}. \label{cost_function_matrix_completion} \end{align} \normalsize As in the denoising problem, we utilize quadratic upper-bound functions based on approximate second-order Taylor expansions. Again, at each iteration, $\mathbf{U}$ and $\mathbf{V}$ are alternately updated by minimizing $l(\mathbf{U}|\mathbf{U}_k,\mathbf{V}_k)$ and $g(\mathbf{V}|\mathbf{U}_{k+1},\mathbf{V}_k)$ defined in (\ref{eq:upper_bound_l}) and (\ref{eq:upper_bound_g}), with $\bar{\mathbf{H}}_{\mathbf{U}_k}$ and $\bar{\mathbf{H}}_{\mathbf{V}_k}$ as given before, but with $f(\mathbf{U},\mathbf{V})$ now defined as in (\ref{cost_function_matrix_completion}). The resulting update formulas are shown in Algorithm 2, where the new AIRLS matrix completion algorithm is presented.
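For concreteness, a minimal NumPy sketch of the two closed-form steps of Algorithm 1 is given below; the column pruning mechanism discussed later is omitted, and the random initialization as well as the relative-change stopping test are our own choices rather than part of the algorithm specification. For matrix completion, the same loop structure applies with the residual of the data fitting term restricted to the observed entries, leading to the additive quasi-Newton steps of Algorithm 2.
\begin{verbatim}
import numpy as np

def airls_denoise(Y, d=100, lam=1.0, eta=1e-8, max_iter=500, tol=1e-4, seed=0):
    # Sketch of the Algorithm 1 iterations: Y (m x n) is approximated by U V^T,
    # with U (m x d), V (n x d) and lam the low-rank promoting parameter.
    m, n = Y.shape
    rng = np.random.default_rng(seed)
    U = rng.standard_normal((m, d))
    V = rng.standard_normal((n, d))
    for _ in range(max_iter):
        X_old = U @ V.T
        # D_(U_k, V_k): diagonal reweighting matrix at the current iterate
        D = np.diag(1.0 / np.sqrt((U**2).sum(0) + (V**2).sum(0) + eta**2))
        U = Y @ V @ np.linalg.inv(V.T @ V + lam * D)        # closed-form U-update
        # D_(U_{k+1}, V_k): recomputed with the new U and the old V
        D = np.diag(1.0 / np.sqrt((U**2).sum(0) + (V**2).sum(0) + eta**2))
        V = Y.T @ U @ np.linalg.inv(U.T @ U + lam * D)      # closed-form V-update
        if np.linalg.norm(U @ V.T - X_old) <= tol * np.linalg.norm(X_old):
            break
    return U, V
\end{verbatim}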
{\it Remark 5: The gain of using matrices $\bar{\mathbf{H}}_{\mathbf{U}_k}$ and $\bar{\mathbf{H}}_{\mathbf{V}_k}$ in the approximation of the exact Hessians of $f(\mathbf{U},\mathbf{V})$ (given either by (\ref{cost_function_denoising}) or (\ref{cost_function_matrix_completion})) w.r.t. $\mathbf{U}$ and $\mathbf{V}$ is twofold. Not only do we remain in the BSUM framework, which offers favorable theoretical properties, but we are also able to update $\mathbf{U}$ and $\mathbf{V}$ at a very low computational cost. As can be noticed in Algorithms 1 and 2, the inversions of $\bar{\mathbf{H}}_{\mathbf{U}_k}$ and $\bar{\mathbf{H}}_{\mathbf{V}_k}$ involved in the updates of $\mathbf{U}$ and $\mathbf{V}$ reduce to the inversion of the $d\times d$ matrices $\tilde{\mathbf{H}}_{\mathbf{U}_k}$ and $\tilde{\mathbf{H}}_{\mathbf{V}_k}$, thus inducing a complexity in the order of $\mathcal{O}(d^3)$. In contrast, utilization of the exact Hessians w.r.t. $\mathbf{U}$ and $\mathbf{V}$ would have given rise to inversions with a much higher computational complexity, i.e., $\mathcal{O}(\mathrm{max}(m,n)\times d^3)$.} \begin{table} \centering \title{Algorithm 2: Alternating iteratively reweighted least squares matrix completion algorithm} \begin{tabular}{|l|} \hline \\ Algorithm 2: AIRLS matrix completion (AIRLS-MC) \\ algorithm \\ \hline Input: $\mathbf{Y},\lambda>0$ \\ Initialize: $k=0, \mathbf{U}_0,\mathbf{V}_0,\mathbf{D}_{(\mathbf{U}_0,\mathbf{V}_0)}$ \\ \bf{repeat} \\ \hspace{0.5cm} $\mathbf{U}_{k+1} = \mathbf{U}_{k} - \Big( \mathcal{P}_{\Omega}\left(\mathbf{U}_{k}\mathbf{V}^T_{{k}} -\mathbf{Y}\right)\mathbf{V}_{k} $ \\ \hspace{0.5cm} $ + \lambda\mathbf{U}_{k}\mathbf{D}_{(\mathbf{U}_{k},\mathbf{V}_{k})}\Big)\left(\mathbf{V}^T_{{k}}\mathbf{V}_{k} + \lambda\mathbf{D}_{(\mathbf{U}_{k},\mathbf{V}_{k})}\right)^{-1}$ \\ \hspace{0.5cm} $\mathbf{V}_{k+1} = \mathbf{V}_{k} - \Big(\mathcal{P}_{\Omega}\left(\mathbf{V}_{k}\mathbf{U}_{k+1}^T - \mathbf{Y}^T \right)\mathbf{U}_{k+1}$ \\ \hspace{0.5cm} $ + \lambda\mathbf{V}_{k}\mathbf{D}_{(\mathbf{U}_{k+1},\mathbf{V}_{k})}\Big)\left(\mathbf{U}^T_{k+1}\mathbf{U}_{k+1} + \lambda\mathbf{D}_{(\mathbf{U}_{k+1},\mathbf{V}_{k})}\right)^{-1}$\\ \hspace{0.5cm} $k=k+1$\\ \bf{until} {\it convergence} \\ Output: $\hat{\mathbf{U}} = \mathbf{U}_{k+1}, \hat{\mathbf{V}} = \mathbf{V}_{k+1}$ \\ \hline \end{tabular} \end{table} \subsection{Non-negative matrix factorization} In what follows, we present a projected Newton-type method for efficiently addressing the nonnegative matrix factorization problem. It is worth noting that we are now dealing with a constrained optimization problem, since the feasible set of the matrices $\mathbf{U}$ and $\mathbf{V}$ contains only elementwise nonnegative matrices. Following the same path as above, we aim at exploiting the curvature information of the formed cost function. However, the constrained nature of the NMF problem induces some subtleties that need to be properly handled. More specifically, the proposed alternating minimization algorithm must now update the matrices $\mathbf{U}$ and $\mathbf{V}$ so that they a) always belong to the feasible set and b) yield a descent direction of the cost function at each iteration. The proposed scheme is along the lines of the NMF algorithm proposed in \cite{gong2012efficient}. Each update of the factors makes use of the projected Newton method introduced in \cite{bertsekas1982projected}. Next, the minimization subproblems for updating the factors $\mathbf{U}$ and $\mathbf{V}$ are detailed.
As in the previous algorithms, surrogate quadratic functions of $f(\mathbf{U},\mathbf{V}_k)$ and $f(\mathbf{U}_{k+1},\mathbf{V})$ are required for updating matrices $\mathbf{U}$ and $\mathbf{V}$ with $f(\mathbf{U},\mathbf{V})$ being the same as in eq. (\ref{cost_function_denoising}), but now the entries of $\mathbf{U}$ and $\mathbf{V}$ belong to the set of nonnegative reals. Let us now consider the so-called set of {\it active constraints} defined w.r.t. each row $\mathbf{u}_i$ of $\mathbf{U}$ at iteration $k$ as \begin{align} \mathcal{I}^k_{\mathbf{u}_i} = \{j| 0 \leq {u}^k_{ij} \leq \epsilon^k, [\nabla_{{\mathbf{U}}} f(\mathbf{U}_k,\mathbf{V}_k)]_{ij}>0 \}, \end{align} where $\epsilon^k = \mathrm{min}(\varepsilon,\|\mathbf{U}_k - \nabla_{\mathbf{U}}f(\mathbf{U}_k,\mathbf{V}_k)\|^2_F)$ (with $\varepsilon$ a small positive constant). A similar set $\mathcal{I}^k_{\mathbf{v}_i}$ is defined based on the rows $\mathbf{v}_i$ of matrix $\mathbf{V}$ i.e., \small \begin{align} \mathcal{I}^k_{\mathbf{v}_i} = \{j| 0 \leq {v}^k_{ij} \leq \epsilon^k, [\nabla_{{\mathbf{V}}} f(\mathbf{U}_{k+1},\mathbf{V}_k)]_{ij}>0 \}. \end{align}\normalsize As is analytically explained in \cite{gong2012efficient}, these sets contain the coordinates of the row elements of matrices $\mathbf{U}$ and $\mathbf{V}$ that belong to the boundaries of the constrained sets, and at the same time are stationary at iteration $k$. To derive a projected Newton NMF algorithm, we replace the exact Hessian of each subproblem, with a positive definite matrix that has been partially diagonalized at each iteration w.r.t. the sets of active constraints defined above. The positive definite matrices utilized in this case, denoted as $\bar{\mathbf{H}}^{\mathcal{I}_{\mathbf{U}}}_{\mathbf{U}}$ and $\bar{\mathbf{H}}^{\mathcal{I}_{\mathbf{V}}}_{\mathbf{V}}$, in analogy to $\bar{\mathbf{H}}_{\mathbf{U}}$ and $\bar{\mathbf{H}}_{\mathbf{V}}$ used in the cases of denoising and matrix completion, are block diagonal, but consist of $m$ and $n$, respectively, $d\times d$ {\it distinct} diagonal blocks. That is to say, the $i$th diagonal blocks of these matrices at iteration $k$, namely $\tilde{\mathbf{H}}^{\mathcal{I}_{\mathbf{u}_i}^k}_{\mathbf{U}}$ and $\tilde{\mathbf{H}}^{\mathcal{I}_{\mathbf{v}_i}^k}_{\mathbf{V}}$, are partially diagonalized versions of the $d\times d$ matrices $\tilde{\mathbf{H}}_{\mathbf{U}_k}$ and $\tilde{\mathbf{H}}_{\mathbf{V}_k}$ defined in (\ref{huk}) and (\ref{hvk}). More specifically, \[ [\tilde{\mathbf{H}}^{\mathcal{I}^k_{\mathbf{u}_i}}_{\mathbf{U}}]_{pl} = \begin{cases} 0, \text{if} \ p\neq l, \text{and either}\ p\in \mathcal{I}^k_{\mathbf{u}_i} \ \text{or}\ l \in \mathcal{I}^k_{\mathbf{u}_i}\\ [\tilde{\mathbf{H}}_{\mathbf{U}_k}]_{pl} \ \text{otherwise} \end{cases} \] and $\tilde{\mathbf{H}}^{\mathcal{I}^k_{\mathbf{v}_i}}_{\mathbf{V}}$ is defined similarly. 
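The construction of the active sets and of the partially diagonalized Hessian blocks can be sketched as follows; this is a minimal NumPy illustration with our own function names, in which each active set $\mathcal{I}^k_{\mathbf{u}_i}$ is represented as one row of a boolean mask and the threshold $\epsilon^k$ follows the definition given above.
\begin{verbatim}
import numpy as np

def active_mask(W, grad_W, eps=1e-6):
    # Boolean mask whose i-th row encodes the active set of the i-th row of W:
    # entries close to the non-negativity boundary with a positive partial derivative.
    eps_k = min(eps, np.linalg.norm(W - grad_W, 'fro')**2)
    return (W >= 0.0) & (W <= eps_k) & (grad_W > 0.0)

def partially_diagonalize(H, active_row):
    # Zero the off-diagonal entries of the d x d block H in the rows/columns
    # indexed by the active set of a single row of the factor.
    H = H.copy()
    for p in np.where(active_row)[0]:
        diag_p = H[p, p]
        H[p, :] = 0.0
        H[:, p] = 0.0
        H[p, p] = diag_p
    return H
\end{verbatim}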
Based on the above, the quadratic surrogate functions $l(\mathbf{U}|\mathbf{U}_k,\mathbf{V}_k)$ and $g(\mathbf{V}|\mathbf{U}_{k+1},\mathbf{V}_k)$ are now expressed as, \small \begin{align} l(\mathbf{U}|\mathbf{U}_k,\mathbf{V}_k) = f(\mathbf{U}_k,\mathbf{V}_k)+ \mathrm{tr}\{\left(\mathbf{U}-\mathbf{U}_k\right)^T\nabla_{\mathbf{U}} f(\mathbf{U}_k,\mathbf{V}_k)\} \nonumber \\ + \frac{1}{2\alpha^k_{\mathbf{U}} }\mathrm{vec}\left(\mathbf{U}-\mathbf{U}_k\right)^T\bar{\mathbf{H}}^{{\mathcal{I}}^k_{\mathbf{U}}}_{\mathbf{U}}\mathrm{vec}\left(\mathbf{U}-\mathbf{U}_k\right) \end{align} \normalsize and \small \begin{align} g(\mathbf{V}|\mathbf{U}_{k+1},\mathbf{V}_k)& = f(\mathbf{U}_{k+1},\mathbf{V}_k) +\nonumber \\ & \mathrm{tr}\{\left(\mathbf{V}-\mathbf{V}_k\right)^T\nabla_{\mathbf{V}} f(\mathbf{U}_{k+1},\mathbf{V}_k)\} + \nonumber \\ & \frac{1}{2\alpha^k_{\mathbf{V}} }\mathrm{vec}\left(\mathbf{V}-\mathbf{V}_k\right)^T\bar{\mathbf{H}}^{{\mathcal{I}^k_{\mathbf{V}}}}_{\mathbf{V}}\mathrm{vec}\left(\mathbf{V}-\mathbf{V}_k\right), \end{align} \normalsize where $\alpha^k_{\mathbf{U}}$ and $\alpha_{\mathbf{V}}^k$ denote step size parameters. Hence, $\mathbf{U}$ and $\mathbf{V}$ are updated by solving the following constrained minimization problems, \begin{align} \mathbf{U}_{k+1} = \underset{\mathbf{U} \geq \mathbf{0}}{\mathrm{argmin}} \ l(\mathbf{U}|\mathbf{U}_k,\mathbf{V}_k) \\ \text{and}\;\;\;\ \mathbf{V}_{k+1} = \underset{\mathbf{V}\geq \mathbf{0}}{\mathrm{argmin}} \ g(\mathbf{V}|\mathbf{U}_{k+1},\mathbf{V}_k) \end{align} giving rise to feasible updates in the form \begin{align} \mathrm{vec}(\mathbf{U}_{k+1}(\alpha^k_{\mathbf{U}}))& = [\mathrm{vec}(\mathbf{U}_k) - \nonumber \\ &\alpha^k_{\mathbf{U}} \left(\bar{\mathbf{H}}^{\mathcal{I}_{\mathbf{U}}^k}_{\mathbf{U}}\right)^{-1}\mathrm{vec}(\nabla_\mathbf{U} f(\mathbf{U}_k,\mathbf{V}_k))]_{+} \\ \mathrm{vec}(\mathbf{V}_{k+1}(\alpha_{\mathbf{V}}^k)) & = [\mathrm{vec}(\mathbf{V}_k) - \nonumber \\ &\alpha^k_{\mathbf{V}} \left(\bar{\mathbf{H}}^{\mathcal{I}_{\mathbf{V}}^k}_{\mathbf{V}}\right)^{-1}\mathrm{vec}(\nabla_\mathbf{V} f(\mathbf{U}_{k+1},\mathbf{V}_k))]_{+}, \end{align} where $[x]_+ = \max(x,0)$. The step size parameters $\alpha^k_{\mathbf{U}}$ and $\alpha_{\mathbf{V}}^k$ are calculated based on the Armijo rule on the projection arc, \cite{bertsekas1999nonlinear}, with the goal of achieving sufficient decrease of the initial cost function per iteration. Concretely, $\alpha^k_{\mathbf{U}}$ is set to $\alpha^k_{\mathbf{U}} = \beta_{\mathbf{U}}^{{m}_k}$ with $\beta_{\mathbf{U}}\in(0,1)$ and $m_k$ is the first nonnegative integer such that \begin{align} &f(\mathbf{U}_k) - f(\mathbf{U}_{k+1}(\alpha^k_{\mathbf{U}}))\geq \nonumber \\ &\sigma\Bigg\{\alpha^k_{\mathbf{U}}\sum_{\footnotesize i \notin \{\mathcal{I}^k_{\mathbf{u}_1} \cup \mathcal{I}^k_{\mathbf{u}_2} \cup\dots \cup \mathcal{I}^k_{\mathbf{u}_m}\}}\frac{\partial f(\mathbf{U}_k,\mathbf{V}_k) }{\partial \mathrm{vec}(\mathbf{U})_i}\times \nonumber \\ & \left(\left(\bar{\mathbf{H}}^{\mathcal{I}^k_{\mathbf{U}}}_{\mathbf{U}}\right)^{-1}\mathrm{vec}(\nabla_\mathbf{U} f(\mathbf{U}_k,\mathbf{V}_k))\right)_i +\nonumber \\ & \sum_{i\in \{\mathcal{I}^k_{\mathbf{u}_1} \cup \mathcal{I}^k_{\mathbf{u}_2} \cup\dots \cup \mathcal{I}^k_{\mathbf{u}_m}\} } \frac{\partial f(\mathbf{U}_k,\mathbf{V}_k) }{\partial \mathrm{vec}(\mathbf{U})_i}\times \mathrm{vec}(\mathbf{U}_k - \mathbf{U}_k(\alpha^k_{\mathbf{U}}))_i\Bigg\}. \label{armijo_rule} \end{align} where $\sigma$ is a constant scalar. 
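A single projected quasi-Newton step, including the backtracking choice of the step size, can then be sketched as below. For brevity, the sketch replaces the exact projection-arc rule (\ref{armijo_rule}) by the standard Armijo sufficient-decrease test on the projected point, and it expects the partially diagonalized $d\times d$ blocks of the previous listing as input; it is an illustration of the mechanism rather than a faithful implementation of Algorithm 3.
\begin{verbatim}
import numpy as np

def projected_newton_step(W, grad_W, H_blocks, f, beta=0.1, sigma=1e-2,
                          max_backtrack=20):
    # One projected quasi-Newton step on a nonnegative factor W,
    # with one (partially diagonalized) d x d Hessian block per row of W.
    direction = np.stack([np.linalg.solve(H_blocks[i], grad_W[i])
                          for i in range(W.shape[0])])
    f0 = f(W)
    alpha = 1.0
    W_new = W
    for _ in range(max_backtrack):
        W_new = np.maximum(W - alpha * direction, 0.0)  # project onto the orthant
        if f0 - f(W_new) >= sigma * np.sum(grad_W * (W - W_new)):
            break                                       # sufficient decrease reached
        alpha *= beta                                   # alpha = beta ** m_k
    return W_new, alpha
\end{verbatim}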
The same process described above for selecting $\alpha^k_{\mathbf{U}}$ and hence updating $\mathbf{U}$ is subsequently adopted for $\alpha^k_{\mathbf{V}}$ and $\mathbf{V}$. The resulting alternating projected Newton-type algorithm for low-rank NMF is given in Algorithm 3. {\it Remark 6: The adopted Armijo rule on the projection arc provides guarantees on the monotonic decrease of the initial cost function per iteration, as detailed in the next section. It should be noted that, contrary to the projected Newton NMF method of \cite{gong2012efficient}, in our case the diagonal matrices adopted are always positive definite and hence invertible, offering stability to the derived algorithm. Finally, since the approximate Hessian matrices used are partially diagonal, efficient implementations can be adopted to reduce the computational cost.} \begin{table} \centering \title{Algorithm 3: Low-rank nonnegative matrix factorization\\ algorithm } \begin{tabular}{|l|} \hline \\ Algorithm 3: AIRLS nonnegative matrix factorization \\ (AIRLS-NMF) algorithm\\ \hline Input: $\mathbf{Y},\lambda,\beta_{\mathbf{U}},\beta_{\mathbf{V}},\sigma,\epsilon=10^{-6}$ \\ Initialize: $k=0, \mathbf{U}_0,\mathbf{V}_0,\mathbf{D}_{(\mathbf{U}_0,\mathbf{V}_0)}$ \\ \bf{repeat} \\ \hspace{0.5cm} Estimate the set of active constraints $\mathcal{I}^k_{\mathbf{U}}$\\ \hspace{0.5cm} $m_k=0$ \\ \hspace{0.5cm} \textbf{while} eq. (\ref{armijo_rule}) is not satisfied \textbf{do} \\ \hspace{1cm} $m_k =m_k+1$, $\alpha^k_{\mathbf{U}} = \beta_{\mathbf{U}}^{m_k}$\\ \hspace{0.5cm} \bf{end} \\ \hspace{0.5cm} $\mathrm{vec}(\mathbf{U}_{k+1}) = [\mathrm{vec}({\mathbf{U}}_k) - $\\ \hspace{2cm}$\alpha^k_{\mathbf{U}} \left(\bar{\mathbf{H}}^{\mathcal{I}^k_{\mathbf{U}}}_{\mathbf{U}}\right)^{-1}\mathrm{vec}(\nabla_\mathbf{U} f(\mathbf{U}_k,\mathbf{V}_k))]_{+}$\\ \hspace{0.5cm} Estimate the set of active constraints $\mathcal{I}^k_{\mathbf{V}}$\\ \hspace{0.5cm} $m_k=0$ \\ \hspace{0.5cm} \textbf{while} eq. (\ref{armijo_rule}) is not satisfied \textbf{do} \\ \hspace{1cm} $m_k=m_k+1$, $\alpha^k_{\mathbf{V}} = \beta_{\mathbf{V}}^{m_k}$\\ \hspace{0.5cm} \bf{end} \\ \hspace{0.5cm} $\mathrm{vec}(\mathbf{V}_{k+1}) = [\mathrm{vec}(\mathbf{V}_k) -$\\ \hspace{2cm}$\alpha^k_{\mathbf{V}} \left(\bar{\mathbf{H}}^{\mathcal{I}^k_{\mathbf{V}}}_{\mathbf{V}}\right)^{-1}\mathrm{vec}(\nabla_\mathbf{V} f(\mathbf{U}_{k+1},\mathbf{V}_k))]_{+}$\\ \hspace{0.5cm} $k=k+1$\\ \bf{until} {\it convergence} \\ Output: $\hat{\mathbf{U}} = \mathbf{U}_{k+1}, \hat{\mathbf{V}} = \mathbf{V}_{k+1}$ \\ \hline \end{tabular} \end{table} {\it Remark 7: The proposed AIRLS, AIRLS-MC and AIRLS-NMF algorithms jointly annihilate columns of the matrices $\mathbf{U}$ and $\mathbf{V}$, as a result of the column sparsity imposing nature of the introduced low-rank promoting term. This key feature of the proposed algorithms lets us incorporate a mechanism that prunes the columns that are zeroed as the algorithms evolve. By doing so, the per iteration computational complexity of the algorithms is gradually reduced, and this reduction may become significant, as is also highlighted in the experimental section.} \section{Convergence analysis} In this part of the paper we analyze the convergence behavior of the three algorithms presented in the previous section. To this end, we first prove the following Lemma.
{\it Lemma 1: The surrogate functions $l(\mathbf{U}|\mathbf{U}_k,\mathbf{V}_k)$ and $g(\mathbf{V}|\mathbf{U}_{k+1},\mathbf{V}_k)$ minimized at each iteration of Algorithms 1 and 2 are tight upper bounds of the corresponding $f(\mathbf{U},\mathbf{V}_k)$ and $f(\mathbf{U}_{k+1},\mathbf{V})$, with $f(\mathbf{U},\mathbf{V})$ defined in eqs. (\ref{cost_function_denoising}) and (\ref{cost_function_matrix_completion}) for the two algorithms, respectively.}\\ {\it Proof}: See Appendix. In non-negative matrix factorization, the proposed alternating projected Newton algorithm relies on the approximate Hessians $\bar{\mathbf{H}}^{\mathcal{I}^k_{\mathbf{U}}}_{\mathbf{U}}$ and $\bar{\mathbf{H}}^{\mathcal{I}^k_{\mathbf{V}}}_{\mathbf{V}}$ defined in the previous section. The following Lemma provides the conditions that ensure that this approach can also be placed within the upper-bound minimization framework. {\it Lemma 2: The surrogate function $l(\mathbf{U}|\mathbf{U}_k,\mathbf{V}_k)$ upper bounds $f(\mathbf{U},\mathbf{V}_k)$, if $a^k_{\mathbf{U}}$ is bounded above by $\frac{\lambda_{min}(\bar{\mathbf{H}}^{\mathcal{I}^k_{\mathbf{U}}}_{\mathbf{U}})}{\lambda_{max}({\mathbf{H}}_{\mathbf{U}_k})}$. Similarly, $g(\mathbf{V}|\mathbf{U}_{k+1},\mathbf{V}_k)\geq f(\mathbf{U}_{k+1},\mathbf{V})$, if $a^k_{\mathbf{V}} \leq \frac{\lambda_{min}(\bar{\mathbf{H}}^{\mathcal{I}^k_{\mathbf{V}}}_{\mathbf{V}})}{\lambda_{max}({\mathbf{H}}_{\mathbf{V}_k})}$, respectively.}\\[0cm] {\it Proof}: See Appendix. \\[0cm] Having shown that the proposed surrogate cost functions are upper bounds of the actual ones, in Proposition 2 given below the monotonic decrease of the initial cost functions per iteration of the respective algorithms is established. {\it Proposition 2: The sequences $\{\mathbf{U}_k,\mathbf{V}_k\}$ generated by Algorithms 1, 2 and 3 monotonically decrease the respective cost functions, i.e., \small \begin{equation} f(\mathbf{U}_{k+1},\mathbf{V}_{k+1}) \leq f(\mathbf{U}_{k+1},\mathbf{V}_k) \leq f(\mathbf{U}_k,\mathbf{V}_k). \label{eq:prop_1} \end{equation}} \normalsize \\[-0.0cm] {\it Proof:} See Appendix. {\it Corollary 1: The monotonically decreasing sequence $f(\mathbf{U}_k,\mathbf{V}_k)$ converges as $k\rightarrow \infty$ to $f^{\infty}\geq 0$.} \\ {\it Proof:} It can be easily proved using Proposition 2, since the cost functions are bounded below by 0. \subsection{Rates of convergence and convergence to stationary points} Having shown that the updates $(\mathbf{U}_k,\mathbf{V}_k)$ generated by Algorithms 1, 2 and 3 monotonically decrease the corresponding cost functions, we herein derive the rates of convergence of the algorithms to a stationary point. The subsequent analysis is along the lines of the one presented in \cite{hastie2015matrix}. Given any $(\mathbf{U}, \mathbf{V})$ we define matrices $\mathbf{U}_{\ast}, \mathbf{V}_{\ast}$ arising from the following minimization problems \begin{align} \mathbf{U}_{\ast} = \underset{\mathbf{U}^{+}}{\mathrm{arg min}} \;\; l(\mathbf{U}^{+}|\mathbf{U},\mathbf{V} ) \\ \mathbf{V}_{\ast} = \underset{\mathbf{V}^{+}}{\mathrm{arg min}} \;\; g(\mathbf{V}^{+}|\mathbf{U}_{\ast},\mathbf{V} ).
\end{align} Let us now denote by $\Delta^a((\mathbf{U},\mathbf{V}),(\mathbf{U}_{\ast},\mathbf{V}_{\ast}))$ and $\Delta^b((\mathbf{U},\mathbf{V}),(\mathbf{U}_{\ast},\mathbf{V}_{\ast}))$ the measures of proximity between $(\mathbf{U},\mathbf{V})$ and $(\mathbf{U}_{\ast},\mathbf{V}_{\ast})$, which are defined as follows, \begin{align} & \Delta^a((\mathbf{U},\mathbf{V}),(\mathbf{U}_{\ast},\mathbf{V}_{\ast})) = \frac{1}{2}\Big(\|\mathbf{V}\left(\mathbf{U} - \mathbf{U}_{\ast}\right)^T\|^2_F + \nonumber \\ & \|\mathbf{U}_{\ast}\left(\mathbf{V}-\mathbf{V}_{\ast}\right)^T\|^2_F \Big) + \frac{\lambda}{2}\Big(\|\mathbf{D}_{(\mathbf{U},\mathbf{V})}^{\frac{1}{2}}\left(\mathbf{U} - \mathbf{U}_{\ast}\right)^T\|^2_F + \nonumber \\ & \|\mathbf{D}_{(\mathbf{U}_{\ast},\mathbf{V})}^{\frac{1}{2}}\left(\mathbf{V} - \mathbf{V}_{\ast}\right)^T\|^2_F \Big) \\ & \Delta^b((\mathbf{U},\mathbf{V}),(\mathbf{U}_{\ast},\mathbf{V}_{\ast})) = \nonumber \\ &\frac{1}{2}\sum^m_{i=1}\left(\mathbf{u}_{i} - \mathbf{u}_{i,\ast}\right)^T[\mathbf{V}^T\mathbf{V}]_{\mathcal{I}_{\mathbf{u}_i}}\left(\mathbf{u}_{i} - \mathbf{u}_{i,\ast}\right) \nonumber \\ & + \frac{1}{2}\sum^n_{i=1} \left(\mathbf{v}_{i}-\mathbf{v}_{i,\ast}\right)^T[\mathbf{U}^T_{\ast}\mathbf{U}_{\ast}]_{\mathcal{I}_{\mathbf{v}_i}}\left(\mathbf{v}_{i}-\mathbf{v}_{i,\ast}\right) \nonumber \\ & + \frac{\lambda}{2}\Big(\|\mathbf{D}_{(\mathbf{U},\mathbf{V})}^{\frac{1}{2}}\left(\mathbf{U} - \mathbf{U}_{\ast}\right)^T\|^2_F + \nonumber \\ &\|\mathbf{D}_{(\mathbf{U}_{\ast},\mathbf{V})}^{\frac{1}{2}}\left(\mathbf{V} - \mathbf{V}_{\ast}\right)^T\|^2_F \Big) + \mathrm{tr}\{(\mathbf{U}-\mathbf{U}_{\ast})^T\nabla_{\mathbf{U}}f(\mathbf{U},\mathbf{V})\} + \nonumber\\ &\mathrm{tr}\{(\mathbf{V}-\mathbf{V}_{\ast})^T\nabla_{\mathbf{V}}f(\mathbf{U}_\ast,\mathbf{V}) \} \end{align} where $[\mathbf{V}^T\mathbf{V}]_{\mathcal{I}_{\mathbf{u}_i}}$ and $[\mathbf{U}^T_{\ast}\mathbf{U}_{\ast}]_{\mathcal{I}_{\mathbf{v}_i}}$ are partially diagonalized versions of the matrices $\mathbf{V}^T\mathbf{V}$ and $\mathbf{U}^T_{\ast}\mathbf{U}_{\ast}$ according to $\mathcal{I}_{\mathbf{u}_i}$ and $\mathcal{I}_{\mathbf{v}_i}$, respectively. {\it Lemma 3: Successive differences in the objective values of the cost functions $f(\mathbf{U},\mathbf{V})$ corresponding to Algorithms 1, 2 and 3 are bounded below as follows. For Algorithms 1 and 2: \footnotesize \begin{align} f(\mathbf{U}_k,\mathbf{V}_k) - f(\mathbf{U}_{k+1},\mathbf{V}_{k+1}) \geq \Delta^a((\mathbf{U}_k,\mathbf{V}_k),(\mathbf{U}_{k+1},\mathbf{V}_{k+1})) \label{lemma_2_a} \end{align}\normalsize For Algorithm 3: \footnotesize \begin{align} f(\mathbf{U}_k,\mathbf{V}_k) - f(\mathbf{U}_{k+1},\mathbf{V}_{k+1}) \geq \Delta^b((\mathbf{U}_k,\mathbf{V}_k),(\mathbf{U}_{k+1},\mathbf{V}_{k+1})). \label{lemma_2_b} \end{align} \normalsize } {\it Proof:} See Appendix. {\it Lemma 4: $\Delta^a((\mathbf{U},\mathbf{V}),(\mathbf{U}_{\ast},\mathbf{V}_{\ast}))=0$ if and only if $(\mathbf{U},\mathbf{V})$ generated by Algorithm 1 or 2 is a fixed point thereof. Likewise, $\Delta^b((\mathbf{U},\mathbf{V}),(\mathbf{U}_{\ast},\mathbf{V}_{\ast}))=0$ if and only if $(\mathbf{U},\mathbf{V})$ generated by Algorithm 3 is a fixed point of that algorithm.}\\ {\it Proof:} See Appendix.
Note that $\Delta^a((\mathbf{U}_k,\mathbf{V}_k),(\mathbf{U}_{k+1},\mathbf{V}_{k+1}))$ and $\Delta^b((\mathbf{U}_k,\mathbf{V}_k),(\mathbf{U}_{k+1},\mathbf{V}_{k+1}))$ are actually used for quantifying the distance between $(\mathbf{U}_k,\mathbf{V}_k)$ and $(\mathbf{U}_{k+1},\mathbf{V}_{k+1})$ generated in successive iterations of the proposed algorithms. Thus, if the algorithms converge, these measures become equal to zero. For ease of notation, we will next denote these quantities by $\delta^a_k$ and $\delta^b_k$, respectively. Before proceeding further, we make the following assumption. \\ {\it Assumption 1: The eigenvalues of both $\mathbf{U}^T_k\mathbf{U}_k$ and $\mathbf{V}^T_k\mathbf{V}_k$ for $k\geq 1$ are uniformly bounded below and above by $l_L$ and $l_U$, respectively, i.e., \small \begin{align} l_{L}\mathbf{I}_d \preceq \mathbf{U}_k^T\mathbf{U}_k \preceq l_{U}\mathbf{I}_d \;\;\; \text{and} \;\;\; l_{L}\mathbf{I}_d \preceq \mathbf{V}_k^T\mathbf{V}_k \preceq l_{U}\mathbf{I}_d. \end{align} \normalsize } That said, the main result of this section is summarized in the following proposition. {\it Proposition 3: The sequences $\{\mathbf{U}_k,\mathbf{V}_k\}$ generated by Algorithms 1, 2 and 3 are bounded and hence have at least one limit point. This implies (by the Bolzano-Weierstrass theorem) that there exist subsequences that converge to the limit points. Actually, the limit points correspond to fixed points of Algorithms 1, 2 and 3, which are stationary points of the minimized cost functions. Finally, Algorithms 1, 2 and 3 converge sublinearly, with their rates of convergence expressed as, \begin{align} \text{Algorithms 1, 2:} \;\;\;\; \underset{1\leq k \leq K}{\mathrm{min}}\delta_k^a \leq \frac{f(\mathbf{U}_1,\mathbf{V}_1) - f^{\infty}}{K} \\ \text{Algorithm 3:} \;\;\;\; \underset{1\leq k \leq K}{\mathrm{min}} \delta_k^b \leq \frac{f(\mathbf{U}_1,\mathbf{V}_1) - f^{\infty}}{K}. \end{align}} {\it Proof:} See Appendix. Using Assumption 1 we can provide more refined information with regard to the rates of convergence, bringing into play the curvature characteristics of the cost functions as well as the regularization parameter $\lambda$. {\it Corollary 2: Under Assumption 1, we can derive the following convergence rate for Algorithms 1, 2 and 3: \begin{align} \underset{1\leq k \leq K}{\mathrm{min}} \|\mathbf{U}_{k+1}-\mathbf{U}_k\|^2_F + \|\mathbf{V}_{k+1}-\mathbf{V}_k\|^2_F \leq \nonumber \\ \frac{4\tau}{2l_{L}\tau + \lambda} \frac{f(\mathbf{U}_1,\mathbf{V}_1) - f^{\infty}}{K}, \label{convergence_rate} \end{align} where $\tau = \underset{1\leq i \leq d}{\mathrm{max}}(\|\boldsymbol{\mathit{u}}_i\|^2_2,\|\boldsymbol{\mathit{v}}_i\|^2_2)$. }\\ {\it Proof:} It can be easily proved by suitably modifying $\delta^a_k$ and $\delta^b_k$ using the inequalities $l_{L}\|\mathbf{U}_{k}-\mathbf{U}_{k+1}\|^2_F \leq \|\mathbf{V}_k\left(\mathbf{U}_{k}-\mathbf{U}_{k+1}\right)\|^2_F \leq l_{U}\|\mathbf{U}_{k}-\mathbf{U}_{k+1}\|^2_F$ and $l_{L}\|\mathbf{V}_{k}-\mathbf{V}_{k+1}\|^2_F \leq \|\mathbf{U}_{k+1}\left(\mathbf{V}_{k}-\mathbf{V}_{k+1}\right)\|^2_F \leq l_{U}\|\mathbf{V}_{k}-\mathbf{V}_{k+1}\|^2_F$. \section{Experiments}\label{sec:experiments} Next, simulated and real data experiments are provided to illustrate the key features of the proposed AIRLS, AIRLS-MC and AIRLS-NMF algorithms. For comparison purposes, the Maximum-Margin-Matrix Factorization (MMMF) method of \cite{rennie2005fast} is utilized in the denoising type problems.
In matrix completion experiments the softImpute-ALS algorithm, \cite{hastie2015matrix}, is used. Finally, the ARD-NMF algorithm, \cite{tan2013automatic} is included in the non-negative matrix factorization type experiments. It should be noted that for the three proposed algorithms {\it a column pruning mechanism is applied}. That is, when a column of the matrix factors has been (approximately) zeroed, it is removed, thus reducing the column size of the factors (see Remark 7). As a result, the per iteration complexity is being reduced during the execution of the algorithms. \subsection{Simulated data experiments} Herein we highlight the benefits of the proposed AIRLS, AIRLS-MC and AIRLS-NMF algorithms on simulated data. To this end, the proposed algorithms are tested on two different experimental setups i.e. a) for checking the performance of AIRLS and AIRLS-NMF in the presence of noise and b) for testing the capacity of AIRLS-MC in dealing with different percentages of missing data. \begin{table*} \centering \begin{tabular}{|c |c | c |c| c| c| c| c| c| c| c| c| c|} \hline SNR & \multicolumn{6}{c|}{10}& \multicolumn{6}{c|}{20} \\ \hline rank & \multicolumn{3}{c}{5} & \multicolumn{3}{|c}{10} & \multicolumn{3}{|c}{5}& \multicolumn{3}{|c |}{10} \\\hline Algorithm & \small \# Iter & \small time(s) &\small NRE & \small \# Iter & \small time(s) &\small NRE & \small \# Iter & \small time(s) & \small NRE &\small \# Iter &\small time(s) &\small NRE \\ \hline \multicolumn{1}{|c|}{ MMMF} & 15 & 0,2774 & 0,1079& 15 & 0,2853 & 0,1152 & 40,31 & 0,7739 &0,0235 & 40,38 & 0,7666 & 0,0294\\ \hline \multicolumn{1}{|c|}{AIRLS} & 43,37 & 0,3949 & 0,0448 & 24,37 & 0,2426 & 0,0635 & 15,41 & 0,1571 &0,0142 & 35,68 & 0,3421 & 0,02 \\ \hline \end{tabular} \caption{Results obtained by MMMF and AIRLS on the simulated denoising experiment.} \label{table:results_denoising} \end{table*} \begin{table*} \centering \begin{tabular}{| c |c | c |c| c| c| c| c| c| c| c| c| c|} \hline SNR & \multicolumn{4}{c|}{10}& \multicolumn{4}{c|}{20} \\ \hline rank & \multicolumn{2}{c}{5} & \multicolumn{2}{|c}{10} & \multicolumn{2}{|c}{5}& \multicolumn{2}{|c|}{10} \\\hline Algorithm & est. rank &\small NRE & est. rank & \small NRE & est. rank &\small NRE & est. rank &\small NRE \\ \hline \multicolumn{1}{|c|}{ ARD-NMF} & 4,36 & 0,0778 & 100 & 0,1023 &4,66 & 0,0825 & 100 & 0,1008 \\ \hline \multicolumn{1}{|c|}{AIRLS-NMF} & 5,14 & 0,048 & 10,25 & 0,0706& 6,52 & 0,0181 & 10,23& 0,0291\\ \hline \end{tabular} \caption{Results obtained by ARD-NMF and AIRLS on the simulated NMF experiment.} \label{table:results_NMF} \end{table*} \subsubsection{AIRLS and AIRLS-NMF} In order to validate the performance of AIRLS and AIRLS-NMF in the presence of noise two different experimental settings are used. In both settings, a matrix $\mathbf{X}_{0}\in \mathbb{R}^{m\times n}$ with $m=500$, $n=500$ and varying rank $r \in \{5,10 \}$ is randomly generated. Concretely, matrix $\mathbf{X}_0$ is produced by the product of two matrices i.e., $\mathbf{U}_0\in \mathbb{R}^{m\times r}$ and $\mathbf{V}_0^T\in \mathbb{R}^{r \times n}$ having either a) zero-mean Gaussian entries of variance 1 or b) uniformly distributed non-negative entries in the range 0 to 1. The latter is used for testing the NMF algorithms. In both cases additive Gaussian i.i.d noise of different $\mathrm{SNR}\in \{10,20\}$ corrupts $\mathbf{X}_0$, thus resulting to the data matrix $\mathbf{Y}$, which is then provided as input to the tested algorithms. 
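For reproducibility, the synthetic data of these settings can be generated along the following lines; this is a sketch in which the random seed and the exact noise scaling convention are our own choices, and the normalized reconstruction error used as a figure of merit below is included for convenience.
\begin{verbatim}
import numpy as np

def make_noisy_lowrank(m=500, n=500, r=5, snr_db=10, nonneg=False, seed=0):
    # X0 = U0 V0^T of rank r; nonneg=True draws uniform factors (NMF setting)
    rng = np.random.default_rng(seed)
    if nonneg:
        U0, V0 = rng.uniform(0, 1, (m, r)), rng.uniform(0, 1, (n, r))
    else:
        U0, V0 = rng.standard_normal((m, r)), rng.standard_normal((n, r))
    X0 = U0 @ V0.T
    noise_power = np.mean(X0**2) / (10.0 ** (snr_db / 10.0))
    Y = X0 + np.sqrt(noise_power) * rng.standard_normal((m, n))
    return X0, Y

def nre(X0, U_hat, V_hat):
    # normalized reconstruction error ||X0 - U V^T||_F / ||X0||_F
    return np.linalg.norm(X0 - U_hat @ V_hat.T) / np.linalg.norm(X0)
\end{verbatim}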
For the case of a) AIRLS is compared to the MMMF algorithm while in b) the ARD-NMF algorithm takes part in the respective experiments. Note that for the case of ARD-NMF of \cite{tan2013automatic}, the beta function of its data fitting term is reduced to the squared Frobenious norm. This way, both AIRLS-NMF and ARD-NMF rely on the same noise assumptions. As a quantitative metric we utilize the normalized reconstruction error defined as $\mathrm{NRE} = \frac{\|\mathbf{X}_0- \hat{\mathbf{U}}\hat{\mathbf{V}}^T\|_F}{\|\mathbf{X}_0\|_F}$. Since we are interested in the recovery performance of the algorithms, the low-rank promoting parameter $\lambda$ of the algorithms is selected from a set of values \{0.1,1,5,10,50,80,100,200\} via fine tuning in terms of the lowest achieved NRE. Moreover, for AIRLS-NMF we set $\beta_{\mathbf{U}}=\beta_{\mathbf{V}}=10^{-1}$ and $\sigma=10^{-2}$. The algorithms stop when either the relative decrease of the reconstructed data between two successive iterations i.e., $\frac{\|\hat{\mathbf{U}}_{k}\hat{\mathbf{V}}_{k}^T - \hat{\mathbf{U}}_{k+1}\hat{\mathbf{V}}_{k+1}^T\|_F}{\|\hat{\mathbf{U}}_{k}\hat{\mathbf{V}}_{k}^T\|_F}$ becomes less than $10^{-4}$ or 500 iterations are reached. 100 independent runs are performed for each algorithm and the average values of the various quantities (elapsed time, NRE, iterations executed and estimated rank) are provided in Tables \ref{table:results_denoising} and \ref{table:results_NMF}. The initial rank is set to $d=100$. In Table \ref{table:results_denoising}, the results of AIRLS and MMMF are given. Therein, it is shown that AIRLS offers better estimation performance than MMMF in all experiments. Interestingly, in most cases, this happens in less time than that spent by MMMF, although AIRLS in some instances required more iterations. This favorable characteristic of AIRLS is due to its {\it column pruning capability, which results to a much less average time per iteration}. In the case of the NMF problem, it can be observed by Table \ref{table:results_NMF} that AIRLS-NMF achieved lower NRE than that of ARD-NMF for all different choices of noise and rank of the sought matrices. Notably, AIRLS-NMF exhibited robustness in recovering the true rank in both cases examined i.e., $r \in \{5,10\}$, contrary to ARD-NMF which failed to estimate the true rank especially for $r=10$. \subsubsection{AIRLS-MC} To evaluate the performance of AIRLS-MC in different scenarios, we classify the experimental settings of this subsection according to the degrees of freedom ratio (FR), \cite{mohan2012iterative}, defined as $\mathrm{FR} = r (2n - r)/\mathrm{card}(\Omega)$. Recovery becomes harsher as FR is close to 1, whereas easier problems arise when it takes values close to 0. AIRLS-MC is compared to softImpute-ALS for FR equal to $0.4$ and $0.6$. In both cases a low-rank matrix $\mathbf{X}_{0}\in \mathbb{R}^{m\times n}$ with $m=1000$, $n=1000$ and rank $r=20$ is generated. The NRE defined above is used as the performance metric. For both algorithms, parameter $\lambda$ is fine tuned as described in the previous experiment and the initial rank is set to 100. Again, the algorithms run for 100 instances of each experiment and the mean values of iterations, NRE and time to converge are given in Table \ref{table:results-MC}. Moreover, the same stopping criteria mentioned previously are utilized. As is shown in Table \ref{table:results-MC}, AIRLS-MC offers higher accuracy than softImpute-ALS in both experiments. 
Interestingly, this happens in less time, although for FR=$0.6$ it requires more iterations to converge. This is due to the fact that AIRLS-MC estimates the true rank of the matrix after a few iterations. That is, the column pruning mechanism mentioned above gradually reduces its computational complexity. \begin{table} \centering \resizebox{0.5\textwidth}{!}{ \begin{tabular}{| c |c | c |c| c| c| c| c| c| c| c| c| c|} \hline FR & \multicolumn{3}{c|}{0.4}& \multicolumn{3}{c|}{0.6} \\ \hline Algorithm & \small \# Iter & \small time(s) &\small NRE & \small \# Iter & \small time(s) &\small NRE \\ \hline \multicolumn{1}{|c|}{softImpute-ALS}& 295 & 218 & 0,1851 & 220 & 228 & 0,64 \\ \hline \multicolumn{1}{|c|}{AIRLS-MC} & 207 & 53 & 0,1499 & 731 & 174 & 0,27 \\ \hline \end{tabular}} \caption{Results of AIRLS-MC and softImpute-ALS on the matrix completion experiment.} \label{table:results-MC} \end{table} \subsection{Real data experiments} In this section we validate the performance of the proposed algorithms on three different real data experiments. First, the AIRLS algorithm is tested in denoising a real hyperspectral image (HSI). Second, a collaborative filtering application is used for testing the matrix completion algorithms. Finally, a music signal decomposition problem is employed for comparing the performance of the NMF algorithms. \begin{figure} \caption{Evaluation of AIRLS and MMMF on the Washington DC AVIRIS dataset.} \label{fig:denoising_real_HSI} \end{figure} \subsubsection{Hyperspectral Image Denoising} In this experiment we utilize the Washington DC Mall AVIRIS HSI captured at $m=210$ contiguous spectral bands in the 0.4 to 2.4 $\mu$m region of the visible and infrared spectrum. The HSI consists of $n= 22500 \ (150\times 150)$ pixels. As is widely known, \cite{giampouras2016simultaneously}, hyperspectral data are highly coherent both in the spectral and the spatial domains. Therefore, by organizing the tested image in a matrix, whereby each column corresponds to the spectral bands and each row to the pixels, it turns out that this matrix can be well approximated by a low-rank one. This fact motivates us to exploit the low-rank structure of the HSI under study for efficiently denoising a version thereof that is highly corrupted by Gaussian i.i.d. noise of $\mathrm{SNR}=6$dB. In Fig. \ref{fig:denoising_real_HSI}, false RGB images of the HSIs recovered by the proposed AIRLS algorithm and MMMF are provided. In both algorithms, the number of columns of the initial factors $\mathbf{U}_0$ and $\mathbf{V}_0$ is overestimated to $d=100$ and the algorithms terminate when the relative decrease of the reconstructed HSI between two successive iterations reaches a value less than $10^{-4}$. Moreover, their low-rank promoting parameter $\lambda$ is selected so as to lead to solution matrices $\hat{\mathbf{U}}$ and $\hat{\mathbf{V}}$ of the same rank $r= 4$. As can be noticed in Fig. \ref{fig:denoising_real_HSI}, AIRLS reconstructs the HSI with significantly improved accuracy compared to MMMF. This can be easily verified both by visually inspecting Figs. \ref{fig:denoising_real_HSI}a-\ref{fig:denoising_real_HSI}d and quantitatively in terms of the estimated NRE (Fig. \ref{fig:denoising_real_HSI}e). Notably, AIRLS converges in fewer iterations than those required by MMMF (Fig. \ref{fig:denoising_real_HSI}e), while at the same time less time per iteration is consumed, on average.
The latter is achieved by virtue of the column pruning mechanism of AIRLS, which gradually reduces the size of the matrix factors from $m\times 100$ and $n\times 100$ to $m\times 4$ and $n\times 4$, respectively. This way, after only a few initial iterations, when the rank starts to decrease, the per-iteration time complexity of AIRLS becomes much smaller than that of its early iterations, as well as that of MMMF. \subsubsection{MC on Movielens 100K and 10M datasets} Herein, we focus on testing the performance of the AIRLS-MC algorithm on a popular collaborative filtering application, i.e., a movie recommender system. To this end, we utilize two large and well-studied datasets: the Movielens 100K and the Movielens 10M datasets. Both datasets contain ratings collected over various periods of time by users, with integer values ranging from 1 to 5. Since most of the entries are missing, matrix completion algorithms can be utilized for predicting them. By assuming that there exists a high degree of correlation amongst the ratings of different users, a low-rank structure can be meaningfully adopted for these datasets. For validation purposes, each of them is split into two disjoint sets, i.e., a training and a test set (the ub.base, ub.test and the ra.train, ra.test are used for the 100K and the 10M dataset, respectively). Note that the 100K dataset contains 100000 ratings of 943 users on 1682 movies, with each user having rated at least 20 movies. That said, we need to address a quite challenging matrix completion problem, since 93\% of the elements are missing. The situation is even harsher for the 10M dataset, which includes 10 million ratings from 72000 users on 10000 movies, with 99\% of the data missing. The test sets ub.test and ra.test for both datasets contain exactly 10 ratings per user. The state-of-the-art softImpute-ALS algorithm is utilized in this experiment for comparison purposes. Finally, the normalized mean absolute error (NMAE) defined as $\mathrm{NMAE} = \frac{\sum_{(i,j)\in \Omega}|[\mathbf{U}\mathbf{V}^T]_{ij} - [\mathbf{Y}]_{ij}|}{4\mathrm{card}(\Omega)}$ is used as a performance metric. First, we aim at illustrating the behavior of the proposed AIRLS-MC algorithm when it comes to the estimation performance and the speed of convergence. In that vein, for the case of the 100K dataset, the low-rank promoting parameter $\lambda$ of both AIRLS-MC and softImpute-ALS is selected according to two different scenarios: A) we choose the $\lambda$ that achieves the minimum NMAE after convergence and B) we select $\lambda$ so that the matrices estimated by both tested algorithms have the same rank, equal to 10. It should be noted that the same stopping criterion used in the previous experiment is also adopted here. As can be seen in Fig. \ref{fig:matrix_completion_100K} and Table \ref{table:movielens100K-results}, the proposed AIRLS-MC achieves better performance in terms of the NMAE for both scenarios A and B. The softImpute-ALS algorithm requires fewer iterations to converge than AIRLS-MC. However, the average per-iteration time complexity of AIRLS-MC is significantly lower than that of its rival. As mentioned above, this is attributed to the column pruning scheme, which decreases the computational burden of the algorithm to a large degree. This favorable property results in a much faster convergence of AIRLS-MC compared to softImpute-ALS in terms of time.
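The NMAE reported in these comparisons is evaluated only on the held-out test ratings; a small sketch is given below, in which the variable names for the test-set indices and values are hypothetical.
\begin{verbatim}
import numpy as np

def nmae(U_hat, V_hat, test_rows, test_cols, test_vals, rating_range=4.0):
    # NMAE over the held-out test ratings; rating_range = 5 - 1 = 4 for MovieLens
    pred = np.sum(U_hat[test_rows, :] * V_hat[test_cols, :], axis=1)  # [U V^T]_ij
    return np.mean(np.abs(pred - test_vals)) / rating_range
\end{verbatim}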
It should be noted that in scenario A, the estimated matrices $\hat{\mathbf{U}}$ and $\hat{\mathbf{V}}$ have rank equal to 6. On the other hand, for softImpute-ALS the solution matrices have rank equal to the one used at the initialization stage i.e., 100. In scenario B, softImpute-ALS converged faster than the proposed algorithm. However, this happened at the price of a remarkable deterioration of the NMAE. Lastly, from Fig. \ref{fig:matrix_completion_100K} it can be noticed that the relative objective of AIRLS-MC presents abrupt increases at some iterations. It was experimentally verified that those changes (which imply large decreases of the successive values of the objective function) take place at iterations that coincide with zeroings of the columns of the matrix factors. This fact advocates that larger gains are obtained at iterations where the rank is reduced, as we are approaching at the low-rank solution matrices. \begin{figure} \caption{Evaluation of AIRLS-MC and softImpute-ALS on the Movielens 100K dataset.} \label{fig:matrix_completion_100K} \end{figure} \begin{table} \centering \resizebox{0.5\textwidth}{!}{ \begin{tabular}{c c c | c | c | c | c |} \cline{4-7} & & & \small \# Iter & \small msec/iter & total time (sec) & NMAE \\ \cline{1-7} \multicolumn{1}{|c}{\multirow{4}{*}{\rotatebox[origin=c]{90}{scenario}}} & \multicolumn{1}{|c|}{\multirow{2}{*}{A}} & \multicolumn{1}{c|}{softImpute-ALS } & 278 & 104,2 & 28,9& 0,2254\\ \cline{3-7} \multicolumn{1}{|c}{} & \multicolumn{1}{|c|}{} & \multicolumn{1}{c|}{AIRLS-MC} & 957 & 19,5 & \textbf{18,7} & \textbf{0,1882} \\ \cline{2-7} \multicolumn{1}{|c}{} &\multicolumn{1}{|c|}{\multirow{2}{*}{B}} & \multicolumn{1}{c|}{softImpute-ALS} & 135 & 101,5 & \textbf{13,7} & 0,2873 \\ \cline{3-7} \multicolumn{1}{|c}{} &\multicolumn{1}{|c|}{}& \multicolumn{1}{c|}{AIRLS-MC} & 964 & 27,3 & 26,3 & \textbf{0,1918}\\ \hline \end{tabular}} \caption{Results obtained by AIRLS-MC and softImpute-ALS on Movielens 100K dataset.} \label{table:movielens100K-results} \end{table} Fig. \ref{fig:movielens10M} and Table \ref{table:results10M} show the performance of AIRLS-MC and softImpute-ALS on the 10M Movielens dataset. It should be noted that due to the large scale of this dataset the speed of convergence of the algorithms to a descent solution is of crucial importance. The parameter $\lambda$ of AIRLS-MC is now set to 3000, while for softImpute-ALS $\lambda$ is set, as proposed in \cite{hastie2015matrix}, to 50. The rank is initialized to 100 for both algorithms. In this experiment the relative tolerance criterion is set to $10^{-3}$. Interestingly, AIRLS-MC reaches a more accurate solution in terms of the NMAE (evaluated on the test set) in almost 1/3 of the time required by softIMpute-ALS. Again, AIRLS-MC requires more iterations to converge as compared to its competitor. Nevertheless, as it can be easily seen in Fig. \ref{fig:movielens10M}, after the initial iterations, when the rank starts to decrease and the column pruning mechanism is activated, the time per iteration of AIRLS-MC is dramatically reduced. 
\begin{table} \resizebox{0.5\textwidth}{!}{ \begin{tabular}{c |c | c | c| c |} \cline{2-5} & \small \# Iter & \small min/iter & total time (min) & NMAE \\ \hline \multicolumn{1}{|c|}{ softImpute-ALS} & 71 & 2,71 & 192,6 & 0,5485\\ \hline \multicolumn{1}{|c|}{AIRLS-MC} & 134 & 0,40 & 54,4 & 0,4645 \\ \hline \end{tabular}} \caption{Results obtained by AIRLS-MC and softImpute-ALS on Movielens 10M dataset.} \label{table:results10M} \end{table} \begin{figure} \caption{\footnotesize Evaluation of AIRLS-MC and softImpute-ALS on the 10M Movielens dataset.} \label{fig:movielens10M} \end{figure} \subsubsection{Music signal decomposition} Herein, we test the competence of the AIRLS-NMF algorithm in decomposing a real music signal. For this reason, AIRLS-NMF is compared to the most relevant state-of-the-art algorithm, i.e., ARD-NMF. In order to make the comparison between the two algorithms as fair as possible, the beta function of the ARD-NMF algorithm of \cite{tan2013automatic} is reduced to the squared Frobenius norm by appropriately setting the respective parameter. This way, ARD-NMF, like the proposed AIRLS-NMF, is based on Gaussian i.i.d. noise assumptions. The music signal analyzed is a short piano sequence, i.e., a monophonic 15 second-long signal recorded in real conditions, as described in \cite{tan2013automatic}. As can be noticed in Fig. \ref{fig:piano_seq}, it is composed of four piano notes that overlap throughout its duration. Following the same process as in \cite{tan2013automatic}, the original signal is transformed into the frequency domain via the short-time Fourier transform (STFT). To this end, a Hamming window of size $L=1024$ is utilized. By appropriately setting the overlap between adjacent frames, we are led to a spectrogram in which the signal is represented by 673 frames in 513 frequency bins. The power of this spectrogram is then provided as input to the tested algorithms. The initial rank is set to 20 and the same stopping criterion as in the previous experiments is utilized, with the threshold in this case set to $10^{-4}$. Moreover, for AIRLS-NMF the parameter setting described in the simulated data experiments is used, i.e., we set $\beta_{\mathbf{U}}=\beta_{\mathbf{V}}=10^{-1}$ and $\sigma=10^{-2}$. Finally, the same process described in \cite{tan2013automatic} is followed for reconstructing the music components, i.e., the rank-one terms of the product $\hat{\mathbf{U}}\hat{\mathbf{V}}^T$, in the time domain. In Fig. \ref{fig:results-piano}, the first 10 components obtained by the two algorithms are ordered by decreasing standard deviation of the time-domain waveforms. As can be noticed, AIRLS-NMF estimated the correct number of components, that is 6. Notably, the first four components of AIRLS-NMF correspond to the four notes, while the remaining two come from the sound of a hammer hitting the strings and the sound produced by the sustain pedal when it is released. On the contrary, ARD-NMF estimated 20 components, meaning that no rank minimization took place, thus implying data overfitting. It should be emphasized that the favorable performance of AIRLS-NMF occurs even though the noise is implicitly modeled as i.i.d. Gaussian. Interestingly, as can be seen in \cite{tan2013automatic}, AIRLS-NMF performed similarly to ARD IS-NMF, i.e., the version of ARD-NMF which makes more appropriate assumptions on the noise statistics by modeling it via the Itakura-Saito divergence.
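The spectrogram preprocessing described above can be reproduced, for instance, with SciPy's STFT routine; only the window length is specified in the text, so the file name and the hop size in the sketch below are assumptions, and the recording is assumed to be available as a mono WAV file.
\begin{verbatim}
import numpy as np
from scipy.io import wavfile
from scipy.signal import stft

# "piano_sequence.wav" is a placeholder name for the recording used here.
fs, x = wavfile.read("piano_sequence.wav")
x = x.astype(np.float64)

# Hamming window of length L = 1024; the hop size (via noverlap) is our assumption.
f, t, Z = stft(x, fs=fs, window="hamming", nperseg=1024, noverlap=1024 - 256)

S = np.abs(Z) ** 2   # power spectrogram (513 frequency bins x number of frames)
\end{verbatim}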
\begin{figure} \caption{Music score (top) and original audio signal (bottom)} \label{fig:piano_seq} \end{figure} \begin{figure} \caption{Music components obtained by (a) AIRLS-NMF and (b) ARD-NMF on the short piano sequence.} \label{fig:results-piano} \end{figure} \section{Conclusion} This paper presents a novel generic formulation of the low-rank matrix factorization problem. Borrowing ideas from iteratively reweighted approaches for rank minimization, a reweighted version of the sum of the squared Frobenious norms of the matrix factors i.e., a non-convex variational characterization of the nuclear norm, is defined. The proposed framework encapsulates other state-of-the-art approaches for low-rank imposition on the matrix factorization setting. By focusing on a specific instance of this scheme we generate a joint-column sparsity inducing regularizer that couples the columns of the matrix factors. The ubiquity of the proposed approach is demonstrated in the problems of denoising, matrix completion and nonnegative matrix factorization (NMF). To this end, under the block successive upper bound minimization (BSUM) framework, Newton-type algorithms are devised for addressing the afore-mentioned problems. The efficiency of the proposed algorithms in handling big and high-dimensional data as compared to other state-of-the-art algorithms is illustrated in a wealth of simulated and real data experiments. \setcounter{page}{1} \section*{Appendix} \subsection*{Proof of Lemma 1} In denoising and matrix completion, the surrogate functions $l(\mathbf{U}| \mathbf{U}_k,\mathbf{V}_k)$ and $g(\mathbf{V}|\mathbf{U}_{k+1},\mathbf{V}_k)$ given in eqs. (\ref{eq:upper_bound_l}) and (\ref{eq:upper_bound_g}), are twice continuously differentiable and constitute approximations of the second order Taylor expansions of the initial cost functions around ($\mathbf{U}_k,\mathbf{V}_k$) and ($\mathbf{U}_{k+1},\mathbf{V}_k$) respectively. In (\ref{eq:upper_bound_l}), the true Hessian $\mathbf{H}_{\mathbf{U}_k}$ of $f(\mathbf{U},\mathbf{V}_k)$ at $\mathbf{U}_k$ has been approximated by the $md \times md$ positive-definite block diagonal matrix $\bar{\mathbf{H}}_{\mathbf{U}_k}$ defined in (\ref{hessian_bd}). $\bar{\mathbf{H}}_{\mathbf{V}_k}$ is similarly defined. Our analysis is next focused on $l(\mathbf{U}|\mathbf{U}_k,\mathbf{V}_k)$. It can be easily shown that similar derivations can be made for $g(\mathbf{V}|\mathbf{U}_{k+1},\mathbf{V}_{k})$. As it can be seen by eq. (\ref{eq:upper_bound_l}), $l(\mathbf{U}|\mathbf{U}_k,\mathbf{V}_k)$ equals $f(\mathbf{U},\mathbf{V}_k)$ at $(\mathbf{U}_k,\mathbf{V}_k)$. In order to show that it majorizes $f(\mathbf{U},\mathbf{V}_k)$ for all other points closeby, it suffices to show that matrix $\mathbf{A} = \bar{\mathbf{H}}_{\mathbf{U}_k} - \mathbf{H}_{\mathbf{U}_k}$ is positive semi-definite \cite{razaviyayn2013unified}. Next we prove that for each of the two problems examined, the above-mentioned property holds for $\mathbf{A}$. In denoising $\tilde{\mathbf{H}}_{\mathbf{U}_k} = \mathbf{V}_k^T\mathbf{V}_k + \lambda\mathbf{D}_{(\mathbf{U}_k,\mathbf{V}_k)}$, where $\mathbf{D}_{(\mathbf{U}_k,\mathbf{V}_k)}$ is defined in eq. (\ref{definition_D}). 
Moreover for the exact Hessian $\mathbf{H}_{\mathbf{U}_k}$ at $\mathbf{U}_k$ we have \small \begin{align} &{\mathbf{H}}_{\mathbf{U}_k} = \nonumber \\ &\left[ \begin{array}{c c c c} \mathbf{V}_k^T\mathbf{V}_k+ \mathbf{K}_{11} & \mathbf{K}_{12} & \dots & \mathbf{K}_{1m} \\ \mathbf{K}_{12} & \mathbf{V}^T_k\mathbf{V}_k+ \mathbf{K}_{22} & \ddots & \vdots \\ \vdots & \ddots & \ddots & \mathbf{K}_{(m-1)m} \\ \mathbf{K}_{1m} & \dots & \mathbf{K}_{(m-1)m} & \mathbf{V}^T_k\mathbf{V}_k+ \mathbf{K}_{mm} \end{array} \right] \label{exact_Hessian_denoising} \end{align} \normalsize where \small \begin{align} &\mathbf{K}_{ij} = \nonumber \\ &\begin{cases} \mathrm{diag}\left(\frac{\|\boldsymbol{\mathit{u}}^k_1\|^2_2 + \|\boldsymbol{\mathit{v}}^k_1\|^2_2 - (u_{i1}^k)^2+ \eta^2}{\left(\|\boldsymbol{\mathit{u}}^k_1\|^2_2 + \|\boldsymbol{\mathit{v}}^k_1\|^2_2 + \eta^2\right)^{\frac{3}{2}}}, \cdots, \frac{\|\boldsymbol{\mathit{u}}^k_d\|^2_2 + \|\boldsymbol{\mathit{v}}^k_d\|^2_2 - (u_{id}^k)^2+ \eta^2}{\left(\|\boldsymbol{\mathit{u}}^k_d\|^2_2 + \|\boldsymbol{\mathit{v}}^k_d\|^2_2 + \eta^2\right)^{\frac{3}{2}}} \right), \text{if} \; i=j \\ \\ \mathrm{diag}\left(\frac{-u_{i1}^k u_{j1}^k}{\left(\|\boldsymbol{\mathit{u}}^k_1\|^2_2 + \|\boldsymbol{\mathit{v}}^k_1\|^2_2 + \eta^2\right)^{\frac{3}{2}}}, \cdots, \frac{-u_{id}^k u_{jd}^k}{\left(\|\boldsymbol{\mathit{u}}^k_d\|^2_2 + \|\boldsymbol{\mathit{v}}^k_d\|^2_2 + \eta^2\right)^{\frac{3}{2}}} \right), \text{if} \; i \neq j \end{cases} \label{kij} \end{align} \normalsize Hence matrix $\mathbf{A}$ takes the form given at the top of the next page. \tiny \begin{figure*}\label{eq:Amatrix} \end{figure*} \normalsize Elaborating on $\mathbf{A}$ we get from (\ref{eq:Amatrix}), (\ref{kij}) and (\ref{definition_D}), \begin{align} \mathbf{A}_{ij} = \mathrm{diag}\Big( \frac{u^k_{i1}u^k_{j1}}{\left(\|\boldsymbol{\mathit{u}}^k_1\|^2_2 + \|\boldsymbol{\mathit{v}}^k_1\|^2_2 + \eta^2\right)^{\frac{3}{2}}}, \cdots,\nonumber \\ \frac{u^k_{id}u^k_{jd}}{\left(\|\boldsymbol{\mathit{u}}^k_d\|^2_2 + \|\boldsymbol{\mathit{v}}^k_d\|^2_2 + \eta^2\right)^{\frac{3}{2}}}\Big).\label{Aij} \end{align} Notice that for \\ $\mathbf{B}_i = \mathrm{diag}\left(\frac{u^k_{i1}}{\left(\|\boldsymbol{\mathit{u}}^k_1\|^2_2 + \|\boldsymbol{\mathit{v}}^k_1\|^2_2 + \eta^2\right)^{\frac{3}{4}}},\dots,\frac{u^k_{id}}{\left(\|\boldsymbol{\mathit{u}}^k_d\|^2_2 + \|\boldsymbol{\mathit{v}}^k_d\|^2_2 + \eta^2\right)^{\frac{3}{4}}}\right)$, $\mathbf{A}_{ij}=\mathbf{B}_i^T\mathbf{B}_j$. So by defining $\mathbf{B}=[\mathbf{B}_1,\ldots,\mathbf{B}_d]$, it is straightforward that $\mathbf{A}=\mathbf{B}^T\mathbf{B}$, that is $\mathbf{A}$ is positive semi-definite. In matrix completion, the exact Hessian $\mathbf{H}_{\mathbf{U}_k}$ differs from that given in (\ref{exact_Hessian_denoising}) in the diagonal blocks only. More specifically, the $i$th diagonal block of $\mathbf{H}_{\mathbf{U}_k}$ takes now the form $\mathbf{V}^T\boldsymbol{\Phi}_i\mathbf{V} + \mathbf{K}_{ii}$, where $\boldsymbol{\Phi}_i$ is a $n \times n$ diagonal matrix containing ones on indexes included in the set $\Omega$ and related to the $i$th row of $\mathbf{Y} $ and zeros elsewhere. Since $\mathbf{V}^T\mathbf{V}- (\mathbf{V}^T\boldsymbol{\Phi}_i\mathbf{V})\succeq 0$, we can easily follow the same path as above for proving the semi-definiteness of the respective matrix $\mathbf{A}$. 
\subsection*{Proof of Lemma 2} Working as in the proof of Lemma 1, it can be shown that the surrogate functions are upper bounds of the actual cost functions, provided that the matrices $\frac{1}{a^k_{\mathbf{U}}}\tilde{\mathbf{H}}^{\mathcal{I}^k_{\mathbf{U}}}_{\mathbf{U}} - \mathbf{H}_{\mathbf{U}_k}$ and $\frac{1}{a^k_{\mathbf{V}}}\tilde{\mathbf{H}}^{\mathcal{I}^k_{\mathbf{V}}}_{\mathbf{V}} - \mathbf{H}_{\mathbf{V}_k}$ are positive semi-definite. By using inequalities of the form $\lambda_{min}(\mathbf{A})\|\mathbf{x}\|^2_2\leq \mathbf{x}^T\mathbf{A}\mathbf{x}\leq \lambda_{max}(\mathbf{A})\|\mathbf{x}\|^2_2$ (where $\lambda_{min}(\mathbf{A})$ and $\lambda_{max}(\mathbf{A})$ denote the minimum and the maximum eigenvalues of matrix $\mathbf{A}$, respectively), it can be easily verified that this property always holds if $a^k_{\mathbf{U}}$ and $a^k_{\mathbf{V}}$ are bounded above as stated in the Lemma. \subsection*{Proof of Proposition 2} The following analysis is the same for the denoising and matrix completion problems. From Lemma 1 we have \begin{align} l(\mathbf{U}|\mathbf{U}_k,\mathbf{V}_k) \geq f(\mathbf{U},\mathbf{V}_k). \end{align} Since $\mathbf{U}_{k+1} = \underset{\mathbf{U}}{\mathrm{arg min}}\;\;\; l(\mathbf{U}|\mathbf{U}_k,\mathbf{V}_k) $, we get \begin{align} l(\mathbf{U}_{k+1}|\mathbf{U}_k,\mathbf{V}_k) \leq l(\mathbf{U}_k|\mathbf{U}_k,\mathbf{V}_k) \equiv f(\mathbf{U}_k,\mathbf{V}_k) \label{eq:prop_1_c} \end{align} and hence \begin{align} f(\mathbf{U}_{k+1},\mathbf{V}_k) \leq f(\mathbf{U}_k,\mathbf{V}_k). \label{eq:prop_1_a} \end{align} Following the same rationale, and since $\mathbf{V}_{k+1} = \underset{\mathbf{V}}{\mathrm{arg min}}\;\;\; g(\mathbf{V}|\mathbf{U}_{k+1},\mathbf{V}_k)$, we get \begin{align} g(\mathbf{V}_{k}|\mathbf{U}_{k+1},\mathbf{V}_k) \equiv f(\mathbf{U}_{k+1},\mathbf{V}_k) \geq \nonumber \\ g(\mathbf{V}_{k+1}|\mathbf{U}_{k+1},\mathbf{V}_k) \geq f(\mathbf{U}_{k+1},\mathbf{V}_{k+1}). \label{eq:prop_1_b} \end{align} Combining (\ref{eq:prop_1_a}) and (\ref{eq:prop_1_b}) we get (\ref{eq:prop_1}). In nonnegative matrix factorization, by invoking Proposition 2.4.1 of \cite{bertsekas1999nonlinear}, we have that there exists an $\bar{a}_{\mathbf{U}}$ which guarantees that for every $a^k_{\mathbf{U}}\in (0,\bar{a}_{\mathbf{U}})$ we have \begin{align} f(\mathbf{U}_{k+1}(a^k_{\mathbf{U}}),\mathbf{V}_k) \leq f(\mathbf{U}_k,\mathbf{V}_k). \label{eq:prop_2_a} \end{align} Similarly, there exists $a^k_{\mathbf{V}}\in (0,\bar{a}_{\mathbf{V}})$ for which \begin{align} f(\mathbf{U}_{k+1}(a^k_{\mathbf{U}}),\mathbf{V}_{k+1}(a^k_{\mathbf{V}})) \leq f(\mathbf{U}_{k+1}(a^k_{\mathbf{U}}),\mathbf{V}_{k}). \label{eq:prop_2_b} \end{align} Relations (\ref{eq:prop_2_a}) and (\ref{eq:prop_2_b}) lead us to (\ref{eq:prop_1}).
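The descent mechanism used in the proof above can be illustrated on a scalar caricature of the problem: a quadratic data term plus the smoothed absolute value $\lambda\sqrt{u^2+\eta^2}$, majorized by the tangent of the concave square root. The sketch below is only illustrative (the toy objective, the constants and the number of iterations are arbitrary choices) and is not the matrix-valued algorithm of the paper; it merely shows the non-increasing behaviour of the cost established in (\ref{eq:prop_1_a})--(\ref{eq:prop_1_b}).
\begin{verbatim}
import numpy as np

# Scalar caricature of the majorize-minimize argument: f(u) = (u - y)^2 + lam*sqrt(u^2 + eta^2).
# The concave map t -> sqrt(t) is bounded above by its tangent at t_k = u_k^2 + eta^2, which
# gives a quadratic surrogate l(u|u_k) >= f(u) with equality at u = u_k (IRLS-type reweighting).
y, lam, eta = 3.0, 4.0, 1e-3
f = lambda u: (u - y) ** 2 + lam * np.sqrt(u ** 2 + eta ** 2)

u = 10.0                                     # arbitrary starting point
values = [f(u)]
for _ in range(20):
    w = 1.0 / np.sqrt(u ** 2 + eta ** 2)     # current weight
    u = y / (1.0 + 0.5 * lam * w)            # minimizer of the quadratic surrogate
    values.append(f(u))

# The objective is non-increasing, mirroring the descent chain in the proof above.
print(all(b <= a + 1e-12 for a, b in zip(values, values[1:])))
\end{verbatim}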
\subsection*{Proof of Lemma 3} Using Lemma 1, we have: \begin{enumerate} \item For Algorithms 1,2: \\[-0.1cm] \begin{align} f(\mathbf{U}_k,\mathbf{V}_k) - f(\mathbf{U}_{k+1},\mathbf{V}_k) \geq \nonumber \\ l(\mathbf{U}_k|\mathbf{U}_k,\mathbf{V}_k) - l(\mathbf{U}_{k+1}|\mathbf{U}_k,\mathbf{V}_k) \;\;\; \text{and} \label{eq_lemma_2_1}\\ f(\mathbf{U}_{k+1},\mathbf{V}_k) - f(\mathbf{U}_{k+1},\mathbf{V}_{k+1}) \geq \nonumber \\ g(\mathbf{V}_k|\mathbf{U}_{k+1},\mathbf{V}_k) - g(\mathbf{V}_{k+1}|\mathbf{U}_{k+1},\mathbf{V}_k) \label{eq_lemma_2_2} \end{align} Adding (\ref{eq_lemma_2_1}) and (\ref{eq_lemma_2_2}), we arrive at the following inequality \begin{align} &f(\mathbf{U}_k,\mathbf{V}_k) - f(\mathbf{U}_{k+1},\mathbf{V}_{k+1}) \geq \nonumber \\ &l(\mathbf{U}_k|\mathbf{U}_k,\mathbf{V}_k) - l(\mathbf{U}_{k+1}|\mathbf{U}_k,\mathbf{V}_k) \nonumber \\ & + g(\mathbf{V}_k|\mathbf{U}_{k+1},\mathbf{V}_k) - g(\mathbf{V}_{k+1}|\mathbf{U}_{k+1},\mathbf{V}_k) \label{eq_lemma_2_3} \end{align} Since $\mathbf{U}_{k+1}$ and $\mathbf{V}_{k+1}$ are stationary points of $l(\mathbf{U}|\mathbf{U}_k,\mathbf{V}_k)$ and $g(\mathbf{V}| \mathbf{U}_{k+1},\mathbf{V}_k)$ respectively\\ ($\nabla_{\mathbf{U}}l(\mathbf{U}_{k+1}|\mathbf{U}_k,\mathbf{V}_k)= \mathbf{0}$ and $\nabla_{\mathbf{V}}g(\mathbf{V}_{k+1}|\mathbf{U}_{k+1},\mathbf{V}_k)=\mathbf{0}$), and by using their second-order Taylor expansions around $\mathbf{U}_{k+1}$ and $\mathbf{V}_{k+1}$ respectively, we have \begin{align} & l(\mathbf{U}_k|\mathbf{U}_k,\mathbf{V}_k) - l(\mathbf{U}_{k+1}| \mathbf{U}_k,\mathbf{V}_k) = \nonumber \\ & \frac{1}{2}\mathrm{tr}\{\left(\mathbf{U}_k - \mathbf{U}_{k+1}\right)\big(\mathbf{V}^T_k\mathbf{V}_k + \nonumber \\ &\lambda\mathbf{D}_{(\mathbf{U}_k,\mathbf{V}_k)}\big)\left(\mathbf{U}_k - \mathbf{U}_{k+1}\right)^T \} \\ &= \frac{1}{2}\|\mathbf{V}_k\left(\mathbf{U}_k - \mathbf{U}_{k+1}\right)^T \|_F^2 + \nonumber \\ &\frac{\lambda}{2}\|\mathbf{D}^{\frac{1}{2}}_{(\mathbf{U}_k,\mathbf{V}_k)}\left(\mathbf{U}_k - \mathbf{U}_{k+1}\right)^T \|^2_F \label{eq_lemma_2_4} \end{align} and \begin{align} &g(\mathbf{V}_k|\mathbf{U}_{k+1},\mathbf{V}_k) - g(\mathbf{V}_{k+1}| \mathbf{U}_{k+1},\mathbf{V}_k) = \nonumber \\ &\frac{1}{2}\mathrm{tr}\{\left(\mathbf{V}_k - \mathbf{V}_{k+1}\right)\big(\mathbf{U}^T_{k+1}\mathbf{U}_{k+1} + \nonumber \\ &\lambda\mathbf{D}_{(\mathbf{U}_{k+1},\mathbf{V}_{k})}\big)\left(\mathbf{V}_{k} - \mathbf{V}_{k+1}\right)^T \} \\ &= \frac{1}{2}\|\mathbf{U}_{k+1}\left(\mathbf{V}_k - \mathbf{V}_{k+1}\right)^T \|_F^2 + \nonumber \\ &\frac{\lambda}{2}\|\mathbf{D}^{\frac{1}{2}}_{(\mathbf{U}_{k+1},\mathbf{V}_k)}\left(\mathbf{V}_k - \mathbf{V}_{k+1}\right)^T \|^2_F \label{eq_lemma_2_5} \end{align} Combining (\ref{eq_lemma_2_4}), (\ref{eq_lemma_2_5}) and (\ref{eq_lemma_2_3}) we get inequality (\ref{lemma_2_a}). \item For Algorithm 3: \\ Inequality (\ref{lemma_2_b}) can be derived following a similar process as above. However, there are two subtle points which lead to a slightly different lower bound compared to that of (\ref{lemma_2_a}). More concretely, the first part of $\Delta^b((\mathbf{U}_k,\mathbf{V}_k),(\mathbf{U}_{k+1},\mathbf{V}_{k+1})) $ is now determined by the approximate Hessian adopted for the NMF problem.
Second, the constrained nature of the optimization problem is translated into the modified condition of stationarity, which results in the inclusion of two additional positive terms, i.e., $\mathrm{tr}\{(\mathbf{U}_k-\mathbf{U}_{k+1})\nabla_{\mathbf{U}}f(\mathbf{U}_k,\mathbf{V}_k)\}$ and $\mathrm{tr}\{(\mathbf{V}_{k}-\mathbf{V}_{k+1})\nabla_{\mathbf{V}}f(\mathbf{U}_{k+1},\mathbf{V}_k)\}$. \end{enumerate} \subsection*{Proof of Lemma 4} If $(\mathbf{U},\mathbf{V})$ is a fixed point, i.e. $\mathbf{U} = \mathbf{U}_{\ast}$ and $\mathbf{V}=\mathbf{V}_{\ast}$, then it is easily shown that $\Delta^a((\mathbf{U},\mathbf{V}),(\mathbf{U}_{\ast},\mathbf{V}_{\ast}))=0$ and $\Delta^b((\mathbf{U},\mathbf{V}),(\mathbf{U}_{\ast},\mathbf{V}_{\ast}))=0$. Conversely, using (\ref{eq_lemma_2_4}) and (\ref{eq_lemma_2_5}) and since all the summands of $\Delta^a((\mathbf{U},\mathbf{V}),(\mathbf{U}_{\ast},\mathbf{V}_{\ast}))$ are nonnegative, we have that if $\Delta^a((\mathbf{U},\mathbf{V}),(\mathbf{U}_{\ast},\mathbf{V}_{\ast}))=0$ then \begin{align} l(\mathbf{U}|\mathbf{U},\mathbf{V}) - l(\mathbf{U}_{\ast}| \mathbf{U},\mathbf{V}) = 0 \;\; \text{and} \\ g(\mathbf{V}|\mathbf{U}_{\ast},\mathbf{V}) - g(\mathbf{V}_{\ast}| \mathbf{U}_{\ast},\mathbf{V}) = 0. \end{align} Since both $l(\mathbf{U}|\mathbf{U},\mathbf{V})$ and $ g(\mathbf{V}|\mathbf{U}_{\ast},\mathbf{V})$ are strictly convex functions, $\mathbf{U}_{\ast}$ and $\mathbf{V}_{\ast}$ are uniquely determined. Hence the above equalities hold only if $(\mathbf{U},\mathbf{V})=(\mathbf{U}_{\ast},\mathbf{V}_{\ast})$, that is, $(\mathbf{U},\mathbf{V})$ is a fixed point of Algorithms 1 and 2. The same procedure can be followed for proving the second argument of the Lemma concerning Algorithm 3. \subsection*{Proof of Proposition 3} From (\ref{lemma_2_a}), by adding $K$ successive terms we get \begin{align} \sum^K_{k=1} \delta^a_k \leq f(\mathbf{U}_1,\mathbf{V}_1) - f(\mathbf{U}_K,\mathbf{V}_K) \leq f(\mathbf{U}_1,\mathbf{V}_1) - f^{\infty} < \infty \label{prop_3_sum} \end{align} Therefore, the sequence $\delta^a_k$ is bounded and hence it contains convergent subsequences. Moreover, it can be shown that as $K\rightarrow \infty$, $\underset{1\leq k \leq K}{\mathrm{min}} \delta_k^a \rightarrow 0$. Hence, by Lemmas 3 and 4, we know that the corresponding limit points of the iterates $(\mathbf{U}_k,\mathbf{V}_k)$ are in fact fixed points of Algorithms 1 and 2. By (\ref{lemma_2_a}) and as a consequence of the continuity of the cost functions, it can be easily seen that these fixed points actually correspond to stationary points thereof. The rates of convergence arise by substituting the first part of inequality (\ref{prop_3_sum}) using $K \underset{1\leq k \leq K}{\mathrm{min}} \delta_k^a \leq \sum^K_{k=1} \delta^a_k $. The proof is exactly the same for Algorithm 3, using $\delta^b_k$ in place of $\delta^a_k$. \end{document}
\begin{document} \title{Random Sampling of Mellin Band-limited Signals} \begin{abstract} In this paper, we address the random sampling problem for the class of Mellin band-limited functions $\mathcal B_T$ that are concentrated on a bounded cube. It is established that any function in $\mathcal B_T$ can be approximated by an element in a finite-dimensional subspace of $\mathcal B_T.$ Utilizing the notion of covering number and applying Bernstein's inequality to sums of independent random variables, we prove that the random sampling inequality holds with overwhelming probability provided the sampling size is large enough. \\ \vskip0.001in \noindent Keywords: Sampling Inequality; Random sampling; Mellin Transform; Mellin band-limited functions; Reproducing kernel space \\ \vskip0.001in \noindent Mathematics Subject Classification (2020): 94A20, 41A05, 42A61, 42C15 \end{abstract} \section{Introduction} The theory of sampling and reconstruction has been receiving significant attention in the broad areas of approximation theory, signal processing, and digital communication. The problem of sampling for the function space $V$ is mainly concerned with the stable recovery of a function $f \in V$ from its sample values $\{f(x_{j})\}$ on some countable set $X=\{x_{j}:j \in J \}$, without losing any information. Such a set $X$ is known as a stable sampling set. For the space of Mellin band-limited functions $\mathcal B_T$ (defined in Section \ref{section2}), the sampling problem is equivalent to finding a sampling set $\{x_j:j\in J\}$ such that the sampling inequality \begin{equation} \label{main inq} A \|f\|_{X_c^{2}(\mathbb R^{n}_{+})}^2 \leq \sum_{j \in J} |f(x_j)|^2 x_{j}^{2c} \leq B \|f\|_{X_c^{2}(\mathbb R^{n}_{+})}^2, \end{equation} holds for all $f$ in $\mathcal B_T$, for some $A,B>0$. Here $\mathbb R_{+}$ denotes the set of positive real numbers. The Banach space $X_c^2(\mathbb{R}^{n}_{+})$ is the collection of all measurable functions $f$ with $$\|f\|_{X_c^{2}(\mathbb R^{n}_{+})}^2=\int_{\mathbb R_{+}^{n}} x^{2c-1}|f(x)|^2\, dx<\infty.$$ An important result in this direction was given collectively by Bertero and Pike \cite{bertero} and Gori \cite{gori}, which asserts that any function $f$ in $\mathcal B_T$ can be stably recovered from its samples at the exponentially spaced points $\{ e^{\frac{k}{T}}:k\in \mathbb Z \}$, with the reconstruction formula \begin{equation} \label{expsmpl} f(x)= \sum_{k \in \mathbb Z} f(e^{\frac{k}{T}})\, \text{lin}_{\frac{c}{T}}(e^{-k}x^{T}), \qquad x\in \mathbb R_{+}, \end{equation} where $\text{lin}_{c}(x) = x^{-c}\text{sinc}\,(\log x)$ and $\text{sinc}\,(u)= \frac{\sin \pi u}{\pi u}.$ In this case, the sampling set $\{e^{k/T}:k\in \mathbb Z\}$ is a stable sampling set for $\mathcal B_T$ and $A=B=T.$ The exponentially spaced sampling set is useful in applications where the signal information accumulates near zero. The exponential sampling method is also useful for dealing with various real-world problems arising in science and engineering (see \cite{casasent,ostrowsky}). Butzer and Jansche \cite{butzer5} pioneered the mathematical study of the exponential sampling formula \eqref{expsmpl} employing the tools of the theory of the Mellin transform.
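To illustrate the reconstruction formula \eqref{expsmpl} numerically in the one-dimensional case, the following sketch evaluates a truncated version of the series for a simple test function; the choices of $c$, $T$, the shift in the test function and the truncation level $K$ are arbitrary, and the truncated series only approximates $f$, with the error shrinking as $K$ grows.
\begin{verbatim}
import numpy as np

# Numerical sketch of the exponential sampling formula (1-D); constants are arbitrary choices.
c, T, a, K = 0.5, 2.0, 0.3, 500
f = lambda x: x**(-c) * np.sinc(T * np.log(x) - a)         # np.sinc(u) = sin(pi u)/(pi u)
lin = lambda x, nu: x**(-nu) * np.sinc(np.log(x))          # lin_nu(x) = x^{-nu} sinc(log x)

k = np.arange(-K, K + 1)
samples = f(np.exp(k / T))                                 # samples at the points e^{k/T}

x = np.linspace(0.2, 5.0, 200)
recon = sum(samples[i] * lin(np.exp(-k[i]) * x**T, c / T) for i in range(len(k)))
print(np.max(np.abs(recon - f(x))))                        # small truncation error
\end{verbatim}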
Some recent developments related to the exponential sampling problem can be observed in \cite{ownkant,nfo,bardaro7,mleb}.\par Signals are recovered from their random measurements in the fields of image processing \cite{chan}, compressed sensing \cite{candes,eldar}, and machine learning \cite{cucker1,cover}. The random sampling problem deals with the following question: what is the probability that uniformly distributed random sample points satisfy the sampling inequality \eqref{main inq} for some class of functions in $\mathcal B_T$? Due to the randomness of the samples, the sampling inequality \eqref{main inq} is not guaranteed to hold for $\mathcal B_T$. Hence, the random sampling problem seeks to estimate the probability $$ P \Big( A \|f\|_{X_c^{2}(\mathbb R^{n}_{+})}^2 \leq \sum_{j \in J} |f(x_j)|^2 x_{j}^{2c} \leq B \|f\|_{X_c^{2}(\mathbb R^{n}_{+})}^2 \Big) \geq 1-\epsilon,$$ for some class of Mellin band-limited functions, where $\epsilon >0$ is arbitrarily small.\par In the finite-dimensional case, the random sampling problem was studied for the space of multivariate trigonometric polynomials \cite{2005,xian}. For the infinite-dimensional space of Fourier band-limited functions, the stable recovery from random samples scattered throughout $\mathbb R^n$ was shown to be impossible \cite{groch}. This motivated the consideration of random samples distributed on a bounded cube $C\subset \mathbb R^n$ and of the class of functions concentrated on $C$. It was established in \cite{groch,groch1} that the class of Fourier band-limited functions concentrated on $C$ can be stably recovered from its samples at uniformly distributed random points on $C$. The random sampling problem has been investigated for shift-invariant spaces in \cite{devrandom,fuhr,yang}. In recent years, the related random sampling problem has been studied on the sphere in \cite{pams} and for the image space of an idempotent integral operator \cite{sun2021,jma,rks3}, which generalizes shift-invariant spaces and the space of signals with finite rate of innovation; see \cite{sun2010}. \par A Mellin band-limited function cannot be Fourier band-limited at the same time, and it is analytically extensible to the Riemann surface of the (complex) logarithm (see \cite{bardaro4}). Since one must appropriately extend the idea of the Bernstein spaces, involving Riemann surfaces, the theory of Mellin band-limited functions differs greatly from the theory of Fourier band-limited functions. The main aim of the paper is to study the stability of the random sampling set for the space of Mellin band-limited functions, using the Mellin transform and its inversion theory. Motivated by \cite{groch}, we consider the problem of random sampling for the class of $\delta$-concentrated Mellin band-limited functions on $C_{R}:=[1/R, R]^n\subset \mathbb R_{+}^n$, i.e., $$\mathcal B_{T,\delta}:=\Big\{f \in \mathcal B_{T}: \int_{C_R} |f(x)|^2 x^{2c-1} dx \geq (1-\delta)\|f\|^{2}_{X_{c}^{2}(\mathbb R_{+}^n)} \Big\},$$ where $\delta\in (0,1),$ $R>1,$ and $c \in \mathbb{R}^n.$ \par The plan of the paper is as follows. Section \ref{section2} provides some preliminaries required to derive the desired results. In Section \ref{section3}, we first establish that any element in $\mathcal B_{T}$ can be approximated by an element in a finite-dimensional subspace of $\mathcal{B}_{T}$ with the help of the representation formula \eqref{expsmpl}.
Then we show that any bounded subset of $\mathcal{B}_{T,\delta}$ is totally bounded with respect to $\|\cdot \|_{X_{c}^{\infty}(C_R)}$. Finally, by using the notion of covering number and the well-known Bernstein inequality, we prove that the random sampling inequality holds with overwhelming probability for functions concentrated on the compact set $C_R$. \section{Preliminaries}\label{section2} In this section, we present some preliminary results; the basic notation used in the rest of the paper is given in Table 1. \par For $1\leq p<\infty$, let $L^{p}(\mathbb R^{n}_{+})$ be the space of $p$-integrable functions in the Lebesgue sense on $\mathbb{R}^{n}_{+}$ with the usual $p$-norm. Moreover, $L^{\infty}(\mathbb R^{n}_{+})$ denotes the class of bounded measurable functions defined on $\mathbb{R}^{n}_{+}$ with norm $ \|f\|_{L^{\infty}(\mathbb R^{n}_{+})} := \operatorname{ess\,sup}_{x \in \mathbb{R}^{n}_{+}}|f(x)|.$ Let $c \in \mathbb R^n$ be fixed. For $1 \leq p \leq \infty,$ we define the space $$X_{c}^{p}(\mathbb{R}^{n}_{+}):=\big\{f:\mathbb{R}^{n}_{+} \rightarrow \mathbb{C}: (\cdot)^{c-1/p}f(\cdot) \in L^{p}(\mathbb{R}^{n}_{+})\big\}$$ equipped with norm $$\|f\|_{X_{c}^{p}(\mathbb{R}^{n}_{+})}= \|(\cdot)^{c-1/p} f(\cdot) \|_{L^{p}(\mathbb{R}^{n}_{+})}.$$ It is important to mention that $X_{c}^{p}(\mathbb{R}^{n}_{+})$ is a Banach space (see \cite{butzer7}). Moreover, for $p=2$, $X_{c}^{2}(\mathbb{R}^{n}_{+})$ is a Hilbert space with the inner product $$ \langle f,g \rangle_{X_{c}^{2}(\mathbb{R}^{n}_{+})} := \int_{\mathbb R_{+}^{n}} f(x) \overline{g(x)} x^{2c-1} dx.$$ Let $c + it =:s \in \mathbb C^n.$ Then the multi-dimensional Mellin transform of any function $f \in X_{c}^{1}(\mathbb{R}^{n}_{+})$ is given by (see \cite{tuan}) $$\hat{M}[f](s) :=\int_{\mathbb{R}^{n}_{+}} f(x)\ x^{s-1}\ dx.$$ For a fixed $c \in \mathbb R^n,$ the corresponding inverse Mellin transform is defined as $$\hat{M}^{-1}[f](x) := \frac{1}{(2 \pi i)^n}\int_{c+i \mathbb{R}^{n}} \hat{M}[f](s) \ x^{-s}\ ds.$$ Riemann first employed the Mellin transform in his renowned memoir on the Riemann zeta function. Later, Mamedov studied the Mellin transform and its properties in \cite{mamedeo}. The article \cite{butzer3} presents various theoretical aspects of the Mellin transform along with its applications to different areas of mathematical analysis.
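A convenient way to evaluate the Mellin transform numerically on the line $\mathrm{Re}\, s=c$ is to pass to logarithmic coordinates, where it becomes a Fourier-type integral: substituting $x=e^u$ gives, in one dimension, $\hat{M}[f](c+it)=\int_{\mathbb R} f(e^u)\,e^{cu}e^{itu}\,du$. The following sketch checks the two parametrizations against each other for the classical pair $f(x)=e^{-x}$, $\hat M[f](s)=\Gamma(s)$; the test function, the values of $c,t$ and the quadrature grids are arbitrary choices, and the agreement is only up to quadrature error.
\begin{verbatim}
import numpy as np

# Sketch: Mellin transform on Re(s) = c computed directly and in logarithmic coordinates.
f = lambda x: np.exp(-x)                    # its Mellin transform is Gamma(s)
c, t = 1.5, 2.0

x = np.linspace(1e-8, 60.0, 400_001)
direct = np.sum(f(x) * x**(c + 1j * t - 1)) * (x[1] - x[0])

u = np.linspace(-18.0, 5.0, 400_001)
log_form = np.sum(f(np.exp(u)) * np.exp((c + 1j * t) * u)) * (u[1] - u[0])

print(abs(direct - log_form))               # small: both approximate Gamma(c + it)
\end{verbatim}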
For some significant developments of the Mellin theory, we also refer to \cite{bardaro1,bardaro2,bardaro3,bardaro4}.\par \begin{defin} For $c,t \in \mathbb{R}^n$ and $T>0,$ any function $f \in X_{c}^{2}(\mathbb{R}^{n}_{+})$ is said to be Mellin band-limited to $[-T,T]^n$ if $\hat{M}[f](c+it)=0$ for all $\|t\|_{\infty}> T.$ We denote by $\mathcal{B}_{T}$ the space of all Mellin band-limited functions, i.e., $$ \mathcal B_{T} = \big\{f \in X_{c}^{2}(\mathbb{R}^{n}_{+}): \hat{M}[f](c+it)=0 \ \mbox{ for \ all}\ \|t\|_{\infty} >T \big\}.$$ \begin{table}[H] \label{table1} \centering \begin{tabular}{|c|c|} \hline Notation & Remark\\ \hline $x:=\big(x(1),x(2),\dots,x(n)\big)\in \mathbb R^n$ & $x(i)\in \mathbb R$\\ \hline $\frac{x}{y}:=\Big(\frac{x(1)}{y(1)},\frac{x(2)}{y(2)},\dots,\frac{x(n)}{y(n)}\Big)$ & $x,y\in \mathbb R_{+}^n$\\ \hline $\log x:=\big(\log x(1),\log x(2),\dots,\log x(n)\big)$ & $x\in \mathbb R_{+}^n$\\ \hline $x^c:=x(1)^{c(1)}\cdot x(2)^{c(2)}\cdots x(n)^{c(n)}$ & $x\in \mathbb R_{+}^n$, $c\in \mathbb R^n$\\ \hline $\alpha^x:=\big( \alpha^{x(1)},\alpha^{x(2)},\dots,\alpha^{x(n)} \big)$ & $\alpha\in \mathbb R_{+}$, $x\in \mathbb R^n$\\ \hline $\text{sinc}\,(x):=\frac{\sin \pi x(1)}{\pi x(1)}\cdot \frac{\sin \pi x(2)}{\pi x(2)}\cdots \frac{\sin \pi x(n)}{\pi x(n)}$ & $x\in \mathbb R^n$\\ \hline $\text{lin}_{c}(x):=x^{-c}\text{sinc}\,(\log x)$ & $x\in \mathbb R_{+}^n$\\ \hline \end{tabular} \caption{Some notations used for the rest of the paper} \end{table} \end{defin} A Hilbert space $H$ of functions defined on $\Lambda$ is a reproducing kernel Hilbert space if for each $x\in \Lambda$, the point evaluation functional $f \mapsto f(x)$ is continuous, i.e., for each $x \in \Lambda,$ there exists a positive constant $C_{x}$ such that $$|f(x)| \leq C_{x} \|f\| \qquad \forall\, f \in H.$$ \begin{lemma} \label{estmt} The space $\mathcal B_T$ is a reproducing kernel space. Moreover, for $x \in \mathbb{R}^{n}_{+},$ we have $$ | f(x)| \leq x^{-c} \| f\|_{X_{c}^{2}(\mathbb{R}^{n}_{+})}\ ,\ \ \forall f \in \mathcal B_T.$$ \end{lemma} \begin{proof} Let $T>0$ and let $f$ be Mellin band-limited to $[-T,T].$ Then from \cite[Theorem 4]{bardaro1}, we have \begin{equation} \label{rkp1} f(x)= T \int_{\mathbb R_{+}} f(y)\, \text{lin}_{\frac{c}{T}} \big((x/y)^T \big) \frac{dy}{y}, \qquad x\in \mathbb R_{+}. \end{equation} Using the $n$-dimensional Mellin transform and its inverse, we extend (\ref{rkp1}) to $n$-dimensional Mellin band-limited functions. In particular, we have \begin{equation} \label{rkp} f(x)= T^n \int_{\mathbb R_{+}^n} f(y)\, \text{lin}_{\frac{c}{T}} \Big(\frac{x}{y} \Big)^T y^{-1}dy, \end{equation} where $\text{lin}_{\frac{c}{T}} \big(\frac{x}{y} \big)^T:=x^{-c}y^c \ \text{sinc}\,(T\log x-T\log y).$ In view of (\ref{rkp}) and H\"{o}lder's inequality, we have \begin{eqnarray*} |f(x)| & \leq & T^n x^{-c} \left( \int_{\mathbb{R}^{n}_{+}} |f(y)|^2 y^{2c-1} dy \right)^{1/2} \left( \int_{\mathbb{R}^{n}_{+}} \text{sinc}\,^2 \left(T \log(x/y) \right) y^{-1} dy \right)^{1/2} \\ & \leq & x^{-c} \|f\|_{X_{c}^{2}(\mathbb R_{+}^n)} \|\text{sinc}\,\|_{L^2(\mathbb{R}^{n})}\\ & \leq & x^{-c} \|f\|_{X_{c}^{2}(\mathbb R_{+}^n)}. \end{eqnarray*} This completes the proof. \end{proof} We now present an example of a Mellin band-limited function which is important due to its applications in approximation theory. For instance, it plays a vital role in the study of the convergence of generalized exponential sampling series; see \cite{bardaro7}.
\begin{exam} \cite{bardaro7} Let $c\in \mathbb R, \alpha \geq 1,$ and $k \in \mathbb{N}$. Then the \textit{Mellin Jackson kernel} is defined as $$J_{\alpha,k}(x):= C_{\alpha,k}\ x^{-c} \text{sinc}\,^{2k} \left(\frac{\log x}{2 \alpha k \pi} \right),$$ where $x\in \mathbb R_{+}$ and $\displaystyle C^{-1}_{\alpha,k} := \int_{0}^{\infty} \text{sinc}\,^{2k} \left(\frac{\log x}{2 \alpha k \pi} \right)\frac{dx}{x}.$ Since $\hat{M}[J_{\alpha,k}](c+it)=0$ for $|t| \geq \frac{1}{\alpha},$ the function $J_{\alpha,k}$ is Mellin band-limited. In particular, for $k=1,$ this reduces to the well-known \textit{Mellin Fej\'er kernel}, given by $$F_{\rho}^{c}(x)= \frac{\rho}{2\pi}x^{-c}\text{sinc}\,^{2} \left( \frac{\rho \log \sqrt{x}}{\pi} \right).$$ \end{exam} \section{Main Results} \label{section3} In this section, we present the main results of the paper. Recall that for each $f\in \mathcal B_{T},$ the following representation holds: $$ f(x)= \sum_{k \in \mathbb Z^n} f(e^{k/T})\,\text{lin}_{\frac{c}{T}}(e^{-k}x^{T}), \qquad x\in \mathbb R_{+}^n.$$ Now we aim to show that any $f \in \mathcal{B}_{T}$ can be approximated on $C_R$ by an element from a finite-dimensional subspace $\mathcal{B}^{N}_{T}$ of $\mathcal{B}_{T}$, where $\mathcal{B}^{N}_{T}$ is defined by $$\mathcal B_{T}^N= \Big\{ \sum_{k \in \mathbb Z^n \cap [-\frac{N}{2},\frac{N}{2}]^n} c_k\, \text{lin}_{\frac{c}{T}} \big(e^{-k} (\cdot)\big): c_k\in \mathbb R \Big\}.$$ \begin{thm} \label{fd} Let $\epsilon >0.$ Then for each $f\in \mathcal B_{T},$ there exists $f_{N} \in \mathcal B_{T}^N$ such that $$ \|f-f_{N} \|_{X_{c}^{\infty}(C_{R})} < \epsilon \|f\|_{X_c^2(\mathbb{R}^{n}_{+})},$$ whenever $ N > 4T \pi^{-2} \epsilon^{-2/n}+ 2 T \log R.$ \end{thm} \begin{proof} For any $f \in \mathcal{B}_{T},$ define $f_N \in \mathcal{B}^{N}_{T}$ by $$f_{N}(x)= \sum_{k \in \mathbb Z^n \cap [-\frac{N}{2},\frac{N}{2}]^n} f(e^{k/T})\, \text{lin}_{\frac{c}{T}} (e^{-k}x^{T}).$$ In view of the Cauchy--Schwarz inequality, for $x \in C_{R}$ we obtain \begin{align*} &x^c \big|f(x)-f_{N}(x) \big|\\ =& \bigg| x^c \sum_{k \in \mathbb{Z}^n \setminus[-\frac{N}{2},\frac{N}{2}]^n}f(e^{k/T})\, \text{lin}_{\frac{c}{T}} (e^{-k}x^{T}) \bigg| \\ =& \bigg| \sum_{k \in \mathbb{Z}^n \setminus[-\frac{N}{2},\frac{N}{2}]^n} f(e^{k/T}) \ e^{\frac{\langle c,k \rangle_2}{T}}\, \text{sinc}\,(T \log x-k) \bigg| \\ \leq & \bigg(\sum_{k \in \mathbb{Z}^n \setminus[-\frac{N}{2},\frac{N}{2}]^n} |f(e^{k/T})|^{2} e^{\frac{2\langle c,k \rangle_2}{T}} \bigg)^{1/2} \bigg(\sum_{k \in \mathbb{Z}^n \setminus[-\frac{N}{2},\frac{N}{2}]^n} \text{sinc}\,^{2}(T \log x - k) \bigg)^{1/2}. \numberthis \label{est} \end{align*} In view of \cite[Theorem 4]{bardaro1}, for $j,k \in \mathbb Z^n,$ we have $$ T^n e^{\frac{-2\langle c,k \rangle_2}{T}} \big\langle \text{lin}_{\frac{c}{T}}(e^{-k} x^T),\text{lin}_{\frac{c}{T}}(e^{-j} x^T) \big\rangle_{X_c^2(\mathbb{R}^{n}_{+})} = \text{lin}_{\frac{c}{T}}(e^{j-k})= \delta_{j,k}\ ,$$ where $\delta_{j,k}=$ $ \begin{cases} 1, & \text{if } j=k,\\ 0, & \text{if } j\neq k. \end{cases} $ This gives \begin{eqnarray*} \|f\|_{X_{c}^{2}(\mathbb{R}^{n}_{+})}^{2} &=& \sum_{k \in \mathbb Z^n} \sum_{j \in \mathbb Z^n} f(e^{k/T}) \overline{f(e^{j/T})}\, \big\langle \text{lin}_{\frac{c}{T}} (e^{-k}x^{T}), \text{lin}_{\frac{c}{T}} (e^{-j}x^{T}) \big\rangle_{X_{c}^{2}(\mathbb{R}^{n}_{+})} \\ &=& \frac{1}{T^n} \sum_{k \in \mathbb Z^n} |f(e^{k/T})|^{2} e^{\frac{2\langle c,k \rangle_2}{T}}.
\end{eqnarray*} Now for $\displaystyle x \in C_{R},$ we have \begin{align*} \sum_{k\in \mathbb Z^n \setminus[-\frac{N}{2},\frac{N}{2}]^n} \text{sinc}\,^{2} (T \log x - k)\leq & \pi^{-2n} \sum_{k \in \mathbb Z^n \setminus[-\frac{N}{2},\frac{N}{2}]^n} \left(\prod_{i=1}^{n} \sin^{2}\big(\pi(T\log x_i-k_i)\big) \,(T \log x_i - k_i)^{-2} \right) \\ \leq & \pi^{-2n}\int_{\mathbb R^n \setminus[-\frac{N}{2},\frac{N}{2}]^n} \left( \prod_{i=1}^{n} (T \log x_i - y_i)^{-2} \right) dy \\ \leq & \pi^{-2n}\int_{\mathbb R^n \setminus[-\frac{N}{2}+T \log R,\frac{N}{2}-T \log R]^n} y^{-2}\,dy \\ =& \frac{{4^n}}{\pi^{2n}(N-2T\log R)^n}. \end{align*} On combining these estimates, we obtain from (\ref{est}) that $$ \|f-f_{N} \|_{X_{c}^{\infty}(C_{R})} \leq \left(T^{n/2} \|f\|_{X_{c}^{2}(\mathbb{R}^{n}_{+})} \right) \left(\frac{4}{\pi^{2}(N-2T\log R)} \right)^{n/2}.$$ This implies that if $ N > 4T \pi^{-2} \epsilon^{-2/n}+ 2 T \log R,$ then $$\|f-f_{N} \|_{X_{c}^{\infty}(C_{R})} < \epsilon \|f\|_{X_c^2(\mathbb{R}^{n}_{+})}.$$ This completes the proof. \end{proof} In order to derive the desired probability estimate for the sampling inequality \eqref{main inq} to hold, we first prove that the following bounded set of $\delta$-concentrated functions is totally bounded. \begin{lemma} \label{tb} The set $\mathcal B_{T,\delta}^{*}:= \big\{ f\in \mathcal B_{T,\delta}: \|f\|_{X_{c}^{2}(\mathbb R_{+}^n)}=1 \big\}$ is totally bounded with respect to $\|\cdot\|_{X_{c}^{\infty}(C_{R})}.$ \end{lemma} \begin{proof} The proof follows arguments similar to those in \cite[Lemma 2.4]{jma}. By Theorem \ref{fd}, for given $\epsilon>0$ and $f \in \mathcal{B}^{*}_{T,\delta},$ there exists $f_N \in \mathcal{B}^{N}_{T}$ such that $\|f-f_N \|_{X_{c}^{\infty}(C_R)} < \frac{\epsilon}{2}.$ Let $\overline{B\left(0;\epsilon \right)}$ denote the closed ball in $\mathcal{B}^{N}_{T}$ of radius $\epsilon$ centred at the origin. Now from Lemma \ref{estmt}, we have $\| f\|_{X_{c}^{\infty}(C_{R})} \leq 1$, which implies that $f_N \in \overline{B\left(0;1+\frac{\epsilon}{2} \right)}.$ Since $\overline{B\left(0;1+\frac{\epsilon}{2} \right)}$ is totally bounded, the desired result follows. \end{proof} \begin{defin} The covering number of any bounded subset $Y$ of a Banach space $X$ with respect to $\beta>0$ is defined by $$\mathcal N(Y,\beta):=\min \Big\{\ell \in \mathbb{N}: \exists\ a_1,a_2,...,a_\ell \in X\ \mbox{such that}\ Y \subset \bigcup_{m=1}^{\ell} B(a_{m};\beta) \Big\},$$ where $B(a_{m};\beta)$ denotes the open ball of radius $\beta$ centered at $a_m$ in $X.$ \end{defin} The following lemma provides a bound for the covering number of any closed ball in a finite-dimensional Banach space. \begin{lemma} \cite{cover} \label{cover} Let $X$ be a Banach space of dimension $d$ and let $\overline{B(0;s)}$ denote the closed ball of radius $s$ centered at the origin.
Then the minimum number of open balls of radius $r$ required to cover $\overline{B(0;s)}$ is bounded by $\displaystyle \Big(\frac{4s}{r} \Big)^d.$ \end{lemma} \begin{rem} From Theorem \ref{fd} and Lemma \ref{tb}, for $f \in \mathcal{B}^{*}_{T,\delta},$ it can be seen that in order to have $\|f-f_N \|_{X_{c}^{\infty}(C_R)} < \frac{\epsilon}{2},$ we need to choose $N$ in such a way that $$ N > \left[4T \pi^{-2} 2^{\frac{n}{2}} \epsilon^{-2/n}\right]+ 2 T \log R.$$ Particularly, if we set $\displaystyle N = 4T \pi^{-2} 2^{\frac{n}{2}} \epsilon^{-2/n} + 2 T \log R+1,$ then the dimension of $\mathcal B_{T}^{N}$ will be bounded by \begin{eqnarray*} N^n &=& \left[T 2^{\frac{n}{2}+2} \pi^{-2} \epsilon^{-2/n} + 2 T \log R +1 \right]^n \\ &\leq & 2^n \left[\frac{T^n 2^{n\left(\frac{n}{2}+2 \right)}\pi^{-2n}}{\epsilon^2}+ (2 T \log R+1)^n \right]:= d_{\epsilon}. \end{eqnarray*} Now from Lemmas \ref{tb} and \ref{cover}, the bound for the covering number of $\mathcal B_{T,\delta}^{*}$ is given by \begin{equation} \label{covering} \mathcal N\left(\mathcal B_{T,\delta}^{*},\epsilon \right) \leq \mathcal N \bigg(\overline{B\Big(0; 1 +\frac{\epsilon}{2} \Big)},\frac{\epsilon}{2} \bigg) \leq \exp \Big(d_{\epsilon} \ \log \Big(\frac{16}{\epsilon} \Big) \Big). \end{equation} \end{rem} \subsection{Random Sampling} In this section, we deduce the probability estimate concerning the random sampling inequality \eqref{main sampling inq}. We first define independent random variables on $\mathcal{B}_{T,\delta}$ by using random samples. The main tool to derive the probability estimate is Bernstein's inequality, which utilizes upper bounds on the random variables and their variances. \begin{lemma}[Bernstein's Inequality \cite{bern}] Let $Y_{j},$ $j=1,2,\dots,r$ be a sequence of bounded, independent random variables with $E[Y_{j}]=0,$ $Var[Y_{j}] \leq \sigma^2,$ and $\|Y_{j}\|_{\infty} \leq M$ for $j=1,2,\dots,r.$ Then \begin{equation} \label{Bernst} P \bigg(\Big|\sum_{j=1}^{r} Y_{j} \Big| \geq \lambda \bigg) \leq 2 \exp \Big(- \frac{\lambda^2}{2 r \sigma^2 + \frac{2}{3} M \lambda }\Big). \end{equation} \end{lemma} Let $S:=\big\{ x_{j}: j\in \mathbb N \big\}$ be a collection of i.i.d. random variables distributed uniformly on the cube $C_{R}.$ For $f \in \mathcal{B}_{T,\delta},$ we define the random variables \begin{equation} \label{random var} Z_{j}(f)= |f(x_{j})|^2 x_{j}^{2c-1} - \frac{R^n}{(R^2 -1)^n} \int_{C_{R}} |f(x)|^2 x^{2c-1} dx. \end{equation} The set $\big\{Z_j(f)\big\}_{j \in \mathbb{N}}$ is a sequence of independent random variables.
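Before computing the moments of $Z_{j}(f)$, the following Monte Carlo sketch (one-dimensional, with an arbitrary test function and arbitrary values of $R$, $c$, $T$) illustrates the centering built into \eqref{random var}: the empirical mean of $|f(x_j)|^2x_j^{2c-1}$ over uniform draws from $C_R$ matches the weighted integral, so that the sample mean of $Z_j(f)$ is close to zero.
\begin{verbatim}
import numpy as np

# Monte Carlo sanity check (n = 1): for x_j uniform on C_R = [1/R, R], the mean of
# |f(x_j)|^2 x_j^{2c-1} equals R/(R^2-1) times the integral of |f|^2 x^{2c-1} over C_R.
rng = np.random.default_rng(1)
R, c, T = 3.0, 0.4, 2.0
f = lambda x: x**(-c) * np.sinc(T * np.log(x))             # an arbitrary test function

xs = rng.uniform(1.0 / R, R, size=2_000_000)               # uniform samples on C_R
mc_mean = np.mean(np.abs(f(xs))**2 * xs**(2 * c - 1))

grid = np.linspace(1.0 / R, R, 400_001)
integral = np.sum(np.abs(f(grid))**2 * grid**(2 * c - 1)) * (grid[1] - grid[0])
print(mc_mean, R / (R**2 - 1) * integral)                  # the two values agree closely
\end{verbatim}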
We also see that $$E\big[Z_j(f)\big]= \frac{R^n}{(R^2 -1)^n} \int_{C_{R}} \left[ |f(y)|^2 y^{2c-1} - \frac{R^n}{(R^2 -1)^n} \int_{C_{R}} |f(x)|^2 x^{2c-1} dx \right]dy=0.$$ \begin{lemma}\label{var} Let $f,g \in \mathcal{B}_{T}$ with $\|f\|_{ X_{c}^{2}(\mathbb{R}^{n}_{+})}=\|g\|_{ X_{c}^{2}(\mathbb{R}^{n}_{+})}=1.$ Then the following estimates hold: \begin{enumerate}[label=(\roman*)] \item $Var\big[Z_{j}(f)\big] \leq \frac{R^{2n}}{(R^2 -1)^n},$ \item $\|Z_{j}(f)\|_{\infty} \leq R^{n},$ \item $\|Z_{j}(f)-Z_{j}(g)\|_{\infty} \leq 2 R^{n} \|f-g\|_{X_{c}^{\infty}(C_{R})}.$ \end{enumerate} \end{lemma} \begin{proof} Using the definition of variance and (\ref{random var}), we obtain \begin{eqnarray*} Var\big[Z_{j}(f)\big]&=& E\big[|f(x_{j})|^4 x_{j}^{4c-2}\big]- \big[E \big(|f(x_{j})|^2 x_{j}^{2c-1} \big) \big]^2 \\ & \leq & E\big[|f(x_{j})|^4 x_{j}^{4c-2}\big] \\ &=& \frac{R^n}{(R^2 -1)^n} \int_{C_{R}} |f(x)|^4 x^{4c-2} dx \\ & \leq & \frac{R^{2n}}{(R^2 -1)^n} \|f \|^{2}_{X_{c}^{\infty}(C_{R})} \int_{C_{R}} |f(x)|^2 x^{2c-1} dx \\ &=& \frac{R^{2n}}{(R^2 -1)^n} \|f \|^{2}_{X_{c}^{\infty}(C_{R})} \|f \|^{2}_{X_{c}^{2}(C_{R})}. \end{eqnarray*} Since $\|f \|^{2}_{X_{c}^{\infty}(C_{R})} \leq \|f\|^{2}_{X_{c}^{2}(\mathbb{R}^{n}_{+})},$ and $\|f\|_{ X_{c}^{2}(\mathbb{R}^{n}_{+})}=1,$ we have $$Var\big[Z_{j}(f)\big] \leq \frac{R^{2n}}{(R^2 -1)^n}.$$ Now since $f \in \mathcal{B}_{T}$ and $\|f\|_{X^{2}_{c}(\mathbb{R}^{n}_{+})}=1,$ we can write \begin{eqnarray*} \|Z_{j}(f)\|_{\infty}&=& \sup_{x_j \in S} \left||f(x_{j})|^2 x_{j}^{2c-1} - \frac{R^n}{(R^2 -1)^n} \int_{C_{R}}|f(x)|^{2} x^{2c-1} dx \right| \\ & \leq & \max \left\{R^n \|f\|_{X_{c}^{\infty}(C_{R})}^{2}, \frac{R^n}{(R^2 -1)^n} \int_{C_{R}} |f(x)|^2 x^{2c-1} dx \right\} \\ &=& R^n \|f\|_{X_{c}^{\infty}(C_{R})}^{2} \leq R^n. \end{eqnarray*} Similarly, for $(iii),$ we have \begin{align*} &\|Z_{j}(f)-Z_{j}(g)\|_{\infty}\\ =& \sup_{x_j \in S} \Big| \left( |f(x_{j})|^2 - |g(x_{j})|^2 \right) x_{j}^{2c-1}- \frac{R^n}{(R^2 -1)^n} \int_{C_{R}} (|f(x)|^2-|g(x)|^2) \ x^{2c-1} dx \Big|\\ \leq & \max \left\{ (|f(x)|-|g(x)|)(|f(x)|+|g(x)|) x^{2c-1}, \ \frac{R^n}{(R^2 -1)^n} \int_{C_{R}} (|f(x)|^2 -|g(x)|^2) x^{2c-1} dx \right\} \\ \leq& 2 R^{n} \|f-g\|_{X_{c}^{\infty}(C_{R})}. \end{align*} This completes the proof. \end{proof} Using these estimates, we now proceed toward the probability estimate. The following result will be helpful in this direction. \begin{thm}\label{prob est} Let $\{ x_{j}: j\in \mathbb N \}$ be i.i.d. random variables drawn uniformly from the cube $C_{R}.$ Then the following holds \begin{multline*} P \bigg(\sup_{f \in \mathcal{B}^{*}_{T,\delta}} \Big| \frac{1}{r} \sum_{j=1}^{r} Z_{j}(f) \Big| \geq \epsilon \bigg)\leq 2 \exp \bigg[2^n \bigg(\frac{16 T^n \pi^{-2n} R^{2n} 2^{n\left(\frac{n}{2}+2 \right)}}{\epsilon^2}+ (2 T \log R+1)^n \bigg)\\ \log \left(\frac{64 R^n}{\epsilon} \right)- \frac{3 r \epsilon^2 (R^2-1)^n}{ 4 R^n (6 R^{n}+\epsilon(R^2-1)^n)} \bigg].
\end{multline*} \end{thm} \begin{proof} Let $\displaystyle M:=\mathcal N \left(\mathcal B_{T,\delta}^{*},\frac{\epsilon}{4 R^{n}} \right)$ be the covering number for the set $\mathcal{B}^{*}_{T,\delta}$ and $f_{1},f_{2},..., f_{M}$ be the elements of $\mathcal{B}^{*}_{T,\delta}.$ Then for any $f \in \mathcal{B}^{*}_{T,\delta},$ there exists $f_{m},\ 1 \leq m \leq M$ such that $\displaystyle \|f-f_{m}\|_{X^{\infty}_c(C_{R})} < \frac{\epsilon}{4 R^{n}}.$ From (iii) of Lemma \ref{var}, we have $$ \left|\frac{1}{r} \sum_{j=1}^{r} \left(Z_{j}(f)-Z_{j}(f_{m}) \right) \right| \leq 2 R^{n} \|f-f_{m}\|_{X^{\infty}_c(C_{R})} \leq \frac{\epsilon}{2}.$$ In view of (\ref{Bernst}), for given $f_m,\ 1 \leq m \leq M,$ we have $$ P \bigg( \bigg| \frac{1}{r} \sum_{j=1}^{r} Z_{j}(f_{m}) \bigg| \geq \frac{\epsilon}{2} \bigg) \leq 2 \exp \left(- \frac{3 r \epsilon^2 (R^2-1)^n}{ 4 R^n (6 R^{n}+\epsilon(R^2-1)^n)} \right).$$ For fixed $m,$ we have \begin{eqnarray*} P \bigg( \sup_{ \left\{f: \|f-f_{m}\|_{X^{\infty}_c(C_{R})} \leq \frac{\epsilon}{4 R^{n}}\right\} } \bigg|\frac{1}{r} \sum_{j=1}^{r} Z_{j}(f) \bigg| \geq \epsilon \bigg) & \leq & P \bigg( \bigg| \frac{1}{r} \sum_{j=1}^{r} Z_{j}(f_{m}) \bigg|\geq \frac{\epsilon}{2} \bigg) \\ & \leq & 2 \exp \left(-\frac{3 r \epsilon^2 (R^2-1)^n}{ 4 R^n (6 R^{n}+\epsilon(R^2-1)^n)} \right). \end{eqnarray*} Since $M$ is the covering number for $\mathcal{B}^{*}_{T,\delta},$ i.e., $$\mathcal{B}^{*}_{T,\delta} \subset \bigcup_{m=1}^{M} \left\{ f: \|f-f_{m}\|_{X^{\infty}_c(C_{R})} \leq \frac{\epsilon}{4 R^{n}} \right\},$$ we obtain \begin{eqnarray*} P \bigg(\sup_{f \in \mathcal{B}^{*}_{T,\delta}} \Big| \frac{1}{r} \sum_{j=1}^{r} Z_{j}(f) \Big| \geq \epsilon \bigg) & \leq & \sum_{m=1}^{M} P \bigg( \sup_{ \left\{f: \|f-f_{m}\|_{X^{\infty}_c(C_{R})} \leq \frac{\epsilon}{4 R^{n}} \right\}} \left|\frac{1}{r}\sum_{j=1}^{r} Z_{j}(f) \right| \geq \epsilon \bigg) \\ & \leq & 2 M \exp \bigg(-\frac{3 r \epsilon^2 (R^2-1)^n}{ 4 R^n (6 R^{n}+\epsilon(R^2-1)^n)} \bigg). \end{eqnarray*} Using \eqref{covering}, we finally get \begin{multline*} P \bigg(\sup_{f \in \mathcal{B}^{*}_{T,\delta}} \Big| \frac{1}{r} \sum_{j=1}^{r} Z_{j}(f) \Big| \geq \epsilon \bigg)\leq 2 \exp \bigg[2^n \bigg(\frac{16 T^n \pi^{-2n} R^{2n} 2^{n\left(\frac{n}{2}+2 \right)}}{\epsilon^2}+ (2 T \log R+1)^n \bigg)\\ \log \left(\frac{64 R^n}{\epsilon} \right)- \frac{3 r \epsilon^2 (R^2-1)^n}{ 4 R^n (6 R^{n}+\epsilon(R^2-1)^n)} \bigg]. \end{multline*} \end{proof} We now prove the following main result of the paper. 
\begin{thm} \label{main} Let $\{ x_j: j\in \mathbb N \}$ be independent and identically distributed random variables, uniformly distributed over $C_R.$ For $0< \mu < 1-\delta,$ the following inequality \begin{equation} \label{main sampling inq} \frac{R^{n-1}(1-\delta - \mu)}{(R^2-1)^n} \|f\|_{X_c^{2}(\mathbb{R}^{n}_{+})}^2 \leq \frac{1}{r} \sum_{j=1}^{r} |f(x_{j})|^2 x_{j}^{2c} \leq \frac{R^{n+1} (1+\mu)}{(R^2-1)^n} \|f\|_{X_c^{2}(\mathbb{R}^{n}_{+})}^2, \end{equation} holds for every $f \in \mathcal B_{T,\delta}$ with probability at least $\displaystyle 1- 2 \beta\exp \Big(-\frac{3r\mu^2}{4 (R^2-1)^n (6+\mu)} \Big),$ where $\displaystyle \beta:=\exp\bigg( 2^n \Big(\frac{16 T^n \pi^{-2n} (R^2-1)^{2n} \ 2^{n\left(\frac{n}{2}+2 \right)}}{\mu^2}+ (2 T \log R+1)^n \Big) \log \Big(\frac{64 (R^2-1)^n}{\mu} \Big) \bigg).$ \end{thm} \begin{proof} Let $\{ x_j: j=1,2,\dots,r\}$ be the uniformly distributed samples taken from $C_R$ and define the event $$ \mathcal{E}= \left \{\sup_{f \in \mathcal{B}^{*}_{T,\delta}} \left | \frac{1}{r} \sum_{j=1}^{r} Z_{j}(f) \right| \leq \frac{\mu R^n}{(R^2 -1)^n} \right \}.$$ The event $\mathcal{E}$ is equivalent to the event $$ \bigg| \sum_{j=1}^{r} |f(x_{j})|^2 x_{j}^{2c-1} - \frac{r R^n}{(R^2 -1)^n} \int_{C_{R}} |f(x)|^2 x^{2c-1} dx \bigg| \leq \frac{r \mu R^n}{(R^2 -1)^n}, \qquad \text{for all } f\in \mathcal B_{T,\delta}^{*}.$$ This implies that for all $f\in \mathcal B_{T,\delta}^{*}$ \begin{align*} \frac{rR^n}{(R^2-1)^n} \int_{C_{R}} |f(x)|^2 x^{2c-1}\,dx - \frac{r \mu R^n}{(R^2-1)^n} &\leq \sum_{j=1}^{r} |f(x_{j})|^2 x_{j}^{2c-1} \\ &\leq \frac{rR^n}{(R^2-1)^n} \int_{C_{R}} |f(x)|^2 x^{2c-1}\,dx + \frac{r \mu R^n}{(R^2-1)^n}\\ \frac{R^{n-1}(1-\delta - \mu)}{(R^2-1)^n} \leq \frac{1}{r} \sum_{j=1}^{r} |f(x_{j})|^2 x_{j}^{2c} &\leq \frac{R^{n+1}}{(R^2-1)^n}(1+\mu). \end{align*} It can be observed that the event $\mathcal{E}$ leads to the desired sampling inequality \eqref{main sampling inq}. From Theorem \ref{prob est}, the probability that the random samples $\{ x_{j}: j=1,2,\dots,r \}$ satisfy the above sampling inequality is estimated as \begin{eqnarray*} P(\mathcal{E})&=& 1-P(\mathcal{E}^c)\\ & \geq & 1-2 \exp \left(-r \alpha+ \beta \right) \end{eqnarray*} where $\displaystyle \alpha:= \frac{3 \mu^2}{4 (R^2-1)^n (6+\mu)},\ $ and \\ $\displaystyle \beta:= 2^n \bigg(\frac{16 \pi^{-2n} T^n (R^2-1)^{2n} \ 2^{n\left(\frac{n}{2}+2 \right)}}{\mu^2}+ (2 T \log R+1)^n \bigg) \log \bigg(\frac{64 (R^2-1)^n}{\mu} \bigg).$ \end{proof} \begin{rem} From the above estimate of $P(\mathcal{E}),$ it is easy to see that the probability approaches $1$ for a sufficiently large sample size $r.$ Moreover, the above lower bound on the probability becomes nontrivial for values of $r$ with $r > \frac{\beta}{\alpha}.$ \end{rem} \section*{Acknowledgment} S. Bajpeyi and S. Sivananthan gratefully acknowledge financial assistance from the Department of Science and Technology, Government of India, via project no. CRG/2019/002412. \end{document}
\begin{document} \begin{abstract} {\small This article introduces new algorithms for the uniform random generation of labelled planar graphs. Its principles rely on Boltzmann samplers, as recently developed by Duchon, Flajolet, Louchard, and Schaeffer. It combines the Boltzmann framework, a suitable use of rejection, a new combinatorial bijection found by Fusy, Poulalhon and Schaeffer, as well as a precise analytic description of the generating functions counting planar graphs, which was recently obtained by Gim\'enez and Noy. This gives rise to an extremely efficient algorithm for the random generation of planar graphs. There is a preprocessing step of some fixed small cost; and the expected time complexity of generation is quadratic for exact-size uniform sampling and linear for approximate-size sampling. This greatly improves on the best previously known time complexity for exact-size uniform sampling of planar graphs with $n$ vertices, which was a little over $O(n^7)$. \emph{This is the extended and revised journal version of a conference paper with the title ``Quadratic exact-size and linear approximate-size random generation of planar graphs'', which appeared in the Proceedings of the International Conference on Analysis of Algorithms (AofA'05), 6-10 June 2005, Barcelona.}} \end{abstract} \maketitle \section{Introduction} \label{sec:intro} A graph is said to be planar if it can be embedded in the plane so that no two edges cross each other. In this article, we consider planar graphs that are \emph{labelled}, i.e., the $n$ vertices bear distinct labels in $[1..n]$, and \emph{simple}, i.e., with no loop nor multiple edges. Statistical properties of planar graphs have been intensively studied~\cite{BGHPS04,Ge,gimeneznoy}. Very recently, Gim\'enez and Noy~\cite{gimeneznoy} have solved \emph{exactly} the difficult problem of the asymptotic enumeration of labelled planar graphs. They also provide exact analytic expressions for the asymptotic probability distribution of parameters such as the number of edges and the number of connected components. However many other statistics on random planar graphs remain analytically and combinatorially intractable. Thus, it is an important issue to design efficient random samplers in order to observe the (asymptotic) behaviour of such parameters on random planar graphs. Moreover, random generation is useful to test the correctness and efficiency of algorithms on planar graphs, such as planarity testing, embedding algorithms, procedures for finding geometric cuts, and so on. Denise, Vasconcellos, and Welsh have proposed a first algorithm for the random generation of planar graphs~\cite{alain96random}, by defining a Markov chain on the set $\mathcal{G}_n$ of labelled planar graphs with $n$ vertices. At each step, two different vertices $v$ and $v'$ are chosen at random. If they are adjacent, the edge $(v,v')$ is deleted. If they are not adjacent and if the operation of adding $(v,v')$ does not break planarity, then the edge $(v,v')$ is added. By symmetry of the transition matrix of the Markov chain, the probability distribution converges to the uniform distribution on $\mathcal{G}_n$. This algorithm is very easy to describe but more difficult to implement, as there exists no simple linear-time planarity testing algorithm. More importantly, the rate of convergence to the uniform distribution is unknown. 
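The Markov chain just described is straightforward to sketch in code. The following illustration relies on the \texttt{networkx} library and its \texttt{check\_planarity} routine (an assumption about available tooling, not part of the original algorithm description); since the mixing time of the chain is unknown, the number of steps below is an arbitrary choice and the output is not guaranteed to be close to uniform.
\begin{verbatim}
import random
import networkx as nx

def planar_mc_step(G):
    """One step of the add/delete edge Markov chain on labelled planar graphs."""
    u, v = random.sample(list(G.nodes), 2)        # two distinct random vertices
    if G.has_edge(u, v):
        G.remove_edge(u, v)                       # adjacent: delete the edge
    else:
        G.add_edge(u, v)                          # non-adjacent: add it only if planarity survives
        is_planar, _ = nx.check_planarity(G)
        if not is_planar:
            G.remove_edge(u, v)                   # otherwise the chain stays put
    return G

# Illustration: start from the empty graph on n labelled vertices and run some steps.
n = 20
G = nx.empty_graph(n)
for _ in range(5000):
    planar_mc_step(G)
print(G.number_of_edges(), nx.check_planarity(G)[0])
\end{verbatim}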
A second approach for uniform random generation is the \emph{recursive method} introduced by Nijenhuis and Wilf~\cite{NiWi79} and formalised by Flajolet, Van Cutsem and Zimmermann~\cite{FlZiVa94}. The recursive method is a general framework for the random generation of combinatorial classes admitting a recursive decomposition. For such classes, producing an object of the class uniformly at random boils down to producing the \emph{decomposition tree} corresponding to its recursive decomposition. Then, the branching probabilities that produce the decomposition tree with suitable (uniform) probability are computed using the \emph{coefficients} counting the objects involved in the decomposition. As a consequence, this method requires a preprocessing step where large tables of large coefficients are calculated using the recursive relations they satisfy. \begin{figure} \caption{Complexities of the random samplers of planar graphs.} \label{table:compar} \end{figure} Bodirsky \emph{et al.} have described in~\cite{bodirsky} the first polynomial-time random sampler for planar graphs. Their idea is to apply the recursive method of sampling to a well known combinatorial decomposition of planar graphs according to successive levels of connectivity, which has been formalised by Tutte~\cite{Tut}. Precisely, the decomposition yields some recurrences satisfied by the coefficients counting planar graphs as well as subfamilies (connected, 2-connected, 3-connected), which in turn yield an explicit recursive way to generate planar graphs uniformly at random. As the recurrences are rather involved, the complexity of the preprocessing step is large. Precisely, in order to draw planar graphs with $n$ vertices (and possibly also a fixed number $m$ of edges), the random generator described in~\cite{bodirsky} requires a preprocessing time of order $O\left( n^7 (\log n)^2(\log \log n ) \right) $ and an auxiliary memory of size $O( n^5 \log n)$. Once the tables have been computed, the complexity of each generation is $O(n^3)$. A more recent optimisation of the recursive method by Denise and Zimmermann~\cite{denise99uniform} ---based on controlled real arithmetics--- should be applicable; it would improve the time complexity somewhat, but the storage complexity would still be large. In this article, we introduce a new random generator for labelled planar graphs, which relies on the same decomposition of planar graphs as the algorithm of Bodirsky \emph{et al}. The main difference is that we translate this decomposition into a random generator using the framework of Boltzmann samplers, instead of the recursive method. Boltzmann samplers have been recently developed by Duchon, Flajolet, Louchard, and Schaeffer in~\cite{DuFlLoSc04} as a powerful framework for the random generation of decomposable combinatorial structures. The idea of Boltzmann sampling is to gain efficiency by relaxing the constraint of exact-size sampling. As we will see, the gain is particularly significant in the case of planar graphs, where the decomposition is more involved than for classical classes, such as trees. Given a combinatorial class, a \emph{Boltzmann sampler} draws an object of size $n$ with probability proportional to $x^n$ (or proportional to $x^n/n!$ for labelled objects), where $x$ is a certain \emph{real} parameter that can be appropriately tuned. Accordingly, the probability distribution is spread over all the objects of the class, with the property that objects of the same size have the same probability of occurring.
In particular, the probability distribution is uniform when restricted to a fixed size. Like the recursive method, Boltzmann samplers can be designed for any combinatorial class admitting a recursive decomposition, as there are explicit sampling rules associated with each classical construction (Sum, Product, Set, Substitution). The branching probabilities used to produce the decomposition tree of a random object are not based on the \emph{coefficients} as in the recursive method, but on the \emph{values} at $x$ of the generating functions of the classes intervening in the decomposition. In this article, we translate the decomposition of planar graphs into Boltzmann samplers and obtain very efficient random generators that produce planar graphs with a fixed number of vertices or with fixed numbers of vertices and edges uniformly at random. Furthermore, our samplers have an approximate-size version where a small tolerance, say a few percents, is allowed for the size of the output. For practical purpose, approximate-size random sampling often suffices. The approximate-size samplers we propose are very efficient as they have \emph{linear time complexity}. \begin{theorem}[Samplers with respect to number of vertices] \label{theo:planarsamp1} Let $n\in \mathbf{N}$ be a target size. An \emph{exact-size} sampler $\frak{A}_n$ can be designed so as to generate labelled planar graphs with $n$ vertices uniformly at random. For any tolerance ratio $\epsilon>0$, an \emph{approximate-size} sampler $\frak{A}_{n,\epsilon}$ can be designed so as to generate planar graphs with their number of vertices in $[n(1-\epsilon),n(1+\epsilon)]$, and following the uniform distribution for each size $k\in [n(1-\epsilon),n(1+\epsilon)]$. Under a real-arithmetics complexity model, Algorithm $\frak{A}_n$ is of expected complexity $O(n^2)$, and Algorithm $\frak{A}_{n,\epsilon}$ is of expected complexity $O(n/\epsilon)$. \end{theorem} \begin{theorem}[Samplers with respect to the numbers of vertices and edges] \label{theo:planarsamp2} Let $n\in \mathbf{N}$ be a target size and $\mu\in(1,3)$ be a parameter describing the ratio edges-vertices. An \emph{exact-size} sampler $\overline{\frak{A}}_{n,\mu}$ can be designed so as to generate planar graphs with $n$ vertices and $\lfloor \mu n\rfloor$ edges uniformly at random. For any tolerance-ratio $\epsilon>0$, an \emph{approximate-size} sampler $\overline{\frak{A}}_{n,\mu,\epsilon}$ can be designed so as to generate planar graphs with their number of vertices in $[n(1-\epsilon),n(1+\epsilon)]$ and their ratio edges/vertices in $[\mu (1-\epsilon),\mu (1+\epsilon)]$, and following the uniform distribution for each fixed pair (number of vertices, number of edges). Under a real-arithmetics complexity model, for a fixed $\mu\in(1,3)$, Algorithm $\overline{\frak{A}}_{n,\mu}$ is of expected complexity $O_{\mu}(n^{5/2})$. For fixed constants $\mu\in(1,3)$ and $\epsilon>0$, Algorithm $\overline{\frak{A}}_{n,\mu,\epsilon}$ is of expected complexity $O_{\mu}(n/\epsilon)$ (the bounding constants depend on $\mu$). \end{theorem} \noindent The samplers are completely described in Section~\ref{sec:sample_vertices} and Section~\ref{sec:sample_edges}. The expected complexities will be proved in Section~\ref{sec:complexity}. For the sake of simplicity, we give big $O$ bounds that might depend on $\mu$ and we do not care about quantifying the constant in the big $O$ in a precise way. 
However, we strongly believe that a more careful analysis would allow us to have a uniform bounding constant (over $\mu\in(1,3)$) of reasonable magnitude. This means that not only the theoretical complexity is good, but also the practical one. (As we review in Section~\ref{sec:implement}, we have implemented the algorithm, which easily draws graphs of sizes in the range of $10^5$.) \emph{Complexity model.} Let us comment on the model we adopt to state the complexities of the random samplers. We assume here that we are given an \emph{oracle}, which provides at unit cost the exact evaluations of the generating functions intervening in the decomposition of planar graphs. (For planar graphs, these generating functions are those of families of planar graphs of different connectivity degrees and pointed in different ways.) This assumption, called the ``oracle assumption'', is by now classical when analysing the complexity of Boltzmann samplers; see~\cite{DuFlLoSc04} for a more detailed discussion. It allows us to separate the \emph{combinatorial complexity} of the samplers from the complexity of \emph{evaluating} the generating functions, which resorts to computer algebra and is a research project in its own right. Once the oracle assumption is made, the scenario of generation of a Boltzmann sampler is typically similar to a branching process; the generation follows a sequence of \emph{random choices} ---typically coin flips biased by some generating function values--- that determine the shape of the object to be drawn. According to these choices, the object (in this article, a planar graph) is built effectively by a sequence of primitive operations such as vertex creation, edge creation, merging two graphs at a common vertex... The \emph{combinatorial complexity} is precisely defined as the sum of the number of coin flips and the number of primitive operations performed to build the object. The (combinatorial) complexity of our algorithm is compared to the complexities of the two preceding random samplers in Figure~\ref{table:compar}. Let us now comment on the preprocessing complexity. The implementation of $\frak{A}_{n,\epsilon}$ and $\frak{A}_n$, as well as $\overline{\frak{A}}_{n,\mu,\epsilon}$ and $\overline{\frak{A}}_{n,\mu}$, requires the storage of a fixed number of real constants, which are special values of generating functions. The generating functions to be evaluated are those of several families of planar graphs (connected, 2-connected, 3-connected). A crucial result, recently established by Gim\'enez and Noy~\cite{gimeneznoy}, is that there exist exact analytic equations satisfied by these generating functions. Hence, their numerical evaluation can be performed efficiently with the help of a computer algebra system; the complexity we have observed in practice (doing the computations with Maple) is of low polynomial degree $k$ in the number of digits that need to be computed. (However, there is not yet a complete rigorous proof of the fact, as the Boltzmann parameter has to approach the singularity in order to draw planar graphs of large size.) To draw objects of size $n$, the precision needed to make the probability of failure small is typically of order $\log(n)$ digits\footnote{Notice that it is possible to achieve perfect uniformity by calling adaptive precision routines in case of failure, see Denise and Zimmermann~\cite{denise99uniform} for a detailed discussion on similar problems.}.
Thus the preprocessing step to evaluate the generating functions with a precision of $\log(n)$ digits has a complexity of order $\log(n)^k$ (again, this is yet to be proved rigorously). The following informal statement summarizes the discussion; making a theorem of it is the subject of ongoing research (see the recent article~\cite{PiSaSo07}): \noindent{\bf Fact.} \emph{With high probability, the auxiliary memory necessary to generate planar graphs of size $n$ is of order $O(\log(n))$ and the preprocessing time complexity is of order $O(\log(n)^k)$ for some low integer $k$.} \emph{Implementation and experimental results.} We have completely implemented the random samplers stated in Theorem~\ref{theo:planarsamp1} and Theorem~\ref{theo:planarsamp2}. Details are given in Section~\ref{sec:implement}, as well as experimental results. Precisely, the evaluations of the generating functions of planar graphs have been carried out with the computer algebra system Maple, based on the analytic expressions given by Gim\'enez and Noy~\cite{gimeneznoy}. Then, the random generator has been implemented in Java, with a precision of 64 bits for the values of generating functions (``double'' type). Using the approximate-size sampler, planar graphs with size of order 100,000 are generated in a few seconds with a machine clocked at 1GHz. In contrast, the recursive method of Bodirsky \emph{et al} is currently limited to sizes of about 100. Having the random generator implemented, we have performed some simulations in order to observe typical properties of random planar graphs. In particular we have observed a sharp concentration for the proportion of vertices of a given degree $k$ in a random planar graph of large size. \section{Overview} The algorithm we describe relies mainly on two ingredients. The first one is a recent correspondence, called the closure-mapping, between binary trees and (edge-rooted) 3-connected planar graphs~\cite{FuPoSc05}, which makes it possible to obtain a Boltzmann sampler for 3-connected planar graphs. The second one is a decomposition formalised by Tutte~\cite{Tut}, which ensures that any planar graph can be decomposed into 3-connected components, via connected and 2-connected components. Taking advantage of Tutte's decomposition, we explain in Section~\ref{sec:decomp} how to specify a Boltzmann sampler for planar graphs, denoted $\Gamma\mathcal{G}(x,y)$, from the Boltzmann sampler for 3-connected planar graphs. To do this, we have to extend the collection of constructions for Boltzmann samplers, as detailed in~\cite{DuFlLoSc04}, and develop new rejection techniques so as to suitably handle the rooting/unrooting operations that appear alongside Tutte's decomposition. Even if the Boltzmann sampler $\Gamma\mathcal{G}(x,y)$ already yields a polynomial-time uniform random sampler for planar graphs, the expected time complexity to generate a graph of size $n$ ($n$ vertices) is not good, due to the fact that the size distribution of $\Gamma \mathcal{G}(x,y)$ is too concentrated on objects of small size. To improve the size distribution, we \emph{point} the objects, in a way inspired by~\cite{DuFlLoSc04}, which corresponds to a \emph{derivation} (differentiation) of the associated generating function. The precise singularity analysis of the generating functions of planar graphs, which has been recently done in~\cite{gimeneznoy}, indicates that we have to take the second derivative of planar graphs in order to get a good size distribution. 
In Section~\ref{sec:efficient}, we explain how the derivation operator can be injected in the decomposition of planar graphs. This yields a Boltzmann sampler $\Gamma \mathcal{G}''(x,y)$ for ``bi-derived'' planar graphs. Our random generators for planar graphs are finally obtained as \emph{targetted samplers}, which call $\Gamma \mathcal{G}''(x,y)$ (with suitably tuned values of $x$ and $y$) until the generated graph has the desired size. The time complexity of the targetted samplers is analysed in Section~\ref{sec:complexity}. This eventually yields the complexity results stated in Theorems~\ref{theo:planarsamp1} and ~\ref{theo:planarsamp2}. The general scheme of the planar graph generator is shown in Figure~\ref{fig:relations}. \begin{figure} \caption{The chain of constructions from binary trees to planar graphs.} \label{fig:relations} \end{figure} \section{Boltzmann samplers} \label{sec:bolz} In this section, we define Boltzmann samplers and describe the main properties which we will need to handle planar graphs. In particular, we have to extend the framework to the case of \emph{mixed classes}, meaning that the objects have two types of atoms. Indeed the decomposition of planar graphs involves both (labelled) vertices and (unlabelled) edges. The constructions needed to formulate the decomposition of planar graphs are classical ones in combinatorics: Sum, Product, Set, Substitutions~\cite{BeLaLe,fla}. In Section~\ref{sec:rule}, for each of the constructions, we describe a \emph{sampling rule}, so that Boltzmann samplers can be assembled for any class that admits a decomposition in terms of these constructions. Moreover, the decomposition of planar graphs involves rooting/unrooting operations, which makes it necessary to develop new rejection techniques, as described in Section~\ref{sec:reject}. \subsection{Definitions} \label{sec:bolzdef} A combinatorial class $\mathcal{C}$ is a family of labelled objects (structures), that is, each object is made of $n$ atoms that bear distinct labels in $[1..n]$. In addition, the number of objects in any fixed size $n$ is finite; and any structure obtained by relabelling a structure in $\mathcal{C}$ is also in $\mathcal{C}$. The \emph{exponential} generating function of $\mathcal{C}$ is defined as $$C(x):=\sum_{\gamma\in\mathcal{C}}\frac{x^{|\gamma|}}{|\gamma|!},$$ where $|\gamma|$ is the size of an object $\gamma\in\mathcal{C}$ (e.g., the number of vertices of a graph). The radius of convergence of $C(x)$ is denoted by $\rho$. A positive value $x$ is called \emph{admissible} if $x\in(0,\rho)$ (hence the sum defining $C(x)$ converges if $x$ is admissible). Boltzmann samplers, as introduced and developed by Duchon \emph{et al.} in~\cite{DuFlLoSc04}, constitute a general and efficient framework to produce a random generator for any \emph{decomposable} combinatorial class $\mathcal{C}$. Instead of fixing a particular size for the random generation, objects are drawn under a probability distribution spread over the whole class. Precisely, given an admissible value for $C(x)$, the Boltzmann distribution assigns to each object of $\mathcal{C}$ a weight $$\mathbf{P}_x(\gamma)=\frac{x^{|\gamma|}}{|\gamma|!C(x)}\, .$$ Notice that the distribution is uniform, i.e., two objects with the same size have the same probability to be chosen. A \emph{Boltzmann sampler} for the labelled class $\mathcal{C}$ is a procedure $\Gamma \mathcal{C}(x)$ that, for each fixed admissible $x$, draws objects of $\mathcal{C}$ at random under the distribution $\mathbf{P}_x$. 
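As an illustration of this definition (and of the sampling rules described in Section~\ref{sec:rules_boltzma}), here is a Boltzmann sampler for a classical labelled class, the rooted Cayley trees $\mathcal{T}=\mathcal{Z}\star\Set(\mathcal{T})$ with $T(x)=xe^{T(x)}$; this is emphatically not the planar-graph sampler of this article, and the value of $x$ and the number of draws are arbitrary choices. The sketch shows the two features that matter in what follows: the sampler is driven solely by the numerical value $T(x)$ (the oracle), and the size of the output fluctuates, concentrating on larger objects as $x$ approaches the singularity $1/e$.
\begin{verbatim}
import math
import random

# Illustrative Boltzmann sampler for rooted Cayley trees T = Z * Set(T), T(x) = x e^{T(x)},
# admissible for 0 < x < 1/e.  Uses the standard Set -> Poisson(T(x)) rule of Boltzmann sampling.

def T_value(x, iters=200):
    """Solve T = x*exp(T) by fixed-point iteration (converges for x < 1/e)."""
    t = 0.0
    for _ in range(iters):
        t = x * math.exp(t)
    return t

def poisson(lam):
    """Sample a Poisson(lam) variate by inversion of the cumulative distribution."""
    u, k, p = random.random(), 0, math.exp(-lam)
    c = p
    while u > c:
        k += 1
        p *= lam / k
        c += p
    return k

def gamma_tree(x, t):
    """Draw a tree under the Boltzmann distribution P_x; return its size (number of nodes)."""
    size = 1                                   # the root is an L-atom
    for _ in range(poisson(t)):                # Set(T): a Poisson(T(x)) number of root subtrees
        size += gamma_tree(x, t)
    return size

x = 0.35                                       # an arbitrary admissible parameter (x < 1/e)
t = T_value(x)
sizes = [gamma_tree(x, t) for _ in range(10000)]
print(sum(sizes) / len(sizes))                 # mean size grows as x approaches 1/e
\end{verbatim}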
The authors of~\cite{DuFlLoSc04} give sampling rules associated to classical combinatorial constructions, such as Sum, Product, and Set. (For the unlabelled setting, we refer to the more recent article~\cite{FlFuPi07}, and to~\cite{BoFuPi06} for the specific case of plane partitions.) In order to translate the combinatorial decomposition of planar graphs into a Boltzmann sampler, we need to extend the framework of Boltzmann samplers to the bivariate case of \emph{mixed} combinatorial classes. A mixed class $\mathcal{C}$ is a labelled combinatorial class where one takes into account a second type of atoms, which are unlabelled. Precisely, an object in $\mathcal{C}=\cup_{n,m}\mathcal{C}_{n,m}$ has $n$ ``labelled atoms'' and $m$ ``unlabelled atoms'', e.g., a graph has $n$ labelled vertices and $m$ unlabelled edges. The labelled atoms are shortly called L-atoms, and the unlabelled atoms are shortly called U-atoms. For $\gamma\in\mathcal{C}$, we write $|\gamma|$ for the number of L-atoms of $\gamma$, called the \emph{L-size} of $\gamma$, and $||\gamma||$ for the number of U-atoms of $\gamma$, called the \emph{U-size} of $\gamma$. The associated generating function $C(x,y)$ is defined as $$C(x,y):=\sum_{\gamma\in\mathcal{C}}\frac{x^{|\gamma|}}{|\gamma|!}y^{||\gamma||}.$$ For a fixed real value $y>0$, we denote by $\rho_C(y)$ the radius of convergence of the function $x\mapsto C(x,y)$. A pair $(x,y)$ is said to be \emph{admissible} if $x\in (0,\rho_C(y))$, which implies that $\sum_{\gamma\in\mathcal{C}}\frac{x^{|\gamma|}}{|\gamma|!}y^{||\gamma||}$ converges and that $C(x,y)$ is well defined. Given an admissible pair $(x,y)$, the \emph{mixed Boltzmann distribution} is the probability distribution $\mathbf{P}_{x,y}$ assigning to each object $\gamma\in\mathcal{C}$ the probability $$\mathbf{P}_{x,y}(\gamma)=\frac{1}{C(x,y)}\frac{x^{|\gamma|}}{|\gamma|!}y^{||\gamma||}.$$ An important property of this distribution is that two objects with the same size-parameters have the same probability of occurring. A \emph{mixed Boltzmann sampler} at $(x,y)$ ---shortly called Boltzmann sampler hereafter--- is a procedure $\Gamma \mathcal{C}(x,y)$ that draws objects of $\mathcal{C}$ at random under the distribution $\mathbf{P}_{x,y}$. Notice that the specialization $y=1$ yields a classical Boltzmann sampler for $\mathcal{C}$. \subsection{Basic classes and constructions} \label{sec:rule} We describe here a collection of basic classes and constructions that are used thereafter to formulate a decomposition for the family of planar graphs. The basic classes we consider are: \begin{itemize} \item The 1-class, made of a unique object of size 0 (both the L-size and the U-size are equal to 0), called the 0-atom. The corresponding mixed generating function is $C(x,y)=1$. \item The L-unit class, made of a unique object that is an L-atom; the corresponding mixed generating function is $C(x,y)=x$. \item The U-unit class, made of a unique object that is a U-atom; the corresponding mixed generating function is $C(x,y)=y$. \end{itemize} Let us now describe the five constructions that are used to decompose planar graphs. In particular, we need two specific substitution constructions, one at labelled atoms that is called L-substitution, the other at unlabelled atoms that is called U-substitution. \noindent{\bf Sum.} The sum $\mathcal{C}:=\mathcal{A}+\mathcal{B}$ of two classes is meant as a \emph{disjoint union}, i.e., it is the union of two distinct copies of $\mathcal{A}$ and $\mathcal{B}$. 
The generating function of $\mathcal{C}$ satisfies $$ C(x,y)=A(x,y)+B(x,y). $$ \noindent{\bf Product.} The partitional product of two classes $\mathcal{A}$ and $\mathcal{B}$ is the class $\mathcal{C}:=\mathcal{A}\star\mathcal{B}$ of objects that are obtained by taking a pair $\gamma=(\gamma_1\in\mathcal{A},\gamma_2\in\mathcal{B})$ and relabelling the L-atoms so that $\gamma$ bears distinct labels in $[1..|\gamma|]$. The generating function of $\mathcal{C}$ satisfies $$ C(x,y)=A(x,y)\cdot B(x,y). $$ \noindent{$\mathbf{Set_{\geq d}}$.} For $d\geq 0$ and a class $\mathcal{B}$ having no object of size 0, any object in $\mathcal{C}:=\Set_{\geq d}(\mathcal{B})$ is a finite set of at least $d$ objects of $\mathcal{B}$, relabelled so that the atoms of $\gamma$ bear distinct labels in $[1\,.\,.\,|\gamma|]$. For $d=0$, this corresponds to the classical construction $\Set$. The generating function of $\mathcal{C}$ satisfies $$ C(x,y)=\exp_{\geq d}(B(x,y)),\ \ \ \mathrm{where}\ \exp_{\geq d}(z):=\sum_{k\geq d}\frac{z^k}{k!}. $$ \noindent{\bf L-substitution.} Given $\mathcal{A}$ and $\mathcal{B}$ two classes such that $\mathcal{B}$ has no object of size $0$, the class $\mathcal{C}=\mathcal{A}\circ_L\mathcal{B}$ is the class of objects that are obtained as follows: take an object $\rho\in\mathcal{A}$ called the \emph{core-object}, substitute each L-atom $v$ of $\rho$ by an object $\gamma_v\in\mathcal{B}$, and relabel the L-atoms of $\cup_{v}\gamma_v$ with distinct labels from $1$ to $\sum_v |\gamma_v|$. The generating function of $\mathcal{C}$ satisfies $$ C(x,y)=A(B(x,y),y). $$ \noindent{\bf U-substitution.} Given $\mathcal{A}$ and $\mathcal{B}$ two classes such that $\mathcal{B}$ has no object of size $0$, the class $\mathcal{C}=\mathcal{A}\circ_U\mathcal{B}$ is the class of objects that are obtained as follows: take an object $\rho\in\mathcal{A}$ called the \emph{core-object}, substitute each U-atom $e$ of $\rho$ by an object $\gamma_e\in\mathcal{B}$, and relabel the L-atoms of $\rho\cup\left(\cup_{e}\gamma_e\right)$ with distinct labels from $1$ to $|\rho|+\sum_e |\gamma_e|$. We assume here that the U-atoms of an object of $\mathcal{A}$ are \emph{distinguishable}. In particular, this property is satisfied if $\mathcal{A}$ is a family of labelled graphs with no multiple edges, since two different edges are distinguished by the labels of their extremities. The generating function of $\mathcal{C}$ satisfies $$ C(x,y)=A(x,B(x,y)). $$ \begin{figure} \caption{The sampling rules associated with the basic classes and the constructions. For each rule involving partitional products, there is a relabelling step performed by an auxiliary procedure \textsc{DistributeLabels} \label{table:rules} \end{figure} \subsection{Sampling rules} \label{sec:rules_boltzma} A nice feature of Boltzmann samplers is that the basic combinatorial constructions (Sum, Product, Set) give rise to simple rules for assembling the associated Boltzmann samplers. To describe these rules, we assume that the exact values of the generating functions at a given admissible pair $(x,y)$ are known. We will also need two well-known probability distributions. \begin{itemize} \item A random variable follows a \emph{Bernoulli law} of parameter $p\in (0,1)$ if it is equal to 1 (or true) with probability $p$ and equal to 0 (or false) with probability $1-p$. 
\item Given $\lambda\in\mathbb{R}_{+}$ and $d\in\mathbb{Z}_{+}$, the \emph{conditioned Poisson law} $\Pois_{\geq d}(\lambda)$ is the probability distribution on $\mathbf{Z}_{\geq d}$ defined as follows: $$ \mathbb{P}(k)=\frac{1}{\exp_{\geq d}(\lambda)}\frac{\lambda^k}{k!},\ \mathrm{where}\ \exp_{\geq d}(z):=\sum_{k\geq d}\frac{z^k}{k!}. $$ For $d=0$, this corresponds to the classical Poisson law, abbreviated as $\Pois$. \end{itemize} Starting from combinatorial classes $\mathcal{A}$ and $\mathcal{B}$ endowed with Boltzmann samplers $\Gamma \mathcal{A}(x,y)$ and $\Gamma \mathcal{B}(x,y)$, Figure~\ref{table:rules} describes how to assemble a sampler for a class $\mathcal{C}$ obtained from $\mathcal{A}$ and $\mathcal{B}$ (or from $\mathcal{A}$ alone for the construction $\Set_{\geq d}$) using the five constructions described in this section. \begin{proposition} \label{prop:rules} Let $\mathcal{A}$ and $\mathcal{B}$ be two mixed combinatorial classes endowed with Boltzmann samplers $\Gamma \mathcal{A}(x,y)$ and $\Gamma \mathcal{B}(x,y)$. For each of the five constructions $\{+$, $\star$, $\Set_{\geq d}$, L-subs, U-subs$\}$, the sampler $\Gamma \mathcal{C}(x,y)$, as specified in Figure~\ref{table:rules}, is a valid Boltzmann sampler for the combinatorial class $\mathcal{C}$. \end{proposition} \begin{proof} 1) \emph{Sum:} $\mathcal{C}=\mathcal{A}+\mathcal{B}$. An object of $\mathcal{A}$ has probability $\frac{1}{A(x,y)}\frac{x^{|\gamma|}}{|\gamma|!}y^{||\gamma||}$ (by definition of $\Gamma \mathcal{A}(x,y)$) multiplied by $\frac{A(x,y)}{C(x,y)}$ (because of the Bernoulli choice) of being drawn by $\Gamma \mathcal{C}(x,y)$. Hence, it has probability $\frac{1}{C(x,y)}\frac{x^{|\gamma|}}{|\gamma|!}y^{||\gamma||}$ of being drawn. Similarly, an object of $\mathcal{B}$ has probability $\frac{1}{C(x,y)}\frac{x^{|\gamma|}}{|\gamma|!}y^{||\gamma||}$ of being drawn. Hence $\Gamma \mathcal{C}(x,y)$ is a valid Boltzmann sampler for $\mathcal{C}$. \noindent 2) \emph{Product:} $\mathcal{C}=\mathcal{A}\star\mathcal{B}$. Define a \emph{generation scenario} as a pair $(\gamma_1\in\mathcal{A},\gamma_2\in\mathcal{B})$, together with a function $\sigma$ that assigns to each L-atom in $\gamma_1\cup\gamma_2$ a label $i\in[1..|\gamma_1|+|\gamma_2|]$ in a bijective way. By definition, $\Gamma \mathcal{C}(x,y)$ draws a generation scenario and returns the object $\gamma\in\mathcal{A}\star\mathcal{B}$ obtained by keeping the secondary labels (the ones given by \textsc{DistributeLabels}). Each generation scenario has probability $$\left(\frac{1}{A(x,y)}\frac{x^{|\gamma_1|}}{|\gamma_1|!}y^{||\gamma_1||}\right)\left(\frac{1}{B(x,y)}\frac{x^{|\gamma_2|}}{|\gamma_2|!}y^{||\gamma_2||}\right)\frac{1}{(|\gamma_1|+|\gamma_2|)!}$$ of being drawn, the three factors corresponding respectively to $\Gamma \mathcal{A}(x,y)$, $\Gamma \mathcal{B}(x,y)$, and \textsc{DistributeLabels}($\gamma$). Observe that this probability has the more compact form $$ \frac{1}{|\gamma_1|!|\gamma_2|!}\frac{1}{C(x,y)}\frac{x^{|\gamma|}}{|\gamma|!}y^{||\gamma||} .$$ Given $\gamma\in\mathcal{A}\star\mathcal{B}$, let $\gamma_1$ be its first component (in $\mathcal{A}$) and $\gamma_2$ be its second component (in $\mathcal{B}$). Any relabelling of the labelled atoms of $\gamma_1$ from $1$ to $|\gamma_1|$ and of the labelled atoms of $\gamma_2$ from $1$ to $|\gamma_2|$ induces a unique generation scenario producing $\gamma$. Indeed, the two relabellings determine unambiguously the relabelling permutation $\sigma$ of the generation scenario. 
Hence, $\gamma$ is produced from $|\gamma_1|!|\gamma_2|!$ different scenarios, each having probability $\frac{1}{|\gamma_1|!|\gamma_2|!C(x,y)}\frac{x^{|\gamma|}}{|\gamma|!}y^{||\gamma||}$. As a consequence, $\gamma$ is drawn under the Boltzmann distribution. \noindent 3) \emph{Set}$_{\geq d}$: $\mathcal{C}=\Set_{\geq d}(\mathcal{B})$. In the case of the construction $\Set_{\geq d}$, a \emph{generation scenario} is defined as a sequence $(\gamma_1\in\mathcal{B},\ldots,\gamma_k\in\mathcal{B})$ with $k\geq d$, together with a function $\sigma$ that assigns to each L-atom in $\gamma_1\cup\cdots\cup\gamma_k$ a label $i\in[1..|\gamma_1|+\cdots+|\gamma_k|]$ in a bijective way. Such a generation scenario produces an object $\gamma\in\Set_{\geq d}(\mathcal{B})$. By definition of $\Gamma \mathcal{C}(x,y)$, each scenario has probability $$\left( \frac{1}{\exp_{\geq d}(B(x,y))}\frac{B(x,y)^k}{k!}\right)\left(\prod_{i=1}^k \frac{x^{|\gamma_i|}y^{||\gamma_i||}}{B(x,y)|\gamma_i|!}\right)\frac{1}{(|\gamma_1|+\cdots+|\gamma_k|)!},$$ the three factors corresponding respectively to drawing $\Pois_{\geq d}(B(x,y))$, drawing the sequence, and the relabelling step. This probability has the simpler form $$\frac{1}{k!C(x,y)}\frac{x^{|\gamma|}}{|\gamma|!}y^{||\gamma||}\prod_{i=1}^k\frac{1}{|\gamma_i|!}.$$ For $k\geq d$, an object $\gamma\in\Set_{\geq d}(\mathcal{B})$ can be written as a sequence $\gamma_1,\ldots,\gamma_k$ in $k!$ different ways. In addition, by a similar argument as for the Product construction, a sequence $\gamma_1,\ldots,\gamma_k$ is produced from $\prod_{i=1}^k|\gamma_i|!$ different scenarios. As a consequence, $\gamma$ is drawn under the Boltzmann distribution. \noindent 4) \emph{L-substitution}: $\mathcal{C}=\mathcal{A}\circ_L\mathcal{B}$. For this construction, a \emph{generation scenario} is defined as a core-object $\rho\in\mathcal{A}$, a sequence $\gamma_1,\ldots,\gamma_{|\rho|}$ of objects of $\mathcal{B}$ ($\gamma_i$ stands for the object of $\mathcal{B}$ substituted at the atom $i$ of $\rho$), together with a function $\sigma$ that assigns to each L-atom in $\gamma_1\cup\cdots\cup\gamma_{|\rho|}$ a label $i\in[1..|\gamma_1|+\cdots+|\gamma_{|\rho|}|]$ in a bijective way. This corresponds to the scenario of generation of an object $\gamma\in\mathcal{A}\circ_L\mathcal{B}$ by the algorithm $\Gamma \mathcal{C}(x,y)$, and this scenario has probability $$\left(\frac{1}{A(B(x,y),y)}\frac{B(x,y)^{|\rho|}}{|\rho|!}y^{||\rho||}\right)\left(\prod_{i=1}^{|\rho|}\frac{x^{|\gamma_i|}y^{||\gamma_i||}}{B(x,y)|\gamma_i|!}\right)\frac{1}{(|\gamma_1|+\cdots+|\gamma_{|\rho|}|)!},$$ which has the simpler form $$\frac{x^{|\gamma|}y^{||\gamma||}}{C(x,y)|\gamma|!}\frac{1}{|\rho|!}\prod_{i=1}^{|\rho|}\frac{1}{|\gamma_i|!}.$$ Given $\gamma\in\mathcal{A}\circ_L\mathcal{B}$, labelling the core-object $\rho\in\mathcal{A}$ with distinct labels in $[1..|\rho|]$ and each component $(\gamma_i)_{1\leq i\leq|\rho|}$ with distinct labels in $[1..|\gamma_i|]$ induces a unique generation scenario producing $\gamma$. As a consequence, $\gamma$ is produced from $|\rho|!\prod_{i=1}^{|\rho|}|\gamma_i|!$ scenarios, each having probability $\frac{x^{|\gamma|}y^{||\gamma||}}{C(x,y)|\gamma|!}\frac{1}{|\rho|!}\prod_{i=1}^{|\rho|}\frac{1}{|\gamma_i|!}$. Hence, $\gamma$ is drawn under the Boltzmann distribution. \noindent 5) \emph{U-substitution}: $\mathcal{C}=\mathcal{A}\circ_U\mathcal{B}$. 
A \emph{generation scenario} is defined as a core-object $\rho\in\mathcal{A}$, a sequence $\gamma_1,\ldots,\gamma_{||\rho||}$ of objects of $\mathcal{B}$ (upon giving a rank to each unlabelled atom of $\rho$, $\gamma_i$ stands for the object of $\mathcal{B}$ substituted at the U-atom of rank $i$ in $\rho$), and a function $\sigma$ that assigns to each L-atom in $\rho\cup\gamma_1\cup\cdots\cup\gamma_{||\rho||}$ a label $i\in[1..|\rho|+|\gamma_1|+\cdots+|\gamma_{||\rho||}|]$. This corresponds to the scenario of generation of an object $\gamma\in\mathcal{A}\circ_U\mathcal{B}$ by the algorithm $\Gamma \mathcal{C}(x,y)$; this scenario has probability $$\left(\frac{1}{A(x,B(x,y))}\frac{x^{|\rho|}}{|\rho|!}B(x,y)^{||\rho||}\right)\left(\prod_{i=1}^{||\rho||}\frac{x^{|\gamma_i|}y^{||\gamma_i||}}{B(x,y)|\gamma_i|!}\right)\left(\frac{1}{(|\rho|+|\gamma_1|+\cdots+|\gamma_{||\rho||}|)!}\right).$$ This expression has the simpler form $$\frac{x^{|\gamma|}y^{||\gamma||}}{C(x,y)|\gamma|!}\frac{1}{|\rho|!}\prod_{i=1}^{||\rho||}\frac{1}{|\gamma_i|!}.$$ Given $\gamma\in\mathcal{A}\circ_U\mathcal{B}$, labelling the core-object $\rho\in\mathcal{A}$ with distinct labels in $[1..|\rho|]$ and each component $(\gamma_i)_{1\leq i\leq||\rho||}$ with distinct labels in $[1..|\gamma_i|]$ induces a unique generation scenario producing $\gamma$. As a consequence, $\gamma$ is produced from $|\rho|!\prod_{i=1}^{||\rho||}|\gamma_i|!$ scenarios, each having probability $\frac{x^{|\gamma|}y^{||\gamma||}}{C(x,y)|\gamma|!}\frac{1}{|\rho|!}\prod_{i=1}^{||\rho||}\frac{1}{|\gamma_i|!}$. Hence, $\gamma$ is drawn under the Boltzmann distribution. \end{proof} \begin{example}\label{ex:binary} Consider the class $\mathcal{C}$ of rooted binary trees, where the (labelled) atoms are the inner nodes. The class $\mathcal{C}$ has the following decomposition grammar, $$\mathcal{C}= \left( \mathcal{C}+ \mathbf{1}\right)\star \mathcal{Z}\star \left( \mathcal{C}+ \mathbf{1}\right).$$ Accordingly, the series $C(x)$ counting rooted binary trees satisfies $C(x)=x\left( 1+C(x)\right) ^2$. (Notice that $C(x)$ can be easily evaluated for a fixed real parameter $x<\rho_C=1/4$.) Using the sampling rules for Sum and Product, we obtain the following Boltzmann sampler for binary trees, where $\{\bullet\}$ stands for a node: \begin{tabular}{ll} $\Gamma \mathcal{C}(x):$& return $(\Gamma(1+\mathcal{C})(x),\{\bullet\},\Gamma(1+\mathcal{C})(x))$ \{independent calls\} \end{tabular} \begin{tabular}{ll} $\Gamma(1+\mathcal{C})(x):$& if $\Bern\left(\frac{1}{1+C(x)}\right)$ return leaf\\ & else return $\Gamma \mathcal{C}(x)$ \end{tabular} \noindent Distinct labels in $[1..|\gamma|]$ might then be distributed uniformly at random on the atoms of the resulting tree $\gamma$, so as to make it well-labelled (see Remark~\ref{rk:labels} below). Many more examples are given in~\cite{DuFlLoSc04} for labelled (and unlabelled) classes specified using the constructions $\{+,\star,\Set\}$. \end{example} \begin{remark}\label{rk:labels} In the sampling rules (Figure~\ref{table:rules}), the procedure \textsc{DistributeLabels}($\gamma$) throws distinct labels uniformly at random on the L-atoms of $\gamma$. The fact that the relabelling permutation is always chosen uniformly at random ensures that the process of assigning the labels has no memory of the past, hence \textsc{DistributeLabels} needs to be called just once, at the end of the generation procedure. (A similar remark is given by Flajolet \emph{et al.} in~\cite[Sec. 3]{FlZiVa94} for the recursive method of sampling.) 
In other words, when combining the sampling rules given in Figure~\ref{table:rules} in order to design a Boltzmann sampler, we can forget about the calls to \textsc{DistributeLabels}, see for instance the Boltzmann sampler for binary trees above. In fact, we have included the \textsc{DistributeLabels} steps in the definitions of the sampling rules only for the sake of writing the correctness proofs (Proposition~\ref{prop:rules}) in a proper way. \end{remark} \subsection{Additional techniques for Boltzmann sampling} As the decomposition of planar graphs we consider is a bit involved, we need a few techniques in order to properly translate this decomposition into a Boltzmann sampler. These techniques, which are described in more detail below, are: bijections, pointing, and rejection. \subsubsection{Combinatorial isomorphisms} Two mixed classes $\mathcal{A}$ and $\mathcal{B}$ are said to be \emph{isomorphic}, shortly written as $\mathcal{A}\simeq\mathcal{B}$, if there exists a bijection $\Phi$ between $\mathcal{A}$ and $\mathcal{B}$ that preserves the size parameters, i.e., preserves the L-size and the U-size. (This is equivalent to the fact that the mixed generating functions of $\mathcal{A}$ and $\mathcal{B}$ are equal.) In that case, a Boltzmann sampler $\Gamma \mathcal{A}(x,y)$ for the class $\mathcal{A}$ yields a Boltzmann sampler for $\mathcal{B}$ via the isomorphism: $\Gamma \mathcal{B}(x,y): \gamma\leftarrow\Gamma \mathcal{A}(x,y);\ \mathrm{return}\ \Phi(\gamma)$. \subsubsection{L-derivation, U-derivation, and edge-rooting.}\label{sec:derive} In order to describe our random sampler for planar graphs, we will make much use of \emph{derivative} operators. The L-derived class of a mixed class $\mathcal{C}=\cup_{n,m}\mathcal{C}_{n,m}$ (shortly called the derived class of $\mathcal{C}$) is the mixed class $\mathcal{C}'=\cup_{n,m}\mathcal{C}'_{n,m}$ of objects in $\mathcal{C}$ where the greatest label is taken out, i.e., the L-atom with greatest label is discarded from the set of L-atoms (see the book by Bergeron, Labelle, Leroux ~\cite{BeLaLe} for more details and examples). The class $\mathcal{C}'$ can be identified with the pointed class $\mathcal{C}^{\bullet}$ of $\mathcal{C}$, which is the class of objects of $\mathcal{C}$ with a distinguished L-atom. Indeed the discarded atom in an object of $\mathcal{C}'$ plays the role of a pointed vertex. However the important difference between $\mathcal{C}'$ and $\mathcal{C}^{\bullet}$ is that the distinguished L-atom does not count in the L-size of an object in $\mathcal{C}'$. In other words, $\mathcal{C}^{\bullet}=\mathcal{Z}L\star\mathcal{C}'$. Clearly, for any integers $n,m$, $\mathcal{C}'_{n-1,m}$ identifies to $\mathcal{C}_{n,m}$, so that the generating function $C'(x,y)$ of $\mathcal{C}'$ satisfies \begin{equation} C'(x,y)=\sum_{n,m} |\mathcal{C}_{n,m}|\frac{x^{n-1}}{(n-1)!}y^m=\partial_x C(x,y). \end{equation} The U-derived class of $\mathcal{C}$ is the class $\underline{\mathcal{C}}$ of objects obtained from objects of $\mathcal{C}$ by discarding one U-atom from the set of U-atoms; in other words there is a distinguished U-atom that does not count in the U-size. As in the definition of the U-substitution, we assume that all the U-atoms are distinguishable, for instance the edges of a simple graph are distinguished by the labels of their extremities. 
In that case, $|\underline{\mathcal{C}}_{n,m-1}|=m|\mathcal{C}_{n,m}|$, so that the generating function $\underline{C}(x,y)$ of $\underline{\mathcal{C}}$ satisfies \begin{equation} \underline{C}(x,y)=\sum_{n,m} m|\mathcal{C}_{n,m}|\frac{x^{n}}{n!}y^{m-1}=\partial_y C(x,y). \end{equation} For the particular case of planar graphs, we will also consider \emph{edge-rooted} objects (shortly called rooted objects), i.e., planar graphs where an edge is ``marked'' (distinguished) and directed. In addition, the root edge, shortly called the root, is not counted as an unlabelled atom, and the two extremities of the root do not count as labelled atoms (i.e., are not labelled). The edge-rooted class of $\mathcal{C}$ is denoted by $\overrightarrow{\mathcal{C}}$. Clearly we have $\mathcal{Z}L^{\ 2}\star\overrightarrow{\mathcal{C}}\simeq 2\star \underline{\mathcal{C}}$. Hence, the generating function $\overrightarrow{C}(x,y)$ of $\overrightarrow{\mathcal{C}}$ satisfies \begin{equation} \overrightarrow{C}(x,y)=\frac{2}{x^2}\partial_y C(x,y). \end{equation} \subsubsection{Rejection.}\label{sec:reject} Using rejection techniques offers great flexibility to design Boltzmann samplers, since it makes it possible to adjust the distributions of the samplers. \begin{lemma}[Rejection] \label{lemma:rej} Given a combinatorial class $\mathcal{C}$, let $W:\mathcal{C}\mapsto\mathbf{R}^+$ and $p:\mathcal{C}\mapsto [0,1]$ be two functions, called \emph{weight-function} and \emph{rejection-function}, respectively. Assume that $W$ is summable, i.e., $\sum_{\gamma\in\mathcal{C}}W(\gamma)$ is finite. Let $\frak{A}$ be a random generator for $\mathcal{C}$ that draws each object $\gamma\in\mathcal{C}$ with probability proportional to $W(\gamma)$. Then, the procedure $$ \frak{A}_{\mathrm{rej}}:\mathrm{repeat}\ \frak{A}\rightarrow\gamma\ \mathrm{until}\ \mathrm{Bern}(p(\gamma));\ \mathrm{return}\ \gamma $$ is a random generator on $\mathcal{C}$, which draws each object $\gamma\in\mathcal{C}$ with probability proportional to $W(\gamma)p(\gamma)$. \end{lemma} \begin{proof} Define $W:=\sum_{\gamma\in\mathcal{C}}W(\gamma)$. By definition, $\frak{A}$ draws an object $\gamma\in\mathcal{C}$ with probability $P(\gamma):=W(\gamma)/W$. Let $p_{\mathrm{rej}}$ be the probability of failure of $\frak{A}_{\mathrm{rej}}$ at each attempt. The probability $P_{\mathrm{rej}}(\gamma)$ that $\gamma$ is drawn by $\frak{A}_{\mathrm{rej}}$ satisfies $ P_{\mathrm{rej}}(\gamma)=P(\gamma)p(\gamma)+p_{\mathrm{rej}}P_{\mathrm{rej}}(\gamma),$ where the first (second) term is the probability that $\gamma$ is drawn at the first attempt (at a later attempt, respectively). Hence, $P_{\mathrm{rej}}(\gamma)=P(\gamma)p(\gamma)/(1-p_{\mathrm{rej}})=W(\gamma)p(\gamma)/(W\cdot(1-p_{\mathrm{rej}}))$, i.e., $P_{\mathrm{rej}}(\gamma)$ is proportional to $W(\gamma)p(\gamma)$. \end{proof} Rejection techniques are very useful for us to change the way objects are rooted. Typically it helps us to obtain a Boltzmann sampler for $\mathcal{A}'$ from a Boltzmann sampler for $\underline{\mathcal{A}}$ and vice versa. As we will use this trick many times, we formalise it here by giving two explicit procedures, one from L-derived to U-derived objects, the other one from U-derived to L-derived objects. 
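Both procedures below are instances of the rejection scheme of Lemma~\ref{lemma:rej}. As a generic sketch (illustrative names only, not our implementation), the scheme reads as follows; the first procedure instantiates it with acceptance probability $\frac{1}{\alpha_{U/L}}\frac{||\gamma||}{|\gamma|}$ and the second one with $\frac{1}{\alpha_{L/U}}\frac{|\gamma|}{||\gamma||}$.
\begin{verbatim}
import random

def rejection_sampler(draw, p):
    # draw():   returns each object gamma with probability proportional
    #           to some weight W(gamma), as in the rejection lemma;
    # p(gamma): rejection function with values in [0,1].
    # The object returned below is distributed proportionally to
    # W(gamma) * p(gamma).
    while True:
        gamma = draw()
        if random.random() < p(gamma):   # Bernoulli(p(gamma))
            return gamma
\end{verbatim}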
\fbox{ \begin{minipage}{12cm} \LtoU\\ \phantom{1}\hspace{.5cm} INPUT: a mixed class $\mathcal{A}$ such that $\displaystyle\alpha_{U/L}:=\mathrm{sup}_{\gamma\in\mathcal{A}}\frac{||\gamma||}{|\gamma|}$ is finite,\\ \phantom{1}\hspace{2cm}a Boltzmann sampler $\Gamma \mathcal{A}'(x,y)$ for the L-derived class $\mathcal{A}'$\\[0.2cm] \phantom{1}\hspace{.5cm} OUTPUT: a Boltzmann sampler for the U-derived class $\underline{\mathcal{A}}$, defined as:\\[0.2cm] \begin{tabular}{ll} $\Gamma \underline{\mathcal{A}}(x,y)$:& repeat $\gamma\leftarrow\Gamma \mathcal{A}'(x,y)$ \{at this point $\gamma\in\mathcal{A}'$\}\\ & \phantom{1}\hspace{.2cm}give label $|\gamma|+1$ to the discarded L-atom of $\gamma$;\\ & \phantom{1}\hspace{.2cm}\{so $|\gamma|$ increases by $1$, and $\gamma\in\mathcal{A}$\}\\ & until $\displaystyle\mathrm{Bern}\left(\frac{1}{\alpha_{U/L}}\frac{||\gamma||}{|\gamma|}\right)$;\\ & choose a U-atom uniformly at random and discard it\\ & $\ \ $ from the set of U-atoms; \{so $||\gamma||$ decreases by $1$, and $\gamma\in\underline{\mathcal{A}}$\}\\ & return $\gamma$ \end{tabular} \end{minipage}} \begin{lemma}\label{lem:LtoU} The procedure \LtoU yields a Boltzmann sampler for the class $\underline{\mathcal{A}}$ from a Boltzmann sampler for the class $\mathcal{A}'$. \end{lemma} \begin{proof} First, observe that the sampler is well defined. Indeed, by definition of the parameter $\alpha_{U/L}$, the Bernoulli choice is always valid (i.e., its parameter is always in $[0,1]$). Notice that the sampler\\ \phantom{1}\hspace{.4cm}$\gamma\leftarrow\Gamma \mathcal{A}'(x,y)$;\\ \phantom{1}\hspace{.4cm}give label $|\gamma|+1$ to the discarded L-atom of $\gamma$;\\ \phantom{1}\hspace{.4cm}return $\gamma$\\ \noindent is a sampler for $\mathcal{A}$ that outputs each object $\gamma\in\mathcal{A}$ with probability $\frac{1}{A'(x,y)}\frac{x^{|\gamma|-1}}{(|\gamma|-1)!}y^{||\gamma||}$, because $\mathcal{A}_{n,m}$ identifies to $\mathcal{A}'_{n-1,m}$. In other words, this sampler draws each object $\gamma\in\mathcal{A}$ with probability proportional to $|\gamma|\frac{x^{|\gamma|}}{|\gamma|!}y^{||\gamma||}$. Hence, according to Lemma~\ref{lemma:rej}, the repeat-until loop of the sampler $\Gamma \underline{\mathcal{A}}(x,y)$ yields a sampler for $\mathcal{A}$ such that each object has probability proportional to $||\gamma||\frac{x^{|\gamma|}}{|\gamma|!}y^{||\gamma||}$. As each U-atom has probability $1/||\gamma||$ of being discarded, the final sampler is such that each object $\gamma\in\underline{\mathcal{A}}$ has probability proportional to $\frac{x^{|\gamma|}}{|\gamma|!}y^{||\gamma||}$. So $\Gamma\underline{\mathcal{A}}(x,y)$ is a Boltzmann sampler for $\underline{\mathcal{A}}$. 
\end{proof}

We define a similar procedure to go from a U-derived class to an L-derived class:

\fbox{
\begin{minipage}{12cm}
\UtoL\\
\phantom{1}\hspace{.5cm} INPUT: a mixed class $\mathcal{A}$ such that $\displaystyle\alpha_{L/U}:=\mathrm{sup}_{\gamma\in\mathcal{A}}\frac{|\gamma|}{||\gamma||}$ is finite,\\
\phantom{1}\hspace{2cm}a Boltzmann sampler $\Gamma \underline{\mathcal{A}}(x,y)$ for the U-derived class $\underline{\mathcal{A}}$\\[0.2cm]
\phantom{1}\hspace{.5cm} OUTPUT: a Boltzmann sampler for the L-derived class $\mathcal{A}'$, defined as:\\[0.2cm]
\begin{tabular}{ll}
$\Gamma \mathcal{A}'(x,y)$:& repeat $\gamma\leftarrow\Gamma \underline{\mathcal{A}}(x,y)$ \{at this point $\gamma\in\underline{\mathcal{A}}$\}\\
& \phantom{1}\hspace{.2cm}take the discarded U-atom of $\gamma$ back into the set of U-atoms;\\
& \phantom{1}\hspace{.2cm} \{so $||\gamma||$ increases by $1$, and $\gamma\in\mathcal{A}$\}\\
& until $\displaystyle\mathrm{Bern}\left(\frac{1}{\alpha_{L/U}}\frac{|\gamma|}{||\gamma||}\right)$;\\
& discard the L-atom with greatest label from the set of L-atoms;\\
& \{so $|\gamma|$ decreases by $1$, and $\gamma\in\mathcal{A}'$\}\\
& return $\gamma$
\end{tabular}
\end{minipage}}

\begin{lemma}\label{lem:UtoL}
The procedure \UtoL yields a Boltzmann sampler for the class $\mathcal{A}'$ from a Boltzmann sampler for the class $\underline{\mathcal{A}}$.
\end{lemma}
\begin{proof}
Similar to the proof of Lemma~\ref{lem:LtoU}. The sampler $\Gamma \mathcal{A}'(x,y)$ is well defined, as the Bernoulli choice is always valid (i.e., its parameter is always in $[0,1]$). Notice that the sampler\\
\phantom{1}\hspace{.4cm}$\gamma\leftarrow\Gamma \underline{\mathcal{A}}(x,y)$;\\
\phantom{1}\hspace{.4cm}take the discarded U-atom of $\gamma$ back into the set of U-atoms;\\
\phantom{1}\hspace{.4cm}return $\gamma$\\
\noindent is a sampler for $\mathcal{A}$ that outputs each object $\gamma\in\mathcal{A}$ with probability $\frac{1}{\underline{A}(x,y)}||\gamma||\frac{x^{|\gamma|}}{|\gamma|!}y^{||\gamma||-1}$ (because an object $\gamma\in\mathcal{A}_{n,m}$ gives rise to $m$ objects in $\underline{\mathcal{A}}_{n,m-1}$), i.e., with probability proportional to $||\gamma||\frac{x^{|\gamma|}}{|\gamma|!}y^{||\gamma||}$. Hence, according to Lemma~\ref{lemma:rej}, the repeat-until loop of the sampler $\Gamma \mathcal{A}'(x,y)$ yields a sampler for $\mathcal{A}$ such that each object $\gamma\in\mathcal{A}$ has probability proportional to $|\gamma|\frac{x^{|\gamma|}}{|\gamma|!}y^{||\gamma||}$, i.e., proportional to $\frac{x^{|\gamma|-1}}{(|\gamma|-1)!}y^{||\gamma||}$. Hence, by discarding the greatest L-atom (i.e., $|\gamma|\leftarrow|\gamma|-1$), we get a probability proportional to $\frac{x^{|\gamma|}}{|\gamma|!}y^{||\gamma||}$ for every object $\gamma\in\mathcal{A}'$, i.e., a Boltzmann sampler for $\mathcal{A}'$.
\end{proof}

\begin{remark}\label{remark:greatest_delete}
We have stated in Remark~\ref{rk:labels} that, during a generation process, it is more convenient in practice to manipulate the shapes of the objects without systematically assigning labels to them. However, in the definition of the sampler $\Gamma \mathcal{A}'(x,y)$, one step is to remove the greatest label, so it seems we need to look at the labels at that step. In fact, as we consider here classes that are stable under relabelling, it is equivalent in practice to draw uniformly at random one vertex to play the role of the discarded L-atom.
\end{remark} \section{Decomposition of planar graphs and Boltzmann samplers} \label{sec:decomp} Our algorithm starts with the generation of 3-connected planar graphs, which have the nice feature that they are combinatorially tractable. Indeed, according to a theorem of Whitney~\cite{Whitney33}, 3-connected planar graphs have a unique embedding (up to reflection), so they are equivalent to 3-connected planar maps. Following the general approach introduced by Schaeffer~\cite{S-these}, a bijection has been described by the author, Poulalhon, and Schaeffer~\cite{FuPoSc05} to enumerate 3-connected maps~\cite{FuPoSc05} from binary trees, which yields an explicit Boltzmann sampler for (rooted) 3-connected maps, as described in Section~\ref{sec:bolz3conn}. The next step is to generate 2-connected planar graphs from 3-connected ones. We take advantage of a decomposition of 2-connected planar graphs into 3-connected planar components, which has been formalised by Trakhtenbrot~\cite{trak} (and later used by Walsh~\cite{Wa} to count 2-connected planar graphs and by Bender, Gao, Wormald to obtain asymptotic enumeration~\cite{BeGa}). Finally, connected planar graphs are generated from 2-connected ones by using the well-known decomposition into blocks, and planar graphs are generated from their connected components. Let us mention that the decomposition of planar graphs into 3-connected components has been completely formalised by Tutte~\cite{Tut} (though we rather use here formulations of this decomposition on \emph{rooted} graphs, as Trakhtenbrot did). The complete scheme we follow is illustrated in Figure~\ref{fig:scheme_unrooted}. \begin{figure} \caption{The complete scheme to obtain a Boltzmann sampler for planar graphs. The classes are to be defined all along Section~\ref{sec:decomp} \label{fig:scheme_unrooted} \end{figure} \noindent\textbf{Notations.} Recall that a graph is $k$-connected if the removal of any set of $k-1$ vertices does not disconnect the graph. In the sequel, we consider the following classes of planar graphs: \begin{tabular}{l} $\mathcal{G}$: the class of all planar graphs, including the empty graph,\\ $\mathcal{G}c$: the class of connected planar graphs with at least one vertex,\\ $\mathcal{G}b$: the class of 2-connected planar graphs with at least two vertices,\\ $\mathcal{G}t$: the class of 3-connected planar graphs with at least four vertices. \end{tabular} \begin{figure} \caption{The connected planar graphs with at most four vertices (the 2-connected ones are surrounded). Below each graph is indicated the number of distinct labellings.} \label{fig:firstTerms} \end{figure} All these classes are considered as mixed, with labelled vertices and unlabelled edges, i.e., the L-atoms are the vertices and the U-atoms are the edges. 
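As a small sanity check (not part of our implementation), the low-order coefficients of these series can be recovered by brute force: every graph on at most four vertices is planar, so it suffices to enumerate the labelled graphs on four vertices and classify them by number of edges and connectivity. The counts produced below match the coefficient of $\frac{x^4}{4!}$ in the series for connected planar graphs displayed next.
\begin{verbatim}
# Brute-force check of the coefficient of x^4/4! in G_1(x,y): connected
# labelled graphs on 4 vertices, classified by their number of edges.
from itertools import combinations

def nb_components(n, edges):
    parent = list(range(n))
    def find(v):
        while parent[v] != v:
            parent[v] = parent[parent[v]]
            v = parent[v]
        return v
    for u, v in edges:
        parent[find(u)] = find(v)
    return len({find(v) for v in range(n)})

n = 4
all_edges = list(combinations(range(n), 2))
counts = {}
for m in range(len(all_edges) + 1):
    for edges in combinations(all_edges, m):
        if nb_components(n, edges) == 1:
            counts[m] = counts.get(m, 0) + 1
print(counts)   # expected: {3: 16, 4: 15, 5: 6, 6: 1}
\end{verbatim}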
Let us give the first few terms of their mixed generating functions, denoted by $G$, $G_1$, $G_2$, and $G_3$ for the classes $\mathcal{G}$, $\mathcal{G}c$, $\mathcal{G}b$, and $\mathcal{G}t$, respectively (see also Figure~\ref{fig:firstTerms}, which displays the first connected planar graphs):
$$
\begin{array}{rcl}
G(x,y)&=&1+x+\frac{x^2}{2!}(1+y)+\frac{x^3}{3!}(1+3y+3y^2+y^3)+\cdots\\[0.1cm]
G_1(x,y)&=&x+\frac{x^2}{2!}y+\frac{x^3}{3!}(3y^2+y^3)+\frac{x^4}{4!}(16y^3+15y^4+6y^5+y^6)+\cdots\\[0.1cm]
G_2(x,y)&=&\frac{x^2}{2!}y+\frac{x^3}{3!}y^3+\frac{x^4}{4!}(3y^4+6y^5+y^6)+\frac{x^5}{5!}(12y^5+70y^6+100y^7+15y^8+10y^9)+\cdots\\[0.1cm]
G_3(x,y)&=&\frac{x^4}{4!}y^6+\frac{x^5}{5!}(15y^8+10y^9)+\frac{x^6}{6!}(60y^9+432y^{10}+540y^{11}+195y^{12})+\cdots
\end{array}
$$
Observe that, for a mixed class $\mathcal{A}$ of \emph{graphs}, the derived class $\mathcal{A}'$, as defined in Section~\ref{sec:derive}, is the class of graphs in $\mathcal{A}$ that have one vertex discarded from the set of L-atoms (this vertex plays the role of a distinguished vertex); $\underline{\mathcal{A}}$ is the class of graphs in $\mathcal{A}$ with one edge discarded from the set of U-atoms (this edge plays the role of a distinguished edge); and $\overrightarrow{\mathcal{A}}$ is the class of graphs in $\mathcal{A}$ with an ordered pair of adjacent vertices $(u,v)$ discarded from the set of L-atoms and the edge $(u,v)$ discarded from the set of U-atoms (such a graph can be considered as rooted at the directed edge $(u,v)$).

\subsection{Boltzmann sampler for 3-connected planar graphs}
\label{sec:bolz3conn}

In this section we develop a Boltzmann sampler for 3-connected planar graphs, more precisely for \emph{edge-rooted} ones, i.e., for the class $\overrightarrow{\mathcal{G}t}$. Our sampler relies on two results. First, we recall the equivalence between 3-connected planar graphs and 3-connected maps, where the terminology of map refers to an explicit embedding. Second, we take advantage of a bijection linking the families of rooted 3-connected maps and the (very simple) family of binary trees, via intermediate objects that are certain quadrangular dissections of the hexagon. Using the bijection, a Boltzmann sampler for rooted binary trees is translated into a Boltzmann sampler for rooted 3-connected maps.

\subsubsection{Maps}

A \emph{map on the sphere} (\emph{planar map}, resp.) is a connected planar graph embedded on the sphere (on the plane, resp.) up to continuous deformation of the surface, the embedded graph carrying distinct labels on its vertices (as usual, the labels range from $1$ to $n$, the number of vertices). A planar map is in fact equivalent to a map on the sphere with a distinguished face, which plays the role of the unbounded face. The unbounded face of a planar map is called the \emph{outer face}, and the other faces are called the \emph{inner faces}. The vertices and edges of a planar map are said to be \emph{outer} or \emph{inner} depending on whether they are incident to the outer face or not. A map is said to be \emph{rooted} if the embedded graph is edge-rooted. The \emph{root vertex} is the origin of the root. Classically, rooted planar maps are always assumed to have the outer face on the right of the root. With that convention, rooted planar maps are equivalent to rooted maps on the sphere (given a rooted map on the sphere, take the face on the right of the root as the outer face).
See Figure~\ref{fig:primal}(c) for an example of rooted planar map, where the labels are forgotten\footnote{Classically, rooted maps are considered in the literature without labels on the vertices, as the root is enough to avoid symmetries. Nevertheless, it is convenient here to keep the framework of mixed classes for maps, as we do for graphs.}. \subsubsection{Equivalence between 3-connected planar graphs and 3-connected maps}\label{sec:equiv} A well known result due to Whitney~\cite{Whitney33} states that a labelled 3-connected planar graph has a unique embedding on the sphere up to continuous deformation and reflection (in general a planar graph can have many embeddings). Notice that any 3-connected map on the sphere with at least 4 vertices differs from its mirror-image, due to the labels on the vertices. Hence every 3-connected planar graph with at least 4 vertices gives rise exactly to two maps on the sphere. The class of 3-connected maps on the sphere with at least 4 vertices is denoted by $\mathcal{M}_3$. As usual, the class is mixed, the L-atoms being the vertices and the U-atoms being the edges. Whitney's theorem ensures that \begin{equation} \label{eq:M} \mathcal{M}_3\simeq 2\star\mathcal{G}t. \end{equation} Here we make use of the formulation of this isomorphism for \emph{edge-rooted} objects. The mixed class of rooted 3-connected planar maps with at least 4 vertices is denoted by $\overrightarrow{\mathcal{M}_3}$, where ---as for edge-rooted graphs--- the L-atoms are the vertices not incident to the root-edge and the U-atoms are the edges except the root. Equation~(\ref{eq:M}) becomes, for edge-rooted objects: \begin{equation} \overrightarrow{\mathcal{M}_3}\simeq2\star\overrightarrow{\mathcal{G}t}. \end{equation} Thanks to this isomorphism, finding a Boltzmann sampler $\Gamma \overrightarrow{\mathcal{G}_3}(z,w)$ for edge-rooted 3-connected planar graphs reduces to finding a Boltzmann sampler $\Gamma \overrightarrow{\mathcal{M}_3}(z,w)$ for rooted 3-connected maps, upon forgetting the embedding. \subsubsection{3-connected maps and irreducible dissections}\label{sec:primal_map} We consider here some quadrangular dissections of the hexagon that are closely related to 3-connected planar maps. (We will see that these dissections can be efficiently generated at random, as they are in bijection with binary trees.) Precisely, a \emph{quadrangulated map} is a planar map (with no loop nor multiple edges) such that all faces except maybe the outer one have degree 4; it is called a quadrangulation if the outer face has degree 4. A quadrangulated map is called \emph{bicolored} if the vertices are colored black or white such that any edge connects two vertices of different colors. A rooted quadrangulated map (as usual with planar maps, the root has the outer face on its right) is always assumed to be endowed with the unique vertex bicoloration such that the root vertex is \emph{black} (such a bicoloration exists, as all inner faces have even degree). A quadrangulated map with an outer face of degree more than 4 is called \emph{irreducible} if each 4-cycle is the contour of a face. In particular, we define an \emph{irreducible dissection of the hexagon} ---shortly called irreducible dissection hereafter--- as an irreducible quadrangulated map with an hexagonal outer face, see Figure~\ref{fig:primal}(b) for an example. A quadrangulation is called irreducible if it has at least 2 inner vertices and if every 4-cycle, except the outer one, delimits a face. 
Notice that the smallest irreducible dissection has one inner edge and no inner vertex (see Figure~\ref{fig:asymmetric}), whereas the smallest irreducible quadrangulation is the embedded cube, which has 4 inner vertices and 5 inner faces. We consider irreducible dissections as objects of the mixed type, the L-atoms are the black inner vertices and the U-atoms are the inner faces. It proves more convenient to consider here the irreducible dissections that are \emph{asymmetric}, meaning that there is no rotation fixing the dissection. The four non-asymmetric irreducible dissections are displayed in Figure~\ref{fig:asymmetric}(b), all the other ones are asymmetric either due to an asymmetric shape or due to the labels on the black inner vertices. We denote by $\mathcal{I}$ the mixed class of \emph{asymmetric} bicolored irreducible dissections. We define also $\mathcal{J}$ as the class of asymmetric irreducible dissections that carry a root (outer edge directed so as to have a black origin and the outer face on its right), where this time the L-atoms are the black vertices except two of them (say, the origin of the root and the next black vertex in ccw order around the outer face) and the U-atoms are all the faces, including the outer one. Finally, we define $\mathcal{Q}$ as the mixed class of rooted irreducible quadrangulations, where the L-atoms are the black vertices except those two incident to the outer face, and the U-atoms are the inner faces. Irreducible dissections are closely related to 3-connected maps, via a classical correspondence between planar maps and quadrangulations. Given a bicolored rooted quadrangulation $\kappa$, the \emph{primal map} of $\kappa$ is the rooted map $\mu$ whose vertex set is the set of black vertices of $\kappa$, each face $f$ of $\kappa$ giving rise to an edge of $\mu$ connecting the two (opposite) black vertices of $f$, see Figure~\ref{fig:primal}(c)-(d). The map $\mu$ is naturally rooted so as to have the same root-vertex as $\kappa$. \begin{theorem}[Mullin and Schellenberg~\cite{Mu}] The primal-map construction is a bijection between rooted irreducible quadrangulations with $n$ black vertices and $m$ faces, and rooted 3-connected maps with $n$ vertices and $m$ edges\footnote{More generally, the bijection holds between rooted quadrangulations and rooted 2-connected maps.}. In other words, the primal-map construction yields the combinatorial isomorphism \begin{equation} \mathcal{Q}\simeq\overrightarrow{\mathcal{M}_3}. \end{equation} In addition, the construction of a 3-connected map from an irreducible quadrangulation takes linear time. \end{theorem} The link between $\mathcal{J}$ and $\overrightarrow{\mathcal{M}_3}$ is established via the family $\mathcal{Q}$, which is at the same time isomorphic to $\overrightarrow{\mathcal{M}_3}$ and closely related to $\mathcal{J}$. Let $\kappa$ be a rooted irreducible quadrangulation, and let $e$ be the edge following the root in cw order around the outer face. Then, deleting $e$ yields a rooted irreducible dissection $\delta$. In addition it is easily checked that $\delta$ is asymmetric, i.e., the four non-asymmetric irreducible dissections, which are shown in Figure~\ref{fig:asymmetric}(b), can not be obtained in this way. Hence the so-called \emph{root-deletion mapping} is injective from $\mathcal{Q}$ to $\mathcal{J}$. 
The inverse operation---called the \emph{root-addition mapping}---starts from a rooted irreducible dissection $\delta$, and adds an outer edge from the root-vertex of $\delta$ to the opposite outer vertex. Notice that the rooted quadrangulation obtained in this way might not be irreducible. Precisely, a non-separating 4-cycle appears iff $\delta$ has an internal path (i.e., a path using at least one inner edge) of length 3 connecting the root vertex to the opposite outer vertex. A rooted irreducible dissection $\delta$ is called \emph{admissible} iff it has no such path. The subclass of rooted irreducible dissections that are admissible is denoted by $\mathcal{J}a$. We obtain the following result, already given in~\cite{FuPoSc05}: \begin{lemma} The root-addition mapping is a bijection between admissible rooted irreducible dissections with $n$ black vertices and $m$ faces, and rooted irreducible quadrangulations with $n$ black vertices and $m$ inner faces. In other words, the root-addition mapping realises the combinatorial isomorphism \begin{equation} \mathcal{J}a\simeq\mathcal{Q}. \end{equation} \end{lemma} To sum up, we have the following link between rooted irreducible dissections and rooted 3-connected maps: $$ \mathcal{J}\supset\ \mathcal{J}a\simeq\mathcal{Q}\simeq\overrightarrow{\mathcal{M}_3}. $$ Notice that we have a combinatorial isomorphism between $\mathcal{J}a$ and $\overrightarrow{\mathcal{M}_3}$: the root-edge addition combined with the primal map construction. For $\delta\in\mathcal{J}a$, the rooted 3-connected map associated with $\delta$ is denoted $\mathrm{Primal}(\delta)$. \begin{figure} \caption{(a) A binary tree, (b) the associated irreducible dissection $\delta$ (rooted and admissible), (c) the associated rooted irreducible quadrangulation $\kappa=\mathrm{Add} \label{fig:primal} \end{figure} As we see next, the class $\mathcal{I}$ (and also the associated rooted class $\mathcal{J}$) is combinatorially tractable, as it is in bijection with the simple class of binary trees; hence irreducible dissections are easily generated at random. \subsubsection{Bijection between binary trees and irreducible dissections} There exist by now several elegant bijections between families of planar maps and families of plane trees that satisfy simple context-free decomposition grammars. Such constructions have first been described by Schaeffer in his thesis~\cite{S-these}, and many other families of rooted maps have been counted in this way~\cite{Fusy06a,PS03a,PS03b,BoDiGu04}. The advantage of bijective constructions over recursive methods for counting maps~\cite{Tu63} is that the bijections yield efficient ---linear-time--- generators for maps, as random sampling of maps is reduced to the much easier task of random sampling of trees, see~\cite{Sc99}. The method has been recently applied to the family of 3-connected maps, which is of interest here. Precisely, as described in~\cite{FuPoSc05}, there is a bijection between binary trees and irreducible dissections of the hexagon, which, as we have seen, are closely related to 3-connected maps. We define an \emph{unrooted binary tree}, shortly called a binary tree hereafter, as a plane tree (i.e., a planar map with a unique face) where the degree of each vertex is either 1 or 3. The vertices of degree 1 (3) are called leaves (nodes, resp.). A binary tree is said to be bicolored if its nodes are bicolored so that any two adjacent nodes have different colors, see Figure~\ref{fig:primal}(a) for an example. 
In a bicolored binary tree the L-atoms are the black nodes and the U-atoms are the leaves. A bicolored binary tree is called \emph{asymmetric} if there is no rotation-symmetry fixing it. Figure~\ref{fig:asymmetric} displays the four non-asymmetric bicolored binary trees; all the other bicolored binary trees are asymmetric, either due to the shape being asymmetric, or due to the labels on the black nodes. We denote by $\mathcal{K}$ the mixed class of \emph{asymmetric} bicolored binary trees (the requirement of asymmetry is necessary so that the leaves are distinguishable). \begin{figure} \caption{(a) The four non-asymmetric bicolored binary trees. (b) The four non-asymmetric bicolored irreducible dissections.} \label{fig:asymmetric} \end{figure} The terminology of binary tree refers to the fact that, upon rooting a binary tree at an arbitrary leaf, the neighbours in clockwise order around each node can be classified as a father (the neighbour closest to the root), a right son, and a left son, which corresponds to the classical definition of rooted binary trees, as considered in Example~\ref{ex:binary}. \begin{proposition}[Fusy, Poulalhon, and Schaeffer~\cite{FuPoSc05}] \label{prop:bijbin3conn} For $n\geq 0$ and $m\geq 2$, there exists an explicit bijection, called the \emph{closure-mapping}, between bicolored binary trees with $n$ black nodes and $m$ leaves, and bicolored irreducible dissections with $n$ black inner nodes and $m$ inner faces; moreover the 4 non-asymmetric bicolored binary trees are mapped to the 4 non-asymmetric irreducible dissections. In other words, the closure-mapping realises the combinatorial isomorphism \begin{equation}\mathcal{K}\simeq \mathcal{I}.\end{equation} The construction of a dissection from a binary tree takes linear time. \end{proposition} Let us comment a bit on this bijective construction, which is described in detail in~\cite{FuPoSc05}. Starting from a binary tree, the closure-mapping builds the dissection face by face, each leaf of the tree giving rise to an inner face of the dissection. More precisely, at each step, a ``leg" (i.e., an edge incident to a leaf) is completed into an edge connecting two nodes, so as to ``close" a quadrangular face. At the end, an hexagon is created outside of the figure, and the leaves attached to the remaining non-completed legs are merged with vertices of the hexagon so as to form only quadrangular faces. For instance the dissection of Figure~\ref{fig:primal}(b) is obtained by ``closing'' the tree of Figure~\ref{fig:primal}(a). \subsubsection{Boltzmann sampler for rooted bicolored binary trees} \label{sec:boltz_binary_trees} We define a rooted bicolored binary tree as a binary tree with a marked leaf discarded from the set of U-atoms. Notice that the class of rooted bicolored binary trees such that the underlying unrooted binary tree is asymmetric is the U-derived class $\underline{\mathcal{K}}$. In order to write down a decomposition grammar for the class $\underline{\mathcal{K}}$---to be translated into a Boltzmann sampler---we define some refined classes of rooted bicolored binary trees (decomposing $\underline{\mathcal{K}}$ is a bit involved since we have to forbid the 4 non-asymmetric binary trees): $\mathcal{R}b$ is the class of \emph{black-rooted} binary trees (the root leaf is connected to a black node) with at least one node, and $\mathcal{R}w$ is the class of \emph{white-rooted} binary trees (the root leaf is connected to a white node) with at least one node. 
We also define $\mathcal{R}bas$ ($\mathcal{R}was$) as the class of black-rooted (white-rooted, resp.) bicolored binary trees such that the underlying unrooted binary tree is asymmetric. Hence $\underline{\mathcal{K}}=\mathcal{R}bas+\mathcal{R}was$. We introduce two auxiliary classes: $\mathcal{R}bh$ is the class of black-rooted binary trees except the (unique) one with one black node and two white nodes; and $\mathcal{R}wh$ is the class of white-rooted binary trees except the two ones resulting from rooting the (unique) bicolored binary tree with one black node and three white nodes (the 4th one in Figure~\ref{fig:asymmetric}(a)); in addition, the rooted bicolored binary tree with two leaves (the first one in Figure~\ref{fig:asymmetric}(a)) is also included in the class $\mathcal{R}wh$. The decomposition of a bicolored binary tree at the root yields a complete decomposition grammar, given in Figure~\ref{fig:grammar}, for the class $\underline{\mathcal{K}}=\mathcal{R}bas+\mathcal{R}was$. This grammar translates to a decomposition grammar involving only the basic classes $\{\mathcal{Z}L,\mathcal{Z}U\}$ and the constructions $\{+,\star\}$ ($\mathcal{Z}L$ stands for a black node and $\mathcal{Z}U$ stands for a non-root leaf):
\begin{equation}\label{eq:grammar}
\left\{
\begin{array}{rcl}
\underline{\mathcal{K}}&=&\mathcal{R}bas+\mathcal{R}was,\\
\mathcal{R}bas&=&\mathcal{R}w\star\mathcal{Z}L\star\mathcal{Z}U+\mathcal{Z}U\star\mathcal{Z}L\star\mathcal{R}w+\mathcal{Z}L\star\mathcal{R}w^2,\\
\mathcal{R}was&=&\mathcal{R}bh\star\mathcal{Z}U+\mathcal{Z}U\star\mathcal{R}bh+\mathcal{R}b^2,\\
\mathcal{R}bh&=&\mathcal{R}wh\star\mathcal{Z}L\star\mathcal{Z}U^2+\mathcal{Z}U^2\star\mathcal{Z}L\star\mathcal{R}wh+\mathcal{R}wh\star\mathcal{Z}L\star\mathcal{R}wh,\\
\mathcal{R}wh&=&\mathcal{Z}U+\mathcal{R}b\star\mathcal{Z}U+\mathcal{Z}U\star\mathcal{R}b+\mathcal{R}b^2,\\
\mathcal{R}b&=&(\mathcal{Z}U+\mathcal{R}w)\star\mathcal{Z}L\star(\mathcal{Z}U+\mathcal{R}w),\\
\mathcal{R}w&=&(\mathcal{Z}U+\mathcal{R}b)\star(\mathcal{Z}U+\mathcal{R}b).
\end{array}
\right.
\end{equation}
\begin{figure}
\caption{The decomposition grammar for the two classes $\mathcal{R}bas$ and $\mathcal{R}was$.}
\label{fig:grammar}
\end{figure}
In turn, this grammar is translated into a Boltzmann sampler $\Gamma \underline{\mathcal{K}}(z,w)$ for the class $\underline{\mathcal{K}}$ using the sampling rules given in Figure~\ref{table:rules}, similarly as we have done for the (simpler) class of complete binary trees in Example~\ref{ex:binary}.

\subsubsection{Boltzmann sampler for bicolored binary trees}\label{sec:Ksamp}

We describe in this section a Boltzmann sampler $\Gamma \mathcal{K}(z,w)$ for asymmetric bicolored binary trees, which is derived from the Boltzmann sampler $\Gamma\underline{\mathcal{K}}(z,w)$ described in the previous section. Observe that each \emph{asymmetric} binary tree in $\mathcal{K}_{n,m}$ gives rise to $m$ rooted binary trees in $\underline{\mathcal{K}}_{n,m-1}$, as each of the $m$ leaves, which are \emph{distinguishable}, might be chosen to be discarded from the set of U-atoms. Hence, each object of $\mathcal{K}_{n,m}$ has probability $\underline{K}(z,w)^{-1}\,m\,z^n/n!\,w^{m-1}$ of being chosen when calling $\Gamma\underline{\mathcal{K}}(z,w)$ and taking the distinguished atom back into the set of U-atoms.
Hence, from the rejection lemma (Lemma~\ref{lemma:rej}), the sampler \begin{center} \begin{tabular}{l} repeat $\gamma\leftarrow\Gamma\underline{\mathcal{K}}(z,w)$;\\ \hspace{.2cm}take the distinguished U-atom back into the set of U-atoms;\\ \hspace{.2cm}\{so $||\gamma||$ increases by $1$ and now $\gamma\in\mathcal{K}$\}\\ until $\mathrm{Bern}\left(\frac{2}{||\gamma||}\right)$;\\ return $\gamma$ \end{tabular} \end{center} is a Boltzmann sampler for $\mathcal{K}$. However, this sampler is not efficient enough, as it uses a massive amount of rejection to draw a tree of large size. Instead, we use an early-abort rejection algorithm, which allows us to ``simulate" the rejection step all along the generation, thus making it possible to reject before the entire object is generated. We find it more convenient to use the number of nodes, instead of leaves, as the parameter for rejection (the subtle advantage is that the generation process $\Gamma\underline{\mathcal{K}}(z,w)$ builds the tree node by node). Notice that the number of leaves in an unrooted binary tree $\gamma$ is equal to $2+N(\gamma)$, with $N(\gamma)$ the number of nodes of $\gamma$. Hence, the rejection step in the sampler above can be replaced by a Bernoulli choice with parameter $2/(N(\gamma)+2)$. We now give the early-abort algorithm, which repeats calling $\Gamma\underline{\mathcal{K}}(z,w)$ while using a global counter $N$ that records the number of nodes of the tree under construction. \fbox{ \begin{tabular}{ll} $\Gamma \mathcal{K}(z,w)$:$\!\!$& repeat \\ &\hspace{0.2cm}$N:=0$; \{counter for nodes\}\\ &\hspace{0.2cm}Call $\Gamma\underline{\mathcal{K}}(z,w)$\\ &\hspace{0.2cm}each time a node is built do\\ &\hspace{0.4cm}$N:=N+1$;\\ & \hspace{0.4cm}if $\mathrm{Bern}((N+1)/(N+2))$ continue;\\ &\hspace{0.4cm}otherwise reject and restart from the first line; od\\ &until the generation finishes;\\ &return the object generated by $\Gamma\underline{\mathcal{K}}(z,w)$\\ &(taking the distinguished leaf back into the set of U-atoms) \end{tabular} } \begin{lemma}\label{lem:BoltzK} The algorithm $\Gamma \mathcal{K}(z,w)$ is a Boltzmann sampler for the class $\mathcal{K}$ of asymmetric bicolored binary trees. \end{lemma} \begin{proof} At each attempt, the call to $\Gamma\underline{\mathcal{K}}(z,w)$ would output a rooted binary tree $\gamma$ if there was no early interruption. Clearly, the probability that the generation of $\gamma$ finishes without interruption is $\prod_{i=1}^{N(\gamma)}(i+1)/(i+2)=2/(N(\gamma)+2)$. Hence, each attempt is equivalent to doing\\ \centerline{$\gamma\leftarrow\Gamma\underline{\mathcal{K}}(z,w)$; if $\mathrm{Bern}\left(\frac{2}{N(\gamma)+2}\right)$ return $\gamma$ else reject;}\\ Thus, the algorithm $\Gamma \mathcal{K}(z,w)$ is equivalent to the algorithm given in the discussion preceding Lemma~\ref{lem:BoltzK}, hence $\Gamma \mathcal{K}(z,w)$ is a Boltzmann sampler for the family $\mathcal{K}$. \end{proof} \subsubsection{Boltzmann sampler for irreducible dissections}\label{sec:sampI} As stated in Proposition~\ref{prop:bijbin3conn}, the closure-mapping realises a combinatorial isomorphism between asymmetric bicolored binary trees (class $\mathcal{K}$) and asymmetric bicolored irreducible dissections (class $\mathcal{I}$). Hence, the algorithm \fbox{\begin{tabular}{ll} $\Gamma \mathcal{I}(z,w)$:$\!\!$& $\tau\leftarrow \Gamma \mathcal{K}(z,w)$;\\ & return $\mathrm{closure}(\tau)$ \end{tabular}} \noindent is a Boltzmann sampler for $\mathcal{I}$. 
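The early-abort mechanism deserves a brief illustration. The following sketch transposes it to the rooted binary trees of Example~\ref{ex:binary} (an illustration of the mechanism only, not our implementation, which operates on the grammar~(\ref{eq:grammar})): aborting with probability $1/(N+2)$ each time the $N$-th node is created is equivalent, by the telescoping product $\prod_{i=1}^{N}\frac{i+1}{i+2}=\frac{2}{N+2}$, to a single final Bernoulli rejection of parameter $2/(N+2)$, but an aborted attempt never costs more than the number of nodes it has built.
\begin{verbatim}
import random

class Abort(Exception):
    pass

def gamma_C(C_x, state):
    # Boltzmann sampler for rooted binary trees, C(x) = x*(1 + C(x))^2,
    # with the early-abort rule: after building the N-th node, continue
    # only with probability (N+1)/(N+2).
    state[0] += 1
    n = state[0]
    if random.random() >= (n + 1) / (n + 2):
        raise Abort()
    left  = None if random.random() < 1 / (1 + C_x) else gamma_C(C_x, state)
    right = None if random.random() < 1 / (1 + C_x) else gamma_C(C_x, state)
    return (left, right)

def early_abort_sampler(x):
    C_x = (1 - 2 * x - (1 - 4 * x) ** 0.5) / (2 * x)  # value of C(x)
    while True:
        try:
            return gamma_C(C_x, state=[0])
        except Abort:
            pass        # restart from scratch, as in Gamma K(z,w)

tree = early_abort_sampler(0.24)
\end{verbatim}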
In turn, the sampler $\Gamma \mathcal{I}(z,w)$ easily yields a Boltzmann sampler for the corresponding rooted class $\mathcal{J}$. Precisely, starting from an \emph{asymmetric} bicolored irreducible dissection, each of the 3 outer black vertices, which are \emph{distinguishable}, might be chosen as the root-vertex in order to obtain a rooted irreducible dissection. Moreover, the sets of L-atoms and U-atoms are slightly different for the classes $\mathcal{I}$ and $\mathcal{J}$; indeed, a rooted dissection has one more L-atom (the black vertex following the root-vertex in clockwise order around the outer face) and one more U-atom (all faces are U-atoms in $\mathcal{J}$, whereas only the inner faces are U-atoms in $\mathcal{I}$)\footnote{We have chosen to specify the sets of L-atoms and U-atoms in this way in order to state the isomorphisms $\mathcal{K}\simeq\mathcal{I}$ and $\mathcal{J}a\simeq\overrightarrow{\mathcal{M}_3}$.}. This yields the identity \begin{equation} \mathcal{J}= 3\star\mathcal{Z}L\star\mathcal{Z}U\star\mathcal{I}, \end{equation} which directly yields (by the sampling rules of Figure~\ref{table:rules}) a Boltzmann sampler $\Gamma\mathcal{J}(z,w)$ for $\mathcal{J}$ from the Boltzmann sampler $\Gamma \mathcal{I}(z,w)$. Finally, we obtain a Boltzmann sampler for rooted admissible dissections by a simple rejection procedure: \fbox{ \begin{tabular}{ll} $\Gamma \mathcal{J}a(z,w)$:$\!\!$& repeat $\delta\leftarrow\Gamma \mathcal{J}(z,w)$ until $\delta\in\mathcal{J}a$;\\ & return $\delta$ \end{tabular} } \subsubsection{Boltzmann sampler for rooted 3-connected maps} The Boltzmann sampler for rooted irreducible dissections and the primal-map construction yield the following sampler for rooted 3-connected maps: \fbox{ \begin{tabular}{ll} $\Gamma \overrightarrow{\mathcal{M}_3}(z,w)$:$\!\!$& $\delta\leftarrow\Gamma \mathcal{J}a(z,w)$;\\ & return $\mathrm{Primal}(\delta)$ \end{tabular} } \noindent where $\mathrm{Primal}(\delta)$ is the rooted 3-connected map associated to $\delta$ (see Section~\ref{sec:primal_map}). \subsubsection{Boltzmann sampler for edge-rooted 3-connected planar graphs} To conclude, the Boltzmann sampler $\Gamma \overrightarrow{\mathcal{M}_3}(z,w)$ yields a Boltzmann sampler $\Gamma \overrightarrow{\mathcal{G}_3}(z,w)$ for edge-rooted 3-connected planar graphs, according to the isomorphism (Whitney's theorem) $\overrightarrow{\mathcal{M}_3}\simeq 2\star\overrightarrow{\mathcal{G}_3}$: \fbox{ \begin{tabular}{ll} $\Gamma \overrightarrow{\mathcal{G}_3}(z,w)$:$\!\!$& return $\Gamma \overrightarrow{\mathcal{M}_3}(z,w)$ (forgetting the embedding) \end{tabular} } \subsection{Boltzmann sampler for 2-connected planar graphs} \label{sec:2conn3conn} The next step is to realise a Boltzmann sampler for 2-connected planar graphs from the Boltzmann sampler for edge-rooted 3-connected planar graphs obtained in Section~\ref{sec:bolz3conn}. Precisely, we first describe a Boltzmann sampler for the class $\overrightarrow{\mathcal{G}b}$ of edge-rooted 2-connected planar graphs, and subsequently obtain, by using rejection techniques, a Boltzmann sampler for the class $\mathcal{G}bp$ of derived 2-connected planar graphs (having a Boltzmann sampler for $\mathcal{G}bp$ then allows us to go on to connected planar graphs). To generate edge-rooted 2-connected planar graphs, we use a well-known decomposition, due to Trakhtenbrot~\cite{trak}, which ensures that an edge-rooted 2-connected planar graph can be assembled from edge-rooted 3-connected planar components.
This decomposition deals with so-called \emph{networks} (following the terminology of Walsh~\cite{Wa}), where a network is defined as a connected graph $N$ with two distinguished vertices $0$ and $\infty$ called \emph{poles}, such that the graph $N^*$ obtained by adding an edge between $0$ and $\infty$ is a 2-connected planar graph. Accordingly, we refer to Trakhtenbrot's decomposition as the \emph{network decomposition}. Notice that networks are closely related to edge-rooted 2-connected planar graphs, though not completely equivalent (see Equation~\eqref{eq:DB} below for the precise relation). We rely on~\cite{Wa} for the description of the network decomposition. A \emph{series-network} or $s$-network is a network made of at least 2 networks connected \emph{in chain} at their poles, the $\infty$-pole of a network coinciding with the $0$-pole of the following network in the chain. A \emph{parallel network} or $p$-network is a network made of at least 2 networks connected \emph{in parallel}, so that their respective $\infty$-poles and $0$-poles coincide. A \emph{pseudo-brick} is a network $N$ whose poles are not adjacent and such that $N^*$ is a 3-connected planar graph with at least 4 vertices. A \emph{polyhedral network} or $h$-network is a network obtained by taking a pseudo-brick and substituting each edge $e$ of the pseudo-brick by a network $N_e$ (polyhedral networks establish a link between 2-connected and 3-connected planar graphs). \begin{proposition}[Trakhtenbrot] \label{prop:trak} Networks with at least 2 edges are partitioned into $s$-networks, $p$-networks and $h$-networks. \end{proposition} Let us explain how to obtain a recursive decomposition involving the different families of networks. (We simply adapt the decomposition formalised by Walsh~\cite{Wa} so as to have only positive signs.) Let $\mathcal{D}$, $\mathcal{S}$, $\mathcal{P}$, and $\mathcal{H}$ be respectively the classes of networks, $s$-networks, $p$-networks, and $h$-networks, where the L-atoms are the vertices except the two poles, and the U-atoms are the edges. In particular, $\mathcal{Z}U$ stands here for the class containing the link-graph as only object, i.e., the graph with one edge connecting the two poles. Proposition~\ref{prop:trak} ensures that $$ \mathcal{D}=\mathcal{Z}U+\mathcal{S}+\mathcal{P}+\mathcal{H}. $$ An $s$-network can be uniquely decomposed into a non-$s$-network (the head of the chain) followed by a network (the trail of the chain), which yields $$ \mathcal{S}=(\mathcal{Z}U+\mathcal{P}+\mathcal{H})\star\mathcal{Z}L\star\mathcal{D}. $$ A $p$-network has a unique \emph{maximal} parallel decomposition into a collection of at least two components that are not $p$-networks. Observe that we consider here graphs without multiple edges, so that at most one of these components is an edge. Whether there is one or no such edge-component yields $$ \mathcal{P}=\mathcal{Z}U\star\Set_{\geq 1}(\mathcal{S}+\mathcal{H})+\Set_{\geq 2}(\mathcal{S}+\mathcal{H}). $$ By definition, the class of $h$-networks corresponds to a U-substitution of networks in pseudo-bricks; and pseudo-bricks are exactly edge-rooted 3-connected planar graphs. As a consequence (recall that $\mathcal{G}t$ stands for the family of 3-connected planar graphs), $$ \mathcal{H}=\overrightarrow{\mathcal{G}t}\circ_U\mathcal{D}. 
$$ To sum up, we have the following grammar corresponding to the decomposition of networks into edge-rooted 3-connected planar graphs: \includegraphics[width=10cm]{Figures/grammar_N} Using the sampling rules (Figure~\ref{table:rules}), the decomposition grammar (N) is directly translated into a Boltzmann sampler $\Gamma \mathcal{D}(z,y)$ for networks, as given in Figure~\ref{fig:samp_networks}. A network generated by $\Gamma \mathcal{D}(z,y)$ is made of a series-parallel backbone $\beta$ (resulting from the branching structures of the calls to $\Gamma\mathcal{S}(z,y)$ and $\Gamma\mathcal{P}(z,y)$) and a collection of rooted 3-connected planar graphs that are attached at edges of $\beta$; clearly all these 3-connected components are obtained from independent calls to the Boltzmann sampler $\Gamma\mathcal{G}tr(z,w)$, with $w=D(z,y)$. \begin{figure} \caption{Boltzmann samplers for networks. All generating functions are assumed to be evaluated at $(z,y)$, i.e., $D:=D(z,y)$, $S:=S(z,y)$, $P:=P(z,y)$, and $H:=H(z,y)$.} \label{fig:samp_networks} \end{figure} The only terminal nodes of the decomposition grammar are the classes $\mathcal{Z}L$, $\mathcal{Z}U$ (which are explicit), and the class $\overrightarrow{\mathcal{G}t}$. Thus, the sampler $\Gamma \mathcal{D}(z,y)$ and the auxiliary samplers $\Gamma \mathcal{S}(z,y)$, $\Gamma \mathcal{P}(z,y)$, and $\Gamma \mathcal{H}(z,y)$ are recursively specified in terms of $\Gamma \overrightarrow{\mathcal{G}_3}(z,w)$, where $w$ and $z$ are linked by $w=D(z,y)$. Observe that each edge-rooted 2-connected planar graph different from the link-graph gives rise to two networks, obtained respectively by keeping or deleting the root-edge. This yields the identity \begin{equation} \label{eq:DB} (1+\mathcal{Z}U)\star\overrightarrow{\mathcal{G}b}=(1+\mathcal{D}). \end{equation} From that point, a Boltzmann sampler is easily obtained for the family $\overrightarrow{\mathcal{G}b}$ of edge-rooted 2-connected planar graphs. Define a procedure \textsc{AddRootEdge} that adds an edge connecting the two poles $0$ and $\infty$ of a network if they are not already adjacent, and roots the obtained graph at the edge $(0,\infty)$ directed from $0$ to $\infty$. The following sampler for $\overrightarrow{\mathcal{G}b}$ is the counterpart of Equation~(\ref{eq:DB}). \fbox{ \begin{tabular}{rl} $\Gamma (1+\mathcal{D})(z,y)$: & $\!\!\!$ if $\mathrm{Bern}\left(\frac{1}{1+D(z,y)}\right)$ return the link-graph else return $\Gamma \mathcal{D}(z,y)$; \\ $\Gamma \overrightarrow{\mathcal{G}_2}(z,y)$: & $\!\!\!$ $\gamma \leftarrow \Gamma (1+\mathcal{D})(z,y)$; \textsc{AddRootEdge}($\gamma$); return $\gamma$ \end{tabular} } \begin{lemma}\label{lem:netto2conn} The algorithm $\Gamma \overrightarrow{\mathcal{G}_2}(z,y)$ is a Boltzmann sampler for the class $\overrightarrow{\mathcal{G}b}$ of edge-rooted 2-connected planar graphs. \end{lemma} \begin{proof} Firstly, observe that $\Gamma \overrightarrow{\mathcal{G}_2}(z,y)$ outputs the link-graph either if the initial Bernoulli choice $X$ is 0, or if $X=1$ and the sampler $\Gamma\mathcal{D}(z,y)$ picks up the link-graph. Hence the link-graph is returned with probability $(1+y)/(1+D(z,y))$, i.e., with probability $1/\overrightarrow{G_2}(z,y)$. Apart from the link-graph, each graph $\gamma\in\overrightarrow{\mathcal{G}b}$ appears twice in the class $\mathcal{E}:=1+\mathcal{D}$: once in $\mathcal{E}_{|\gamma|,||\gamma||+1}$ (keeping the root-edge) and once in $\mathcal{E}_{|\gamma|,||\gamma||}$ (deleting the root-edge). 
Therefore, $\gamma$ has probability $\frac{z^{|\gamma|}}{|\gamma|!}\left(y^{||\gamma||+1}+y^{||\gamma||}\right)/E(z,y)$ of being drawn by $\Gamma \overrightarrow{\mathcal{G}_2}(z,y)$, where $E(z,y)=1+D(z,y)$ is the series of $\mathcal{E}$. This probability simplifies to $\frac{z^{|\gamma|}}{|\gamma|!}\,y^{||\gamma||}/\overrightarrow{G_2}(z,y)$. Hence, $\Gamma \overrightarrow{\mathcal{G}_2}(z,y)$ is a Boltzmann sampler for the class $\overrightarrow{\mathcal{G}b}$. \end{proof} The last step is to obtain a Boltzmann sampler for derived 2-connected planar graphs (i.e., with a distinguished vertex that is not labelled and does not count for the L-size) from the Boltzmann sampler for edge-rooted 2-connected planar graphs (as we will see in Section~\ref{sec:conn2conn}, derived 2-connected planar graphs constitute the blocks to construct connected planar graphs). We proceed in two steps. Firstly, we obtain a Boltzmann sampler for the U-derived class $\underline{\mathcal{G}b}$ (i.e., with a distinguished undirected edge that does not count in the U-size). Note that $\mathcal{F}:=2\star\underline{\mathcal{G}b}$ satisfies $\mathcal{F}=\mathcal{Z}L\ \!\!\!^2\star\overrightarrow{\mathcal{G}b}$. Hence, $\Gamma\overrightarrow{\mathcal{G}b}(z,y)$ directly yields a Boltzmann sampler $\Gamma\mathcal{F}(z,y)$ (see the sampling rules in Figure~\ref{table:rules}). Since $\mathcal{F}=2\star\underline{\mathcal{G}b}$, a Boltzmann sampler for $\underline{\mathcal{G}b}$ is obtained by calling $\Gamma\mathcal{F}(z,y)$ and then forgetting the direction of the root. Secondly, once we have a Boltzmann sampler $\Gamma\underline{\mathcal{G}_2}(z,y)$ for the U-derived class $\underline{\mathcal{G}_2}$, we just have to apply the procedure \UtoL (described in Section~\ref{sec:reject}) to the class $\mathcal{G}_2$ in order to obtain a Boltzmann sampler $\Gamma \mathcal{G}bp(z,y)$ for the L-derived class $\mathcal{G}bp$. The procedure \UtoL can be successfully applied, because the ratio vertices/edges is bounded. Indeed, each connected graph $\gamma$ satisfies $|\gamma|\leq ||\gamma||+1$, which easily yields $\alpha_{L/U}=2$ for the class $\mathcal{G}_2$ (attained by the link-graph). \subsection{Boltzmann sampler for connected planar graphs} \label{sec:conn2conn} Another well-known graph decomposition, called the \emph{block-decomposition}, ensures that a connected graph can be decomposed into 2-connected components. We take advantage of this decomposition in order to specify a Boltzmann sampler for derived connected planar graphs from the Boltzmann sampler for derived 2-connected planar graphs obtained in the last section. Then, a further rejection step yields a Boltzmann sampler for connected planar graphs. The \emph{block-decomposition} (see~\cite[p.10]{Ha} for a detailed description) ensures that each derived connected planar graph can be uniquely constructed in the following way: take a set of derived 2-connected planar graphs and attach them together, by merging their marked vertices into a unique marked vertex. Then, for each unmarked vertex $v$ of each 2-connected component, take a derived connected planar graph $\gamma_v$ and merge the marked vertex of $\gamma_v$ with $v$ (this operation corresponds to an L-substitution). The block-decomposition gives rise to the following identity relating the classes $\mathcal{G}cp$ and $\mathcal{G}bp$: \begin{equation} \label{eq:2conn} \mathcal{G}cp=\Set\left(\mathcal{G}bp\circ_L(\mathcal{Z}L\star\mathcal{G}cp)\right).
\end{equation} This is directly translated into the following Boltzmann sampler for $\mathcal{G}cp$ using the sampling rules of Figure~\ref{table:rules}. (Notice that the 2-connected blocks of a connected graph are built independently, each block resulting from a call to the Boltzmann sampler $\Gamma \mathcal{G}bp(z,y)$, where $z=xG_1\ \!\!\!'(x,y)$.) \fbox{ \begin{tabular}{ll} $\Gamma \mathcal{G}cp(x,y)$:&$k\leftarrow \Pois (G_2\ \!\!\!'(z,y));\ \ [\mathrm{with}\ z=xG_1\ \!\!\!'(x,y)]$\\ & $\gamma\leftarrow (\Gamma \mathcal{G}bp(z,y),\ldots,\Gamma \mathcal{G}bp(z,y))$; \{$k$ independent calls\} \\ & merge the $k$ components of $\gamma$ at their marked vertices;\\ &for each unmarked vertex $v$ of $\gamma$ do\\ & $\ \ \ \ \ $ $\gamma_v\leftarrow \Gamma \mathcal{G}cp(x,y)$;\\ & $\ \ \ \ \ $ merge the marked vertex of $\gamma_v$ with $v$\\ & od;\\ &return $\gamma$. \end{tabular} } Then, a Boltzmann sampler for connected planar graphs is simply obtained from $\Gamma \mathcal{G}cp(x,y)$ by using a rejection step so as to adjust the probability distribution: \fbox{ \begin{tabular}{ll} $\Gamma \mathcal{G}_1(x,y)$:& repeat $\gamma\leftarrow \Gamma \mathcal{G}cp(x,y)$;\\ & \phantom{1}\hspace{.2cm}take the marked vertex $v$ back to the set of L-atoms;\\ & \phantom{1}\hspace{.2cm}(if we consider the labels, $v$ receives label $|\gamma|+1$)\\ & \phantom{1}\hspace{.2cm}\{this makes $|\gamma|$ increase by $1$, and $\gamma\in\mathcal{G}_1$\}\\ & until $\displaystyle\mathrm{Bern}\left(\frac{1}{|\gamma|}\right)$;\\ & return $\gamma$ \end{tabular} } \begin{lemma} \label{lemma:connconnpoint} The sampler $\Gamma \mathcal{G}_1(x,y)$ is a Boltzmann sampler for connected planar graphs. \end{lemma} \begin{proof} The proof is similar to the proof of Lemma~\ref{lem:LtoU}. Due to the general property that $\mathcal{C}_{n,m}$ can be identified with $\mathcal{C}'_{n-1,m}$, the sampler delimited inside the repeat/until loop draws each object $\gamma\in\mathcal{G}_1$ with probability $G_1\ \!\!\!'(x,y)^{-1}\frac{x^{|\gamma|-1}}{(|\gamma|-1)!}y^{||\gamma||}$, i.e., with probability proportional to $|\gamma|\frac{x^{|\gamma|}}{|\gamma|!}y^{||\gamma||}$. Hence, according to Lemma~\ref{lemma:rej}, the sampler $\Gamma \mathcal{G}_1(x,y)$ draws each object $\gamma\in\mathcal{G}_1$ with probability proportional to $\frac{x^{|\gamma|}}{|\gamma|!}y^{||\gamma||}$, i.e., is a Boltzmann sampler for $\mathcal{G}_1$. \end{proof} \subsection{Boltzmann sampler for planar graphs} \label{sec:planconn} A planar graph is classically decomposed into the set of its connected components, yielding \begin{equation} \label{eq:CtoG} \mathcal{G}=\Set(\mathcal{G}c), \end{equation} which translates to the following Boltzmann sampler for the class $\mathcal{G}$ of planar graphs (the Set construction gives rise to a Poisson law, see Figure~\ref{table:rules}): \fbox{\begin{tabular}{ll} $\Gamma \mathcal{G}(x,y)$:& $k\leftarrow\Pois(G_1(x,y))$;\\ & return $(\Gamma \mathcal{G}_1(x,y),\ldots,\Gamma \mathcal{G}_1(x,y))$ \{$k$ independent calls\} \end{tabular}} \begin{proposition} \label{lemma:planconn} The procedure $\Gamma \mathcal{G}(x,y)$ is a Boltzmann sampler for planar graphs. \end{proposition} \section{Deriving an efficient sampler} \label{sec:efficient} We have completely described in Section~\ref{sec:decomp} a mixed Boltzmann sampler $\Gamma \mathcal{G}(x,y)$ for planar graphs.
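To make the translation of the $\Set$ construction concrete, here is a minimal sketch (in Python, for illustration only; the names \texttt{poisson} and \texttt{gamma\_set} are ours) of the corresponding sampling rule of Figure~\ref{table:rules}: a Boltzmann sampler for $\Set(\mathcal{C})$ draws a Poisson number of independent components, the parameter of the Poisson law being the numerical value of the series of $\mathcal{C}$, exactly as $\Gamma \mathcal{G}(x,y)$ draws $\Pois(G_1(x,y))$ connected components.

\begin{verbatim}
import math, random

def poisson(lam, rng=random):
    # Knuth's method: count uniform factors until the running product
    # drops below exp(-lam)
    threshold, k, prod = math.exp(-lam), 0, rng.random()
    while prod > threshold:
        k += 1
        prod *= rng.random()
    return k

def gamma_set(gamma_component, component_series_value, rng=random):
    # Boltzmann sampler for Set(C): draw k ~ Poisson(C(x,y)), then make
    # k independent calls to the component sampler
    k = poisson(component_series_value, rng)
    return [gamma_component() for _ in range(k)]

if __name__ == "__main__":
    random.seed(0)
    toy_component = lambda: random.randint(1, 5)   # stand-in component sampler
    print(gamma_set(toy_component, component_series_value=1.3))
\end{verbatim}

In the actual sampler $\Gamma\mathcal{G}(x,y)$, the role of \texttt{gamma\_component} is played by $\Gamma\mathcal{G}_1(x,y)$ and the parameter of the Poisson law is the evaluated series $G_1(x,y)$.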
The sampler $\Gamma \mathcal{G}(x,y)$ yields an exact-size uniform sampler and an approximate-size uniform sampler for planar graphs: to sample at size $n$, repeatedly call the sampler $\Gamma \mathcal{G}(x,1)$ (for a suitably chosen value of $x$) until the generated graph has size $n$; to sample in a range of sizes $[n(1-\epsilon),n(1+\epsilon)]$, repeatedly call $\Gamma \mathcal{G}(x,1)$ until the generated graph has size in this range. These targetted samplers can be shown to have expected polynomial complexity, of order $n^{5/2}$ for approximate-size sampling and $n^{7/2}$ for exact-size sampling (we omit the proof since we will describe more efficient samplers in this section). However, more is needed to achieve the complexity stated in Theorem~\ref{theo:planarsamp1}, i.e., $O(n/\epsilon)$ for approximate-size sampling and $O(n^2)$ for exact-size sampling. The main problem of the sampler $\Gamma \mathcal{G}(x,1)$ is that the typical size of a graph generated is small, so that the number of attempts to reach a large target size is prohibitive. In order to correct this effect, we design in this section a Boltzmann sampler for ``bi-derived'' planar graphs, which are equivalent to bi-pointed planar graphs, i.e., with 2 distinguished vertices\footnote{In an earlier version of the article and in the conference version~\cite{Fu05a}, we derived 3 times---as prescribed by~\cite{DuFlLoSc04}---in order to get a singularity type $(1-x/\rho)^{-1/2}$ (efficient targetted samplers are obtained when taking $x=\rho(1-1/(2n))$). We have recently discovered that deriving 2 times (which yields a square-root singularity type $(1-x/\rho)^{1/2}$) and taking again $x=\rho(1-1/(2n))$ yields the same complexities for the targetted samplers, with the advantage that the description and analysis are significantly simpler (in the original article~\cite{DuFlLoSc04}, they prescribe to take $x=\rho$ and to use some early abort techniques for square-root singularity type, but it seems difficult to analyse the gain due to early abortion here, since the Boltzmann sampler for planar graphs makes use of rejection techniques). }. The intuition is that a Boltzmann sampler for bi-pointed planar graphs gives more weight to large graphs, because a graph of size $n$ gives rise to $n(n-1)$ bi-pointed graphs. Hence, the probability of reaching a large size is better (upon suitably choosing the value of the Boltzmann parameter). The fact that the graphs have to be pointed twice is due to the specific asymptotic behaviour of the coefficients counting planar graphs, which has been recently analysed by Gim\'enez and Noy~\cite{gimeneznoy}. \subsection{Targetted samplers for classes with square-root singularities.} As we describe here, a mixed class $\mathcal{C}$ with a certain type of singularities (square-root type) gives rise to efficient approximate-size and exact-size samplers, provided $\mathcal{C}$ has a Boltzmann sampler such that the expected cost of generation is of the same order as the expected size of the object generated. \begin{definition} Given a mixed class $\mathcal{C}$, we define a \emph{singular point} of $\mathcal{C}$ as a pair $x_0>0$, $y_0>0$ such that the function $x\mapsto C(x,y_0)$ has a dominant singularity at $x_0$ (the radius of convergence is $x_0$).
\end{definition} \begin{definition}\label{def:alpha_sing} For $\alpha\in\mathbb{R}\backslash\mathbb{Z}_{\geq 0}$, a mixed class $\mathcal{C}$ is called $\alpha$-singular if, for each singular point $(x_0,y_0)$ of $\mathcal{C}$, the function $x\mapsto C(x,y_0)$ has a unique dominant singularity at $x_0$ (i.e., $x_0$ is the unique singularity on the circle $|z|=x_0$) and admits a singular expansion of the form $$ C(x,y_0)=P(x)+c_{\alpha}\cdot\left( x_0-x \right)^{\alpha}+o\left( (x_0-x )^{\alpha}\right), $$ where $c_{\alpha}$ is a constant, $P(x)$ is rational with no poles in the disk $|z|\leq x_0$, and where the expansion holds in a so-called $\Delta$-neighbourhood of $x_0$, see~\cite{fla,flaod}. In the special case $\alpha=1/2$, the class is said to have square-root singularities. \end{definition} \begin{lemma}\label{lem:square} Let $\mathcal{C}$ be a mixed class with square-root singularities, and endowed with a Boltzmann sampler $\Gamma\mathcal{C}(x,y)$. Let $(x_0,y_0)$ be a singular point of $\mathcal{C}$. For any $n> 0$, define $$ x_n:=\big(1-\tfrac{1}{2n}\big)\cdot x_0. $$ Call $\pi_n$ ($\pi_{n,\epsilon}$, resp.) the probability that an object $\gamma$ generated by $\Gamma \mathcal{C}(x_n,y_0)$ satisfies $|\gamma|=n$ ($|\gamma|\in I_{n,\epsilon}:=[n(1-\epsilon),n(1+\epsilon)]$, resp.); and call $\sigma_n$ the expected size of the output of $\Gamma \mathcal{C}(x_n,y_0)$. Then $1/\pi_n$ is $O(n^{3/2})$, $1/\pi_{n,\epsilon}$ is $O(n^{1/2}/\epsilon)$, and $\sigma_n$ is $O(n^{1/2})$. \end{lemma} \begin{proof} The so-called transfer theorems of singularity analysis~\cite{flaod} ensure that the coefficient $a_n:=[x^n]C(x,y_0)$ satisfies, as $n\to\infty$, $a_n\mathop{\sim}_{n\to\infty}c\ \!x_0^{-n}n^{-3/2}$, where $c$ is a positive constant. This easily yields the asymptotic bounds for $1/\pi_n$ and $1/\pi_{n,\epsilon}$, using the expressions $\pi_n=a_nx_n\ \!\!\!^n/C(x_n,y_0)$ and $\pi_{n,\epsilon}=\sum_{k\in I_{n,\epsilon}}a_kx_n\ \!\!\!^k/C(x_n,y_0)$. It is also an easy exercise to find the asymptotics of $\sigma_n$, using the formula (given in~\cite{DuFlLoSc04}) $\sigma_n=x_n\cdot\partial_x C(x_n,y_0)/C(x_n,y_0)$. \end{proof} Lemma~\ref{lem:square} suggests the following simple heuristic to obtain efficient targetted samplers. For approximate-size sampling (exact-size sampling, resp.), repeat calling $\Gamma \mathcal{C}(x_n,1)$ until the size of the object is in $I_{n,\epsilon}$ (is exactly $n$, resp.). (The parameter $y$ is useful if a target U-size $m$ is also given, as we will see for planar graphs in Section~\ref{sec:sample_edges}.) The complexity of sampling will be good for a class $\mathcal{C}$ that has square-root singularities and that has an efficient Boltzmann sampler. Indeed, for approximate-size sampling, the number of attempts to reach the target-domain $I_{n,\epsilon}$ (i.e., $\pi_{n,\epsilon}^{-1}$) is of order $n^{1/2}$, and for exact-size sampling, the number of attempts to reach the size $n$ (i.e., $\pi_{n}^{-1}$) is of order $n^{3/2}$. If $\mathcal{C}$ is endowed with a Boltzmann sampler $\Gamma \mathcal{C}(x,y)$ such that the expected complexity of sampling at $(x_n,y_0)$ is of order $\sqrt{n}$ (same order as the expected size $\sigma_n$), then the expected complexity is typically $O(n/\epsilon)$ for approximate-size sampling and $O(n^2)$ for exact-size sampling, as we will see for planar graphs. Let us mention that the original article~\cite{DuFlLoSc04} uses a different heuristic. 
The targetted samplers also repeat calling the Boltzmann sampler until the size of the object is in the target domain, but the parameter $x$ is chosen to be \emph{exactly} at the singularity $\rho$. The second difference is that, at each attempt, the generation is interrupted if the size of the object goes beyond the target domain. We prefer to use the simple heuristic discussed above, which does not require early interruption techniques. In this way the samplers are easier to describe and to analyse. In order to apply these techniques to planar graphs, we have to derive two times the class of planar graphs, as indicated by the following two lemmas. \begin{lemma}[\cite{fla}] If a class $\mathcal{C}$ is $\alpha$-singular, then the class $\mathcal{C}'$ is $(\alpha-1)$-singular (by the effect of derivation). \end{lemma} \begin{lemma}[\cite{gimeneznoy}]\label{lem:bi_der} The class $\mathcal{G}$ of planar graphs is $5/2$-singular, hence the class $\mathcal{G}''$ of bi-derived planar graphs has square-root singularities. \end{lemma} \subsection{Derivation rules for Boltzmann samplers} As suggested by Lemma~\ref{lem:square} and Lemma~\ref{lem:bi_der}, we will get good targetted samplers for planar graphs if we can describe an efficient Boltzmann sampler for the class $\mathcal{G}''$ of bi-derived planar graphs (a graph in $\mathcal{G}''$ has two unlabelled vertices that are marked specifically, say the first one is marked $\ast$ and the second one is marked $\star$). Our Boltzmann sampler $\Gamma \mathcal{G}''(x,y)$ ---to be presented in this section--- makes use of the decomposition of planar graphs into 3-connected components which we have already successfully used to obtain a Boltzmann sampler for planar graphs in Section~\ref{sec:decomp}. This decomposition can be formally translated into a decomposition grammar (with additional unpointing/pointing operations). To obtain a Boltzmann sampler for bi-derived planar graphs instead of planar graphs, the idea is simply to \emph{derive} this grammar 2 times. As we explain here and as is well known in general, a decomposition grammar can be derived automatically. (In our framework, a decomposition grammar involves the 5 constructions $\{+,\star,\Set_{\geq d},\circ_{L},\circ_{U}\}$.) \begin{proposition}[derivation rules]\label{prop:der_rules} The basic finite classes satisfy $$ (\mathbf{1})'=0,\ \ \ (\mathcal{Z}_L)'=1,\ \ \ (\mathcal{Z}_U)'=0. $$ The 5 constructions satisfy the following derivation rules: \begin{equation} \left\{ \begin{array}{rcl} (\mathcal{A}+\mathcal{B})'&=&\mathcal{A}'+\mathcal{B}',\\ (\mathcal{A}\star\mathcal{B})'&=&\mathcal{A}'\star\mathcal{B}+\mathcal{A}\star\mathcal{B}',\\ (\Set_{\geq d}(\mathcal{B}))'&=&\mathcal{B}'\star\Set_{\geq d-1}(\mathcal{B})\ \mathrm{for}\ d\geq 0,\ \ \ \mathrm{(with}\ \Set_{\geq -1}=\Set)\\ (\mathcal{A}\circ_L\mathcal{B})'&=&\mathcal{B}'\star(\mathcal{A}'\circ_L\mathcal{B}),\\ (\mathcal{A}\circ_U\mathcal{B})'&=&\mathcal{A}'\circ_U\mathcal{B}+\mathcal{B}'\star(\underline{\mathcal{A}}\circ_U\mathcal{B}). \end{array} \right. \end{equation} \end{proposition} \begin{proof} The derivation formulas for basic classes are trivial. The proof of the derivation rules for $\{+,\star,\circ_L\}$ are given in~\cite{BeLaLe}. Notice that the rule for $\Set_{\geq d}$ follows from the rule for $\circ_L$. (Indeed, $\Set_{\geq d}(\mathcal{B})=\mathcal{A}\circ_{L}\mathcal{B}$, where $\mathcal{A}=\Set_{\geq d}(\mathcal{Z}L)$, which clearly satisfies $\mathcal{A}'=\Set_{\geq d-1}(\mathcal{Z}L)$.) 
Finally, the proof of the rule for $\circ_U$ uses similar arguments as the proof of the rule for $\circ_L$. In an object of $(\mathcal{A}\circ_U\mathcal{B})'$, the distinguished atom is either on the core-structure (in $\mathcal{A}$), or is in a certain component (in $\mathcal{B}$) that is substituted at a certain U-atom of the core-structure. The first case yields the term $\mathcal{A}'\circ_U\mathcal{B}$, and the second case yields the term $\mathcal{B}'\star(\underline{\mathcal{A}}\circ_U\mathcal{B})$. \end{proof} According to Proposition~\ref{prop:der_rules}, it is completely automatic to find a decomposition grammar for a derived class $\mathcal{C}'$ if we are given a decomposition grammar for $\mathcal{C}$. \subsection{Boltzmann sampler for bi-derived planar graphs} We present in this section our Boltzmann sampler $\Gamma \mathcal{G}''(x,y)$ for bi-derived planar graphs, with a quite similar approach to the one adopted in Section~\ref{sec:decomp}, and again a bottom-to-top presentation. At first the closure-mapping allows us to obtain Boltzmann samplers for 3-connected planar graphs marked in various ways. Then we go from 3-connected to bi-derived planar graphs via networks, bi-derived 2-connected, and bi-derived connected planar graphs. The complete scheme is illustrated in Figure~\ref{fig:scheme_bi_derived}, which is the counterpart of Figure~\ref{fig:scheme_unrooted}. \begin{figure} \caption{The complete scheme to obtain a Boltzmann sampler for bi-derived planar graphs.} \label{fig:scheme_bi_derived} \end{figure} \subsubsection{Boltzmann samplers for derived binary trees.}\label{sec:sampKp} We have already obtained in Section~\ref{sec:boltz_binary_trees} a Boltzmann sampler for the class $\mathcal{K}$ of unrooted asymmetric binary trees. Our purpose here is to derive a Boltzmann sampler for the derived class $\mathcal{K}'$. Recall that we have also described in Section~\ref{sec:boltz_binary_trees} a Boltzmann sampler for the U-derived class $\underline{\mathcal{K}}$, which satisfies the completely recursive decomposition grammar~(\ref{eq:grammar}) (see also Figure~\ref{fig:grammar}). Hence, we have to apply the procedure \UtoL described in Section~\ref{sec:reject} to the class $\mathcal{K}$ in order to obtain a Boltzmann sampler $\Gamma \mathcal{K}'(z,w)$ from $\Gamma \underline{\mathcal{K}}(z,w)$. For this we have to check that $\alpha_{L/U}$ is finite for the class $\mathcal{K}$. It is easily proved that a bicolored binary tree with $m$ leaves has $m-2$ nodes, and that at most $\lfloor 2(m-3)/3\rfloor$ of the nodes are black. In addition, there exist trees with $3i+3$ leaves and $2i$ black nodes (those with all leaves incident to black nodes). Hence, for the class $\mathcal{K}$, the parameter $\alpha_{L/U}$ is equal to $2/3$. Therefore the procedure \UtoL can be applied to the class $\mathcal{K}$. \subsubsection{Boltzmann samplers for derived rooted dissections and 3-connected maps}\label{sec:sampIp} Our next step is to obtain Boltzmann samplers for derived irreducible dissections, in order to go subsequently to 3-connected maps. As expected we take advantage of the closure-mapping. Recall that the closure-mapping realises the isomorphism $\mathcal{K}\simeq\mathcal{I}$ between the class $\mathcal{K}$ of asymmetric binary trees and the class $\mathcal{I}$ of asymmetric irreducible dissections. There is no problem in deriving an isomorphism, so the closure-mapping also realises the isomorphism $\mathcal{K}'\simeq\mathcal{I}'$. 
Accordingly, we have the following Boltzmann sampler for the class $\mathcal{I}'$: \fbox{ \begin{tabular}{ll} $\Gamma \mathcal{I}'(z,w)$:$\!\!$& $\tau\leftarrow\Gamma \mathcal{K}'(z,w)$;\\ & $\delta\leftarrow\mathrm{closure}(\tau)$;\\ & return $\delta$ \end{tabular} } \noindent where the discarded L-atom is the same in $\tau$ and in $\delta$. Then, we easily obtain a Boltzmann sampler for the corresponding \emph{rooted} class $\mathcal{J}'$. Indeed, the equation $\mathcal{J}=3\star\mathcal{Z}_L\star\mathcal{Z}_U\star\mathcal{I}$ that relates $\mathcal{I}$ and $\mathcal{J}$ yields $\mathcal{J}'=3\star\mathcal{Z}_U\star\mathcal{I}+3\star\mathcal{Z}_L\star\mathcal{Z}_U\star\mathcal{I}'$. Hence, using the sampling rules of Figure~\ref{table:rules}, we obtain a Boltzmann sampler $\Gamma \mathcal{J}'(z,w)$ from the Boltzmann samplers $\Gamma \mathcal{I}(z,w)$ and $\Gamma \mathcal{I}'(z,w)$. From that point, we obtain a Boltzmann sampler for the derived rooted dissections that are admissible. As $\mathcal{J}a\subset\mathcal{J}$, we also have $\mathcal{J}a'\subset\mathcal{J}'$, which yields the following Boltzmann sampler for $\mathcal{J}a'$: \fbox{ \begin{tabular}{ll} $\Gamma \mathcal{J}a'(z,w)$:$\!\!$& repeat $\delta\leftarrow\Gamma \mathcal{J}'(z,w)$\\ & until $\delta\in\mathcal{J}a'$;\\ & return $\delta$ \end{tabular} } Finally, using the isomorphism $\mathcal{J}a\simeq\mathcal{M}tr$ (primal map construction, Section~\ref{sec:primal_map}), which yields $\mathcal{J}a'\simeq\mathcal{M}tr'$, we obtain a Boltzmann sampler for derived rooted 3-connected maps: \fbox{ \begin{tabular}{ll} $\Gamma \mathcal{M}tr'(z,w)$:$\!\!$& $\delta\leftarrow\Gamma \mathcal{J}a'(z,w)$;\\ & return $\mathrm{Primal}(\delta)$ \end{tabular} } \noindent where the returned rooted 3-connected map inherits the distinguished L-atom of $\delta$. \subsubsection{Boltzmann samplers for derived rooted 3-connected planar graphs.} \label{sec:derived_3conn} As we have seen in Section~\ref{sec:equiv}, Whitney's theorem states that any 3-connected planar graph has two embeddings on the sphere (which differ by a reflection). Clearly the same property holds for 3-connected planar graphs that have additional marks. (We have already used this observation in Section~\ref{sec:equiv} for rooted graphs, $\mathcal{M}tr\simeq 2\star\mathcal{G}tr$, in order to obtain a Boltzmann sampler for $\mathcal{G}tr$.) Hence $\mathcal{M}tr'\simeq 2\star\mathcal{G}tr'$, which yields the following Boltzmann sampler for $\mathcal{G}tr'$: \fbox{ \begin{tabular}{ll} $\Gamma \mathcal{G}tr'(z,w)$:$\!\!$& return $\Gamma \mathcal{M}tr'(z,w)$;\\ & (forgetting the embedding) \end{tabular} } The next step (in Section~\ref{sec:derived_networks}) is to go to derived networks. This requires deriving the decomposition grammar for networks, which involves not only the classes $\mathcal{G}tr$ and $\mathcal{G}tr'$, but also the U-derived class $\underline{\mathcal{G}tr}$. Hence, we also need a Boltzmann sampler for $\underline{\mathcal{G}tr}$. To this aim, we just have to apply the procedure \LtoU to the class $\mathcal{G}tr$. By the Euler relation, a 3-connected planar graph with $n$ vertices has at most $3n-6$ edges (equality holds for triangulations). Hence, the parameter $\alpha_{U/L}$ is equal to $3$ for the class $\mathcal{G}tr$, so \LtoU can be successfully applied to $\mathcal{G}tr$, yielding a Boltzmann sampler for $\underline{\mathcal{G}tr}$ from the Boltzmann sampler for $\mathcal{G}tr'$.
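Most of the samplers above are assembled from the two elementary sampling rules of Figure~\ref{table:rules} for disjoint unions and products. As a reminder of these two rules, here is a minimal sketch (in Python, for illustration; the names are ours, and the numerical values of the generating functions are assumed to be precomputed, as in the actual implementation): a union $\mathcal{A}+\mathcal{B}$ is sampled by a Bernoulli choice with probability proportional to the values of the two series, and a product $\mathcal{A}\star\mathcal{B}$ by two independent calls.

\begin{verbatim}
import random

def gamma_union(gamma_a, a_value, gamma_b, b_value, rng=random):
    # sampling rule for A + B: choose a summand with probability
    # proportional to the value of its generating function, then call
    # the corresponding sampler
    if rng.random() < a_value / (a_value + b_value):
        return gamma_a()
    return gamma_b()

def gamma_product(gamma_a, gamma_b):
    # sampling rule for A * B: one independent call to each factor
    return (gamma_a(), gamma_b())

if __name__ == "__main__":
    random.seed(1)
    leaf = lambda: "leaf"
    pair = lambda: gamma_product(leaf, leaf)
    # toy union of a "leaf" class (series value 0.4) and a "pair" class (0.16)
    print(gamma_union(leaf, 0.4, pair, 0.16))
\end{verbatim}

For instance, $\Gamma\mathcal{J}'(z,w)$ is obtained from the identity $\mathcal{J}'=3\star\mathcal{Z}_U\star\mathcal{I}+3\star\mathcal{Z}_L\star\mathcal{Z}_U\star\mathcal{I}'$ by a Bernoulli choice between the two summands (with weights $3wI(z,w)$ and $3zwI'(z,w)$), followed by a call to $\Gamma\mathcal{I}(z,w)$ or $\Gamma\mathcal{I}'(z,w)$.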
\subsection{Boltzmann samplers for derived networks.}\label{sec:derived_networks} Following the general scheme shown in Figure~\ref{fig:scheme_bi_derived}, our aim is now to obtain a Boltzmann sampler for the class $\mathcal{D}'$ of derived networks. Recall that the decomposition grammar for $\mathcal{D}$ has allowed us to obtain a Boltzmann sampler for $\mathcal{D}$ from a Boltzmann sampler for $\mathcal{G}tr$. Applying the derivation rules (Proposition~\ref{prop:der_rules}) to the grammar~(N), we obtain the following decomposition grammar for $\mathcal{D}'$: \noindent\includegraphics[width=11.3cm]{Figures/grammars_D_p} The only terminal classes in this grammar are $\mathcal{G}tr'$ and $\underline{\mathcal{G}tr}$. Hence, the sampling rules of Figure~\ref{table:rules} yield a Boltzmann sampler for $\mathcal{D}'$ from the Boltzmann samplers for $\mathcal{G}tr'$ and $\underline{\mathcal{G}tr}$ which we have obtained in Section~\ref{sec:derived_3conn}. The sampler $\Gamma\mathcal{D}'(z,y)$ looks similar (though with more cases) to the sampler $\Gamma\mathcal{D}(z,y)$ given in Figure~\ref{fig:samp_networks}. \subsection{Boltzmann samplers for bi-derived 2-connected planar graphs.}\label{sec:sampDp} The aim of this section is to obtain Boltzmann samplers for the class $\mathcal{G}bp'$ of bi-derived 2-connected planar graphs (building on the Boltzmann sampler for $\mathcal{G}bp$ obtained in Section~\ref{sec:2conn3conn}), in order to go subsequently to bi-derived connected planar graphs. At first, the Boltzmann sampler for $\mathcal{D}'$ yields a Boltzmann sampler for the class $\mathcal{G}br'$. Indeed, the identity $(1+\mathcal{D})=(1+\mathcal{Z}_U)\star\overrightarrow{\mathcal{G}_2}$ is derived as $\mathcal{D}'=(1+\mathcal{Z}_U)\star\mathcal{G}br'$, which yields the following sampler: \fbox{ \begin{tabular}{ll} $\Gamma \overrightarrow{\mathcal{G}_2}'(z,y)$:$\!\!$& $\gamma\leftarrow\Gamma \mathcal{D}'(z,y)$;\\ & $\textsc{AddRootEdge}(\gamma)$;\\ & return $\gamma$ \end{tabular} } \noindent where \textsc{AddRootEdge} has been defined in Section~\ref{sec:2conn3conn}. The proof that this is a Boltzmann sampler for $\mathcal{G}br'$ is similar to the proof of Lemma~\ref{lem:netto2conn}. Next we describe a Boltzmann sampler for the class $\underline{\mathcal{G}b}'$. As we have seen in Section~\ref{sec:2conn3conn}, $\underline{\mathcal{G}b}$ and $\mathcal{G}br$ are related by the identity $2\star\underline{\mathcal{G}b}=\mathcal{Z}_L\ \!\!\!^2\star\mathcal{G}br$. Hence, if we define $\mathcal{F}:=2\star\underline{\mathcal{G}b}$, we have $\mathcal{F}'=\mathcal{Z}L\ \!\!\!^2\star\mathcal{G}br'+2\star\mathcal{Z}L\star\mathcal{G}br$. Hence, the sampling rules of Figure~\ref{table:rules} yield a Boltzmann sampler $\Gamma \mathcal{F}'(z,y)$ for the class $\mathcal{F}'$. Clearly, as $\mathcal{F}'=2\star\underline{\mathcal{G}b}'$, a Boltzmann sampler for $\underline{\mathcal{G}b}'$ is obtained by calling $\Gamma \mathcal{F}'(z,y)$ and forgetting the direction of the root. Finally, the procedure \UtoL (applied to the class $\mathcal{G}bp$) yields a Boltzmann sampler for $\mathcal{G}bp'$ from the Boltzmann sampler for $\underline{\mathcal{G}b}'$. The procedure can be successfully applied, as the class $\mathcal{G}bp$ satisfies $\alpha_{L/U}=1$ (attained by the link-graph).
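The derived grammars used above are obtained entirely mechanically from Proposition~\ref{prop:der_rules}. The following small sketch (in Python, purely illustrative; the encoding of specifications as nested tuples and all names are ours) applies the derivation rules to a symbolically represented specification, which is essentially what one does by hand when deriving the grammar~(N).

\begin{verbatim}
# A specification is a nested tuple or a class name (a string):
#   ("zero",), ("one",), ("ZL",), ("ZU",),
#   ("+", A, B), ("*", A, B), ("set>=", d, B), ("oL", A, B), ("oU", A, B).
# derive(spec) returns the L-derived specification, following the rules of
# Proposition der_rules; a named class C becomes "C'", and the U-derived
# class needed by the rule for oU is written "C_".

def derive(spec):
    if isinstance(spec, str):                      # a named class
        return spec + "'"
    op = spec[0]
    if op in ("zero", "one", "ZU"):                # (1)' = (Z_U)' = 0
        return ("zero",)
    if op == "ZL":                                 # (Z_L)' = 1
        return ("one",)
    if op == "+":
        return ("+", derive(spec[1]), derive(spec[2]))
    if op == "*":
        A, B = spec[1], spec[2]
        return ("+", ("*", derive(A), B), ("*", A, derive(B)))
    if op == "set>=":                              # (Set_{>=d} B)' = B' * Set_{>=d-1} B
        d, B = spec[1], spec[2]
        return ("*", derive(B), ("set>=", max(d - 1, 0), B))
    if op == "oL":                                 # (A o_L B)' = B' * (A' o_L B)
        A, B = spec[1], spec[2]
        return ("*", derive(B), ("oL", derive(A), B))
    if op == "oU":                                 # (A o_U B)' = A' o_U B + B' * (A_ o_U B)
        A, B = spec[1], spec[2]
        underlined = A + "_" if isinstance(A, str) else ("underline", A)
        return ("+", ("oU", derive(A), B),
                     ("*", derive(B), ("oU", underlined, B)))
    raise ValueError("unknown construction: %s" % op)

if __name__ == "__main__":
    # the h-network rule H = G3r o_U D of the grammar (N); its derivative
    # involves exactly the two terminal classes G3r' and the U-derived G3r_
    print(derive(("oU", "G3r", "D")))
\end{verbatim}

Applied line by line to the grammar~(N), this mechanical rewriting yields precisely the grammar for $\mathcal{D}'$ displayed above, whose only terminal classes are $\mathcal{G}tr'$ and $\underline{\mathcal{G}tr}$.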
\subsubsection{Boltzmann sampler for bi-derived connected planar graphs.}\label{sec:sampCp} The block-decomposition makes it easy to obtain a Boltzmann sampler for the class $\mathcal{G}cp'$ of bi-derived connected planar graphs (this decomposition has already allowed us to obtain a Boltzmann sampler for $\mathcal{G}cp$ in Section~\ref{sec:conn2conn}). Recall that the block-decomposition yields the identity $$ \mathcal{G}cp=\Set\left(\mathcal{G}bp\circ_L(\mathcal{Z}_L\star\mathcal{G}cp)\right), $$ which is derived as $$ \mathcal{G}cp'=(\mathcal{G}cp+\mathcal{Z}_L\star\mathcal{G}cp')\star\left(\mathcal{G}bp'\circ_L(\mathcal{Z}_L\star\mathcal{G}cp)\right)\star\mathcal{G}cp. $$ As we already have Boltzmann samplers for the classes $\mathcal{G}bp'$ and $\mathcal{G}cp$, the sampling rules of Figure~\ref{table:rules} yield a Boltzmann sampler $\Gamma \mathcal{G}cp'(x,y)$ for the class $\mathcal{G}cp'$. Observe that the 2-connected blocks of a graph generated by $\Gamma \mathcal{G}cp'(x,y)$ are obtained as independent calls to $\Gamma \mathcal{G}bp(z,y)$ and $\Gamma \mathcal{G}bp'(z,y)$, where $z$ and $x$ are related by the change of variable $z=xG_1\ \!\!\!'(x,y)$. \subsubsection{Boltzmann samplers for bi-derived planar graphs}\label{sec:sampGp} We can now achieve our goal, i.e., obtain a Boltzmann sampler for the class $\mathcal{G}''$ of bi-derived planar graphs. For this purpose, we simply derive twice the identity $$ \mathcal{G}=\Set(\mathcal{G}c), $$ which yields successively the identities $$ \mathcal{G}'=\mathcal{G}cp\star\mathcal{G}, $$ and $$ \mathcal{G}''=\mathcal{G}cp'\star\mathcal{G}+\mathcal{G}cp\star\mathcal{G}'. $$ From the first identity and $\Gamma \mathcal{G}(x,y)$, $\Gamma \mathcal{G}cp(x,y)$, we get a Boltzmann sampler $\Gamma \mathcal{G}'(x,y)$ for the class $\mathcal{G}'$. Then, from the second identity and $\Gamma \mathcal{G}(x,y)$, $\Gamma \mathcal{G}'(x,y)$, $\Gamma \mathcal{G}cp(x,y)$, $\Gamma \mathcal{G}cp'(x,y)$, we get a Boltzmann sampler $\Gamma \mathcal{G}''(x,y)$ for the class $\mathcal{G}''$. \section{The targetted samplers for planar graphs}\label{sec:final_smap} The Boltzmann sampler $\Gamma\mathcal{G}''(x,y)$---when tuned as indicated in Lemma~\ref{lem:square}---yields efficient exact-size and approximate-size random samplers for planar graphs, with the complexities as stated in Theorem~\ref{theo:planarsamp1} and Theorem~\ref{theo:planarsamp2}. Define the algorithm: \begin{tabular}{ll} $\textsc{SamplePlanar}(x,y)$:& $\gamma\leftarrow\Gamma\mathcal{G}''(x,y)$;\\ & give label $|\gamma|+1$ to the vertex marked $\star$\\ & and label $|\gamma|+2$ to the marked vertex $\ast$\\ &(thus $|\gamma|$ increases by $2$, and $\gamma\in\mathcal{G}$);\\ & return $\gamma$ \end{tabular} \subsection{Samplers according to the number of vertices} \label{sec:sample_vertices} Let $\rho_G$ be the radius of convergence of $x\mapsto G(x,1)$. Define $$x_n:=\big(1-\tfrac{1}{2n}\big)\cdot \rho_G.$$ \noindent For $n\geq 1$, the exact-size sampler is $\frak{A}_n$: repeat $\gamma \leftarrow \textsc{SamplePlanar}(x_n,1)$ until $|\gamma|=n$; return~$\gamma$. \noindent For $n\geq 1$ and $\epsilon >0$, the approximate-size sampler is $\frak{A}_{n,\epsilon}$: repeat $\gamma\leftarrow \textsc{SamplePlanar}(x_n,1)$ until $|\gamma|\in [n(1-\epsilon),n(1+\epsilon)]$; return $\gamma$. \subsection{Samplers according to the numbers of vertices and edges} \label{sec:sample_edges} For any $y>0$, we denote by $\rho_G(y)$ the radius of convergence of $x\mapsto G(x,y)$.
Let $\mu(y)$ be the function defined as $$ \mu(y):=-y\frac{\mathrm{d}\rho_G}{\mathrm{d}y}(y)/\rho_G(y). $$ As proved in~\cite{gimeneznoy} (using the so-called quasi-power theorem), for a fixed $y>0$, a large graph drawn by the Boltzmann sampler $\Gamma \mathcal{G}''(x,y)$ has a ratio edges/vertices concentrated around the value $\mu(y)$ as $x$ approaches the radius of convergence of $x\mapsto G(x,y)$. This yields a relation between the secondary parameter $y$ and the ratio edges/vertices. If we want a ratio edges/vertices close to a target value $\mu$, we have to choose $y$ so that $\mu(y)=\mu$. It is shown in~\cite{gimeneznoy} that the function $\mu(y)$ is strictly increasing on $(0,+\infty)$, with $\lim \mu(y)=1$ as $y\to 0$ and $\lim \mu(y)=3$ as $y\to +\infty$. As a consequence, $\mu(y)$ has an inverse function $y(\mu)$ defined on $(1,3)$. (In addition, $\mu\mapsto y(\mu)$ can be evaluated with good precision from the analytic equation it satisfies.) We define $$x_n(\mu):=\big(1-\tfrac{1}{2n}\big)\cdot\rho_G(y(\mu)).$$ For $n\geq 1$ and $\mu\in(1,3)$, the exact-size sampler is $\overline{\frak{A}}_{n,\mu}$:$\!$ repeat $\gamma\leftarrow \textsc{SamplePlanar}(x_n(\mu),y(\mu))$ until ($|\gamma|\!=\!n$ and $||\gamma||\!=\!\lfloor \mu n\rfloor)$; return~$\gamma$. \noindent For $n\geq 1$, $\mu\in(1,3)$, and $\epsilon>0$, the approximate-size sampler is \begin{tabular}{ll} $\overline{\frak{A}}_{n,\mu,\epsilon}$:& repeat $\gamma\leftarrow \textsc{SamplePlanar}(x_n(\mu),y(\mu))$\\ & until ($|\gamma|\in [n(1-\epsilon),n(1+\epsilon)]$ and $\frac{||\gamma||}{|\gamma|}\in [\mu (1-\epsilon),\mu (1+\epsilon)]$); \\ & return $\gamma$. \end{tabular} \noindent The complexity of the samplers is analysed in Section~\ref{sec:complexity}. \section{Implementation and experimental results}\label{sec:implement} \subsection{Implementation} We have completely implemented the random samplers for planar graphs described in Section~\ref{sec:efficient}. First we evaluated with good precision---typically 20 digits---the generating functions of the families of planar graphs that intervene in the decomposition (general, connected, 2-connected, 3-connected), derived up to 2 times. The calculations have been carried out in Maple using the analytic expressions of Gim\'enez and Noy for the generating functions~\cite{gimeneznoy}. We have performed the evaluations for values of the parameter $x$ associated with a bunch of reference target sizes in logarithmic scale, $n=\{10^2, 10^3, 10^4,10^5,10^6\}$. From the evaluations of the generating functions, we have computed the vectors of real values that are associated to the random choices to be performed during the generation, e.g., a Poisson law vector with parameter $G_1(x)$ (the EGF of connected planar graphs) is used for drawing the number of connected components of the graph. The second step has been the implementation of the random sampler in Java. To build the graph all along the generation process, it proves more convenient to manipulate a data structure specific to planar maps rather than planar graphs. The advantage is also that the graph to be generated will be equipped with an explicit (arbitrary) planar embedding. Thus if the graph generated is to be drawn in the plane, we do not need to call the rather involved algorithms for embedding a planar graph. 
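As an indication of the kind of precomputed vector mentioned above, the following sketch (in Python rather than Java, with hypothetical names and a placeholder value for $G_1(x)$) tabulates the cumulative probabilities of the Poisson law of parameter $G_1(x)$ once and for all, so that each draw of the number of connected components reduces to a binary search in the table.

\begin{verbatim}
import bisect, math, random

def poisson_cumulative_table(lam, tail=1e-12):
    # cumulative probabilities P(K <= k) of a Poisson(lam) law, tabulated
    # until the remaining tail mass is below `tail`
    p = math.exp(-lam)
    cum, table, k = p, [], 0
    while 1.0 - cum > tail:
        table.append(cum)
        k += 1
        p *= lam / k
        cum += p
    table.append(1.0)        # the last entry absorbs the negligible tail
    return table

def draw_from_table(table, rng=random):
    # inversion: smallest k such that U <= P(K <= k)
    return bisect.bisect_left(table, rng.random())

if __name__ == "__main__":
    random.seed(0)
    G1_at_x = 0.05           # placeholder; the actual value comes from the
                             # Maple evaluations of the generating functions
    table = poisson_cumulative_table(G1_at_x)
    print([draw_from_table(table) for _ in range(10)])
\end{verbatim}

Similar tables drive the Bernoulli choices and the other discrete laws occurring in the decomposition.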
Planar maps are suitably manipulated using the so-called \emph{half-edge structure}, where each half-edge occupies a memory block containing a pointer to the opposite half-edge along the same edge and a pointer to the next half-edge in counterclockwise order around the incident vertex. Using the half-edge structure, it proves very easy to implement at cost $O(1)$ all primitives used for building the graph---typically, merging two components at a common vertex or edge. In this way, the actual complexity of the implementation matches the complexity of the random samplers as stated in Theorem~\ref{theo:planarsamp1} and Theorem~\ref{theo:planarsamp2}: linear for approximate-size sampling and quadratic for exact-size sampling. In practice, generating a graph of size of order $10^5$ takes a few seconds on a standard computer. \begin{figure} \caption{Ratio edges/vertices observed on a collection $\gamma_1,\ldots,\gamma_{80}$ of random connected planar graphs of size at least $10^4$ drawn by our sampler.} \label{fig:exp_ratio} \end{figure} \begin{figure} \caption{The distribution of vertex degrees observed on a collection $\gamma_1,\ldots,\gamma_{80}$ of random connected planar graphs of size at least $10^4$ drawn by our sampler.} \label{fig:exp_degree} \end{figure} \subsection{Experimental results.} The good complexity of our random samplers allows us to observe statistical properties of parameters on very large random planar graphs---in the range of sizes $10^5$---where the asymptotic regime is already visible. We focus here on parameters that are known or expected to be concentrated around a limit value. Note that the experiments are carried out on connected planar graphs rather than general planar graphs. (It is slightly easier to restrict the implementation to connected graphs, which are conveniently manipulated using the half-edge data structure.) However, from the works of Gim\'enez and Noy~\cite{gimeneznoy} and previous work by McDiarmid et al.~\cite{McD05}, a random planar graph consists of a huge connected component, plus other components whose total expected size is $O(1)$. Thus, statistical properties like those stated in Conjecture~\ref{conj:planar} should be the same for random planar graphs as for random connected planar graphs. \noindent\emph{Number of edges.} First, we have checked that the random variable $X_n$ that counts the number of edges in a random connected planar graph with $n$ vertices is concentrated. Precisely, Gim\'enez and Noy have proved that $Y_n:=X_n/n$ converges in law to a constant $\mu\approx 2.213$ (they also show that the fluctuations are Gaussian, of magnitude $1/\sqrt{n}$). Figure~\ref{fig:exp_ratio} shows in ordinate the ratio edges/vertices for a collection of 80 random connected planar graphs of size at least $10^4$ drawn by our sampler. As we can see, the ratios are concentrated around the horizontal line $y=\mu$, agreeing with the convergence result of Gim\'enez and Noy. \noindent\emph{Degrees of vertices.} Another parameter of interest is the distribution of the degrees of vertices in a random planar graph. For a planar graph $\gamma$ with $n$ vertices, we denote by $N^{(k)}(\gamma)$ the number of vertices of $\gamma$ that have $k$ neighbours. Accordingly, $Z^{(k)}(\gamma):=N^{(k)}(\gamma)/n$ is the proportion of vertices of degree $k$ in $\gamma$. It is known from Gim\'enez and Noy that, for $k=1,2$, the random variable $Z^{(k)}$ converges in law to an explicit constant. Figure~\ref{fig:exp_degree} shows in abscissa the parameter $k$ and in ordinate the value of $Z^{(k)}$ for a collection of 80 random connected planar graphs of size at least $10^4$ drawn by our sampler.
Hence, the vertical line at abscissa $k$ is occupied by 80 points whose ordinates correspond to the values taken by $Z^{(k)}$ for each of the graphs. As we can see, for $k$ small---typically $k\ll\log n$---the values of $Z^{(k)}$ are concentrated around a constant. This leads us to the following conjecture. \begin{conjecture}\label{conj:planar} For every $k\geq 1$, let $Z^{(k)}_n$ be the random variable denoting the proportion of vertices of degree $k$ in a random planar graph with $n$ vertices taken uniformly at random. Then $Z_n^{(k)}$ converges in law to an explicit constant $\pi^{(k)}$ as $n\to\infty$; and $\sum_k\pi^{(k)}=1$. \end{conjecture} Let us mention some progress on this conjecture. It has recently been proved in~\cite{DrGiNo07} that the expected values $\mathbb{E}(Z_n^{(k)})$ converge as $n\to\infty$ to constants $\pi^{(k)}$ that are computable and satisfy $\sum_k\pi^{(k)}=1$. Hence, what remains to be shown regarding the conjecture is the concentration property. \section{Analysis of the time complexity}\label{sec:complexity} This whole section is dedicated to the proof of the complexities of the targetted random samplers. We show that the expected complexities of the targetted samplers $\frak{A}_n$, $\frak{A}_{n,\epsilon}$, $\overline{\frak{A}}_{n,\mu}$, and $\overline{\frak{A}}_{n,\mu,\epsilon}$, as described in Section~\ref{sec:final_smap}, are respectively $O(n^2)$, $O(n/\epsilon)$, $O_{\mu}(n^{5/2})$, and $O_{\mu}(n/\epsilon)$ (the dependency on $\mu$ is not analysed for the sake of simplicity). Recall that the targetted samplers call $\Gamma\mathcal{G}''(x,y)$ (with suitable values of $x$ and $y$) until the size parameters are in the target domain. Accordingly, the complexity analysis is done in two steps. In the first step, we estimate the probability of hitting the target domain, which allows us to reduce the complexity analysis to the analysis of the expected complexity of the pure Boltzmann sampler $\Gamma\mathcal{G}''(x,y)$. We use a specific notation to denote such an expected complexity: \begin{definition} Given a class $\mathcal{C}$ endowed with a Boltzmann sampler $\Gamma\mathcal{C}(x,y)$, we denote by $\Lambda\mathcal{C}(x,y)$ the expected combinatorial complexity\footnote{See the discussion on the complexity model after the statement of Theorem~\ref{theo:planarsamp2} in the introduction.} of a call to $\Gamma\mathcal{C}(x,y)$ (note that $\Lambda\mathcal{C}(x,y)$ depends not only on $\mathcal{C}$, but also on the specific Boltzmann sampler used for $\mathcal{C}$). \end{definition} Typically, the values $(x,y)$ have to be close to a singular point of $\mathcal{G}$ in order to draw graphs of large size. Hence, in the second step, our aim is to bound $\Lambda\mathcal{G}''(x,y)$ when $(x,y)$ converges to a given singular point $(x_0,y_0)$ of $\mathcal{G}$. To analyse $\Lambda\mathcal{G}''(x,y)$, our approach is again from bottom to top, as in the description of the sampler in Section~\ref{sec:efficient} (see also the general scheme summarized in Figure~\ref{fig:scheme_bi_derived}). At each step we give asymptotic bounds for the expected complexities of the Boltzmann samplers when the parameters approach a singular point. This study requires knowledge of the singular behaviours of all series involved in the decomposition of bi-derived planar graphs, which are recalled in Section~\ref{sec:sing_beh}.
\subsection{Complexity of rejection: the key lemma} The following simple lemma will be extensively used, firstly to reduce the complexity analysis of the targetted samplers to the one of pure Boltzmann samplers, secondly to estimate the effect of the rejection steps on the expected complexities of the Boltzmann samplers. \begin{lemma}[rejection complexity] \label{lem:target} Let $\frak{A}$ be a random sampler on a combinatorial class $\mathcal{C}$ according to a probability distribution $\mathbb{P}$, and let $p: \mathcal{C}\to [0,1]$ be a function on $\mathcal{C}$, called the rejection function. Consider the rejection algorithm $\frak{A}_{\mathrm{rej}}$: repeat $\gamma\leftarrow\frak{A}$ until $\Bern(p(\gamma))$ return $\gamma$. Then the expected complexity $\mathbb{E}(\frak{A}_{\mathrm{rej}})$ of $\frak{A}_{\mathrm{rej}}$ and the expected complexity $\mathbb{E}(\frak{A})$ of $\frak{A}$ are related by \begin{equation} \mathbb{E}(\frak{A}_{\mathrm{rej}})=\frac{1}{p_{\mathrm{acc}}}\mathbb{E}(\frak{A}), \end{equation} where $p_{\mathrm{acc}}:=\sum_{\gamma\in\mathcal{C}}\mathbb{P}(\gamma)p(\gamma)$ is the probability of success of $\frak{A}_{\mathrm{rej}}$ at each attempt. \end{lemma} \begin{proof} The quantity $\mathbb{E}(\frak{A}_{\mathrm{rej}})$ satisfies the recursive equation $$ \mathbb{E}(\frak{A}_{\mathrm{rej}})=\mathbb{E}(\frak{A})+(1-p_{\mathrm{acc}})\mathbb{E}(\frak{A}_{\mathrm{rej}}). $$ Indeed, a first attempt, with expected complexity $\mathbb{E}(\frak{A})$, is always needed; and in case of rejection, occurring with probability $(1-p_{\mathrm{acc}})$, the sampler restarts in the same way as when it is launched. \end{proof} As a corollary we obtain the following useful formulas to estimate the effect of rejection in Boltzmann samplers when going from L-derived (vertex-pointed) to U-derived (edge-pointed) graphs and vice-versa. \begin{corollary}[Complexity of changing the root]\label{lem:change_root} Let $\mathcal{A}$ be a mixed combinatorial class such that the constants $\alpha_{U/L}:=\mathrm{max}_{\gamma\in\mathcal{A}}\frac{||\gamma||}{|\gamma|}$ and $\alpha_{L/U}:=\mathrm{max}_{\gamma\in\mathcal{A}}\frac{|\gamma|}{||\gamma||}$ are finite. Define $c:=\alpha_{U/L}\cdot\alpha_{L/U}$. \begin{itemize} \item Assume $\mathcal{A}'$ is equipped with a Boltzmann sampler, and let $\Gamma \underline{\mathcal{A}}(x,y)$ be the Boltzmann sampler for $\underline{\mathcal{A}}$ obtained by applying $\LtoU$---as defined in Section~\ref{sec:reject}---to $\mathcal{A}$. Then $$ \Lambda \underline{\mathcal{A}}(x,y)\leq c\cdot \Lambda \mathcal{A}'(x,y). $$ \item Assume $\underline{\mathcal{A}}$ is equipped with a Boltzmann sampler, and let $\Gamma \mathcal{A}'(x,y)$ be the Boltzmann sampler for $\mathcal{A}'$ obtained by applying $\UtoL$---as defined in Section~\ref{sec:reject}---to $\mathcal{A}$. Then $$ \Lambda \mathcal{A}'(x,y)\leq c\cdot \Lambda \underline{\mathcal{A}}(x,y). $$ \end{itemize} \end{corollary} \begin{proof} Let us give the proof for \LtoU (the other case is proved in a similar way). By definition of \LtoU the probability of the Bernoulli choice at each attempt in $\Gamma\underline{\mathcal{A}}(x,y)$ is at least $\frac{1}{\alpha_{U/L}}\mathrm{min}_{\gamma\in\mathcal{A}}\frac{||\gamma||}{|\gamma|}$, i.e., at least $1/(\alpha_{U/L}\cdot\alpha_{L/U})$. Hence the probability $p_{\mathrm{acc}}$ of success at each attempt is at least $1/c$. 
Therefore, by Lemma~\ref{lem:target}, $\Lambda \underline{\mathcal{A}}(x,y)=\Lambda \mathcal{A}'(x,y)/p_{\mathrm{acc}}\leq c\cdot\Lambda\mathcal{A}'(x,y)$. \end{proof} \subsection{Reduction to analysing the expected complexity of Boltzmann samplers} We prove here that analysing the expected complexities of the targetted samplers reduces to analysing the expected complexity $\Lambda\mathcal{G}''(x,y)$ when $(x,y)$ approaches a singular point. (Recall that a singular point $(x_0,y_0)$ for a class $\mathcal{C}$ is such that the function $x\mapsto C(x,y_0)$ has a dominant singularity at $x_0$.) \begin{claim} \label{claim:eq} Assume that for every singular point $(x_0,y_0)$ of $\mathcal{G}$, the expected complexity of the Boltzmann sampler for $\mathcal{G}''$ satisfies\footnote{In this article all convergence statements are meant ``from below'', i.e., $x\to x_0$ means that $x$ approaches $x_0$ while staying smaller than $x_0$.} \begin{equation}\label{eq:claim} \Lambda\mathcal{G}''(x,y_0)=O((x_0-x)^{-1/2})\ \ \mathrm{as}\ x\to x_0. \end{equation} Then the expected complexities of the targetted samplers $\frak{A}_n$, $\frak{A}_{n,\epsilon}$, $\overline{\frak{A}}_{n,\mu}$, and $\overline{\frak{A}}_{n,\mu,\epsilon}$---as defined in Section~\ref{sec:final_smap}---are respectively $O(n^2)$, $O(n/\epsilon)$, $O_{\mu}(n^{5/2})$, and $O_{\mu}(n/\epsilon)$. In other words, proving~\eqref{eq:claim} is enough to prove the complexities of the random samplers for planar graphs, as stated in Theorem~\ref{theo:planarsamp1} and Theorem~\ref{theo:planarsamp2}. \end{claim} \begin{proof} Assume that (\ref{eq:claim}) holds. Let $\pi_{n,\epsilon}$ ($\pi_n$, resp.) be the probability that the output of $\textsc{SamplePlanar}(x_n,1)$ ---with $x_n=\big(1-\tfrac{1}{2n}\big)\cdot\rho_{G}$--- has size in $I_{n,\epsilon}:=[n(1-\epsilon),n(1+\epsilon)]$ (has size $n$, resp.). According to Lemma~\ref{lem:target}, the expected complexities of the exact-size and approximate-size samplers with respect to vertices ---as described in Section~\ref{sec:sample_vertices}--- satisfy $$ \mathbb{E}(\frak{A_n})=\frac{\Lambda \mathcal{G}''(x_n,1)}{\pi_n},\ \ \ \ \ \ \ \mathbb{E}(\frak{A_{n,\epsilon}})=\frac{\Lambda \mathcal{G}''(x_n,1)}{\pi_{n,\epsilon}}. $$ Equation~(\ref{eq:claim}) ensures that, when $n\to\infty$, $\Lambda \mathcal{G}''(x_n,1)$ is $O(n^{1/2})$. In addition, according to Lemma~\ref{lem:bi_der}, $\mathcal{G}''$ is $1/2$-singular (square-root singularities). Hence, by Lemma~\ref{lem:square}, $1/\pi_n$ is $O(n^{3/2})$ and $1/\pi_{n,\epsilon}$ is $O(n^{1/2}/\epsilon)$. Thus, $\mathbb{E}(\frak{A_n})$ is $O(n^2)$ and $\mathbb{E}(\frak{A_{n,\epsilon}})$ is $O(n/\epsilon)$. The proof for the samplers with respect to vertices and edges is a bit more technical. Consider a planar graph $\gamma$ drawn by the sampler $\textsc{SamplePlanar}(x_n(\mu),y(\mu))$. In view of the proof for the exact-size sampler, define $$\ol{\pi}_{n\wedge\mu}:=\mathbb{P}(||\gamma||\!=\!
\lfloor \mu n\rfloor, |\gamma|=n),\ \ \ol{\pi}_{\mu|n}:=\mathbb{P}(||\gamma||\!\!=\!\!\lfloor \mu n\rfloor\ |\ |\gamma|\!\!=\!\!n),\ \ \pi_n:=\mathbb{P}(|\gamma|\!\!=\!\!n).$$ \noindent In view of the proof for the approximate-size sampler, define $$\ol{\pi}_{n\wedge\mu,\epsilon}:=\mathbb{P}(|\gamma|\in[n(1-\epsilon),n(1+\epsilon)],\ ||\gamma||/|\gamma|\in[\mu(1-\epsilon),\mu(1+\epsilon)]),$$ $$\ol{\pi}_{\mu | n,\epsilon}:=\mathbb{P}(||\gamma||/|\gamma|\in[\mu(1-\epsilon),\mu(1+\epsilon)]\ |\ |\gamma|\in[n(1-\epsilon),n(1+\epsilon)]),$$ and $$\pi_{n,\epsilon}:=\mathbb{P}(|\gamma|\in[n(1-\epsilon),n(1+\epsilon)]).$$ Notice that $\ol{\pi}_{n\wedge\mu}=\ol{\pi}_{\mu|n}\cdot\pi_n$ and $\ol{\pi}_{n\wedge\mu,\epsilon}=\ol{\pi}_{\mu | n,\epsilon}\cdot\pi_{n,\epsilon}$. Moreover, Lemma~\ref{lem:target} ensures that $$ \mathbb{E}(\overline{\frak{A}}_{n,\mu})=\frac{\Lambda \mathcal{G}''(x_n(\mu),y(\mu))}{\ol{\pi}_{n\wedge\mu}},\ \ \ \ \ \ \ \mathbb{E}(\overline{\frak{A}}_{n,\mu,\epsilon})=\frac{\Lambda \mathcal{G}''(x_n(\mu),y(\mu))}{\ol{\pi}_{n\wedge\mu,\epsilon}}. $$ It has been shown by Gim\'enez and Noy~\cite{gimeneznoy} (based on the quasi-power theorem) that, for a fixed $\mu\in(1,3)$, $1/\ol{\pi}_{\mu|n}$ is $O_\mu(n^{1/2})$ as $n\to\infty$ (the dependency in $\mu$ is not discussed here for the sake of simplicity). Moreover, Lemma~\ref{lem:square} ensures that $1/\pi_n$ is $O_{\mu}(n^{3/2})$ as $n\to\infty$. Hence, $1/\ol{\pi}_{n,\mu}$ is $O_{\mu}(n^{2})$. Finally Equation~(\ref{eq:claim}) ensures that $\Lambda \mathcal{G}''(x_n(\mu),y(\mu))$ is $O_{\mu}(n^{1/2})$, therefore $\mathbb{E}(\overline{\frak{A}}_{n,\mu})$ is $O_{\mu}(n^{5/2})$. For the approximate-size samplers, the results of Gim\'enez and Noy (central limit theorems) ensure that, when $\mu\in(1,3)$ and $\epsilon>0$ are fixed and $n\to \infty$, $\ol{\pi}_{\mu | n,\epsilon}$ converges to 1. In addition, Lemma~\ref{lem:square} ensures that $1/\pi_{n,\epsilon}$ is $O_{\mu}(n^{1/2}/\epsilon)$. Hence, $1/\ol{\pi}_{n\wedge\mu,\epsilon}$ is $O_{\mu}(n^{1/2}/\epsilon)$. Equation~(\ref{eq:claim}) implies that $\Lambda \mathcal{G}''(x_n(\mu),y(\mu))$ is $O_{\mu}(n^{1/2})$, hence $\mathbb{E}(\overline{\frak{A}}_{n,\mu,\epsilon})$ is $O_{\mu}(n/\epsilon)$. \end{proof} From now on, our aim is to prove that, for any singular point $(x_0,y_0)$ of $\mathcal{G}$, $\Lambda\mathcal{G}''(x,y_0)$ is $O((x_0-x)^{-1/2})$ as $x\to x_0$. \subsection{Expected sizes of Boltzmann samplers} Similarly as for the expected complexities, it proves convenient to use specific notations for the expected sizes associated to Boltzmann samplers, and to state some of their basic properties. \begin{definition}[expected sizes] Let $\mathcal{C}$ be a mixed combinatorial class, and let $(x,y)$ be admissible for $\mathcal{C}$ (i.e., $C(x,y)$ converges). Define respectively the expected L-size and the expected U-size at $(x,y)$ as the quantities $$ |\mathcal{C}|_{(x,y)}:=\frac{1}{C(x,y)}\sum_{\gamma\in\mathcal{C}}|\gamma|\frac{x^{|\gamma|}}{|\gamma|!}y^{||\gamma||}=x\frac{\partial_x C(x,y)}{C(x,y)}, $$ $$||\mathcal{C}||_{(x,y)}:=\frac{1}{C(x,y)}\sum_{\gamma\in\mathcal{C}}||\gamma||\frac{x^{|\gamma|}}{|\gamma|!}y^{||\gamma||}=y\frac{\partial_yC(x,y)}{C(x,y)}. $$ \end{definition} We will need the following two simple lemmas at some points of the analysis. \begin{lemma}[monotonicity of expected sizes]\label{lem:monotonicity} Let $\mathcal{C}$ be a mixed class. 
\begin{itemize} \item For each fixed $y_0>0$, the expected L-size $x\mapsto |\mathcal{C}|_{(x,y_0)}$ is increasing with $x$. \item For each fixed $x_0>0$, the expected U-size $y\mapsto ||\mathcal{C}||_{(x_0,y)}$ is increasing with $y$. \end{itemize} \end{lemma} \begin{proof} As noticed in~\cite{DuFlLoSc04} (in the labelled framework), the derivative of the function $f(x):=|\mathcal{C}|_{(x,y_0)}$ is equal to $1/x$ multiplied by the variance of the L-size of an object under the Boltzmann distribution at $(x,y_0)$. Hence $f'(x)\geq 0$ for $x>0$, so $f(x)$ is increasing with $x$. Similarly the derivative of $g(y):=||\mathcal{C}||_{(x_0,y)}$ is equal to $1/y$ multiplied by the variance of the U-size of an object under the Boltzmann distribution at $(x_0,y)$, hence $g(y)$ is increasing with $y$ for $y>0$. \end{proof} \begin{lemma}[divergence of expected sizes at singular points]\label{lem:exp_size} Let $\mathcal{C}$ be an $\alpha$-singular class and let $(x_0,y_0)$ be a singular point of $\mathcal{C}$. Then, as $x\to x_0$: \begin{itemize} \item if $\alpha>1$, the expected size $x\mapsto |\mathcal{C}|_{(x,y_0)}$ converges to a positive constant, \item if $0<\alpha<1$, the expected size $x\mapsto |\mathcal{C}|_{(x,y_0)}$ diverges and is of order $(x_0-x)^{\alpha-1}$. \end{itemize} \end{lemma} \begin{proof} Recall that $|\mathcal{C}|_{(x,y_0)}=x\cdot C'(x,y_0)/C(x,y_0)$, and $\mathcal{C}'$ is $(\alpha-1)$-singular if $\mathcal{C}$ is $\alpha$-singular. Hence, if $\alpha>1$, both functions $C(x,y_0)$ and $C'(x,y_0)$ converge to positive constants as $x\to x_0$, so that $|\mathcal{C}|_{(x,y_0)}$ also converges to a positive constant. If $0<\alpha<1$, $C(x,y_0)$ still converges, but $C'(x,y_0)$ diverges, of order $(x_0-x)^{\alpha-1}$ as $x\to x_0$. Hence $|\mathcal{C}|_{(x,y_0)}$ is also of order $(x_0-x)^{\alpha-1}$. \end{proof} \subsection{Computation rules for the expected complexities of Boltzmann samplers} Thanks to Claim~\ref{claim:eq}, the complexity analysis is now reduced to estimating the expected complexity $\Lambda\mathcal{G}''(x,y)$ when $(x,y)$ is close to a singular point of $\mathcal{G}$. For this purpose, we introduce explicit rules to compute $\Lambda\mathcal{C}(x,y)$ if $\mathcal{C}$ is specified from other classes by a decomposition grammar. These rules will be combined with Lemma~\ref{lem:target} and Corollary~\ref{lem:change_root} (complexity due to the rejection steps) in order to get a precise asymptotic bound for $\Lambda\mathcal{G}''(x,y)$. We can now formulate the computation rules for the expected complexities. \begin{figure} \caption{The expected complexities of Boltzmann samplers specified using the sampling rules for the constructions $\{+,\star,\Set_{\geq d},\circ_L,\circ_U\}$.} \label{fig:comp_rules} \end{figure} \begin{lemma}[computation rules for expected complexities]\label{lem:comp_rules} Let $\mathcal{C}$ be a class obtained from simpler classes $\mathcal{A}$, $\mathcal{B}$ by means of one of the constructions $\{+,\star,\Set_{\geq d},\circ_L,\circ_U\}$. If $\mathcal{A}$ and $\mathcal{B}$ are equipped with Boltzmann samplers, let $\Gamma\mathcal{C}(x,y)$ be the Boltzmann sampler for $\mathcal{C}$ obtained from the sampling rules of Figure~\ref{table:rules}. Then there are explicit rules, as given in Figure~\ref{fig:comp_rules}, to compute the expected complexity of $\Gamma\mathcal{C}(x,y)$ from the expected complexities of $\Gamma\mathcal{A}(x,y)$ and $\Gamma\mathcal{B}(x,y)$.
\end{lemma} \begin{proof} \noindent\emph{Disjoint union:} $\Gamma\mathcal{C}(x,y)$ first flips a coin, which (by convention) has unit cost in the combinatorial complexity. Then $\Gamma\mathcal{C}(x,y)$ either calls $\Gamma\mathcal{A}(x,y)$ or $\Gamma\mathcal{B}(x,y)$ with respective probabilities $A(x,y)/C(x,y)$ and $B(x,y)/C(x,y)$. \noindent\emph{Product:} $\Gamma\mathcal{C}(x,y)$ calls $\Gamma\mathcal{A}(x,y)$ and then $\Gamma\mathcal{B}(x,y)$, which yields the formula. \noindent\emph{L-substitution:} $\Gamma\mathcal{C}(x,y)$ calls $\gamma\leftarrow\Gamma\mathcal{A}(B(x,y),y)$ and then replaces each L-atom of $\gamma$ by an object generated by $\Gamma\mathcal{B}(x,y)$. Hence, in average, the first step takes time $\Lambda\mathcal{A}(B(x,y),y)$ and the second step takes time $|\mathcal{A}|_{(B(x,y),y)}\cdot\Lambda\mathcal{B}(x,y)$. \noindent\emph{$\Set_{\geq d}$:} note that $\Set_{\geq d}(\mathcal{B})$ is equivalent to $\mathcal{A}\circ_L\mathcal{B}$, where $\mathcal{A}:=\Set_{\geq d}(\mathcal{Z}L)$, which has generating function $\exp_{\geq d}(z):=\sum_{k\geq d}z^k/k!$. A Boltzmann sampler $\Gamma\mathcal{A}(z,y)$ simply consists in drawing an integer under a conditioned Poisson law $\Pois_{\geq d}(z)$, which is done by a simple iterative loop. As the number of iterations is equal to the value that is returned (see~\cite{DuFlLoSc04} for a more detailed discussion), the expected cost of generation for $\mathcal{A}$ is equal to the expected size, i.e., $$ \Lambda\mathcal{A}(z,y)=|\mathcal{A}|_{(z,y)}=z\frac{\exp_{\geq d}\ \!\!\!'(z)}{\exp_{\geq d}(z)}=z\frac{\exp_{\geq d-1}(z)}{\exp_{\geq d}(z)}. $$ Hence, from the formula for $\Lambda(\mathcal{A}\circ_L\mathcal{B})(x,y)$, we obtain the formula for $\Set_{\geq d}$. \noindent\emph{U-substitution:} the formula for $\circ_U$ is proved similarly as the one for $\circ_L$. \end{proof} \begin{remark}\label{rk:finite} When using the computation rules of Figure~\ref{fig:comp_rules} in a recursive way, we have to be careful to check beforehand that all the expected complexities that are involved are finite. Otherwise there is the danger of getting weird identities like ``$\sum_{k\geq 0}2^k=1+2\sum_{k\geq 0}2^k$, so $\sum_{k\geq 0}2^k=-1$.'' \end{remark} \subsection{Analytic combinatorics of planar graphs}\label{sec:sing_beh} Let $\mathcal{C}$ be an $\alpha$-singular class (see Definition~\ref{def:alpha_sing}). A very useful remark to be used all along the analysis of the expected complexities is the following: if $\alpha\geq 0$, the function $C(x,y_0)$ converges when $x\to x_0$, and the limit has to be a positive constant; whereas if $\alpha< 0$, the function $C(x,y_0)$ diverges to $+\infty$ and is of order $(x_0-x)^{\alpha}$. In this section, we review the degrees of singularities of the series of all classes (binary trees, dissections, 3-connected, 2-connected, connected, and general planar graphs) that are involved in the decomposition of planar graphs. We will use extensively this information to estimate the expected complexities of the Boltzmann samplers in Section~\ref{sec:as_bounds}. \begin{lemma}[bicolored binary trees]\label{lem:sing_bin} Let $\mathcal{R}=\mathcal{R}b+\mathcal{R}w$ be the class of rooted bicolored binary trees, which is specified by the system $$ \mathcal{R}b=\mathcal{Z}_L\star(\mathcal{Z}_U+\mathcal{R}w)^2,\ \ \ \mathcal{R}w=(\mathcal{Z}U+\mathcal{R}b)^2. $$ Then the classes $\mathcal{R}b$, $\mathcal{R}w$ are $1/2$-singular. The class $\underline{\mathcal{K}}$ ($\mathcal{K}$) of rooted (unrooted, resp.) 
asymmetric bicolored binary trees is $1/2$-singular ($3/2$-singular, resp.). In addition, these two classes have the same singular points as $\mathcal{R}$. \end{lemma} \begin{proof} The classes $\mathcal{R}b$ and $\mathcal{R}w$ satisfy a decomposition grammar that has a strongly connected dependency graph. Hence, by a classical theorem of Drmota, Lalley, and Woods~\cite{fla}, the generating functions of these classes have square-root singular type. Notice that, from the decomposition grammar~\eqref{eq:grammar}, the class $\underline{\mathcal{K}}$ can be expressed as a positive polynomial in $\mathcal{Z}L$, $\mathcal{Z}U$, $\mathcal{R}b$, and $\mathcal{R}w$. Hence $\underline{\mathcal{K}}$ inherits the singular points and the square-root singular type from $\mathcal{R}b, \mathcal{R}w$. Finally, the generating function of $\mathcal{K}$ is classically obtained as a subtraction (a tree has one more vertex than edges, so one subtracts the series counting the trees rooted at an edge from the series counting the trees rooted at a vertex). The leading square-root singular terms cancel out due to the subtraction, leaving a leading singular term of degree $3/2$. \end{proof} \begin{lemma}[irreducible dissections, from~\cite{FuPoSc05}]\label{lem:sing_diss} The class $\mathcal{J}$ of rooted irreducible dissections is $3/2$-singular and has the same singularities as $\mathcal{K}$. \end{lemma} \begin{proof} The class $\mathcal{J}$ is equal to $3\star\mathcal{Z}L\star\mathcal{Z}U\star\mathcal{I}$, which is isomorphic to $3\star\mathcal{Z}L\star\mathcal{Z}U\star\mathcal{K}$, so $\mathcal{J}$ has the same singular points and singularity type as $\mathcal{K}$. \end{proof} \begin{lemma}[rooted 3-connected planar graphs~\cite{BeRi}]\label{lem:sing_3_conn} The class $\overrightarrow{\mathcal{G}_3}$ of edge-rooted 3-connected planar graphs is $3/2$-singular; and the class $\underline{\overrightarrow{\mathcal{G}_3}}$ of U-derived edge-rooted 3-connected planar graphs is $1/2$-singular. These classes have the same singular points as~$\mathcal{K}$. \end{lemma} \begin{proof} The series $\overrightarrow{G_3}(z,w)$ has been proved in~\cite{Mu} to have a rational expression in terms of the two series $R_{\bullet}(z,w)$ and $R_{\circ}(z,w)$ of rooted bicolored binary trees. This property is easily shown to be stable by taking derivatives, so the same property holds for the series $\underline{\overrightarrow{G_3}}(z,w)$. It is proved in~\cite{BeRi,BeGa} that the singular points of $\mathcal{G}tr$ are the same as those of $\mathcal{R}b$ and $\mathcal{R}w$. Hence, the singular expansion of $\overrightarrow{G_3}(z,w)$ at any singular point is simply obtained from the ones of $R_{\bullet}(z,w)$ and $R_{\circ}(z,w)$; one finds that the square-root terms cancel out, leaving a leading singular term of degree $3/2$. The study of $\underline{\overrightarrow{\mathcal{G}_3}}$ is similar. First, the rooting operator does not change the singular points (as it multiplies a coefficient $(n,m)$ only by a factor $m$); hence, $\underline{\overrightarrow{\mathcal{G}_3}}$ has the same singular points as $\mathcal{R}b,\mathcal{R}w$, which ensures that the singular expansion of $\underline{\overrightarrow{G_3}}(z,w)$ can be obtained from those of $\mathcal{R}b$ and $\mathcal{R}w$. One finds that the leading singular term is this time of the square-root type.
\end{proof} \begin{lemma}[networks, from~\cite{BeGa}]\label{lem:sing_networks} The classes $\mathcal{D}$, $\mathcal{S}$, $\mathcal{P}$, and $\mathcal{H}$ of networks are $3/2$-singular, and these classes have the same singular points. \end{lemma} \begin{lemma}[2-connected, connected, and general planar graphs~\cite{gimeneznoy}]\label{lem:sing_planar} The classes $\mathcal{G}_2$, $\mathcal{G}_1$, $\mathcal{G}$ of 2-connected, connected, and general planar graphs are all $5/2$-singular. In addition, the singular points of $\mathcal{G}_2$ are the same as those of networks, and the singular points are the same in $\mathcal{G}_1$ as in $\mathcal{G}$. \end{lemma} \subsection{Asymptotic bounds on the expected complexities of Boltzmann samplers} \label{sec:as_bounds} This section is dedicated to proving the asymptotic bound $\Lambda\mathcal{G}''(x,y_0)=O((x_0-x)^{-1/2})$. For this purpose we adopt again a bottom-to-top approach, following the scheme of Figure~\ref{fig:scheme_bi_derived}. For each class $\mathcal{C}$ appearing in this scheme, we provide an asymptotic bound for the expected complexity of the Boltzmann sampler in a neighbourhood of any fixed singular point of $\mathcal{C}$. In the end we arrive at the desired estimate of $\Lambda\mathcal{G}''(x,y_0)$. \subsubsection{Complexity of the Boltzmann samplers for binary trees}\label{sec:comp_binary_trees} \begin{lemma}[U-derived bicolored binary trees]\label{lem:comp_uK} Let $(z_0,w_0)$ be a singular point of $\mathcal{K}$. Then, the expected complexity of the Boltzmann sampler for $\underline{\mathcal{K}}$---given in Section~\ref{sec:boltz_binary_trees}---satisfies, $$ \Lambda \underline{\mathcal{K}}(z,w)=O\Big((z_0-z)^{-1/2}\Big)\ \mathrm{as}\ (z,w)\to (z_0,w_0). $$ \end{lemma} \begin{proof} The Boltzmann sampler $\Gamma \underline{\mathcal{K}}(z,w)$ is just obtained by translating a completely recursive decomposition grammar. Hence, the generation process consists in building the tree node by node following certain branching rules. Accordingly, the cost of generation is just equal to the number of nodes of the tree that is finally returned, assuming unit cost for building a node\footnote{ We could also use the computation rules for the expected complexities, but here there is the simpler argument that the expected complexity is equal to the expected size, as there is no rejection yet.}. As an unrooted binary tree has two more leaves than nodes, we have $$ \Lambda \underline{\mathcal{K}}(z,w)\leq ||\underline{\mathcal{K}}||_{(z,w)}\leq ||\underline{\mathcal{K}}||_{(z,w_0)}, $$ where the second inequality results from the monotonicity property of expected sizes (Lemma~\ref{lem:monotonicity}). Notice that, for $\tau\in\underline{\mathcal{K}}$, the number of nodes is not greater than $(3|\tau|+1)$, where $|\tau|$ is as usual the number of black nodes. Hence the number of nodes is at most $4|\tau|$. As a consequence, $$ \Lambda \underline{\mathcal{K}}(z,w)\leq 4\cdot |\underline{\mathcal{K}}|_{(z,w_0)}. $$ According to Lemma~\ref{lem:sing_bin}, the class $\underline{\mathcal{K}}$ is $1/2$-singular. Hence, by Lemma~\ref{lem:exp_size}, $|\underline{\mathcal{K}}|_{(z,w_0)}$ is $O((z_0-z)^{-1/2})$ as $z\to z_0$. So $\Lambda \underline{\mathcal{K}}(z,w)$ is also $O((z_0-z)^{-1/2})$. \end{proof} \begin{lemma}[derived bicolored binary trees]\label{lem:comp_der_bin} Let $(z_0,w_0)$ be a singular point of $\mathcal{K}$. 
Then, the expected complexity of the Boltzmann sampler for $\mathcal{K}'$---given in Section~\ref{sec:sampKp}---satisfies $$ \Lambda \mathcal{K}'(z,w)=O\left((z_0-z)^{-1/2}\right)\ \mathrm{as}\ (z,w)\to(z_0,w_0). $$ \end{lemma} \begin{proof} The sampler $\Gamma \mathcal{K}'(z,w)$ has been obtained from $\Gamma \underline{\mathcal{K}}(z,w)$ by applying the procedure \UtoL to the class $\mathcal{K}$. It is easily checked that the ratio number of black nodes/number of leaves in a bicolored binary tree is bounded from above and from below (we have already used the ``below'' bound in Lemma~\ref{lem:comp_uK}). Precisely, $3|\tau|+3\geq ||\tau||$ and $|\tau|\leq 2||\tau||/3$, from which it is easily checked that $\alpha_{L/U}=2/3$ and $\alpha_{U/L}=6$ (attained by the tree with 1 black and 3 white nodes). Hence, according to Corollary~\ref{lem:change_root}, $\Lambda \mathcal{K}'(z,w)\leq 4\ \!\Lambda \underline{\mathcal{K}}(z,w)$, so $\Lambda \mathcal{K}'(z,w)$ is $O\left((z_0-z)^{-1/2}\right)$. \end{proof} \begin{lemma}[bicolored binary trees]\label{lem:comp_bin} Let $(z_0,w_0)$ be a singular point of $\mathcal{K}$. Then, the expected complexity of the Boltzmann sampler for $\mathcal{K}$---given in Section~\ref{sec:Ksamp}---satisfies $$ \Lambda \mathcal{K}(z,w)=O\left(1\right)\ \mathrm{as}\ (z,w)\to(z_0,w_0). $$ \end{lemma} \begin{proof} At each attempt in the generator $\Gamma \mathcal{K}(z,w)$, the first step is to call $\Gamma\underline{\mathcal{K}}(z,w)$ to generate a certain tree $\tau\in\underline{\mathcal{K}}$ (it is here convenient to assume that the object is ``chosen'' before the generation starts), with probability $$ \frac{1}{\underline{K}(z,w)}\frac{z^{|\tau|}}{|\tau|!}w^{||\tau||}; $$ and the probability that the generation succeeds to finish is $2/(||\tau||+1)$. Hence, the total probability of success at each attempt in $\Gamma \mathcal{K}(z,w)$ satisfies $$ p_{\mathrm{acc}}=\sum_{\tau\in\underline{\mathcal{K}}}\frac{1}{\underline{K}(z,w)}\frac{z^{|\tau|}}{|\tau|!}w^{||\tau||}\cdot\frac{2}{||\tau||+1}. $$ As each object $\tau\in\mathcal{K}$ gives rise to $||\tau||$ objects in $\underline{\mathcal{K}}$ that all have L-size $|\tau|$ and U-size $||\tau||-1$, we also have $$ p_{\mathrm{acc}}=\sum_{\tau\in\mathcal{K}}\frac{2}{\underline{K}(z,w)}\frac{z^{|\tau|}}{|\tau|!}w^{||\tau||-1}=\frac{2K(z,w)}{w\underline{K}(z,w)}. $$ As $\mathcal{K}$ is $3/2$-singular and $\underline{\mathcal{K}}$ is $1/2$-singular, $p_{\mathrm{acc}}$ converges to the positive constant $c_0:=2K(z_0,w_0)/(w_0\underline{K}(z_0,w_0))$ as $(z,w)\to(z_0,w_0)$. Now call $\mathfrak{A}(z,w)$ the random generator for $\mathcal{K}$ delimited inside the repeat/until loop of $\Gamma \mathcal{K}(z,w)$, and let $\Lambda \frak{A}(z,w)$ be the expected complexity of $\mathfrak{A}(z,w)$. According to Lemma~\ref{lem:target}, $\Lambda \mathcal{K}(z,w)=\Lambda\frak{A}(z,w)/p_{\mathrm{acc}}$. In addition, when $(z,w)\to (z_0,w_0)$, $p_{\mathrm{acc}}$ converges to a positive constant, hence it remains to prove that $\Lambda \frak{A}(z,w)=O(1)$ in order to prove the lemma. Let $\tau\in\underline{\mathcal{K}}$, and let $m:=||\tau||$. During a call to $\frak{A}(z,w)$, and knowing (again, in advance) that $\tau$ is under generation, the probability that at least $k\geq 1$ nodes of $\tau$ are built is $2/(k+1)$, due to the Bernoulli probabilities telescoping each other. Hence, for $k<m-1$, the probability $p_k$ that the generation aborts when exactly $k$ nodes are generated satisfies $p_k=\frac{2}{k+1}-\frac{2}{k+2}=\frac{2}{(k+1)(k+2)}$. 
In addition, the probability that the whole tree is generated is $2/m$ (with a final rejection or not), in which case $(m-1)$ nodes are built. Measuring the complexity as the number of nodes that are built, we obtain the following expression for the expected complexity of $\frak{A}(z,w)$ knowing that $\tau$ is chosen: $$ \Lambda\frak{A}^{(\tau)}(z,w)=\sum_{k=1}^{m-2}k\cdot p_k+(m-1)\frac{2}{m}\leq 2\ \!H_m, $$ where $H_m:=\sum_{k=1}^m1/k$ is the $m$th harmonic number. Define $a_m(z):=[w^m]\underline{K}(z,w)$. We have $$ \Lambda\frak{A}(z,w)\leq \frac{2}{\underline{K}(z,w)}\sum_m H_ma_m(z)w^m\leq \frac{2}{\underline{K}(z,w)}\sum_m H_ma_m(z_0)w_0^m. $$ Hence, writing $c_1:=3/\underline{K}(z_0,w_0)$, we have $\Lambda\frak{A}(z,w)\leq c_1\sum_m H_ma_m(z_0)w_0^m$ for $(z,w)$ close to $(z_0,w_0)$. Using the Drmota-Lalley-Woods theorem (as in Lemma~\ref{lem:sing_bin}), it is easily shown that the function $w\mapsto \underline{K}(z_0,w)$ has a square-root singularity at $w=w_0$. Hence, the transfer theorems of singularity analysis~\cite{fla,flaod} yield the asymptotic estimate $a_m(z_0)\sim c\ \! m^{-3/2}w_0^{-m}$ for some constant $c>0$, so that $a_m(z_0)\leq c'\ \! m^{-3/2}w_0^{-m}$ for some constant $c'>0$. Hence $\Lambda\frak{A}(z,w)$ is bounded by the converging series $c_1\ \!c'\sum_m H_m\ \!m^{-3/2}$ for $(z,w)$ close to $(z_0,w_0)$, which concludes the proof. \end{proof} \subsubsection{Complexity of the Boltzmann samplers for irreducible dissections} \begin{lemma}[irreducible dissections]\label{lem:comp_irr} Let $(z_0,w_0)$ be a singular point of $\mathcal{I}$. Then, the expected complexities of the Boltzmann samplers for $\mathcal{I}$ and $\mathcal{I}'$---described respectively in Section~\ref{sec:sampI} and~\ref{sec:sampIp}---satisfy, as $(z,w)\to (z_0,w_0)$: \begin{eqnarray*} \Lambda \mathcal{I}(z,w)&=&O\ (1),\\ \Lambda \mathcal{I}'(z,w)&=&O\left((z_0-z)^{-1/2}\right). \end{eqnarray*} \end{lemma} \begin{proof} As stated in Proposition~\ref{prop:bijbin3conn} and proved in~\cite{FuPoSc05}, the closure-mapping has linear time complexity, i.e., there exists a constant $\lambda$ such that the cost of closing any binary tree $\kappa$ is at most $\lambda\cdot||\kappa||$. Recall that $\Gamma\mathcal{I}(z,w)$ calls $\Gamma \mathcal{K}(z,w)$ and closes the binary tree generated. Hence $$ \Lambda \mathcal{I}(z,w)\leq \Lambda \mathcal{K}(z,w)+\lambda\cdot ||\mathcal{K}||_{(z,w)}\leq \Lambda \mathcal{K}(z,w)+\lambda\cdot ||\mathcal{K}||_{(z,w_0)}, $$ where the second inequality results from the monotonicity property of expected sizes (Lemma~\ref{lem:monotonicity}). Again we use the fact that, for $\tau\in\mathcal{K}$, $||\tau||\leq 3|\tau|+1$, so $||\tau||\leq 4|\tau|$. Hence $$ \Lambda \mathcal{I}(z,w)\leq \Lambda \mathcal{K}(z,w)+4\lambda\cdot |\mathcal{K}|_{(z,w_0)}. $$ As the class $\mathcal{K}$ is $3/2$-singular, the expected size $|\mathcal{K}|_{(z,w_0)}$ is $O(1)$ when $z\to z_0$. In addition, according to Lemma~\ref{lem:comp_bin}, $\Lambda \mathcal{K}(z,w)$ is $O(1)$ when $(z,w)\to (z_0,w_0)$. Hence $\Lambda\mathcal{I}(z,w)$ is $O(1)$. Similarly, for $\mathcal{I}'$, we have $$ \Lambda \mathcal{I}'(z,w)\leq \Lambda \mathcal{K}'(z,w)+\lambda\cdot ||\mathcal{K}'||_{(z,w)}\leq \Lambda \mathcal{K}'(z,w)+4\lambda\cdot |\mathcal{Z}L\star\mathcal{K}'|_{(z,w_0)}. $$ As the class $\mathcal{K}'$ is $1/2$-singular (and so is $\mathcal{Z}L\star\mathcal{K}'$), the expected size $|\mathcal{Z}L\star\mathcal{K}'|_{(z,w_0)}$ is $O((z_0-z)^{-1/2})$ when $z\to z_0$.
In addition we have proved in Lemma~\ref{lem:comp_der_bin} that $\Lambda \mathcal{K}'(z,w)$ is $O((z_0-z)^{-1/2})$. Therefore $\Lambda\mathcal{I}'(z,w)$ is $O((z_0-z)^{-1/2})$. \end{proof} \begin{lemma}[rooted irreducible dissections]\label{lem:comp_root_irr} Let $(z_0,w_0)$ be a singular point of $\mathcal{I}$. Then, the expected complexities of the Boltzmann samplers for $\mathcal{J}$ and $\mathcal{J}'$---described respectively in Section~\ref{sec:sampI} and~\ref{sec:sampIp}---satisfy, as $(z,w)\to (z_0,w_0)$: \begin{eqnarray*} \Lambda \mathcal{J}(z,w)&=&O\ (1),\\ \Lambda \mathcal{J}'(z,w)&=&O\left((z_0-z)^{-1/2}\right). \end{eqnarray*} \end{lemma} \begin{proof} The sampler $\Gamma \mathcal{J}(z,w)$ is directly obtained from $\Gamma \mathcal{I}(z,w)$, according to the identity $\mathcal{J}=3\star\mathcal{Z}L\star\mathcal{Z}U\star\mathcal{I}$, so $ \Lambda\mathcal{J}(z,w)=\Lambda\mathcal{I}(z,w)$, which is $O(1)$ as $(z,w)\to(z_0,w_0)$. The sampler $\Gamma\mathcal{J}'(z,w)$ is obtained from $\Gamma \mathcal{I}(z,w)$ and $\Gamma \mathcal{I}'(z,w)$, according to the identity $\mathcal{J}'=3\star\mathcal{Z}L\star\mathcal{Z}U\star\mathcal{I}'+3\star\mathcal{Z}U\star\mathcal{I}$. Hence, $\Lambda\mathcal{J}'(z,w)\leq 1+\Lambda\mathcal{I}(z,w)+\Lambda\mathcal{I}'(z,w)$. According to Lemma~\ref{lem:comp_irr}, $\Lambda\mathcal{I}(z,w)$ and $\Lambda\mathcal{I}'(z,w)$ are respectively $O(1)$ and $O((z_0-z)^{-1/2})$ when $(z,w)\to (z_0,w_0)$. Hence $\Lambda\mathcal{J}'(z,w)$ is $O((z_0-z)^{-1/2})$. \end{proof} \begin{lemma}[admissible rooted irreducible dissections]\label{comp:Ia} Let $(z_0,w_0)$ be a singular point of $\mathcal{I}$. Then, the expected complexities of the Boltzmann samplers for $\mathcal{J}a$ and $\mathcal{J}a'$---described respectively in Section~\ref{sec:sampI} and~\ref{sec:sampIp}---satisfy, as $(z,w)\to (z_0,w_0)$: \begin{eqnarray*} \Lambda \mathcal{J}a(z,w)&=&O\ (1),\\ \Lambda \mathcal{J}a'(z,w)&=&O\left((z_0-z)^{-1/2}\right). \end{eqnarray*} \end{lemma} \begin{proof} Call $\overline{\Gamma}{\mathcal{J}}(z,w)$ the sampler that calls $\Gamma\mathcal{J}(z,w)$ and checks if the dissection is admissible. By definition, $\Gamma\mathcal{J}a(z,w)$ repeats calling $\overline{\Gamma}{\mathcal{J}}(z,w)$ until the dissection generated is in $\mathcal{J}a$. Hence the probability of acceptance $p_{\mathrm{acc}}$ at each attempt is equal to $J_{\mathrm{a}}(z,w)/J(z,w)$, i.e., is equal to $\overrightarrow{M_3}(z,w)/J(z,w)$ (the isomorphism $\mathcal{J}a\simeq\overrightarrow{\mathcal{M}_3}$ yields $J_{\mathrm{a}}(z,w)=\overrightarrow{M_3}(z,w)$). Call $\overline{\Lambda}\mathcal{J}(z,w)$ the expected complexity of $\overline{\Gamma}\mathcal{J}(z,w)$. By Lemma~\ref{lem:target}, $$ \Lambda \mathcal{J}a(z,w)=\frac{1}{p_{\mathrm{acc}}}\overline{\Lambda} \mathcal{J}(z,w)=\frac{J(z,w)}{\overrightarrow{M_3}(z,w)}\overline{\Lambda} \mathcal{J}(z,w). $$ We recall from Section~\ref{sec:sing_beh} that the singular points are the same for rooted 3-connected planar graphs/maps, for bicolored binary trees, and for irreducible dissections. Hence $(z_0,w_0)$ is a singular point for $\overrightarrow{M_3}(z,w)$. The classes $\mathcal{J}$ and $\overrightarrow{\mathcal{M}_3}\simeq 2\star\overrightarrow{\mathcal{G}_3}$ are $3/2$-singular by Lemma~\ref{lem:sing_diss} and Lemma~\ref{lem:sing_3_conn}, respectively.
Hence, when $(z,w)\to(z_0,w_0)$, the series $J(z,w)$ and $\overrightarrow{M_3}(z,w)$ are $\Theta(1)$, even more they converge to positive constants (because these functions are rational in terms of bivariate series for binary trees). Hence $p_{\mathrm{acc}}$ also converges to a positive constant, so it remains to prove that $\overline{\Lambda} \mathcal{J}(z,w)$ is $O(1)$. Testing admissibility (i.e., the existence of an internal path of length 3 connecting the root-vertex to the opposite outer vertex) has clearly linear time complexity. Hence, for some constant $\lambda$, $$\overline{\Lambda} \mathcal{J}(z,w)\leq\Lambda\mathcal{J}(z,w)+\lambda\cdot||\mathcal{J}||_{(z,w)}\leq \Lambda\mathcal{J}(z,w)+\lambda\cdot||\mathcal{J}||_{(z,w_0)},$$ where the second inequality results from the monotonicity of the expected sizes (Lemma~\ref{lem:monotonicity}). Both $\Lambda\mathcal{J}(z,w)$ and $||\mathcal{J}||_{(z,w_0)}$ are $O(1)$ when $z\to z_0$ (by Lemma~\ref{lem:comp_root_irr} and because $\mathcal{J}$ is $3/2$-singular, respectively). Hence $\overline{\Lambda}\mathcal{J}(z,w)$ is also $O(1)$, so $\Lambda \mathcal{J}a(z,w)$ is also $O(1)$. The proof for $\mathcal{J}a'$ is similar. First, we have $$ \Lambda \mathcal{J}a'(z,w)=\frac{J'(z,w)}{\overrightarrow{M_3}'(z,w)}\cdot\overline{\Lambda} \mathcal{J}'(z,w), $$ where $\overline{\Lambda} \mathcal{J}'(z,w)$ is the expected cost of a call to $\Gamma\mathcal{J}'(z,w)$ followed by an admissibility test. Both series $J'(z,w)$ and $\overrightarrow{M_3}'(z,w)$ are $1/2$-singular, even more, they converge to positive constants as $(z,w)\to(z_0,w_0)$ (again, because these functions are rational in terms of bivariate series of binary trees). Hence, when $(z,w)\to(z_0,w_0)$, the quantity $J'(z,w)/\overrightarrow{M_3}'(z,w)$ converges to a positive constant. Moreover, according to the linear complexity of admissibility testing, we have $\overline{\Lambda} \mathcal{J}'(z,w)\leq\Lambda \mathcal{J}'(z,w)+\lambda\cdot||\mathcal{J}'||_{(z,w_0)}$. Both quantities $\Lambda \mathcal{J}'(z,w)$ and $||\mathcal{J}'||_{(z,w_0)}$ are $O((z_0-z)^{-1/2})$. Hence $\Lambda \mathcal{J}a'(z,w)$ is also $O((z_0-z)^{-1/2})$. \end{proof} \subsubsection{Complexity of the Boltzmann samplers for 3-connected maps} \begin{lemma}[rooted 3-connected maps]\label{lem:comp_M3} Let $(z_0,w_0)$ be a singular point of $\mathcal{M}t$. Then the expected complexities of the Boltzmann samplers for $\overrightarrow{\mathcal{M}_3}$ and $\overrightarrow{\mathcal{M}_3}'$ satisfy respectively, as $(z,w)\to (z_0,w_0)$: \begin{eqnarray*} \Lambda \overrightarrow{\mathcal{M}_3}(z,w)&=&O\ (1),\\ \Lambda \overrightarrow{\mathcal{M}_3}'(z,w)&=&O\left((z_0-z)^{-1/2}\right). \end{eqnarray*} \end{lemma} \begin{proof} Recall that $\Gamma\overrightarrow{\mathcal{M}_3}(z,w)$ ($\Gamma\overrightarrow{\mathcal{M}_3}'(z,w)$, resp.) calls $\Gamma\mathcal{J}a(z,w)$ ($\Gamma\mathcal{J}a'(z,w)$, resp.) and returns the primal map of the dissection. The primal-map construction is in fact just a reinterpretation of the combinatorial encoding of rooted maps (in particular when dealing with the half-edge data structure). Hence $\Lambda \overrightarrow{\mathcal{M}_3}(z,w)=\Lambda \mathcal{J}a(z,w)$ and $\Lambda \overrightarrow{\mathcal{M}_3}'(z,w)=\Lambda \mathcal{J}a'(z,w)$. This concludes the proof, according to the estimates for $\Lambda \mathcal{J}a(z,w)$ and $\Lambda \mathcal{J}a'(z,w)$ given in Lemma~\ref{comp:Ia}. (A proof following the same lines as in Lemma~\ref{lem:comp_irr} would also be possible.) 
\end{proof} \subsubsection{Complexity of the Boltzmann samplers for 3-connected planar graphs} \begin{lemma}[rooted 3-connected planar graphs]\label{lem:comp_samp_Gt} Let $(z_0,w_0)$ be a singular point of $\mathcal{G}t$. Then the expected complexities of the Boltzmann samplers for $\overrightarrow{\mathcal{G}_3}$, $\overrightarrow{\mathcal{G}_3}'$ and $\underline{\overrightarrow{\mathcal{G}_3}}$ satisfy respectively, as $(z,w)\to (z_0,w_0)$: \begin{eqnarray*} \Lambda \overrightarrow{\mathcal{G}_3}(z,w)&=&O\ (1),\\ \Lambda \overrightarrow{\mathcal{G}_3}'(z,w)&=&O\left((z_0-z)^{-1/2}\right),\\ \Lambda \underline{\overrightarrow{\mathcal{G}_3}}(z,w)&=&O\left((z_0-z)^{-1/2}\right). \end{eqnarray*} \end{lemma} \begin{proof} The sampler $\Gamma \overrightarrow{\mathcal{G}_3}(z,w)$ ($\Gamma \overrightarrow{\mathcal{G}_3}'(z,w)$, resp.) is directly obtained from $\Gamma \overrightarrow{\mathcal{M}_3}(z,w)$ ($\Gamma \overrightarrow{\mathcal{M}_3}'(z,w)$, resp.) by forgetting the embedding. Hence $\Lambda \overrightarrow{\mathcal{G}_3}(z,w)=\Lambda \overrightarrow{\mathcal{M}_3}(z,w)$ and $\Lambda \overrightarrow{\mathcal{G}_3}'(z,w)=\Lambda \overrightarrow{\mathcal{M}_3}'(z,w)$, which are---by Lemma~\ref{lem:comp_M3}---respectively $O(1)$ and $O((z_0-z)^{-1/2})$ as $(z,w)\to (z_0,w_0)$. Finally, the sampler $\Gamma \underline{\overrightarrow{\mathcal{G}_3}}(z,w)$ is obtained from $\Gamma \overrightarrow{\mathcal{G}_3}'(z,w)$ by applying the procedure \LtoU to the class $\overrightarrow{\mathcal{G}_3}$. By the Euler relation, $\alpha_{U/L}=3$ (given asymptotically by triangulations) and $\alpha_{L/U}=2/3$ (given asymptotically by cubic graphs). Thus, by Corollary~\ref{lem:change_root}, $\Lambda\underline{\overrightarrow{\mathcal{G}_3}}(z,w)\leq 2\cdot\Lambda\overrightarrow{\mathcal{G}_3}'(z,w)$, which ensures that $\Lambda\underline{\overrightarrow{\mathcal{G}_3}}(z,w)$ is $O((z_0-z)^{-1/2})$. \end{proof} \subsubsection{Complexity of the Boltzmann samplers for networks} At first we need to introduce the following notations. Let $\mathcal{C}$ be a class endowed with a Boltzmann sampler $\Gamma\mathcal{C}(x,y)$ and let $\gamma\in\mathcal{C}$. Then $\Lambda\mathcal{C}^{(\gamma)}(x,y)$ denotes the expected complexity of $\Gamma\mathcal{C}(x,y)$ conditioned on the fact that the object generated is $\gamma$. If $\Gamma\mathcal{C}(x,y)$ uses rejection, i.e., repeats building objects and rejecting them until finally an object is accepted, then $\Lambda \mathcal{C}^{\mathrm{rej}}(x,y)$ denotes the expected complexity of $\Gamma\mathcal{C}(x,y)$ without counting the last (successful) attempt. \begin{lemma}[networks]\label{lem:comp_D} Let $(z_0,y_0)$ be a singular point of $\mathcal{D}$. Then, the expected complexity of the Boltzmann sampler for $\mathcal{D}$---described in Section~\ref{sec:2conn3conn}---satisfies $$ \Lambda \mathcal{D}(z,y_0)=O\left(1\right)\ \mathrm{as}\ z\to z_0. $$ \end{lemma} \begin{proof} Trakhtenbrot's decomposition ensures that a network $\gamma\in\mathcal{D}$ is a collection of 3-connected components $\kappa_1,\ldots,\kappa_r$ (in $\mathcal{G}tr$) that are assembled together in a series-parallel backbone $\beta$ (due to the auxiliary classes $\mathcal{S}$ and $\mathcal{P}$). Moreover, if $\gamma$ is produced by the Boltzmann sampler $\Gamma \mathcal{D}(z,y_0)$, then each of the 3-connected components $\kappa_i$ results from a call to $\Gamma \overrightarrow{\mathcal{G}_3}(z,w)$, where $w:=D(z,y_0)$. 
An important point, which is proved in~\cite{BeGa}, is that the composition scheme to go from rooted 3-connected planar graphs to networks is critical. This means that $w_0:=D(z_0,y_0)$ (change of variable from 3-connected planar graphs to networks) is such that $(z_0,w_0)$ is a singular point of $\mathcal{G}tr$. As the series-parallel backbone is built edge by edge, the cost of generating $\beta$ is simply $||\beta||$ (the number of edges of $\beta$); and the expected cost of generating $\kappa_i$, for $i\in [1..r]$, is $\Lambda \overrightarrow{\mathcal{G}_3}^{(\kappa_i)}(z,w)$. Hence \begin{equation} \Lambda \mathcal{D}^{(\gamma)}(z,y_0)=||\beta||+\sum_{i=1}^r\Lambda\overrightarrow{\mathcal{G}_3}^{(\kappa_i)}(z,w). \end{equation} \begin{claim} There exists a constant $c$ such that, for every $\kappa\in\overrightarrow{\mathcal{G}_3}$, $$ \Lambda\overrightarrow{\mathcal{G}_3}^{(\kappa)}(z,w)\leq c||\kappa||\ \ \ \mathrm{as}\ \ (z,w)\to(z_0,w_0). $$ \end{claim} \noindent{\it Proof of the claim.} The Boltzmann sampler $\Gamma \overrightarrow{\mathcal{G}_3}(z,w)$ is obtained by repeated attempts to build binary trees until the tree is successfully generated (no early interruption) and gives rise to a 3-connected planar graph (admissibility condition). For $\kappa\in\mathcal{K}$, call $c^{(\kappa)}$ the cost of building $\kappa$ (i.e., generate the underlying binary tree and perform the closure). Then $$ \Lambda \overrightarrow{\mathcal{G}_3}^{(\kappa)}(z,w)=\Lambda \overrightarrow{\mathcal{G}_3}^{\mathrm{rej}}(z,w)+c^{(\kappa)}. $$ Notice that $\Lambda \overrightarrow{\mathcal{G}_3}^{\mathrm{rej}}(z,w)\leq \Lambda \overrightarrow{\mathcal{G}_3}(z,w)$, which is $O(1)$ as $(z,w)\to(z_0,w_0)$. Moreover, the closure-mapping has linear time complexity. Hence there exists a constant $c$ independent of $\kappa$ and of $z$ such that $\Lambda \overrightarrow{\mathcal{G}_3}^{(\kappa)}(z,w)\leq c\ \!||\kappa||$ as $z\to z_0$. $\triangle$ The claim ensures that, upon taking $c>1$, every $\gamma\in\mathcal{D}$ satisfies $$ \Lambda \mathcal{D}^{(\gamma)}(z,y_0)\leq c(||\beta||+\sum_{i=1}^r||\kappa_i||)\ \ \ \mathrm{as}\ \ z\to z_0. $$ Since each edge of $\gamma$ is represented at most once in $\beta\cup\kappa_1\cup\ldots\cup\kappa_r$, we also have $\Lambda \mathcal{D}^{(\gamma)}(z,y_0)\leq c||\gamma||$. Hence, when $z\to z_0$, $\Lambda \mathcal{D}^{(\gamma)}(z,y_0)\leq 3c\cdot(|\gamma|+1)$ (by the Euler relation), which yields $$ \Lambda \mathcal{D}(z,y_0)\leq 3c\cdot |\mathcal{Z}L\star\mathcal{D}|_{(z,y_0)}. $$ As the class $\mathcal{D}$ is $3/2$-singular (clearly, so is $\mathcal{Z}L\star\mathcal{D}$), the expected size $|\mathcal{Z}L\star\mathcal{D}|_{(z,y_0)}$ is $O(1)$ when $z\to z_0$. Hence $\Lambda \mathcal{D}(z,y_0)$ is $O(1)$. \end{proof} \begin{lemma}[derived networks] Let $(z_0,y_0)$ be a singular point of $\mathcal{D}$. Then, the expected complexity of the Boltzmann sampler for $\mathcal{D}'$---described in Section~\ref{sec:sampDp}---satisfies $$ \Lambda \mathcal{D}'(z,y_0)=O\left((z_0-z)^{-1/2}\right)\ \mathrm{as}\ z\to z_0. $$ \end{lemma} \begin{proof} Let us fix $z\in(0,z_0)$. Define $X:=(\Lambda \mathcal{D}'(z,y_0),\Lambda \mathcal{S}'(z,y_0),\Lambda \mathcal{P}'(z,y_0),\Lambda \mathcal{H}'(z,y_0))$. Our strategy here is to use the computation rules (Figure~\ref{fig:comp_rules}) to obtain a recursive equation specifying the vector $X$. By Remark~\ref{rk:finite}, we have to check that the components of $X$ are finite.
\begin{claim}\label{claim:Dp_finite} For $z\in(0,z_0)$, the quantities $\Lambda \mathcal{D}'(z,y_0)$, $\Lambda \mathcal{S}'(z,y_0)$, $\Lambda \mathcal{P}'(z,y_0)$, and $\Lambda \mathcal{H}'(z,y_0)$ are finite. \end{claim} \noindent\emph{Proof of the claim.} Consider $\Lambda \mathcal{D}'(z,y_0)$ (the verification is similar for $\Lambda \mathcal{S}'(z,y_0)$, $\Lambda \mathcal{P}'(z,y_0)$, and $\Lambda \mathcal{H}'(z,y_0)$). Let $\gamma\in\mathcal{D}'$, with $\beta$ the series-parallel backbone and $\kappa_1,\ldots,\kappa_r$ the 3-connected components of $\gamma$. Notice that each $\kappa_i$ is drawn either by $\Gamma\overrightarrow{\mathcal{G}_3}(z,w)$ or $\Gamma\underline{\overrightarrow{\mathcal{G}_3}}(z,w)$ or $\Gamma\overrightarrow{\mathcal{G}_3}'(z,w)$, where $w=D(z,y_0)$. Hence the expected cost of generating $\kappa_i$ is bounded by $M+c||\kappa_i||$, where $M:=\mathrm{Max}(\Lambda\overrightarrow{\mathcal{G}_3}(z,w),\Lambda\underline{\overrightarrow{\mathcal{G}_3}}(z,w),\Lambda\overrightarrow{\mathcal{G}_3}'(z,w))$ and $c||\kappa_i||$ represents the cost of building $\kappa_i$ using the closure-mapping. As a consequence, $$ \Lambda\mathcal{D}'^{(\gamma)}(z,y_0)\leq ||\beta||+\sum_{i=1}^r M+c||\kappa_i||\leq C||\gamma||,\ \mathrm{with}\ C:=M+c+1. $$ Hence $$ \Lambda\mathcal{D}'(z,y_0)\leq \frac{C}{D'(z,y_0)}\sum_{\gamma\in\mathcal{D}'}||\gamma||\frac{z^{|\gamma|}}{|\gamma|!}y_0^{||\gamma||}, $$ which is $O(1)$ since it converges to the constant $Cy_0\partial_yD'(z,y_0)/D'(z,y_0)$. $\triangle$ Using the computation rules given in Figure~(\ref{fig:comp_rules}), the decomposition grammar~(N') of derived networks---as given in Section~\ref{sec:sampDp}---is translated to a linear system $$ X=AX+L, $$ where $A$ is a $4\times 4$-matrix and $L$ is a 4-vector. Precisely, the components of $A$ are rational or exponential expressions in terms of series of networks and their derivatives: all these quantities converge as $z\to z_0$ because all the classes of networks are $3/2$-singular. Hence $A$ converges to a matrix $A_0$ as $z\to z_0$. In addition, $A$ is a substochastic matrix, i.e., a matrix with nonnegative coefficients and with sum at most 1 in each row. Indeed, the entries in each of the 4 rows of $A$ correspond to probabilities of a Bernoulli switch when calling $\Gamma D'(z,y)$, $\Gamma S'(z,y)$, $\Gamma P'(z,y)$, and $\Gamma H'(z,y)$, respectively. Hence, the limit matrix $A_0$ is also substochastic. It is easily checked that $A_0$ is indeed strictly substochastic, i.e., at least one row has sum $<1$ (here, the first and third row add up to 1, whereas the second and fourth row add up to $<1$). In addition, $A_0$ is irreducible, i.e., the dependency graph induced by the nonzero coefficients of $A_0$ is strongly connected. A well known result of Markov chain theory ensures that $(I-A_0)$ is invertible~\cite{Ke}. Hence, $(I-A)$ is invertible for $z$ close to $z_0$, and $(I-A)^{-1}$ converges to the matrix $(I-A_0)^{-1}$. Moreover, the components of $L$ are of the form $$L=\Big(a,b,c,d\cdot\Lambda \overrightarrow{\mathcal{G}t}'(z,w)+e\cdot\Lambda \underline{\overrightarrow{\mathcal{G}_3}}(z,w)\Big),$$ where $w=D(z,y_0)$ and $\{a,b,c,d,e\}$ are expressions involving the series of networks, their derivatives, and the quantities $\{\Lambda D,\Lambda S, \Lambda P,\Lambda H\}$, which have already been shown to be bounded as $z\to z_0$. As a consequence, $a,b,c,d,e$ are $O(1)$ as $z\to z_0$. 
Moreover, it has been shown in~\cite{BeGa} that the value $w_0:=D(z_0,y_0)$ is such that $(z_0,w_0)$ is singular for $\mathcal{G}_3$, and $w_0-w\sim \lambda\cdot(z_0-z)$, with $\lambda:=D'(z_0,y_0)$. By Lemma~\ref{lem:comp_samp_Gt}, $\Lambda\overrightarrow{\mathcal{G}_3}'(z,w)$ and $\Lambda\underline{\overrightarrow{\mathcal{G}_3}}(z,w)$ are $O((z_0-z)^{-1/2})$ as $(z,w)\to(z_0,w_0)$; since $w=D(z,y_0)$ converges to $w_0$ as $z\to z_0$, these quantities are also $O((z_0-z)^{-1/2})$ as $z\to z_0$. We conclude that the components of $L$ are $O((z_0-z)^{-1/2})$, as well as the components of $X=(I-A)^{-1}L$. In particular, $\Lambda\mathcal{D}'(z,y_0)$ (the first component of $X$) is $O((z_0-z)^{-1/2})$. \end{proof} \subsubsection{Complexity of the Boltzmann samplers for 2-connected planar graphs} \begin{lemma}[rooted 2-connected planar graphs]\label{lem:comp_vecG2} Let $(z_0,y_0)$ be a singular point of $\mathcal{G}_2$. Then the expected complexities of the Boltzmann samplers for $\overrightarrow{\mathcal{G}_2}$ and $\overrightarrow{\mathcal{G}_2}'$ satisfy respectively, as $z\to z_0$: \begin{eqnarray*} \Lambda \overrightarrow{\mathcal{G}_2}(z,y_0)&=&O\ (1),\\ \Lambda \overrightarrow{\mathcal{G}_2}'(z,y_0)&=&O\left((z_0-z)^{-1/2}\right). \end{eqnarray*} \end{lemma} \begin{proof} Recall that the Boltzmann sampler $\Gamma \overrightarrow{\mathcal{G}_2}(z,y_0)$ is directly obtained from $\Gamma \mathcal{D}(z,y_0)$, more precisely from $\Gamma (1+\mathcal{D})(z,y_0)$. According to Lemma~\ref{lem:comp_D}, $\Lambda \mathcal{D}(z,y_0)$ is $O(1)$ as $z\to z_0$, hence $\Lambda \overrightarrow{\mathcal{G}_2}(z,y_0)$ is also $O(1)$. Similarly $\Gamma\overrightarrow{\mathcal{G}_2}'(z,y_0)$ is directly obtained from $\Gamma \mathcal{D}'(z,y_0)$, hence $\Lambda\overrightarrow{\mathcal{G}_2}'(z,y_0)=\Lambda\mathcal{D}'(z,y_0)$, which is $O((z_0-z)^{-1/2})$ as $z\to z_0$. \end{proof} \begin{lemma}[U-derived 2-connected planar graphs]\label{lem:comp_UG2} Let $(z_0,y_0)$ be a singular point of $\mathcal{G}_2$. Then, the expected complexities of the Boltzmann samplers for $\underline{\mathcal{G}_2}$ and $\underline{\mathcal{G}_2}'$---described in Section~\ref{sec:sampDp}---satisfy, as $z\to z_0$: \begin{eqnarray*} \Lambda \underline{\mathcal{G}_2}(z,y_0)&=&O\ (1),\\ \Lambda \underline{\mathcal{G}_2}'(z,y_0)&=&O\left((z_0-z)^{-1/2}\right). \end{eqnarray*} \end{lemma} \begin{proof} The Boltzmann sampler for $\underline{\mathcal{G}_2}$ is directly obtained from the one for $\overrightarrow{\mathcal{G}_2}$, according to the identity $2\star\underline{\mathcal{G}_2}=\mathcal{Z}_L\ \!\!\!^2\star\overrightarrow{\mathcal{G}_2}$. Hence $\Lambda\underline{\mathcal{G}_2}(z,y_0)=\Lambda\overrightarrow{\mathcal{G}_2}(z,y_0)$, which is $O(1)$ as $z\to z_0$, according to Lemma~\ref{lem:comp_vecG2}. Similarly, the Boltzmann sampler for $\underline{\mathcal{G}_2}'$ is directly obtained from the ones for the classes $\overrightarrow{\mathcal{G}_2}$ and $\overrightarrow{\mathcal{G}_2}'$, according to the identity $2\star\underline{\mathcal{G}_2}'=\mathcal{Z}_L\ \!\!\!^2\star\overrightarrow{\mathcal{G}_2}'+2\star\mathcal{Z}_L\star\overrightarrow{\mathcal{G}_2}$. Hence $\Lambda\underline{\mathcal{G}_2}'(z,y_0)\leq 1+\Lambda\overrightarrow{\mathcal{G}_2}'(z,y_0)+\Lambda\overrightarrow{\mathcal{G}_2}(z,y_0)$. When $z\to z_0$, $\Lambda\overrightarrow{\mathcal{G}_2}(z,y_0)$ is $O(1)$ and $\Lambda\overrightarrow{\mathcal{G}_2}'(z,y_0)$ is $O((z_0-z)^{-1/2})$ according to Lemma~\ref{lem:comp_vecG2}. Hence, $\Lambda\underline{\mathcal{G}_2}'(z,y_0)$ is $O((z_0-z)^{-1/2})$.
\end{proof} \begin{lemma}[bi-derived 2-connected planar graphs]\label{lem:comp_LG2} Let $(z_0,y_0)$ be a singular point of $\mathcal{G}_2$. Then, the expected complexities of the Boltzmann samplers for $\mathcal{G}bp$ and $\mathcal{G}bp'$---described in Section~\ref{sec:sampDp}---satisfy, as $z\to z_0$: \begin{eqnarray*} \Lambda \mathcal{G}bp(z,y_0)&=&O\ (1),\\ \Lambda \mathcal{G}bp'(z,y_0)&=&O\left((z_0-z)^{-1/2}\right). \end{eqnarray*} \end{lemma} \begin{proof} Recall that the Boltzmann sampler $\Gamma\mathcal{G}bp(z,y_0)$ is obtained from $\Gamma\underline{\mathcal{G}_2}(z,y_0)$ by applying the procedure \UtoL to the class $\mathcal{G}_2$. In addition, according to the Euler relation, any simple connected planar graph $\gamma$ (with $|\gamma|$ the number of vertices and $||\gamma||$ the number of edges) satisfies $|\gamma|\leq ||\gamma||+1$ (trees) and $||\gamma||\leq 3|\gamma|-6$ (triangulations). It is then easily checked that, for the class $\mathcal{G}_2$, $\alpha_{U/L}=3$ (attained asymptotically by triangulations) and $\alpha_{L/U}=2$ (attained by the link-graph, which has 2 vertices and 1 edge). Hence, by Corollary~\ref{lem:change_root}, $\Lambda\mathcal{G}bp(z,y_0)\leq 6\ \!\Lambda\underline{\mathcal{G}_2}(z,y_0)$. Thus, by Lemma~\ref{lem:comp_UG2}, $\Lambda\mathcal{G}bp(z,y_0)$ is $O(1)$ as $z\to z_0$. The proof for $\Lambda \mathcal{G}bp'(z,y_0)$ is similar, except that the procedure \UtoL is now applied to the derived class $\mathcal{G}bp$, meaning that the L-size is now the number of vertices minus 1. We still have $\alpha_{U/L}=3$ (attained asymptotically by triangulations), and now $\alpha_{L/U}=1$ (attained by the link-graph). Corollary~\ref{lem:change_root} yields $\Lambda\mathcal{G}bp'(z,y_0)\leq 3\ \!\Lambda\underline{\mathcal{G}_2}'(z,y_0)$. Hence, from Lemma~\ref{lem:comp_UG2}, $\Lambda\mathcal{G}bp'(z,y_0)$ is $O((z_0-z)^{-1/2})$ as $z\to z_0$. \end{proof} \subsubsection{Complexity of the Boltzmann samplers for connected planar graphs} \begin{lemma}[derived connected planar graphs]\label{lem:comp_G1p} Let $(x_0,y_0)$ be a singular point of $\mathcal{G}_1$. Then, the expected complexity of the Boltzmann sampler for $\mathcal{G}cp$---described in Section~\ref{sec:conn2conn}---satisfies \begin{eqnarray*} \Lambda \mathcal{G}cp(x,y_0)&=&O\ \!(1)\ \ \ \mathrm{as}\ x\to x_0.\\ \end{eqnarray*} \end{lemma} \begin{proof} Recall that the Boltzmann sampler for $\mathcal{G}cp$ results from the identity (block decomposition, Equation~\eqref{eq:2conn}) $$ \mathcal{G}cp=\Set\left(\mathcal{G}bp\circ_L(\mathcal{Z}_L\star\mathcal{G}cp) \right). $$ We want to use the computation rules (Figure~\ref{fig:comp_rules}) to obtain a recursive equation for $\Lambda\mathcal{G}cp(x,y_0)$. Again, according to Remark~\ref{rk:finite}, we have to check that $\Lambda\mathcal{G}cp(x,y_0)$ is finite. \begin{claim}\label{claim:Gcp_finite} For $0<x<x_0$, the quantity $\Lambda\mathcal{G}cp(x,y_0)$ is finite. \end{claim} \noindent\textit{Proof of the claim.} Let $\gamma\in\mathcal{G}cp$, with $\kappa_1,\ldots,\kappa_r$ the 2-connected blocks of $\gamma$. We have $$ \Lambda\mathcal{G}cp^{(\gamma)}(x,y_0)=2||\gamma||+\sum_{i=1}^r\Lambda\mathcal{G}bp^{(\kappa_i)}(z,y_0),\ \ \mathrm{where}\ z=xG_1\ \!\!\!'(x,y_0). $$ (The first term stands for the cost of choosing the degrees using a generator for a Poisson law; note that the sum of the degrees over all the vertices of $\gamma$ is $2||\gamma||$.) 
It is easily shown that there exists a constant $M$ such that $\Lambda\mathcal{G}bp^{(\kappa)}(z,y_0)\leq M||\kappa||$ for any $\kappa\in\mathcal{G}bp$ (using the fact that such a bound holds for $\Lambda\mathcal{D}^{(\kappa)}(z,y_0)$ and that $\Gamma\mathcal{G}bp(z,y_0)$ is obtained from $\Gamma\mathcal{D}(z,y_0)$ via a simple rejection step). Therefore $\Lambda\mathcal{G}cp^{(\gamma)}(x,y_0)\leq C||\gamma||$, with $C=2+M$. We conclude that $$ \Lambda\mathcal{G}cp(x,y_0)\leq \frac{C}{G_1\ \!\!\!'(x,y_0)}\sum_{\gamma\in\mathcal{G}cp}||\gamma||\frac{x^{|\gamma|}}{|\gamma|!}y_0^{||\gamma||}, $$ which is $O(1)$ since it converges to the constant $Cy_0\partial_yG_1\ \!\!\!'(x,y_0)/G_1\ \!\!\!'(x,y_0)$. $\triangle$ The computation rules (Figure~\ref{fig:comp_rules}) yield $$ \Lambda\mathcal{G}cp(x,y_0)=G_2\ \!\!\!'(z,y_0)\cdot\left(\Lambda\mathcal{G}bp(z,y_0)+|\mathcal{G}bp|_{(z,y_0)}\cdot\Lambda\mathcal{G}cp(x,y_0) \right)\ \ \mathrm{where}\ z=xG_1\ \!\!\!'(x,y_0), $$ so that $$ \Lambda\mathcal{G}cp(x,y_0)=\frac{G_2\ \!\!\!'(z,y_0)\Lambda\mathcal{G}bp(z,y_0)}{1-G_2\ \!\!\!'(z,y_0)\cdot|\mathcal{G}bp|_{(z,y_0)}}. $$ Similarly as in the transition from 3-connected planar graphs to networks, we use the important point, proved in~\cite{gimeneznoy}, that the composition scheme to go from 2-connected to connected planar graphs is critical. This means that, when $x\to x_0$, the quantity $z=xG_1\ \!\!\!'(x,y_0)$ (which is the change of variable from 2-connected to connected) converges to a positive constant $z_0$ such that $(z_0,y_0)$ is a singular point of $\mathcal{G}_2$. Hence, according to Lemma~\ref{lem:comp_LG2}, $\Lambda\mathcal{G}bp(z,y_0)$ is $O(1)$ as $x\to x_0$. Moreover, as the class $\mathcal{G}bp$ is $3/2$-singular, the series $G_2\ \!\!\!'(z,y_0)$ and the expected size $|\mathcal{G}bp|_{(z,y_0)}$ converge to positive constants that are denoted respectively $G_2\ \!\!\!'(z_0,y_0)$ and $|\mathcal{G}bp|_{(z_0,y_0)}$. We have shown that the numerator of $\Lambda\mathcal{G}cp(x,y_0)$ is $O(1)$ and that the denominator converges as $x\to x_0$. To prove that $\Lambda\mathcal{G}cp(x,y_0)$ is $O(1)$, it remains to check that the denominator does not converge to $0$, i.e., to prove that $G_2\ \!\!\!'(z_0,y_0)\cdot |\mathcal{G}bp|_{(z_0,y_0)}\neq 1$. To show this, we use the simple trick that the expected complexity and expected size of Boltzmann samplers satisfy similar computation rules. Indeed, from Equation~\eqref{eq:2conn}, it is easy to derive the equation $$ |\mathcal{G}cp|_{(x,y_0)}=G_2\ \!\!\!'(z,y_0)\cdot|\mathcal{G}bp|_{(z,y_0)}\cdot\left(|\mathcal{G}cp|_{(x,y_0)}+1\right)\ \ \mathrm{where}\ z=xG_1\ \!\!\!'(x,y_0), $$ either using the formula $|\mathcal{C}|_{(x,y)}=\partial_x C(x,y)/C(x,y)$, or simply by interpreting what happens during a call to $\Gamma\mathcal{G}cp(x,y)$ (an average of $G_2\ \!\!\!'(z,y_0)$ blocks are attached at the root-vertex, each block has average size $|\mathcal{G}bp|_{(z,y_0)}$ and carries a connected component of average size $(|\mathcal{G}cp|_{(x,y_0)}+1)$ at each non-root vertex). Hence $$ |\mathcal{G}cp|_{(x,y_0)}=\frac{G_2\ \!\!\!'(z,y_0)\cdot |\mathcal{G}bp|_{(z,y_0)}}{1-G_2\ \!\!\!'(z,y_0)\cdot |\mathcal{G}bp|_{(z,y_0)}}. $$ Notice that this is the same expression as $\Lambda\mathcal{G}cp(x,y_0)$, except for $|\mathcal{G}bp|_{(z,y_0)}$ replacing $\Lambda\mathcal{G}bp(z,y_0)$ in the numerator. 
The important point is that we already know that $|\mathcal{G}cp|_{(x,y_0)}$ converges as $x\to x_0$, since the class $\mathcal{G}cp$ is $3/2$-singular (see Lemma~\ref{lem:sing_planar}). Hence $G_2\ \!\!\!'(z_0,y_0)\cdot |\mathcal{G}bp|_{(z_0,y_0)}$ has to be different from $1$ (more precisely, it is strictly less than $1$), which concludes the proof. \end{proof} \begin{lemma}[bi-derived connected planar graphs]\label{lem:comp_G1pp} Let $(x_0,y_0)$ be a singular point of $\mathcal{G}_1$. Then, the expected complexity of the Boltzmann sampler for $\mathcal{G}cp'$---described in Section~\ref{sec:sampCp}---satisfies \begin{eqnarray*} \Lambda \mathcal{G}cp'(x,y_0)&=&O\ \left((x_0-x)^{-1/2}\right)\ \ \mathrm{as}\ x\to x_0.\\ \end{eqnarray*} \end{lemma} \begin{proof} The proof for $\Lambda \mathcal{G}cp'(x,y_0)$ is easier than for $\Lambda \mathcal{G}cp(x,y_0)$. Recall that $\Gamma \mathcal{G}cp'(x,y_0)$ is obtained from the identity $$ \mathcal{G}cp'=\left(\mathcal{G}cp+\mathcal{Z}_L\star\mathcal{G}cp' \right)\star\mathcal{G}bp'\circ_L(\mathcal{Z}_L\star\mathcal{G}cp)\star\mathcal{G}cp. $$ At first one easily checks (using similar arguments as in Claim~\ref{claim:Gcp_finite}) that $\Lambda \mathcal{G}cp'(x,y_0)$ is finite. Using the computation rules given in Figure~\ref{fig:comp_rules}, we obtain, writing as usual $z=xG_1\ \!\!\!'(x,y_0)$, \begin{eqnarray*} \Lambda\mathcal{G}cp'(x,y_0)&\!\!\!=\!\!\!&1+\frac{G_1\ \!\!\!'(x,y_0)}{G_1\ \!\!\!'(x,y_0)\!+\!xG_1\ \!\!\!''(x,y_0)}\Lambda\mathcal{G}cp(x,y_0)+\frac{xG_1\ \!\!\!''(x,y_0)}{G_1\ \!\!\!'(x,y_0)\!+\!xG_1\ \!\!\!''(x,y_0)}\Lambda\mathcal{G}cp'(x,y_0)\\ &&+ \Lambda\mathcal{G}bp'(z,y_0)+|\mathcal{G}bp'|_{(z,y_0)}\cdot\Lambda\mathcal{G}cp(x,y_0)+\Lambda\mathcal{G}cp(x,y_0). \end{eqnarray*} Hence $$\Lambda\mathcal{G}cp'(x,y_0)=a(x,y_0)\cdot(1+b(x,y_0)\cdot \Lambda\mathcal{G}cp(x,y_0)+\Lambda\mathcal{G}bp'(z,y_0)+|\mathcal{G}bp'|_{(z,y_0)}\cdot\Lambda\mathcal{G}cp(x,y_0)),$$ where $$a(x,y_0)=\frac{G_1\ \!\!\!'(x,y_0)+xG_1\ \!\!\!''(x,y_0)}{G_1\ \!\!\!'(x,y_0)},\ \ \ b(x,y_0)=\frac{2G_1\ \!\!\!'(x,y_0)+xG_1\ \!\!\!''(x,y_0)}{G_1\ \!\!\!'(x,y_0)+xG_1\ \!\!\!''(x,y_0)}.$$ As the classes $\mathcal{G}cp$ and $\mathcal{G}cp'$ are respectively $3/2$-singular and $1/2$-singular, the series $a(x,y_0)$ and $b(x,y_0)$ converge when $x\to x_0$. As $\mathcal{G}bp'$ is $1/2$-singular, $|\mathcal{G}bp'|_{(z,y_0)}$ is $O((z_0-z)^{-1/2})$ when $z\to z_0$. Moreover, according to Lemma~\ref{lem:comp_LG2}, $\Lambda\mathcal{G}bp'(z,y_0)$ is $O((z_0-z)^{-1/2})$. Next we use the fact that the change of variable from 2-connected to connected is critical. Precisely, as proved in~\cite{BeGa}, when $x\to x_0$ and when $z$ and $x$ are related by $z=xG_1\ \!\!\!'(x,y_0)$, we have $z_0-z\sim \lambda\cdot (x_0-x)$, with $\lambda:=\lim \mathrm{d}z/\mathrm{d}x=x_0G_1\ \!\!\!''(x_0,y_0)+G_1\ \!\!\!'(x_0,y_0)$. Hence, $|\mathcal{G}bp'|_{(z,y_0))}$ and $\Lambda\mathcal{G}bp'(z,y_0)$ are $O((x_0-x)^{-1/2})$. In addition, we have proved in Lemma~\ref{lem:comp_G1p} that $\Lambda\mathcal{G}cp(x,y_0)$ is $O(1)$. We conclude that $\Lambda\mathcal{G}cp'(x,y_0)$ is $O((x_0-x)^{-1/2})$. \end{proof} \begin{lemma}[connected planar graphs]\label{lem:comp_G1} Let $(x_0,y_0)$ be a singular point of $\mathcal{G}_1$. Then, the expected complexity of the Boltzmann sampler for $\mathcal{G}_1$---described in Section~\ref{sec:conn2conn}---satisfies $$ \Lambda \mathcal{G}_1(x,y_0)=O\ (1)\ \ \mathrm{as}\ x\to x_0. 
$$ \end{lemma} \begin{proof} As described in Section~\ref{sec:conn2conn}, the sampler $\Gamma\mathcal{G}_1(x,y)$ computes $\gamma\leftarrow\Gamma\mathcal{G}cp(x,y)$ and keeps $\gamma$ with probability $1/(|\gamma|+1)$. Hence the probability of success at each attempt is $$ p_{\mathrm{acc}}=\frac{1}{G_1\ \!\!\!'(x,y_0)}\sum_{\gamma\in\mathcal{G}cp}\frac{1}{|\gamma|+1}\frac{x^{|\gamma|}}{|\gamma|!}y_0^{||\gamma||}=\frac{1}{G_1\ \!\!\!'(x,y_0)}\sum_{\gamma\in\mathcal{G}cp}\frac{x^{|\gamma|}}{(|\gamma|+1)!}y_0^{||\gamma||}. $$ Recall that for any class $\mathcal{C}$, $\mathcal{C}'_{n,m}$ identifies to $\mathcal{C}_{n+1,m}$. Hence $$ p_{\mathrm{acc}}=\frac{1}{G_1\ \!\!\!'(x,y_0)}\sum_{\gamma\in\mathcal{G}_1}\frac{x^{|\gamma|-1}}{|\gamma|!}y_0^{||\gamma||}=\frac{G_1(x,y_0)}{xG_1\ \!\!\!'(x,y_0)}. $$ In addition, by Lemma~\ref{lem:target}, $\Lambda\mathcal{G}_1(x,y_0)=\Lambda\mathcal{G}cp(x,y_0)/p_{\mathrm{acc}}$. As the classes $\mathcal{G}_1$ and $\mathcal{G}cp$ are respectively $5/2$-singular and $3/2$-singular, both series $G_1(x,y_0)$ and $G_1\ \!\!\!'(x,y_0)$ converge to positive constants when $x\to x_0$. Hence $p_{\mathrm{acc}}$ converges to a positive constant as well. In addition, $\Lambda\mathcal{G}cp(x,y_0)$ is $O(1)$ by Lemma~\ref{lem:comp_G1p}. Hence $\Lambda\mathcal{G}_1(x,y_0)$ is also $O(1)$. \end{proof} \subsubsection{Complexity of the Boltzmann samplers for planar graphs}\label{sec:comp_planar} \begin{lemma}[planar graphs] Let $(x_0,y_0)$ be a singular point of $\mathcal{G}$. Then, the expected complexities of the Boltzmann samplers for $\mathcal{G}$, $\mathcal{G}'$ and $\mathcal{G}''$---described in Section~\ref{sec:planconn} and~\ref{sec:sampGp}---satisfy, as $x\to x_0$: \begin{eqnarray*} \Lambda \mathcal{G}(x,y_0)&=&O\ (1),\\ \Lambda \mathcal{G}'(x,y_0)&=&O\ (1),\\ \Lambda \mathcal{G}''(x,y_0)&=&O\ ((x_0-x)^{-1/2}). \end{eqnarray*} \end{lemma} \begin{proof} Recall that $\Gamma\mathcal{G}(x,y)$ is obtained from $\Gamma\mathcal{G}_1(x,y)$ using the identity $$ \mathcal{G}=\Set(\mathcal{G}_1), $$ hence $\Lambda \mathcal{G}(x,y_0)=G_1(x,y_0)\cdot\Lambda\mathcal{G}_1(x,y_0)$. When $x\to x_0$, $G_1(x,y_0)$ converges (because $\mathcal{G}_1$ is $5/2$-singular) and $\Lambda\mathcal{G}_1(x,y_0)$ is $O(1)$ (by Lemma~\ref{lem:comp_G1}). Hence $\Lambda \mathcal{G}(x,y_0)$ is $O(1)$. Then, $\Gamma\mathcal{G}'(x,y)$ is obtained from $\Gamma\mathcal{G}cp(x,y)$ and $\Gamma\mathcal{G}(x,y)$ using the identity $$ \mathcal{G}'=\mathcal{G}cp\star\mathcal{G}. $$ Hence $\Lambda\mathcal{G}'(x,y_0)=\Lambda\mathcal{G}cp(x,y_0)+\Lambda\mathcal{G}(x,y_0)$. When $x\to x_0$, $\Lambda\mathcal{G}cp(x,y_0)$ is $O(1)$ (by Lemma~\ref{lem:comp_G1p}) and $\Lambda\mathcal{G}(x,y_0)$ is $O(1)$, as proved above. Hence $\Lambda \mathcal{G}'(x,y_0)$ is $O(1)$. Finally, $\Gamma\mathcal{G}''(x,y)$ is obtained from $\Gamma\mathcal{G}cp'(x,y)$, $\Gamma\mathcal{G}cp(x,y)$, $\Gamma\mathcal{G}'(x,y)$, and $\Gamma\mathcal{G}(x,y)$ using the identity $$ \mathcal{G}''=\mathcal{G}cp'\star\mathcal{G}+\mathcal{G}cp\star\mathcal{G}'. $$ Hence $$ \Lambda\mathcal{G}''(x,y_0)=1+\frac{a}{a+b}\left(\Lambda\mathcal{G}cp'(x,y_0)+\Lambda\mathcal{G}(x,y_0)\right)+\frac{b}{a+b}\left(\Lambda\mathcal{G}cp(x,y_0)+\Lambda\mathcal{G}'(x,y_0)\right), $$ where $a=G_1\ \!\!\!''(x,y_0)G(x,y_0)$ and $b=G_1\ \!\!\!'(x,y_0)G'(x,y_0)$. Thus $$ \Lambda\mathcal{G}''(x,y_0)\leq 1+\Lambda\mathcal{G}cp'(x,y_0)+\Lambda\mathcal{G}(x,y_0)+\Lambda\mathcal{G}cp(x,y_0)+\Lambda\mathcal{G}'(x,y_0). 
$$ When $x\to x_0$, $\Lambda \mathcal{G}cp'(x,y_0)$ is $O((x_0-x)^{-1/2})$ (by Lemma~\ref{lem:comp_G1pp}), $\Lambda \mathcal{G}cp(x,y_0)$ is $O(1)$ (by Lemma~\ref{lem:comp_G1p}), and $\Lambda \mathcal{G}'(x,y_0)$ and $\Lambda \mathcal{G}(x,y_0)$ are $O(1)$, as proved above. Hence $\Lambda \mathcal{G}''(x,y_0)$ is $O((x_0-x)^{-1/2})$, which concludes the proof. \end{proof} This concludes the proof of the expected complexity bounds for our random samplers. (Recall that, thanks to Claim~\ref{claim:eq}, the proof has been reduced to proving the asymptotic estimate $\Lambda\mathcal{G}''(x,y_0)=O((x_0-x)^{-1/2})$.) \noindent\emph{Acknowledgements.} I am very grateful to Philippe Flajolet for his encouragement and for several corrections and suggestions that led to a significant improvement of the presentation of the results. I warmly thank the anonymous referee for an extremely detailed and insightful report, which led to a major revision of an earlier version of the article. I have also enjoyed fruitful discussions with Gilles Schaeffer, Omer Gim\'enez and Marc Noy, in particular regarding the implementation of the algorithm. \end{document}
\begin{document} \thispagestyle{empty} {\noindent \Large\bf\sc distribution of some functionals for a l\'{e}vy process with matrix-exponential jumps of the same sign }\footnotemark[1]\footnotetext{This is an electronic reprint of the original article published in Theory of Stochastic Processes, Vol. 19(35), No. 1 (2014), 26-36. This reprint differs from the original in pagination and typographic detail.} \begin{center} Ie.~Karnaukh \end{center} \footnote{Department of Statistics and Probability Theory, Dnipropetrovsk Oles Honchar National University,\\ 72, Gagarina pr., Dnipropetrovsk 49010, Ukraine. \href{mailto:[email protected]}{[email protected]} } \footnotemark[2]\footnotetext{The author would like to thank Prof. Dmytro Husak for his useful comments and remarks, and for proofreading several draft versions of the paper.} \begin{center} \begin{quotation} \noindent {\small This paper provides a framework for investigations in fluctuation theory for L\'evy processes with matrix-exponential jumps. We present a matrix form of the components of the infinitely divisible factorization. Using this representation, we establish generalizations of some results known for compound Poisson processes with exponential jumps in one direction and generally distributed jumps in the other direction.} \end{quotation} \end{center} L\'evy processes have many practical applications as base models in risk theory, queueing and financial mathematics. Many problems can be connected to fluctuation theory, in which the factorization method plays a crucial role (see, for instance, \cite{Kyprianou2006}). The most studied class of L\'evy processes is the class of semi-continuous processes (with L\'evy measure supported on a half-axis). One of the factorization components for semi-continuous processes is entirely defined by the real root of the cumulant equation (or, more specifically, by the right-inverse of the cumulant function). This result can be generalized to L\'evy processes with matrix-exponential upward (or downward) jumps (see \cite{Bratiychuk1990, Lewis2008} and references therein). For such processes one of the factorization components is a rational function with a finite number of poles, which are the (possibly complex) roots of the cumulant equation. Using the properties of the matrix-exponential distribution, we can invert this component to find the distribution of the corresponding killed extremum. The convolution of the distribution of this extremum with the integral transform of the L\'evy measure then determines the moment generating function of the other extremum. We use the relations for the factorization components to obtain in closed form the moment generating function of the occupation time of a half-line (for the semi-continuous case we refer to~\cite{Landriault2011}, and for some other cases to \cite{Husak2011engl}). A further generalization could be carried out for meromorphic L\'evy processes, the main difference being that the factorization components have infinitely many poles (see~\cite{Kuznetsov2012}). \section{Matrix-exponential distribution} The class of matrix-exponential (ME) distributions is a generalization of the exponential distribution, and it comprises the phase-type distributions. The ME class can be defined as the class of distributions with a rational moment generating function (see \cite{Asmussen2010}). The properties of ME distributions allow us to find, in closed form, some generalizations of the results known for L\'evy processes with exponential jumps.
A nonnegative random variable has a ME$\left(d\right)$ distribution $\left(d\geq1\right)$ if its cumulative distribution function is of the form \[ F\left(x\right)=\begin{cases} 1+\boldsymbol{\beta}e^{\mathbf{R}x}\mathbf{R}^{-1}\mathbf{t} & x>0;\\ 0 & x\leq0, \end{cases} \] where $\boldsymbol{\beta}$ is a $1\times d$ vector, $\mathbf{R}$ is a non-singular $d\times d$ matrix, and $\mathbf{t}$ is a $d\times 1$ vector, all possibly with complex entries. The triple $\left(\boldsymbol{\beta},{\mathbf{R}},{\mathbf{t}}\right)$ is called a representation of the ME distribution. Note that the same distribution may have several representations. If the cumulative distribution function of a ME distribution can be represented as $F\left(x\right)= 1-\boldsymbol{\alpha}e^{\mathbf{T}x}\mathbf{e}, x>0$, where $\boldsymbol{\alpha}$ is a probability vector and $\mathbf{T}$ is the intensity matrix of a Markov chain, $\mathbf{e}=\left(0,\ldots,0,1\right)^{\top}$, then the ME distribution is called a phase-type distribution. For details and general results on matrix-exponential distributions we refer to \cite{Fackrell2003}. For $x>0$ a ME distribution has density $f\left(x\right)= \boldsymbol{\beta}e^{\mathbf{R}x}\mathbf{t}$, which can be rewritten as (see \cite{Bratiychuk1990} and \cite{Lewis2008}): \[ f\left(x\right)=\sum_{i=1}^{m}P_{i}\left(x\right)e^{-r_{i}x}, \] where $P_{i}\left(x\right)$ are polynomials of degree $k_{i}$, $\Re[r_{m}]\geq\ldots\geq\Re[r_{2}]>r_{1}>0$ and $\sum_{i=1}^{m}k_{i}+m=d$. If $p_{0}=1+\boldsymbol{\beta}\mathbf{R}^{-1}\mathbf{t}\neq0$, then the ME distribution has nonzero mass at zero, and the moment generating function has the form \[ \int_{0}^{\infty}e^{rx}dF\left(x\right)=p_{0} -\boldsymbol{\beta}\left(r\mathbf{I}+\mathbf{R}\right)^{-1}\mathbf{t}, \quad\Re[r]=0. \] To find a representation of a distribution with a known moment generating function we can follow the approach given in \cite{Asmussen2010}. Denote the vectors $\boldsymbol{\rho}=\left(\rho_{d},\ldots,\rho_{1}\right),$ $\left(\boldsymbol{\rho},1\right)=\left(\rho_{d},\ldots,\rho_{1},1\right)$, $\mathbf{h}_{d}\left(r\right)=\left(1,r,\ldots,r^{d}\right)^{\top}$. If the Laplace transform of $f\left(x\right)$ has the form \begin{equation}\label{eq:1.1} \int_{0}^{\infty}e^{-rx}f\left(x\right)dx=\frac{\beta_{1}r^{d-1} +\beta_{2}r^{d-2}+\ldots+\beta_{d}}{r^{d}+\rho_{1}r^{d-1}+\ldots +\rho_{d-1}r+\rho_{d}}=\frac{\boldsymbol{\beta}\mathbf{h}_{d-1} \left(r\right)}{\left(\boldsymbol{\rho},1\right)\mathbf{h}_{d}\left(r\right)}, \end{equation} then the corresponding density is $f\left(x\right)=\boldsymbol{\beta}e^{\mathbf{R}x}\mathbf{e}$, $x>0$, where $\boldsymbol{\beta}=\left(\beta_{d},\ldots,\beta_{1}\right),$ $\mathbf{e}=\left(0,\ldots,0,1\right)^{\top},$ and $\mathbf{R}=\begin{pmatrix}0 & 1 & \ldots & 0\\ \vdots & \vdots & \ddots & \vdots\\ 0 & 0 & \ldots & 1\\ -\rho_{d} & -\rho_{d-1} & \ldots & -\rho_{1} \end{pmatrix}=\begin{pmatrix}0\quad\mathbf{I}\\ -\boldsymbol{\rho} \end{pmatrix}$. In the case when a ME distribution is defined on the negative half-axis, we can follow similar reasoning. If \begin{equation}\label{eq:1.2} \int_{-\infty}^{0}e^{rx}f\left(x\right)dx= \frac{\boldsymbol{\beta}\mathbf{h}_{d-1}\left(r\right)} {\left(\boldsymbol{\rho},1\right)\mathbf{h}_{d}\left(r\right)}, \end{equation} then $f\left(x\right)=\boldsymbol{\beta}e^{\mathbf{R}x}\mathbf{e}$, $x<0$, where $\boldsymbol{\beta}=\left(\beta_{d},\ldots,\beta_{1}\right),$ $\mathbf{R}=\begin{pmatrix}0\quad-\mathbf{I}\\ \boldsymbol{\rho} \end{pmatrix}$.
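As a simple illustration of these definitions, the exponential distribution with rate $\lambda>0$ is a ME$\left(1\right)$ distribution: the representation $\left(\boldsymbol{\beta},\mathbf{R},\mathbf{t}\right)=\left(1,-\lambda,\lambda\right)$ gives $f\left(x\right)=\boldsymbol{\beta}e^{\mathbf{R}x}\mathbf{t}=\lambda e^{-\lambda x}$ and \[ F\left(x\right)=1+\boldsymbol{\beta}e^{\mathbf{R}x}\mathbf{R}^{-1}\mathbf{t}=1-e^{-\lambda x},\quad x>0. \] Likewise, a hyperexponential density $f\left(x\right)=p\lambda_{1}e^{-\lambda_{1}x}+\left(1-p\right)\lambda_{2}e^{-\lambda_{2}x}$ has the rational Laplace transform $p\lambda_{1}/\left(r+\lambda_{1}\right)+\left(1-p\right)\lambda_{2}/\left(r+\lambda_{2}\right)$ and is therefore a ME$\left(2\right)$ (in fact, phase-type) distribution.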
If a ME distribution has support on the entire real line, then it is called a bilateral matrix-exponential distribution (see~\cite{Bladt2012}). \section{Extrema and overshoot} Let us suppose that $X_{t},t\geq0,$ is a L\'evy process with cumulant function \[ k\left(r\right)=a'r+\frac{\sigma^{2}}{2}r^{2}+ \int_{-\infty}^{\infty}\left(e^{rx}-1-rxI_{\left\{ |x|\leq1\right\} }\right) \Pi\left(dx\right), \] where $a'$ is a real constant, $\sigma\geq0$, and $\Pi$ is a nonnegative measure on $R\backslash\{0\}$ satisfying $\int_{R}\min\left\{ x^{2},1\right\} \Pi\left(dx\right)<\infty.$ Throughout we impose the restriction that $\int_{-1}^{1}|x|\Pi\left(dx\right)<\infty$; then the cumulant function can be represented as follows \begin{equation}\label{eq:2.1} k\left(r\right)=ar+\frac{\sigma^{2}}{2}r^{2}+ \int_{-\infty}^{\infty} \left(e^{rx}-1\right)\Pi\left(dx\right), \end{equation} where $a=a'-\int_{-1}^{1}x\,\Pi\left(dx\right).$ Denote by $\theta_{s}$ an exponential random variable with parameter $s>0$: $\mathsf{P}\left\{ \theta_{s}>t\right\} =e^{-st},t>0,$ independent of the process $X_{t},$ and by definition $\theta_{0}=\infty.$ Then $X_{\theta_{s}}$ is called a L\'evy process killed at rate $s$ (see \cite{Bertoin1996}). For the moment generating function of $X_{\theta_{s}}$ \[ \mathsf{E}e^{rX_{\theta_{s}}}=\frac{s}{s-k\left(r\right)} \] the infinitely divisible factorization identity holds \[ \mathsf{E}e^{rX_{\theta_{s}}}=\mathsf{E}e^{rX_{\theta_{s}}^{+}}\mathsf{E}e^{rX_{\theta_{s}}^{-}},\Re\left[r\right]=0, \] where $X_{\theta_{s}}^{+}=\sup_{0\leq t\leq\theta_{s}}X_{t}$, $X_{\theta_{s}}^{-}=\inf_{0\leq t\leq\theta_{s}}X_{t}$ are the supremum and the infimum of the process killed at rate $s$, respectively. In the general case, closed formulae for the factorization components are not known, so we impose additional restrictions on the parameters of the process. Following~\cite{Lewis2008}, we consider L\'evy processes that have finite-intensity negative (or positive) jumps with a ME distribution, arbitrary positive (negative) jumps, and possibly a drift and a Gaussian component. That is, we assume that $\Pi\left(dx\right)=\lambda_{-}\sum_{i=1}^{m_{-}}P_{i}^{-}\left(x\right)e^{b_{i}x}dx$, $x<0$ (or correspondingly $\Pi\left(dx\right)=\lambda_{+}\sum_{i=1}^{m_{+}}P_{i}^{+}\left(x\right)e^{-c_{i}x}dx$, $x>0$), where $\lambda_{\pm}=\int_{R^{\pm}}\Pi\left(dx\right)<\infty$, $P_{i}^{\pm}\left(x\right)$ are polynomials of degree $k_{i}^{\pm}$, $\sum_{i=1}^{m_{\pm}}k_{i}^{\pm}+m_{\pm}=d_{\pm}$, $\Re[b_{m_{-}}]\geq\ldots\geq\Re[b_{2}]>b_{1}>0$ and $\Re[c_{m_{+}}]\geq\ldots\geq\Re[c_{2}]>c_{1}>0$. We also distinguish two cases (for details, see~\cite{Lewis2008}): \begin{description} \item[$\left(NS\right)_{\pm}$] $\sigma>0$ or $\sigma=0,\pm a>0$, \item[$\left(S\right)_{\pm}$] $\sigma=0,\mp a\geq0$, \end{description} where the sign '+' corresponds to the case when the positive jumps have a ME distribution, while the sign '--' corresponds to the case when the negative jumps have a ME distribution. Due to \cite{Bratiychuk1990,Lewis2008}, in any of the cases $\left(NS\right)_{\pm}$ or $\left(S\right)_{\pm}$ the moment generating function $\mathsf{E}e^{rX_{\theta_{s}}^{\pm}}$ is a rational function and $X_{\theta_{s}}^{\pm}$ has a ME distribution.
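To fix ideas, consider the simplest instance of this setting: exponential negative jumps with parameter $b_{1}>0$ and intensity $\lambda_{-}$, that is $\Pi\left(dx\right)=\lambda_{-}b_{1}e^{b_{1}x}dx$ for $x<0$, and no positive jumps, so that $d_{-}=1$ (the sub-case $\sigma=0$, $a\geq0$ is the almost lower semi-continuous process considered again below). Then (\ref{eq:2.1}) gives \[ k\left(r\right)=ar+\frac{\sigma^{2}}{2}r^{2}+\lambda_{-}\left(\frac{b_{1}}{b_{1}+r}-1\right)=ar+\frac{\sigma^{2}}{2}r^{2}-\frac{\lambda_{-}r}{b_{1}+r}, \] and the cumulant equation $k\left(r\right)=s$, after multiplication by $b_{1}+r$, becomes a polynomial equation of degree $3$ when $\sigma>0$ and of degree $2$ when $\sigma=0$, whose roots with negative real part are exactly the $-r_{i}^{-}\left(s\right)$ appearing in the next paragraph.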
In addition, the cumulant equation \[ k\left(r\right)=s \] has the roots $\left\{ \pm r_{i}^{\pm} \left(s\right)\right\} _{i=1}^{N_{\pm}}$ in the half-plane $\pm\Re[r]>0$, where $N_{\pm}=\begin{cases} d_{\pm}+1 & \left(NS\right)_{\pm},\\ d_{\pm} & \left(S\right)_{\pm}, \end{cases}$ $r_{1}^{+}\left(s\right)$ is the unique root on $\left[0,c_{1}\right]$ ($-r_{1}^{-}\left(s\right)$ is the unique root on $\left[-b_{1},0\right]$). These roots entirely define the distribution of the corresponding extrema. Write \begin{gather*} \beta_{k}^{-}=\sum_{1\leq i_{1}<\ldots<i_{k}\leq d_{-}}b_{i_{1}} \ldots b_{i_{k}},\qquad\beta_{k}^{+}=\sum_{1\leq i_{1}<\ldots<i_{k}\leq d_{+}}c_{i_{1}}\ldots c_{i_{k}},\\\rho_{k}^{\pm}\left(s\right)= \sum_{1\leq i_{1}<\ldots<i_{k}\leq N_{\pm}}r_{i_{1}}^{\pm}\left(s\right) \ldots r_{i_{k}}^{\pm}\left(s\right), \end{gather*} then the distribution of $X_{\theta_{s}}^{\pm}$ can be represented by the parameters: \[ \boldsymbol{\beta}_{\pm}=\left(\beta_{d_{\pm}}^{\pm}, \ldots,\beta_{1}^{\pm}\right),\quad\boldsymbol{\rho}_{\pm}\left(s\right)= \left(\rho_{N_{\pm}}^{\pm}\left(s\right),\ldots, \rho_{1}^{\pm}\left(s\right)\right),\quad\mathbf{R}_{\pm}\left(s\right) =\begin{pmatrix}0\quad\mathbf{\pm I}\\ \mp\boldsymbol{\rho}_{\pm}\left(s\right) \end{pmatrix}. \] Under additional conditions, the moment generating function of $X_{\theta_{s}}^{\mp}$ can be determined in terms of integral transforms of the L\'evy measure: \[ \overline{\Pi}^{+}\left(x\right)=\int_{x}^{\infty}\Pi\left(dy\right),x>0; \quad\overline{\Pi}^{-}\left(x\right)=\int_{-\infty}^{x}\Pi\left(dy\right),x<0; \quad\tilde{\Pi}^{\pm}\left(r\right)=\int_{R^{\pm}}e^{rx} \overline{\Pi}^{\pm}\left(x\right)dx. \] The following statement is essentially based on the results given in \cite{Bratiychuk1990} and \cite{Lewis2008}. \begin{thm}\label{thm:2.1} If for the L\'evy process $X_t$ one of the cases $\left(NS\right)_{-}$ or $\left(S\right)_{-}$ holds, then \begin{equation}\label{eq:2.2} P'_{-}\left(s,x\right)=\frac{\partial}{\partial x}\mathsf{P} \left\{ X_{\theta_{s}}^{-}<x\right\} =\mathbf{q}_{-}\left(s\right) e^{\mathbf{R}_{-}\left(s\right)x}\mathbf{e},x<0, \end{equation} where \begin{equation}\label{eq:2.3} \mathbf{q}_{-}\left(s\right)=\begin{cases} \frac{\rho_{d_{-}+1}^{-}\left(s\right)}{\beta_{d_{-}}^{-}} \left(\boldsymbol{\beta}_{-},1\right) & \left(NS\right)_{-},\\ \frac{\rho_{d_{-}}^{-}\left(s\right)}{\beta_{d_{-}}^{-}} \left(\boldsymbol{\beta}_{-}-\boldsymbol{\rho}_{-}\left(s\right)\right) & \left(S\right)_{-}. \end{cases} \end{equation} Moreover, in case $\left(S\right)_{-}$: $p_{-}\left(s\right)=\mathsf{P}\left\{ X_{\theta_{s}}^{-}=0\right\} \neq0$ and $p_{-}\left(s\right)=\frac{\rho_{d_{-}}^{-}\left(s\right)} {\beta_{d_{-}}^{-}}=\left(\prod_{i=1}^{d_{-}}\frac{r_{i}^{-} \left(s\right)}{b_{i}}\right)$.
If additionally $\mathsf{D}X_{1}<\infty, \mathsf{E}X_{\theta_{s}}^{+}<\infty$, the moment generating function of $X_{\theta_{s}}^{+}$ could be represented as \begin{equation}\label{eq:2.4} \mathsf{E}e^{rX_{\theta_{s}}^{+}}=\left(1-\frac{r}{s} \left(A_{*}^{-}\left(s\right)+\mathsf{E} e^{rX_{\theta_{s}}^{-}}\tilde{\Pi}^{+}\left(r\right) -\mathbf{q}_{-}\left(s\right)\left(r\mathbf{I} +\mathbf{R}_{-}\left(s\right)\right)^{-1}\tilde{\Pi}^{+} \left(-\mathbf{R}_{-}\left(s\right)\right)\mathbf{e}\right)\right)^{-1}, \end{equation} \[ A_{*}^{-}\left(s\right)=\left\{ \begin{array}{lc} \frac{\sigma^{2}}{2}\left.\frac{\partial}{\partial y} \mathsf{P}\left\{ X_{\theta_{s}}^{-}<y\right\} \right|_{y=0} & \sigma>0,\\ \mathsf{P}\left\{ X_{\theta_{s}}^{-}=0\right\} \max\left\{ 0,a\right\} & \sigma=0, \end{array}\right. =\begin{cases} \frac{\sigma^2}{2}\frac{\rho_{d_{-}+1}^{-}\left(s\right)}{\beta_{d_{-}}^{-}}& \left(NS\right)_{-},\\ a \frac{\rho_{d_{-}}^{-}\left(s\right)}{\beta_{d_{-}}^{-}} & \left(S\right)_{-}. \end{cases} \] Denote the first passage time by $\tau_{x}^{+}= \inf\left\{ t>0:X_{t}>x\right\} $, then the distribution of discounted overshoot $X_{\tau_{x}^{+}}-x$, $x>0$, is defined by \begin{multline}\label{eq:2.5} \mathsf{E}\left[e^{-s\tau_{x}^{+}},X_{\tau_{x}^{+}}-x\in dv, \tau_{x}^{+}<\infty\right]=s^{-1}A_{*}^{-}\left(s\right) \frac{\partial}{\partial x}\mathsf{P}\left\{ X_{\theta_{s}}^{+}<x\right\} \delta\left(v\right)dv+\\ s^{-1}\int_{0}^{x}\left(p_{-}\left(s\right)\Pi\left(dv+y\right) +\int_{y}^{\infty}\Pi\left(dv+z\right)\mathbf{q}_{-}\left(s\right) e^{\mathbf{R}_{-}\left(s\right)\left(y-z\right)}\mathbf{e}dz\right) \mathsf{P}\left\{ X_{\theta_{s}}^{+}\in x-dy\right\}, \end{multline} where $\delta(v)$ is the Dirac delta function. \end{thm} \begin{proof} If one of the cases $\left(NS\right)_{-}$ or $\left(S\right)_{-}$ holds, then according to \cite{Lewis2008} the moment generating function of $X_{\theta_{s}}^{-}$ can be defined by the relation \begin{equation}\label{eq:2.6} \mathsf{E}e^{rX_{\theta_{s}}^{-}}= \frac{\prod_{i=1}^{N_{-}}r_{i}^{-}\left(s\right)} {\prod_{i=1}^{d_{-}}b_{i}}\frac{\prod_{i=1}^{d_{-}}\left(r+b_{i}\right)} {\prod_{i=1}^{N_{-}}\left(r+r_{i}^{-}\left(s\right)\right)}. \end{equation} Using notation given above we can rewrite this relation as \begin{equation}\label{eq:2.7} \mathsf{E}e^{rX_{\theta_{s}}^{-}}= \frac{\rho_{N_{-}}^{-}\left(s\right)} {\beta_{d_{-}}^{-}}\frac{\left(\boldsymbol{\beta}_{-},1\right) \mathbf{h}_{d_{-}}\left(r\right)}{\left(\boldsymbol{\rho}_{-} \left(s\right),1\right)\mathbf{h}_{N_{-}}\left(r\right)}= \begin{cases} \frac{\rho_{d_{-}+1}^{-}\left(s\right)}{\beta_{d_{-}}^{-}} \frac{\left(\boldsymbol{\beta}_{-},1\right)\mathbf{h}_{d_{-}} \left(r\right)}{\left(\boldsymbol{\rho}_{-}\left(s\right),1\right) \mathbf{h}_{d_{-}+1}\left(r\right)} & \left(NS\right)_{-},\\ \frac{\rho_{d_{-}}^{-}\left(s\right)}{\beta_{d_{-}}^{-}} \left(1+\frac{\left(\boldsymbol{\beta}_{-}-\boldsymbol{\rho}_{-} \left(s\right)\right)\mathbf{h}_{d_{-}-1}\left(r\right)} {\left(\boldsymbol{\rho}_{-}\left(s\right),1\right) \mathbf{h}_{d_{-}}\left(r\right)}\right) & \left(S\right)_{-}. \end{cases} \end{equation} which allows for inversion in $r$, so we get (\ref{eq:2.2}) and (\ref{eq:2.3}). 
Under the conditions of the theorem (see \cite[Corollary 2.2]{Husak2011engl}): \begin{equation}\label{eq:2.8} \mathsf{E}e^{rX_{\theta_{s}}^{+}}= \left(1-s^{-1}r\left(A_{*}^{-}\left(s\right) +\int_{0}^{\infty}e^{rx}\int_{-\infty}^{0}\overline{\Pi}^{+} \left(x-y\right)dP_{-}\left(s,y\right)\right)\right)^{-1}. \end{equation} Using (\ref{eq:2.2}) and (\ref{eq:2.3}) we get \begin{multline*} \int_{0}^{\infty}e^{rx}\int_{-\infty}^{0} \overline{\Pi}^{+}\left(x-y\right)dP_{-}\left(s,y\right)=\\ =p_{-}\left(s\right)\tilde{\Pi}^{+}\left(r\right)+ \mathbf{q}_{-}\left(s\right)\left(r\mathbf{I}+ \mathbf{R}_{-}\left(s\right)\right)^{-1}\left(\tilde{\Pi}^{+}\left(r\right) -\tilde{\Pi}^{+}\left(-\mathbf{R}_{-}\left(s\right)\right)\right)\mathbf{e}_{-}=\\ =\mathsf{E}e^{rX_{\theta_{s}}^{-}}\tilde{\Pi}^{+} \left(r\right)-\mathbf{q}_{-}\left(s\right)\left(r\mathbf{I}+ \mathbf{R}_{-}\left(s\right)\right)^{-1}\tilde{\Pi}^{+} \left(-\mathbf{R}_{-}\left(s\right)\right)\mathbf{e}_{-}. \end{multline*} Substituting the last relation in (\ref{eq:2.8}) yields (\ref{eq:2.4}). Using formula (\ref{eq:2.2}), relation (\ref{eq:2.5}) can be deduced by integration of the Gerber-Shiu measure (see \cite{Kuznetsov2012}): \begin{multline*} \mathsf{E}\left[e^{-s\tau_{x}^{+}},x-X_{\tau_{x}^{+}-0}^{+} \in dy,x-X_{\tau_{x}^{+}-0}\in dz,X_{\tau_{x}^{+}}-x\in dv, \tau_{x}^{+}<\infty\right]=\\ =s^{-1}\mathsf{P}\left\{ X_{\theta_{s}}^{+}\in x-dy\right\} \mathsf{P}\left\{ -X_{\theta_{s}}^{-}\in dz-v\right\} \Pi\left(dv+z\right),\; v,z>0,0\leq y\leq\min\left\{ x,z\right\} , \end{multline*} with respect to $y$ and $z$, and taking into account that $\mathsf{E}\left[e^{-s\tau_{x}^{+}}, x-X_{\tau_{x}^{+}-0}=0, \tau_{x}^{+}<\infty\right]=s^{-1}A_{*}^{-}\left(s\right) \frac{\partial}{\partial x}\mathsf{P}\left\{ X_{\theta_{s}}^{+}<x\right\}$ (see \cite[(2.55)]{Husak2011engl}). \end{proof} \begin{cor}\label{cor:2.1} If one of the cases $\left(NS\right)_{+}$ or $\left(S\right)_{+}$ holds, then \begin{equation}\label{eq:2.9} P'_{+}\left(s,x\right)=\frac{\partial}{\partial x}\mathsf{P} \left\{ X_{\theta_{s}}^{+}<x\right\} =\mathbf{q}_{+}\left(s\right) e^{\mathbf{R}_{+}\left(s\right)x}\mathbf{e},x>0, \end{equation} where \begin{equation}\label{eq:2.10} \mathbf{q}_{+}\left(s\right)=\begin{cases} \frac{\rho_{d_{+}+1}^{+}\left(s\right)} {\beta_{d_{+}}^{+}}\left(\boldsymbol{\beta}_{+},1\right) & \left(NS\right)_{+},\\ \frac{\rho_{d_{+}}^{+}\left(s\right)}{\beta_{d_{+}}^{+}} \left(\boldsymbol{\beta}_{+}-\boldsymbol{\rho}_{+}\left(s\right)\right) & \left(S\right)_{+}. \end{cases} \end{equation} Moreover, in the case $\left(S\right)_{+}$: $p_{+}\left(s\right)=\mathsf{P}\left\{ X_{\theta_{s}}^{+}=0\right\} \neq0$ and $p_{+}\left(s\right)=\rho_{d_{+}}^{+} \left(s\right)/\beta_{d_{+}}^{+}$. If additionally $\mathsf{D}X_{1}<\infty,\mathsf{E}X_{\theta_{s}}^{-}<\infty$, then the moment generating function of $X_{\theta_{s}}^{-}$ admits the representation \begin{equation}\label{eq:2.11} \mathsf{E}e^{rX_{\theta_{s}}^{-}}=\left(1+\frac{r}{s} \left(A_{*}^{+}\left(s\right)+\mathsf{E}e^{rX_{\theta_{s}}^{+}} \tilde{\Pi}^{-}\left(r\right)+\mathbf{q}_{+}\left(s\right) \left(r\mathbf{I}+\mathbf{R}_{+}\left(s\right)\right)^{-1}\tilde{\Pi}^{-} \left(-\mathbf{R}_{+}\left(s\right)\right)\mathbf{e}\right)\right)^{-1}, \end{equation} where \[ A_{*}^{+}\left(s\right)=\begin{cases} \frac{\sigma^2}{2}\frac{\rho_{d_{+}+1}^{+}\left(s\right)}{\beta_{d_{+}}^{+}}& \left(NS\right)_{+},\\ a \frac{\rho_{d_{+}}^{+}\left(s\right)}{\beta_{d_{+}}^{+}} & \left(S\right)_{+}.
\end{cases} \] \end{cor} \begin{proof} To prove relations (\ref{eq:2.9}) -- (\ref{eq:2.11}) we can use (\ref{eq:2.2}) -- (\ref{eq:2.4}) and the fact that if for the dual process $Y_{t}=-X_{t}$ one of the cases $\left(NS\right)_{-}$ or $\left(S\right)_{-}$ holds, then for $X_{t}$ we have the case $\left(NS\right)_{+}$ or $\left(S\right)_{+}$, respectively, and $\mathsf{E}e^{rY_{\theta_{s}}^{+}} =\mathsf{E}e^{-rX_{\theta_{s}}^{-}}$. \end{proof} If the cases $\left(NS\right)_{-}$ and $\left(NS\right)_{+}$ (respectively, $\left(S\right)_{-}$ and $\left(S\right)_{+}$) hold at the same time, then the L\'evy process $X_{t}$ has a Gaussian component and possibly a drift (respectively, zero drift and no Gaussian component), and the jump part is a compound Poisson process with bilateral matrix-exponentially distributed jumps. \begin{cor}\label{cor:2.2} If we have the cases $\left(NS\right)_{-}$ and $\left(NS\right)_{+}$ at the same time, then \begin{equation}\label{eq:2.12} \frac{\partial}{\partial x}\mathsf{P} \left\{ X_{\theta_{s}}^{\pm}<x\right\} =\mathbf{q}_{\pm}\left(s\right) e^{\mathbf{R}_{\pm}\left(s\right)x}\mathbf{e},\pm x>0, \end{equation} where $\mathbf{q}_{\pm}\left(s\right)= \frac{\rho_{d_{\pm}+1}^{\pm}\left(s\right)}{\beta_{d_{\pm}}^{\pm}} \left(\boldsymbol{\beta}_{\pm},1\right)$. If we have $\left(S\right)_{-}$ and $\left(S\right)_{+}$ simultaneously, then $\mathsf{P}\left\{ X_{\theta_{s}}^{\pm}=0\right\} =\rho_{d_{\pm}}^{\pm}\left(s\right)/\beta_{d_{\pm}}^{\pm}$ and \begin{equation}\label{eq:2.13} \frac{\partial}{\partial x} \mathsf{P}\left\{ X_{\theta_{s}}^{\pm}<x\right\} = \mathbf{q}_{\pm}\left(s\right)e^{\mathbf{R}_{\pm}\left(s\right)x}\mathbf{e}, \pm x>0, \end{equation} where $\mathbf{q}_{\pm}\left(s\right)= \frac{\rho_{d_{\pm}}^{\pm}\left(s\right)}{\beta_{d_{\pm}}^{\pm}} \left(\boldsymbol{\beta}_{\pm}-\boldsymbol{\rho}_{\pm}\left(s\right)\right)$. \end{cor} Note that if $\sigma=0$, $a\geq0,$ and $\Pi\left(dx\right)=\lambda_{-}b_{1}e^{b_{1}x}dx$ for $x<0$, then the process $X_{t}$ is called almost lower semi-continuous (for details see \cite{Husak2011engl}); we then have the case $\left(S\right)_{-}$ with $d_{-}=1$, hence $N_{-}=1$, and the cumulant equation has a unique negative real root $-r_{1}^{-}\left(s\right)>-b_{1}$. Hence, by (\ref{eq:2.2}) the density of the infimum is $P'_{-}\left(s,x\right) =\frac{r_{1}^{-}\left(s\right)}{b_{1}} \left(b_{1}-r_{1}^{-}\left(s\right)\right)e^{r_{1}^{-}\left(s\right)x},x<0$ (cf. \cite[(3.110)]{Husak2011engl}). To find the distribution of the absolute supremum $X^{+}=\sup_{0\leq t<\infty}X_{t}$ or the absolute infimum $X^{-}= \inf_{0\leq t<\infty}X_{t}$, we should take into account the sign of $\mathsf{E} X_{1}$. If $\mu=\mathsf{E}X_{1}<0$ ($\mu>0$), then the distribution of $X^+$ ($X^-$) is non-degenerate and is defined in terms of the roots of the cumulant equation for $s=0$ (see, for instance, \cite{Husak2011engl}). If we have one of the cases $\left(NS\right)_{\pm}$ or $\left(S\right)_{\pm}$, then from \cite{Bratiychuk1990} it can be seen that for $\pm\mu<0$: $r_{i}^{\pm}\left(s\right)\underset{s\rightarrow0}{\longrightarrow}r_{i}^{\pm}, \Re\left[r_{i}^{\pm}\right]>0$, $i={1,\ldots,N_{\pm}}$, and for $\mp\mu<0$: $r_{i}^{\pm}\left(s\right)\underset{s\rightarrow0}{\longrightarrow}r_{i}^{\pm}, \Re\left[r_{i}^{\pm}\right]>0$, $i={2,\ldots,N_{\pm}}$, $r_{1}^{\pm}\left(s\right) \underset{s\rightarrow0}{\longrightarrow}0$, $s^{-1}r_{1}^{\pm}\left(s\right)\underset{s\rightarrow0}{\longrightarrow}1/|\mu|$. Thus we obtain the next corollary of Theorem \ref{thm:2.1}.
\begin{cor}\label{cor:2.3} Let one of the cases $\left(NS\right)_{-}$ or $\left(S\right)_{-}$ hold and $\mu=\mathsf{E}X_{1}<0$, then \begin{equation}\label{eq:2.14} \lim_{s\rightarrow0}s^{-1}P'_{-}\left(s,x\right) =\mathbf{q}'_{-}e^{\mathbf{R}_{-}\left(0\right)x}\mathbf{e},x<0, \end{equation} \begin{equation}\label{eq:2.15} \mathbf{q}'_{-}=\begin{cases} {\displaystyle \frac{\prod_{i=2}^{d_{-}+1}r_{i}^{-}}{|\mu|\beta_{d_{-}}^{-}} \left(\boldsymbol{\beta}_{-},1\right)} & \left(NS\right)_{-},\\ {\displaystyle \frac{\prod_{i=2}^{d_{-}}r_{i}^{-}}{|\mu| \beta_{d_{-}}^{-}}\left(\boldsymbol{\beta}_{-}-\boldsymbol{\rho}_{-} \left(0\right)\right)} & \left(S\right)_{-}. \end{cases} \end{equation} Moreover, in case $\left(S\right)_{-}$: $p'_{-}=\lim_{s\rightarrow0}s^{-1}p_{-}\left(s\right) =\left(\prod_{i=2}^{d_{-}}r_{i}^{-}\right)/\left(|\mu| \prod_{i=1}^{d_{-}}b_{i}\right)$. If additionally $\mathsf{D}X_{1}<\infty$, then the moment generating function of $X^{+}$ has the form \begin{multline}\label{eq:2.16} \mathsf{E}e^{rX^{+}}=\\=\left(1-r\left(A'_{-}+p'_{-}\tilde{\Pi}^{+}\left(r\right) +\mathbf{q}'_{-}\left(r\mathbf{I}+\mathbf{R}_{-}\left(0\right) \right)^{-1}\left(\tilde{\Pi}^{+}\left(r\right) \mathbf{I}-\tilde{\Pi}^{+}\left(-\mathbf{R}_{-}\left(0\right) \right)\right)\mathbf{e}\right)\right)^{-1}, \end{multline} \begin{equation*} A'_{-}=\lim_{s\rightarrow0}s^{-1}A_{*}^{-}\left(s\right)=\begin{cases} {\frac{\sigma^2}{2}\frac{\prod_{i=2}^{d_{-}+1}r_{i}^{-}}{|\mu|\beta_{d_{-}}^{-}}} & \left(NS\right)_{-},\\ { a\frac{\prod_{i=2}^{d_{-}}r_{i}^{-}}{|\mu| \beta_{d_{-}}^{-}}} & \left(S\right)_{-}. \end{cases} \end{equation*} The distribution of the overjump is defined by the relation \begin{multline}\label{eq:2.17} \mathsf{P}\left\{ X_{\tau_{x}^{+}}-x\in dv\right\} = A'_{-}\frac{\partial}{\partial x}\mathsf{P}\left\{ X^{+}<x\right\} \delta\left(v\right)dv+\\ \int_{0}^{x}\left(p'_{-}\Pi\left(dv+y\right)+\int_{y}^{\infty} \Pi\left(dv+z\right)\mathbf{q}'_{-}e^{\mathbf{R}_{-}\left(0\right) \left(y-z\right)}\mathbf{e}dz\right)\mathsf{P}\left\{ X^{+}\in x-dy\right\}. \end{multline} \end{cor} \section{Occupation time and ladder process} Denote the moment generating function for the time that the process $X_{t}$ spends in the interval $\left(x,+\infty\right)$ until $\theta_{s}$ by \[ D_{x}\left(s,u\right)=\mathsf{E}e^{-u\int_{0}^{\theta_{s}}I\left\{ X_{t}>x\right\} dt}. \] Combining the results of the previous section with the relations for $D_{x}\left(s,u\right)$ given in~\cite{Husak2011engl} yields the following statement. \begin{thm}\label{thm:3.1} If one of the cases $\left(NS\right)_{-}$ or $\left(S\right)_{-}$ $\left(a>0\right)$ holds, then \[ D_{0}(s,u)=\frac{s}{s+u}\frac{\rho_{N_{-}}^{-}\left(s+u\right)} {\rho_{N_{-}}^{-}\left(s\right)}, \] \begin{multline}\label{eq:3.1} D_{x}\left(s,u\right)= \frac{s}{s+u}\times\\\times\left(1-\frac{\rho_{N_{-}}^{-}\left(s+u\right)} {\rho_{N_{-}}^{-}\left(s\right)}\left(\boldsymbol{\rho}_{-}\left(s\right) -\boldsymbol{\rho}_{-}\left(s+u\right)\right)\mathbf{R}_{-}^{-1} \left(s+u\right)e^{\mathbf{R}_{-}\left(s+u\right)x}\mathbf{e}\right), x<0. 
\end{multline} If one of the cases $\left(NS\right)_{+}$ or $\left(S\right)_{+}$ $\left(a<0\right)$ holds, then \[ D_{0}(s,u)=\frac{\rho_{N_{+}}^{+}\left(s\right)}{\rho_{N_{+}}^{+}\left(s+u\right)}, \] \begin{equation}\label{eq:3.2} D_{x}\left(s,u\right)=1+\frac{\rho_{N_{+}}^{+}\left(s\right)} {\rho_{N_{+}}^{+}\left(s+u\right)}\left(\boldsymbol{\rho}_{+} \left(s+u\right)-\boldsymbol{\rho}_{+}\left(s\right)\right) \mathbf{R}_{+}^{-1}\left(s\right)e^{\mathbf{R}_{+}\left(s\right)x} \mathbf{e},x>0. \end{equation} \end{thm} \begin{proof} Following \cite[Theorem 2.6]{Husak2011engl}, for non-step-wise processes the following relations hold \begin{gather} \int_{-0}^{+\infty}e^{rx}D'_{x}\left(s,u\right)dx+D_{0}(s,u)= \frac{\mathsf{E}e^{rX_{\theta_{s}}^{+}}} {\mathsf{E}e^{rX_{\theta_{s+u}}^{+}}},\nonumber \\ \int_{-\infty}^{+0}e^{rx}D'_{x}\left(s,u\right)dx-D_{0}(s,u) =-\frac{s}{s+u}\frac{\mathsf{E}e^{rX_{\theta_{s+u}}^{-}}} {\mathsf{E}e^{rX_{\theta_{s}}^{-}}}.\label{eq:3.3} \end{gather} If one of the cases $\left(NS\right)_{-}$ or $\left(S\right)_{-}$ $\left(a>0\right)$ holds, then recalling Theorem \ref{thm:2.1} it follows that \begin{multline*} \frac{\mathsf{E}e^{rX_{\theta_{s+u}}^{-}}} {\mathsf{E}e^{rX_{\theta_{s}}^{-}}} =\frac{\rho_{N_{-}}^{-}\left(s+u\right)}{\rho_{N_{-}}^{-} \left(s\right)}\frac{\left(\boldsymbol{\rho}_{-}\left(s\right),1\right) \mathbf{h}_{N_{-}}\left(r\right)}{\left(\boldsymbol{\rho}_{-} \left(s+u\right),1\right)\mathbf{h}_{N_{-}}\left(r\right)}=\\= \frac{\rho_{N_{-}}^{-}\left(s+u\right)}{\rho_{N_{-}}^{-}\left(s\right)} \left(1-\frac{\left(\boldsymbol{\rho}_{-}\left(s+u\right)- \boldsymbol{\rho}_{-}\left(s\right)\right)\mathbf{h}_{N_{-}-1} \left(r\right)}{\left(\boldsymbol{\rho}_{-}\left(s+u\right), 1\right)\mathbf{h}_{N_{-}}\left(r\right)}\right). \end{multline*} Taking formula (\ref{eq:3.3}) into account, this gives \[ D'_{x}\left(s,u\right)=\frac{s}{s+u} \frac{\rho_{N_{-}}^{-}\left(s+u\right)}{\rho_{N_{-}}^{-}\left(s\right)} \left(\boldsymbol{\rho}_{-}\left(s+u\right)-\boldsymbol{\rho}_{-}\left(s\right) \right) e^{\mathbf{R}_{-}\left(s+u\right)x}\mathbf{e},\; x<0, \] and combining this with $\lim_{x\rightarrow-\infty}D_{x}\left(s,u\right) =\frac{s}{s+u}$, we obtain (\ref{eq:3.1}). Similarly, in the case $\left(NS\right)_{+}$ or $\left(S\right)_{+}$ $\left(a<0\right)$: \[ \frac{\mathsf{E}e^{-rX_{\theta_{s}}^{+}}} {\mathsf{E}e^{-rX_{\theta_{s+u}}^{+}}}= \frac{\rho_{N_{+}}^{+}\left(s\right)}{\rho_{N_{+}}^{+} \left(s+u\right)}\left(1+\frac{\left(\boldsymbol{\rho}_{+} \left(s+u\right)-\boldsymbol{\rho}_{+}\left(s\right)\right) \mathbf{h}_{N_{+}-1}\left(r\right)}{\left(\boldsymbol{\rho}_{+} \left(s\right),1\right)\mathbf{h}_{N_{+}}\left(r\right)}\right). \] Hence, \[ D'_{x}\left(s,u\right)=\frac{\rho_{N_{+}}^{+}\left(s\right)} {\rho_{N_{+}}^{+}\left(s+u\right)}\left(\boldsymbol{\rho}_{+} \left(s+u\right)-\boldsymbol{\rho}_{+}\left(s\right)\right) e^{\mathbf{R}_{+}\left(s\right)x}\mathbf{e},\; x>0, \] and taking into account that $\lim_{x\rightarrow+\infty} D_{x}\left(s,u\right)=1$ we deduce (\ref{eq:3.2}). \end{proof} This statement generalizes the representation of $D_{x}\left(s,u\right)$ known for almost semi-continuous processes given in~\cite{Husak2011engl}.
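To illustrate Theorem~\ref{thm:3.1}, we record its simplest special case (this specialization is stated here only for illustration). In the almost lower semi-continuous case with positive drift ($\sigma=0$, $a>0$, exponential negative jumps, so that $d_{-}=N_{-}=1$, $\boldsymbol{\rho}_{-}\left(s\right)=r_{1}^{-}\left(s\right)$ and $\mathbf{R}_{-}\left(s\right)=r_{1}^{-}\left(s\right)$), formula (\ref{eq:3.1}) becomes fully explicit: \[ D_{0}(s,u)=\frac{s}{s+u}\frac{r_{1}^{-}\left(s+u\right)}{r_{1}^{-}\left(s\right)},\qquad D_{x}\left(s,u\right)=\frac{s}{s+u}\left(1-\left(1-\frac{r_{1}^{-}\left(s+u\right)}{r_{1}^{-}\left(s\right)}\right)e^{r_{1}^{-}\left(s+u\right)x}\right),\quad x<0, \] which agrees with the case $N_{-}=1$ of formula (\ref{eq:3.8}) in Corollary~\ref{cor:3.3} below.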
Note that for a non-step-wise L\'evy process $\mathsf{P}\left\{ X_{t}=0\right\} =0$; hence, by \cite[VI, Lemma~15]{Bertoin1996}, for any $t\geq0$ the time it spends in $\left[0,\infty\right)$, $Q_{0}\left(t\right)= \int_{0}^{t}I\left\{ X_{v}\geq0\right\} dv$, and the instant of its last supremum, $g_{t}= \sup\left\{ v<t:X_{v}=X_{v}^{+}\right\} $, have the same law. Moreover, by \cite[Theorem 2.9]{Husak2011engl}, $Q_{0}\left(t\right)$ and the time at which the maximum is achieved, $T_{t}=\inf\left\{ v>0:X_{v}=X_{v}^{+}\right\} $, also have the same law. Hence, the results of Theorem \ref{thm:3.1} define the moment generating functions of $g_{\theta_{s}}$ and $T_{\theta_{s}}$. Let $L\left(t\right)$ be the local time that $X_{t}^{+}-X_{t}$ spends at zero during $\left[0,t\right]$, and let $$L^{-1}\left(t\right)= \inf\left\{ v>0:L\left(v\right)>t\right\} $$ be the inverse local time (for details, see \cite[VI]{Bertoin1996}). Denote by $\kappa\left(s,r\right)$ the Laplace exponent of the so-called ladder process $\left\{ L^{-1},X_{L^{-1}}\right\}$: $$e^{-\kappa\left(s,r\right)}=\mathsf{E}\left[e^{-sL^{-1} \left(1\right)-rX_{L^{-1}\left(1\right)}},1<L_{\infty}\right].$$ According to \cite[VI, (1)]{Bertoin1996}: \[ \mathsf{E}e^{-rX_{\theta_{s}}^{+}-ug_{\theta_{s}}}= \frac{\kappa\left(s,0\right)}{\kappa\left(s+u,r\right)}. \] Assuming that the normalization constant of the local time is 1, we can deduce that $\kappa\left(s,0\right)= \mathsf{E}e^{-\left(1-s\right)g_{\theta_{s}}}$. Taking into account that for non-step-wise processes $Q_{0}\left(\theta_{s}\right)$ and $g_{\theta_{s}}$ have the same distribution, we can write \begin{equation}\label{eq:3.4} \kappa\left(s,r\right)=\frac{D_{0}\left(s,1-s\right)} {\mathsf{E}e^{-rX_{\theta_{s}}^{+}}}. \end{equation} Hence, using Theorems \ref{thm:2.1} and \ref{thm:3.1}, we can deduce the following statement. \begin{cor}\label{cor:3.1} If one of the cases $\left(NS\right)_{-}$ or $\left(S\right)_{-}$ $\left(a>0\right)$ holds, then \begin{multline*} \kappa\left(s,-r\right)=\frac{\rho_{N_{-}}^{-}\left(1\right)} {\rho_{N_{-}}^{-}\left(s\right)}\times\\\times\left(s-r\left(A_{*}^{-}\left(s\right) +\mathsf{E}e^{rX_{\theta_{s}}^{-}}\tilde{\Pi}^{+}\left(r\right) -\mathbf{q}_{-}\left(s\right)\left(r\mathbf{I}+ \mathbf{R}_{-}\left(s\right)\right)^{-1} \tilde{\Pi}^{+}\left(-\mathbf{R}_{-}\left(s\right)\right) \mathbf{e}\right)\right). \end{multline*} If $X_{t}$ is a compound Poisson process with negative drift $a<0$, without a Gaussian component $\left(\sigma=0\right)$, and with bilateral ME-distributed jumps, then \[ \kappa\left(s,r\right)=\frac{\rho_{d_{-}+1}^{-}\left(1\right)} {\rho_{d_{-}+1}^{-}\left(s\right)}\frac{\beta_{d_{+}}^{+}} {\rho_{d_{+}}^{+}\left(s\right)}\left(1+\frac{\left(\boldsymbol{\rho}_{+} \left(s\right)-\boldsymbol{\beta}_{+}\right)\mathbf{h}_{d_{+}-1} \left(r\right)}{\left(\boldsymbol{\rho}_{+}\left(s\right),1\right) \mathbf{h}_{d_{+}}\left(r\right)}\right). \] \end{cor} The next statement applies Theorem \ref{thm:3.1} and Corollary~\ref{cor:2.3} to get a representation of the moment generating function of the total sojourn time above a level, $D_{x}\left(0,u\right)= \mathsf{E}e^{-u\int_{0}^{\infty}I\left\{ X_{t}>x\right\} dt}$, which in risk theory defines the time spent in the risk zone (for details, see \cite{Husak2011engl}).
\begin{cor}\label{cor:3.2} If one of the cases $\left(NS\right)_{-}$ or $\left(S\right)_{-}$ $\left(a>0\right)$ holds and $\mu=\mathsf{E}X_{1}<0$, then \[ \mathsf{E}e^{-u\int_{0}^{\infty}I\left\{ X_{t}>0\right\} dt} =\frac{|\mu|}{u}\frac{\prod_{i=1}^{N_{-}}r_{i}^{-} \left(u\right)}{\prod_{i=2}^{N_{-}}r_{i}^{-}}, \] \begin{equation}\label{eq:3.5} \mathsf{E}e^{-u\int_{0}^{\infty}I\left\{ X_{t}>x\right\} dt} =\frac{|\mu|\prod_{i=1}^{N_{-}}r_{i}^{-}\left(u\right)} {u\prod_{i=2}^{N_{-}}r_{i}^{-}}\left(\boldsymbol{\rho}_{-} \left(u\right)-\boldsymbol{\rho}_{-}\left(0\right)\right) \mathbf{R}_{-}^{-1}\left(u\right)e^{\mathbf{R}_{-}\left(u\right)x} \mathbf{e},x<0. \end{equation} The integral transform of the moment generating function of the sojourn time over a positive level has the next representation \begin{multline}\label{eq:3.6} \int_{-0}^{+\infty}e^{rx}D'_{x}\left(0,u\right)dx+ D_{0}(0,u)=\frac{\mathsf{E}e^{rX^{+}}} {\mathsf{E}e^{rX_{\theta_{u}}^{+}}}=\\ =\frac{1-\frac{r}{u}\left(A_{*}^{-}\left(u\right) +\mathsf{E}e^{rX_{\theta_{u}}^{-}}\tilde{\Pi}^{+} \left(r\right)-\mathbf{q}_{-}\left(u\right) \left(r\mathbf{I}+\mathbf{R}_{-}\left(u\right)\right)^{-1} \tilde{\Pi}^{+}\left(-\mathbf{R}_{-}\left(u\right)\right) \mathbf{e}\right)}{1-r\left(A'_{-}+p'_{-} \tilde{\Pi}^{+}\left(r\right)+\mathbf{q}'_{-}\left(r\mathbf{I}+ \mathbf{R}_{-}\left(0\right)\right)^{-1} \left(\tilde{\Pi}^{+}\left(r\right)\mathbf{I}-\tilde{\Pi}^{+} \left(-\mathbf{R}_{-}\left(0\right)\right)\right)\mathbf{e}\right)}. \end{multline} If for the process $X_t$: $\sigma=0,a\leq0$, negative jumps have a ME distribution and $\mu<0$, then \begin{multline}\label{eq:3.7} \mathsf{E}e^{-u\int_{0}^{\infty}I\left\{ X_{t}>x\right\} dt}= \mathsf{P}\left\{ X^{+}<x\right\} + \frac{p_{+}\left(u\right)}{u}\bigg(p_{-}\left(u\right) \int_{0}^{x}\overline{\Pi}^{+}\left(x-z\right) \mathsf{P}\left\{ X^{+}\in dz\right\} +\\ +\mathbf{q}_{-}\left(u\right)\int_{0}^{\infty} \overline{\Pi}^{+}\left(y\right)\int_{\max\left\{ 0,x-y\right\} }^{x} e^{\mathbf{R}_{-}\left(u\right)(x-y-z)}\mathsf{P}\left\{ X^{+}\in dz\right\} \mathbf{e} dy\bigg). \end{multline} \end{cor} \begin{proof} Equality (\ref{eq:3.5}) follows by taking the limit as $s\rightarrow 0$ in (\ref{eq:3.1}). Formula (\ref{eq:3.6}) is a straightforward consequence of formulas (\ref{eq:2.4}), (\ref{eq:2.16}) and (\ref{eq:3.3}). If $\sigma=0,a\leq0$, then $\left\{ \tau_{0}^{+},X_{\tau_{0}^{+}}\right\} $ has non degenerate joint distribution. Applying prelimit generalization of the Pollaczek-Khinchin formula (\cite[Theorem 2.4]{Husak2011engl}), we get \[ \int_{-0}^{+\infty}e^{rx}D'_{x}\left(s,u\right)dx+ D_{+0}(s,u)=\frac{\mathsf{E}e^{rX_{\theta_{s}}^{+}}} {\mathsf{P}\left\{ X_{\theta_{s+u}}^{+}=0\right\} } \left(1-\mathsf{E}\left[e^{-\left(s+u\right)\tau_{0}^{+} +rX_{\tau_{0}^{+}}},\tau_{0}^{+}<\infty\right]\right). \] Whence $D_{+0}(s,u)=\frac{\mathsf{P}\left\{ X_{\theta_{s}}^{+} =0\right\} }{\mathsf{P}\left\{ X_{\theta_{s+u}}^{+}=0\right\}}$ and for $x>0$ \[ D_{x}\left(s,u\right)= P_{+}\left(s,x\right)+\int_{0}^{x} \mathsf{P}\left\{ X_{\theta_{s+u}}^{+}>0, X_{\tau_{0}^{+}}>x-z\right\} dP_{+}\left(s,z\right). \] Due to \cite[Corollary 2.3]{Husak2011engl}: \[ \mathsf{P}\left\{ X_{\theta_{s+u}}^{+}>0,X_{\tau_{0}^{+}}>z\right\} =\frac{\mathsf{P}\left\{ X_{\theta_{s+u}}^{+}=0\right\} } {s+u}\int_{-\infty}^{0} \overline{\Pi}^{+}\left(z-y\right)dP_{-}\left(s+u,y\right). 
\] If negative jumps have the ME distribution, then \begin{multline*} D_{x}\left(s,u\right) =P_{+}\left(s,x\right)+\frac{p_{+}\left(s+u\right)}{s+u} \bigg(p_{-}\left(s+u\right)\int_{0}^{x}\overline{\Pi}^{+} \left(x-z\right)dP_{+}\left(s,z\right)+\\ +\mathbf{q}_{-}\left(s+u\right)\int_{0}^{\infty}\overline{\Pi}^{+} \left(y\right)\int_{\max\left\{ 0,x-y\right\} }^{x} e^{\mathbf{R}_{-}\left(s+u\right)(x-y-z)}\mathbf{e}dP_{+}\left(s,z\right)dy\bigg). \end{multline*} from here as $s\rightarrow0$ relation (\ref{eq:3.7}) follows. \end{proof} Note that, for the step-wise $\left(a=0\right)$ almost lower semi-continuous processes formula (\ref{eq:3.7}) is reduced to \begin{multline*} \mathsf{E}e^{-u\int_{0}^{\infty}I\left\{ X_{t}>x\right\} dt}= \mathsf{P}\left\{ X^{+}<x\right\} +\frac{1}{u+\lambda} \bigg(\int_{0}^{x}\overline{\Pi}^{+}\left(x-z\right) \mathsf{P}\left\{ X^{+}\in dz\right\} +\\ +\left(b_{1}-r_{1}^{-}\left(u\right)\right) \int_{0}^{\infty}\overline{\Pi}^{+}\left(y\right) \int_{\max\left\{ 0,x-y\right\} }^{x}e^{r_{1}^{-}\left(u\right)(x-y-z)} \mathsf{P}\left\{ X^{+}\in dz\right\} dy\bigg). \end{multline*} If negative (positive) jumps have hyperexponential distribution, that is, if we have additional condition that $b_{m_{-}}>\ldots>b_{2}>b_{1}>0$ ($c_{m_{+}}>\ldots>c_{2}>c_{1}>0$), then the roots of the cumulant equation $\left\{ -r_{i}^{-}\left(s\right)\right\} _{i=1}^{N_{-}}$ $\left(\left\{ r_{i}^{+}\left(s\right)\right\} _{i=1}^{N_{+}}\right)$ are real and distinct (see \cite{Lewis2008}), and the matrix exponents in Theorem~\ref{thm:3.1} can be simplified. \begin{cor} \label{cor:3.3} If one of the cases $\left(NS\right)_{-}$ or $\left(S\right)_{-}$ $\left(a>0\right)$ holds and $b_{m_{-}}>\ldots>b_{1}>0$, then \[ D_{0}(s,u)=\frac{s}{s+u}\prod_{i=1}^{N_{-}} \frac{r_{i}^{-}\left(s+u\right)}{r_{i}^{-}\left(s\right)}, \] \begin{equation}\label{eq:3.8} D_{x}\left(s,u\right)=\frac{s}{s+u}\left(1+\sum_{k=1}^{N_{-}} \frac{\prod_{i=1}^{N_{-}}\left(r_{k}^{-}\left(s+u\right)/r_{i}^{-} \left(s\right)-1\right)}{\prod_{i=1,i\neq k}^{N_{-}} \left(r_{k}^{-}\left(s+u\right)/r_{i}^{-}\left(s+u\right)-1\right)} e^{r_{k}^{-}\left(s+u\right)x}\right),x<0. \end{equation} For $\mu<0$ \[ D_{0}(0,u)=\frac{|\mu|}{u} \prod_{i=2}^{N_{-}}\frac{r_{i}^{-}\left(u\right)}{r_{i}^{-}}, \] \begin{equation}\label{eq:3.9} D_{x}\left(0,u\right)=\frac{|\mu|}{u}\sum_{k=1}^{N_{-}} \frac{\prod_{i=2}^{N_{-}}\left(r_{k}^{-}\left(u\right)/r_{i}^{-}-1\right)} {\prod_{i=1,i\neq k}^{N_{-}}\left(r_{k}^{-}\left(u\right)/ r_{i}^{-}\left(u\right)-1\right)} r_{k}^{-}\left(u\right)e^{r_{k}^{-}\left(u\right)x},x<0. \end{equation} If one of the cases $\left(NS\right)_{+}$ or $\left(S\right)_{+}$ $\left(a<0\right)$ holds and $c_{m_{+}}>\ldots>c_{1}>0$, then \[ D_{0}(s,u)=\prod_{i=1}^{N_{+}}\frac{r_{i}^{+}\left(s\right)} {r_{i}^{+}\left(s+u\right)}, \] \begin{equation}\label{eq:3.10} D_{x}\left(s,u\right)=1-\sum_{k=1}^{N_{+}} \frac{\prod_{i=1}^{N_{+}}\left(1-r_{k}^{+}\left(s\right)/r_{i}^{+} \left(s+u\right)\right)}{\prod_{i=1,i\neq k}^{N_{+}} \left(1-r_{k}^{+}\left(s\right)/r_{i}^{+} \left(s\right)\right)}e^{-r_{k}^{+}\left(s\right)x}\:,x>0. \end{equation} For $\mu>0$ and $x\geq0$: $D_{x}\left(0,u\right)=0,$ in other words $\mathsf{P}\left\{ \int_{0}^{+\infty}I\left\{ X_{t}>x\right\} dt=+\infty\right\} =1$. 
\end{cor} \begin{proof} If we have one of the cases $\left(NS\right)_{-}$ or $\left(S\right)_{-}$ $\left(a>0\right)$ and $b_{m_{-}}>\ldots>b_{2}>b_{1}>0$, then the roots $\left\{ -r_{i}^{-}\left(s\right)\right\} _{i=1}^{N_{-}}$ are real and distinct, and instead of using formula (\ref{eq:3.1}) it is more convenient to substitute relation (\ref{eq:2.6}) in (\ref{eq:3.3}) and invert with respect to $r$. Similarly for the case $\left(NS\right)_{+}$ or $\left(S\right)_{+}$ $\left(a<0\right)$ and $c_{m_{+}}>\ldots>c_{2}>c_{1}>0$ we can deduce formula (\ref{eq:3.10}). To get $D_{x}\left(0,u\right)$ in the corresponding cases apply the limit behavior of the roots of cumulant equation as $s\rightarrow0$. To find the limit as $s\rightarrow0$ in (\ref{eq:3.10}) for the case $\left(NS\right)_{+}$ or $\left(S\right)_{+}$ and for $\mu>0$ we can use the relation \[ \frac{\prod_{i=1}^{N_{+}}\left(1-r_{k}^{+}\left(s\right)/r_{i}^{+} \left(s+u\right)\right)}{\prod_{i=1,i\neq k}^{N_{+}} \left(1-r_{k}^{+}\left(s\right)/r_{i}^{+} \left(s\right)\right)}\underset{s\rightarrow0}{\longrightarrow} \begin{cases} 1 & k=1,\\ 0 & k\neq1. \end{cases} \] \end{proof} Note that, using the results of \cite{Kuznetsov2012}, Corollary \ref{cor:3.3} could be generalized for the case of the so called meromorphic L\'evy processes (the cumulant function is holomorphic except a set of isolated points, the poles of the function), for which $N_{\pm}=\infty$ in (\ref{eq:3.8})--(\ref{eq:3.10}). \end{document}
\begin{document} \begin{frontmatter} \title{Some hypergeometric summation theorems and reduction formulas via the Laplace transform method} \author{M. I. Qureshi} \author{*Showkat Ahmad Dar} \ead{[email protected]} \address{miqureshi\[email protected],[email protected]} \address{Department of Applied Sciences and Humanities,\\ Faculty of Engineering and Technology,\\ Jamia Millia Islamia (Central University), New Delhi, 110025, India.} \cortext[cor1]{Corresponding author} \begin{abstract} In this paper, we obtain, by means of the Laplace transform, analytical solutions of some generalized classes of hyperbolic integrals in terms of the hypergeometric functions ${}_{3}F_{2}(\pm1)$, ${}_{4}F_{3}(\pm1)$, ${}_{5}F_{4}(\pm1)$,\\${}_{6}F_{5}(\pm1)$, ${}_{7}F_{6}(\pm1)$ and ${}_{8}F_{7}(\pm1)$, with suitable convergence conditions, by using some algebraic properties of Pochhammer symbols. In addition, reduction formulas for ${}_{4}F_{3}(1)$ and ${}_{7}F_{6}(-1)$ and some new summation theorems (not recorded earlier in the literature of hypergeometric functions) for ${}_{3}F_{2}(-1), {}_{6}F_{5}(\pm1)$, ${}_{7}F_{6}(\pm1)$ and ${}_{8}F_{7}(\pm1)$ are obtained. \\ \\ \textit{2010 AMS Classification: 33C05; 33C20; 44A10; 33B15 } \end{abstract} \begin{keyword} \small{Generalized hypergeometric functions; Summation and multiplication theorems; Laplace transform; Beta and Gamma function} \end{keyword} \end{frontmatter} \section{Introduction and Preliminaries} For the sake of conciseness, we use the following notation: \\ $~~~~~~~~~~~\mathbb{N}:=\{1,2,...\};~~~~~~\mathbb{N}_{0}:=\mathbb{N}\cup\{0\};~~~~~~\mathbb{Z}_{0}^{-}:=\mathbb{Z}^{-}\cup\{0\}=\{0,-1,-2,-3,...\},$\\ where the symbols $\mathbb{N}$ and $\mathbb{Z}$ denote the sets of natural numbers and integers, respectively; as usual, the symbols $\mathbb{R}$ and $\mathbb{C}$ denote the sets of real and complex numbers, respectively. \\ Here the notation $(\lambda)_{\upsilon}~(\lambda, \upsilon\in\mathbb{C})$ denotes the Pochhammer symbol (or shifted factorial, since $(1)_{n}=n!$), defined in general by \begin{equation}\label{GRI04} (\lambda)_{\upsilon}:=\frac{\Gamma(\lambda+\upsilon)}{\Gamma(\lambda)}=\begin{cases} 1 , \quad~~~~~~~~~~~~~ (\upsilon=0~;~\lambda\in\mathbb{C}\backslash\{0\}) \\ \lambda(\lambda+1)...(\lambda+n-1), \quad (\upsilon=n\in\mathbb{N}~;~\lambda\in\mathbb{C}). \\ \end{cases} \end{equation} A natural generalization of the Gauss hypergeometric series ${}_{2}F_{1}$ is the generalized hypergeometric series ${}_{p}F_{q}$, with $p$ numerator parameters $\alpha_{1}, ... , \alpha_{p}$ and $q$ denominator parameters $\beta_{1}, ..., \beta_{q}$. It is defined by\\ \begin{equation}\label{GRI05} {}_{p}F_{q}\left(\ \begin{array}{lll}\alpha_{1},...,\alpha_{p}~;~\\\beta_{1}, ..., \beta_{q}~;~\end{array} z\right) =\sum_{n=0}^{\infty}\frac{(\alpha_{1})_{n} ... (\alpha_{p})_{n}}{(\beta_{1})_{n} ... (\beta_{q})_{n}}\frac{z^{n}}{n!}~,\newline \end{equation} where $\alpha_{i}\in\mathbb{C}~(i=1,...,p)$ and $\beta_{j}\in\mathbb{C}\setminus \mathbb{Z}_{0}^{-}~(j=1,...,q)~\left(\ \mathbb{Z}_{0}^{-}:=\{0,-1,-2,...\}\right)$ and \\ $\left(\ p,~q\in\mathbb{N}_{0}:=\mathbb{N}\cup\{0\}=\{0,1,2,...\}\right)$. The ${}_{p}F_{q}(\cdot)$ series in eq.(\ref{GRI05}) is convergent for $|z|<\infty$ if $p\leq q$, and for $|z|<1$ if $p=q+1$.
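For instance, with no parameters the series in eq.(\ref{GRI05}) reduces to the exponential function, $\sum_{n=0}^{\infty}z^{n}/n!=e^{z}$, while a single numerator parameter gives the binomial series recalled in eq.(\ref{GRI08}) below; a frequently used special value of the Pochhammer symbol is $\left(\frac{1}{2}\right)_{n}=\frac{(2n)!}{4^{n}\,n!}$.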
Furthermore, if we set \begin{equation}\label{GRI06} \omega=\left(\ \sum_{j=1}^{q}\beta_{j}-\sum_{i=1}^{p}\alpha_{i}\right),\newline \end{equation} it is known that the ${}_{p}F_{q}$ series, with $p=q+1$, is\\ (i) absolutely convergent for $|z|=1$ if $\Re(\omega)>0,$\\ (ii) conditionally convergent for $|z|=1, z\neq1$, if $-1<Re(\omega)\leq0$.\\ The binomial function is given by \begin{equation}\label{GRI08} (1-z)^{-a}={}_{1}F_{0}\left(\begin{array}{lll}a~;\\ \overline{~~~};\end{array} z \right) =\sum_{n=0}^{\infty}\frac{(a)_{n}}{n!}z^{n}, \end{equation} ~~~~~~~~~where $|z|<1,~~a\in\mathbb{C}$.\\ Next we collect some results that we will need in the sequel.\\ \begin{equation} B_{z}(\alpha,\beta)=\int_{0}^{z}t^{\alpha-1}(1-t)^{\beta-1}dt,~~~~~0<z\leq1, \end{equation} \begin{equation} {}_{2}F_{1}\left(\begin{array}{lll}a,~~b;\\ 1+b,;\end{array} z\right)=\frac{b}{z^{b}}B_{z}(b,1-a), \end{equation} where $B_{z}$ is incomplete beta function.\\ The Dixon's theorem for ${}_{3}F_{2}$ with positive unit argument is given by \\ \begin{equation}\label{GRI11} {}_{3}F_{2}\left(\begin{array}{lll}a,~~b,~~c~~~~~~~~~~~~~~~~~~;\\ 1+a-b,~1+a-c;\end{array} 1\right) =\frac{\Gamma(1+a-b)\Gamma(1+a-c)\Gamma(1+\frac{a}{2})\Gamma(1+\frac{a}{2}-b-c)}{\Gamma(1+\frac{a}{2}-b)\Gamma(1+\frac{a}{2}-c)\Gamma(1+a)\Gamma(1+a-b-c)}, \end{equation} ~~~~~~~~where $\Re(a-2b-2c)>-2;~ 1+a-b,~1+a-c\in\mathbb{C}\backslash \mathbb{Z}_{0}^{-}$,\\ and when $c=1+\frac{a}{2}$ in the eq.(\ref{GRI11}), we get \begin{equation} {}_{3}F_{2}\left(\begin{array}{lll}a,~~1+\frac{a}{2},~~ b;\\ \frac{a}{2},~~1+a-b;~\end{array} 1\right)=0, \end{equation} ~~~~~~~~where $\Re(b)<0;~\frac{a}{2}, 1+a-b\in\mathbb{C}\backslash \mathbb{Z}_{0}^{-}$. \\ The following summation theorems ${}_{3}F_{2}(-1)$ is given by \begin{equation}\label{FGG} {}_{3}F_{2}\left(\begin{array}{lll}a,~~1+\frac{a}{2},~~ b;\\ \frac{a}{2},~~1+a-b;~\end{array} -1\right) =\frac{\Gamma(1+a-b)\Gamma(\frac{1+a}{2})}{\Gamma(\frac{1+a}{2}-b)\Gamma(1+a)}, \end{equation} ~~~~~~~~where $\Re(b)<\frac{1}{2}$; ~$\frac{a}{2}, 1+a-b\in\mathbb{C}\backslash \mathbb{Z}_{0}^{-}$.\\ The eq.(\ref{FGG}) can be obtained by setting $2d=1+a$ and $c=b$ in the eq.(\ref{GRI12}).\\ Contiguous function relations \cite[p.71, Q.N0.21, part(13)]{R} \begin{equation}\label{GGG} (\beta-\gamma+1)~{}_{2}F_{1}\left(\begin{array}{lll}\alpha,~\beta;\\ \gamma~~~~;~\end{array} z\right)=\beta~{}_{2}F_{1}\left(\begin{array}{lll}\alpha,~\beta+1;\\ \gamma~~~~~~~~~~~~;~\end{array} z\right) -(\gamma-1)~{}_{2}F_{1}\left(\begin{array}{lll}\alpha,~\beta;\\ \gamma-1~;~\end{array} z\right). \end{equation} In the both sides of eq.(\ref{GGG}) replace $z$ by $zt$, multiply by $t^{h-1}(1-t)^{g-h-1}$, integrate with respect to $t$ over the interval $(0,1)$ and using the definition of beta function, after simplification we get \begin{equation}\label{GGH} (\beta-\gamma+1)~{}_{3}F_{2}\left(\begin{array}{lll}\alpha,~\beta,~h;\\ \gamma,~g~~~~~;~\end{array} z\right)=\beta~{}_{3}F_{2}\left(\begin{array}{lll}\alpha,~\beta+1,~h;\\ \gamma,~g~~~~~~~~~~~;~\end{array} z\right)-(\gamma-1)~{}_{3}F_{2}\left(\begin{array}{lll}\alpha,~\beta,~h;\\ \gamma-1,~g;~\end{array} z\right). 
\end{equation} In the eq.(\ref{GGH}) put $\alpha=a,~\beta=c,~h=b,~\gamma=1+d,~g=1+c$, after simplification we get \begin{equation}\label{GGR} {}_{3}F_{2}\left(\begin{array}{lll}a,~b,~c~~~~~~~;\\ 1+c,~1+d;~\end{array} z\right)=\frac{c}{(c-d)}~{}_{2}F_{1}\left(\begin{array}{lll}a,~b;\\ d+1;~\end{array} z\right)-\frac{d}{(c-d)}~{}_{3}F_{2}\left(\begin{array}{lll}a,~b,~c;\\ c+1,d;~\end{array} z\right). \end{equation} Put $d=b$ in eq.(\ref{GGR}), we get\\ When $c\neq b$ and $1+b,1+c\in\mathbb{C}\backslash \mathbb{Z}_{0}^{-}$, then \begin{equation}\label{FRI} {}_{3}F_{2}\left(\begin{array}{lll}a,~b,~c~~~~~~~;\\ 1+b,~1+c;~\end{array} z\right)=\left(\frac{c}{c-b}\right){}_{2}F_{1}\left(\begin{array}{lll}a,~b;\\ 1+b;\end{array} z\right)-\left(\frac{b}{c-b}\right){}_{2}F_{1}\left(\begin{array}{lll}a,~c;\\ 1+c;\end{array} z\right). \end{equation} When $z=1$ in the eq.(\ref{FRI}) and using Gauss classical summation theorem, we get \begin{equation} {}_{3}F_{2}\left(\begin{array}{lll}a,~b,~c~~~~~~~;\\ 1+b,~1+c;~\end{array} 1\right)=\frac{bc}{(c-b)}\Gamma(1-a)\left\{\frac{\Gamma(b)}{\Gamma(1+b-a)}-\frac{\Gamma(c)}{\Gamma(1+c-a)}\right\}, \end{equation} where $c\neq b; \Re(a)<1$; $1+b,1+c\in\mathbb{C}\backslash \mathbb{Z}_{0}^{-}$,\\ \\ The classical summation theorems for hypergeometric series ${}_{4}F_{3}(\pm1)$ \cite[ p.28, eq.(4.4.3)]{B2} are given by \begin{equation}\label{GRI12} {}_{4}F_{3}\left(\begin{array}{lll}a,~~1+\frac{a}{2},~~ c,~~d~~~~~~~~~~~~~;\\ \frac{a}{2},~~1+a-c,~~ 1+a-d;~\end{array} -1\right) =\frac{\Gamma(1+a-c)\Gamma(1+a-d)}{\Gamma(1+a)\Gamma(1+a-c-d)}\newline, \end{equation} ~~~~~~~~provided $\Re(a-2c-2d)>-2 ;~ \frac{a}{2}, 1+a-c, 1+a-d\in\mathbb{C}\backslash \mathbb{Z}_{0}^{-}$.\\ When $e=(1+a)/2$ in the eq.(\ref{GRI0}), we get \begin{multline}\label{GRI102} {}_{4}F_{3}\left(\begin{array}{lll}a,~~1+\frac{a}{2},~~ c,~~d~~~~~~~~~~~~~;\\ \frac{a}{2},~~1+a-c,~~ 1+a-d;~\end{array} 1\right)\\ =\frac{\Gamma(1+a-c)\Gamma(1+a-d)\Gamma(\frac{1+a}{2})\Gamma(\frac{1+a}{2}-c-d)}{\Gamma(1+a)\Gamma(\frac{1+a}{2}-d)\Gamma(\frac{1+a}{2}-c)\Gamma(1+a-c-d)},\newline \end{multline} ~~~~~~~~provided $\Re(2c+2d-a)<1;~ \frac{a}{2}, 1+a-c, 1+a-d\in\mathbb{C}\backslash \mathbb{Z}_{0}^{-}$.\\ Another classical summation theorem for hypergeometric series ${}_{5}F_{4}(1)$ \cite[p.27,eq.(4.4.1)]{B2}, is given by \begin{multline}\label{GRI0} {}_{5}F_{4}\left(\begin{array}{lll}a,~~1+\frac{a}{2},~~ c,~~d,~~e~~~~~~~~~~~~~~~~~~~~~~~~~;\\ \frac{a}{2},~~1+a-c,~~ 1+a-d,~~ 1+a-e;~\end{array} 1\right)\\ =\frac{\Gamma(1+a-c)\Gamma(1+a-d)\Gamma(1+a-e)\Gamma(1+a-c-d-e)}{\Gamma(1+a)\Gamma(1+a-d-e)\Gamma(1+a-c-e)\Gamma(1+a-c-d)}, \end{multline} provided $\Re(a-c-d-e)>-1 ;~ \frac{a}{2}, 1+a-c, 1+a-d,1+a-e\in\mathbb{C}\backslash \mathbb{Z}_{0}^{-}$.\\ Laplace transform of any constant $k$ is given by \begin{equation}\label{GRI22} \mathcal{L}[k;q]=\int_{0}^{\infty}e^{-qt}k~dt=\frac{k}{q}, \end{equation} ~~~~~provided \begin{equation}\label{GRI23} \Re(q)>0. 
\end{equation} The Digamma function (or Psi function) is given by \begin{equation*} \Psi(x)=\frac{d}{dx}\{\ln\Gamma(x)\}=\frac{\Gamma~^{'}(x)}{\Gamma(x)}, \end{equation*} \begin{equation} =-\gamma+(x-1)\sum_{n=0}^{\infty}\frac{1}{(n+1)(n+x)}, \end{equation} where $\gamma(=0.57721566490...)$ being the Euler-Mascheroni constant and $\Psi^{'}(x)=\frac{d}{dx}\{\Psi(x)\}$ is called trigamma function,\\ where \begin{equation} \Psi^{'}(x)=\sum_{k=0}^{\infty}\frac{1}{(x+k)^{2}}=\frac{1}{x^{2}}~{}_{3}F_{2}\left(\begin{array}{lll} 1,~x,x~~~;\\ 1+x,~1+x;\end{array} 1\right), \end{equation} OR \begin{equation} {}_{3}F_{2}\left(\begin{array}{lll} 1,~x,x~~~;\\ 1+x,~1+x;\end{array} 1\right)=x^{2}\Psi^{'}(x), \end{equation} and \begin{equation}\label{GRI24} {}_{3}F_{2}\left(\begin{array}{lll} 1,~a,b~~~;\\ 1+a,~1+b;\end{array} 1\right)=\frac{ab}{(b-a)}\left[\Psi(b)-\Psi(a)\right],~~~~b\neq a. \end{equation} Properties of Digamma function \cite{G,G3} \begin{equation}\label{GRI25} \Psi(1+x)=\Psi(x)+\frac{1}{x}, \end{equation} \begin{equation}\label{GRI262} \Psi(1-x)=\Psi(x)+\pi\cot(\pi x), \end{equation} where $x\neq0,\pm1,\pm2,\pm3,...$. \begin{equation} \Psi\left(\frac{1}{2}+x\right)-\Psi\left(\frac{1}{2}-x\right)=\pi \tan(\pi x), \end{equation} where $x\neq\pm\frac{1}{2},\pm\frac{3}{2},\pm\frac{5}{2},...$.\\ Lower case beta function of one variable which is related with Digamma function, is given by \begin{equation}\label{GRI25} \beta(x)=\sum_{k=0}^{\infty}\frac{(-1)^{k}}{(k+x)}=\frac{1}{x}~{}_{2}F_{1}\left(\begin{array}{lll} 1,~x;\\ 1+x;\end{array} -1\right), \end{equation} \begin{equation}\label{GRI26} ~~~~~~~~~=\frac{1}{2}\bigg[\Psi\left(\frac{1+x}{2}\right)-\Psi\left(\frac{x}{2}\right)\bigg],~~~~~~~~~x\in\mathbb{C}\backslash \mathbb{Z}_{0}^{-}, \end{equation} and \begin{equation}\label{GRI000} {}_{3}F_{2}\left(\begin{array}{lll} 1,~a,b~~~;\\ 1+a,~1+b;\end{array} -1\right)=\frac{ab}{(b-a)}\left[\beta(a)-\beta(b)\right],~~~~b\neq a, \end{equation} \begin{equation} {}_{3}F_{2}\left(\begin{array}{lll} 1,~x,x~~~;\\ 1+x,~1+x;\end{array} -1\right)=-x^{2}\frac{d}{dx}\{\beta(x)\}=-x^{2}\beta^{'}(x). 
\end{equation} Hypergeometric forms of some trigonometric ratios \cite[pp.137-138, eq.(4.2.6); eq.(4.2.9); eq.(4.2.3)]{hy} and \cite[pp.48-49, eqns(20,23,26)]{QI} , are given by \begin{equation}\label{GRI31} \tan(z)=\frac{8z}{(\pi^{2}-4z^{2})}~~{}_{3}F_{2}\left(\begin{array}{lll} 1,~\frac{1}{2}+\frac{z}{\pi},~~\frac{1}{2}-\frac{z}{\pi};\\ \frac{3}{2}+\frac{z}{\pi},~~\frac{3}{2}-\frac{z}{\pi}~~~~;\end{array} 1\right), \end{equation} where $z\in\mathbb{C}\setminus\{\pm\frac{\pi}{2},~\pm\frac{3\pi}{2},~\pm\frac{5\pi}{2},...\}$, \begin{equation}\label{GRI32} \sec(z)=\frac{4\pi}{(\pi^{2}-4z^{2})}~~{}_{4}F_{3}\left(\begin{array}{lll} 1,~\frac{3}{2},~~\frac{1}{2}+\frac{z}{\pi},~~\frac{1}{2}-\frac{z}{\pi};\\ \frac{1}{2},~~\frac{3}{2}+\frac{z}{\pi},~~\frac{3}{2}-\frac{z}{\pi}~~~~;\end{array} -1\right), \end{equation} where $z\in\mathbb{C}\setminus\{\pm\frac{\pi}{2},~\pm\frac{3\pi}{2},~\pm\frac{5\pi}{2},...\}$, \begin{eqnarray}\label{GRI33} \sec^{2}(z)=\frac{4}{(2z-\pi)^{2}}~~{}_{3}F_{2}\left(\begin{array}{lll} 1,~\frac{1}{2}-\frac{z}{\pi},~~\frac{1}{2}-\frac{z}{\pi};\\ \frac{3}{2}-\frac{z}{\pi},~~\frac{3}{2}-\frac{z}{\pi}~;\end{array} 1\right)+\\ +\frac{4}{(2z+\pi)^{2}}~~{}_{3}F_{2}\left(\begin{array}{lll} 1,~\frac{1}{2}+\frac{z}{\pi},~\frac{1}{2}+\frac{z}{\pi};\\ \frac{3}{2}+\frac{z}{\pi},~\frac{3}{2}+\frac{z}{\pi}~;\end{array} 1\right), \end{eqnarray} where $z\in\mathbb{C}\setminus\{\pm\frac{\pi}{2},~\pm\frac{3\pi}{2},~\pm\frac{5\pi}{2},...\}$.\\ \\ $~~~~~~$ The plan of this paper is as follows. First, we obtain generalized class and analytical solutions of some hyperbolic integrals in terms of ${}_{6}F_{5}(\pm1)$, ${}_{7}F_{6}(\pm1)$, ${}_{8}F_{7}(\pm1)$ shown in \texttt{section 4}. Special class of some hyperbolic integrals in terms of ${}_{3}F_{2}(\pm1),{}_{4}F_{3}(\pm1),{}_{5}F_{4}(\pm1)$ are given in \texttt{section 5}. We apply suitable product formulas associated with hyperbolic function in special class of hyperbolic integrals given in \texttt{section 6}. Moreover, we find two reduction formulas and some new summation theorems given in \texttt{sections 2-3} by comparing the similar integrals. \section{Some new summation theorems} \begin{theorem} The first summation theorem ${}_{6}F_{5}(-1)$ holds true: \begin{multline} {}_{6}F_{5}\left(\begin{array}{lll}v,~~1+\frac{v}{2},~~\frac{v}{2}-\frac{a}{2c}-\frac{b}{2c},~~\frac{v}{2}+\frac{a}{2c}+\frac{b}{2c}, ~~\frac{v}{2}-\frac{a}{2c}+\frac{b}{2c},~~\frac{v}{2}+\frac{a}{2c}-\frac{b}{2c}~~~~~~~~;\\ \frac{v}{2},~1+\frac{v}{2}-\frac{a}{2c}-\frac{b}{2c},~1+\frac{v}{2}+\frac{a}{2c}+\frac{b}{2c},~1+\frac{v}{2}-\frac{a}{2c}+\frac{b}{2c},~1+\frac{v}{2}+\frac{a}{2c}-\frac{b}{2c},~~;\end{array} -1\right)\\ =\frac{\{(vc)^{2}-(a+b)^{2}\}\{(vc)^{2}-(a-b)^{2}\}}{16vabc^{2}\Gamma(v)}\bigg[\Gamma\left(\frac{vc+a+b}{2c}\right)\Gamma\left(\frac{vc-a-b}{2c}\right)\\ -\Gamma\left(\frac{vc+a-b}{2c}\right)\Gamma\left(\frac{vc-a+b}{2c}\right)\bigg], \end{multline} \end{theorem} where $\Re(v)<4,~\Re(c)>0,~\Re(vc\pm a\pm b)>0;~\frac{v}{2},~1+\frac{v}{2}\pm\frac{a}{2c}\pm\frac{b}{2c}\in\mathbb{C}\backslash \mathbb{Z}_{0}^{-}$.\\ \textbf{Proof}: Comparing the two equations (\ref{GRI45}) and (\ref{sss1}),we get a summation theorem for ${}_{6}F_{5}(-1)$. 
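Such closed forms can be checked numerically for admissible parameter values. The following short Python sketch (standard library only; the sample values $v=1/2$, $a=1/5$, $b=3/20$, $c=1$ are chosen purely for illustration and are not taken from the text) sums the ${}_{6}F_{5}(-1)$ series of the first summation theorem directly and compares it with the stated right-hand side; the same check applies, mutatis mutandis, to the theorems below.
\begin{verbatim}
# Numerical sanity check of the first summation theorem (illustration only).
import math

v, a, b, c = 0.5, 0.2, 0.15, 1.0   # sample values with vc > a + b > a - b > 0

num = [v, 1 + v/2, v/2 - (a + b)/(2*c), v/2 + (a + b)/(2*c),
       v/2 - (a - b)/(2*c), v/2 + (a - b)/(2*c)]
den = [v/2, 1 + v/2 - (a + b)/(2*c), 1 + v/2 + (a + b)/(2*c),
       1 + v/2 - (a - b)/(2*c), 1 + v/2 + (a - b)/(2*c)]

# Partial sum of the 6F5 series at z = -1; the terms decay like n^(v - 4),
# so 2*10^5 terms give roughly 13 correct digits.
lhs, term = 0.0, 1.0
for n in range(200000):
    lhs += term
    ratio = 1.0
    for p in num:
        ratio *= p + n
    for q in den:
        ratio /= q + n
    term *= -ratio / (n + 1.0)

g = math.gamma
pref = ((v*c)**2 - (a + b)**2) * ((v*c)**2 - (a - b)**2) / (16*v*a*b*c**2*g(v))
rhs = pref * (g((v*c + a + b)/(2*c)) * g((v*c - a - b)/(2*c))
              - g((v*c + a - b)/(2*c)) * g((v*c - a + b)/(2*c)))

print(lhs, rhs)   # the two values should agree if the identity holds
\end{verbatim}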
\begin{theorem} The second summation theorem ${}_{6}F_{5}(1)$ holds true:
\begin{multline}
{}_{6}F_{5}\left(\begin{array}{lll}v,~~1+\frac{v}{2},~~\frac{v}{2}-\frac{a}{2c}-\frac{b}{2c},~~\frac{v}{2}+\frac{a}{2c}+\frac{b}{2c}, ~~\frac{v}{2}-\frac{a}{2c}+\frac{b}{2c},~~\frac{v}{2}+\frac{a}{2c}-\frac{b}{2c}~~~~~~~~;\\ \frac{v}{2},~1+\frac{v}{2}-\frac{a}{2c}-\frac{b}{2c},~1+\frac{v}{2}+\frac{a}{2c}+\frac{b}{2c},~1+\frac{v}{2}-\frac{a}{2c}+\frac{b}{2c},~1+\frac{v}{2}+\frac{a}{2c}-\frac{b}{2c},~~;\end{array} 1\right)\\
=\frac{\{(vc)^{2}-(a+b)^{2}\}\{(vc)^{2}-(a-b)^{2}\}}{16vabc^{2}\Gamma(v)} \bigg[\Gamma\left(\frac{vc+a+b}{2c}\right)\Gamma\left(\frac{vc-a-b}{2c}\right)\frac{\cos\left(\frac{(a+b)\pi}{2c}\right)}{\cos(\frac{v\pi}{2})}\\
-\Gamma\left(\frac{vc+a-b}{2c}\right)\Gamma\left(\frac{vc-a+b}{2c}\right)\frac{\cos\left(\frac{(a-b)\pi}{2c}\right)}{\cos(\frac{v\pi}{2})}\bigg],
\end{multline}
\end{theorem}
where $\Re(v)<3,~\Re(c)>0,~\Re(vc\pm a\pm b)>0;~\frac{v}{2},~1+\frac{v}{2}\pm\frac{a}{2c}\pm\frac{b}{2c}\in\mathbb{C}\backslash \mathbb{Z}_{0}^{-}$.\\
\textbf{Proof}: Comparing the two equations (\ref{GRI46}) and (\ref{ss2}), we get a summation theorem for ${}_{6}F_{5}(1)$.
\begin{theorem} The third summation theorem ${}_{6}F_{5}(-1)$ holds true:
\begin{multline}
{}_{6}F_{5}\left(\begin{array}{lll}1,\frac{3}{2},\frac{1}{2}-\frac{a}{2b}-\frac{c}{2b}, \frac{1}{2}-\frac{a}{2b}+\frac{c}{2b},\frac{1}{2}+\frac{a}{2b}+\frac{c}{2b},\frac{1}{2}+\frac{a}{2b}-\frac{c}{2b}~;\\ \frac{1}{2},\frac{3}{2}-\frac{a}{2b}-\frac{c}{2b} ,\frac{3}{2}-\frac{a}{2b}+\frac{c}{2b},\frac{3}{2}+\frac{a}{2b}+\frac{c}{2b},\frac{3}{2}+\frac{a}{2b}-\frac{c}{2b};\end{array} -1\right)\\
=\frac{\pi(b-a-c)(b+a+c)(b-a+c)(b+a-c)}{2^{2}acb^{2}} \frac{\sin\left(\frac{a\pi}{2b}\right)\sin\left(\frac{c\pi}{2b}\right)}{\{\cos\left(\frac{c\pi}{b}\right)+\cos\left(\frac{a\pi}{b}\right)\}},
\end{multline}
\end{theorem}
where $\Re(b)>0,~\Re(b\pm a\pm c)>0,~\frac{3}{2}\pm\frac{a}{2b}\pm\frac{c}{2b}\in\mathbb{C}\backslash \mathbb{Z}_{0}^{-}$, $\frac{a\pm c}{b}\in\mathbb{C}\backslash\{\pm1,\pm3,\pm5,...\}$.\\
\textbf{Proof}: Comparing the two equations (\ref{FAF81}) and (\ref{FAF811}), we get a summation theorem for ${}_{6}F_{5}(-1)$.
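Theorem 2.3 rests on two evaluations of the same integral; the trigonometric evaluation (\ref{FAF81}) entering its right hand side can be confirmed by direct quadrature. The sketch below is illustrative only (Python with \texttt{mpmath}; $a$, $b$, $c$ are arbitrary admissible values with $b>a+c$).
\begin{verbatim}
# Quadrature check of the trigonometric closed form behind Theorem 2.3.
from mpmath import mp, mpf, quad, pi, sin, cos, sinh, cosh, inf

mp.dps = 25
a, b, c = mpf('0.3'), mpf('1.0'), mpf('0.2')

lhs = quad(lambda x: sinh(a*x)*sinh(c*x)/cosh(b*x), [0, inf])
rhs = (pi/b)*sin(a*pi/(2*b))*sin(c*pi/(2*b))/(cos(c*pi/b) + cos(a*pi/b))
print(lhs, rhs)   # the two values should agree
\end{verbatim}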
\begin{theorem} The fourth summation theorem ${}_{7}F_{6}(1)$ holds true: \begin{multline}\label{FA56} {}_{7}F_{6}\left(\begin{array}{lll}v,1+\frac{v}{2}-\frac{\sqrt{a^{2}-b^{2}}}{2c},1+\frac{v}{2}+\frac{\sqrt{a^{2}-b^{2}}}{2c},\frac{v}{2}-\frac{a}{2c}-\frac{b}{2c}, \frac{v}{2}-\frac{a}{2c}+\frac{b}{2c},\frac{v}{2}+\frac{a}{2c}+\frac{b}{2c},\frac{v}{2}+\frac{a}{2c}-\frac{b}{2c}~;\\ \frac{v}{2}-\frac{\sqrt{a^{2}-b^{2}}}{2c},\frac{v}{2}+\frac{\sqrt{a^{2}-b^{2}}}{2c},1+\frac{v}{2}-\frac{a}{2c}-\frac{b}{2c} ,1+\frac{v}{2}-\frac{a}{2c}+\frac{b}{2c},1+\frac{v}{2}+\frac{a}{2c}+\frac{b}{2c},1+\frac{v}{2}+\frac{a}{2c}-\frac{b}{2c};\end{array} 1\right)\\ =\frac{\{(vc)^{2}-(a+b)^{2}\}\{(vc)^{2}-(a-b)^{2}\}}{16(v^{2}ac^{3}-a^{3}c+ab^{2}c)\Gamma(v)} \bigg[\Gamma\left(\frac{vc+a+b}{2c}\right)\Gamma\left(\frac{vc-a-b}{2c}\right)\frac{\sin\left(\frac{(a+b)\pi}{2c}\right)}{\sin(\frac{v\pi}{2})}\\ +\Gamma\left(\frac{vc+a-b}{2c}\right)\Gamma\left(\frac{vc-a+b}{2c}\right)\frac{\sin\left(\frac{(a-b)\pi}{2c}\right)}{\sin(\frac{v\pi}{2})}\bigg], \end{multline} \end{theorem} where $\Re(v)<2,~\Re(c)>0,~\Re(vc\pm a\pm b)>0;~\frac{v}{2}\pm\frac{\sqrt{a^{2}-b^{2}}}{2c},~1+\frac{v}{2}\pm\frac{a}{2c}\pm\frac{b}{2c}\in\mathbb{C}\backslash \mathbb{Z}_{0}^{-}$.\\ \textbf{Proof}: Comparing the two equations (\ref{GRI48}) and (\ref{ss3}),we get a summation theorem for ${}_{7}F_{6}(1)$. \begin{theorem} The fifth summation theorem ${}_{7}F_{6}(1)$ holds true: \begin{multline}\label{FA56} {}_{7}F_{6}\left(\begin{array}{lll}1,\frac{3}{2}-\frac{\sqrt{a^{2}-c^{2}}}{2b},\frac{3}{2}+\frac{\sqrt{a^{2}-c^{2}}}{2b},\frac{1}{2}-\frac{a}{2b}-\frac{c}{2b}, \frac{1}{2}-\frac{a}{2b}+\frac{c}{2b},\frac{1}{2}+\frac{a}{2b}+\frac{c}{2b},\frac{1}{2}+\frac{a}{2b}-\frac{c}{2b}~;\\ \frac{1}{2}-\frac{\sqrt{a^{2}-c^{2}}}{2b},\frac{1}{2}+\frac{\sqrt{a^{2}-c^{2}}}{2b},\frac{3}{2}-\frac{a}{2b}-\frac{c}{2b} ,\frac{3}{2}-\frac{a}{2b}+\frac{c}{2b},\frac{3}{2}+\frac{a}{2b}+\frac{c}{2b},\frac{3}{2}+\frac{a}{2b}-\frac{c}{2b};\end{array} 1\right)\\ =\frac{\pi(b-a-c)(b+a+c)(b-a+c)(b+a-c)}{4(ab^{3}-a^{3}b+abc^{2})} \frac{\sin\left(\frac{a\pi}{b}\right)}{\{\cos\left(\frac{c\pi}{b}\right)+\cos\left(\frac{a\pi}{b}\right)\}}, \end{multline} \end{theorem} where $\Re(b)>0,~\Re(b\pm a\pm c)>0;~\frac{1}{2}\pm\frac{\sqrt{a^{2}-c^{2}}}{2b},~\frac{3}{2}\pm\frac{a}{2b}\pm\frac{c}{2b}\in\mathbb{C}\backslash \mathbb{Z}_{0}^{-}$,~$\frac{a\pm c}{b}\in\mathbb{C}\backslash\{\pm1,\pm3,\pm5,...\}$.\\ \textbf{Proof}: Comparing the two equations (\ref{FAF71}) and (\ref{FAF710}), we get a summation theorem for ${}_{7}F_{6}(1)$. 
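Similarly, the trigonometric evaluation (\ref{FAF71}) underlying Theorem 2.5 can be confirmed by quadrature; the following sketch (not part of the proof) assumes Python with \texttt{mpmath} and arbitrary admissible values of $a$, $b$, $c$ with $b>a+c$.
\begin{verbatim}
# Quadrature check of the trigonometric closed form behind Theorem 2.5.
from mpmath import mp, mpf, quad, pi, sin, cos, sinh, cosh, inf

mp.dps = 25
a, b, c = mpf('0.3'), mpf('1.0'), mpf('0.2')

# the integrand has the finite limit a/b as x -> 0
f = lambda x: a/b if x == 0 else sinh(a*x)*cosh(c*x)/sinh(b*x)
lhs = quad(f, [0, inf])
rhs = (pi/(2*b))*sin(a*pi/b)/(cos(c*pi/b) + cos(a*pi/b))
print(lhs, rhs)   # the two values should agree
\end{verbatim}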
\begin{theorem} The sixth summation theorem ${}_{7}F_{6}(-1)$ holds true: \begin{multline} {}_{7}F_{6}\left(\begin{array}{lll}1,\frac{3}{2}-\frac{\sqrt{a^{2}-c^{2}}}{2b},\frac{3}{2}+\frac{\sqrt{a^{2}-c^{2}}}{2b},\frac{1}{2}-\frac{a}{2b}-\frac{c}{2b}, \frac{1}{2}-\frac{a}{2b}+\frac{c}{2b},\frac{1}{2}+\frac{a}{2b}+\frac{c}{2b},\frac{1}{2}+\frac{a}{2b}-\frac{c}{2b}~;\\ \frac{1}{2}-\frac{\sqrt{a^{2}-c^{2}}}{2b},\frac{1}{2}+\frac{\sqrt{a^{2}-c^{2}}}{2b},\frac{3}{2}-\frac{a}{2b}-\frac{c}{2b} ,\frac{3}{2}-\frac{a}{2b}+\frac{c}{2b},\frac{3}{2}+\frac{a}{2b}+\frac{c}{2b},\frac{3}{2}+\frac{a}{2b}-\frac{c}{2b};\end{array} -1\right)\\ =\frac{\pi(b-a-c)(b+a+c)(b-a+c)(b+a-c)}{2(ab^{3}-a^{3}b+abc^{2})} \frac{\cos\left(\frac{a\pi}{2b}\right)\cos\left(\frac{c\pi}{2b}\right)}{\{\cos\left(\frac{c\pi}{b}\right)+\cos\left(\frac{a\pi}{b}\right)\}}\\ -\frac{(b-a-c)(b+a+c)(b-a+c)(b+a-c)}{4(ab^{3}-a^{3}b+abc^{2})}\left[\beta\left(\frac{(a+b+c)}{2b}\right)+\beta\left(\frac{(a+b-c)}{2b}\right)\right], \end{multline} \end{theorem} where $\Re(b)>0,~\Re(b\pm a\pm c)>0;~\frac{1}{2}\pm\frac{\sqrt{a^{2}-c^{2}}}{2b},~\frac{3}{2}\pm\frac{a}{2b}\pm\frac{c}{2b}\in\mathbb{C}\backslash \mathbb{Z}_{0}^{-}$,$\frac{a\pm c}{b}\in\mathbb{C}\backslash\{\pm1,\pm3,\pm5,...\}$.\\ \textbf{Proof}:Comparing the two equations (\ref{FAF101}) and (\ref{FAF111}),we get a summation theorem for ${}_{7}F_{6}(-1)$. \begin{theorem} The seventh summation theorem ${}_{8}F_{7}(-1)$ holds true: \begin{multline} {}_{8}F_{7}\left(\begin{array}{lll}v,1+\frac{v}{2},1+\frac{v}{2}-\frac{\sqrt{a^{2}+b^{2}}}{2c},1+\frac{v}{2}+\frac{\sqrt{a^{2}+b^{2}}}{2c},\frac{v}{2}-\frac{a}{2c}-\frac{b}{2c}, \frac{v}{2}-\frac{a}{2c}+\frac{b}{2c},\frac{v}{2}+\frac{a}{2c}+\frac{b}{2c},\frac{v}{2}+\frac{a}{2c}-\frac{b}{2c}~;\\ \frac{v}{2}, \frac{v}{2}-\frac{\sqrt{a^{2}+b^{2}}}{2c},\frac{v}{2}+\frac{\sqrt{a^{2}+b^{2}}}{2c},1+\frac{v}{2}-\frac{a}{2c}-\frac{b}{2c} ,1+\frac{v}{2}-\frac{a}{2c}+\frac{b}{2c},1+\frac{v}{2}+\frac{a}{2c}+\frac{b}{2c},1+\frac{v}{2}+\frac{a}{2c}-\frac{b}{2c};\end{array} -1\right)\\ =\frac{\{(vc)^{2}-(a+b)^{2}\}\{(vc)^{2}-(a-b)^{2}\}}{8(v^{3}c^{4}-a^{2}vc^{2}-b^{2}vc^{2})\Gamma(v)} \bigg[\Gamma\left(\frac{vc+a+b}{2c}\right)\Gamma\left(\frac{vc-a-b}{2c}\right)\\ +\Gamma\left(\frac{vc+a-b}{2c}\right)\Gamma\left(\frac{vc-a+b}{2c}\right)\bigg], \end{multline} \end{theorem} where $\Re(v)<2,~\Re(c)>0,~\Re(vc\pm a\pm b)>0;\frac{v}{2},~\frac{v}{2}\pm\frac{\sqrt{a^{2}+b^{2}}}{2c},~1+\frac{v}{2}\pm\frac{a}{2c}\pm\frac{b}{2c}\in\mathbb{C}\backslash \mathbb{Z}_{0}^{-}$.\\ \textbf{Proof}: Comparing the two equations (\ref{GRI49}) and (\ref{ss4}),we get a summation theorem for ${}_{8}F_{7}(-1)$. 
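Theorem 2.7 equates the ${}_{8}F_{7}(-1)$ representation (\ref{GRI49}) with the gamma-function evaluation (\ref{ss4}) of the same integral; the latter is easily checked by quadrature. The sketch below is illustrative (Python with \texttt{mpmath}; $v$, $a$, $b$, $c$ are arbitrary admissible values with $vc>a+b$ and $\Re(v)<2$).
\begin{verbatim}
# Quadrature check of the gamma-function closed form behind Theorem 2.7.
from mpmath import mp, mpf, quad, gamma, cosh, inf

mp.dps = 25
v, a, b, c = mpf('0.5'), mpf('0.2'), mpf('0.1'), mpf('1.0')

lhs = quad(lambda x: cosh(a*x)*cosh(b*x)/cosh(c*x)**v, [0, inf])
rhs = 2**(v - 3)/(c*gamma(v))*(gamma((v*c + a + b)/(2*c))*gamma((v*c - a - b)/(2*c))
                               + gamma((v*c + a - b)/(2*c))*gamma((v*c - a + b)/(2*c)))
print(lhs, rhs)   # the two values should agree
\end{verbatim}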
\begin{theorem} The eighth summation theorem ${}_{8}F_{7}(1)$ holds true:
\begin{multline}
{}_{8}F_{7}\left(\begin{array}{lll}v,1+\frac{v}{2},1+\frac{v}{2}-\frac{\sqrt{a^{2}+b^{2}}}{2c},1+\frac{v}{2}+\frac{\sqrt{a^{2}+b^{2}}}{2c},\frac{v}{2}-\frac{a}{2c}-\frac{b}{2c}, \frac{v}{2}-\frac{a}{2c}+\frac{b}{2c},\frac{v}{2}+\frac{a}{2c}+\frac{b}{2c},\frac{v}{2}+\frac{a}{2c}-\frac{b}{2c}~;\\ \frac{v}{2}, \frac{v}{2}-\frac{\sqrt{a^{2}+b^{2}}}{2c},\frac{v}{2}+\frac{\sqrt{a^{2}+b^{2}}}{2c},1+\frac{v}{2}-\frac{a}{2c}-\frac{b}{2c} ,1+\frac{v}{2}-\frac{a}{2c}+\frac{b}{2c},1+\frac{v}{2}+\frac{a}{2c}+\frac{b}{2c},1+\frac{v}{2}+\frac{a}{2c}-\frac{b}{2c};\end{array} 1\right)\\
=\frac{\{(vc)^{2}-(a+b)^{2}\}\{(vc)^{2}-(a-b)^{2}\}}{8(v^{3}c^{4}-a^{2}vc^{2}-b^{2}vc^{2})\Gamma(v)} \bigg[\Gamma\left(\frac{vc+a+b}{2c}\right)\Gamma\left(\frac{vc-a-b}{2c}\right)\frac{\cos\left(\frac{(a+b)\pi}{2c}\right)}{\cos(\frac{v\pi}{2})}\\
+\Gamma\left(\frac{vc+a-b}{2c}\right)\Gamma\left(\frac{vc-a+b}{2c}\right)\frac{\cos\left(\frac{(a-b)\pi}{2c}\right)}{\cos(\frac{v\pi}{2})}\bigg],
\end{multline}
\end{theorem}
where $\Re(v)<1,~\Re(c)>0,~\Re(vc\pm a\pm b)>0;~\frac{v}{2},~\frac{v}{2}\pm\frac{\sqrt{a^{2}+b^{2}}}{2c},~1+\frac{v}{2}\pm\frac{a}{2c}\pm\frac{b}{2c}\in\mathbb{C}\backslash \mathbb{Z}_{0}^{-}$.\\
\textbf{Proof}: Comparing the two equations (\ref{GRI50}) and (\ref{ss5}), we get a summation theorem for ${}_{8}F_{7}(1)$.
\begin{theorem} The ninth summation theorem ${}_{8}F_{7}(-1)$ holds true:
\begin{multline}
{}_{8}F_{7}\left(\begin{array}{lll}1,\frac{3}{2},\frac{3}{2}-\frac{\sqrt{a^{2}+c^{2}}}{2b},\frac{3}{2}+\frac{\sqrt{a^{2}+c^{2}}}{2b},\frac{1}{2}-\frac{a}{2b}-\frac{c}{2b}, \frac{1}{2}-\frac{a}{2b}+\frac{c}{2b},\frac{1}{2}+\frac{a}{2b}+\frac{c}{2b},\frac{1}{2}+\frac{a}{2b}-\frac{c}{2b}~;\\ \frac{1}{2}, \frac{1}{2}-\frac{\sqrt{a^{2}+c^{2}}}{2b},\frac{1}{2}+\frac{\sqrt{a^{2}+c^{2}}}{2b},\frac{3}{2}-\frac{a}{2b}-\frac{c}{2b} ,\frac{3}{2}-\frac{a}{2b}+\frac{c}{2b},\frac{3}{2}+\frac{a}{2b}+\frac{c}{2b},\frac{3}{2}+\frac{a}{2b}-\frac{c}{2b};\end{array} -1\right)\\
=\frac{\pi(b-a-c)(b+a+c)(b-a+c)(b+a-c)}{2(b^{4}-a^{2}b^{2}-c^{2}b^{2})} \frac{\cos\left(\frac{a\pi}{2b}\right)\cos\left(\frac{c\pi}{2b}\right)}{\{\cos\left(\frac{c\pi}{b}\right)+\cos\left(\frac{a\pi}{b}\right)\}},
\end{multline}
\end{theorem}
where $\Re(b)>0,~\Re(b\pm a\pm c)>0;~\frac{1}{2}\pm\frac{\sqrt{a^{2}+c^{2}}}{2b},~\frac{3}{2}\pm\frac{a}{2b}\pm\frac{c}{2b}\in\mathbb{C}\backslash \mathbb{Z}_{0}^{-}$, $\frac{a\pm c}{b}\in\mathbb{C}\backslash\{\pm1,\pm3,\pm5,...\}$.\\
\textbf{Proof}: Comparing the two equations (\ref{FAF09}) and (\ref{FAF109}), we get a summation theorem for ${}_{8}F_{7}(-1)$.
\begin{theorem} The tenth summation theorem ${}_{3}F_{2}(-1)$ holds true:
\begin{equation}\label{FA55}
{}_{3}F_{2}\left(\begin{array}{lll}1,~\frac{1}{2}-\frac{a}{2b},~~\frac{1}{2}+\frac{a}{2b};\\ \frac{3}{2}-\frac{a}{2b},~~\frac{3}{2}+\frac{a}{2b};\end{array} -1\right)=\frac{(b^{2}-a^{2})}{2ab}\bigg[\frac{\pi}{2}\sec\left(\frac{\pi a}{2b}\right)-\beta\left(\frac{a+b}{2b}\right)\bigg],
\end{equation}
\begin{equation}\label{FA77}
=\frac{(b^{2}-a^{2})}{8ab}\left[\Psi\left(\frac{3b-a}{4b}\right)-\Psi\left(\frac{b-a}{4b}\right)-\Psi\left(\frac{3b+a}{4b}\right)+\Psi\left(\frac{b+a}{4b}\right)\right],
\end{equation}
where $\Re(b)>0,~\Re(b\pm a)>0,~\frac{3}{2}\pm\frac{a}{2b}\in\mathbb{C}\backslash \mathbb{Z}_{0}^{-}$,~ $\frac{a}{b}\in\mathbb{C}\backslash\{\pm1,\pm3,\pm5,...\}$.
\end{theorem} \textbf{Proof}: The summation formula with unit negative argument (\ref{FA55}) is obtained by comparing the two solutions of the integral $\int_{0}^{\infty}\frac{\sinh(ax)}{\cosh(b x)}dx$, given in (\ref{FA53}), (\ref{FA54}) and is not available in the literature of the hypergeometric summation theorem. The above eq.(\ref{FA77}) is obtained by using properties of beta function of one variable (\ref{GRI26}) and (\ref{GRI000}). \section{ Some reduction formulas} \begin{theorem} The first reduction formula holds true: \begin{equation} \label{FAF17} {}_{4}F_{3}\left(\begin{array}{lll}2,2,~\frac{1}{2}+\frac{a}{2b},~\frac{1}{2}-\frac{a}{2b};\\ 1,~\frac{5}{2}+\frac{a}{2b},~\frac{5}{2}-\frac{a}{2b}~~~;\end{array} 1\right) =\frac{(9b^{2}-a^{2})}{8b^{2}}~~{}_{3}F_{2}\left(\begin{array}{lll}1,~\frac{1}{2}-\frac{a}{2b},~~\frac{1}{2}+\frac{a}{2b};\\ \frac{3}{2}-\frac{a}{2b},~~\frac{3}{2}+\frac{a}{2b}~~~~;\end{array} 1\right), \end{equation} \end{theorem} where $\Re(b)>0,~\Re(b\pm a)>0,~\frac{3}{2}\pm\frac{a}{2b}\in\mathbb{C}\backslash\mathbb{Z}_{0}^{-}$;~$\frac{a}{b}\in\mathbb{C}\backslash\{\pm1,\pm3,\pm5,...\}$.\\ \textbf{Proof}: The reduction formula (\ref{FAF17}) is obtained by comparing the two integrals (\ref{FA1}) and (\ref{FA33}). \begin{theorem} The second reduction formula holds true: \begin{eqnarray*} {}_{7}F_{6}\left(\begin{array}{lll}v,1+\frac{v}{2}-\frac{\sqrt{a^{2}-b^{2}}}{2c},1+\frac{v}{2}+\frac{\sqrt{a^{2}-b^{2}}}{2c}, \frac{v}{2}-\frac{a}{2c}-\frac{b}{2c},\frac{v}{2}+\frac{a}{2c}+\frac{b}{2c},\frac{v}{2}-\frac{a}{2c}+\frac{b}{2c},\frac{v}{2}+\frac{a}{2c}-\frac{b}{2c};\\ \frac{v}{2}-\frac{\sqrt{a^{2}-b^{2}}}{2c},\frac{v}{2}+\frac{\sqrt{a^{2}-b^{2}}}{2c},1+\frac{v}{2}-\frac{a}{2c}-\frac{b}{2c}, 1+\frac{v}{2}+\frac{a}{2c}+\frac{b}{2c},1+\frac{v}{2}-\frac{a}{2c}+\frac{b}{2c},1+\frac{v}{2}+\frac{a}{2c}-\frac{b}{2c};\end{array} -1\right) \end{eqnarray*} \begin{multline}\label{FAF18} =\frac{\{(vc)^{2}-(a+b)^{2}\}\{(vc)^{2}-(a-b)^{2}\}}{4(v^{2}ac^{2}-a^{3}+ab^{2})}\times\\\times \bigg[\frac{1}{(vc-a-b)}~{}_{2}F_{1}\left(\begin{array}{lll}v,\frac{vc-a-b}{2c}~~~;\\ 1+\frac{vc-a-b}{2c};\end{array} -1\right) -\frac{1}{(vc+a+b)}~{}_{2}F_{1}\left(\begin{array}{lll}v,\frac{vc+a+b}{2c}~~~;\\ 1+\frac{vc+a+b}{2c};\end{array} -1\right)\\ +\frac{1}{(vc-a+b)}~{}_{2}F_{1}\left(\begin{array}{lll}v,\frac{vc-a+b}{2c}~~~;\\ 1+\frac{vc-a+b}{2c};\end{array} -1\right) -\frac{1}{(vc+a-b)}~{}_{2}F_{1}\left(\begin{array}{lll}v,\frac{vc+a-b}{2c}~~~;\\ 1+\frac{vc+a-b}{2c};\end{array} -1\right)\bigg], \end{multline} where $\Re(c)>0,~\Re(vc\pm a\pm b)>0;~\frac{v}{2}\pm\frac{\sqrt{a^{2}-b^{2}}}{2c},~1+\frac{v}{2}\pm\frac{a}{2c}\pm\frac{b}{2c}\in\mathbb{C}\backslash \mathbb{Z}_{0}^{-}$. \end{theorem} \textbf{Proof}: Comparing the integral (\ref{GRI47}) and its companion (\ref{cc9}), we get the reduction formula (\ref{FAF18}). The integral representation of ${}_{2}F_{1}(-1)$ type hypergeometric functions involved in reduction formula (\ref{FAF18}) is given below \begin{equation}\label{FRS} {}_{2}F_{1}\left(\begin{array}{lll}a,~b,;\\ 1+b;\end{array} -1\right)=b\int_{0}^{1}\frac{t^{b-1}}{(1+t)^{a}}dt,~~~~~~\Re(b)>0. 
\end{equation} The definite integral (\ref{FRS}) can be solved by using suitable numerical methods (for example composite trapezoidal rule, composite Simpson's $1/3$ rule, composite Simpson's $3/8$ rule, composite Boole rule, composite Midris two rules, composite Weddle rule, composite Sadiq rule, Gauss-Legendre three points formula, Gauss-Chebyshev three points formula, Radau three points formula, Lobatto three points formula etc ). \section{Generalized class of some hyperbolic integrals in terms of ${}_{6}F_{5}(\pm1)$, ${}_{7}F_{6}(\pm1)$ and ${}_{8}F_{7}(\pm1)$} Many authors have studied some definite integrals containing the integrands as a quotient of hyperbolic functions. Mainly, V. H. Moll et.al evaluated some definite integrals given in the table of Gradshteyn and Ryzhik \cite{G,G3}, by using the change of independent variables. We obtain generalizations and analytical solutions of some hyperbolic integrals, using hypergeometric approach and Laplace transform method. \begin{theorem} The first generalized hyperbolic integral holds true: \begin{multline}\label{GRI45} \int_{0}^{\infty}\frac{\sinh(ax)\sinh(bx)}{\cosh^{v}(cx)}dx=\frac{2^{v+1}v abc}{(vc-a-b)(vc+a+b)(vc-a+b)(vc+a-b)}\times\\\times ~~{}_{6}F_{5}\left(\begin{array}{lll}v,~~1+\frac{v}{2},~~\frac{v}{2}-\frac{a}{2c}-\frac{b}{2c},~~\frac{v}{2}+\frac{a}{2c}+\frac{b}{2c}, ~~\frac{v}{2}-\frac{a}{2c}+\frac{b}{2c},~~\frac{v}{2}+\frac{a}{2c}-\frac{b}{2c}~~~~~~~~~~~~~~~;\\ \frac{v}{2},~1+\frac{v}{2}-\frac{a}{2c}-\frac{b}{2c},~1+\frac{v}{2}+\frac{a}{2c}+\frac{b}{2c},~1+\frac{v}{2}-\frac{a}{2c}+\frac{b}{2c},~1+\frac{v}{2}+\frac{a}{2c}-\frac{b}{2c},~~;\end{array} -1\right), \end{multline} \end{theorem} where $\Re(v)<4,~\Re(c)>0,~\Re(vc\pm a\pm b)>0;~\frac{v}{2},~1+\frac{v}{2}\pm\frac{a}{2c}\pm\frac{b}{2c}\in\mathbb{C}\backslash \mathbb{Z}_{0}^{-}$. \begin{theorem} The second generalized hyperbolic integral holds true: \begin{multline}\label{GRI46} \int_{0}^{\infty}\frac{\sinh(ax)\sinh(bx)}{\sinh^{v}(cx)}dx=\frac{2^{v+1}v abc}{(vc-a-b)(vc+a+b)(vc-a+b)(vc+a-b)}\times\\\times ~~{}_{6}F_{5}\left(\begin{array}{lll}v,~~1+\frac{v}{2},~~\frac{v}{2}-\frac{a}{2c}-\frac{b}{2c},~~\frac{v}{2}+\frac{a}{2c}+\frac{b}{2c}, ~~\frac{v}{2}-\frac{a}{2c}+\frac{b}{2c},~~\frac{v}{2}+\frac{a}{2c}-\frac{b}{2c}~~~~~~~~~~~~;\\ \frac{v}{2},~1+\frac{v}{2}-\frac{a}{2c}-\frac{b}{2c},~1+\frac{v}{2}+\frac{a}{2c}+\frac{b}{2c},~1+\frac{v}{2}-\frac{a}{2c}+\frac{b}{2c},~1+\frac{v}{2}+\frac{a}{2c}-\frac{b}{2c},~~;\end{array} 1\right), \end{multline} \end{theorem} where $\Re(v)<3,~\Re(c)>0,~\Re(vc\pm a\pm b)>0;~\frac{v}{2},~1+\frac{v}{2}\pm\frac{a}{2c}\pm\frac{b}{2c}\in\mathbb{C}\backslash \mathbb{Z}_{0}^{-}$. 
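As a numerical illustration of (\ref{GRI45}) (not part of the proof), the integral can be compared with its ${}_{6}F_{5}(-1)$ representation for one admissible parameter choice. The sketch below assumes Python with the \texttt{mpmath} library and sums the series directly with convergence acceleration; $v$, $a$, $b$, $c$ are arbitrary values with $vc>a+b$.
\begin{verbatim}
# Quadrature versus the 6F5(-1) representation in (GRI45).
from mpmath import mp, mpf, quad, nsum, rf, factorial, sinh, cosh, inf

mp.dps = 20
v, a, b, c = mpf('1.5'), mpf('0.3'), mpf('0.2'), mpf('1.0')
s, t = a/(2*c), b/(2*c)

integral = quad(lambda x: sinh(a*x)*sinh(b*x)/cosh(c*x)**v, [0, inf])

num = [v, 1 + v/2, v/2 - s - t, v/2 + s + t, v/2 - s + t, v/2 + s - t]
den = [v/2, 1 + v/2 - s - t, 1 + v/2 + s + t, 1 + v/2 - s + t, 1 + v/2 + s - t]

def term(r):
    r = int(r)
    u = (-1)**r / factorial(r)
    for p in num: u *= rf(p, r)
    for q in den: u /= rf(q, r)
    return u

pref = 2**(v + 1)*v*a*b*c/((v*c - a - b)*(v*c + a + b)*(v*c - a + b)*(v*c + a - b))
print(integral, pref*nsum(term, [0, inf]))   # should agree
\end{verbatim}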
\begin{theorem} The third generalized hyperbolic integral holds true: \begin{multline}\label{GRI47} \int_{0}^{\infty}\frac{\sinh(ax)\cosh(bx)}{\cosh^{v}(cx)}dx=\frac{2^{v}(v^{2}ac^{2}-a^{3}+ab^{2})}{(vc-a-b)(vc+a+b)(vc-a+b)(vc+a-b)}\times\\\times {}_{7}F_{6}\left(\begin{array}{lll}v,1+\sigma_{1},~1+\sigma_{2},~\sigma_{3},~\sigma_{4} ,\sigma_{5},~ \sigma_{6}~~~~~~~~~;\\ \sigma_{1},\sigma_{2},1+\sigma_{3} ,~1+\sigma_{4},~1+\sigma_{5},~1+ \sigma_{6};\end{array} -1\right), \end{multline} \end{theorem} where \begin{equation*} \sigma_{1}= \frac{v}{2}-\frac{\sqrt{a^{2}-b^{2}}}{2c},~\sigma_{2}=\frac{v}{2}+\frac{\sqrt{a^{2}-b^{2}}}{2c}~, \sigma_{3}=\frac{v}{2}-\frac{a}{2c}-\frac{b}{2c}, \end{equation*} \begin{equation*} ~\sigma_{4}=\frac{v}{2}-\frac{a}{2c}+\frac{b}{2c},~\sigma_{5}=\frac{v}{2}+\frac{a}{2c}+\frac{b}{2c},~ \sigma_{6}=\frac{v}{2}+\frac{a}{2c}-\frac{b}{2c}, \end{equation*} and $\Re(v)<3,~\Re(c)>0,~\Re(vc\pm a\pm b)>0;~\frac{v}{2}\pm\frac{\sqrt{a^{2}-b^{2}}}{2c},~1+\frac{v}{2}\pm\frac{a}{2c}\pm\frac{b}{2c}\in\mathbb{C}\backslash \mathbb{Z}_{0}^{-}$. \begin{theorem} The fourth generalized hyperbolic integral holds true: \begin{multline}\label{GRI48} \int_{0}^{\infty}\frac{\sinh(ax)\cosh(bx)}{\sinh^{v}(cx)}dx=\frac{2^{v}(v^{2}ac^{2}-a^{3}+ab^{2})}{(vc-a-b)(vc+a+b)(vc-a+b)(vc+a-b)}\times\\\times {}_{7}F_{6}\left(\begin{array}{lll}v,1+\sigma_{1},~1+\sigma_{2},~\sigma_{3},~\sigma_{4} ,\sigma_{5},~ \sigma_{6}~~~~~~~~~;\\ \sigma_{1},\sigma_{2},1+\sigma_{3} ,~1+\sigma_{4},~1+\sigma_{5},~1+ \sigma_{6};\end{array} 1\right), \end{multline} \end{theorem} where \begin{equation*} \sigma_{1}= \frac{v}{2}-\frac{\sqrt{a^{2}-b^{2}}}{2c},~\sigma_{2}=\frac{v}{2}+\frac{\sqrt{a^{2}-b^{2}}}{2c}~, \sigma_{3}=\frac{v}{2}-\frac{a}{2c}-\frac{b}{2c}, \end{equation*} \begin{equation*} ~\sigma_{4}=\frac{v}{2}-\frac{a}{2c}+\frac{b}{2c},~\sigma_{5}=\frac{v}{2}+\frac{a}{2c}+\frac{b}{2c},~ \sigma_{6}=\frac{v}{2}+\frac{a}{2c}-\frac{b}{2c}, \end{equation*} and $\Re(v)<2,~\Re(c)>0,~\Re(vc\pm a\pm b)>0;~\frac{v}{2}\pm\frac{\sqrt{a^{2}-b^{2}}}{2c},~1+\frac{v}{2}\pm\frac{a}{2c}\pm\frac{b}{2c}\in\mathbb{C}\backslash \mathbb{Z}_{0}^{-}$. \begin{theorem} The fifth generalized hyperbolic integral holds true: \begin{multline}\label{GRI49} \int_{0}^{\infty}\frac{\cosh(ax)\cosh(bx)}{\cosh^{v}(cx)}dx=\frac{2^{v}(v^{3}c^{3}-a^{2}vc-b^{2}vc)}{(vc-a-b)(vc+a+b)(vc-a+b)(vc+a-b)}\times\\\times {}_{8}F_{7}\left(\begin{array}{lll}v,~1+\frac{v}{2},~1+\lambda_{1},~1+\lambda_{2},~\sigma_{3},~\sigma_{4} ,\sigma_{5},~ \sigma_{6}~;\\ \frac{v}{2},~ \lambda_{1},~\lambda_{2},1+\sigma_{3} ,~1+\sigma_{4},~1+\sigma_{5},~1+\sigma_{6}~;\end{array} -1\right), \end{multline} \end{theorem} where \begin{equation*} \lambda_{1}= \frac{v}{2}-\frac{\sqrt{a^{2}+b^{2}}}{2c},~\lambda_{2}=\frac{v}{2}+\frac{\sqrt{a^{2}+b^{2}}}{2c}~, \sigma_{3}=\frac{v}{2}-\frac{a}{2c}-\frac{b}{2c}, \end{equation*} \begin{equation*} ~\sigma_{4}=\frac{v}{2}-\frac{a}{2c}+\frac{b}{2c},~\sigma_{5}=\frac{v}{2}+\frac{a}{2c}+\frac{b}{2c},~ \sigma_{6}=\frac{v}{2}+\frac{a}{2c}-\frac{b}{2c}, \end{equation*} and $\Re(v)<2,~\Re(c)>0,~\Re(vc\pm a\pm b)>0;\frac{v}{2},~\frac{v}{2}\pm\frac{\sqrt{a^{2}+b^{2}}}{2c},~1+\frac{v}{2}\pm\frac{a}{2c}\pm\frac{b}{2c}\in\mathbb{C}\backslash \mathbb{Z}_{0}^{-}$. 
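A similar spot check can be made for (\ref{GRI47}); the sketch below (Python with \texttt{mpmath}) uses one arbitrary admissible parameter choice with $a>b$, so that $\sigma_{1}$ and $\sigma_{2}$ are real, and again sums the ${}_{7}F_{6}(-1)$ series directly.
\begin{verbatim}
# Quadrature versus the 7F6(-1) representation in (GRI47).
from mpmath import mp, mpf, sqrt, quad, nsum, rf, factorial, sinh, cosh, inf

mp.dps = 20
v, a, b, c = mpf('1.5'), mpf('0.3'), mpf('0.2'), mpf('1.0')
s, t, T = a/(2*c), b/(2*c), sqrt(a**2 - b**2)/(2*c)
s3, s4, s5, s6 = v/2 - s - t, v/2 - s + t, v/2 + s + t, v/2 + s - t

integral = quad(lambda x: sinh(a*x)*cosh(b*x)/cosh(c*x)**v, [0, inf])

num = [v, 1 + v/2 - T, 1 + v/2 + T, s3, s4, s5, s6]
den = [v/2 - T, v/2 + T, 1 + s3, 1 + s4, 1 + s5, 1 + s6]

def term(r):
    r = int(r)
    u = (-1)**r / factorial(r)
    for p in num: u *= rf(p, r)
    for q in den: u /= rf(q, r)
    return u

pref = 2**v*(v**2*a*c**2 - a**3 + a*b**2)/((v*c - a - b)*(v*c + a + b)*(v*c - a + b)*(v*c + a - b))
print(integral, pref*nsum(term, [0, inf]))   # should agree
\end{verbatim}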
\begin{theorem} The sixth generalized hyperbolic integral holds true: \begin{multline}\label{GRI50} \int_{0}^{\infty}\frac{\cosh(ax)\cosh(bx)}{\sinh^{v}(cx)}dx=\frac{2^{v}(v^{3}c^{3}-a^{2}vc-b^{2}vc)}{(vc-a-b)(vc+a+b)(vc-a+b)(vc+a-b)}\times\\\times {}_{8}F_{7}\left(\begin{array}{lll}v,~1+\frac{v}{2},~1+\lambda_{1},~1+\lambda_{2},~\sigma_{3},~\sigma_{4} ,\sigma_{5},~ \sigma_{6}~;\\ \frac{v}{2},~ \lambda_{1},~\lambda_{2},1+\sigma_{3} ,~1+\sigma_{4},~1+\sigma_{5},~1+\sigma_{6}~;\end{array} 1\right), \end{multline} \end{theorem} where \begin{equation*} \lambda_{1}= \frac{v}{2}-\frac{\sqrt{a^{2}+b^{2}}}{2c},~\lambda_{2}=\frac{v}{2}+\frac{\sqrt{a^{2}+b^{2}}}{2c}~, \sigma_{3}=\frac{v}{2}-\frac{a}{2c}-\frac{b}{2c}, \end{equation*} \begin{equation*} ~\sigma_{4}=\frac{v}{2}-\frac{a}{2c}+\frac{b}{2c},~\sigma_{5}=\frac{v}{2}+\frac{a}{2c}+\frac{b}{2c},~ \sigma_{6}=\frac{v}{2}+\frac{a}{2c}-\frac{b}{2c}, \end{equation*} and $\Re(v)<1,~\Re(c)>0,~\Re(vc\pm a\pm b)>0;~\frac{v}{2},~\frac{v}{2}\pm\frac{\sqrt{a^{2}+b^{2}}}{2c},~1+\frac{v}{2}\pm\frac{a}{2c}\pm\frac{b}{2c}\in\mathbb{C}\backslash \mathbb{Z}_{0}^{-}$.\\ \textbf{Hypergeometric proof of integral (\ref{GRI49})}: Suppose left hand side of eq.(\ref{GRI49}) is denoted by $\Upsilon(a,b,c,v)$ and using the product formula of hyperbolic function in the left hand side of eq.(\ref{GRI49}), we get \begin{equation}\label{GRI78} \Upsilon(a,b,c,v)= \frac{1}{2}\int_{0}^{\infty}\frac{\cosh\{(a+b)x\}}{\cosh^{v}(c x)}dx+\frac{1}{2}\int_{0}^{\infty}\frac{\cosh\{(a-b)x\}}{\cosh^{v}(c x)}dx=\textbf{L}_{1}+\textbf{L}_{2}, \end{equation} where $\textbf{L}_{1}$ and $\textbf{L}_{2}$ are given by \begin{equation}\label{GRI79} \textbf{L}_{1}= \frac{1}{2}\int_{0}^{\infty}\frac{\cosh\{(a+b)x\}}{\cosh^{v}(c x)}dx ~~and~~\textbf{L}_{2}=\frac{1}{2}\int_{0}^{\infty}\frac{\cosh\{(a-b)x\}}{\cosh^{v}(c x)}dx. \end{equation} Using exponential definition of hyperbolic functions in the integral $\textbf{L}_{1}$, which yields \begin{equation*}\label{GRI80} \textbf{L}_{1}= 2^{v-2}\int_{0}^{\infty}e^{-vcx}\bigg[e^{(a+b)x}+e^{-(a+b)x}\bigg]\left(1+e^{-2cx}\right)^{-v}dx. \end{equation*} \begin{equation}\label{GRI81} = 2^{v-2}\int_{0}^{\infty}e^{-vcx}\bigg[e^{(a+b)x}+e^{-(a+b)x}\bigg] {}_{1}F_{0}\left(\begin{array}{lll}v~;\\ \overline{~~~};\end{array} -e^{-2cx} \right)dx, \end{equation} when $\Re(c)>0$ , then $|-e^{-2cx}|<1$ for all real $x>0$. It is the convergence condition of above binomial function ${}_{1}F_{0}(\cdot)$ in eq.(\ref{GRI81}), then it yields \begin{equation}\label{GRI82} \textbf{L}_{1} =2^{v-2}\sum_{r=0}^{\infty}\frac{(v)_{r}}{r!}(-1)^{r}\bigg[\int_{0}^{\infty}e^{-\{(vc-a-b)+2cr\}x}dx+\int_{0}^{\infty}e^{-\{(vc+a+b)+2cr\}x}dx\bigg], \end{equation} where $\Re(vc-a-b)>0,~\Re(vc+a+b)>0,~\Re(c)>0$, it is the convergence conditions of Laplace transform of unity in the integral (\ref{GRI82}). Then applying Laplace transformation formula (\ref{GRI22}) in the eq.(\ref{GRI82}), we obtain \begin{equation}\label{GRI83} \textbf{L}_{1}=2^{v-2}\sum_{r=0}^{\infty}\frac{(v)_{r}}{r!}(-1)^{r}\bigg[\frac{1}{\{(vc-a-b)+2cr\}}+\frac{1}{\{(vc+a+b)+2cr\}}\bigg], \end{equation} ~~~~ where $\Re(vc-a-b)>0,~\Re(vc+a+b)>0,~\Re(c)>0$.\\ Similarly, proof of $\textbf{L}_{2}$ is given by \begin{equation}\label{GRI84} \textbf{L}_{2}=2^{v-2}\sum_{r=0}^{\infty}\frac{(v)_{r}}{r!}(-1)^{r}\bigg[\frac{1}{\{(vc-a+b)+2cr\}}+\frac{1}{\{(vc+a-b)+2cr\}}\bigg], \end{equation} ~~~~ where $\Re(vc-a+b)>0,~\Re(vc+a-b)>0,~\Re(c)>0$.\\ Making use of the eqns (\ref{GRI83}) and (\ref{GRI84}) in the above eq. 
(\ref{GRI78}), we obtain \begin{multline*}\label{GRI85} \Upsilon(a,b,c,v)=2^{v-2}\sum_{r=0}^{\infty}\frac{(v)_{r}}{r!}(-1)^{r}\times\\\times \bigg[\frac{1}{\{(vc-a-b)+2cr\}}+\frac{1}{\{(vc+a+b)+2cr\}} +\frac{1}{\{(vc-a+b)+2cr\}}+\frac{1}{\{(vc+a-b)+2cr\}}\bigg], \end{multline*} \begin{eqnarray}\label{GRI86} =2^{v-1}\sum_{r=0}^{\infty}\frac{(v)_{r}}{r!}(-1)^{r}\times\nonumber~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\\\times \bigg[\frac{(vc-a+2cr)}{\{(vc-a-b)+2cr\}\{(vc-a+b)+2cr\}}+\frac{(vc+a+2cr)}{\{(vc+a+b)+2cr\}\{(vc+a-b)+2cr\}}\bigg]\nonumber\\ \end{eqnarray} ~~~~ where $\Re(vc\pm a\pm b)>0,~\Re(c)>0$. After simplifications we obtain \begin{eqnarray*} \Upsilon(a,b,c,v)=2^{v-1}\sum_{r=0}^{\infty}\frac{(v)_{r}}{r!}(-1)^{r}\times\nonumber~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\\\times \bigg[\frac{16c^{3}r^{3}+24vc^{3}r^{2}+(12v^{2}c^{3}-4a^{2}c-4b^{2}c)r + (2v^{3}c^{3}-2va^{2}c-2b^{2}vc)}{\{(vc-a-b)+2cr\}\{(vc+a+b)+2cr\}\{(vc-a+b)+2cr\}\{(vc+a-b)+2cr\}}\bigg],\nonumber\\ \end{eqnarray*} \begin{eqnarray}\label{GRI87} =2^{v-1}\sum_{r=0}^{\infty}\frac{(v)_{r}}{r!}(-1)^{r}\times\nonumber~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\\\times \bigg[\frac{16c^{3}\left\{r-\left(\frac{-vc+\sqrt{a^{2}+b^{2}}}{2c}\right)\right\}\left\{r-\left(\frac{-vc-\sqrt{a^{2}+b^{2}}}{2c}\right)\right\}\left\{r+\frac{v}{2}\right\}} {\{(vc-a-b)+2cr\}\{(vc+a+b)+2cr\}\{(vc-a+b)+2cr\}\{(vc+a-b)+2cr\}}\bigg],\nonumber\\ \end{eqnarray} ~~~~ where $\Re(vc\pm a\pm b)>0,~\Re(c)>0$.\\ Employ algebraic properties of Pochhammer symbol in the eq.(\ref{GRI87}), after simplifications, we obtain \begin{eqnarray*}\label{GRI89} \Upsilon(a,b,c,v)=\frac{2^{v}(v^{3}c^{3}-a^{2}vc-b^{2}vc)}{(vc-a-b)(vc+a+b)(vc-a+b)(vc+a-b)}\sum_{r=0}^{\infty}\bigg[\frac{(v)_{r}\left(1+\frac{v}{2}\right)_{r}}{\left(\frac{v}{2}\right)_{r}r!}\times~~~~~~~~~~~~~\\ \times\frac{\left(1+\frac{v}{2}-\frac{\sqrt{a^{2}+b^{2}}}{2c}\right)_{r}\left(1+\frac{v}{2}+\frac{\sqrt{a^{2}+b^{2}}}{2c}\right)_{r}\left(\frac{vc-a-b}{2c}\right)_{r}\left(\frac{vc+a+b}{2c} \right)_{r}\left(\frac{vc-a+b}{2c}\right)_{r}\left(\frac{vc+a-b}{2c}\right)_{r}(-1)^{r}} {\left(\frac{v}{2}-\frac{\sqrt{a^{2}+b^{2}}}{2c}\right)_{r}\left(\frac{v}{2}+\frac{\sqrt{a^{2}+b^{2}}}{2c}\right)_{r}\left(\frac{vc-a-b+2c}{2c}\right)_{r}\left(\frac{vc+a+b+2c}{2c}\right)_{r}\left(\frac{vc-a+b+2c}{2c}\right)_{r}\left(\frac{vc+a-b+2c}{2c}\right)_{r}}\bigg], \end{eqnarray*} \begin{eqnarray}\label{GRI90} =\frac{2^{v}(v^{3}c^{3}-a^{2}vc-b^{2}vc)}{(vc-a-b)(vc+a+b)(vc-a+b)(vc+a-b)}\times~~~~~~~~~~~~~~~~~~~~~~~~\nonumber\\ \times {}_{8}F_{7}\left(\begin{array}{lll}v,~1+\frac{v}{2},~1+\lambda_{1},~1+\lambda_{2},~\sigma_{3},~\sigma_{4} ,\sigma_{5},~ \sigma_{6}~;\\ \frac{v}{2},~ \lambda_{1},~\lambda_{2},1+\sigma_{3} ,~1+\sigma_{4},~1+\sigma_{5},~1+\sigma_{6}~;\end{array} -1\right), \end{eqnarray} where \begin{equation*} \lambda_{1}= \frac{v}{2}-\frac{\sqrt{a^{2}+b^{2}}}{2c},~\lambda_{2}=\frac{v}{2}+\frac{\sqrt{a^{2}+b^{2}}}{2c}~, \sigma_{3}=\frac{v}{2}-\frac{a}{2c}-\frac{b}{2c}, \end{equation*} \begin{equation*} ~\sigma_{4}=\frac{v}{2}-\frac{a}{2c}+\frac{b}{2c},~\sigma_{5}=\frac{v}{2}+\frac{a}{2c}+\frac{b}{2c},~ \sigma_{6}=\frac{v}{2}+\frac{a}{2c}-\frac{b}{2c}, \end{equation*} and $\Re(c)>0,~\Re(vc\pm a\pm b)>0;\frac{v}{2},~\frac{v}{2}\pm\frac{\sqrt{a^{2}+b^{2}}}{2c},~1+\frac{v}{2}\pm\frac{a}{2c}\pm\frac{b}{2c}\in\mathbb{C}\backslash \mathbb{Z}_{0}^{-}$. 
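The key step of this proof, namely the binomial expansion of $\cosh^{-v}(cx)$ followed by termwise Laplace transforms leading to (\ref{GRI83}), can also be verified numerically. The following sketch (Python with \texttt{mpmath}; $v$, $a$, $b$, $c$ are arbitrary admissible values) compares the quadrature value of $\textbf{L}_{1}$ with the alternating series.
\begin{verbatim}
# Check of the intermediate series representation (GRI83) for L1.
from mpmath import mp, mpf, quad, nsum, rf, factorial, cosh, inf

mp.dps = 25
v, a, b, c = mpf('0.5'), mpf('0.2'), mpf('0.1'), mpf('1.0')

L1_quad = quad(lambda x: cosh((a + b)*x)/cosh(c*x)**v, [0, inf]) / 2

def term(r):
    r = int(r)
    return (rf(v, r)/factorial(r)) * (-1)**r * (1/((v*c - a - b) + 2*c*r)
                                                + 1/((v*c + a + b) + 2*c*r))

L1_series = 2**(v - 2) * nsum(term, [0, inf])
print(L1_quad, L1_series)   # the two values should agree
\end{verbatim}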
Similarly, proofs of the integrals (\ref{GRI45}),(\ref{GRI46}),(\ref{GRI47}),(\ref{GRI48}) and (\ref{GRI50}) are much akin to that of the integral (\ref{GRI49}), which we have already discussed in a detailed manner. \section{Special class of some hyperbolic integrals in terms of ${}_{3}F_{2}(\pm1),{}_{4}F_{3}(\pm1)$ and ${}_{5}F_{4}(\pm1)$} \texttt{Each of the following hyperbolic definite integrals holds true:} $\bullet$ When $v=2$ and $c=b$ in the eq.(\ref{GRI45}), we get \begin{equation}\label{FA3} \int_{0}^{\infty}\frac{\sinh(ax)\sinh(bx)}{\cosh^{2}(bx)}dx=\frac{16ab^{2}}{(b^{2}-a^{2})(9b^{2}-a^{2})} ~~{}_{4}F_{3}\left(\begin{array}{lll}2,2,~\frac{1}{2}+\frac{a}{2b},~\frac{1}{2}-\frac{a}{2b};\\ 1,~\frac{5}{2}+\frac{a}{2b},~\frac{5}{2}-\frac{a}{2b}~~~;\end{array} -1\right), \end{equation} \begin{equation}\label{FA00} ~~~=\frac{a\pi}{2b^{2}}\sec\left(\frac{\pi a}{2b}\right),~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \end{equation} where $\Re(b)>0,~\Re(b\pm a)>0,~\Re(3b\pm a)>0,~\frac{5}{2}\pm\frac{a}{2b}\in\mathbb{C}\backslash \mathbb{Z}_{0}^{-}$;~ $\frac{a}{b}\in\mathbb{C}\backslash\{\pm1,\pm3,\pm5,...\}$. Using hypergeometric form of $\sec (z) $ function (\ref{GRI32}) [when $z=\frac{\pi a}{2b}$ ] in the eq. (\ref{FA3}), we obtain right hand side of (\ref{FA00}). Also, right hand side of eq.(\ref{FA00}) can be obtained by using summation theorem (\ref{GRI12}) and recurrence relation for gamma function in the hypergeometric series (\ref{FA3}).\\ $\bullet$ When $v=2$ and $c=b$ in the eq.(\ref{GRI46}), we get \begin{equation}\label{FA33} \int_{0}^{\infty}\frac{\sinh(ax)}{\sinh(bx)}dx=\frac{16ab^{2}}{(b^{2}-a^{2})(9b^{2}-a^{2})} ~~{}_{4}F_{3}\left(\begin{array}{lll}2,2,~\frac{1}{2}+\frac{a}{2b},~\frac{1}{2}-\frac{a}{2b};\\ 1,~\frac{5}{2}+\frac{a}{2b},~\frac{5}{2}-\frac{a}{2b}~~~;\end{array} 1\right), \end{equation} \begin{equation}\label{FA000} ~~~=\frac{\pi}{2b}\tan\left(\frac{\pi a}{2b}\right),~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \end{equation} where $\Re(b)>0,~\Re(b\pm a)>0,~\Re(3b\pm a)>0,~\frac{5}{2}\pm\frac{a}{2b}\in\mathbb{C}\backslash \mathbb{Z}_{0}^{-}$;~ $\frac{a}{b}\in\mathbb{C}\backslash\{\pm1,\pm3,\pm5,...\}$. 
The right hand side of eq.(\ref{FA000}) can be obtained by using summation theorem (\ref{GRI102}), recurrence relations for gamma function in the hypergeometric series (\ref{FA33}).\\ $\bullet$ When $v=2$, $b=a$, $c=1$ in the eq.(\ref{GRI46}), we get \begin{eqnarray}\label{FA0011} \frac{1}{2}\int_{-\infty}^{\infty}\frac{\sinh^{2}(a x)}{\sinh^{2}( x)}dx=\int_{0}^{\infty}\frac{\sinh^{2}(a x)}{\sinh^{2}( x)}dx =\frac{a^{2}}{(1-a^{2})}~~{}_{3}F_{2}\left(\begin{array}{lll}1,~1-a,~1+a;\\ 2-a,~2+a~~~~;\end{array} 1\right), \end{eqnarray} \begin{equation} =\frac{a}{2}[\Psi(1+a)-\Psi(1-a)], \end{equation} \begin{equation}\label{FA0012} =\frac{1}{2}[1-a\pi\cot(a\pi)],~~~~~~~~~~~ \end{equation} where $a\neq0,\pm1,\pm2,\pm3,...$\\ The right hand side of eq.(\ref{FA0012}) can be obtained by using the properties of Digamma function (\ref{GRI24})-(\ref{GRI262}) in the hypergeometric series (\ref{FA0011}).\\ $\bullet$ In the eq.(\ref{GRI47}) we interchange $a$ and $b$; $v=2$, then put $c=b$, we get \begin{multline}\label{FA001} \int_{0}^{\infty}\frac{\cosh(ax)\sinh(bx)}{\cosh^{2}(bx)}dx\\=\frac{(12b^{3}+4a^{2}b)}{(b^{2}-a^{2})(9b^{2}-a^{2})} ~~{}_{5}F_{4}\left(\begin{array}{lll}2,~2-\frac{\sqrt{b^{2}-a^{2}}}{2b},~2+\frac{\sqrt{b^{2}-a^{2}}}{2b},~\frac{1}{2}-\frac{a}{2b},~\frac{1}{2}+\frac{a}{2b};\\ 1-\frac{\sqrt{b^{2}-a^{2}}}{2b},~1+\frac{\sqrt{b^{2}-a^{2}}}{2b},~\frac{5}{2}+\frac{a}{2b},~\frac{5}{2}-\frac{a}{2b}~~~~;\end{array} -1\right), \end{multline} where $\Re(b)>0,~\Re(b\pm a)>0,~\Re(3b\pm a)>0;~1\pm\frac{\sqrt{b^{2}-a^{2}}}{2b},~\frac{5}{2}\pm\frac{a}{2b}\in\mathbb{C}\backslash \mathbb{Z}_{0}^{-}$ .\\ $\bullet$ When $b=0$ in the eq.(\ref{GRI47}), we get \begin{equation}\label{cc1} \int_{0}^{\infty}\frac{\sinh(ax)}{\cosh^{v}(cx)}dx=\frac{2^{v}a}{[(vc)^{2}-a^{2}]} ~~{}_{3}F_{2}\left(\begin{array}{lll}v,~\frac{v}{2}-\frac{a}{2c},~\frac{v}{2}+\frac{a}{2c};\\ 1+\frac{v}{2}-\frac{a}{2c},~1+\frac{v}{2}+\frac{a}{2c};\end{array} -1\right), \end{equation} \begin{equation}\label{cc7} = \frac{2^{v-1}}{(vc-a)}~{}_{2}F_{1}\left(\begin{array}{lll}v,~\frac{v}{2}-\frac{a}{2c}~~;\\ 1+\frac{v}{2}-\frac{a}{2c};\end{array} -1\right) -\frac{2^{v-1}}{(vc+a)}~{}_{2}F_{1}\left(\begin{array}{lll}v,~\frac{v}{2}+\frac{a}{2c}~~;\\ 1+\frac{v}{2}+\frac{a}{2c};\end{array} -1\right), \end{equation} where $\Re(v)<3,~\Re(c)>0,~\Re(vc\pm a)>0,~~1+\frac{v}{2}\pm\frac{a}{2c}\in\mathbb{C}\backslash \mathbb{Z}_{0}^{-}$.\\ $\bullet$ When $b=0$ in the eq.(\ref{GRI48}), we get \begin{equation}\label{cc2} \int_{0}^{\infty}\frac{\sinh(ax)}{\sinh^{v}(cx)}dx=\frac{2^{v}a}{[(vc)^{2}-a^{2}]} ~~{}_{3}F_{2}\left(\begin{array}{lll}v,~\frac{v}{2}-\frac{a}{2c},~\frac{v}{2}+\frac{a}{2c};\\ 1+\frac{v}{2}-\frac{a}{2c},~1+\frac{v}{2}+\frac{a}{2c};\end{array} 1\right), \end{equation} \begin{equation}\label{cc2} =\frac{2^{v-2}}{(c)\Gamma(v)}\Gamma\left(\frac{v}{2}+\frac{a}{2c}\right)\Gamma\left(\frac{v}{2}-\frac{a}{2c}\right) \frac{\sin\left(\frac{a\pi}{2c}\right)}{\sin\left(\frac{v\pi}{2}\right)}, \end{equation} where $\Re(v)<2,~\Re(c)>0,~\Re(vc\pm a)>0,~~1+\frac{v}{2}\pm\frac{a}{2c}\in\mathbb{C}\backslash \mathbb{Z}_{0}^{-}$ and $v\neq0,\pm2,\pm4,\pm6,...$\\ $\bullet$ When $v=1$ and $b=0$, then $c=b$ in the eq.(\ref{GRI48}), we get \begin{eqnarray}\label{FA1} \int_{0}^{\infty}\frac{\sinh(ax)}{\sinh(b x)}dx= \frac{2a}{(b^{2}-a^{2})}~~{}_{3}F_{2}\left(\begin{array}{lll}1,~\frac{1}{2}-\frac{a}{2b},~~\frac{1}{2}+\frac{a}{2b};\\ \frac{3}{2}-\frac{a}{2b},~~\frac{3}{2}+\frac{a}{2b}~~~~;\end{array} 1\right), \end{eqnarray} \begin{equation}\label{FA49} 
=\frac{\pi}{2b}\tan\left(\frac{\pi a}{2b}\right),~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \end{equation} where $\Re(b)>0,~\Re(b\pm a)>0,~\frac{3}{2}\pm\frac{a}{2b}\in\mathbb{C}\backslash \mathbb{Z}_{0}^{-}$;~$\frac{a}{b}\in\mathbb{C}\backslash\{\pm1,\pm3,\pm5,...\}$.\\ Using hypergeometric form of $\tan (z) $ function (\ref{GRI31}) [ when $z=\frac{\pi a}{2b}$ ] in the eq. (\ref{FA1}), we obtain right hand side of eq.(\ref{FA49}). Also, right hand side of eq.(\ref{FA49}) can be obtained by using the Dixon's theorem ${}_{3}F_{2}(1)$ (\ref{GRI11}) in the eq. (\ref{FA1}).\\ $\bullet$ When $b=0$ in the eq.(\ref{GRI49}), we get \begin{equation} \int_{0}^{\infty}\frac{\cosh(ax)}{\cosh^{v}(cx)}dx=\frac{2^{v}(vc)}{[(vc)^{2}-a^{2}]} ~~{}_{4}F_{3}\left(\begin{array}{lll}v,~1+\frac{v}{2},\frac{v}{2}-\frac{a}{2c},~\frac{v}{2}+\frac{a}{2c};\\ \frac{v}{2}, 1+\frac{v}{2}-\frac{a}{2c},~1+\frac{v}{2}+\frac{a}{2c}~~~;\end{array} -1\right), \end{equation} \begin{equation}\label{cc3} =\frac{2^{v-2}}{(c)\Gamma(v)}\Gamma\left(\frac{v}{2}+\frac{a}{2c}\right)\Gamma\left(\frac{v}{2}-\frac{a}{2c}\right), \end{equation} where $\Re(v)<2,~\Re(c)>0,~\Re(vc\pm a)>0;~~\frac{v}{2},1+\frac{v}{2}\pm\frac{a}{2c}\in\mathbb{C}\backslash \mathbb{Z}_{0}^{-}$.\\ $\bullet$ When $v=1$ and $b=0$, then $c=b$ in the eq.(\ref{GRI49}), we get \begin{eqnarray}\label{FA2} \int_{0}^{\infty}\frac{\cosh(ax)}{\cosh(bx)}dx=\frac{2b}{(b^{2}-a^{2})}~~{}_{4}F_{3}\left(\begin{array}{lll}1,~\frac{3}{2},~\frac{1}{2}-\frac{a}{2b},~~\frac{1}{2}+\frac{a}{2b};\\ \frac{1}{2},~\frac{3}{2}-\frac{a}{2b},~~\frac{3}{2}+\frac{a}{2b}~~~~;\end{array} -1\right), \end{eqnarray} \begin{equation}\label{FA50} =\frac{\pi}{2b}\sec\left(\frac{\pi a}{2b}\right),~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \end{equation} where $\Re(b)>0,~\Re(b\pm a)>0,~\frac{3}{2}\pm\frac{a}{2b}\in\mathbb{C}\backslash \mathbb{Z}_{0}^{-}$;~ $\frac{a}{b}\in\mathbb{C}\backslash\{\pm1,\pm3,\pm5,...\}$.\\ Using hypergeometric form of $\sec (z) $ function (\ref{GRI32}) [when $z=\frac{\pi a}{2b}$ ] in the eq. (\ref{FA2}), we obtain right hand side of eq. (\ref{FA50}). Also, right hand side of eq.(\ref{FA50}) can be obtained by using the classical summation theorem ${}_{4}F_{3}(-1)$ (\ref{GRI12}) in the eq. (\ref{FA2}).\\ $\bullet$ In the eq.(\ref{FA50}) replacing $a\rightarrow 2ia $ and $b=\pi$, we get a known result of Ramanujan \cite[p.11,eq.(1.5.1(27))]{E1} \begin{equation}\label{F00} \int_{0}^{\infty}\frac{\cos(2ax)}{\cosh(\pi x)}dx =\frac{1}{2}sech\left(a\right),~~~~~~~~~|\Im(a)|<\frac{\pi}{2}. 
\end{equation}
$\bullet$ When $b=0$ in the eq.(\ref{GRI50}), we get
\begin{equation}
\int_{0}^{\infty}\frac{\cosh(ax)}{\sinh^{v}(cx)}dx=\frac{2^{v}(vc)}{[(vc)^{2}-a^{2}]} ~~{}_{4}F_{3}\left(\begin{array}{lll}v,~1+\frac{v}{2},\frac{v}{2}-\frac{a}{2c},~\frac{v}{2}+\frac{a}{2c};\\ \frac{v}{2}, 1+\frac{v}{2}-\frac{a}{2c},~1+\frac{v}{2}+\frac{a}{2c}~~~;\end{array} 1\right),
\end{equation}
\begin{equation}\label{cc4}
=\frac{2^{v-2}}{(c)\Gamma(v)}\Gamma\left(\frac{v}{2}+\frac{a}{2c}\right)\Gamma\left(\frac{v}{2}-\frac{a}{2c}\right) \frac{\cos\left(\frac{a\pi}{2c}\right)}{\cos\left(\frac{v\pi}{2}\right)},
\end{equation}
where $\Re(v)<1,~\Re(c)>0,~\Re(vc\pm a)>0;~~\frac{v}{2},1+\frac{v}{2}\pm\frac{a}{2c}\in\mathbb{C}\backslash \mathbb{Z}_{0}^{-}$ and $v\neq\pm1,\pm3,\pm5,...$\\
$\bullet$ When $v=1$ and $b=0$, then $c=b$ in the eq.(\ref{GRI47}), we get
\begin{equation}\label{FA54}
\int_{0}^{\infty}\frac{\sinh(ax)}{\cosh(b x)}dx =\frac{2a}{(b^{2}-a^{2})}{}_{3}F_{2}\left(\begin{array}{lll}1,~\frac{1}{2}-\frac{a}{2b},~~\frac{1}{2}+\frac{a}{2b};\\ \frac{3}{2}-\frac{a}{2b},~~\frac{3}{2}+\frac{a}{2b}~~~~;\end{array} -1\right),
\end{equation}
\begin{equation}\label{FA503}
~~~=\frac{\pi}{2b}\sec\left(\frac{\pi a}{2b}\right)-\frac{1}{b}\beta\left(\frac{a+b}{2b}\right),
\end{equation}
\begin{equation}\label{FA53}
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~=\frac{\pi}{2b}\sec\left(\frac{\pi a}{2b}\right)-\frac{1}{2b}\left[\Psi\left(\frac{a+3b}{4b}\right)-\Psi\left(\frac{a+b}{4b}\right)\right],
\end{equation}
\begin{equation}\label{FA504}
=\frac{(b^{2}-a^{2})}{8ab}\left[\Psi\left(\frac{3b-a}{4b}\right)-\Psi\left(\frac{b-a}{4b}\right)-\Psi\left(\frac{3b+a}{4b}\right)+\Psi\left(\frac{b+a}{4b}\right)\right],
\end{equation}
where $\Re(b)>0,~\Re(b\pm a)>0,~\frac{3}{2}\pm\frac{a}{2b}\in\mathbb{C}\backslash \mathbb{Z}_{0}^{-}$;~ $\frac{a}{b}\in\mathbb{C}\backslash\{\pm1,\pm3,\pm5,...\}$.\\
\textbf{Independent proofs of} (\ref{FA54})-(\ref{FA504}): Let the left hand side of eq.(\ref{FA54}) be denoted by $\Phi(a,b)$; using the exponential definition of the hyperbolic functions, we get
\begin{equation}\label{AF04}
\Phi(a,b) =\int_{0}^{\infty}\left(\frac{e^{ax}}{e^{bx}+e^{-bx}}\right)dx-\int_{0}^{\infty}\left(\frac{e^{-ax}}{e^{bx}+e^{-bx}}\right)dx=\textbf{Y}_{1}-\textbf{Y}_{2},
\end{equation}
where $\textbf{Y}_{1}$ and $\textbf{Y}_{2}$ are given by
\begin{equation}\label{AF05}
\textbf{Y}_{1}= \int_{0}^{\infty}\left(\frac{e^{ax}}{e^{bx}+e^{-bx}}\right)dx,~~and~~ \textbf{Y}_{2}= \int_{0}^{\infty}\left(\frac{e^{-ax}}{e^{bx}+e^{-bx}}\right)dx.
\end{equation}
From the above, the integral $\textbf{Y}_{1}$ can also be written as
\begin{equation*}
\textbf{Y}_{1}= \int_{0}^{\infty}e^{-(b-a)x}\bigg(1+e^{-2bx}\bigg)^{-1}dx,
\end{equation*}
\begin{equation}\label{AF06}
= \int_{0}^{\infty}\bigg[e^{-(b-a)x}{}_{1}F_{0}\left(\begin{array}{lll}1~;\\ \overline{~~~};\end{array} -e^{-2bx} \right)\bigg]dx.
\end{equation}
When $\Re(b)>0$, we have $|-e^{-2bx}|<1$ for all $x>0$, which is the convergence condition of the binomial function ${}_{1}F_{0}(\cdot)$ in eq.(\ref{AF06}); it then yields
\begin{equation*}
\textbf{Y}_{1}=\int_{0}^{\infty}\bigg[e^{-(b-a)x}\sum_{r=0}^{\infty}(-1)^{r}~e^{-2brx}\bigg]dx,
\end{equation*}
\begin{equation}\label{AF07}
=\sum_{r=0}^{\infty}(-1)^{r}\bigg[\int_{0}^{\infty}e^{-\{(b-a)+2br\}x}dx\bigg],
\end{equation}
where $\Re(b-a)>0,~\Re(b)>0$; this is the convergence condition of the Laplace transform of unity in the integral (\ref{AF07}).
Then, applying the Laplace transformation formula (\ref{GRI22}) to eq.(\ref{AF07}), we obtain
\begin{equation}\label{AF08}
\textbf{Y}_{1}=\sum_{r=0}^{\infty}(-1)^{r}\bigg[\frac{1}{(b-a)+2br}\bigg], ~~~~~~~~~~~~~\Re(b-a)>0,~\Re(b)>0.
\end{equation}
Similarly, the evaluation of $\textbf{Y}_{2}$ is given by
\begin{equation}\label{AF09}
\textbf{Y}_{2}=\sum_{r=0}^{\infty}(-1)^{r}\bigg[\frac{1}{(b+a)+2br}\bigg], ~~~~~~~~~~~~~\Re(b+a)>0,~\Re(b)>0.
\end{equation}
Making use of the eqns (\ref{AF08}) and (\ref{AF09}) in the above eq. (\ref{AF04}), we obtain
\begin{equation}\label{AF10}
\Phi(a,b)=\sum_{r=0}^{\infty}\bigg[\frac{(-1)^{r}}{(b-a)+2br}-\frac{(-1)^{r}}{(b+a)+2br}\bigg],
\end{equation}
~~~~~where $\Re(b\pm a)>0,~\Re(b)>0.$\\
Employing algebraic properties of the Pochhammer symbol in eq.(\ref{AF10}) and simplifying, we obtain
\begin{equation*}\label{AF11}
\Phi(a,b)=\frac{2a}{b^{2}-a^{2}}\sum_{r=0}^{\infty}\bigg[\frac{\left(\frac{b-a}{2b}\right)_{r}\left(\frac{b+a}{2b}\right)_{r}(-1)^{r}} {\left(\frac{3b-a}{2b}\right)_{r}\left(\frac{3b+a}{2b}\right)_{r}}\bigg],
\end{equation*}
\begin{equation}\label{AF12}
~~~~~~~~~~=\frac{2a}{(b^{2}-a^{2})}~~{}_{3}F_{2}\left(\begin{array}{lll}1,~\frac{1}{2}-\frac{a}{2b},~~\frac{1}{2}+\frac{a}{2b};\\ \frac{3}{2}-\frac{a}{2b},~~\frac{3}{2}+\frac{a}{2b};\end{array} -1\right),
\end{equation}
where $\Re(b\pm a)>0,~\Re(b)>0,~\frac{a}{b}\in\mathbb{C}\backslash\{\pm1,\pm3,\pm5,...\},~\frac{3}{2}\pm\frac{a}{2b}\in\mathbb{C}\backslash \mathbb{Z}_{0}^{-}$. Now, the proof of eq.(\ref{FA503}) is obtained from eq.(\ref{AF10}) by adding and subtracting its second term, as given below:
\begin{equation}\label{AF101}
\Phi(a,b)=\sum_{r=0}^{\infty}\bigg[\frac{(-1)^{r}}{(b-a)+2br}+\frac{(-1)^{r}}{(b+a)+2br}-\frac{2(-1)^{r}}{(b+a)+2br}\bigg],
\end{equation}
\begin{equation}\label{AF102}
=\sum_{r=0}^{\infty}(-1)^{r}\bigg[\frac{(1+2r)}{\{(b-a)+2br\}\{(b+a)+2br\}}\bigg]-\frac{1}{b}\sum_{r=0}^{\infty}\frac{(-1)^{r}}{\big\{\left(\frac{b+a}{2b}\right)+r\big\}}.
\end{equation}
Employing algebraic properties of the Pochhammer symbol and the lower case beta function (\ref{GRI25}) in eq.(\ref{AF102}) and simplifying, we obtain
\begin{equation*}\label{AF103}
\Phi(a,b)=\frac{2b}{b^{2}-a^{2}}\sum_{r=0}^{\infty}\bigg[\frac{\left(\frac{3}{2}\right)_{r}\left(\frac{b-a}{2b}\right)_{r}\left(\frac{b+a}{2b}\right)_{r}(-1)^{r}} {\left(\frac{1}{2}\right)_{r}\left(\frac{3b-a}{2b}\right)_{r}\left(\frac{3b+a}{2b}\right)_{r}}\bigg]-\frac{1}{b}\beta\left(\frac{a+b}{2b}\right),
\end{equation*}
\begin{equation}\label{AF104}
~~~~~~~~~~=\frac{2b}{(b^{2}-a^{2})}~~{}_{4}F_{3}\left(\begin{array}{lll}1,~\frac{3}{2},~\frac{1}{2}-\frac{a}{2b},~~\frac{1}{2}+\frac{a}{2b};\\ \frac{1}{2},~\frac{3}{2}-\frac{a}{2b},~~\frac{3}{2}+\frac{a}{2b};\end{array} -1\right)-\frac{1}{b}\beta\left(\frac{a+b}{2b}\right),
\end{equation}
where $\Re(b\pm a)>0,~\Re(b)>0,~\frac{a}{b}\in\mathbb{C}\backslash\{\pm1,\pm3,\pm5,...\},~\frac{3}{2}\pm\frac{a}{2b}\in\mathbb{C}\backslash \mathbb{Z}_{0}^{-}$. Upon using the hypergeometric form of the $\sec(z)$ function (\ref{GRI32}) [when $z=\frac{\pi a}{2b}$] in eq.(\ref{AF104}), we obtain
\begin{equation}\label{AF1014}
\Phi(a,b)=\frac{\pi}{2b}\sec\left(\frac{\pi a}{2b}\right)-\frac{1}{b}\beta\left(\frac{a+b}{2b}\right).
\end{equation}
Using the classical summation theorem ${}_{4}F_{3}(-1)$ (\ref{GRI12}) in the right hand side of eq.(\ref{AF104}), after simplification we get eq.(\ref{AF1014}) or (\ref{FA503}).
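The closed form (\ref{AF1014}) can also be checked numerically against the defining integral, computing the lower case beta function via (\ref{GRI26}). The sketch below (illustrative only) assumes Python with the \texttt{mpmath} library and an arbitrary admissible choice of $a$ and $b$ with $b>a>0$.
\begin{verbatim}
# Quadrature check of Phi(a,b) against the sec/beta closed form.
from mpmath import mp, mpf, quad, digamma, sec, pi, sinh, cosh, inf

mp.dps = 25
a, b = mpf('0.4'), mpf('1.0')

def beta_one(x):
    # lower case beta function via the digamma relation (GRI26)
    return (digamma((1 + x)/2) - digamma(x/2)) / 2

lhs = quad(lambda x: sinh(a*x)/cosh(b*x), [0, inf])
rhs = pi/(2*b)*sec(pi*a/(2*b)) - beta_one((a + b)/(2*b))/b
print(lhs, rhs)   # the two values should agree
\end{verbatim}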
Applying the properties of the Digamma function (\ref{GRI26}) and (\ref{GRI000}) to eq.(\ref{FA54}), we get the result (\ref{FA504}).
\section{Applications of product formulas in special class of hyperbolic integrals}
The product formulas of hyperbolic functions are
\begin{equation}\label{GRI34}
\sinh(A)\cosh(B)=\frac{1}{2}[\sinh(A+B)+\sinh(A-B)],
\end{equation}
\begin{equation}\label{GRI35}
\sinh(A)\sinh(B)=\frac{1}{2}[\cosh(A+B)-\cosh(A-B)],
\end{equation}
\begin{equation}\label{GRI36}
\cosh(A)\cosh(B)=\frac{1}{2}[\cosh(A+B)+\cosh(A-B)].
\end{equation}
\texttt{Each of the following hyperbolic definite integrals holds true:}\\
$\bullet$ In the eq.(\ref{GRI45}) using product formula and applying the result (\ref{cc3}), we get
\begin{multline}\label{sss1}
\int_{0}^{\infty}\frac{\sinh(ax)\sinh(bx)}{\cosh^{v}(c x)}dx\\ =\frac{2^{v-3}}{(c)\Gamma(v)}\bigg[\Gamma\left(\frac{vc+a+b}{2c}\right)\Gamma\left(\frac{vc-a-b}{2c}\right) -\Gamma\left(\frac{vc+a-b}{2c}\right)\Gamma\left(\frac{vc-a+b}{2c}\right)\bigg],
\end{multline}
where $\Re(v)<4,~\Re(c)>0,~\Re(vc\pm a\pm b)>0;~\frac{v}{2},~1+\frac{v}{2}\pm\frac{a}{2c}\pm\frac{b}{2c}\in\mathbb{C}\backslash \mathbb{Z}_{0}^{-}$.\\
$\bullet$ In the eq.(\ref{GRI45}) put $v=1$ and interchange $b$ and $c$, we get
\begin{multline}\label{FAF811}
\int_{0}^{\infty}\frac{\sinh(ax)\sinh(cx)}{\cosh(b x)}dx =\frac{2^{2}acb}{(b-a-c)(b+a+c)(b-a+c)(b+a-c)}\times\\\times {}_{6}F_{5}\left(\begin{array}{lll}1,\frac{3}{2},\frac{1}{2}-\frac{a}{2b}-\frac{c}{2b}, \frac{1}{2}-\frac{a}{2b}+\frac{c}{2b},\frac{1}{2}+\frac{a}{2b}+\frac{c}{2b},\frac{1}{2}+\frac{a}{2b}-\frac{c}{2b}~;\\ \frac{1}{2},\frac{3}{2}-\frac{a}{2b}-\frac{c}{2b} ,\frac{3}{2}-\frac{a}{2b}+\frac{c}{2b},\frac{3}{2}+\frac{a}{2b}+\frac{c}{2b},\frac{3}{2}+\frac{a}{2b}-\frac{c}{2b};\end{array} -1\right),
\end{multline}
where $\Re(b)>0,~\Re(b\pm a\pm c)>0,~\frac{3}{2}\pm\frac{a}{2b}\pm\frac{c}{2b}\in\mathbb{C}\backslash \mathbb{Z}_{0}^{-}$.\\
$\bullet$ Using the product formula of hyperbolic functions and then applying eq.(\ref{FA50}), we get
\begin{eqnarray}\label{FAF8}
\int_{0}^{\infty}\frac{\sinh(ax)\sinh(cx)}{\cosh(bx)}dx =\frac{\pi}{4b}\left[\sec\left(\frac{\pi(a+c)}{2b}\right)-\sec\left(\frac{\pi(a-c)}{2b}\right)\right],
\end{eqnarray}
\begin{equation}\label{FAF81}
=\left(\frac{\pi}{b}\right)\frac{\sin\left(\frac{a\pi}{2b}\right)\sin\left(\frac{c\pi}{2b}\right)} {\left\{\cos\left(\frac{c\pi}{b}\right)+\cos\left(\frac{a\pi}{b}\right)\right\}},
\end{equation}
where $\Re(b)>0,~\Re(b\pm a\pm c)>0,~\frac{3}{2}\pm\frac{a}{2b}\pm\frac{c}{2b}\in\mathbb{C}\backslash \mathbb{Z}_{0}^{-}$.\\
$\bullet$ In the eq.(\ref{GRI46}) using product formula and applying the result (\ref{cc4}), we get
\begin{multline}\label{ss2}
\int_{0}^{\infty}\frac{\sinh(ax)\sinh(bx)}{\sinh^{v}(c x)}dx =\frac{2^{v-3}}{(c)\Gamma(v)}\bigg[\Gamma\left(\frac{vc+a+b}{2c}\right)\Gamma\left(\frac{vc-a-b}{2c}\right) \frac{\cos\left(\frac{(a+b)\pi}{2c}\right)}{\cos(\frac{v\pi}{2})}\\
-\Gamma\left(\frac{vc+a-b}{2c}\right)\Gamma\left(\frac{vc-a+b}{2c}\right)\frac{\cos\left(\frac{(a-b)\pi}{2c}\right)}{\cos(\frac{v\pi}{2})}\bigg],
\end{multline}
where $\Re(v)<3,~\Re(c)>0,~\Re(vc\pm a\pm b)>0;~\frac{v}{2},~1+\frac{v}{2}\pm\frac{a}{2c}\pm\frac{b}{2c}\in\mathbb{C}\backslash \mathbb{Z}_{0}^{-}$.\\
$\bullet$ In the eq.(\ref{GRI47}) using product formula and then applying the result (\ref{cc7}), we get
\begin{multline}\label{cc9}
\int_{0}^{\infty}\frac{\sinh(ax)\cosh(bx)}{\cosh^{v}(c x)}dx\\ =2^{v-2} \bigg[\frac{1}{(vc-a-b)}~{}_{2}F_{1}\left(\begin{array}{lll}v,\frac{vc-a-b}{2c}~~~;\\
1+\frac{vc-a-b}{2c};\end{array} -1\right) -\frac{1}{(vc+a+b)}~{}_{2}F_{1}\left(\begin{array}{lll}v,\frac{vc+a+b}{2c}~~~;\\ 1+\frac{vc+a+b}{2c};\end{array} -1\right)\\ +\frac{1}{(vc-a+b)}~{}_{2}F_{1}\left(\begin{array}{lll}v,\frac{vc-a+b}{2c}~~~;\\ 1+\frac{vc-a+b}{2c};\end{array} -1\right) -\frac{1}{(vc+a-b)}~{}_{2}F_{1}\left(\begin{array}{lll}v,\frac{vc+a-b}{2c}~~~;\\ 1+\frac{vc+a-b}{2c};\end{array} -1\right)\bigg], \end{multline} where $\Re(v)<3,~\Re(c)>0,~\Re(vc\pm a\pm b)>0;~\frac{v}{2}\pm\frac{\sqrt{a^{2}-b^{2}}}{2c},~1+\frac{v}{2}\pm\frac{a}{2c}\pm\frac{b}{2c}\in\mathbb{C}\backslash \mathbb{Z}_{0}^{-}$.\\ $\bullet$ In the eq.(\ref{GRI47}) put $v=1$ and interchange $b$ and $c$, we get \begin{multline}\label{FAF111} \int_{0}^{\infty}\frac{\sinh(ax)\cosh(cx)}{\cosh(b x)}dx =\frac{2(ab^{2}-a^{3}+ac^{2})}{(b-a-c)(b+a+c)(b-a+c)(b+a-c)}\times\\\times {}_{7}F_{6}\left(\begin{array}{lll}1,\frac{3}{2}-\frac{\sqrt{a^{2}-c^{2}}}{2b},\frac{3}{2}+\frac{\sqrt{a^{2}-c^{2}}}{2b},\frac{1}{2}-\frac{a}{2b}-\frac{c}{2b}, \frac{1}{2}-\frac{a}{2b}+\frac{c}{2b},\frac{1}{2}+\frac{a}{2b}+\frac{c}{2b},\frac{1}{2}+\frac{a}{2b}-\frac{c}{2b}~;\\ \frac{1}{2}-\frac{\sqrt{a^{2}-c^{2}}}{2b},\frac{1}{2}+\frac{\sqrt{a^{2}-c^{2}}}{2b},\frac{3}{2}-\frac{a}{2b}-\frac{c}{2b} ,\frac{3}{2}-\frac{a}{2b}+\frac{c}{2b},\frac{3}{2}+\frac{a}{2b}+\frac{c}{2b},\frac{3}{2}+\frac{a}{2b}-\frac{c}{2b};\end{array} -1\right), \end{multline} where $\Re(b)>0,~\Re(b\pm a\pm c)>0;~\frac{1}{2}\pm\frac{\sqrt{a^{2}-c^{2}}}{2b},~\frac{3}{2}\pm\frac{a}{2b}\pm\frac{c}{2b}\in\mathbb{C}\backslash \mathbb{Z}_{0}^{-}$.\\ $\bullet$ Using the product formula of hyperbolic function , then applying eq.(\ref{FA503}), we get \begin{eqnarray}\label{FAF10} \int_{0}^{\infty}\frac{\sinh(ax)\cosh(cx)}{\cosh(bx)}dx =\frac{\pi}{4b}\left[\sec\left(\frac{\pi(a+c)}{2b}\right)+\sec\left(\frac{\pi(a-c)}{2b}\right)\right]\nonumber\\ -\frac{1}{2b}\left[\beta\left(\frac{(a+b+c)}{2b}\right)+\beta\left(\frac{(a+b-c)}{2b}\right)\right], \end{eqnarray} \begin{equation}\label{FAF101} =\left(\frac{\pi}{b}\right)\frac{\cos\left(\frac{a\pi}{2b}\right)\cos\left(\frac{c\pi}{2b}\right)} {\left\{\cos\left(\frac{c\pi}{b}\right)+\cos\left(\frac{a\pi}{b}\right)\right\}} -\frac{1}{2b}\left[\beta\left(\frac{(a+b+c)}{2b}\right)+\beta\left(\frac{(a+b-c)}{2b}\right)\right], \end{equation} where $\Re(b)>0,~\Re(b\pm a\pm c)>0;~\frac{1}{2}\pm\frac{\sqrt{a^{2}-c^{2}}}{2b},~\frac{3}{2}\pm\frac{a}{2b}\pm\frac{c}{2b}\in\mathbb{C}\backslash \mathbb{Z}_{0}^{-}$.\\ $\bullet$ In the eq.(\ref{GRI48}) using product formula and applying the result (\ref{cc2}), we get \begin{multline}\label{ss3} \int_{0}^{\infty}\frac{\sinh(ax)\cosh(bx)}{\sinh^{v}(c x)}dx =\frac{2^{v-3}}{(c)\Gamma(v)}\bigg[\Gamma\left(\frac{vc+a+b}{2c}\right)\Gamma\left(\frac{vc-a-b}{2c}\right) \frac{\sin\left(\frac{(a+b)\pi}{2c}\right)}{\sin(\frac{v\pi}{2})}\\ +\Gamma\left(\frac{vc+a-b}{2c}\right)\Gamma\left(\frac{vc-a+b}{2c}\right)\frac{\sin\left(\frac{(a-b)\pi}{2c}\right)}{\sin(\frac{v\pi}{2})}\bigg], \end{multline} where $\Re(v)<2,~\Re(c)>0,~\Re(vc\pm a\pm b)>0;~\frac{v}{2}\pm\frac{\sqrt{a^{2}-b^{2}}}{2c},~1+\frac{v}{2}\pm\frac{a}{2c}\pm\frac{b}{2c}\in\mathbb{C}\backslash \mathbb{Z}_{0}^{-}$.\\ $\bullet$ In the eq.(\ref{GRI48}) put $v=1$ and interchange $b$ and $c$, we get \begin{multline}\label{FAF710} \int_{0}^{\infty}\frac{\sinh(ax)\cosh(cx)}{\sinh(b x)}dx =\frac{2(ab^{2}-a^{3}+ac^{2})}{(b-a-c)(b+a+c)(b-a+c)(b+a-c)}\times\\\times 
{}_{7}F_{6}\left(\begin{array}{lll}1,\frac{3}{2}-\frac{\sqrt{a^{2}-c^{2}}}{2b},\frac{3}{2}+\frac{\sqrt{a^{2}-c^{2}}}{2b},\frac{1}{2}-\frac{a}{2b}-\frac{c}{2b}, \frac{1}{2}-\frac{a}{2b}+\frac{c}{2b},\frac{1}{2}+\frac{a}{2b}+\frac{c}{2b},\frac{1}{2}+\frac{a}{2b}-\frac{c}{2b}~;\\ \frac{1}{2}-\frac{\sqrt{a^{2}-c^{2}}}{2b},\frac{1}{2}+\frac{\sqrt{a^{2}-c^{2}}}{2b},\frac{3}{2}-\frac{a}{2b}-\frac{c}{2b} ,\frac{3}{2}-\frac{a}{2b}+\frac{c}{2b},\frac{3}{2}+\frac{a}{2b}+\frac{c}{2b},\frac{3}{2}+\frac{a}{2b}-\frac{c}{2b};\end{array} 1\right), \end{multline} where $\Re(b)>0,~\Re(b\pm a\pm c)>0;~\frac{1}{2}\pm\frac{\sqrt{a^{2}-c^{2}}}{2b},~\frac{3}{2}\pm\frac{a}{2b}\pm\frac{c}{2b}\in\mathbb{C}\backslash \mathbb{Z}_{0}^{-}$.\\ $\bullet$ Using the product formula of hyperbolic function , then applying eq.(\ref{FA49}), we get \begin{eqnarray}\label{FAF7} \int_{0}^{\infty}\frac{\sinh(ax)\cosh(cx)}{\sinh(b x)}dx =\frac{\pi}{4b}\left[\tan\left(\frac{\pi(a+c)}{2b}\right)+\tan\left(\frac{\pi(a-c)}{2b}\right)\right], \end{eqnarray} \begin{equation}\label{FAF71} =\left(\frac{\pi}{2b}\right)\frac{\sin\left(\frac{a\pi}{b}\right)} {\left\{\cos\left(\frac{c\pi}{b}\right)+\cos\left(\frac{a\pi}{b}\right)\right\}},~ \end{equation} where $\Re(b)>0,~\Re(b\pm a\pm c)>0;~\frac{1}{2}\pm\frac{\sqrt{a^{2}-c^{2}}}{2b},~\frac{3}{2}\pm\frac{a}{2b}\pm\frac{c}{2b}\in\mathbb{C}\backslash \mathbb{Z}_{0}^{-}$.\\ ~ $\bullet$ In the eq.(\ref{GRI49}) using product formula and applying the result (\ref{cc3}), we get \begin{multline}\label{ss4} \int_{0}^{\infty}\frac{\cosh(ax)\cosh(bx)}{\cosh^{v}(c x)}dx\\ =\frac{2^{v-3}}{(c)\Gamma(v)}\bigg[\Gamma\left(\frac{vc+a+b}{2c}\right)\Gamma\left(\frac{vc-a-b}{2c}\right) +\Gamma\left(\frac{vc+a-b}{2c}\right)\Gamma\left(\frac{vc-a+b}{2c}\right)\bigg], \end{multline} where $\Re(v)<2,~\Re(c)>0,~\Re(vc\pm a\pm b)>0;\frac{v}{2},~\frac{v}{2}\pm\frac{\sqrt{a^{2}+b^{2}}}{2c},~1+\frac{v}{2}\pm\frac{a}{2c}\pm\frac{b}{2c}\in\mathbb{C}\backslash \mathbb{Z}_{0}^{-}$.\\ $\bullet$ In the eq.(\ref{GRI49}) put $v=1$ and interchange $b$ and $c$, we get \begin{multline}\label{FAF109} \int_{0}^{\infty}\frac{\cosh(ax)\cosh(cx)}{\cosh(b x)}dx =\frac{2(b^{3}-a^{2}b-c^{2}b)}{(b-a-c)(b+a+c)(b-a+c)(b+a-c)}\times\\\times {}_{8}F_{7}\left(\begin{array}{lll}1,\frac{3}{2},\frac{3}{2}-\frac{\sqrt{a^{2}+c^{2}}}{2b},\frac{3}{2}+\frac{\sqrt{a^{2}+c^{2}}}{2b},\frac{1}{2}-\frac{a}{2b}-\frac{c}{2b}, \frac{1}{2}-\frac{a}{2b}+\frac{c}{2b},\frac{1}{2}+\frac{a}{2b}+\frac{c}{2b},\frac{1}{2}+\frac{a}{2b}-\frac{c}{2b}~;\\ \frac{1}{2}, \frac{1}{2}-\frac{\sqrt{a^{2}+c^{2}}}{2b},\frac{1}{2}+\frac{\sqrt{a^{2}+c^{2}}}{2b},\frac{3}{2}-\frac{a}{2b}-\frac{c}{2b} ,\frac{3}{2}-\frac{a}{2b}+\frac{c}{2b},\frac{3}{2}+\frac{a}{2b}+\frac{c}{2b},\frac{3}{2}+\frac{a}{2b}-\frac{c}{2b};\end{array} -1\right), \end{multline} where $\Re(b)>0,~\Re(b\pm a\pm c)>0;~\frac{1}{2}\pm\frac{\sqrt{a^{2}+c^{2}}}{2b},~\frac{3}{2}\pm\frac{a}{2b}\pm\frac{c}{2b}\in\mathbb{C}\backslash \mathbb{Z}_{0}^{-}$.\\ $\bullet$ Using the product formula of hyperbolic function, then applying eq.(\ref{FA50}), we get \begin{eqnarray}\label{FAF9} \int_{0}^{\infty}\frac{\cosh(ax)\cosh(cx)}{\cosh(bx)}dx =\frac{\pi}{4b}\left[\sec\left(\frac{\pi(a+c)}{2b}\right)+\sec\left(\frac{\pi(a-c)}{2b}\right)\right], \end{eqnarray} \begin{equation}\label{FAF09} =\left(\frac{\pi}{b}\right)\frac{\cos\left(\frac{a\pi}{2b}\right)\cos\left(\frac{c\pi}{2b}\right)} {\left\{\cos\left(\frac{c\pi}{b}\right)+\cos\left(\frac{a\pi}{b}\right)\right\}}, \end{equation} where $\Re(b)>0,~\Re(b\pm a\pm 
c)>0;~\frac{1}{2}\pm\frac{\sqrt{a^{2}+c^{2}}}{2b},~\frac{3}{2}\pm\frac{a}{2b}\pm\frac{c}{2b}\in\mathbb{C}\backslash \mathbb{Z}_{0}^{-}$.\\
$\bullet$ In the eq.(\ref{GRI50}) using product formula and applying the result (\ref{cc4}), we get
\begin{multline}\label{ss5}
\int_{0}^{\infty}\frac{\cosh(ax)\cosh(bx)}{\sinh^{v}(c x)}dx =\frac{2^{v-3}}{(c)\Gamma(v)}\bigg[\Gamma\left(\frac{vc+a+b}{2c}\right)\Gamma\left(\frac{vc-a-b}{2c}\right) \frac{\cos\left(\frac{(a+b)\pi}{2c}\right)}{\cos(\frac{v\pi}{2})}\\
+\Gamma\left(\frac{vc+a-b}{2c}\right)\Gamma\left(\frac{vc-a+b}{2c}\right)\frac{\cos\left(\frac{(a-b)\pi}{2c}\right)}{\cos(\frac{v\pi}{2})}\bigg],
\end{multline}
where $\Re(v)<1,~\Re(c)>0,~\Re(vc\pm a\pm b)>0;~\frac{v}{2},~\frac{v}{2}\pm\frac{\sqrt{a^{2}+b^{2}}}{2c},~1+\frac{v}{2}\pm\frac{a}{2c}\pm\frac{b}{2c}\in\mathbb{C}\backslash \mathbb{Z}_{0}^{-}$.
\section*{Conclusion}
In this paper, we have evaluated some definite integrals containing quotients of hyperbolic functions. Other integrals of hyperbolic functions, which may differ from those presented here, can also be evaluated in a similar way. The results presented in this paper are expressed in terms of hypergeometric functions, trigonometric and hyperbolic functions, the Digamma function, the beta function of one variable, and the Gamma function.
\section*{References}
\end{document}
\begin{document}
\title{Curvature of almost Hermitian manifolds and applications}
\author{Chengjie Yu$^1$ }
\address{Department of Mathematics, Shantou University, Shantou, Guangdong, 515063, China}
\email{[email protected]}
\thanks{$^1$Research partially supported by the National Natural Science Foundation of China (11001161),(10901072) and (11101106).}
\renewcommand{\subjclassname}{ \textup{2000} Mathematics Subject Classification}
\subjclass[2000]{Primary 53B25; Secondary 53C40}
\date{}
\keywords{Almost-Hermitian manifold, canonical connection, holomorphic bisectional curvature}
\begin{abstract}
In this paper, by introducing a notion of local quasi holomorphic frame, we obtain a curvature formula for almost Hermitian manifolds which is similar to that of Hermitian manifolds. Moreover, as applications of the curvature formula, we extend a result of H.S. Wu and a result of F. Zheng to almost Hermitian manifolds.
\end{abstract}
\maketitle\markboth{Chengjie Yu}{curvature of almost hermitian manifold }
\section{Introduction}
A triple $(M,J,g)$ is called an almost Hermitian manifold where $M$ is a smooth manifold of even dimension, $J$ is an almost complex structure on $M$ and $g$ is a Riemannian metric on $M$ that is $J$-invariant. There are several kinds of interesting connections on almost Hermitian manifolds (see \cite{g}). Among them, the Levi-Civita connection, which is torsion free and compatible with the metric, and the canonical connection, which is compatible with the metric and the complex structure and has vanishing $(1,1)$-part of the torsion, have attracted the most attention. The geometry of almost Hermitian manifolds with respect to the Levi-Civita connection was studied by Gray (see \cite{Gray1,Gray2,Gray3}) and other geometers in the 1970s. An important conjecture in this line was raised by Goldberg (\cite{Goldberg}): an Einstein almost K\"ahler manifold must be K\"ahler. Here, almost K\"ahler manifolds are almost Hermitian manifolds with the fundamental form $\omega_g(X,Y)=g(JX,Y)$ closed. The conjecture was proved by Sekigawa \cite{Sekigawa} with the further assumption of nonnegative scalar curvature. The full conjecture is still open. One can consult the survey \cite{AD} for recent progress on the conjecture. The canonical connection is crucial in the study of the structure of nearly K\"ahler manifolds by Nagy \cite{Na1,Na2}. In \cite{twy}, Tossati, Weinkove and Yau used the canonical connection rather than the Levi-Civita connection to solve the Calabi-Yau equation on almost K\"ahler manifolds, related to an interesting and important program on the study of symplectic topology proposed by Donaldson \cite{Donaldson}. Later, in \cite{vt}, Tossati obtained a Laplacian comparison theorem and a Schwarz lemma for almost Hermitian manifolds, which is a generalization of Yau's Schwarz lemma for Hermitian manifolds (see \cite{Yau}). Moreover, with the help of the generalized Laplacian comparison and Schwarz lemma, Tossati extended a result of Seshadri-Zheng \cite{SZ} on the nonexistence of complete Hermitian metrics with holomorphic bisectional curvature bounded between two negative constants and bounded torsion on a product of complex manifolds to a product of almost complex manifolds with almost Hermitian metrics. In \cite{FTY}, Fan, Tam and the author further weakened the curvature assumption of Tossati's result and obtained the same conclusion, which is also a generalization of a result of Tam-Yu \cite{TY}.
The canonical connection was first introduced by Ehresmann and Libermann in \cite{e}. It is a natural generalization of the Chern connection on Hermitian manifolds (see \cite{Chern}) and is more closely related to the almost complex structure. When the almost complex structure is integrable, it is just the Chern connection. In this paper, by introducing a notion of local quasi holomorphic frame, we obtain a curvature formula for almost Hermitian manifolds similar to that of Hermitian manifolds. More precisely, we have the following result. \begin{thm} Let $(M,g,J)$ be an almost Hermitian manifold. Let $(e_1,\cdots,e_n)$ be a local quasi holomorphic frame at $p$. Then \begin{equation} R_{i\bar j k\bar l}(p)=-\overline{e_l} e_k(g_{i\bar j})(p)+g^{\bar\mu\lambda}e_k(g_{i\bar\mu})\overline{e_l}(g_{\lambda\bar j})(p). \end{equation} \end{thm} As an application of the curvature formula on almost Hermitian manifolds, we extend a result of Wu \cite{Wu} to almost Hermitian manifolds. \begin{thm} Let $(M,J)$ be an almost complex manifold. Let $g,h$ be two almost Hermitian metrics on $M$. Then $$R^{g+h}(X,\overline X,Y,\overline Y)\leq R^g(X,\overline X,Y,\overline Y)+R^h(X,\overline X,Y,\overline Y)$$ for any two $(1,0)$-vectors $X$ and $Y$, where $R^{g+h},R^g$ and $R^h$ denote the curvature tensors of the metrics $g+h, g$ and $h$ respectively. \end{thm} Another application of the curvature formula on almost Hermitian manifolds in this paper is to give a classification of almost Hermitian metrics with nonpositive holomorphic bisectional curvature on a product of two compact almost complex manifolds, which generalizes a result of Zheng \cite{Zheng} and a previous result of the author \cite{Yu}. More precisely, we obtain the following results. \begin{thm}\label{thm-class-prod} Let $M$ and $N$ be compact almost complex manifolds. Let $\phi_1,\phi_2,\cdots, \phi_r$ be a basis of $H^{1,0}(M)$ and $\psi_1,\psi_2,\cdots,\psi_s$ be a basis of $H^{1,0}(N)$. Then, for any almost Hermitian metric $h$ on $M\times N$ with nonpositive holomorphic bisectional curvature, \begin{equation*} \omega_h=\pi_1^{*}\omega_{h_1}+\pi_2^{*}\omega_{h_2}+\rho+\bar\rho \end{equation*} where $h_1$ and $h_2$ are almost Hermitian metrics on $M$ and $N$ with nonpositive holomorphic bisectional curvature respectively, $\pi_1$ and $\pi_2$ are the natural projections from $M\times N$ to $M$ and from $M\times N$ to $N$ respectively, and \begin{equation*} \rho=\sqrt{-1}\sum_{k=1}^{r}\sum_{l=1}^{s}a_{kl}\phi_k\wedge\overline{\psi_l} \end{equation*} where the $a_{kl}$'s are complex numbers. \end{thm} \begin{cor} Let $M$ and $N$ be two almost Hermitian manifolds with $\mathcal H(M)\neq\emptyset$ and $\mathcal H(N)\neq\emptyset$. Then $$\mbox{codim}_\mathbb{R}(\mathcal{H}(M)\times \mathcal{H}(N),\mathcal{H}(M\times N))=2\dim H^{1,0}(M)\cdot \dim H^{1,0}(N).$$ \end{cor} Here $H^{1,0}(M)$ denotes the space of holomorphic $(1,0)$-forms on $M$ and $\mathcal{H}(M)$ is the collection of almost Hermitian metrics on $M$ with nonpositive holomorphic bisectional curvature. The corollary above implies that an almost Hermitian metric with nonpositive holomorphic bisectional curvature on a product of two compact almost complex manifolds must be a product metric if one of the compact almost complex manifolds admits no nontrivial holomorphic $(1,0)$-forms.
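As a consistency check, which will not be used later, note that when $J$ is integrable the coordinate frame $(\partial/\partial z^1,\cdots,\partial/\partial z^n)$ of any local holomorphic coordinate system consists of holomorphic vector fields in the sense recalled in the next section, hence is quasi holomorphic everywhere, and the curvature formula above then reduces to the classical expression for the Chern curvature of a Hermitian metric:
\begin{equation*}
R_{i\bar j k\bar l}=-\frac{\partial^2 g_{i\bar j}}{\partial z^k\partial\bar z^l}
+g^{\bar\mu\lambda}\frac{\partial g_{i\bar\mu}}{\partial z^k}\frac{\partial g_{\lambda\bar j}}{\partial\bar z^l}.
\end{equation*}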
Note that the local quasi holomorphic frame introduced in this paper is different from the generalized normal holomorphic frame introduced by Vezzoni \cite{Vezzoni}, since the background connection considered in \cite{Vezzoni} is the Levi-Civita connection and Vezzoni's generalized normal frame exists only on quasi K\"ahler manifolds. The introduced frame is also different from the local holomorphic coordinates introduced in \cite{FTY}. \section{Frames on almost complex manifolds} Recall the following definition of holomorphic vector fields on almost complex manifolds, which is a generalization of holomorphic vector fields on complex manifolds. \begin{defn}[\cite{g}] Let $(M,J)$ be an almost complex manifold. A $(1,0)$-vector field $Y$ is said to be pseudo holomorphic at the point $p\in M$ if $(L_{\overline X}Y)^{(1,0)}(p)=[\overline X,Y]^{(1,0)}(p)=0$ for any $(1,0)$-vector field $X$ on $M$, where $Z^{(1,0)}$ means the $(1,0)$-part of $Z$ and $L$ means the Lie derivative. If $Y$ is pseudo holomorphic all over $M$, we call $Y$ a holomorphic vector field. \end{defn} Note that the almost complex structure may not be integrable, so we cannot expect the existence of local holomorphic vector fields in general. However, for any point $p\in M$, we can find a local $(1,0)$-frame that is pseudo holomorphic at $p$. \begin{lem}\label{lem-exist-pseudo-holo} Let $(M,J)$ be an almost complex manifold. Then, for any $p\in M$, there is a $(1,0)$-frame $(e_1,e_2,\cdots,e_n)$ near $p$ such that $e_i$ is pseudo holomorphic at $p$ for all $i$. We call the $(1,0)$-frame $(e_1,e_2,\cdots,e_n)$ a local pseudo holomorphic frame at $p$. \end{lem} \begin{proof} Let $(v_1,v_2,\cdots,v_n)$ be a local $(1,0)$-frame of $M$ near $p$ and $(e_1,e_2,\cdots,e_n)$ be another local $(1,0)$-frame of $M$ near $p$. Suppose that \begin{equation} e_i=f_{ij}v_j, \end{equation} and \begin{equation} [\overline{v_j},v_i]^{(1,0)}=c_{i\bar j}^k v_k, \end{equation} where the $f_{ij}$'s are local smooth functions to be determined. Then \begin{equation} \begin{split} [\overline{v_j},e_i]^{(1,0)}(p)=\overline{v_j}(f_{i\mu})(p)v_\mu(p)+f_{i\lambda}(p)c_{\lambda\bar j}^\mu(p) v_\mu(p). \end{split} \end{equation} If we choose $f_{ij}$ such that $f_{ij}(p)=\delta_{ij}$ and $\overline{v_k}(f_{ij})(p)=-c_{i\bar k}^j(p)$ for all $i,j$ and $k$, then \begin{equation} [\overline{v_j},e_i]^{(1,0)}(p)=0 \end{equation} for all $i$ and $j$. So, $(e_1,e_2,\cdots,e_n)$ is a local $(1,0)$-frame on $M$ near $p$ such that $e_i$ is pseudo holomorphic at $p$ for all $i$. \end{proof} Due to the nonexistence of local holomorphic frames on an almost complex manifold in general, we have to introduce a notion of holomorphicity at a point that is stronger than pseudo holomorphicity, for further applications. \begin{defn} Let $(M,J)$ be an almost complex manifold. A $(1,0)$-vector field $X$ is called quasi holomorphic at the point $p\in M$ if $[Z,[\overline Y,X]]^{(1,0)}(p)=0$ for any $(1,0)$-vector fields $Y$ and $Z$ that are pseudo holomorphic at $p$, and moreover, $X$ itself is also pseudo holomorphic at $p$. \end{defn} From the definition, it is easy to check that a holomorphic vector field is quasi holomorphic all over $M$. \begin{lem}\label{lem-criterion-quasi-holo} Let $(M,J)$ be an almost complex manifold and $(e_1,e_2,\cdots,e_n)$ be a local pseudo holomorphic frame at $p\in M$. Let $X$ be a $(1,0)$-vector field on $M$ that is pseudo holomorphic at $p$.
Then, $X$ is quasi holomorphic at $p$ if and only if $[e_i,[\overline{e_j},X]]^{(1,0)}(p)=0$ for all $i$ and $j$. \end{lem} \begin{proof} Let $Y=Y_i e_i$ and $Z=Z_i e_i$ be two $(1,0)$-vector fields that are pseudo holomorphic at $p$. Then, it is clear that $\overline{v}(Y_i)(p)=\overline{v}(Z_i)(p)=0$ for any $(1,0)$-vector $v$ at $p$. Then \begin{equation} \begin{split} &[Z,[\overline{Y},X]]^{(1,0)}(p)\\ =&[Z_ie_i,[\overline{Y},X]]^{(1,0)}(p)\\ =&Z_i[e_i,[\overline{Y},X]]^{(1,0)}(p)-([\overline Y,X](Z_i))(p)e_i(p)\\ =&Z_i[e_i,[\overline{Y_je_j},X]]^{(1,0)}(p)\\ =&Z_i[e_i,\overline{Y_j}[\overline{e_j},X]]^{(1,0)}(p)-Z_i[e_i,X(\overline{Y_j})\overline{e_j}]^{(1,0)}(p)\\ =&Z_i\overline{Y_j}[e_i,[\overline{e_j},X]]^{(1,0)}(p)+Z_ie_i(\overline{Y_j})[\overline{e_j},X]^{(1,0)}(p)\\ =&Z_i\overline{Y_j}[e_i,[\overline{e_j},X]]^{(1,0)}(p)\\ =&0. \end{split} \end{equation} \end{proof} \begin{lem}\label{lem-exist-quasi-holo} Let $(M,J)$ be an almost complex manifold. Then, for any $p\in M$, there is a $(1,0)$-frame $(e_1,e_2,\cdots,e_n)$ near $p$ such that $e_i$ is quasi holomorphic at $p$ for all $i$. We call the $(1,0)$-frame $(e_1,e_2,\cdots,e_n)$ a local quasi holomorphic frame at $p$. \end{lem} \begin{proof} Let $(v_1,v_2,\cdots,v_n)$ be a local pseudo holomorphic frame of $M$ at $p$ and $(e_1,e_2,\cdots,e_n)$ be a local $(1,0)$-frame of $M$ near $p$. Suppose that \begin{equation} e_i=f_{ij}v_j, \end{equation} where the $f_{ij}$'s are local smooth functions to be determined. We first assume that $\overline{v_k}(f_{ij})(p)=0$ for all $i,j$ and $k$. Then, by the proof of Lemma \ref{lem-exist-pseudo-holo}, $(e_1,e_2,\cdots,e_n)$ is a local pseudo holomorphic frame at $p$. Moreover, suppose that \begin{equation} [\overline{v_j},v_i]^{(1,0)}=c_{i\bar j}^k v_k. \end{equation} It is clear that $c_{i\bar j}^k(p)=0$ for all $i,j$ and $k$ since $(v_1,v_2,\cdots,v_n)$ is a local pseudo holomorphic frame at $p$. Then \begin{equation} \begin{split} &[v_k,[\overline{v_j},e_i]]^{(1,0)}(p)\\ =&[v_k,[\overline{v_j},f_{i\lambda}v_\lambda]]^{(1,0)}(p)\\ =&[v_k,f_{i\lambda}[\overline{v_j},v_\lambda]]^{(1,0)}(p)+[v_k,\overline{v_j}(f_{i\lambda})v_\lambda]^{(1,0)}(p)\\ =&f_{i\lambda}[v_k,[\overline{v_j},v_\lambda]]^{(1,0)}(p)+v_k(f_{i\lambda})[\overline{v_j},v_\lambda]^{(1,0)}(p)+\overline{v_j}(f_{i\lambda})(p)[v_k,v_\lambda]^{(1,0)}(p)\\ &+v_k\overline{v_j}(f_{i\lambda})v_\lambda(p)\\ =&f_{i\lambda}[v_k,[\overline{v_j},v_\lambda]]^{(1,0)}(p)+v_k\overline{v_j}(f_{i\lambda})v_\lambda(p)\\ =&f_{i\lambda}[v_k,[\overline{v_j},v_\lambda]^{(1,0)}]^{(1,0)}(p)+v_k\overline{v_j}(f_{i\lambda})v_\lambda(p)\\ =&f_{i\lambda}[v_k,c_{\lambda\bar j}^\mu v_\mu]^{(1,0)}(p)+v_k\overline{v_j}(f_{i\lambda})v_\lambda(p)\\ =&f_{i\lambda}c_{\lambda\bar j}^\mu[v_k,v_\mu]^{(1,0)}(p)+f_{i\lambda}v_k(c_{\lambda\bar j}^\mu)v_\mu(p)+v_k\overline{v_j}(f_{i\lambda})v_\lambda(p)\\ =&(f_{i\lambda}v_k(c_{\lambda\bar j}^\mu)+v_k\overline{v_j}(f_{i\mu}))v_\mu(p).\\ \end{split} \end{equation} If we further choose $f_{ij}$ such that $f_{ij}(p)=\delta_{ij}$ and \begin{equation} v_l\overline{v_k}(f_{ij})(p)=-v_{l}(c_{i\bar k}^j)(p), \end{equation} for all $i,j,k$ and $l$, then \begin{equation} [v_k,[\overline{v_j},e_i]]^{(1,0)}(p)=0 \end{equation} for all $i,j$ and $k$. By Lemma \ref{lem-criterion-quasi-holo}, we know that $e_i$ is quasi holomorphic at $p$ for all $i$. This completes the proof.
\end{proof} \section{Frames on almost Hermitian manifolds} In this section, we recall some basic definitions for almost Hermitian manifolds and introduce a notion for almost Hermitian manifolds that is analogous to a normal frame on Hermitian manifolds. \begin{defn}[\cite{k,k2,g}] Let $(M,J)$ be an almost complex manifold. A Riemannian metric $g$ on $M$ such that $g(JX,JY)=g(X,Y)$ for any two tangent vectors $X$ and $Y$ is called an almost Hermitian metric. The triple $(M,J,g)$ is called an almost Hermitian manifold. The two-form $\omega_g(X,Y)=g(JX,Y)$ is called the fundamental form of the almost Hermitian manifold. A connection $\nabla$ on an almost Hermitian manifold $(M,J,g)$ such that $\nabla g=0$ and $\nabla J=0$ is called an almost Hermitian connection. \end{defn} For a connection $\nabla$ on a manifold $M$, recall that the torsion $\tau$ of the connection is a vector-valued two-form defined as \begin{equation} \tau(X,Y)=\nabla_XY-\nabla_YX-[X,Y]. \end{equation} On an almost Hermitian manifold, there are many almost Hermitian connections. However, there is a unique one such that $\tau(X,\overline Y)=0$ for any two $(1,0)$-vectors $X$ and $Y$. Such a connection was first introduced by Ehresmann and Libermann \cite{e}. \begin{defn}[\cite{k,k2}] The unique almost Hermitian connection $\nabla$ on an almost Hermitian manifold $(M,J,g)$ with vanishing $(1,1)$-part of the torsion is called the canonical connection of the almost Hermitian manifold. \end{defn} In this paper, for an almost Hermitian metric, the connection is always chosen to be the canonical connection $\nabla$. Recall that the curvature tensor $R$ of the connection $\nabla$ is defined as \begin{equation} R(X,Y,Z,W)=\langle\nabla_Z\nabla_WX-\nabla_W\nabla_ZX-\nabla_{[Z,W]}X,Y\rangle \end{equation} for any tangent vectors $X,Y,Z$ and $W$. Note that, unlike the curvature tensor of a Hermitian manifold with the Chern connection, the curvature tensor $R$ of the canonical connection may have a non-vanishing $(2,0)$-part, which means that $R(X,\overline{Y}, Z,W)$ may not vanish for $(1,0)$-vectors $X,Y,Z,W$. By the $(1,1)$-part of the curvature tensor $R$, we mean $R(X,\overline Y,Z,\overline W)$ for $(1,0)$-vectors $X,Y,Z$ and $W$. Moreover, $R$ is said to be of nonpositive (negative) holomorphic bisectional curvature if $R(X,\overline X,Y,\overline Y)\leq 0\ (<0)$ for any two nonzero $(1,0)$-vectors $X$ and $Y$. The notion of holomorphic vector fields introduced in the last section is compatible with the canonical connection on almost Hermitian manifolds, analogously to the Hermitian case. \begin{lem}\label{lem-hol-con} $\nabla_{\overline X}Y=[\overline X,Y]^{(1,0)}$ for any two $(1,0)$-vector fields $X$ and $Y$ on an almost Hermitian manifold. \end{lem} \begin{proof} By the definition of the canonical connection and the torsion, we have \begin{equation} \nabla_{\overline X}Y=\nabla_Y\overline X+[\overline X,Y]+\tau(\overline X,Y)=\nabla_Y{\overline X}+[\overline X,Y]. \end{equation} Since $\nabla J=0$, we know that $\nabla_{\overline X}Y$ is a $(1,0)$-vector and $\nabla_Y\overline X$ is a $(0,1)$-vector. Therefore, the conclusion follows by taking $(1,0)$-parts. \end{proof} A local pseudo holomorphic frame on an almost Hermitian manifold plays a similar role to a local holomorphic frame on a Hermitian manifold. More precisely, we have the following formula for the Christoffel symbols of the canonical connection under a local pseudo holomorphic frame.
\begin{lem}\label{lem-christoffel} Let $(M,J,g)$ be an almost Hermitian manifold and \\ $(e_1,e_2,\cdots,e_n)$ be a local pseudo holomorphic frame at $p$. Then $\Gamma_{i\bar j}^k(p)=0$ and $\Gamma_{ij}^k(p)=g^{\bar\mu k}e_j(g_{i\bar\mu})(p)$ for any $i,j$ and $k$. \end{lem} \begin{proof} By Lemma \ref{lem-hol-con}, we know that $\nabla_{\overline{e_j}}e_i(p)=0$. So $\Gamma_{i\bar j}^k(p)=0$ for all $i,j$ and $k$. Moreover, \begin{equation} \begin{split} &e_{j}(g_{i\bar\mu})(p)\\ =&\langle\nabla_{e_j}e_i,\overline{e_\mu}\rangle(p)+\langle e_i,\nabla_{e_j}\overline{e_\mu}\rangle(p)\\ =&\Gamma_{ij}^k(p)g_{k\bar\mu}(p), \end{split} \end{equation} where the second term vanishes since $\nabla_{e_j}\overline{e_\mu}(p)=\overline{\nabla_{\overline{e_j}}e_\mu(p)}=0$. Hence $\Gamma_{ij}^k(p)=g^{\bar\mu k}e_j(g_{i\bar\mu})(p)$ for any $i,j$ and $k$. \end{proof} As on Hermitian manifolds, we have a notion of normal holomorphic frame. \begin{lem}\label{lem-pseudo-normal} Let $(M,J,g)$ be an almost Hermitian manifold. Then, there is a local pseudo holomorphic frame $(e_1,e_2,\cdots,e_n)$ at $p$ such that $\nabla e_i(p)=0$, or equivalently, $d g_{i\bar j}(p)=0$ for all $i$ and $j$. We call such a frame a local pseudo holomorphic normal frame at $p$. \end{lem} \begin{proof} Let $(v_1,v_2,\cdots,v_n)$ be a local pseudo holomorphic frame at $p$ and $(e_1,\cdots,e_n)$ be a local $(1,0)$-frame at $p$. Suppose that \begin{equation} e_i=f_{ij}v_j, \end{equation} where the $f_{ij}$'s are local smooth functions to be determined. We first assume that $\overline{v_{k}}(f_{ij})(p)=0$ for all $i,j$ and $k$. Then, $(e_1,e_2,\cdots,e_n)$ is a local pseudo holomorphic frame at $p$. Moreover, note that \begin{equation} \nabla_{v_k}e_i(p)=v_k(f_{i\mu})(p)v_\mu(p)+f_{ij}(p)\Gamma_{jk}^\mu(p) v_\mu(p), \end{equation} where $\Gamma_{jk}^\mu$ is the Christoffel symbol with respect to the frame $(v_1,v_2,\cdots,v_n)$. So, if we choose $f_{ij}$ such that $f_{ij}(p)=\delta_{ij}$, $\overline{v_k}(f_{ij})(p)=0$ and $v_k(f_{ij})(p)=-\Gamma_{ik}^j(p)$, then $\nabla e_i(p)=0$ for all $i$. \end{proof} By a similar argument as in the proof of Lemma \ref{lem-pseudo-normal}, we have the existence of a so-called local quasi holomorphic normal frame. \begin{lem}\label{lem-quasi-normal} Let $(M,J,g)$ be an almost Hermitian manifold. Then, there is a local quasi holomorphic frame $(e_1,e_2,\cdots,e_n)$ at $p$ such that $\nabla e_i(p)=0$, or equivalently, $d g_{i\bar j}(p)=0$ for all $i$ and $j$. We call such a frame a local quasi holomorphic normal frame at $p$. \end{lem} \section{Curvature of almost Hermitian manifolds} In this section, we derive a formula for the curvature tensor with respect to a quasi holomorphic frame. \begin{lem}\label{lem-part-curv} Let $(M,g,J)$ be an almost Hermitian manifold and $(e_1,\cdots,e_n)$ be a local quasi holomorphic frame at $p\in M$. Then \begin{equation} \nabla_{e_k}\nabla_{\overline{e_j}}e_i(p)=0 \end{equation} for all $i,j$ and $k$. \end{lem} \begin{proof} First, note that \begin{equation}\label{eqn-lie-hol} [\overline{e_j},e_i](p)=\nabla_{\overline{e_j}}e_i(p)-\nabla_{e_i}\overline{e_j}(p)=0 \end{equation} by Lemma \ref{lem-hol-con}.
Moreover, \begin{equation} \begin{split} &\langle\nabla_{e_k}\nabla_{\overline{e_j}}e_i,\overline{e_l}\rangle(p)\\ =&\langle\nabla_{e_k}(\nabla_{e_i}{\overline{e_j}}+[\overline{e_j},e_i]),\overline{e_l}\rangle(p)\\ =&\langle\nabla_{e_k}[\overline{e_j},e_i],\overline{e_l}\rangle(p)\\ =&\langle\nabla_{[\overline{e_j},e_i]}{e_k}+[e_k,[\overline{e_j},e_i]]+\tau(e_k,[\overline{e_j},e_i]),\overline{e_l}\rangle(p)\\ =&\langle[e_k,[\overline{e_j},e_i]]^{(1,0)},\overline{e_l}\rangle(p)\\ =&0. \end{split} \end{equation} This completes the proof since $\nabla_{e_k}\nabla_{\overline{e_j}}e_i(p)$ is a $(1,0)$-vector. \end{proof} We are now ready to compute the $(1,1)$-part of the curvature tensor of an almost Hermitian manifold. \begin{thm}\label{thm-curv} Let $(M,g,J)$ be an almost Hermitian manifold. Let $(e_1,\cdots,e_n)$ be a local quasi holomorphic frame at $p$. Then \begin{equation} R_{i\bar j k\bar l}(p)=-\overline{e_l} e_k(g_{i\bar j})(p)+g^{\bar\mu\lambda}e_k(g_{i\bar\mu})\overline{e_l}(g_{\lambda\bar j})(p). \end{equation} \end{thm} \begin{proof} \begin{equation} \begin{split} &R_{i\bar j k\bar l}(p)\\ =&\langle\nabla_{e_k}\nabla_{\overline{e_l}}e_i,\overline{e_j}\rangle(p)-\langle\nabla_{\overline{e_l}}\nabla_{e_k}e_i,\overline{e_j}\rangle(p)-\langle\nabla_{[e_k,\overline{e_l}]}e_i,\overline{e_j}\rangle(p)\\ =&-\langle\nabla_{\overline{e_l}}\nabla_{e_k}e_i,\overline{e_j}\rangle(p)\\ =&-\overline{e_l}\langle\nabla_{e_k}e_i,\overline{e_j}\rangle(p)+\langle\nabla_{e_k}e_i,\nabla_{\overline{e_l}}\overline{e_j}\rangle(p)\\ =&-\overline{e_l}e_k(g_{i\bar j})(p)+\overline{e_l}\langle e_i,\nabla_{e_k}\overline{e_j}\rangle(p)+\langle\nabla_{e_k}e_i,\nabla_{\overline{e_l}}\overline{e_j}\rangle(p)\\ =&-\overline{e_l}e_k(g_{i\bar j})(p)+\langle e_i,\nabla_{\overline{e_l}}\nabla_{e_k}\overline{e_j}\rangle(p)+\langle\nabla_{e_k}e_i,\nabla_{\overline{e_l}}\overline{e_j}\rangle(p)\\ =&-\overline{e_l} e_k(g_{i\bar j})(p)+g^{\bar\mu\lambda}e_k(g_{i\bar\mu})\overline{e_l}(g_{\lambda\bar j})(p), \end{split} \end{equation} where we have used Lemma \ref{lem-hol-con}, \eqref{eqn-lie-hol}, Lemma \ref{lem-christoffel} and Lemma \ref{lem-part-curv}. \end{proof} By Lemma \ref{lem-quasi-normal}, we have the following direct corollary. \begin{cor}\label{cor-curv-normal} Let $(M,J,g)$ be an almost Hermitian manifold and $(e_1,e_2,\cdots,e_n)$ be a local quasi holomorphic normal frame at $p\in M$. Then \begin{equation} R_{i\bar j k\bar l}(p)=-\overline{e_l}{e_k}(g_{i\bar j})(p). \end{equation} \end{cor} \section{A generalization of Wu's result} In this section, with the help of the curvature formula derived in the last section, we obtain a generalization of Wu's result in \cite{Wu}. \begin{thm} Let $(M,J)$ be an almost complex manifold. Let $g,h$ be two almost Hermitian metrics on $M$. Then $$R^{g+h}(X,\overline X,Y,\overline Y)\leq R^g(X,\overline X,Y,\overline Y)+R^h(X,\overline X,Y,\overline Y)$$ for any two $(1,0)$-vectors $X$ and $Y$, where $R^{g+h},R^g$ and $R^h$ denote the curvature tensors of the metrics $g+h, g$ and $h$ respectively. \end{thm} \begin{proof} Let $p\in M$, and let $(e_1,e_2,\cdots,e_n)$ be a local quasi holomorphic normal frame at $p$ with respect to the almost Hermitian metric $g+h$.
Then, by Theorem \ref{thm-curv} and Corollary \ref{cor-curv-normal}, \begin{equation} \begin{split} &R^{g+h}_{i\bar j k\bar l}(p)\\ =&-\overline{e_l} e_k((g+h)_{i\bar j})(p)\\ =&R^g_{i\bar j k\bar l}(p)+R^h_{i\bar j k\bar l}(p)-g^{\bar\mu\lambda}e_k (g_{i\bar\mu})\overline{e_{l}} (g_{\lambda\bar j})(p)-h^{\bar\mu\lambda}e_k (h_{i\bar\mu})\overline{e_{l}} (h_{\lambda\bar j})(p). \end{split} \end{equation} Hence, \begin{equation} \begin{split} &R^{g+h}(X,\overline X,Y,\overline Y)(p)\\ =&R^{g}(X,\overline X,Y,\overline Y)(p)+R^{h}(X,\overline X,Y,\overline Y)(p)\\ &-g^{\bar\mu\lambda}X^i Y^k e_k(g_{i\bar\mu})\overline{X^jY^l}\overline{e_{l}} (g_{\lambda\bar j})(p)-h^{\bar\mu\lambda}X^i Y^k e_k (h_{i\bar\mu})\overline{X^jY^l} \overline{e_{l}} (h_{\lambda\bar j})(p)\\ \leq&R^{g}(X,\overline X,Y,\overline Y)(p)+R^{h}(X,\overline X,Y,\overline Y)(p).\\ \end{split} \end{equation} \end{proof} Similarly as in \cite{Wu}, we have the following two direct corollaries. \begin{cor} Let $(M,J,g)$ be a compact almost Hermitian manifold with nonpositive (negative) holomorphic bisectional curvature. Then, there is an almost Hermitian metric on $M$ with nonpositive (negative) holomorphic bisectional curvature that is invariant under all the automorphisms of the almost complex structure $J$. \end{cor} \begin{defn} Let $(M,J)$ be an almost complex manifold. Denote the collection of all almost Hermitian metrics with nonpositive holomorphic bisectional curvature by $\mathcal H(M)$. \end{defn} \begin{cor} Let $(M,J)$ be an almost complex manifold with $\mathcal{H}(M)\neq\emptyset$. Then $\mathcal H(M)$ is a convex cone. \end{cor} \section{A generalization of Zheng's result} Let $\alpha$ be a smooth $(r,s)$-form on the almost complex manifold $(M,J)$. Recall that $\bar\partial \alpha=(d\alpha)^{(r,s+1)}$ and that an $(r,0)$-form $\alpha$ is said to be a holomorphic $(r,0)$-form if $\bar\partial\alpha=0$. \begin{lem}\label{lem-holo-lie} An $(r,0)$-form $\alpha$ on an almost complex manifold is holomorphic if and only if $(L_{\overline X}\alpha)^{(r,0)}=0$ for any $(1,0)$-vector field $X$. \end{lem} \begin{proof} By Cartan's formula, \begin{equation} \begin{split} (L_{\overline X}\alpha)^{(r,0)}=(i_{\overline X}d\alpha+di_{\overline X}\alpha)^{(r,0)}=i_{\overline X}\bar\partial\alpha. \end{split} \end{equation} The conclusion follows. \end{proof} \begin{defn} An $(r,0)$-form $\alpha$ on an almost complex manifold is said to be pseudo holomorphic at $p\in M$ if $(L_{\overline X}\alpha)^{(r,0)}(p)=0$ for all $(1,0)$-vector fields $X$. \end{defn} \begin{lem}\label{lem-crt-pseudo-holo-form} Let $(M,J)$ be an almost complex manifold and $(e_1,e_2,\cdots,e_n)$ be a local pseudo holomorphic frame at $p\in M$. Let $(\omega^1,\omega^2,\cdots,\omega^n)$ be the dual frame of $(e_1,e_2,\cdots,e_n)$ and $\alpha$ be an $(r,0)$-form on $M$. Suppose that \begin{equation} \alpha=\sum_{i_1<i_2<\cdots<i_r}\alpha_{i_1i_2\cdots i_r}\omega^{i_1}\wedge\omega^{i_2}\wedge\cdots\wedge\omega^{i_r}.
\end{equation} Then, $\alpha$ is pseudo holomorphic at $p$ if and only if $\bar v(\alpha_{i_1i_2\cdots i_r})(p)=0$ for all $v\in T^{1,0}_pM$ and any indices $i_1<i_2<\cdots<i_r$. \end{lem} \begin{proof} Let $X$ be a $(1,0)$-vector field with $X(p)=v$. Then, for any indices $i_1<i_2<\cdots<i_r$, \begin{equation} \begin{split} &(L_{\overline{X}}\alpha)(e_{i_1},e_{i_2},\cdots,e_{i_r})(p)\\ =&\overline{X}(\alpha(e_{i_1},e_{i_2},\cdots,e_{i_r}))(p)-\alpha(L_{\overline{X}}e_{i_1}(p),e_{i_2}(p),\cdots,e_{i_r}(p))-\cdots\\ &-\alpha(e_{i_1}(p),e_{i_2}(p),\cdots,L_{\overline{X}}e_{i_r}(p))\\ =&\bar v(\alpha_{i_1i_2\cdots i_r})(p)-\alpha([\overline{X},e_{i_1}]^{(1,0)}(p),e_{i_2}(p),\cdots,e_{i_r}(p))-\cdots\\ &-\alpha(e_{i_1}(p),e_{i_2}(p),\cdots,[\overline{X},e_{i_r}]^{(1,0)}(p))\\ =&\bar v(\alpha_{i_1i_2\cdots i_r})(p). \end{split} \end{equation} This completes the proof. \end{proof} \begin{thm}\label{thm-curv-2} Let $(M,J,g)$ be an almost Hermitian manifold and $\alpha$ be a holomorphic $(1,0)$-form on $M$. Let $h$ be another metric on $M$ defined by \begin{equation} h(u,v)=g(u,v)+\alpha(u)\bar{\alpha}(v) \end{equation} for any two tangent vectors $u$ and $v$. Then $h$ is an almost Hermitian metric on $M$. Moreover, \begin{equation} R^h(X,\overline X,Y,\overline{Y})\leq R^g(X,\overline X,Y,\overline Y) \end{equation} for any $(1,0)$-vectors $X$ and $Y$. \end{thm} \begin{proof} It is easy to check that $h$ is an almost Hermitian metric by definition. Fix $p\in M$. Let $(e_1,e_2,\cdots,e_n)$ be a local quasi holomorphic normal frame at $p$ for the almost Hermitian metric $h$. Then, $h_{i\bar j}=g_{i\bar j}+\alpha_i\overline{\alpha_j}$. Since $\alpha$ is holomorphic on $M$, we have \begin{equation} \begin{split} 0=&\langle L_{\overline{e_j}}\alpha,e_i\rangle=\overline{e_j}\langle\alpha,e_i\rangle-\langle\alpha,L_{\overline{e_j}}e_i\rangle=\overline{e_j}(\alpha_i)-\langle\alpha,\nabla_{\overline{e_j}}e_i\rangle.\\ \end{split} \end{equation} Therefore, \begin{equation} \overline{e_j}e_k(\alpha_i)(p)=e_k\overline{e_j}(\alpha_i)(p)=e_k\langle\alpha,\nabla_{\overline{e_j}}e_i\rangle(p)=\langle\alpha,\nabla_{e_k}\nabla_{\overline{e_j}}e_i(p)\rangle=0\\ \end{equation} by Lemma \ref{lem-part-curv} and $[\overline{e_j},e_k](p)=0$. Hence, by Theorem \ref{thm-curv} and Corollary \ref{cor-curv-normal}, \begin{equation} \begin{split} &R^h_{i\bar j k\bar l}(p)\\ =&-\overline{e_{l}}e_k(h_{i\bar j})(p)\\ =&-\overline{e_{l}}e_k(g_{i\bar j}+\alpha_i\overline{\alpha_j})(p)\\ =&-\overline{e_l}e_k(g_{i\bar j})(p)-e_k(\alpha_i)\overline{e_l(\alpha_j)}(p)-\overline{e_l}{e_k}(\alpha_i)\overline{\alpha_j}(p)-\alpha_i\overline{e_l\overline{e_k}(\alpha_j)}(p)\\ =&R^g_{i\bar j k\bar l}(p)-g^{\bar\mu\lambda}e_k(g_{i\bar\mu})\overline{e_{l}}(g_{\lambda\bar j})(p)-e_k(\alpha_i)\overline{e_l(\alpha_j)}(p). \end{split} \end{equation} This completes the proof by proceeding in the same way as in the proof of the theorem in the previous section. \end{proof} \begin{defn} Let $(M,J)$ be a compact almost complex manifold. Denote the space of holomorphic $(1,0)$-forms on $M$ by $H^{1,0}(M)$. \end{defn} \begin{lem}\label{lem-double-holo-form} Let $M^{2m}$ and $N^{2n}$ be two compact almost complex manifolds.
Let $\phi_1,\phi_2,\cdots, \phi_r$ be a basis of $H^{1,0}(M)$ and $\psi_1,\psi_2,\cdots,\psi_s$ be a basis of $H^{1,0}(N)$. Let $\rho$ be a holomorphic $(2,0)$-form on $M\times N$ which locally has the form \begin{equation} \rho=\sum_{i=1}^m\sum_{j=1}^n\rho_{ij}\alpha^i\wedge\beta^j \end{equation} where $(\alpha^1,\alpha^2,\cdots,\alpha^m)$ and $(\beta^1,\beta^2,\cdots,\beta^n)$ are local $(1,0)$-frames on $M$ and $N$ respectively and the $\rho_{ij}$'s are local smooth functions on $M\times N$. Then, there is a unique $r\times s$ matrix $(a_{kl})$ of complex numbers such that \begin{equation} \rho=\sum_{k=1}^r\sum_{l=1}^sa_{kl}\phi_k\wedge\psi_l. \end{equation} \end{lem} \begin{proof} Fix $y\in N$, and let $(e_1,e_2,\cdots,e_n)$ be a local pseudo holomorphic frame of $N$ at $y$ and $(\omega^1,\omega^2,\cdots,\omega^n)$ be its dual frame. Then, \begin{equation} \rho=\sum_{j=1}^n\theta_j(y)\wedge \omega^j(y) \end{equation} where the $\theta_j(y)$'s are $(1,0)$-forms on $M=M\times\{y\}$. By Lemma \ref{lem-crt-pseudo-holo-form}, it is easy to check that $\theta_j(y)$ is a holomorphic $(1,0)$-form on $M$ for any $j$. Then \begin{equation} \theta_j(y)=\sum_{k=1}^rb_{kj}(y)\phi_k. \end{equation} Therefore \begin{equation} \rho=\sum_{k=1}^r\phi_k\wedge \sum_{j=1}^n b_{kj}(y)\omega^j(y). \end{equation} It is easy to check that $\sum_{j=1}^n b_{kj}(y)\omega^j(y)$ is a holomorphic $(1,0)$-form on $N$. Hence \begin{equation} \sum_{j=1}^n b_{kj}(y)\omega^j(y)=\sum_{l=1}^sa_{kl}\psi_l \end{equation} where the $a_{kl}$'s are complex numbers. This completes the proof. \end{proof} With the help of Lemma \ref{lem-double-holo-form} and the curvature formula in Theorem \ref{thm-curv}, the same argument as in \cite{Yu} gives us the following classification of almost Hermitian metrics on products of compact almost complex manifolds, which generalizes the result of Zheng \cite{Zheng} and a previous result of the author \cite{Yu}. \begin{thm}\label{thm-class-prod} Let $M$ and $N$ be compact almost complex manifolds. Let $\phi_1,\phi_2,\cdots, \phi_r$ be a basis of $H^{1,0}(M)$ and $\psi_1,\psi_2,\cdots,\psi_s$ be a basis of $H^{1,0}(N)$. Then, for any almost Hermitian metric $h$ on $M\times N$ with nonpositive holomorphic bisectional curvature, \begin{equation*} \omega_h=\pi_1^{*}\omega_{h_1}+\pi_2^{*}\omega_{h_2}+\rho+\bar\rho \end{equation*} where $h_1$ and $h_2$ are almost Hermitian metrics on $M$ and $N$ with nonpositive holomorphic bisectional curvature respectively, $\pi_1$ and $\pi_2$ are the natural projections from $M\times N$ to $M$ and from $M\times N$ to $N$ respectively, and \begin{equation*} \rho=\sqrt{-1}\sum_{k=1}^{r}\sum_{l=1}^{s}a_{kl}\phi_k\wedge\overline{\psi_l} \end{equation*} where the $a_{kl}$'s are complex numbers. \end{thm} Similarly as in \cite{Yu}, with the help of Theorem \ref{thm-curv-2}, we can obtain the following corollary. \begin{cor} Let $M$ and $N$ be two almost Hermitian manifolds with $\mathcal H(M)\neq\emptyset$ and $\mathcal H(N)\neq\emptyset$.
Then $$\mbox{codim}_\mathbb{R}(\mathcal{H}(M)\times \mathcal{H}(N),\mathcal{H}(M\times N))=2\dim H^{1,0}(M)\cdot \dim H^{1,0}(N).$$ \end{cor} \begin{thebibliography}{99} \bibitem{AD} Apostolov, Vestislav; Dr\u{a}ghici, Tedi. {\sl The curvature and the integrability of almost-K\"ahler manifolds: a survey.} Symplectic and contact topology: interactions and perspectives (Toronto, ON/Montreal, QC, 2001), 25--53, Fields Inst. Commun., 35, Amer. Math. Soc., Providence, RI, 2003. \bibitem{Chern} Chern, S.-S. {\it Characteristic classes of Hermitian manifolds}, Ann. of Math. (2) \textbf{47} (1946), no. 1, 85--121. \bibitem{Donaldson} Donaldson, S. K. {\sl Two-forms on four-manifolds and elliptic equations.} Inspired by S. S. Chern, 153--172, Nankai Tracts Math., 11, World Sci. Publ., Hackensack, NJ, 2006. \bibitem{e} Ehresmann, C.; Libermann, P. {\it Sur les structures presque hermitiennes isotropes}, C. R. Acad. Sci. Paris \textbf{232} (1951), 1281--1283. \bibitem{FTY} Fan, Xu-Qian; Tam, Luen-Fai; Yu, Chengjie. {\sl Product of almost Hermitian manifolds.} arXiv:1109.2498 \bibitem{g} Gauduchon, P. {\it Hermitian connections and Dirac operators}, Boll. Unione Mat. Ital. B \textbf{11} (1997), no. 2, suppl., 257--288. \bibitem{Goldberg} Goldberg, S. I. {\sl Integrability of almost Kaehler manifolds.} Proc. Amer. Math. Soc. 21 (1969), 96--100. \bibitem{Gray1} Gray, Alfred. {\sl Curvature identities for Hermitian and almost Hermitian manifolds.} Tohoku Math. J. (2) 28 (1976), no. 4, 601--612. \bibitem{Gray2} Gray, Alfred. {\sl Nearly K\"ahler manifolds.} J. Differential Geometry 4 (1970), 283--309. \bibitem{Gray3} Gray, Alfred. {\sl Minimal varieties and almost Hermitian submanifolds.} Michigan Math. J. 12 (1965), 273--287. \bibitem{k} Kobayashi, S. {\it Almost complex manifolds and hyperbolicity}, Results Math. \textbf{40} (2001), no. 1-4, 246--256. \bibitem{k2} Kobayashi, S. {\it Natural connections in almost complex manifolds}, Explorations in complex and Riemannian geometry, 153--169, Contemp. Math., 332, Amer. Math. Soc., Providence, RI, 2003. \bibitem{Na1} Nagy, Paul-Andi. {\sl Nearly K\"ahler geometry and Riemannian foliations.} Asian J. Math. 6 (2002), no. 3, 481--504. \bibitem{Na2} Nagy, Paul-Andi. {\sl On nearly-K\"ahler geometry.} Ann. Global Anal. Geom. 22 (2002), no. 2, 167--178. \bibitem{Sekigawa} Sekigawa, Kouei. {\sl On some 4-dimensional compact Einstein almost K\"ahler manifolds.} Math. Ann. 271 (1985), no. 3, 333--337. \bibitem{SZ} Seshadri, H.; Zheng, F. {\it Complex product manifolds cannot be negatively curved}, Asian J. Math. \textbf{12} (2008), no. 1, 145--149. \bibitem{TY} Tam, Luen-Fai; Yu, Chengjie. {\sl Complex product manifolds and bounds of curvature}, Asian J. Math. \textbf{14} (2010), no. 2, 235--242. \bibitem{vt} Tosatti, V. {\it A general Schwarz lemma for almost-Hermitian manifolds}, Comm. Anal. Geom. \textbf{15} (2007), no. 5, 1063--1086. \bibitem{twy} Tosatti, V.; Weinkove, B.; Yau, S. T. {\it Taming symplectic forms and the Calabi-Yau equation}, Proc. London Math. Soc. \textbf{97} (2008), no. 3, 401--424. \bibitem{Vezzoni} Vezzoni, Luigi. {\sl A generalization of the normal holomorphic frames in symplectic manifolds.} Boll. Unione Mat. Ital. Sez. B Artic. Ric. Mat. (8) 9 (2006), no. 3, 723--732.
\bibitem{Wu} Wu, H. {\sl A remark on holomorphic sectional curvature.} Indiana Univ. Math. J. 22 (1972/73), 1103--1108. \bibitem{Yau} Yau, S. T. {\sl A general Schwarz lemma for K\"ahler manifolds.} Amer. J. Math. 100 (1978), no. 1, 197--203. \bibitem{Yu} Yu, Chengjie. {\sl Nonpositively curved Hermitian metrics on product manifolds.} Proc. Amer. Math. Soc. 139 (2011), no. 4, 1469--1472. \bibitem{Zheng} Zheng, Fangyang. {\sl Non-positively curved K\"ahler metrics on product manifolds.} Ann. of Math. (2) 137 (1993), no. 3, 671--673. \end{thebibliography} \end{document}
\begin{document} \begin{abstract} We compute the convex hull $\Pi$ of an arbitrary finite subgroup $\Gamma$ of ${\mathbb{C}^*}^2$ --- or equivalently, of a generic orbit of the action of $\Gamma$ on $\mathbb{C}^2$. The basic case is $\Gamma=\{(e^{2ik\pi/q},e^{2ikp\pi/q})~|~0\leq k<q\}$ where $p\in\llbracket 2,q-2\rrbracket$ is coprime to $q$: then, $\Pi$ projects to a canonical or ``Delaunay'' triangulation $\mathcal{D}$ of the lens space $L_{p/q}=\mathbb{S}^3/\Gamma$ (endowed with its spherical metric), and the combinatorics of $\mathcal{D}$ are dictated by the continued fraction expansion of $p/q$. \end{abstract} \title{Delaunay triangulations of lens spaces} \section{Introduction} Given a compact pointed Riemannian $3$--manifold $(M,x_0)$, a natural object to construct is the Voronoi domain of $x_0$, i.e.~the set $X$ of all points $x$ such that the shortest path from $x$ to $x_0$ is unique. This domain $X$ can be embedded as a contractible subset of the universal cover $\widetilde{M}$ of $M$; if $M$ is homogeneous, then $X$ is typically (though not always) the interior of a polyhedron whose faces are glued in pairs to yield $M$. If so, dual to $X$ (and this gluing data) is the so-called Delaunay decomposition $\mathcal{D}$ of $M$, which comprises one cell per vertex of $X$, and has only one vertex, namely $x_0$. If $\widetilde{M}$ is $\mathbb{S}^3$ or $\mathbb{R}^3$ or $\mathbb{H}^3$, it is a classical result that $\mathcal{D}$ is itself realized by geodesic polyhedra which tile $M$. A strong motivation for studying the Delaunay decomposition is that it is a combinatorial invariant of $(M,x_0)$ that encodes all the topology of $M$; this also suggests that computing $\mathcal{D}$ is hard in general. Jeff Weeks' program SnapPea \cite{snappea} achieves this numerically in the cusped hyperbolic case (taking $x_0$ in the cusp); for explicit theoretical predictions of $\mathcal{D}$ in special cases, see for example \cite{these, aswy, lackenby, qf, ananas}. This paper is primarily concerned (Sections \ref{sec:prelim} through \ref{sec:proof}) with the case $M=\mathbb{S}^3/\varphi$, where $$\varphi(z,z')=\left (e^{\frac{2i\pi}{q}}z,e^{\frac{2ip\pi}{q}}z'\right )$$ and $\mathbb{S}^3$ is seen as the unit sphere of $\mathbb{C}^2$. Here, $\frac{p}{q}$ is a rational of $(0,1)$ in reduced form, and $M$ is called the \emph{lens space} $L_{p/q}$. We will show that the combinatorics of $\mathcal{D}$ (and $X$) are dictated by the continued fraction expansion of $\frac{p}{q}$ (and are independent of the choice of basepoint $x_0$). The lift of $\mathcal{D}$ to $\mathbb{S}^3$ is the Delaunay decomposition of $\mathbb{S}^3$ with respect to a \emph{finite set} $\langle \varphi \rangle \widetilde{x}_0$ of vertices. Finally, in Section \ref{sec:general}, we extend our results to the case where $\langle \varphi \rangle$ is replaced by an arbitrary finite subgroup of $\mathbb{S}^1\times \mathbb{S}^1$ (possibly non-cyclic, acting possibly with fixed points on $\mathbb{S}^3$). \subsection*{History} After the first version of this paper was posted, G\"unter M. Ziegler made me aware of Smilansky's paper \cite{smilansky2} where essentially the same results were proven. 
The approaches are similar, except for the key result: we prove the convexity of a certain plane curve $\gamma$ by a big computation (Claim \ref{cla:key}); Smilansky in \cite{smilansky2} seems unaware that $\gamma$ is always convex, but has a clever lemma (proved in \cite{smilansky1}) to show that $\gamma$ behaves ``as though it were convex'' with respect to certain intersecting lines. Note that Sergei Anisov has also announced similar results in \cite{anisov1, anisov2}. \section{Preliminaries} \label{sec:prelim} Let $x_0$ be a point of $\mathbb{S}^3$ and $\mathcal{O}\subset \mathbb{S}^3$ its $\langle \varphi \rangle$--orbit. Suppose that the convex hull $\Pi$ of $\mathcal{O}$ has non-empty interior. It is well-known that the boundary of $\Pi$ then decomposes into affine cells, whose projections to $\mathbb{S}^3$ (from the origin) are precisely the cells of the Delaunay decomposition $\mathcal{D}$. Therefore, all we have to do is to determine the faces of the convex hull $\Pi$ of $\mathcal{O}$: these are Theorems \ref{thm:main} and \ref{thm:nofaces} below. \subsection{What is the generic case?} However, if $p\equiv\pm 1~[\text{mod } q]$, then any orbit $\mathcal{O}$ of $\varphi$ is a regular polygon contained in a plane of $\mathbb{R}^4\simeq\mathbb{C}^2$, which easily implies that the Voronoi domain $X$ of $L_{p/q}$ (for any basepoint) is bounded by only two spherical caps (this is a special case where $X$ is \emph{not} a proper spherical polyhedron). It is also easy to see that the isometry group of $L_{p/q}$ acts transitively on $L_{p/q}$ in that case. Therefore, we will assume $p\notin\{1,q-1\}$. Then, the identity component of the isometry group of $L_{p/q}$ lifts to the group $G=\mathbb{S}^1\times \mathbb{S}^1$ acting diagonally on $\mathbb{C}^2$ (of course, $\varphi \in G$). The $G$-orbits in $\mathbb{S}^3$ are the tori $\{(z,z')~|~\frac{|z'|}{|z|}=\kappa \}$ for $\kappa \in \mathbb{R}_+^*$, and the circles $C=\{0\}\times \mathbb{S}^1$ and $C'=\mathbb{S}^1\times \{0\}$. If $x_0\in C\cup C'$, then the orbit $\mathcal{O}=\langle \varphi \rangle x_0$ is a plane regular polygon, so the Voronoi domain $X$ is again bounded by two spherical caps. Therefore, we will be concerned with the generic case $x_0\in\mathbb{S}^3\smallsetminus (C\cup C')$. Since changing $x_0$ only modifies its orbit $\mathcal{O}$ (and therefore the polyhedron $\Pi$) by a diagonal automorphism of $\mathbb{C}^2$, all basepoints $x_0 \notin C\cup C'$ are equivalent as regards the combinatorics of $\Pi$ and of the Delaunay decomposition. In fact $x_0$ does not even need to belong to the \emph{unit} sphere: for convenience, we will take $x_0=(1,0,1,0)\in\sqrt{2}\mathbb{S}^3$ in Theorem \ref{thm:main}. \subsection{An intuitive description of the triangulation} Clearly, $L_{p/q}$ is obtained by gluing two solid tori $\{(z,z')\in \mathbb{S}^3~|~\frac{|z|}{|z'|}\geq 1\}/\varphi$ and $\{(z,z')\in \mathbb{S}^3~|~\frac{|z|}{|z'|}\leq 1\}/\varphi$, boundary-to-boundary. Equivalently, $L_{p/q}$ is a thickened torus $(\mathbb{S}^1)^2\times [0,1]$, attached to two thickened disks (one for each boundary component, along possibly very different slopes $s,s'$) and capped off with two balls. We now sketch a way of triangulating $L_{p/q}$ that emulates this construction: although it will not be needed in the sequel, it might provide some geometric intuition (the triangulation described here will turn out to be combinatorially equivalent to the Delaunay decomposition of $L_{p/q}$). 
Consider the standard unit torus $T:=\mathbb{R}^2/\mathbb{Z}^2$ decomposed into two simplicial triangles, $(0,0)(0,1)(1,1)$ and $(0,0)(1,0)(1,1)$. We can simplicially attach two faces of a tetrahedron $\Delta$ to $T$, so that $\Delta$ materializes an \emph{exchange of diagonals} in the unit square. The union $T\cup\Delta$ is now a (partially) thickened torus, whose top and bottom boundaries are triangulated in two different ways. We can attach a new tetrahedron $\Delta'$, e.g. to the top boundary, so as to perform a new exchange of diagonals. Iterating the process many times, we can obtain a triangulation of (possibly a retract of) $T\times [0,1]$ with top and bottom triangulated (into two triangles each) in two essentially arbitrary ways. Finally, there exists a standard way of folding up the top boundary $T\times \{1\}$ on itself, identifying its two triangles across an edge: this was perhaps first formulated that way in \cite{jaco}. The result after folding-up is a \emph{solid torus}, also described with many pictures in \cite{ananas}. (In that paper, we show that such triangulated solid tori also arise naturally in the Delaunay decompositions of many \emph{hyperbolic} manifolds, namely, large ``generic'' Dehn fillings.) If we fold up the bottom $T\times \{0\}$ in a similar way, it turns out we can get any $L_{p/q}$ with $p\equiv \hspace{-9pt} \slash \hspace{4pt} \pm 1 ~[\text{mod }q]$. The main theorems below (\ref{thm:main} and \ref{thm:nofaces}) describe this same triangulation in a way that is self-contained and completely explicit, although perhaps less synthetic or helpful than the process described above. The interested reader may infer the equivalence of the two descriptions from the proof of Theorem \ref{thm:nofaces}; see also \cite{ananas}. \subsection{Strategy} Let $\mathbb{T}:=(\mathbb{R}/2\pi\mathbb{Z})^2$ be the standard torus and $\iota:\mathbb{T}\rightarrow \mathbb{C}^2\simeq\mathbb{R}^4$ denote the standard injection, satisfying $$\iota(u,v)=(\cos u, \sin u, \cos v, \sin v).$$ The subgroup $\Gamma:=\{\tau_k=(k\frac{2\pi}{q}, kp\frac{2\pi}{q})\}_{k\in \mathbb{Z}}$ of $\mathbb{T}$ is such that $\iota(\Gamma)=\mathcal{O}$, the orbit of $(1,0,1,0)\in \mathbb{R}^4$ under $\varphi$. Therefore, each top-dimensional cell (tetrahedron, as it turns out) in $\partial \Pi$ is spanned by the images under $\iota$ of four points $\tau,\tau',\tau'',\tau'''$ of $\Gamma$. Our main result, Theorem \ref{thm:main}, claims that $\tau,\dots,\tau'''$ are the vertices of certain \emph{parallelograms} of $\mathbb{T}$ with the minimal possible area, namely $\frac{(2\pi)^2}{q}$. To prove this, the strategy is to consider a linear form $\rho:\mathbb{R}^4\rightarrow \mathbb{R}$ that takes the same value, say $Z>0$, on $\iota(\tau),\dots,\iota(\tau''')$; then look (e.g. in the chart $[-\pi,\pi]^2$) at the level curve $\gamma=(\rho\circ\iota)^{-1}(Z)$. Lemma \ref{lem:convex} says that if $Z$ and the coefficients of $\rho$ satisfy certain inequalities, then $\gamma$ is a \emph{convex} Jordan curve passing through $\tau,\dots,\tau'''$. Intuitively, if the hyperplane $\rho^{-1}(Z)$ passes \emph{far enough} from the origin of $\mathbb{R}^4$ (in a sense depending on the direction of $\ker \rho$), it will only skim a small cap off $\iota(\mathbb{T})$ that looks convex in the chart. Convexity is key: it will imply that no other point of $\Gamma$ than $\tau,\dots,\tau'''$ lies inside $\gamma$, i.e.~in $(\rho\circ\iota)^{-1}[Z,+\infty)$. 
In other words, $\rho^{-1}(Z)\supset \iota(\{\tau,\dots,\tau'''\})$ is a supporting plane of the convex hull of $\iota(\Gamma)=\mathcal{O}$. Proving that $Z$ and the coefficients of $\rho$ satisfy the inequalities of Lemma \ref{lem:convex} will be the trickier part of the work, done in Section \ref{sec:proof} using only basic trigonometry. \subsection{Notation} Until the end of Section \ref{sec:proof}, we fix $q\geq 5$ and $p\in \llbracket 2,q-2 \rrbracket$ coprime to $q$, so that $Q:=\frac{p}{q}$ is a rational of $(0,1)$ in reduced form. We denote by $x_0$ the point $(1,1)$ of $\mathbb{C}^2$, and by $x_k$ the $k$-th iterate of $x_0$ under the map $\varphi: (z,z') \mapsto ( e^{\frac{2i\pi}{q}} z, e^{\frac{2ip\pi}{q}} z')$. Finally we let $\Pi$ be the convex hull of $x_0,\dots,x_{q-1}$. We identify $\mathbb{R}^4$ with $\mathbb{C}^2$ in the standard way. The transpose of a matrix $M$ is written $M^t$. By \emph{Farey graph}, we mean the graph obtained by connecting two rationals $\frac{\alpha}{a}, \frac{\beta}{b}$ of $\mathbb{P}^1\mathbb{R}=\partial_{\infty}\mathbb{H}^2$ by a geodesic line in $\mathbb{H}^2$ whenever $|\alpha b -\beta a|=1$ (this graph consists of the ideal triangle $01\infty$ reflected in its sides \emph{ad infinitum}, and $\text{PSL}_2\mathbb{Z}\subset \text{PSL}_2\mathbb{R}\simeq \text{Isom}^+(\mathbb{H}^2)$ acts faithfully and transitively on oriented edges). In particular, two rationals connected by a Farey edge are called \emph{Farey neighbors}. Refer to \cite{farey} for the classical casting of continued fractions in terms of the Farey graph. \section{Main result: description of the faces of $\Pi$} \label{sec:mainresult} \begin{theorem} Let $A=\frac{\alpha}{a}, B=\frac{\beta}{b} \in [0,1]$ be Farey neighbors such that $Q=\frac{p}{q}$ lies strictly between $A$ and $B$, at most one of $A,B$ is a Farey neighbor of $Q$, and at most one of $A,B$ is a Farey neighbor of $\infty$ (i.e.~belongs to $\{0,1\}$). Then $x_0,x_a,x_b,x_{a+b}$ span a top-dimensional cell (tetrahedron) of $\Pi$. \label{thm:main} \end{theorem} Note that in the simplest case $\frac{p}{q}=\frac{2}{5}$, there is only one pair $\{\frac{\alpha}{a},\frac{\beta}{b}\}=\{\frac{0}{1},\frac{1}{2}\}$, so that $\{a,b\}=\{1,2\}$. Theorem \ref{thm:main} will be proved in Section \ref{sec:proof}. Meanwhile, we check (Theorem \ref{thm:nofaces}) that there are no \emph{other} top-dimensional faces in $\partial \Pi$. Note that we make no assumption on whether $A<B$ or $B<A$, or on whether $a<b$ or $b<a$ (all four possibilities can arise), so we will always be able to switch $A$ and $B$ for convenience. \begin{remark} \label{rem:order} It is well-known that the number of unordered pairs of rationals $\{\frac{\alpha}{a},\frac{\beta}{b}\}$ satisfying the hypotheses of Theorem \ref{thm:main} is $n-3$, where $n$ is the sum of all coefficients of the continued fraction expansion of $Q$. Moreover, these pairs are naturally ordered: the first pair is $\{\frac{0}{1},\frac{1}{2}\}$ or $\{\frac{1}{2},\frac{1}{1}\}$ according to the sign of $Q-\frac{1}{2}$; the pair coming after $\{\frac{\alpha}{a},\frac{\beta}{b}\}$ is either $\{\frac{\alpha}{a},\frac{\alpha+\beta}{a+b}\}$ or $\{\frac{\alpha+\beta}{a+b},\frac{\beta}{b}\}$. Reversing this, the pair coming \emph{before} $\{\frac{\alpha}{a},\frac{\beta}{b}\}$ is $\{\frac{\min (\alpha,\beta)}{\min(a,b)},\frac{|\alpha-\beta|}{|a-b|}\}$.
The last pair $\{\frac{\alpha}{a},\frac{\beta}{b}\}$ contains exactly one Farey neighbor of $\frac{p}{q}$ and is such that $\frac{\alpha+\beta}{a+b}$ is another Farey neighbor of $\frac{p}{q}$: therefore that last pair satisfies either $\frac{\alpha+(\alpha+\beta)}{a+(a+b)}=\frac{p}{q}$ or $\frac{(\alpha+\beta)+\beta}{(a+b)+b}=\frac{p}{q}$. \end{remark} \begin{theorem} \label{thm:nofaces} All top-dimensional faces of $\Pi$ are tetrahedra whose vertices are of the form $x_n x_{n+a} x_{n+b} x_{n+a+b}$ with $a,b$ as in Theorem \ref{thm:main}, and $n\in\mathbb{Z}$. \end{theorem} \begin{proof} Assuming Theorem \ref{thm:main}, it is enough to find a tetrahedron of the given form, adjacent to every face of the tetrahedron $T_{a,b}:=x_0 x_a x_b x_{a+b}$ (but possibly with a different pair $\{a,b\}$). First, the faces of $T_{a,b}$ obtained by dropping $x_0$ or $x_{a+b}$ indeed have neighbors: If $T_{a,b}$ is the first tetrahedron for the ordering, Remark \ref{rem:order} implies $T_{a,b}=T_{1,2}=x_0x_1x_2x_3$. The face $x_0x_1x_2$ of $T_{a,b}$ (obtained by dropping $x_3$) is adjacent to $\varphi^{-1}(T_{a,b})=x_{-1}x_0x_1x_2$, and similarly the face $x_1x_2x_3$ obtained by dropping $x_0$ is adjacent to $\varphi(T_{a,b})=x_1x_2x_3x_4$. If $T_{a,b}$ is \emph{not} the first tetrahedron, then we can assume $a<b$ and by Remark \ref{rem:order} there is a previous tetrahedron $T_{b-a,a}$. The face $x_0 x_a x_b$ of $T_{a,b}$ is adjacent to $T_{b-a,a}=x_0 x_{b-a} x_a x_b$; the face $x_a x_b x_{a+b}$ of $T_{a,b}$ is adjacent to $\varphi^a(T_{b-a,a})=x_a x_b x_{2a} x_{a+b}$. Lastly, the faces of $T_{a,b}$ obtained by dropping $x_a$ or $x_b$ also have neighbors: If $T_{a,b}$ is the last tetrahedron, then Remark \ref{rem:order} implies $a+2b=q$ (up to switching $a,b$), hence $x_{a+b}=x_{-b}$ (because $x_q=x_0$). Therefore the face $x_0 x_a x_{a+b}=x_0 x_a x_{-b}$ of $T_{a,b}$ is adjacent to $\varphi^{-b}(T_{a,b})=x_{-b}x_{a-b}x_0 x_a$, and the face $x_0 x_b x_{a+b}=x_{a+2b} x_b x_{a+b}$ of $T_{a,b}$ is adjacent to $\varphi^{b}(T_{a,b})=x_{b}x_{a+b}x_{2b} x_{a+2b}$. If $T_{a,b}$ is \emph{not} the last tetrahedron, then up to switching $a,b$ there is, by Remark 2, a next tetrahedron $T_{a,a+b}$. Therefore the face $x_0 x_a x_{a+b}$ of $T_{a,b}$ is adjacent to $T_{a,a+b}=x_0 x_a x_{a+b} x_{2a+b}$, and the face $x_0 x_b x_{a+b}$ of $T_{a,b}$ is adjacent to $\varphi^{-a}(T_{a,a+b})=x_{-a} x_0 x_b x_{a+b}$. \end{proof} \section{Main tools} \label{sec:tools} Under the assumptions of Theorem \ref{thm:main}, and before we start its proof proper, let us introduce some tools. These are of two types: arithmetic properties of the integers appearing in the Farey diagram (Section \ref{sec:farey}), and geometric properties of the standard embedding $\iota$ of $\mathbb{S}^1\times \mathbb{S}^1$ into $\mathbb{R}^2\times \mathbb{R}^2$ (especially its intersections with hyperplanes), in Section \ref{sec:tori}. \subsection{Farey relationships on integers} \label{sec:farey} Let $X=\frac{\xi}{x}=\frac{\alpha+\beta}{a+b}$ and $Y=\frac{\eta}{y}=\frac{|\alpha-\beta|}{|a-b|}$ be the two common Farey neighbors of $A$ and $B$ ($X$ is closer to $Q$ while $Y$ is closer to $\infty=\frac{1}{0}$; we have $X,Y\in[0,1]$). We introduce the notation $$\frac{u}{v} \wedge \frac{s}{t}:=|ut-vs|$$ for any two rationals $\frac{u}{v}, \frac{s}{t}$ in reduced form. For example, if $h,h'$ are rational, then $h\wedge h'=1$ if and only if $h,h'$ are Farey neighbors; moreover, the denominator of $h$ is always equal to $h\wedge \infty$. 
\noindent We thus have $\left \{\begin{array}{c} a=A\wedge\infty \\ b=B\wedge\infty \\ x=X\wedge\infty \\ y=Y\wedge\infty \\ q=Q\wedge\infty \end{array} \right .$ and we define $\left \{\begin{array}{c} a':=A\wedge Q \\ b':=B\wedge Q \\ x':=X\wedge Q \\ y':=Y\wedge Q \end{array} \right .$, all positive. \begin{proposition} \label{prop:abab} One has $\left \{\begin{array}{ccc} a+b&=&x \\ |a-b|&=&y \end{array} \right .$, and $\left \{ \begin{array}{ccc}a'+b'&=&y' \\ |a'-b'|&=& x' \end{array}\right .$, and $$a'b+b'a=q.$$ \end{proposition} \begin{proof} The first two identities are obvious from the definitions of $X,Y$. For the next two identities, notice that $\alpha q-ap$ and $\beta q-bp$ have opposite signs, because $Q$ lies between $A$ and $B$~: therefore $$a'+b'=|\alpha q - ap| + |\beta q - bp|=|(\alpha q-ap)-(\beta q-bp)|=|(\alpha-\beta)q-(a-b)p|=Y\wedge Q~;$$ $$|a'-b'|=||\alpha q-ap|-|\beta q - bp||=|(\alpha q-ap)+(\beta q-bp)|=|(\alpha+\beta)q-(a+b)p|=X\wedge Q~.$$ For the last identity, compute \begin{eqnarray*} a'b+b'a &=&(Q \wedge A)(\infty\wedge B)+(Q \wedge B)(\infty\wedge A) \\ &=& b|q\alpha-pa|+a|q\beta-pb| \\ &=& |b(q\alpha-pa)-a(q\beta-pb)| \\ &=& q|b\alpha-a\beta|=q(A\wedge B)=q~. \end{eqnarray*} \end{proof} An easy consequence is that all of $a,a',b,b',x,x',y,y'$ are integers of $\llbracket 1,q-1\rrbracket$. Note that the properties of Proposition \ref{prop:abab} are invariant under the exchange of $(a,a')$ with $(b,b')$ and under the exchange of $(a,b,x,y)$ with $(a',b',y',x')$ (which actually amounts to swapping $Q$ and $\infty$). \begin{proposition} \label{prop:notq2} None of $a,a',b,b'$ is equal to $\frac{q}{2}$. \end{proposition} \begin{proof} Suppose $b=\frac{q}{2}$. Since $a'b+b'a=q$, we then have $a'=1$. We have $b'a=q-a'b=\frac{q}{2}$ so $a$ divides $\frac{q}{2}$, but $a$ is also coprime to $b=\frac{q}{2}$ (because $A\wedge B=1$). Therefore $a=1$ (which by the way means $A\in\{0,1\}$). But since $a'=1$, this implies that $A$ is a Farey neighbor both of $Q$ and $\infty$, i.e.~$Q$ has the form $\frac{1}{q}$ or $\frac{q-1}{q}$, which we ruled out in the first place. If instead of $b$ another term of $a,a',b,b'$ is equal to $\frac{q}{2}$, then we can apply the same argument, up to permuting $a,a',b,b'$. \end{proof} Notice, however, that one of $a,a',b,b'$ could be \emph{larger} than $\frac{q}{2}$. \subsection{Level curves on the torus} \label{sec:tori} Let $\mathbb{T}:=(\mathbb{R}/2\pi\mathbb{Z})^2$ be the standard torus and $\iota:\mathbb{T}\rightarrow \mathbb{C}^2\simeq\mathbb{R}^4$ denote the standard injection, satisfying $$\iota(u,v)=(\cos u, \sin u, \cos v, \sin v).$$ The subgroup $\Gamma=\{\tau_k=(k\frac{2\pi}{q}, kp\frac{2\pi}{q})\}_{k\in \mathbb{Z}}$ of $\mathbb{T}$ lifts to an affine lattice $\Lambda$ of the universal cover $\mathbb{R}^2$ of $\mathbb{T}$. The index of $2\pi\mathbb{Z}^2$ in $\Lambda$ is $q$. Rationals $A,B$ are still as in Theorem \ref{thm:main}. \begin{proposition} \label{prop:basis} Define the lifts $u=(a\frac{2\pi}{q},ap\frac{2\pi}{q}-2\alpha\pi)$ and $v=(b\frac{2\pi}{q},bp\frac{2\pi}{q}-2\beta\pi)$ of $\tau_a$ and $\tau_b$ respectively. Also define the center $\overline{c}:=\frac{1}{2}(u+v)$ of the parallelogram $D:=(0,u,u+v,v)$ of $\mathbb{R}^2$. Then $(u,v)$ is a basis of the lattice $\Lambda$, and $D$ is contained in the square $\overline{c}+(-\pi,\pi)^2$. \end{proposition} \begin{proof} Clearly, $\Lambda \subset \mathbb{R}^2$ has covolume $(2\pi)^2/q$. 
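Before stating the next proposition, let us record a small computational aside, not used in any proof: by Remark \ref{rem:order}, the pairs $\{A,B\}$ of Theorem \ref{thm:main} can be enumerated, in their natural order, by the mediant (Stern--Brocot) descent from $\{\frac{0}{1},\frac{1}{1}\}$ towards $Q$, keeping all visited pairs except the initial pair $\{\frac{0}{1},\frac{1}{1}\}$ and the final pair of Farey parents of $Q$. The following Python sketch (the choice $p/q=4/11$ is an arbitrary illustration with $p\neq\pm 1$ mod $q$) enumerates these pairs and checks the identities of Proposition \ref{prop:abab} along the way.
\begin{verbatim}
from fractions import Fraction

def wedge(h, k):
    # h "wedge" k := |u*t - v*s| for h = u/v, k = s/t in lowest terms
    return abs(h.numerator * k.denominator - h.denominator * k.numerator)

def mediant(A, B):
    # mediant of two Farey neighbors (automatically in lowest terms)
    return Fraction(A.numerator + B.numerator, A.denominator + B.denominator)

def theorem_pairs(p, q):
    # Mediant (Stern-Brocot) descent from {0/1, 1/1} towards Q = p/q.
    # The pairs {A,B} of Theorem thm:main are all visited pairs except the
    # first one {0/1, 1/1} and the last one (the two Farey parents of Q),
    # listed in the order described in Remark rem:order.
    Q = Fraction(p, q)
    A, B = Fraction(0, 1), Fraction(1, 1)
    visited = [(A, B)]
    X = mediant(A, B)
    while X != Q:
        if Q < X:
            B = X
        else:
            A = X
        visited.append((A, B))
        X = mediant(A, B)
    return visited[1:-1]

p, q = 4, 11   # arbitrary example with p not congruent to +-1 mod q
Q = Fraction(p, q)
for A, B in theorem_pairs(p, q):
    a, b = A.denominator, B.denominator
    a1, b1 = wedge(A, Q), wedge(B, Q)          # a' = A wedge Q, b' = B wedge Q
    X = mediant(A, B)                          # common Farey neighbor closer to Q
    Y = Fraction(abs(A.numerator - B.numerator), abs(a - b))   # closer to infinity
    # identities of Proposition prop:abab
    assert a + b == X.denominator and abs(a - b) == Y.denominator
    assert a1 + b1 == wedge(Y, Q) and abs(a1 - b1) == wedge(X, Q)
    assert a1 * b + b1 * a == q
    print("A = %s, B = %s  ->  tetrahedron x_0 x_%d x_%d x_%d" % (A, B, a, b, a + b))
\end{verbatim}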
On the other hand, the determinant of $(u,v)$ is $2\pi\frac{2\pi}{q}(\alpha b-a\beta)=\pm(2\pi)^2/q$, so $(u,v)$ is a basis of $\Lambda$. The abscissae of $u,v$ are clearly positive, and their sum is $\frac{a+b}{q}2\pi=\frac{x}{q}2\pi<2\pi$. The ordinates $2\pi a(Q-A)$ of $u$ and $2\pi b(Q-B)$ of $v$ have opposite signs, and the sum of their absolute values is $$2\pi \left ( \left | \frac{ap-\alpha q}{q} \right |+ \left |\frac{bp-\beta q}{q}\right | \right )= 2\pi \frac{A\wedge Q + B\wedge Q}{q}=2\pi\frac{y'}{q}<2\pi~,$$ by Proposition \ref{prop:abab}. This proves the claim on $D$. \end{proof} \begin{definition} \label{def:center} Let $c=\left (\frac{a+b}{q}\pi,[p\frac{a+b}{q} - (\alpha+\beta)]\pi \right )$ denote the projection of $\overline{c}$ to the torus $\mathbb{T}=(\mathbb{R}/2\pi\mathbb{Z})^2$. \end{definition} \begin{proposition} \label{prop:convexmiss} Let $\Lambda\subset\mathbb{R}^2$ be a lattice and $P$ be a strictly convex, compact region of $\mathbb{R}^2$ such that $\Lambda\cap \partial P$ consists of the four vertices of a fundamental parallelogram of $\Lambda$. Then $\Lambda\cap P=\Lambda\cap \partial P$ (i.e.~$P$ contains no other lattice points). \end{proposition} \begin{proof} Without loss of generality, $\Lambda=\mathbb{Z}^2$ and $\{0,1\}^2\subset \partial P$. Since $P$ is strictly convex, the horizontal axis $\mathbb{R}\times \{0\}$ intersects $P$ precisely along $[0,1]\times \{0\}$. A similar statement holds for each side of the unit square. Therefore $P\smallsetminus \{0,1\}^2\subset (0,1)\times \mathbb{R} \cup \mathbb{R}\times (0,1)$, which contains no other vertices of $\mathbb{Z}^2$. \end{proof} The idea of the proof of Theorem \ref{thm:main} is to consider a linear form $\rho:\mathbb{R}^4\rightarrow \mathbb{R}$ that takes the same value $Z>0$ on $x_0, x_a, x_b,x_{a+b}$ and check that $\rho<Z$ on all other $x_i$. This will be achieved by looking at the level curve $\gamma$ of $\rho\circ\iota$ in $\mathbb{T}$, of level $Z$, and checking that the lift of $\gamma$ to $\mathbb{R}^2$ bounds a convex body that satisfies the hypotheses of Proposition \ref{prop:convexmiss}. For this, we will need the following property and its corollary. \begin{lemma} \label{lem:convex} If $(U,U'),(V,V')\in \mathbb{R}^2\smallsetminus\{(0,0)\}$ and $Z\in \mathbb{R}_+^*$ satisfies $$\left |\sqrt{V^2+V'^2}-\sqrt{U^2+U'^2}\right | <Z<\sqrt{V^2+V'^2}+\sqrt{U^2+U'^2}~,$$ then the preimage of $Z$ under $$\begin{array}{rrcl} &\mathbb{R}^2 & \rightarrow & \mathbb{R} \\ \psi~: & (x,y) & \longmapsto & (U \cos x + U'\sin x) + (V\cos y+V'\sin y) \end{array}$$ consists of a convex curve $\gamma$ (i.e.~a closed curve bounding a strictly convex domain), together with all the translates of $\gamma$ under $2\pi\mathbb{Z}^2$, which are pairwise disjoint. \end{lemma} \begin{proof} Up to shifting $x$ and $y$ by constants, we can assume $U'=V'=0$ and $U,V>0$. Up to exchanging $x$ and $y$, we can furthermore assume $V\geq U$, so that $0\leq V-U<Z<V+U$ and $\psi(x,y)=U\cos x + V\cos y$. Notice that $U,V,Z$ now satisfy all three strong triangular inequalities. Let $C$ be the square $[-\pi,\pi]^2$. Let us first determine that $\gamma:=\psi^{-1}(Z)\cap C$ is a convex curve contained in the interior of $C$. If $(x,y)\in\gamma$ then $U \cos x \geq Z-V \in (-U,U)$ so $$|x|\leq \arccos \frac{Z-V}{U}\in (0,\pi)~\text{ and }~\pm y=f(x):=\arccos \frac{Z-U \cos x}{V}\in[0,\pi)~,$$ since $Z-U>-V$. Clearly, $f$ vanishes at $\pm \arccos \frac{Z-V}{U}$. 
Moreover, using the chain rule $(\arccos \circ\, g)''=-\frac{g''(1-g^2)+gg'^2}{(1-g^2)^{3/2}}$, computation yields $$f''(x)=\frac{-U^2Z}{[V^2-(Z-U\cos x)^2]^{\frac{3}{2}}} \left [ 1+ \frac{V^2-Z^2-U^2}{UZ} \cos x + \cos^2 x\right ]$$ so to show $f''<0$ it is enough to check that the discriminant of the polynomial in $\cos x$ (in the right factor) is negative. This amounts to $\left |\frac{V^2-Z^2-U^2}{UZ}\right |<2$, which in turn follows from the triangular inequalities $(Z+U)^2>V^2$ and $(Z-U)^2<V^2$. We have proved that $\gamma$ is a convex curve (the union of the graphs of $f$ and $-f$) contained in the interior of $C$: the rest of the lemma follows easily. \end{proof} \begin{corollary} \label{cor:patate} Under the assumptions of Lemma \ref{lem:convex}, the set $H:=\psi^{-1}[Z,+\infty)$ consists of the disjoint union of all the convex domains bounded by $\gamma$ and its translates. \end{corollary} \begin{proof} Again restricting to $U'=V'=0<U\leq V$, we see that $H\cap C$ contains the origin (encircled by $\gamma$, and where $\psi$ achieves its maximum $U+V$) and does not contain $(\pi,\pi)$ (where $\psi$ achieves its minimum $-U-V$). The theorem of intermediate values allows us to conclude. \end{proof} \section{Proof of Theorem \ref{thm:main}} \label{sec:proof} Identifying $\mathbb{C}^2$ with $\mathbb{R}^4$ in the standard way, the matrix with column vectors $x_0, x_a,x_b,x_{a+b}$ is \begin{equation} \label{eq:matrixm} M:= \left ( \begin{array}{clll} 1 & \cos a \frac{2\pi}{q} & \cos b \frac{2\pi}{q} & \cos (a+b) \frac{2\pi}{q} \\ 0 & \sin a \frac{2\pi}{q} & \sin b \frac{2\pi}{q} & \sin (a+b) \frac{2\pi}{q} \\ 1 & \cos pa\frac{2\pi}{q} & \cos pb\frac{2\pi}{q} & \cos p(a+b)\frac{2\pi}{q} \\ 0 & \sin pa\frac{2\pi}{q} & \sin pb\frac{2\pi}{q} & \sin p(a+b) \frac{2\pi}{q} \end{array} \right )~. \end{equation} We refer to $\{x_0,x_a,x_b,x_{a+b}\}$ as our \emph{candidate face}. \subsection{Candidate faces are non-degenerate} \label{sec:nondeg} \begin{proposition} \label{prop:invertible} The determinant $D$ of the matrix $M$ is nonzero. 
\end{proposition} \begin{proof} Rotating the plane of the first two coordinates by $\frac{-a-b}{q}\pi$, and the plane of the last two coordinates by $\frac{-a-b}{q}p\pi$, we see that \begin{eqnarray*} D&=&\left | \begin{array}{llll} \cos\frac{-a-b}{q}\pi & \cos\frac{a-b}{q}\pi & \cos\frac{ b-a}{q}\pi & \cos\frac{a+b}{q}\pi \\ \sin\frac{-a-b}{q}\pi & \sin\frac{a-b}{q}\pi & \sin\frac{ b-a}{q}\pi & \sin\frac{a+b}{q}\pi \\ \cos\frac{-a-b}{q}p\pi & \cos\frac{a-b}{q}p\pi & \cos\frac{ b-a}{q}p\pi & \cos\frac{a+b}{q}p\pi \\ \sin\frac{-a-b}{q}p\pi & \sin\frac{a-b}{q}p\pi & \sin\frac{ b-a}{q}p\pi & \sin\frac{a+b}{q}p\pi \end{array} \right | \\ &=& 4 \left | \begin{array}{llll} \cos\frac{a+b}{q}\pi & \cos\frac{a-b}{q}\pi & \cos\frac{b-a}{q}\pi & \cos\frac{a+b}{q}\pi \\ 0 & 0 & \sin\frac{b-a}{q}\pi & \sin\frac{a+b}{q}\pi \\ \cos\frac{a+b}{q}p\pi & \cos\frac{a-b}{q}p\pi & \cos\frac{b-a}{q}p\pi & \cos\frac{a+b}{q}p\pi \\ 0 & 0 & \sin\frac{b-a}{q}p\pi & \sin\frac{a+b}{q}p\pi \end{array} \right | \hspace{6pt} \text{(column operations)}\\ &=& 4 \left | \begin{array}{ll} \cos\frac{a+b}{q} \pi & \cos\frac{a-b}{q} \pi \\ \cos\frac{a+b}{q}p\pi & \cos\frac{a-b}{q}p\pi \end{array} \right | \cdot \left | \begin{array}{ll} \sin\frac{a-b}{q} \pi & \sin\frac{a+b}{q} \pi \\ \sin\frac{a-b}{q}p\pi & \sin\frac{a+b}{q}p\pi \end{array} \right | \\ &=& \textstyle{4~(2 \cos\frac{a}{q} \pi \cos\frac{b}{q} \pi \cdot \sin\frac{ap}{q}\pi \sin\frac{bp}{q}\pi - 2 \sin\frac{a}{q} \pi \sin\frac{b}{q} \pi \cdot \cos\frac{ap}{q}\pi \cos\frac{bp}{q}\pi)} \\ && \textstyle{~(2 \sin\frac{a}{q} \pi \cos\frac{b}{q} \pi \cdot \sin\frac{bp}{q}\pi \cos\frac{ap}{q}\pi - 2 \sin\frac{b}{q} \pi \cos\frac{a}{q} \pi \cdot \sin\frac{ap}{q}\pi \cos\frac{bp}{q}\pi)}~, \end{eqnarray*} so we only need to prove $$\tan\frac{ap\pi}{q}\tan\frac{bp\pi}{q} \neq \tan\frac{a\pi}{q} \tan\frac{b\pi}{q} \hspace{10pt} ; \hspace{10pt} \tan\frac{a\pi}{q} \tan\frac{bp\pi}{q} \neq \tan\frac{b\pi}{q} \tan\frac{ap\pi}{q}$$ (provided all these tangents are finite). Since $ap-\alpha q=a'\cdot \sigma(Q-A)$ (where $\sigma$ is the sign function) and $\tan$ is $\pi$-periodic, $$\tan \frac{ap\pi}{q}=\tan \frac{ap-\alpha q}{q}\pi=\sigma(Q-A)\tan \frac{a'}{q}\pi$$ and similarly $\tan \frac{bp\pi}{q}=\sigma(Q-B)\tan \frac{b'}{q}\pi$. Since $Q$ lies between $A$ and $B$, the signs of $Q-A$ and $Q-B$ are opposite, so we only need to prove \begin{equation} \label{eq:ineq} \tan\frac{a'\pi}{q}\tan\frac{b'\pi}{q} \neq -\tan\frac{a\pi}{q} \tan\frac{b\pi}{q} \hspace{10pt} ; \hspace{10pt} \tan\frac{a\pi}{q} \tan\frac{b'\pi}{q} \neq -\tan\frac{b\pi}{q} \tan\frac{a'\pi}{q}~. \end{equation} (All these tangents \emph{are} finite, by Proposition \ref{prop:notq2}.) If $a,a',b,b'\leq \frac{q}{2}$, then all the values of ``$\text{tan}$'' in (\ref{eq:ineq}) are positive, which yields the result. If one of $a,a',b,b'$ is larger than $\frac{q}{2}$, say $b>\frac{q}{2}$, then $a'b+b'a=q$ requires $a'=1$, which entails $a\geq 2$ (because $A$ is not a Farey neighbor of both $Q$ and $\infty$), and $b'\geq 2$ (because $A$ and $B$ are not both Farey neighbors of $Q$). We have $ab'=q-b<\frac{q}{2}$ and $b=\frac{q-ab'}{a'}=q-ab'$. Therefore the first inequality of (\ref{eq:ineq}) can be written $$\tan\frac{\pi}{q} \tan\frac{b'\pi}{q} \neq \tan\frac{a\pi}{q} \tan\frac{ab'\pi}{q}~,$$ which is clearly true (both members are positive, but the right one is larger, factor-wise, because $a\geq 2$). 
Similarly, the second inequality of (\ref{eq:ineq}) becomes $\tan\frac{a\pi}{q} \tan\frac{b'\pi}{q} \neq \tan\frac{ab'\pi}{q} \tan\frac{\pi}{q}$ (all values of ``$\tan$'' are still positive), i.e. $$\frac{\tan\frac{a\pi}{q}}{\tan\frac{\pi}{q}} \neq \frac{\tan\frac{ab'\pi}{q}}{\tan\frac{b'\pi}{q}}~.$$ Notice that without the ``$\tan$'s'', this would be an identity. To see that the right member is larger, it is therefore enough to make sure that the function $g:u\mapsto \frac{\tan u}{\tan (u/a)}$ is increasing on $(0,\frac{\pi}{2})$. Computation yields $$g'(u)=\frac{\sin (2 u/a)-\sin (2u)/a}{2\sin^2 (u/a)\cos^2 u}~:$$ since $a\geq 2$, the numerator is clearly positive, by strict concavity of $\sin$ on $[0,\pi]$. If instead of $b$ another term of $a,a',b,b'$ is larger than $\frac{q}{2}$, then we can apply the same argument, up to permuting $a,a',b,b'$. \end{proof} \subsection{Candidate faces are faces of the convex hull} We must now show that if $\rho:\mathbb{R}^4\rightarrow \mathbb{R}$ is some linear form that takes the same value $Z>0$ on each column vector $x_0,x_a,x_b,x_{a+b}$ (i.e.~$\iota(\tau_0),\iota(\tau_a),\iota(\tau_b),\iota(\tau_{a+b})$) of the matrix $M$ from (\ref{eq:matrixm}), then $\rho\circ\iota(\tau_k)<Z$ for any $k\in \llbracket 0,q-1\rrbracket \smallsetminus \{0,a,b,a+b\}$. This will be done by showing \emph{via} Corollary \ref{cor:patate} that $(\rho\circ\iota)^{-1}[Z,+\infty)$ is (once lifted to $\mathbb{R}^2$) a convex region of the type seen in Proposition \ref{prop:convexmiss}. An elementary computation shows that in coordinates, \begin{equation} \label{eq:formvalue}\left \{ \begin{array}{rcl} \rho&=& (-1)^{\alpha+\beta} \left ( \begin{array}{r} -\cos \frac{a+b}{q} \pi \sin \frac{ap \pi}{q} \sin \frac{bp \pi}{q} \\ -\sin \frac{a+b}{q} \pi \sin \frac{ap \pi}{q} \sin \frac{bp \pi}{q} \\ \cos \frac{a+b}{q}p \pi \sin \frac{a \pi}{q} \sin \frac{b \pi}{q} \\ \sin \frac{a+b}{q}p \pi \sin \frac{a \pi}{q} \sin \frac{b \pi}{q} \end{array} \right )^t =:\left (\begin{array}{l} U\\ U'\\ V\\ V'\end{array}\right )^t\\ &&\\ Z&=& (-1)^{\alpha+\beta} \left ( \cos \frac{a+b}{q}p \pi \sin \frac{a \pi}{q} \sin \frac{b \pi}{q} - \cos \frac{a+b}{q} \pi \sin \frac{ap \pi}{q} \sin \frac{bp \pi}{q} \right ) \\ &=& \frac{(-1)^{\alpha+\beta}}{2} \left ( \cos \frac{a+b}{q}p \pi \cos \frac{a-b}{q} \pi - \cos \frac{a+b}{q} \pi \cos \frac{a-b}{q}p \pi \right )\end{array} \right .\end{equation} will do ($Z$ will turn out to be positive by Claim \ref{cla:key} below; so far we only know $Z\neq 0$ by Proposition \ref{prop:invertible}). The notation $U,U',V,V'$ is made to fit Lemma \ref{lem:convex}. Define $$\left \{ \begin{array}{rclcl} U''&:=&\sqrt{U^2+U'^2}&=& |\sin \frac{ap \pi}{q} \sin \frac{bp \pi}{q}|>0 \\ V''&:=&\sqrt{V^2+V'^2}&=& |\sin \frac{a \pi}{q} \sin \frac{b \pi}{q}|>0~. \end{array}\right .$$ \begin{claim} \label{cla:key} The point $c$ of Definition \ref{def:center} realizes the absolute maximum of $\rho \circ \iota$ on the torus $\mathbb{T}$. Moreover, $$Z=\frac{1}{2}\left(\cos \frac{x'}{q}\pi\cos\frac{y}{q}\pi - \cos \frac{x}{q}\pi \cos \frac{y'}{q}\pi\right)~,$$ $Z$ is positive, and one has: $|V''-U''|<Z<V''+U''$. \end{claim} This claim proves Theorem \ref{thm:main}. Indeed, assume the claim, and let $H$ denote $[Z,+\infty)$. Let $\overline{\pi}$ denote the natural projection $\mathbb{R}^2\rightarrow \mathbb{T}$. 
By Corollary \ref{cor:patate}, the level curve $(\rho\circ\iota\circ \overline{\pi})^{-1}(Z)\subset \mathbb{R}^2$ contains a strictly convex closed curve $\gamma$ centered around $\overline{c}$, contained in the square $C:=\overline{c}+(-\pi,\pi)^2$ and passing through the representatives of $\tau_0,\tau_a,\tau_b,\tau_{a+b}$ contained in $C$. By Proposition \ref{prop:basis}, these representatives are the vertices $0,u,v,u+v$ of the fundamental parallelogram $D$. Corollary \ref{cor:patate} and Proposition \ref{prop:convexmiss} then yield the result: $(\rho\circ\iota)^{-1}(H)$ contains no other points $\tau_k$ than $\tau_0,\tau_a,\tau_b,\tau_{a+b}$. \begin{proof} (Claim \ref{cla:key}). The maximum of $\rho \circ \iota$ on $\mathbb{T}$ is clearly $U''+V''$. Since $$\iota(c)=\left ( \begin{array}{c} \cos \frac{a+b}{q}\pi \\ \sin \frac{a+b}{q}\pi \\ \cos [p\frac{a+b}{q} - (\alpha+\beta)]\pi \\ \sin [p\frac{a+b}{q} - (\alpha+\beta)]\pi \end{array} \right )~,$$ we can compute \begin{eqnarray*} \rho\circ\iota(c)&=&(-1)^{\alpha+\beta}\left (-\sin\frac{ap}{q}\pi\sin\frac{bp}{q}\pi+(-1)^{\alpha+\beta}\sin\frac{a\pi}{q}\sin\frac{b\pi}{q} \right ) \\ &=& -\sin\frac{ap-\alpha q}{q}\pi\sin\frac{bp-\beta q}{q}\pi + \sin\frac{a}{q}\pi\sin\frac{b}{q}\pi \\ &=& \sin\frac{A\wedge Q}{q}\pi\sin\frac{B\wedge Q}{q}\pi + \sin\frac{a}{q}\pi\sin\frac{b}{q}\pi \\ &=& \sin\frac{a'}{q}\pi\sin\frac{b'}{q}\pi + \sin\frac{a}{q}\pi\sin\frac{b}{q}\pi \end{eqnarray*} because $ap-\alpha q$ and $bp-\beta q$ have opposite signs ($Q$ lies between $A$ and $B$). Both terms in the last expression are positive since $a,a',b,b'\in \llbracket 1,q-1 \rrbracket$. In fact, since $$V''= \left | \sin \frac{a \pi}{q} \sin \frac{b \pi}{q}\right |=\sin \frac{a \pi}{q} \sin \frac{b \pi}{q}$$ and $$U''= \left |\sin \frac{a p\pi}{q} \sin \frac{bp \pi}{q}\right |=\left |\sin \frac{a p-\alpha q}{q}\pi \sin \frac{bp -\beta q}{q}\pi\right |=\sin \frac{a' \pi}{q} \sin \frac{b' \pi}{q}~,$$ we have shown that $\rho\circ \iota(c)=U''+V''$, the absolute maximum of $\rho\circ\iota$. The computation of $Z$ follows similar lines: in the second expression for $Z$ in (\ref{eq:formvalue}), notice that the first and last cosines can be written $$(-1)^{\alpha+\beta}\cos\frac{a+b}{q}p\pi=\cos\frac{(a+b)p-(\alpha+\beta)q}{q}\pi=\cos \frac{X\wedge Q}{q}\pi~;$$ $$(-1)^{\alpha-\beta}\cos\frac{a-b}{q}p\pi=\cos\frac{(a-b)p-(\alpha-\beta)q}{q}\pi=\cos \frac{Y\wedge Q}{q}\pi$$ (using Proposition \ref{prop:abab}). Together with $\frac{a+b}{q}=\frac{x}{q}$ and $\frac{a-b}{q}=\frac{\pm y}{q}$, this yields the desired expression of $Z=\frac{1}{2}\left(\cos \frac{x'}{q}\pi\cos\frac{y}{q}\pi - \cos \frac{x}{q}\pi \cos \frac{y'}{q}\pi\right)$. The upper bound on $Z$ is obvious from the first expression of $Z$ in (\ref{eq:formvalue}). We now focus on the lower bound (which will also imply $Z>0$), i.e.~we aim to show \begin{equation} \label{eq:minoration} \cos\frac{x'}{q}\pi \cdot \cos\frac{y}{q}\pi - \cos\frac{x}{q}\pi \cdot \cos\frac{y'}{q}\pi > 2\left | \sin\frac{a'}{q}\pi \cdot \sin\frac{b'}{q}\pi - \sin\frac{a}{q}\pi \cdot \sin\frac{b}{q}\pi \right |~. 
\end{equation} By Proposition \ref{prop:abab}, the right member of (\ref{eq:minoration}) can be written $$\left | \left ( \cos\frac{x'}{q}\pi- \cos\frac{y'}{q}\pi\right )-\left ( \cos\frac{y}{q}\pi- \cos\frac{x}{q}\pi\right )\right |~;$$ therefore we are down to proving the two inequalities $$\left \{ \begin{array}{l}\displaystyle{ \left (\cos \frac{x'}{q}\pi-1 \right )\cdot \left (\cos \frac{y}{q}\pi +1 \right )~>~ \left (\cos \frac{x}{q}\pi +1 \right )\cdot \left (\cos \frac{y'}{q}\pi-1 \right )} \\ \\ \displaystyle{ \left (\cos \frac{x'}{q}\pi+1 \right )\cdot \left (\cos \frac{y}{q}\pi -1 \right )~>~ \left (\cos \frac{x}{q}\pi -1 \right )\cdot \left (\cos \frac{y'}{q}\pi+1 \right )~.} \end{array} \right . $$ \noindent Using $\cos t +1=2\cos^2\frac{t}{2}$ and $\cos t - 1=-2\sin^2\frac{t}{2}$, this in turn amounts to $$\left \{\begin{array}{l} \displaystyle{ \sin \left (\frac{x'}{q}\right )\frac{\pi}{2}\cdot \cos \left (\frac{y}{q}\right )\frac{\pi}{2}~<~ \cos \left (\frac{x}{q}\right )\frac{\pi}{2}\cdot \sin \left (\frac{y'}{q}\right )\frac{\pi}{2}} \\ \\ \displaystyle{ \cos \left (\frac{x'}{q}\right )\frac{\pi}{2}\cdot \sin \left (\frac{y}{q}\right )\frac{\pi}{2}~<~ \sin \left (\frac{x}{q}\right )\frac{\pi}{2}\cdot \cos \left (\frac{y'}{q}\right )\frac{\pi}{2}~,} \end{array} \right .$$ \noindent or equivalently \begin{equation}\label{eq:sineratio}\left \{\begin{array}{rcll} \frac{\displaystyle{\sin \left (\frac{x'}{q}\right )\frac{\pi}{2}}} {\displaystyle{\sin \left (\frac{y'}{q}\right )\frac{\pi}{2}}} &<& \frac{\displaystyle{\sin \left (\frac{q - x}{q}\right )\frac{\pi}{2}}} {\displaystyle{\sin \left (\frac{q - y}{q}\right )\frac{\pi}{2}}} & \hspace{20pt} (i) \\ \\ \frac{\displaystyle{\sin \left (\frac{y}{q}\right )\frac{\pi}{2}}} {\displaystyle{\sin \left (\frac{x}{q}\right )\frac{\pi}{2}}} &<& \frac{\displaystyle{\sin \left (\frac{q-y'}{q}\right )\frac{\pi}{2}}} {\displaystyle{\sin \left (\frac{q-x'}{q}\right )\frac{\pi}{2}}} & \hspace{20pt} (ii). \end{array} \right .\end{equation} To prove (\ref{eq:sineratio})-$(i)$ and (\ref{eq:sineratio})-$(ii)$, we will use \begin{proposition} \label{prop:sines} If $\displaystyle{0<s<t<\frac{\pi}{2}}$ and $\displaystyle{0<s'<t'<\frac{\pi}{2}}$ satisfy $s<s'$ and $\displaystyle{\frac{s}{t}\leq\frac{s'}{t'}}$, then $\displaystyle{\frac{\sin s}{\sin t}<\frac{\sin s'}{\sin t'}}$. \end{proposition} \begin{proof} Up to decreasing $t$, it is clearly enough to treat the case $\frac{s}{t}=\frac{s'}{t'}=\frac{1-\lambda}{1+\lambda}$ (where $0<\lambda<1$). The result then follows from the fact that $f(u)=\frac{\sin(1-\lambda)u}{\sin(1+\lambda)u}$ is increasing on $(0,\frac{\pi}{2(1+\lambda)}]$, which can be seen by computing $$ f'(u)=\frac{\sin(2\lambda u)-\lambda\sin(2u)}{\sin^2(1+\lambda)u}~:$$ here the numerator is positive by strict concavity of $\sin$ on $[0,\frac{\pi}{1+\lambda}]$. \end{proof} We now prove (\ref{eq:sineratio})-$(i)$: by Proposition \ref{prop:sines}, it is enough to check $$0<x'<y'< q\text{ and }0<y<x<q$$ (which are obvious from Proposition \ref{prop:abab}), plus \begin{equation}\label{eq:enfin} x' < q-x ~\text{ and }~ \frac{x'}{y'}\leq \frac{q-x}{q-y}~. \end{equation} The first inequality of (\ref{eq:enfin}) amounts, by Proposition \ref{prop:abab}, to $$|a'-b'|+(a+b)<a'b+b'a$$ which can be written $$(a'-1)(b\pm 1)+(b'-1)(a\mp 1)>0~.$$ If $a'$ and $b'$ are $>1$, then at least one of the products in the left member is positive, and we are done. 
If $a'=1$, then $b'>1$ (because $A,B$ are not both Farey neighbors of $Q$ in the assumptions of Theorem \ref{thm:main}) and $a>1$ (because $Q,\infty$ have no common Farey neighbors, i.e.~$p\notin\{1,q-1\}$) and we are also done. If $b'=1$, the argument is the same, exchanging $(A,a,a')$ and $(B,b,b')$. The second inequality of (\ref{eq:enfin}) amounts to $$q(y'-x')\geq y'x-x'y$$ which by Proposition \ref{prop:abab} can also be written $$ y'-x' \geq \frac{(a'+b')(a+b)-|(a'-b')(a-b)|}{a'b+b'a}=:H~.$$ Here the left member is at least 2~: indeed, by Proposition \ref{prop:abab} it can be written $$a'+b'-|a'-b'|=2\inf\{a',b'\}~.$$ The right member $H$, however, is at most $2$~: indeed, \begin{eqnarray*}2-H &=&\frac{a'(2b-a-b)+b'(2a-a-b)+|(a'-b')(a-b)|}{a'b+b'a} \\ &=& \frac{(a'-b')(b-a)+|(a'-b')(a-b)|}{a'b+b'a} \end{eqnarray*} and the numerator has the form $u+|u|\geq 0$. This finishes the proof of (\ref{eq:sineratio})-$(i)$. The proof of (\ref{eq:sineratio})-$(ii)$ is identical with that of (\ref{eq:sineratio})-$(i)$, exchanging $(a,b,x,y)$ with $(a',b',y',x')$. Claim \ref{cla:key}, and therefore Theorem \ref{thm:main}, are proved. \end{proof} \section{General finite subgroups of $\iota(\mathbb{T})\subset{\mathbb{C}^*}^2$} \label{sec:general} In this last section, let $\Gamma$ be \emph{any} finite subgroup of $\mathbb{T}=(\mathbb{S}^1)^2=(\mathbb{R}/2\pi\mathbb{Z})^2$. There exists a unique rational $Q=\frac{p}{q}\in[0,1)$ (here in reduced form) and a unique pair $(\mu,\nu)\in\mathbb{Z}_{>0}^2$ such that $\Gamma$ is the preimage of $\{\tau_k=(\frac{k}{q},\frac{kp}{q})\}_{0\leq k < q}$ under $$\begin{array}{rrcl} & \mathbb{T}& \longrightarrow & \mathbb{T} \\ \psi_{\mu\nu}~: & (s,t) & \mapsto & (\mu s,\nu t)~. \end{array}$$ Indeed, $\mu$ (resp. $\nu$) is just the cardinality of $\Gamma\cap(\mathbb{S}^1\times\{0\})$ (resp. $\Gamma\cap (\{0\}\times\mathbb{S}^1)$); the order of $\Gamma$ is $q\mu\nu$. The case $\frac{p}{q}=0$ can be put aside: it corresponds to $\iota(\Gamma)\subset\mathbb{R}^4$ being (the vertices of) the Cartesian product of a regular $\mu$-gon with a regular $\nu$-gon (the 3-dimensional faces are then regular prisms; degeneracies occur if $\mu\leq 2$ or $\nu \leq 2$). The case $\mu=\nu=1$ was treated in the previous sections, including the discussion of degeneracies when $p\equiv 0 \text{ or } \pm 1~[\text{mod }q]$. It is easy to see that if $\mu=1<\nu$ (resp. $\nu=1<\mu$) and $\frac{p}{q}=\frac{1}{2}$, then $\iota(\Gamma)$ is contained in a $3$-dimensional subspace of $\mathbb{R}^4$ --- in fact, $\iota(\Gamma)$ is the vertex set of an antiprism with $\nu$-gonal (resp. $\mu$-gonal) basis, which in turn degenerates to a tetrahedron when $\nu=2$ (resp. $\mu=2$). Therefore, we can make \begin{assumption} \label{ass:qumunu} Until the end of this section, \begin{itemize} \item at least one of the positive integers $\mu,\nu$ is larger than one; \item the rational $\frac{p}{q}\in (0,1)$ is not $\frac{1}{2}$ when $\mu=1$ or $\nu=1$. 
\end{itemize} \end{assumption} Then, we claim that faces of the convex hull of $\iota(\Gamma)\subset\mathbb{R}^4$ come in three types: \begin{enumerate} \item If $A,B\in[0,1]$ are rationals satisfying the hypotheses of Theorem \ref{thm:main}, then there is a tetrahedron spanned by the images under $\iota:\mathbb{T}\rightarrow \mathbb{R}^4$ of $$\textstyle{ \left (\frac{0}{q\mu}, \frac{0}{q\nu} \right ), \left (\frac{a}{q\mu}2\pi, \frac{ap-\alpha q}{q\nu}2\pi \right), \left (\frac{b}{q\mu}2\pi, \frac{bp-\beta q}{q\nu}2\pi \right), \left (\frac{a+b}{q\mu}2\pi, \frac{(a+b)p-(\alpha+\beta) q}{q\nu}2\pi\right),}$$ which are clearly four points of $\Gamma=\psi_{\mu\nu}^{-1}\{\tau_1,\dots,\tau_q\}$. They form a parallelogram whose center is $c=\left ( \frac{a+b}{q\mu}\pi, \frac{(a+b)p-(\alpha+\beta) q}{q\nu}\pi \right)$. \item If $\nu>1$, add an extra tetrahedron of the type above for the pair $\{A,B\}=\{\frac{0}{1},\frac{1}{1}\}$ (this was ruled out in Theorem \ref{thm:main} because $A,B$ were not allowed both to be Farey neighbors of $\infty=\frac{1}{0}$). Similarly, if $\mu>1$, add an extra tetrahedron of the type above for $\{A,B\}$ equal to the unique pair of Farey neighbors $\frac{\alpha}{a},\frac{\beta}{b}$ such that $\frac{\alpha+\beta}{a+b}=\frac{p}{q}$. (If $\frac{p}{q}=\frac{1}{2}$ and $\mu,\nu\geq 2$, these two ``extra'' tetrahedra are in fact the same one.) \item If $\nu>1$, add an extra cell spanned by the $2\nu$ vertices, images under $\iota$ of $$\textstyle{\left \{\left . \left (0,\frac{k}{\nu}2\pi\right )~\right |~0\leq k < \nu \right \} \, \cup \left \{\left . \left (\frac{1}{q\mu}2\pi,\frac{p+kq}{q\nu}2\pi \right )~\right |~0\leq k<\nu\right \}~.}$$ If $\nu>2$, this cell is an antiprism with regular $\nu$-gonal basis; it degenerates to a tetrahedron when $\nu=2$. Similarly, if $\mu>1$, add an extra cell spanned by the $2\mu$ vertices, images under $\iota$ of $$\textstyle{\left \{\left . \left (\frac{k}{\mu}2\pi,0\right )~\right |~0\leq k < \mu\right \}\cup\left \{\left . \left (\frac{p+kq}{q\mu}2\pi,\frac{1}{q\nu}2\pi \right )~\right |~0\leq k<\mu\right \}~.}$$ \end{enumerate} Actually, cells of type (3) degenerate to segments when $\nu=1$ or $\mu=1$, respectively. \begin{observation} \label{obs:whoisone} Let $\{A,B\}\subset [0,1]$ be a pair of rationals describing a face of type (1) or (2), define $a,a',b,b'\in\mathbb{Z}_{>0}$ and $x,x',y,y'\in\mathbb{Z}_{\geq 0}$ in the usual way, and bear in mind Proposition \ref{prop:abab}. Then, \begin{itemize} \item having $a=b=1$ (i.e.~$y=0$, i.e.~$y'=q$) is only allowed if $\nu>1$; \item having $a'=b'=1$ (i.e.~$x'=0$, i.e.~$x=q$) is only allowed if $\mu>1$; \item Proposition \ref{prop:notq2} no longer holds: some of $a,a',b,b'$ may be equal to $\frac{q}{2}$. \end{itemize} \end{observation} First we prove that cells of types (1)--(2)--(3), pushed forward by $\Gamma$, are combinatorially glued face-to-face (i.e.~an analogue of Theorem \ref{thm:nofaces} holds). The proof closely mirrors that of Theorem \ref{thm:nofaces} (lifting to the cover $\psi_{\mu\nu}$), except that when $\mu>1$ (resp. $\nu>1$), we must check that faces of type (2)--(3) also fit together correctly. 
Assume $\nu>1$: the ``first'' tetrahedron (of type (2) in the list), corresponding to $\{A,B\}=\{\frac{0}{1},\frac{1}{1}\}$, is spanned (up to action of $\Gamma$) by the images under $\iota$ of $$\left (\frac{0}{q\mu},\frac{0}{q\nu}\right ), \left (\frac{1}{q\mu}2\pi, \frac{p}{q\nu}2\pi \right ), \left (\frac{1}{q\mu}2\pi, \frac{p-q}{q\nu}2\pi \right ), \left (\frac{2}{q\mu}2\pi, \frac{2p-q}{q\nu}2\pi\right )~. $$ The subfaces obtained by dropping the second or third of these four vertices also belong to faces of type (1) (with $\{A,B\}=\{\frac{0}{1},\frac{1}{2}\}$ or $\{\frac{1}{2},\frac{1}{1}\}$), by the argument of the proof of Theorem \ref{thm:nofaces}. The face obtained by dropping the last vertex is clearly a face of the $\nu$-antiprism of type (3). The face obtained by dropping the first vertex is clearly a face of that same antiprism, shifted by $(\frac{1}{q\mu}2\pi,\frac{p}{q\nu}2\pi)\in\Gamma$. The antiprism and its shift, finally, are glued base-to-base along $\iota\left \{(\frac{1}{q\mu}2\pi,\frac{p+kq}{q\nu}2\pi)~|~0\leq k<\nu\right \}$. A similar argument holds when $\mu>1$ near the ``end'' of the sequence of tetrahedra: again, this just amounts to swapping $Q$ and $\infty$. Next, we proceed to show that the candidate faces of types (1)--(2)--(3) are indeed faces of the convex hull of $\iota(\Gamma)$. \subsection{Faces of type (3)} The vertices $\left ( \!\! \begin{array}{c} 1 \\ 0 \\ \cos \frac{2k\pi}{\nu} \\ \sin \frac{2k\pi}{\nu} \end{array} \!\! \right )_{\!0\leq k<\nu}$ and $\left ( \!\! \begin{array}{c} \cos \frac{2\pi}{q\mu} \\ \sin \frac{2\pi}{q\mu} \\ \cos \frac{2\pi (p+kq)}{q\nu} \\ \sin \frac{2\pi (p+kq)}{q\nu} \end{array} \!\! \right )_{\!0\leq k<\nu}$ form two regular $\nu$-gons contained in \emph{distinct} planes parallel to $\{(0,0)\}\times \mathbb{R}^2$, and are not translates of each other (they are off by a rotation of angle $2\pi \frac{p}{q\nu} \notin \frac{2\pi}{\nu}\mathbb{Z}$): this shows that they are the vertices of a convex, non--degenerate antiprism. Moreover, these $2\nu$ vertices clearly maximize the linear form $\rho=(\cos \frac{\pi}{q\mu},\sin\frac{\pi}{q\mu},0,0)$ (that is a purely 2-dimensional statement) and therefore span a face of the convex hull of $\iota(\Gamma)$. Similarly, the vertices of the other antiprism maximize $\rho'=(0,0,\cos\frac{\pi}{q\nu},\sin\frac{\pi}{q\nu})$. \subsection{Faces of type (1) and (2)} Let $\{A,B\}=\{\frac{\alpha}{a},\frac{\beta}{b}\}$ be as in type (1) or (2); the candidate face now is spanned by the column vectors of $$M:=\left ( \begin{array}{cccc} 1&\cos\frac{a}{\mu q}2\pi&\cos\frac{b}{\mu q}2\pi&\cos\frac{a+b}{\mu q}2\pi \\ 0&\sin\frac{a}{\mu q}2\pi&\sin\frac{b}{\mu q}2\pi&\sin\frac{a+b}{\mu q}2\pi \\ 1&\cos\frac{ap-\alpha q}{\nu q}2\pi&\cos\frac{bp-\beta q}{\nu q}2\pi& \cos\frac{(a+b)p-(\alpha+\beta)q}{\nu q}2\pi \\ 0&\sin\frac{ap-\alpha q}{\nu q}2\pi&\sin\frac{bp-\beta q}{\nu q}2\pi& \sin\frac{(a+b)p-(\alpha+\beta)q}{\nu q}2\pi \end{array}\right )~.$$ We now transpose the argument of Section \ref{sec:proof}. Generally speaking, the presence of $\mu,\nu\geq 1$ only strengthens any given inequality that we have to check, but we must check it also for the extra tetrahedra of type (2): hence some additional care. 
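Before carrying out the verification, the classification into types (1)--(3) can also be explored numerically on examples. The following sketch (a convenience only, assuming Python with NumPy and SciPy; the parameters $p,q,\mu,\nu$ and the function name are illustrative choices of ours) enumerates $\Gamma=\psi_{\mu\nu}^{-1}\{\tau_0,\dots,\tau_{q-1}\}$, embeds it into $\mathbb{R}^4$ by $\iota$, and lists the facets of its convex hull as computed by qhull. Non-simplicial facets such as the antiprisms of type (3) are returned in triangulated form, so the output is a sanity check rather than a certificate.
\begin{verbatim}
import numpy as np
from scipy.spatial import ConvexHull

def embedded_group(p, q, mu=1, nu=1):
    # Gamma = psi_{mu,nu}^{-1}({tau_k}), embedded in R^4 by iota
    pts = []
    for k in range(q):
        for r in range(mu):
            for s in range(nu):
                u = (k + r * q) * 2 * np.pi / (q * mu)
                v = (k * p + s * q) * 2 * np.pi / (q * nu)
                pts.append([np.cos(u), np.sin(u), np.cos(v), np.sin(v)])
    return np.array(pts)

# illustrative values: q = 7, p = 3 (so p is not 0 or +-1 mod q), mu = nu = 1
pts = embedded_group(p=3, q=7)
hull = ConvexHull(pts)
print(len(hull.vertices), "vertices,", len(hull.simplices), "facets")
for facet in hull.simplices:
    print(sorted(facet.tolist()))
\end{verbatim}
With $\mu=\nu=1$ the printed indices are exactly the indices $k$ of the points $\tau_k$, so each facet can be compared directly with the quadruples $\{0,a,b,a+b\}$ predicted by Theorem \ref{thm:main}.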
\subsection*{Candidate faces are non-degenerate} Rotating the first two coordinates by $\frac{-a-b}{\mu q}\pi$ and the last two by $\frac{-(a+b)p+(\alpha+\beta)q}{\nu q}\pi=\frac{-(ap-\alpha q)-(bp-\beta q)}{\nu q}\pi$, using the method of Section \ref{sec:nondeg}, and replacing $\frac{(ap-\alpha q)\pm(bp-\beta q)}{\nu q}$ with $\frac{a'\mp b'}{\nu q}\cdot\sigma(ap-\alpha q)$, compute $$\begin{array}{rcl} \det M &=& \pm 4 \left | \begin{array}{cc} \cos \frac{a+b}{\mu q}\pi& \cos \frac{a-b}{\mu q}\pi \\ \cos \frac{a'-b'}{\nu q} \pi & \cos \frac{a'+b'}{\nu q} \pi \end{array} \right | \cdot \left | \begin{array}{cc} \sin \frac{a-b}{\mu q}\pi& \sin \frac{a+b}{\mu q}\pi \\ \sin \frac{a'+b'}{\nu q} \pi & \sin \frac{a'-b'}{\nu q} \pi \end{array} \right |\\ &=&\pm 16( \cos\frac{a\pi }{\mu q} \cos\frac{b\pi }{\mu q} \sin\frac{a'\pi}{\nu q} \sin\frac{b'\pi}{\nu q} + \sin\frac{a\pi}{\mu q} \sin\frac{b\pi}{\mu q} \cos\frac{a'\pi}{\nu q}\cos\frac{b'\pi}{\nu q}) \\ && \cdot \,( \sin\frac{a\pi}{\mu q} \cos\frac{b\pi}{\mu q} \sin\frac{b'\pi}{\nu q} \cos\frac{a'\pi}{\nu q} + \sin\frac{b\pi}{\mu q} \cos\frac{a\pi}{\mu q} \sin\frac{a'\pi}{\nu q} \cos\frac{b'\pi}{\nu q})~. \end{array}$$ To follow up the method of Section \ref{sec:nondeg}, we would divide both factors of $\det M$ by $$\textstyle{H:=\cos \frac{a\pi}{\mu q}\cos \frac{b\pi}{\mu q}\cos \frac{a'\pi}{\nu q}\cos \frac{b'\pi}{\nu q}}~:$$ however, that number can be $0$. In that case, each factor of $\det M$ has a vanishing summand. Let us prove that the other summand is then nonzero, so that $\det M\neq 0$. (Note that the \emph{sines} in $\det M$ never vanish, only the \emph{cosines} may.) If $\cos\frac{a\pi}{\mu q}=0$, then $\mu=1$ and $a=\frac{q}{2}$. This implies $\nu>1$ by Assumption \ref{ass:qumunu}, so the first factor of $\det M$ has a nonzero second summand. Moreover, the second factor of $\det M$ has a nonzero first summand unless $\cos \frac{b\pi}{\mu q}=0$ i.e.~$b=\frac{q}{2}=a$. But $a,b$ are coprime, so we then have $a=b=1$ and $q=2$ and $\frac{p}{q}=\frac{1}{2}$, which is ruled out when $\mu=1$ (Assumption \ref{ass:qumunu}). If another factor of $H$ vanishes, the argument is similar up to switching $(a,a')$ with $(b,b')$, and/or $(a,b,\mu)$ with $(a',b',\nu)$. In any case, $M$ is invertible. On the other hand, if $H\neq 0$, we must make sure that \begin{equation} \label{eq:munutan} \textstyle{\tan\frac{a'\pi}{\nu q} \tan\frac{b'\pi}{\nu q} \neq - \tan\frac{a\pi}{\mu q} \tan\frac{b\pi}{\mu q}} ~\text{ ; }~ \textstyle{\tan\frac{a\pi}{\mu q} \tan\frac{b'\pi}{\nu q} \neq -\tan\frac{b\pi}{\mu q} \tan\frac{a'\pi}{\nu q}}~. \end{equation} If $\mu>1$ and $\nu>1$, all tangents in (\ref{eq:munutan}) are positive, so (\ref{eq:munutan}) holds. Suppose $\mu=1<\nu$. Then at most one of $a',b'$ is equal to $1$ (Observation \ref{obs:whoisone}). If $a,b<\frac{q}{2}$, the members in (\ref{eq:munutan}) have opposite signs. If $a>\frac{q}{2}$, since $a'b+b'a=q$, we have $b'=1$ which implies $a'>1$ and $a=q-a'b$. Thus, (\ref{eq:munutan}) becomes $$\textstyle{\tan\frac{a'\pi}{\nu q} \tan\frac{\pi}{\nu q} \neq \tan\frac{a'b\pi}{q} \tan\frac{b\pi}{q}~\text{ ; }~\left . \tan\frac{a'b\pi}{q} \right / \tan\frac{b\pi}{q} \neq \left . \tan\frac{a'\pi}{\nu q} \right / \tan\frac{\pi}{\nu q}~:}$$ in the first inequality, even if $b=1$, the right member is larger because $\nu>1$. In the second inequality, even if $b=1$, the method of Section \ref{sec:nondeg} shows that the left member is larger because $\nu>1$ and $a'>1$. 
If $b>\frac{q}{2}$, the argument is the same, exchanging $(a,a')$ with $(b,b')$. Finally, if $\nu=1<\mu$, the argument is again the same, switching $(a,b,\mu)$ with $(a',b',\nu)$. Therefore, the matrix $M$ is invertible and the candidate face is non-degenerate. \subsection*{Candidate faces are faces of the convex hull} Let us now prove that if a linear form $\rho=(U,U',V,V')$ takes the same value $Z>0$ on each column vector of $M$, then $\rho\circ\iota$ achieves its maximum on $\mathbb{T}$ at $c$ and $|V''-U''|<Z<V''+U''$, where $U''=\sqrt{U^2+U'^2}$ and $V''=\sqrt{V^2+V'^2}$ (by the argument after Claim \ref{cla:key}, this will show that the candidate face is a face of the convex hull). An elementary computation shows that \begin{equation*} \left \{ \begin{array}{rcl} \rho&=& \left (\begin{array}{r} -\cos \frac{a+b}{\mu q} \pi \sin \frac{ap-\alpha q}{\nu q}\pi \sin \frac{bp-\beta q}{\nu q} \pi \\ -\sin \frac{a+b}{\mu q} \pi \sin \frac{ap-\alpha q}{\nu q}\pi \sin \frac{bp-\beta q}{\nu q}\pi \\ \cos \frac{(ap-\alpha q)+(bp-\beta q)}{\nu q}\pi \sin \frac{a}{\mu q}\pi \sin \frac{b}{\mu q}\pi \\ \sin \frac{(ap-\alpha q)+(bp-\beta q)}{\nu q}\pi \sin \frac{a}{\mu q}\pi \sin \frac{b}{\mu q}\pi \end{array} \right )^t =:\left (\begin{array}{l} U\\ U'\\ V\\ V'\end{array} \right )^t\\ &&\\ Z&=& \cos \frac{(ap-\alpha q)+(bp-\beta q)}{\nu q}\pi \sin \frac{a \pi}{\mu q} \sin \frac{b \pi}{\mu q} -\cos \frac{a+b}{\mu q} \pi \sin \frac{ap-\alpha q}{\nu q}\pi \sin \frac{bp -\beta q}{\nu q}\pi \\ &=& \frac{1}{2} \left ( \cos \frac{x'\pi}{\nu q} \cos \frac{y\pi}{\mu q} -\cos \frac{x\pi}{\mu q} \cos \frac{y'\pi}{\nu q} \right ) \end{array} \right . \end{equation*} will do (the second expression of $Z$ follows from the first one and from the fact that $(ap-\alpha q)(bp-\beta q)<0$ --- again, the sign of $Z$ remains to be checked). First, $$\begin{array}{rcl}\rho\circ\iota(c)&=&-\sin\frac{ap-\alpha q}{\nu q}\pi\sin\frac{bp-\beta q}{\nu q}\pi+\sin\frac{a}{\mu q}\pi\sin\frac{b}{\mu q}\pi \\ &=&\sin\frac{a'}{\nu q}\pi\sin\frac{b'}{\nu q}\pi+\sin\frac{a}{\mu q}\pi\sin\frac{b}{\mu q}\pi=U''+V''\end{array}$$ (again because $(ap-\alpha q)(bp-\beta q)<0$), so $\underset{\mathbb{T}}{\max}(\rho\circ\iota)=\rho\circ\iota(c)$. The upper bound $U''+V''$ for $Z$ is clear from its first expression; the lower bound follows lines similar to the proof of Claim \ref{cla:key}: we just need to check $$\textstyle{2Z=\cos \frac{x'\pi}{\nu q} \cos\frac{y\pi}{\mu q} - \cos \frac{x\pi}{\mu q} \cos\frac{y'\pi}{\nu q}>2 \left | \sin \frac{a\pi}{\mu q} \sin \frac{b\pi}{\mu q}- \sin \frac{a'\pi}{\nu q} \sin \frac{b'\pi}{\nu q} \right |~.}$$ The right member being $|(\cos\frac{x'}{\nu q}\pi-\cos\frac{y'}{\nu q}\pi )-(\cos\frac{y}{\mu q}\pi-\cos\frac{x}{\mu q}\pi)|$, we only need $$\textstyle{ (\cos \frac{x'}{\nu q}\pi\pm 1 )\cdot (\cos \frac{y}{\mu q}\pi \mp 1 )~>~ (\cos \frac{x}{\mu q}\pi \mp 1 )\cdot (\cos \frac{y'}{\nu q}\pi\pm 1 )}$$ which amounts to \begin{equation}\label{eq:sineratio2} { \frac{\sin \frac{x'}{\nu q} \cdot \frac{\pi}{2}} {\sin \frac{y'}{\nu q}\cdot\frac{\pi}{2}} < \frac{\sin \frac{\mu q - x}{\mu q}\cdot \frac{\pi}{2}} {\sin \frac{\mu q - y}{\mu q}\cdot \frac{\pi}{2}} \hspace{6pt}(i)\hspace{8pt}\text{ and }~ \frac{\sin \frac{y}{\mu q} \cdot \frac{\pi}{2}} {\sin \frac{x}{\mu q} \cdot\frac{\pi}{2}} < \frac{\sin \frac{\nu q-y'}{\nu q}\cdot\frac{\pi}{2}} {\sin \frac{\nu q-x'}{\nu q}\cdot\frac{\pi}{2}} \hspace{6pt}(ii)~.} \end{equation} Let us focus on (\ref{eq:sineratio2})-$(i)$. 
By Proposition \ref{prop:sines}, it is enough to check $0<x'<y'< \nu q$ and $0<y<x<\mu q$ (which are clear from Proposition \ref{prop:abab}: indeed, by Observation \ref{obs:whoisone}, we \emph{may} have $y'=q$ but then $\nu>1$; we \emph{may} have $x=q$ but then $\mu>1$), plus \begin{equation}\label{eq:enfin2} \textstyle { \frac{x'}{\nu} < q-\frac{x}{\mu} ~\text{ and }~ \frac{x'}{y'}\leq \frac{\mu q-x}{\mu q-y}~.}\end{equation} The first inequality of (\ref{eq:enfin2}) can be written $\frac{|a'-b'|}{\nu}+\frac{a+b}{\mu}<a'b+b'a$, or equivalently, $$\textstyle{(a'-\frac{1}{\mu}) \cdot (b\pm \frac{1}{\nu})+ (b'-\frac{1}{\mu})\cdot (a\mp \frac{1}{\nu})>0~.}$$ If $\mu,\nu>1$ this is obvious. If $\mu=1<\nu$, then at least one of $a',b'$ is larger than $1$ (Observation \ref{obs:whoisone}), and the product where it appears is positive: done. If $\nu=1<\mu$, then at least one of $a,b$ is larger than one and we are also done. The second inequality of (\ref{eq:enfin2}) can be written $\mu (y'-x')\geq \frac{(a+b)(a'+b')-|(a-b)(a'-b')|}{a'b+b'a}$. As in the proof of Claim \ref{cla:key}, the left member is $2\mu\inf\{a',b'\}\geq 2$ while the right member is at most $2$. The proof of (\ref{eq:sineratio2})-$(ii)$ is identical to that of (\ref{eq:sineratio2})-$(i)$, swapping $(a,b,x,y)$ with $(a',b',y',x')$. \begin{flushright} Laboratoire Paul Painlev\'e (UMR 8524) \\ CNRS -- Universit\'e de Lille I \\ 59 655 Villeneuve d'Ascq C\'edex, France \\ \url{[email protected]} \end{flushright} \end{document}
\begin{document} \title[Isometric and anti-isometric classes of timelike minimal surfaces]{Isometric and anti-isometric classes of timelike minimal surfaces in Lorentz--Minkowski space} \author[S. Akamine]{Shintaro Akamine} \address[Shintaro Akamine]{ Department of Liberal Arts, College of Bioresource Sciences, Nihon University, 1866 Kameino, Fujisawa, Kanagawa, 252-0880, Japan} \email{[email protected]} \keywords{Lorentz-Minkowski space, timelike minimal surface, symmetry, isometric class, anti-isometric class.} \subjclass[2010]{Primary 53A10; Secondary 53B30, 57R45.} \thanks{ This work was partially supported by JSPS KAKENHI Grant Numbers 19K14527 and 23K12979. } \begin{abstract} The isometric class of minimal surfaces in the Euclidean $3$-space $\mathbb{R}^3$ has the following rigidity: if two simply connected minimal surfaces are isometric, then one of them is congruent to a surface in a specific one-parameter family, called the associated family, of the other. On the other hand, the situation for surfaces with Lorentzian metrics is different. In this paper, we show that there exist two timelike minimal surfaces in the Lorentz-Minkowski $3$-space $\mathbb{R}^3_1$ that are isometric to each other but one of which does not belong to the congruent class of the associated family of the other. We also prove a rigidity theorem for isometric and anti-isometric classes of timelike minimal surfaces under the assumption that the surfaces have no flat points. Moreover, we show how symmetries of such surfaces propagate under various deformations including isometric and anti-isometric deformations. In particular, some conservation laws of symmetry for Goursat transformations are discussed. \end{abstract} \maketitle \section{Introduction} Surfaces which admit a one-parameter family of isometric deformations preserving the mean curvature are called {\it Bonnet surfaces}. Due to Bonnet \cite{Bonnet}, it is known that any constant mean curvature surface in the Euclidean space $\mathbb{R}^3$ which is not totally umbilic is a Bonnet surface. For the case of minimal surfaces in $\mathbb{R}^3$, each minimal surface has a one-parameter family of isometric minimal surfaces, called the {\it associated family}. Furthermore, the following rigidity theorem was shown by Schwarz \cite[p.175]{Schwarz}: \begin{fact}\label{thm:Schwarz} If two simply connected minimal surfaces in $\mathbb{R}^3$ are isometric, then one of them is congruent to a surface in the associated family of the other. \end{fact} However, the situation for surfaces with Lorentzian metrics is different, as follows. \begin{itemize} \item Not only isometric deformations but also anti-isometric deformations, which reverse the sign of the first fundamental form of the original surface, can be considered. \item The shape operator is not necessarily diagonalizable, and hence a specific kind of point, a so-called {\it quasi-umbilic point}, at which the shape operator is non-diagonalizable, can appear on such a surface (see Section 2.2). Moreover, umbilic and quasi-umbilic points are not isolated in general, even for minimal surfaces. \end{itemize} In this paper, we consider {\it timelike minimal surfaces} in the Lorentz-Minkowski 3-space $\mathbb{R}^3_1$ with signature $(-,+,+)$, which are surfaces whose induced metric from $\mathbb{R}^3_1$ is Lorentzian and whose mean curvature vanishes identically. The following is our first main theorem. \begin{theor}\label{Theorem1_Intro} The following statement holds. 
\begin{itemize} \item[(1)] There exist two flat timelike minimal surfaces in $\mathbb{R}^3_1$ which are isometric to each other but one of which does not belong to the congruent class of the associated family of the other. \end{itemize} Let $f_1$ and $f_2$ be simply connected timelike minimal surfaces in $\mathbb{R}^3_1$ without flat points. Furthermore, the following statements hold. \begin{itemize} \item[(2)] If $f_1$ and $f_2$ are isometric, then $f_1$ is congruent to a surface in the associated family $\{(f_2)_\theta\}_{\theta \in \mathbb{R}}$ of $f_2$. \item[(3)] If $f_1$ and $f_2$ are anti-isometric, then $f_1$ is congruent to a surface in the associated family $\{(\hat{f_2})_\theta\}_{\theta \in \mathbb{R}}$ of the conjugate surface $\hat{f_2}$. \end{itemize} \end{theor} Statement (1) gives a counterexample showing that the same assertion as Fact \ref{thm:Schwarz} does not hold for timelike minimal surfaces. Statements (2) and (3) show rigidity of the isometric and anti-isometric classes of timelike minimal surfaces under the assumption that the surfaces have no flat points, where a flat point is a point at which the Gaussian curvature $K$ vanishes. We remark that flat points of a timelike minimal surface consist of umbilic and quasi-umbilic points. The definitions of the associated family and the conjugate surface of a timelike minimal surface will be given in Definition \ref{def:associated}. In the second half of the paper, we consider symmetries of timelike minimal surfaces. We show how symmetries of such surfaces propagate under various deformations including the above isometric and anti-isometric deformations. Each conformal timelike minimal surface $f\colon M \to \mathbb{R}^3_1$ from a (simply connected) Lorentz surface $M$ into $\mathbb{R}^3_1$ is realized as the real part of a paraholomorphic null curve $\Phi\colon M \to \mathbb{C}'^3$ into the paracomplex $3$-space $\mathbb{C}'^3$, that is, $f=\operatorname{Re}{\Phi}$. More specifically, a Weierstrass type representation formula was given by Konderak \cite{Konderak}; see Fact \ref{fact:Weierstrass1} for more details. Conformality is preserved under transformations of the form $f_A:=\operatorname{Re}{A\Phi}$, called the {\it Goursat transformation} of $f$ (see \cite{G}), for any matrix $A$ in the paracomplex conformal group \[ \mathrm{CO}(1,2; \mathbb{C}') = \{ A\in \mathrm{M}(3,\mathbb{C}') \mid {}^t\! AI_{1,2}A=cI_{1,2},\ c\in \mathbb{C}',\ c\bar{c}\neq 0 \}, \] where ${}^t\! A$ is the transposed matrix of $A$ and $I_{1,2}=\text{diag}(-1,1,1)$. A symmetry $g$ of $f$ is an isometry of the Lorentz surface $M$ satisfying $f\circ g = Of +t $ for some $O$ in the indefinite orthogonal group $O(1,2)$ of $\mathbb{R}^3_1$ and a vector $t\in \mathbb{R}^3_1$, and we call $O$ the {\it linear part} of $g$. The set of such symmetries is denoted by $S_f(M)$ and is often referred to as the {\it space group} (see Definition \ref{def:space group}). In the above setting, we give the following conservation law of symmetry for Goursat transformations. \begin{theor}\label{thm:symmetry_Goursat_Introduction} Let $f\colon M\to \mathbb{R}^3_1$ be a simply connected timelike minimal surface, $f_A$ be its Goursat transformation for $A\in \mathrm{CO}(1,2; \mathbb{C}')$ and $g\in S_f(M)$ with the linear part $O$. Then the following statements hold. 
\begin{itemize} \item[(1)] When $g$ is orientation preserving, there exists $\widetilde{O}\in \mathrm{O}(1,2)$ such that $AO=\widetilde{O}A$ if and only if $g\in S_{f_A}(M)$ and its linear part is $\widetilde{O}$. \item[(2)] When $g$ is orientation reversing, there exists $\widetilde{O}\in \mathrm{O}(1,2)$ such that $AO=\widetilde{O}\bar{A}$ if and only if $g\in S_{f_A}(M)$ and its linear part is $\widetilde{O}$. \end{itemize} \end{theor} Symmetry conservation under deformations of minimal surfaces has often been discussed. A well-known classical symmetry correspondence is the fact that the line symmetry with respect to a straight line on a minimal surface in $\mathbb{R}^3$ (or a timelike minimal surface in $\mathbb{R}^3_1$) corresponds to the planar symmetry of the conjugate surface with respect to a plane orthogonal to the line, see \cite{DHS, Karcher, KKSY}, for example. Similarly, as discussed by Kim, Koh, Shin and Yang \cite{KKSY}, there is also a symmetry correspondence between shrinking singularities (also called conelike singularities) and folding singularities on timelike minimal surfaces in $\mathbb{R}^3_1$. All of these symmetries are due to the reflection principle; they are derived from orientation reversing isometries of the form $g(z)=\bar{z}$. Hence, these symmetry relations are obtained by considering the Goursat transformation $f_J$ for $J=jI_3$ in Theorem \ref{thm:symmetry_Goursat_Introduction}, where $j$ is the imaginary unit of $\mathbb{C}'$ and $I_3$ is the identity matrix. Furthermore, by considering the Goursat transformation $f_D$ associated with a special matrix $D$ in Section \ref{sec:duality}, these symmetries about lines, planes, shrinking singularities, and folding singularities can be unified in Corollary \ref{cor:quadruple}. See Example \ref{example:Enneper} for a concrete example and Figure \ref{Fig:Intro}. For translation symmetry, Meeks \cite{Meeks} showed a necessary and sufficient condition for the conjugate minimal surface in the Euclidean space to have a translation symmetry. Leschke and Moriya \cite{LM} also revealed results on the conservation of translation symmetry of simple factor dressing of minimal surfaces in $\mathbb{R}^3$ and $\mathbb{R}^4$, which is also a special kind of Goursat transformation. Since translation symmetry corresponds to the case where $O=I_3$ in Theorem \ref{thm:symmetry_Goursat_Introduction}, we also obtain a conservation law of translation symmetry in Corollary \ref{cor:translation}. 
Similarly, as an application of Theorem \ref{thm:symmetry_Goursat_Introduction}, by considering specific matrices in $\mathrm{CO}(1,2; \mathbb{C}')$ such as \[ J(\theta)=\begin{pmatrix} \phoro{e}^{j\theta} & 0& 0 \\ 0 & \phoro{e}^{j\theta} & 0 \\ 0 & 0 & \phoro{e}^{j\theta} \end{pmatrix},\quad \hat{J}(\theta)=\begin{pmatrix} j\phoro{e}^{j\theta} & 0& 0 \\ 0 & j\phoro{e}^{j\theta} & 0 \\ 0 & 0 & j\phoro{e}^{j\theta} \end{pmatrix}\ \text{ and }\ A(\lambda)=\begin{pmatrix} \frac{\lambda+\frac{1}{\lambda}}{2} & j\frac{\lambda-\frac{1}{\lambda}}{2} & 0 \\ j\frac{\lambda-\frac{1}{\lambda}}{2} & \frac{\lambda+\frac{1}{\lambda}}{2} & 0 \\ 0 & 0 & 1 \end{pmatrix} \] where $\theta\in \mathbb{R}$ and $\lambda >0$, we obtain symmetry relations for the isometric deformation $\{f_\theta\}=\{f_{J(\theta)}\}$ and the anti-isometric deformation $\{\hat{f}_\theta\}=\{f_{\hat{J}(\theta)}\}$ in Corollary \ref{cor:Spacegroup}, which is the Lorentzian counterpart of the result for minimal surfaces by Meeks \cite[Theorem 5.5]{Meeks}, as well as for the deformations $\{f_{A(\lambda)}\}$, called the {\it L\'opez-Ros deformations}, discussed in Section 4.3. \begin{figure} \caption{Symmetries under Goursat transformations are related to each other: two orientation preserving translation symmetries degenerating in one direction on the initial surface $f$ (the elliptic catenoid) are preserved, as discussed in Corollary \ref{cor:translation}.\label{Fig:Intro}} \end{figure} This article is organized as follows: In Section 2, we describe some notions of paracomplex analysis and timelike minimal surfaces. In Section 3, we investigate isometric and anti-isometric classes of timelike minimal surfaces. The proof of Theorem \ref{Theorem1_Intro} is given in two separate parts: the proofs of Proposition \ref{prop:counter} and Theorem \ref{thm:timelikeSchwarz}. Regarding (1) of Theorem \ref{Theorem1_Intro}, we also determine all flat timelike minimal surfaces in Proposition \ref{FlatMinimal_rev}. In Section 4, we give a proof of Theorem \ref{thm:symmetry_Goursat_Introduction} and several applications of it to well-known important deformations and transformations. Finally, in Section \ref{sec:Ex} we describe relationships between the symmetries of various concrete examples in terms of Goursat transformations. \section{Preliminaries} First, we briefly recall the theories of paracomplex analysis and timelike minimal surfaces. For a more detailed introduction, we refer the readers to works such as \cite{A,ACO,IT,Konderak,W} and their references. \subsection{Paracomplex analysis} A {\it paracomplex number} (or {\it split-complex number}) is a number $z$ of the form $z=x+jy$, where $x,y\in \mathbb{R}$ and $j$ is the imaginary unit satisfying $j^2=1$. We denote the set of such paracomplex numbers by $\mathbb{C}'$ and refer to it as the {\it paracomplex plane}. Just as for complex numbers, $\mathbb{C}'$ forms an algebra over $\mathbb{R}$ and one can define the notions of \begin{itemize} \item the \emph{real part} $\operatorname{Re} z := x$ and the \emph{imaginary part} $\operatorname{Im} z := y$ of $z=x+jy$, \item the {\it conjugate} $\bar{z}$ of $z=x+jy$ as $\bar{z}:=x-jy$, and \item the squared modulus of $z$ defined by $|z|^2 := z\bar{z}=x^2-y^2$. \end{itemize} It should be remarked that the relation $|z|^2<0$ may hold, and that $|jz|^2=-|z|^2$. 
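Since paracomplex arithmetic may be less familiar than complex arithmetic, the following minimal sketch (plain Python; the class name and method names are ours and serve only as an illustration) implements the operations just listed and checks the relations $j^2=1$ and $|jz|^2=-|z|^2$, as well as the paracomplex Euler formula recalled below.
\begin{verbatim}
import math

class Para:
    """Paracomplex (split-complex) number x + j*y with j*j = +1."""
    def __init__(self, x, y=0.0):
        self.x, self.y = float(x), float(y)
    def __add__(self, w):
        return Para(self.x + w.x, self.y + w.y)
    def __mul__(self, w):
        # (x + j y)(x' + j y') = (x x' + y y') + j (x y' + y x')
        return Para(self.x * w.x + self.y * w.y, self.x * w.y + self.y * w.x)
    def conj(self):
        return Para(self.x, -self.y)
    def sq_modulus(self):
        # z * conj(z) = x^2 - y^2, which may well be negative
        return self.x ** 2 - self.y ** 2
    def __repr__(self):
        return f"{self.x} + j*{self.y}"

j = Para(0.0, 1.0)
z = Para(2.0, 3.0)
print(j * j)                   # 1.0 + j*0.0, i.e. j^2 = 1
print(z.sq_modulus())          # 4 - 9 = -5: a negative squared modulus
print((j * z).sq_modulus())    # 5, illustrating |jz|^2 = -|z|^2

# paracomplex Euler formula: e^{j t} = cosh t + j sinh t lies on |w|^2 = 1
t = 0.7
w = Para(math.cosh(t), math.sinh(t))
print(w.sq_modulus())          # 1.0 (up to rounding)
\end{verbatim}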
Given a paracomplex function $\phoro{f} : \Sigma \subset \mathbb{C}' \to \mathbb{C}'$ where $\Sigma$ is a simply-connected domain, we call $\phoro{f}$ \emph{paraholomorphic} if $\phoro{f}$ satisfies \begin{equation}\label{eqn:cauchyRiemann} \phoro{f}_{\bar{z}} = \partial_{\bar{z}} \phoro{f} = 0, \end{equation} where $\partial_z := \frac{1}{2}\left(\partial_x + j \partial_y \right)$ and $\partial_{\bar{z}} := \frac{1}{2}\left(\partial_x - j \partial_y \right)$ are the paracomplex Wirtinger derivatives. Furthermore, we call a function $\phoro{f} : \Sigma \to \mathbb{C}'$ \emph{parameromorphic} if it is $\mathbb{C}'$-valued on an open dense subset of $\Sigma$ and for arbitrary $p \in \Sigma$, there exists a paraholomorphic function $\phoro{g}$ such that $\phoro{fg}$ is paraholomorphic near $p$. We recall some elementary paracomplex analytic functions used in this paper. The exponential function $\phoro{e}^z$ is defined by \[ \phoro{e}^z := \sum_{n = 0}^{\infty} \frac{z^n}{n!}. \] We can see the paracomplex version of Euler's formula \[ \phoro{e}^{j \theta} = \cosh{\theta} + j \,\sinh{\theta},\quad \theta \in \mathbb{R}. \] By using this function, each element of the hyperbola $\{z\in \mathbb{C}' \mid |z|^2 =1\}$ or $\{z\in \mathbb{C}' \mid |z|^2 =-1\}$ is written as $z=\pm \phoro{e}^{j \theta}$ or $z=\pm j\phoro{e}^{j \theta}$ for some $\theta$, respectively, which will play an important role in the proof of Theorem \ref{thm:timelikeSchwarz}. The paracomplex circular functions are also defined via analytic continuation from the real counterparts as follows. \[ \operatorname{\phoro{cos}} z := \sum_{n = 0}^{\infty} (-1)^n \frac{z^{2n}}{(2n)!}, \quad \operatorname{\phoro{sin}} z := \sum_{n = 0}^{\infty} (-1)^n \frac{z^{2n + 1}}{(2n + 1)!},\quad \operatorname{\phoro{tan}} z := \frac{\operatorname{\phoro{sin}} z}{\operatorname{\phoro{cos}} z}. \] The functions $\operatorname{\phoro{cos}} z$ and $\operatorname{\phoro{sin}} z$ are paraholomorphic on $\mathbb{C}'$ satisfying the relations \[ \operatorname{\phoro{cos}}{z}= \cos{x}\cos{y}-j\sin{x}\sin{y}, \quad \operatorname{\phoro{sin}}{z}=\sin{x}\cos{y}+j\cos{x}\sin{y}, \] where $z=x+jy$. The function $\operatorname{\phoro{tan}}{z}$ is parameromorphic on $\mathbb{C}'$. \subsection{Timelike minimal surfaces and their symmetry groups} Let $\mathbb{R}^3_1$ be the Lorentz-Minkowski 3-space with the indefinite inner product \[ \langle \prescript{t\!}{}(x_1,x_2,x_3), \prescript{t\!}{}(y_1,y_2, y_3)\rangle = -x_1y_1+x_2y_2+x_3y_3. \] A surface $f\colon M \to \mathbb{R}^3_1$ from a $2$-dimensional manifold $M$ to $\mathbb{R}^3_1$ is said to be {\it timelike} if its first fundamental form $\mathrm{I}_f=\langle df, df\rangle$ is Lorentzian. The Gaussian curvature $K$ and the mean curvature $H$ of a timelike surface $f$ are defined as follows. \[ K := \det{S},\quad H:=\frac{1}{2}\mathrm{tr}{S}, \] where $S$ is the shape operator of $f$. One of the remarkable features of timelike surfaces concerns the diagonalizability of the shape operator. The shape operator $S$ of a timelike surface is not always diagonalizable over $\mathbb{R}$, that is, principal curvatures can be complex numbers. More precisely, there are three possibilities for the diagonalizability of $S$ at each point of a timelike surface in $\mathbb{R}^3_1$ as follows: \begin{itemize} \item[(i)] $S$ is diagonalizable over $\mathbb{R}$. In this case $H^2 - K \geq 0$, and equality holds exactly at umbilic points. \item[(ii)] $S$ is diagonalizable over $\mathbb{C}\setminus \mathbb{R}$. In this case $H^2 - K < 0$. 
\item[(iii)] $S$ is non-diagonalizable over $\mathbb{C}$. In this case $H^2 - K = 0$. Each point satisfying this condition is called {\it quasi-umbilic} (see \cite{Clelland}). \end{itemize} In this paper, we discuss {\it timelike minimal surfaces}, which are timelike surfaces with $H=0$. Hence, the above relations show that the diagonalizability of $S$ is directly related to the sign of $K$, and there is no restriction on the sign of $K$ for timelike minimal surfaces. This is quite different from the situation where $K\leq 0$ holds for minimal surfaces in $\mathbb{R}^3$ and $K\geq 0$ holds for maximal surfaces in $\mathbb{R}^3_1$. By using paracomplex analysis, timelike minimal surfaces admit a Weierstrass type representation \cite{Konderak} on Lorentz surfaces (see also \cite{A} for surfaces with singularities): \begin{fact}\label{fact:Weierstrass1} Any timelike minimal surface $f\colon M \to \mathbb{R}^3_1$ can be represented as \begin{equation}\label{eq:pW} f(z) = \operatorname{Re}\int \prescript{t\!}{}(-(1 + h^2),j(1 - h^2), 2h)\eta \end{equation} over a simply-connected Lorentz surface $M$ on which $h$ is parameromorphic, while $\eta$ and $h^2\eta$ are paraholomorphic. Furthermore, the induced metric of the surface becomes \begin{equation}\label{eqn:wConformal} \mathrm{I}_{f} = -(1 - |h|^2)^2 |\eta |^2. \end{equation} We call $(h, \eta)$ the \emph{Weierstrass data} of the timelike minimal surface $f$. \end{fact} \begin{remark}\label{remark:Gaussmap} The parameromorphic function $h$ is identified with the Gauss map of $f$ as follows. Let $S^2_1=\{\prescript{t\!}{}(x_1,x_2,x_3)\in \mathbb{R}^3_1 \mid -x_1^2+x_2^2+x_3^2=1\}$ be the unit pseudosphere and $H^1=\{\prescript{t\!}{}(x_1,x_2)\in \mathbb{R}^2_1 \mid -x_1^2+x_2^2=-1\}$ be the hyperbola on the $x_1x_2$-plane, identified with the Minkowski plane $\mathbb{R}^2_1$. We consider the following stereographic projection $\mathcal{P}$ with respect to the point $(0,0,1)$ \[ \mathcal{P}(\bm{x}) = \prescript{t\!}{}{\left(\frac{x_1}{1-x_3}, \frac{x_2}{1-x_3}\right)},\quad \bm{x}=\prescript{t\!}{}(x_1,x_2,x_3) \] from $S^2_1\setminus \{x_3 = 1\}$ to $\mathbb{R}^2_1\setminus H^1$. Since we can take a unit normal vector field $\nu$ of \eqref{eq:pW} as \[ \nu=\frac{1}{1-|h|^2}\prescript{t\!}{}{\left(2\operatorname{Re} h,2\operatorname{Im} h, 1+|h|^2\right)}, \] we have the relation $\mathcal{P}\circ \nu = h$ where we identify the $x_1x_2$-plane with $\mathbb{C}'$. \end{remark} Similarly to the case of minimal and maximal surfaces, timelike minimal surfaces also admit associated families and conjugate surfaces as follows. \begin{definition}[Associated family and conjugate surface]\label{def:associated} Let $f$ be a timelike minimal surface written as \eqref{eq:pW} with Weierstrass data $(h,\eta)$. We define the {\it associated family} $\{f_\theta\}_{\theta \in \mathbb{R}}$ as the family consisting of the timelike minimal surfaces $f_\theta$ defined by the Weierstrass data $(h,\phoro{e}^{j\theta}\eta)$. We also call the timelike minimal surface $\hat{f}$ defined by the Weierstrass data $(h,j\eta)$ the {\it conjugate surface} of $f$. \end{definition} \begin{remark}\label{rema:asso_family} In contrast to the case of minimal and maximal surfaces, the conjugate surface $\hat{f}$ is not a member of the associated family $\{f_\theta\}_{\theta \in \mathbb{R}}$. By \eqref{eqn:wConformal} and the relation $|jz|^2=-|z|^2$, each $f_\theta$ is isometric to the original one $f=f_0$ and $\hat{f}$ is anti-isometric to $f$. 
\end{remark} The Weierstrass-type representation formula \eqref{eq:pW} gives a conformal parametrization for timelike minimal surfaces. In addition, timelike minimal surfaces have the following characterization in terms of {\it null coordinates} $(u,v)$, on which the first fundamental form of the surface is written as $ \mathrm{I}_{f} =\Lambda dudv$ for some function $\Lambda$. \begin{fact}[\cite{McNertney}]\label{Fact:McNertney} If $\varphi(u)$ and $\psi(v)$ are null curves in $\mathbb{R}^3_1$ such that $\varphi'(u)$ and $\psi'(v)$ are linearly independent for all $u$ and $v$, then \begin{equation}\label{null curves decomposition} f(u,v)=\varphi(u)+\psi(v) \end{equation} is a timelike minimal surface. Conversely, any timelike minimal surface can be written locally as the equation \eqref{null curves decomposition} for some pair of null curves. \end{fact} \begin{remark}\label{rema:asso_family2} We can easily check that the associated family $\{f_\theta\}_{\theta \in \mathbb{R}}$ and the conjugate surface $\hat{f}$ in Definition \ref{def:associated} correspond to deformations of the generating null curves $\varphi$ and $\psi$ in \eqref{null curves decomposition} as follows. \begin{align*}\label{null curves decomposition2} f_\theta(u,v)=e^\theta \varphi(u)+e^{-\theta}\psi(v),\quad \hat{f}(u,v)= \varphi(u)- \psi(v). \end{align*} These surfaces have the relationship $f_\theta =\cosh{\theta}f+\sinh{\theta}\hat{f}$. \end{remark} In Section \ref{sec:Goursat}, we will see that symmetries of timelike minimal surfaces can be controlled under various deformations including the above isometric and anti-isometric deformations. For this purpose, we define the group of symmetries of a timelike minimal surface based on the work \cite{Meeks}. \begin{definition}[Space group]\label{def:space group} Let $f\colon M \to \mathbb{R}^3_1$ be a timelike minimal surface. The {\it space group} $S_f(M)$ of $f$ is the group of isometries of $M$ induced by symmetries of $f(M)$ in $\mathbb{R}^3_1$, which consists of isometries $g\colon M \to M$ such that $f(g(p))=Of(p)+t$ for a matrix $O\in O(1,2)$, a vector $t\in \mathbb{R}^3_1$ and arbitrary $p\in M$. \[ \begin{diagram} \node[2]{M} \arrow[2]{e,b}{f} \arrow[1]{s,l}{g} \node[2]{f(M)\subset \mathbb{R}^3_1} \arrow[1]{s,r}{\text{isometry of $\mathbb{R}^3_1$}} \\ \node[2]{M} \arrow[2]{e,t}{f} \node[2]{f(M)\subset \mathbb{R}^3_1} \end{diagram} \] We call the matrix part $O$ the {\it linear part} of $g\in S_f(M)$. We also denote the orientation preserving subgroup of $S_f(M)$ by $S^\circ_f(M)$, and the orientation reversing elements of $S_f(M)$ by $S^r_f(M)$. \end{definition} \section{Rigidity theorem} As we saw in Remark \ref{rema:asso_family} and in the Introduction, the associated family gives an isometric deformation, and the converse is true in the sense of Fact \ref{thm:Schwarz}. For the proof of Fact \ref{thm:Schwarz}, the fact that flat points of non-planar minimal surfaces are isolated, together with real analyticity, plays an important role, as one can see in \cite[p.275]{Spivak}, for example. For the same reasons, the same result obviously holds for maximal surfaces in $\mathbb{R}^3_1$. However, flat points of timelike minimal surfaces, which are the points with $K=0$, consist of umbilic and quasi-umbilic points, and they are not isolated in general. Hence, we can construct a counterexample to show that the same assertion as in Fact \ref{thm:Schwarz} for timelike minimal surfaces does not hold. 
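The counterexample announced above is completely explicit, and its first fundamental form can be checked symbolically. The following sketch (a convenience only, assuming SymPy is available; the two null curves are exactly those appearing in the proof of Proposition \ref{prop:counter} below) verifies that both surfaces carry the same metric $3e^u\,du\,dv$.
\begin{verbatim}
import sympy as sp

u, v = sp.symbols('u v', real=True)

def lorentz(a, b):
    # inner product of signature (-, +, +)
    return -a[0]*b[0] + a[1]*b[1] + a[2]*b[2]

phi1 = sp.Matrix([-2, sp.sqrt(3), 1]) * sp.exp(u) / 2
phi2 = sp.Matrix([-u - sp.Rational(3, 4) * sp.exp(u),
                  2 * sp.sqrt(3) * sp.exp(u / 2),
                  -u + sp.Rational(3, 4) * sp.exp(u)])
psi = sp.Matrix([v, 0, v])   # common lightlike "v-direction"

for phi in (phi1, phi2):
    f = phi + psi
    f_u, f_v = sp.diff(f, u), sp.diff(f, v)
    E = sp.simplify(lorentz(f_u, f_u))
    F = sp.simplify(lorentz(f_u, f_v))
    G = sp.simplify(lorentz(f_v, f_v))
    print(E, 2 * F, G)   # both times: 0, 3*exp(u), 0, i.e. I = 3 e^u du dv
\end{verbatim}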
\begin{proposition}\label{prop:counter} There exist two timelike minimal surfaces that are isometric to each other but one of which does not belong to the congruent class of the associated family of the other. \end{proposition} \begin{proof} Let us construct two flat timelike minimal surfaces that are isometric to each other, one of which is a timelike plane and the other is not, as follows. \begin{align*} f_1(u,v)&=\varphi_1(u)+{}^t(v,0,v),\quad \varphi_1(u)={}^t(-2,\sqrt{3},1)e^u/2,\\ f_2(u,v)&=\varphi_2(u)+{}^t(v,0,v),\quad \varphi_2(u)=\prescript{t\!}{}{\left(-u-\frac{3}{4}e^u,2\sqrt{3}e^{u/2}, -u+\frac{3}{4}e^u\right)}, \end{align*} where $\varphi_1$ and $\varphi_2$ are null curves. A straightforward calculation shows that the first fundamental forms $\mathrm{I}_{f_1}$ and $\mathrm{I}_{f_2}$ of the surfaces $f_1$ and $f_2$ satisfy \[ \mathrm{I}_{f_1}= \mathrm{I}_{f_2} =3e^ududv, \] and hence the two surfaces $f_1$ and $f_2$ are isometric. By definition, the surface $f_1$ is a timelike plane, which is totally umbilic. The surface $f_2$ is totally quasi-umbilic because it has a non-diagonalizable shape operator of the form \[ \begin{pmatrix} 0 & 0 \\ \sqrt{3}e^{-u/2} & 0 \end{pmatrix}. \] Finally, by Remark \ref{rema:asso_family2}, we can see that the associated family of a plane remains a plane. Therefore, we obtain the desired result. \end{proof} Proposition \ref{prop:counter} shows that there are many flat timelike minimal surfaces. Here, we determine all such flat timelike minimal surfaces. \begin{proposition}\label{FlatMinimal_rev} Any flat timelike minimal surface in $\mathbb{R}^3_1$ is a cylinder whose base curve and director curve are lightlike. \end{proposition} \begin{proof} We consider a local parametrization \eqref{null curves decomposition} on a domain $D$ of the form $(u_0-\varepsilon, u_0+\varepsilon)\times (v_0-\varepsilon, v_0+\varepsilon)$. On $D$, the first and the second fundamental forms can be written as follows. \begin{center} $\mathrm{I}=2\Lambda dudv$\quad and\quad $\mathrm{II}=Qdu^2+Rdv^2$. \end{center} Therefore, the shape operator $S$ and the Gaussian curvature $K$ are \begin{equation}\label{condition1} \hspace{0.1cm} S=\mathrm{I}^{-1}\mathrm{II}= \left(\begin{array}{cc} 0 & \frac{R}{\Lambda} \\ \frac{Q}{\Lambda} & 0 \\ \end{array} \right),\quad K=-\frac{QR}{\Lambda^2}. \end{equation} Here, we remark that the Codazzi equation shows that the coefficients $Q=Q(u)$ and $R=R(v)$ are functions of one variable, see \cite{FI}. When $Q$ and $R$ vanish identically on $D$, the surface $f$ is totally umbilic, and hence it is a part of a timelike plane. When $Q \not\equiv 0$ or $R \not\equiv 0$ holds on $D$, without loss of generality, we may assume that $Q(u_0) \neq 0$. Since $R$ is a function of one variable and $QR\equiv 0$, we obtain $R \equiv 0$ on $D$. This means that $\psi' /\!/ \psi''$ and hence we can take a parameter $\tilde{v}=\tilde{v}(v)$ and a lightlike vector $\psi_0\in \mathbb{R}^3_1$ such that $\psi = \tilde{v}\psi_0$, which completes the proof. \end{proof} \begin{remark} The quadratic differentials $Qdu^2=\langle f_{uu}, \nu \rangle du^2$ and $Rdv^2=\langle f_{vv},\nu\rangle dv^2$ are called the {\it Hopf differentials} of $f$, where $\nu$ is a unit normal vector field of $f$. 
Fujioka and Inoguchi \cite{FI} showed that any umbilic free timelike (not necessarily minimal) surface in a Lorentzian space form satisfying the condition $Q\neq 0, R\equiv 0$ or $Q\equiv 0, R\neq 0$ must be a ruled surface whose base curve and director curve are lightlike, which is called a {\it B-scroll}. For B-scrolls in $\mathbb{R}^3_1$, see also \cite{CDM,Clelland,Graves,IT,McNertney}.
\end{remark}
By avoiding flat points, we obtain the following rigidity theorem for isometric and anti-isometric classes, analogous to Fact \ref{thm:Schwarz}.
\begin{theorem}\label{thm:timelikeSchwarz}
Let $f_1$ and $f_2$ be simply connected timelike minimal surfaces in $\mathbb{R}^3_1$ without flat points and singular points. Then the following statements hold.
\begin{itemize}
\item[(1)] If $f_1$ and $f_2$ are isometric, then $f_1$ is congruent to a surface in the associated family $\{(f_2)_\theta\}_{\theta \in \mathbb{R}}$ of $f_2$.
\item[(2)] If $f_1$ and $f_2$ are anti-isometric, then $f_1$ is congruent to a surface in the associated family $\{(\hat{f_2})_\theta\}_{\theta \in \mathbb{R}}$ of the conjugate surface $\hat{f_2}$.
\end{itemize}
\end{theorem}
To prove the theorem, we note that, under an additional assumption, the following basic property of holomorphic functions also holds for paraholomorphic functions.
\begin{lemma}\label{lemma:constant}
If a paraholomorphic function $\phoro{f}$ satisfies $|\phoro{f}|^2=c$ for a non-zero constant $c$, then $\phoro{f}$ is a constant function.
\end{lemma}
This lemma is a direct consequence of the Cauchy-Riemann type equations, see \eqref{eqn:cauchyRiemann}. In the case where $c=0$, in contrast to the case of holomorphic functions, there exists a nonconstant paraholomorphic function $\phoro{f}$ satisfying $|\phoro{f}|^2=0$, such as $\phoro{f}(z)=z(1+j)$.
\begin{proof}[{\bf Proof of Theorem $\ref{thm:timelikeSchwarz}$}]
As for surfaces in $\mathbb{R}^3$, the first, second and third fundamental forms $\mathrm{I}_f=\langle df, df \rangle$, $\mathrm{II}_f=-\langle df, d\nu \rangle$ and $\mathrm{III}_f=\langle d\nu, d\nu \rangle$ of a timelike surface $f$ satisfy the relation
\[
-K\mathrm{I}_f-2H\mathrm{II}_f+\mathrm{III}_f=0,
\]
where $K$ and $H$ are the Gaussian curvature and the mean curvature of $f$. The condition $H=0$ implies $\mathrm{III}_f=K\mathrm{I}_f$. When $f_1$ and $f_2$ are isometric (resp. anti-isometric), $\mathrm{I}_{f_1}=\mathrm{I}_{f_2}$ and $K_{f_1}=K_{f_2}$ (resp. $\mathrm{I}_{f_1}=-\mathrm{I}_{f_2}$ and $K_{f_1}=-K_{f_2}$) hold, and hence the third fundamental forms of $f_1$ and $f_2$ satisfy $\mathrm{III}_{f_1}=\mathrm{III}_{f_2}$ in both cases (1) and (2). By the definition of the third fundamental form and the assumption $K\neq 0$, this means that the unit normal vector fields $\nu_1$ and $\nu_2$ of the surfaces $f_1$ and $f_2$ have the same non-degenerate first fundamental forms $\mathrm{I}_{\nu_1}=\mathrm{I}_{\nu_2}$. On the other hand, the relation $\mathrm{I}_{\nu_1}=\mathrm{I}_{\nu_2}$ and the Weingarten equation for the surfaces $\nu_1$ and $\nu_2$ imply $\mathrm{II}_{\nu_1}=\mathrm{II}_{\nu_2}$. Therefore, by the fundamental theorem of surface theory, we conclude that $\nu_1=\nu_2$ after an isometry of $\mathbb{R}^3_1$. Let $(h_i, \eta_i)$ be the Weierstrass data of the surface $f_i$ $(i=1,2)$. The relation $\nu_1=\nu_2$ shows that $h_1 = h_2$ by Remark \ref{remark:Gaussmap}.
Since we are considering regular surfaces, the equation \eqref{eqn:wConformal} implies $|\eta_1|^2=|\eta_2|^2 \neq 0$ when $f_1$ and $f_2$ are isometric and $|\eta_1|^2=-|\eta_2|^2 \neq 0$ when $f_1$ and $f_2$ are anti-isometric. Then we obtain the paraholomorphic function $\hat{\eta_2}/\hat{\eta_1}$ satisfying $|\hat{\eta_2}/\hat{\eta_1}|^2=\pm 1$, where $\eta_i=\hat{\eta_i}dz$ $(i=1,2)$. Finally, Lemma \ref{lemma:constant} shows that $\hat{\eta_2}/\hat{\eta_1}$ is a constant function with $|\hat{\eta_2}/\hat{\eta_1}|^2=\pm 1$. Hence, there exists a real number $\theta$ such that $\hat{\eta_2}/\hat{\eta_1} = \pm \phoro{e}^{j\theta}$ when $f_1$ and $f_2$ are isometric, or $\hat{\eta_2}/\hat{\eta_1} = \pm j\phoro{e}^{j\theta}$ when $f_1$ and $f_2$ are anti-isometric. In summary, we have proved that $(h_2, \eta_2)=(h_1, \pm \phoro{e}^{j\theta}\eta_1)$ or $(h_2, \eta_2)=(h_1, \pm j\phoro{e}^{j\theta}\eta_1)$ after an isometry of $\mathbb{R}^3_1$, which gives the desired result.
\end{proof}
\section{Deformation and symmetry}\label{sec:Goursat}
In this section, we show how the symmetries of a timelike minimal surface $f$ are preserved under the isometric deformation $\{f_\lambda \}_\lambda$ and the anti-isometric deformation $\{\hat{f}_\lambda \}_\lambda$. We also show that symmetries of timelike minimal surfaces are propagated, while the shape of the surface changes, under more general transformations and deformations.
We denote the paracomplex conformal group with signature $(-,+,+)$ by $\mathrm{CO}(1,2; \mathbb{C}')$, that is,
\[
\mathrm{CO}(1,2; \mathbb{C}') = \{ A\in \mathrm{M}(3,\mathbb{C}') \mid {}^t\! AI_{1,2}A=cI_{1,2},\ |c|^2\neq 0 \},
\]
where ${}^t\! A$ is the transposed matrix of $A$ and $I_{1,2}=\text{diag}(-1,1,1)$. Let $f=\mathrm{Re}\int {\omega}$ be a conformal timelike minimal surface on a simply connected Lorentz surface $M$ with a paraholomorphic 1-form $\omega={}^t\!(\omega_1,\omega_2,\omega_3)$. For a matrix $A\in \mathrm{CO}(1,2; \mathbb{C}')$, the surface
\begin{equation}\label{eq: Goursat}
f_A(p) := \mathrm{Re}\left(A\int_{p_0}^p{\omega}\right) + f_A(p_0)
\end{equation}
also gives a conformal timelike minimal surface because $A$ satisfies the condition $\langle A\omega, A\omega \rangle =c\langle \omega, \omega \rangle =0$. Based on the work \cite{G} by Goursat, we call it the {\it Goursat transformation} of $f$. First, we recall the following lemma (see \cite[Lemma 5.3]{Meeks} for the Riemannian case).
\begin{lemma}\label{lemma:gomega}
Let $f\colon M \to \mathbb{R}^3_1$ be a simply connected timelike minimal surface represented by $f=\mathrm{Re}\left(\int{\omega}\right)$ with a paraholomorphic 1-form $\omega={}^t\!(\omega_1,\omega_2,\omega_3)$. Then
\begin{itemize}
\item[(1)] If $g\in S^\circ_f(M)$ with the linear part $O\in O(1,2)$, then $g^*\omega = O\omega$,
\item[(2)] If $g\in S^r_f(M)$ with the linear part $O\in O(1,2)$, then $g^*\omega = O\bar{\omega}$.
\end{itemize}
\end{lemma}
\begin{proof}
By the relation $f=\mathrm{Re}\left(\int{\omega}\right)$, we first remark that $\omega = df +j*df$, where $*$ is the Lorentzian Hodge star operator represented by the relation
\[
*dx = dy,\quad *dy =dx
\]
for each paracomplex coordinate $z=x+jy$. By the assumption $g\in S_f(M)$, there exists an isometry $\tilde{g}$ of $\mathbb{R}^3_1$ such that $\tilde{g}\circ f = f\circ g$. Hence, the relation
\[
g^*df=g^*(f^*dx)=(f\circ g)^*dx=f^*(\tilde{g}^*dx) =f^*Odx=Of^*dx=Odf
\]
holds, where $dx={}^t\!(dx_1,dx_2, dx_3)$.
Therefore, we obtain the desired relations (1) and (2) by using the fact that $g^*(*df)=*g^*(df)$ if $g$ is orientation preserving and $g^*(*df)=-*g^*(df)$ if $g$ is orientation reversing.
\end{proof}
\begin{remark}[For the case of surfaces with singularities]
We remark that the Hodge star operator on $1$-forms on a Lorentz surface is defined by the formula
\[
*dz=jdz,\quad *d\bar{z}=-jd\bar{z}.
\]
This means that it depends only on the paracomplex structure and not on the Lorentzian metric. This fact implies that the assertions of Lemma \ref{lemma:gomega} and the results of this section hold for timelike minimal surfaces with singularities in exactly the same way. Here, a singularity means a point at which the induced metric of the considered surface degenerates. For details on timelike minimal surfaces with singularities, see \cite{A,KKSY}.
\end{remark}
Here, we give a proof of Theorem \ref{thm:symmetry_Goursat_Introduction}, which explains the relationships between the space groups $S_{f_A}(M)$ and $S_f(M)$.
\begin{proof}[{\bf Proof of Theorem \ref{thm:symmetry_Goursat_Introduction}}]
We prove only the case (1). Suppose that $f_A=\mathrm{Re}\left(A\int{\omega}\right)$ and that $g\in S_f^\circ(M)$ has linear part $O$. To prove the sufficiency, let us assume that $AO=\widetilde{O}A$. First, we prove that $g\in S_f^\circ(M)$ is an isometry of $f_A(M)$. The first fundamental form $\mathrm{I}_{f_A}$ of $f_A$ is written as
\[
\mathrm{I}_{f_A}= 4\langle (f_A)_w, \overline{(f_A)_w} \rangle dwd\bar{w} = \langle A\omega, \overline{A\omega} \rangle,
\]
and hence Lemma \ref{lemma:gomega} and the assumption $AO=\widetilde{O}A$ imply that
\[
g^* \mathrm{I}_{f_A} = \langle Ag^*\omega, \overline{A g^*\omega} \rangle = \langle AO\omega, \overline{A O\omega} \rangle = \langle \widetilde{O}A\omega, \overline{\widetilde{O}A\omega} \rangle = \langle A\omega, \overline{A\omega} \rangle =\mathrm{I}_{f_A},
\]
which means that $g$ is also an isometry of $f_A(M)$. Next, we check that $g$ also induces a symmetry of $f_A(M)$:
\begin{align*}
f_A(g(p)) &= \mathrm{Re}\left(A\int_{p_0}^{g(p)}{\omega}\right) + f_A(p_0) \\
&= \mathrm{Re}\left(A\int_{p_0}^{g(p_0)}{\omega}\right) + \mathrm{Re}\left(A\int_{g(p_0)}^{g(p)}{\omega}\right)+ f_A(p_0) \\
&= \mathrm{Re}\left(A\int_{p_0}^{p}{g^*\omega}\right)+ f_A(g(p_0)) \\
&= \mathrm{Re}\left(A\int_{p_0}^{p}{O\omega}\right)+ f_A(g(p_0)) \\
&= \mathrm{Re}\left(AO\int_{p_0}^{p}{\omega}\right)+ f_A(g(p_0)) \\
&= \mathrm{Re}\left(\widetilde{O}A\int_{p_0}^{p}{\omega}\right)+ f_A(g(p_0)) \\
&= \widetilde{O}f_A(p)- \widetilde{O}f_A(p_0)+ f_A(g(p_0)).
\end{align*}
Conversely, suppose that $g\in S_{f_A}(M)$ and that its linear part is a matrix $\widetilde{O}\in \mathrm{O}(1,2)$. Then there exists a vector $\tilde{t}\in \mathbb{R}^3_1$ such that $f_A\circ g = \widetilde{O}f_A + \tilde{t}$. Taking the exterior derivative of this equation and using that $g$ is orientation preserving, we obtain $g^*(A\omega)=\widetilde{O}A\omega$; combined with the relation $g^*(A\omega)=Ag^*\omega=AO\omega$ from Lemma \ref{lemma:gomega}, this gives $AO=\widetilde{O}A$. This proves (1), and the case (2) is proved similarly by using the assumption $AO=\widetilde{O}\bar{A}$ and the relation $g^*\omega =O\bar{\omega}$ in Lemma \ref{lemma:gomega}.
\end{proof}
Since the identity matrix $I_3$ commutes with any matrix, we have the following result regarding the conservation of translation symmetry.
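More precisely, the computation in the proof of Theorem \ref{thm:symmetry_Goursat_Introduction} also identifies the new translation vector: if $g\in S^\circ_f(M)$ satisfies $f\circ g=f+t$ with $t=\mathrm{Re}\int_{p_0}^{g(p_0)}{\omega}$, then
\[
 f_A\circ g=f_A+t_A,\qquad t_A=\mathrm{Re}\left(A\int_{p_0}^{g(p_0)}{\omega}\right),
\]
so the translation symmetry survives precisely when $t_A$ does not vanish. This is the formula behind the deformed period vectors computed in Example \ref{example:Bonnet}.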
\begin{corollary}\label{cor:translation}
Under the same assumptions as in Theorem \ref{thm:symmetry_Goursat_Introduction}, if an orientation preserving isometry $g\in S^\circ_f(M)$ gives a translation symmetry of the surface $f$, then $g\in S^\circ_{f_A}(M)$ also gives a translation symmetry of an arbitrary Goursat transformation $f_A$ for $A\in \mathrm{CO}(1,2; \mathbb{C}')$, whenever the translation vector does not vanish.
\end{corollary}
By using this corollary, we can produce many periodic timelike minimal surfaces, as in Example \ref{example:Bonnet}.
\begin{remark}
In \cite[Corollary 5.2]{Meeks}, Meeks pointed out a necessary and sufficient condition for the conjugate minimal surface in the Euclidean space to have a translation symmetry. Moreover, Leschke and Moriya \cite{LM} revealed similar results on the conservation of translation symmetry under simple factor dressings of minimal surfaces in $\mathbb{R}^3$ and $\mathbb{R}^4$, which are also a special kind of Goursat transformation. See Theorem 6.2 and Corollary 6.8 in \cite{LM}.
\end{remark}
Goursat transformations of the form \eqref{eq: Goursat} include various important transformations and deformations of timelike minimal surfaces. From now on, we give some applications of Theorem \ref{thm:symmetry_Goursat_Introduction} to specific deformations.
\subsection{Associated family and conjugation, revisited}
The isometric deformation $\{f_\theta\}_{\theta \in \mathbb{R}}$ and the anti-isometric deformation $\{\hat{f}_\theta\}_{\theta \in \mathbb{R}}$ in Definition \ref{def:associated} also correspond to the Goursat transformations for the matrices $\phoro{e}^{j\theta}I_3$ and $j\phoro{e}^{j\theta}I_3$, respectively. Therefore, we obtain the following result, which is a timelike counterpart of the result for minimal surfaces by Meeks \cite[Theorem 5.5]{Meeks}, describing how space groups behave with respect to the above transformations.
\begin{corollary}\label{cor:Spacegroup}
Let $f\colon M \to \mathbb{R}^{3}_1$ be a simply connected timelike minimal surface. Then
\begin{itemize}
\item[(1)] $S_f^\circ(M) = S_{f_\theta}^\circ(M)= S_{\hat{f}_\theta}^\circ(M)$ for all $\theta \in \mathbb{R}$. Moreover, the linear part of $g\in S_{f}^\circ(M)$ is preserved under the isometric deformation $\{f_\theta\}_{\theta\in \mathbb{R}}$ and the anti-isometric deformation $\{\hat{f}_\theta\}_{\theta\in \mathbb{R}}$.
\item[(2)] If $g\in S_f^r(M)$, then $g\not \in S_{f_\theta}^r(M)$ for all $\theta\neq 0$.
\item[(3)] If $g\in S_f^r(M)$, then $g\in S_{\hat{f}_\theta}^r(M)$ for some $\theta \in \mathbb{R}$ if and only if $\theta =0$. Moreover, if $g\in S_f^r(M)$ has the linear part $O$, then $g\in S_{\hat{f}}^r(M)$ has the linear part $-O$.
\end{itemize}
\end{corollary}
\begin{proof}
Let $g$ be a symmetry in $S_f(M)$ whose linear part is $O$. When $g$ is orientation preserving, the assertion $(1)$ follows from Theorem \ref{thm:symmetry_Goursat_Introduction} and the fact that $\phoro{e}^{j\theta}I_3$ and $j\phoro{e}^{j\theta}I_3$ commute with $O$. When $g$ is orientation reversing, Theorem \ref{thm:symmetry_Goursat_Introduction} asserts that $g\in S_{f_\theta}(M)$ holds if and only if there exists $\tilde{O}\in \mathrm{O}(1,2)$ such that
\[
\phoro{e}^{j\theta}I_3O=\widetilde{O}\overline{\phoro{e}^{j\theta}I_3} \Leftrightarrow \phoro{e}^{2j\theta}O = \widetilde{O}.
\]
Since $\widetilde{O}$ must be a real matrix, we obtain $\theta =0$, proving the assertion (2).
On the other hand, $g\in S_{\hat{f}_\theta}(M)$ holds if and only if there exists $\tilde{O}\in \mathrm{O}(1,2)$ such that \[ j\phoro{e}^{j\theta}I_3O=\widetilde{O}\overline{j\phoro{e}^{j\theta}I_3} \Leftrightarrow \phoro{e}^{2j\theta}O =-\widetilde{O}. \] Therefore, we obtain $\theta=0$ and $\widetilde{O}=-O$ proving the assertion (3). \end{proof} From now on, we focus on the conjugation. Taking the conjugation $\hat{f}$ of a timelike minimal surface $f$ corresponds to the Goursat transformation of $f$ with respect to the matrix \begin{equation}\label{Conjugate_matrix} J = \begin{pmatrix} j & 0 & 0 \\ 0 & j & 0 \\ 0 & 0 & j \end{pmatrix} \in \mathrm{CO}(1,2; \mathbb{C}'). \end{equation} Since $J$ commutes with arbitrary matrix, conjugation and arbitrary Goursat transformation commute up to a constant vector. More precisely, the following commutative diagram holds. \[ \begin{diagram} \node[2]{f} \arrow[3]{e,b,2}{\text{Goursat transformation}} \arrow[1]{s,l}{\text{conjugation}} \node[3]{f_A} \arrow[1]{s,r}{\text{conjugation}} \\ \node[2]{\hspace{1cm}\hat{f}=f_J\hspace{1cm}} \arrow[3]{e,t,1}{\hspace{1cm}\text{Goursat transformation}} \node[3]{\hspace{1cm} \hat{f}_A=f_{AJ}=f_{JA}} \end{diagram} \] where $A\in \mathrm{CO}(1,2; \mathbb{C}')$. Due to the relation $\bar{J}=-J$, Theorem \ref{thm:symmetry_Goursat_Introduction} and Corollary \ref{cor:Spacegroup}, we obtain the following relation between space groups of the surfaces $f, f_A, \hat{f}=f_J$ and $\hat{f}_A=f_{AJ}$. \begin{corollary}\label{cor:diagram} Let $f\colon M\to \mathbb{R}^3_1$ be a simply connected timelike minimal surface, $g$ be an orientation preserving (resp.~reversing) isometry of $f(M)$. Assume that $A$ be a matrix in $ \mathrm{CO}(1,2; \mathbb{C}')$ and $O, \widetilde{O}$ be matrices in $\mathrm{O}(1,2)$ such that $AO=\widetilde{O}A$ (resp.~ $AO=\widetilde{O}\bar{A}$). Then the following statements are equivalent. \begin{itemize} \item[(1)] $g\in S_f(M)$ with linear part $O$, \item[(2)] $g\in S_{\hat{f}}(M)$ with linear part $O$ (resp. $-O$), \item[(3)] $g\in S_{f_A}(M)$ with linear part $\widetilde{O}$, \item[(4)] $g\in S_{\hat{f}_A}(M)$ with linear part $\widetilde{O}$ (resp. $-\widetilde{O}$). \end{itemize} \end{corollary} \subsection{Self duality relation}\label{sec:duality} The next important Goursat transformation is the following self duality relation. We call the Goursat transformation $f_D$ of a timelike minimal surface $f$ with the matrix \begin{equation} D=\begin{pmatrix}\label{Dmatrix} j & 0 & 0 \\ 0 & 1 & 0 \\ 0 & 0 & 1 \end{pmatrix} \in \mathrm{CO}(1,2; \mathbb{C}') \end{equation} the {\it dual timelike minimal surface} of $f$. The dual timelike minimal surface $f_D$ has the following notable property. \begin{proposition}\label{prop: dual data} Let $f$ be a timelike minimal surface with the Weierstrass data $(h,\eta)$. The Weierstrass data $(h_D, \eta_D)$ of the dual timelike minimal surface $f_D$ is \[ h_D=\cfrac{\left(1+j\right)h-\left(1-j\right)1/h}{2},\quad \eta_D=\cfrac{\left(1+j\right)1/h-\left(1-j\right)h}{2}\, h\eta. \] In particular, the duality reverses the signs of the Gaussian curvatures $K_f$ of the surface $f$ and $K_{f_D}$ of the dual $f_D$ as follows \[ \mathrm{sgn}(K_f) = - \mathrm{sgn}(K_{f_D}). \] \end{proposition} \begin{proof} The former relations follow immediately from a straightforward calculation. 
Since the second fundamental form $\mathrm{II}_f$ of $f$ is written as
\[
\mathrm{II}_f=\operatorname{Re}{(\eta dh)},
\]
we can check the latter property by showing the relation $\eta_Ddh_D =j\eta dh$.
\end{proof}
\begin{remark}[Duality between minimal surfaces and maximal surfaces]\label{maximalduality}
Let us consider a minimal surface in $\mathbb{R}^3$ written as $f=\operatorname{Re}\int{\prescript{t\!}{}{\left(\omega_1, \omega_2, \omega_3 \right)}}$ with holomorphic one forms $\omega_j$ ($j=1,2,3$), and the transformation
\[
\begin{pmatrix}
\tilde{\omega}_1 \\
\tilde{\omega}_2 \\
\tilde{\omega}_3
\end{pmatrix}
=\widetilde{D}
\begin{pmatrix}
\omega_1 \\
\omega_2 \\
\omega_3
\end{pmatrix}, \quad
\widetilde{D}:=
\begin{pmatrix}
i & 0 & 0 \\
0 & 1 & 0 \\
0 & 0 & 1
\end{pmatrix}
\]
where $i$ is the imaginary unit on the complex plane $\mathbb{C}$ satisfying $i^2=-1$. Although the matrix $\widetilde{D}$ belongs neither to the complex orthogonal group $\mathrm{O}(3; \mathbb{C})=\{ A\in \mathrm{M}(3,\mathbb{C}) \mid {}^t\! AA=I_{3} \},$ nor to the indefinite complex orthogonal group $\mathrm{O}(1,2; \mathbb{C})= \{ A\in \mathrm{M}(3,\mathbb{C}) \mid {}^t\! AI_{1,2}A=I_{1,2} \}$, the surface $f_{\tilde{D}}:=\operatorname{Re}\int{\prescript{t\!}{}{\left(\tilde{\omega}_1, \tilde{\omega}_2, \tilde{\omega}_3 \right)}}$ gives a maximal surface in $\mathbb{R}^3_1$. This one-to-one correspondence between minimal surfaces in $\mathbb{R}^3$ and maximal surfaces in $\mathbb{R}^3_1$ is called the {\it duality}, see \cite{Lee} and also \cite{AF,AL,LLS,UY} for example. We also remark that any minimal surface has non-positive Gaussian curvature $K\leq 0$ and any maximal surface has non-negative Gaussian curvature $K\geq 0$. Therefore, Proposition \ref{prop: dual data} means that the self duality between $f$ and $f_D$ is a Lorentzian version of the above duality between minimal and maximal surfaces.
\end{remark}
At the end of this subsection, we discuss symmetries derived from reflection principles, which are closely related to the conjugation $\hat{f}=f_J$ and the dual $f_D$. It is well known that if a minimal surface in Euclidean space contains a straight line, then the surface has a symmetry with respect to the line, and its conjugate surface has a planar symmetry with respect to a plane orthogonal to the line. The same results are also valid for maximal surfaces and timelike minimal surfaces in $\mathbb{R}^3_1$, see \cite{ACM1} and \cite{KKSY} for example. Since such a reflection symmetry is obtained by an orientation reversing isometry of the form $g(z)=\bar{z}$ for an appropriate coordinate $z$, the equivalence between (1) and (2) in Corollary \ref{cor:diagram} is a generalization of this fact. It can also be used to express the correspondence between the symmetries with respect to a shrinking singularity on a timelike minimal surface $f$ and a folding singularity on the conjugate surface $\hat{f}$.
To deal with singularities on timelike minimal surfaces, we recall the classes of generalized timelike minimal surfaces introduced in \cite{KKSY}. A non-constant smooth map $f\colon M \longrightarrow \mathbb{R}^3_1$ from a Lorentz surface $M$ into $\mathbb{R}^3_1$ is called a {\it generalized timelike minimal surface} if $f$ is immersed on an open dense set of $M$ and there exists a local coordinate system $(U; x, y)$ near each point of $M$ such that $\langle f_x, f_x \rangle = -\langle f_y, f_y \rangle$, $\langle f_x, f_y \rangle \equiv0$ and $f_{xx}-f_{yy}\equiv 0$ on $U$.
For each local coordinate system $(U; x, y)$, let
\[
\mathcal{A}=\{p\in U \mid \text{$f_x(p)$ or $f_y(p)$ is lightlike in $\mathbb{R}^3_1$} \},\quad \mathcal{B}=\{p\in U\mid d{f}_p=0\}.
\]
Since the induced metric $\mathrm{I}_f$ degenerates at each point $p$ in $\mathcal{A}\cup \mathcal{B}$, we call $p$ a {\it singular point} of $f$. A singular point $p \in \mathcal{A}$ is called a {\it shrinking singular point} (or a {\it conelike singular point}) if there is a regular curve $\gamma\colon I \to U$ from an interval $I$ passing through $p$ such that $\gamma(I)\subset \mathcal{A}$ and $f\circ \gamma (I)$ becomes a single point in $\mathbb{R}^3_1$, which we call a {\it shrinking singularity}. Also, a singular point $p \in \mathcal{A}$ is called a {\it folding singular point} (or a {\it fold singular point}) if there is a neighborhood of $p$ on which the surface is reparametrized so that $p=(0,0)$ and $f_y(x,0)\equiv 0$. We call the image $\{f(x,0)\mid (x,0)\in U\}$ a {\it folding singularity}. By using the singular Bj\"orling representation formula, reflection principles with respect to shrinking singular points and folding singular points have been proved in \cite[Lemmas 4.3 and 4.5]{KKSY}.
By considering the Goursat transformations $\hat{f}=f_J$ and $f_D$, symmetries about lines, planes, shrinking singularities, and folding singularities can be unified as follows (the same result holds in the Riemannian case, see \cite{AF} for more details).
\begin{corollary}\label{cor:quadruple}
Let $f\colon M\to \mathbb{R}^3_1$ be a simply connected timelike minimal surface. Then the following statements are equivalent.
\begin{itemize}
\item[(1)] $f$ has the line symmetry with respect to a timelike straight line on the surface $f$ which is parallel to the $x_1$-axis,
\item[(2)] $\hat{f}=f_J$ has the planar symmetry with respect to a spacelike plane parallel to the $x_2x_3$-plane which is perpendicular to the surface $\hat{f}$,
\item[(3)] $f_D$ has the point symmetry with respect to a shrinking singularity, and
\item[(4)] $\hat{f}_D=f_{DJ}=f_{JD}$ has the folded symmetry with respect to a folding singularity.
\end{itemize}
\end{corollary}
\begin{proof}
We give the argument only for the case where $f$ contains the straight line of the form $f(x+j0)={}^t(x,0,0)$. By the regular reflection principle in \cite[Lemma 4.1]{KKSY}, we obtain
\[
f(\bar{z}) = Of(z),\quad \text{where $O=\text{diag}(1, -1, -1)$ and $z=x+jy$}.
\]
By the relation $JO=-O\overline{J}$ and Corollary \ref{cor:diagram}, the conjugate surface $f_J$ has the symmetry $f_J(\bar{z})=-Of_J(z)$ up to a translation, which proves the assertion (2). Similarly, the relation $DO=-I_3\overline{D}$ induces the point symmetry $f_D(\bar{z})=-f_D(z)$ up to a translation. In particular, $f_D(x+j0)$ shrinks to a single point in $\mathbb{R}^3_1$, proving the assertion (3). Finally, the relation $D(-O)=I_3\overline{D}$ induces the folded symmetry $f_{JD}(\bar{z})=f_{JD}(z)$ up to a translation, proving the assertion (4).
\end{proof}
\subsection{L\'opez-Ros deformation}
Another interesting example of Goursat transformations is the following L\'opez-Ros type deformation. We define the {\it L\'opez-Ros deformation} $\{f_\lambda \}_{\lambda>0}$ of a timelike minimal surface $f$ by changing the Weierstrass data from $(h, \eta)$ to $(\lambda h, \eta / \lambda)$, that is,
\begin{equation}\label{eq:Gourast}
f_\lambda(p) =\mathrm{Re}\int^p_{p_0}\prescript{t\!}{}{\left( -\left(\frac{1}{\lambda}+\lambda h^2\right), j\left(\frac{1}{\lambda}-\lambda h^2\right), 2h \right)}\eta +f_\lambda(p_0).
\end{equation}
This deformation was introduced in \cite{LR} for minimal surfaces in $\mathbb{R}^3$, and the deformation $\{f_\lambda \}_{\lambda>0}$ preserves the second fundamental form, as do the original L\'opez-Ros deformations, since the second fundamental form $\mathrm{II}_f$ of $f=f_1$ is written as
\[
\mathrm{II}_f=\operatorname{Re}{(\eta dh)}.
\]
Moreover, a straightforward calculation shows that the deformation \eqref{eq:Gourast} is obtained by the Goursat transformation of $f=f_1$ with respect to the matrix
\begin{equation}\label{LRmatrix}
A=A(\lambda) =
\begin{pmatrix}
\frac{\lambda+\frac{1}{\lambda}}{2} & j\frac{\lambda-\frac{1}{\lambda}}{2} & 0 \\
j\frac{\lambda-\frac{1}{\lambda}}{2} & \frac{\lambda+\frac{1}{\lambda}}{2} & 0 \\
0 & 0 & 1
\end{pmatrix}
\in \mathrm{CO}(1,2; \mathbb{C}').
\end{equation}
Whereas the isometric deformation $\{f_\lambda \}_\lambda$, which preserves the first fundamental form $\mathrm{I}_f$, and the anti-isometric deformation $\{\hat{f}_\lambda\}_\lambda$, which preserves $-\mathrm{I}_f$, preserve certain symmetries, as an application of Theorem \ref{thm:symmetry_Goursat_Introduction} we can also control symmetries of timelike minimal surfaces while keeping the second fundamental form $\mathrm{II}_f$ fixed, via the L\'opez-Ros deformation.
\begin{remark}[Ambient isometry as a Goursat transformation]
We should remark that ambient isometries of $\mathbb{R}^3_1$ and Goursat transformations \eqref{eq: Goursat} do not commute in general, and this noncommutativity produces different surfaces which are not isometric to the original surface. In the case of the duality of minimal and maximal surfaces, such noncommutativity was discussed by Ara\'ujo and Leite \cite{AL} (see also Remark \ref{maximalduality}). In the case of timelike minimal surfaces, up to a translation, we can regard an ambient isometry of $\mathbb{R}^3_1$ as a Goursat transformation with a matrix in $\mathrm{O}(1,2)\subset \mathrm{CO}(1,2; \mathbb{C}')$. Thus, Theorem \ref{thm:symmetry_Goursat_Introduction} also allows us to handle the symmetries of the different surfaces that arise from the noncommutativity of these transformations.
\end{remark}
\section{Examples}\label{sec:Ex}
In this section, we give concrete examples of the symmetry relations discussed in the previous section. Let us first see how Corollary \ref{cor:diagram} gives many symmetry relations between $f, f_D, \hat{f}$ and $\hat{f}_D$.
\begin{example}[Lorentzian Enneper surface, parabolic helicoid and their conjugates]\label{example:Enneper}
Let us take the Weierstrass data $(h,\eta)=(z, dz)$ defined on $\mathbb{C}'$. The surface given by \eqref{eq:pW} is
\begin{align*}
f(z=x+jy) &=\mathrm{Re}\prescript{t\!}{}{\left( -z-\frac{z^3}{3}, j\left(z-\frac{z^3}{3}\right), z^2 \right)}\\
&=\prescript{t\!}{}{\left(-x-\frac{x^3}{3}-xy^2, y-x^2y-\frac{y^3}{3},x^2+y^2 \right)}
\end{align*}
and it is called the {\it Lorentzian Enneper surface} (see \cite{Konderak} for example). By using the matrix $D$ in \eqref{Dmatrix}, its dual $f_D$ is written as follows.
\begin{align*}
f_D (z) &= \mathrm{Re}\left(D \prescript{t\!}{}{\left( -z-\frac{z^3}{3}, j\left(z-\frac{z^3}{3}\right), z^2 \right)}\right)\\
&=\prescript{t\!}{}{\left(-y-x^2y-\frac{y^3}{3}, y-x^2y-\frac{y^3}{3},x^2+y^2 \right)}.
\end{align*}
The surface $f_D$ and its conjugate $\hat{f}_D$ are nothing but the surfaces called the {\it timelike parabolic helicoid} and the {\it timelike parabolic catenoid}, respectively (see \cite{KKSY} for example).
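The explicit coordinate expressions above, as well as the symmetry relations listed below, can be verified directly in the real coordinates $(x,y)$. The following sketch is only a cross-check and not part of the argument; it assumes Python with {\tt sympy}.
\begin{verbatim}
import sympy as sp

x, y = sp.symbols('x y', real=True)
# explicit parametrizations of the Lorentzian Enneper surface f and its dual f_D
f  = sp.Matrix([-x - x**3/3 - x*y**2,  y - x**2*y - y**3/3,  x**2 + y**2])
fD = sp.Matrix([-y - x**2*y - y**3/3,  y - x**2*y - y**3/3,  x**2 + y**2])

conj      = {y: -y}            # z -> bar z   corresponds to (x, y) -> (x, -y)
minusconj = {x: -x}            # z -> -bar z  corresponds to (x, y) -> (-x, y)

# each line prints the zero vector
print((f.subs(conj)       - sp.diag(1, -1, 1) * f).expand())
print((fD.subs(conj)      - sp.diag(-1, -1, 1) * fD).expand())
print((fD.subs(minusconj) - fD).expand())      # folded symmetry along Im z = 0
\end{verbatim}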
Let us see how the symmetries of $f, f_D, \hat{f}$ and $\hat{f}_D$ relate to each other. First, the surface $f$ has the following orientation reversing planar symmetries: \begin{align*} f(\bar{z}) = \begin{pmatrix} 1 & 0 & 0 \\ 0 & -1 & 0 \\ 0 & 0 & 1 \end{pmatrix}f(z),\quad f(-\bar{z}) = \begin{pmatrix} -1 & 0 & 0 \\ 0 & 1 & 0 \\ 0 & 0 & 1 \end{pmatrix}f(z). \end{align*} By Theorem \ref{thm:symmetry_Goursat_Introduction} and the matrix relations \begin{align*} D\begin{pmatrix} 1 & 0 & 0 \\ 0 & -1 & 0 \\ 0 & 0 & 1 \end{pmatrix} = \begin{pmatrix} -1 & 0 & 0 \\ 0 & -1 & 0 \\ 0 & 0 & 1 \end{pmatrix} \overline{D}, \qquad D\begin{pmatrix} -1 & 0 & 0 \\ 0 & 1 & 0 \\ 0 & 0 & 1 \end{pmatrix} = \begin{pmatrix} 1 & 0 & 0 \\ 0 & 1 & 0 \\ 0 & 0 & 1 \end{pmatrix}\overline{D}, \end{align*} the dual timelike minimal surface $f_D$ has the following symmetries: \begin{align*} f_D(\bar{z}) = \begin{pmatrix} -1 & 0 & 0 \\ 0 & -1 & 0 \\ 0 & 0 & 1 \end{pmatrix}f_D(z),\quad f_D(-\bar{z}) = \begin{pmatrix} 1 & 0 & 0 \\ 0 & 1 & 0 \\ 0 & 0 & 1 \end{pmatrix}f_D(z) \end{align*} which mean that $f_D$ has the line symmetry with respect to the spacelike $x_3$-axis and the folded symmetry with respect to fold singularities along $\operatorname{Im}{z}=0$, see the top right of Figure \ref{Fig:Enneper}. Furthermore, by Corollary \ref{cor:diagram}, the conjugate surface $\hat{f}$ has the following line symmetries with respect to the spacelike $x_2$-axis and the timelike $x_1$-axis: \begin{align*} \hat{f}(\bar{z}) = \begin{pmatrix} -1 & 0 & 0 \\ 0 & 1 & 0 \\ 0 & 0 & -1 \end{pmatrix}\hat{f}(z),\quad \hat{f}(-\bar{z}) = \begin{pmatrix} 1 & 0 & 0 \\ 0 & -1 & 0 \\ 0 & 0 & -1 \end{pmatrix}\hat{f}(z). \end{align*} Finally, Corollary \ref{cor:diagram} shows that $\hat{f}_D$ has the following symmetries: \begin{align*} \hat{f}_D(\bar{z}) = \begin{pmatrix} 1 & 0 & 0 \\ 0 & 1 & 0 \\ 0 & 0 & -1 \end{pmatrix}\hat{f}_D(z),\quad \hat{f}_D(-\bar{z}) = \begin{pmatrix} -1 & 0 & 0 \\ 0 & -1 & 0 \\ 0 & 0 & -1 \end{pmatrix}\hat{f}_D(z). \end{align*} which mean that $\hat{f}_D$ has the planar symmetry with respect to the timelike $x_1x_2$-plane and the point symmetry with respect to shrinking singularities along $\operatorname{Im}{z}=0$, see the bottom right of Figure \ref{Fig:Enneper}. For orientation preserving isometries, we can also check that by Corollary \ref{cor:diagram} $f, f_D, \hat{f}$ and $\hat{f}_D$ share the line symmetry with respect to the spacelike $x_3$-axis: \begin{align*} f(-z) = \begin{pmatrix} -1 & 0 & 0 \\ 0 & -1 & 0 \\ 0 & 0 & 1 \end{pmatrix}f(z),\quad f_D(-z) = \begin{pmatrix} -1 & 0 & 0 \\ 0 & -1 & 0 \\ 0 & 0 & 1 \end{pmatrix}f_D(z).\\ \hat{f}(-z) = \begin{pmatrix} -1 & 0 & 0 \\ 0 & -1 & 0 \\ 0 & 0 & 1 \end{pmatrix}\hat{f}(z),\quad \hat{f}_D(-z) = \begin{pmatrix} -1 & 0 & 0 \\ 0 & -1 & 0 \\ 0 & 0 & 1 \end{pmatrix}\hat{f}_D(z). \end{align*} \end{example} \begin{figure} \caption{The Lorentzian Enneper surface $f$ (left top), the parabolic helicoid $f_D$ (right top), the conjugate Enneper surface $\hat{f} \label{Fig:Enneper} \end{figure} \begin{example}[Periodic Bonnet type surfaces]\label{example:Bonnet} Finally, we see symmetry relations for L\'opez-Ros deformation. Let us take the Weierstrass data $(h,\eta)=(\operatorname{\phoro{tan}}{z}, \frac{1}{2}\operatorname{\phoro{cos}}^2{z}dz)$ defined on $\mathbb{C}'$. 
The surface given by \eqref{eq:pW} is
\begin{align*}
f(z=x+jy) &=\mathrm{Re}\prescript{t\!}{}{\left( -\frac{z}{2}, \frac{j}{4}\operatorname{\phoro{sin}}{2z}, -\frac{1}{4}\operatorname{\phoro{cos}}{2z} \right)}\\
&=\prescript{t\!}{}{\left(-\frac{x}{2}, \frac{1}{4}\cos{2x}\sin{2y}, -\frac{1}{4}\cos{2x}\cos{2y} \right)}
\end{align*}
and it is called the {\it elliptic catenoid}, which is a rotational timelike minimal surface. By using the matrix $A(\lambda)$ in \eqref{LRmatrix}, its L\'opez-Ros deformation $f_\lambda=f_{A(\lambda)}$ is as follows:
\begin{align*}
f_\lambda (z) &= \mathrm{Re}\left(A(\lambda) \prescript{t\!}{}{\left( -\frac{z}{2}, \frac{j}{4}\operatorname{\phoro{sin}}{2z}, -\frac{1}{4}\operatorname{\phoro{cos}}{2z} \right)} \right).
\end{align*}
The Weierstrass data of $f_\lambda$ is $(\lambda h,\eta/\lambda)=(\lambda \operatorname{\phoro{tan}}{z}, \frac{1}{2\lambda}\operatorname{\phoro{cos}}^2{z}dz)$, and hence the surface $f_\lambda$ is exactly the surface known as a {\it timelike minimal Bonnet type surface}, on which each curvature line lies on a plane, for any $\lambda>0$, see Figure \ref{Fig:Bonnet}. For more details on such surfaces, see \cite{ACO}.
Let us see how the symmetry of $f$ is preserved or changed via the L\'opez-Ros deformation $\{f_\lambda\}_{\lambda>0}$. The surface $f$ has the following orientation preserving symmetries:
\begin{align*}
f(z+\pi) = f(z) +\prescript{t\!}{}{\left( -\frac{\pi}{2},0, 0 \right)},\quad f(z+j\pi) = f(z),\quad
f(-z) =
\begin{pmatrix}
-1 & 0 & 0 \\
0 & -1 & 0 \\
0 & 0 & 1
\end{pmatrix}f(z).
\end{align*}
Obviously, the linear parts of these symmetries commute with the matrix $A(\lambda)$ in \eqref{LRmatrix} for each $\lambda$. Hence, Theorem \ref{thm:symmetry_Goursat_Introduction} implies that the above symmetries are propagated to the deformed surface $f_\lambda$. In particular, the linear parts of the above symmetries are preserved as follows:
\begin{align*}
&f_\lambda(z+\pi) = f_\lambda(z) + \prescript{t\!}{}{\left( -\frac{\pi}{4}\left(\lambda+\frac{1}{\lambda}\right),0, 0 \right)},\quad \\
f_\lambda(z+j\pi) &= f_\lambda(z)+\prescript{t\!}{}{\left(0, -\frac{\pi}{4}\left(\lambda-\frac{1}{\lambda}\right), 0 \right)},\quad
f_\lambda(-z) =
\begin{pmatrix}
-1 & 0 & 0 \\
0 & -1 & 0 \\
0 & 0 & 1
\end{pmatrix}f_\lambda(z).
\end{align*}
These relations mean that the surface $f_\lambda$ is doubly periodic for $\lambda \neq 1$ and the elliptic catenoid $f=f_1$ is singly periodic, and that $f_\lambda$ for any $\lambda>0$ has the line symmetry with respect to the $x_3$-axis. Also, $f$ has the following orientation reversing symmetries:
\[
f(\bar{z}) =
\begin{pmatrix}
1 & 0 & 0 \\
0 & -1 & 0 \\
0 & 0 & 1
\end{pmatrix}f(z),\quad
f(-\bar{z}) =
\begin{pmatrix}
-1 & 0 & 0 \\
0 & 1 & 0 \\
0 & 0 & 1
\end{pmatrix}f(z).
\]
If $O$ is any of the above matrices, we can check that $O$ satisfies $AO=O\bar{A}$ for the matrix $A=A(\lambda)$ in \eqref{LRmatrix} for each $\lambda$. Hence, Theorem \ref{thm:symmetry_Goursat_Introduction} implies that the above symmetries are propagated to the deformed surface $f_\lambda$ as follows:
\[
f_\lambda(\bar{z}) =
\begin{pmatrix}
1 & 0 & 0 \\
0 & -1 & 0 \\
0 & 0 & 1
\end{pmatrix}f_\lambda(z),\quad
f_\lambda(-\bar{z}) =
\begin{pmatrix}
-1 & 0 & 0 \\
0 & 1 & 0 \\
0 & 0 & 1
\end{pmatrix}f_\lambda(z),
\]
which mean that the planar symmetries with respect to the timelike $x_1x_3$-plane and the spacelike $x_2x_3$-plane on the surface $f_\lambda$ are preserved for any $\lambda>0$.
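The three orientation preserving relations for $f=f_1$ can be checked directly from the real coordinate expression above. The following sketch is again only a cross-check, not part of the argument, and assumes Python with {\tt sympy}.
\begin{verbatim}
import sympy as sp

x, y = sp.symbols('x y', real=True)
# elliptic catenoid f = f_1 in the real coordinates of this example
f = sp.Matrix([-x/2,
               sp.cos(2*x)*sp.sin(2*y)/4,
               -sp.cos(2*x)*sp.cos(2*y)/4])

checks = [f.subs(x, x + sp.pi) - f - sp.Matrix([-sp.pi/2, 0, 0]),   # f(z + pi)
          f.subs(y, y + sp.pi) - f,                                 # f(z + j*pi)
          f.subs({x: -x, y: -y}) - sp.diag(-1, -1, 1) * f]          # f(-z)
for c in checks:
    print(c.applyfunc(sp.simplify))   # each prints the zero vector
\end{verbatim}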
\begin{figure}
\caption{The singly periodic elliptic catenoid $f_1$ (left) and the doubly periodic Bonnet type surface $f_{1.5}$.}
\label{Fig:Bonnet}
\end{figure}
\end{example}
\end{document}
\begin{document}
\title[Analytical solution of the weighted Fermat-Torricelli problem]{Analytical solution of the weighted Fermat-Torricelli problem for convex quadrilaterals in the Euclidean plane: The case of two pairs of equal weights}
\author{Anastasios N. Zachos}
\address{University of Patras, Department of Mathematics, GR-26500 Rion, Greece}
\email{[email protected]}
\keywords{weighted Fermat-Torricelli problem, weighted Fermat-Torricelli point, convex quadrilaterals}
\subjclass{51E12, 52A10, 51E10}
\begin{abstract}
The weighted Fermat-Torricelli problem for four non-collinear points in $\mathbb{R}^{2}$ states that: Given four non-collinear points $A_{1},$ $A_{2},$ $A_{3},$ $A_{4}$ and a positive real number (weight) $B_{i}$ which corresponds to each point $A_{i},$ for $i=1,2,3,4,$ find a fifth point such that the sum of the weighted distances to these four points is minimized. We present an analytical solution for the weighted Fermat-Torricelli problem for convex quadrilaterals in $\mathbb{R}^{2}$ for the following two cases: (a) $B_{1}=B_{2}$ and $B_{3}=B_{4},$ for $B_{1}>B_{4},$ and (b) $B_{1}=B_{3}$ and $B_{2}=B_{4}.$
\end{abstract}\maketitle
\section{Introduction}
The weighted Fermat-Torricelli problem for $n$ non-collinear points in $\mathbb{R}^{2}$ refers to finding the unique point $A_{0}\in \mathbb{R}^{2}$ minimizing the objective function
\[f(X)=\sum_{i=1}^{n}B_{i}\|X-A_{i}\|,\quad X\in \mathbb{R}^{2},\]
given $n$ non-collinear points $\{A_{1},A_{2},A_{3},A_{4},\ldots,A_{n}\}$ with corresponding positive real numbers (weights) $B_{1}, B_{2}, B_{3}, B_{4},\ldots,B_{n},$ where $\|\cdot\|$ denotes the Euclidean distance. The existence and uniqueness of the weighted Fermat-Torricelli point and a complete characterization of the solution of the weighted Fermat-Torricelli problem have been given by Y.~S. Kupitz and H. Martini (see \cite{Kup/Mar:97}, Theorem~1.1, Reformulation~1.2, p.~58, and Theorem~8.5, pp.~76--77). A particular case of this result for four non-collinear points in $\mathbb{R}^{2}$ is given by the following theorem:
\begin{theorem}{\cite{BolMa/So:99},\cite{Kup/Mar:97}}\label{theor1}
Let there be given four non-collinear points $\{A_{1},A_{2},A_{3},A_{4}\},$ $A_{1}, A_{2}, A_{3},A_{4}\in\mathbb{R}^{2}$ with corresponding positive weights $B_{1}, B_{2}, B_{3}, B_{4}.$ \\
(a) The weighted Fermat-Torricelli point $A_{0}$ exists and is unique. \\
(b) If for each point $A_{i}\in\{A_{1},A_{2},A_{3},A_{4}\}$
\begin{equation}\label{floatingcase}
\|{\sum_{j=1, i\ne j}^{4}B_{j}\vec u(A_i,A_j)}\|>B_i,
\end{equation}
for $i=1,2,3,4$ holds, then \\
($b_{1}$) the weighted Fermat-Torricelli point $A_{0}$ (weighted floating equilibrium point) does not belong to $\{A_{1},A_{2},A_{3},A_{4}\}$ and \\
($b_{2}$)
\begin{equation}\label{floatingequlcond}
\sum_{i=1}^{4}B_{i}\vec u(A_0,A_i)=\vec 0,
\end{equation}
where $\vec u(A_{k} ,A_{l})$ is the unit vector from $A_{k}$ to $A_{l},$ for $k,l\in\{0,1,2,3,4\}$ (Weighted Floating Case).\\
(c) If there is a point $A_{i}\in\{A_{1},A_{2},A_{3},A_{4}\}$ satisfying
\begin{equation}
\|{\sum_{j=1,i\ne j}^{4}B_{j}\vec u(A_i,A_j)}\|\le B_i,
\end{equation}
then the weighted Fermat-Torricelli point $A_{0}$ (weighted absorbed point) coincides with the point $A_{i}$ (Weighted Absorbed Case).
\end{theorem}
In 1969, E. Cockayne, Z.
Melzak proved in \cite{CockayneMelzak:69} by using Galois theory that for a specific set of five non-collinear points the unweighted Fermat-Torricelli point $A_{0}$ cannot be constructed by ruler and compass in a finite number of steps (Euclidean construction). In 1988, C. Bajaj also proved in \cite{Bajaj:87} by applying Galois theory that for $n\ge 5$ the weighted Fermat-Torricelli problem for $n$ non-collinear points is in general not solvable by radicals over the field of rationals in $\mathbb{R}^{3}.$ We recall that for $n=4,$ Fagnano proved that the solution of the unweighted Fermat-Torricelli problem ($B_{1}=B_{2}=B_{3}=B_{4}$) for convex quadrilaterals in $\mathbb{R}^{2}$ is the intersection point of the two diagonals and it is well known that the solution of the weighted Fermat-Torricelli problem for non-convex quadrilaterals is the vertex of the non-convex angle. Extensions of Fagnano result to some metric spaces are given by Plastria in \cite{Plastria:06}. In 2012, Roussos studied the unweighted Fermat-Torricelli problem for Euclidean triangles and Uteshev studied the corresponding weighted Fermat-Torricelli problem and succeeded in finding an analytic solution by using some algebraic system of equations (see \cite{Roussos:12} and \cite{Uteshev:12}). Thus, we consider the following open problem: \begin{problem} Find an analytic solution with respect to the weighted Fermat-Torricelli problem for convex quadrilaterals in $\mathbb{R}^{2},$ such that the corresponding weighted Fermat-Torricelli point is not any of the given points. \end{problem} In this paper, we present an analytic solution for the weighted Fermat-Torricelli problem for a given tetragon in $\mathbb{R}^{2}$ for $B_{1}>B_{4},$ $B_{1}=B_{2}$ and $B_{3}=B_{4},$ by expressing the objective function as a function of the linear segment which connects the intersection point of the two diagonals and the corresponding weighted Fermat-Torricelli point (Section~2, Theorem~\ref{theortetr}). By expressing the angles $\angle A_{1}A_{0}A_{2}$ $\angle A_{2}A_{0}A_{3},$ $\angle A_{3}A_{0}A_{4}$ and $\angle A_{4}A_{0}A_{1}$ as a function of $B_{1},$ $B_{4}$ and $a$ and taking into account the invariance property of the weighted Fermat-Torricelli point, we obtain an analytic solution for a convex quadrilateral having the same weights with the tetragon (Section~3, Theorem~\ref{theorquadnn}). Finally, we derive that the solution for the weighted Fermat-Torricelli problem for a given convex quadrilateral in $\mathbb{R}^{2}$ for the weighted floating case for $B_{1}=B_{3}$ and $B_{2}=B_{4}$ is the intersection point (Weighted Fermat-Torricelli point) of the two diagonals (Section~4, Theorem~\ref{diagonalquad}). 
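Throughout the paper, the analytic solutions below can also be compared with a direct numerical minimization of the objective function. The following sketch, which is not part of the method of this paper, uses the classical weighted Weiszfeld iteration in Python with {\tt numpy}; the data are those of the example treated in Section~2, where the tetragon is placed with vertices $(\pm 1,\pm 1)$ (so that $a=2$) and $B_{1}=B_{2}=1.5$, $B_{3}=B_{4}=1$.
\begin{verbatim}
import numpy as np

def weighted_fermat_torricelli(points, weights, iters=200):
    """Weighted Weiszfeld iteration for min_X sum_i B_i ||X - A_i||."""
    A = np.asarray(points, dtype=float)
    B = np.asarray(weights, dtype=float)
    X = (B[:, None] * A).sum(axis=0) / B.sum()   # weighted centroid as starting point
    for _ in range(iters):
        d = np.linalg.norm(A - X, axis=1)
        if np.any(d < 1e-12):                    # iterate hit a vertex (absorbed case)
            break
        w = B / d
        X = (w[:, None] * A).sum(axis=0) / w.sum()
    return X

# tetragon A_1A_2A_3A_4 with a = 2, weights B_1 = B_2 = 1.5 and B_3 = B_4 = 1
A = [(-1.0, 1.0), (1.0, 1.0), (1.0, -1.0), (-1.0, -1.0)]
B = [1.5, 1.5, 1.0, 1.0]
print(weighted_fermat_torricelli(A, B))          # approximately (0, 0.36265)
\end{verbatim}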
\section{The weighted Fermat-Torricelli problem for a tetragon: The case $B_{1}=B_{2}$ and $B_{3}=B_{4}.$ }
We consider the weighted Fermat-Torricelli problem for a tetragon $A_{1}A_{2}A_{3}A_{4},$ for $B_{1}>B_{4},$ $B_{1}=B_{2}$ and $B_{3}=B_{4}.$ We denote by $a_{ij}$ the length of the linear segment $A_iA_j,$ by $O$ the intersection point of $A_{1}A_{3}$ and $A_{2}A_{4},$ by $y$ the length of the linear segment $OA_{0}$ and by $\alpha_{ikj}$ the angle $\angle A_{i}A_{k}A_{j}$ for $i,j,k=0,1,2,3,4, i\neq j\neq k$ (see fig.~\ref{fig1}), and we set $a_{12}=a_{23}=a_{34}=a_{41}=a.$
\begin{figure}
\caption{The weighted Fermat-Torricelli problem for a tetragon with $B_{1}=B_{2}$, $B_{3}=B_{4}$ and $B_{1}>B_{4}$.}
\label{fig1}
\end{figure}
\begin{problem}\label{sym1}
Given a tetragon $A_{1}A_{2}A_{3}A_{4}$ and a weight $B_{i}$ which corresponds to the vertex $A_{i},$ for $i=1,2,3,4,$ find a fifth point $A_{0}$ (weighted Fermat-Torricelli point) which minimizes the objective function
\begin{equation}\label{obj1}
f=B_{1}a_{01}+B_{2} a_{02}+ B_{3} a_{03}+B_{4} a_{04}
\end{equation}
for $B_{1}>B_{4},$ $B_{1}=B_{2}$ and $B_{3}=B_{4}.$
\end{problem}
\begin{theorem}\label{theortetr}
The location of the weighted Fermat-Torricelli point of $A_{1}A_{2}A_{3}A_{4}$ for $B_{1}=B_{2},$ $B_{3}=B_{4}$ and $B_{1}>B_{4}$ is given by:
\begin{eqnarray}\label{analsoltetragon}
&&y=\frac{1}{2} \sqrt{\frac{a^2}{4}+r}-\nonumber{}\\
&&{}-\frac{1}{2} \sqrt{\frac{a^2}{4}-\frac{t^{1/3}}{24\ 2^{1/3} q^{1/3}}-\frac{25 p q^{1/3}}{3\ 2^{2/3} t^{1/3} \left(B_1^2-B_4^2\right){}^2} +\frac{a^2 B_1^2-a^2B_4^2}{12 \left(B_1^2-B_4^2\right)}-\frac{-a^3 B_1^2-a^3 B_4^2}{2 \sqrt{\frac{a^2}{4}+r} \left(B_1^2-B_4^2\right)}}\nonumber{}\\
\end{eqnarray}
where
\begin{eqnarray}\label{analsoltetragon1}
&&t=2000 a^6 B_1^6-2544 a^6 B_1^4 B_4^2+2544 a^6 B_1^2 B_4^4-2000 a^6 B_4^6+\nonumber\\
&&{}+192 \sqrt{3} \sqrt{a^{12} B_1^2 B_4^2 \left(B_1^2-B_4^2\right){}^2 \left(125 B_1^4-142 B_1^2 B_4^2+125 B_4^4\right)},
\end{eqnarray}
\begin{eqnarray}\label{analsoltetragon2}
p=a^4 B_1^4-2 a^4 B_1^2 B_4^2+a^4 B_4^4,
\end{eqnarray}
\begin{eqnarray}\label{analsoltetragon3}
q=B_1^6-3 B_1^4 B_4^2+3 B_1^2 B_4^4-B_4^6
\end{eqnarray}
and
\begin{eqnarray}\label{analsoltetragon4}
r=\frac{t^{1/3}}{24\ 2^{1/3} q^{1/3}}+\frac{25 p q^{1/3}}{3\ 2^{2/3} t^{1/3} \left(B_1^2-B_4^2\right){}^2}-\frac{a^2 B_1^2-a^2 B_4^2}{12 \left(B_1^2-B_4^2\right)}.
\end{eqnarray}
\end{theorem}
\begin{proof}[Proof of Theorem~\ref{theortetr}:]
Taking into account the symmetry of the weights $B_{1}=B_{2}$ and $B_{3}=B_{4}$ for $B_{1}>B_{4}$ and the symmetries of the tetragon, the objective function (\ref{obj1}) of the weighted Fermat-Torricelli problem (Problem~\ref{sym1}) can be reduced to an equivalent problem by placing a wall along the midperpendicular line of $A_{1}A_{2}$ and $A_{3}A_{4}$, which states that: Find a point $A_{0}$ which belongs to the midperpendicular of $A_{1}A_{2}$ and $A_{3}A_{4}$ and minimizes the objective function
\begin{equation}\label{obj12}
\frac{f}{2}=B_{1}a_{01}+B_{4} a_{04}.
\end{equation} \begin{figure} \caption{The weighted floating equilibrium point (weighted Fermat-Torricelli point) $A_{0} \label{fig2} \end{figure} We express $a_{01},$ $a_{02},$ $a_{03}$ and $a_{04}$ as a function of $y:$ \begin{equation}\label{a01} a_{01}^{2}=\left(\frac{a}{2}\right)^{2}+\left(\frac{a}{2}-y\right)^{2} \end{equation} \begin{equation}\label{a02} a_{02}^{2}=\left(\frac{a}{2}\right)^{2}+\left(\frac{a}{2}-y\right)^{2} \end{equation} \begin{equation}\label{a03} a_{03}^{2}=\left(\frac{a}{2}\right)^{2}+\left(\frac{a}{2}+y\right)^{2} \end{equation} \begin{equation}\label{a04} a_{04}^{2}=\left(\frac{a}{2}\right)^{2}+\left(\frac{a}{2}+y\right)^{2} \end{equation} By replacing (\ref{a01}) and (\ref{a04}) in (\ref{obj12}) we get: \begin{equation}\label{obj13} B_{1}\sqrt{\left(\frac{a}{2}\right)^{2}+\left(\frac{a}{2}-y\right)^{2}}+B_{4}\sqrt{\left(\frac{a}{2}\right)^{2}+\left(\frac{a}{2}+y\right)^{2}} \to min. \end{equation} By differentiating (\ref{obj13}) with respect to $y,$ and by squaring both parts of the derived equation, we get: \begin{equation}\label{fourth1} \frac{B_{1}^2 \left(\frac{a}{2}-y\right)^{2} }{\left(\frac{a}{2}\right)^{2}+\left(\frac{a}{2}-y\right)^{2}}=\frac{B_{4}^2 \left(\frac{a}{2}+y\right)^{2}}{\left(\frac{a}{2}\right)^{2}+\left(\frac{a}{2}+y\right)^{2}} \end{equation} or \begin{equation}\label{fourth2} 8 \left(B_{1}^{2}-B_{4}^2\right) y^4+2 a^2 \left(-B_{1}^2+B_{4}^2\right) y^2-2 a^3 \left(B_{1}^2+B_{4}^2\right) y+a^4 \left(B_{1}^2-B_{4}^2\right)=0. \end{equation} By solving the fourth order equation with respect to $y,$ we derive two complex solutions and two real solutions (Ferrari's solution, see also in \cite{Shmakov:11}) which depend on $B_{1}, B_{4}$ and $a.$ One of the two real solutions with respect to $y$ is (\ref{analsoltetragon}). From (\ref{analsoltetragon}), we obtain that the weighted Fermat-Torricelli point $A_{0}$ is located at the interior of $A_{1}A_{2}A_{3}A_{4}$ (see fig.~\ref{fig2}). \end{proof} The Complementary Fermat-Torricelli problem was stated by Courant and Robbins (see in \cite[pp.~358]{Cour/Rob:51}) for a triangle which is derived by the weighted Fermat-Torricelli problem by placing one negative weight to one of the vertices of the triangle and asks for the complementary weighted Fermat-Torricelli point which minimizes the corresponding objective function. 
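As a numerical cross-check of the quartic equation \eqref{fourth2} and of its two real solutions, the following sketch (Python with {\tt numpy}, not part of the derivation) computes the real roots for the data $a=2$, $B_{1}=B_{2}=1.5$, $B_{3}=B_{4}=1$; the smaller root is the value given by \eqref{analsoltetragon}, and the larger one corresponds to the complementary point discussed below.
\begin{verbatim}
import numpy as np

a, B1, B4 = 2.0, 1.5, 1.0   # data used in the example of this section
coeffs = [8*(B1**2 - B4**2),            # y^4
          0.0,                          # y^3
          -2*a**2*(B1**2 - B4**2),      # y^2
          -2*a**3*(B1**2 + B4**2),      # y^1
          a**4*(B1**2 - B4**2)]         # y^0
roots = np.roots(coeffs)
print(np.sort(roots[np.abs(roots.imag) < 1e-9].real))
# prints approximately [0.36265  1.80699]
\end{verbatim}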
We need to state the Complementary weighted Fermat-Torricelli problem for a tetragon, in order to explain the second real solution which have been obtained by (\ref{fourth2}) with respect to $y.$ \begin{problem}\label{sym2complementary} Given a tetragon $A_{1}A_{2}A_{3}A_{4}$ and a weight $B_{i}$ (a positive or negative real number) which corresponds to the vertex $A_{i},$ for $i=1,2,3,4,$ find a fifth point $A_{0}$ (weighted Fermat-Torricelli point) which minimizes the objective function \begin{equation}\label{obj1} f=B_{1}a_{01}+B_{2} a_{02}+ B_{3} a_{03}+B_{4} a_{04} \end{equation} for $\|B_{1}\|>\|B_{4}\|,$ $B_{1}=B_{2}$ and $B_{3}=B_{4}.$ \end{problem} \begin{proposition}\label{theortetrcomp1} The location of the complementary weighted Fermat-Torricelli point $A_{0}^{\prime}$ (solution of Problem~\ref{sym2complementary}) of $A_{1}A_{2}A_{3}A_{4}$ for $B_{1}=B_{2}<0,$ $B_{3}=B_{4}<0$ and $\|B_{1}\|>\|B_{4}\|$ coincides with the location of the corresponding weighted Fermat-Torricelli point of $A_{1}A_{2}A_{3}A_{4}$ for $B_{1}=B_{2}>0,$ $B_{3}=B_{4}>0$ and $\|B_{1}\|>\|B_{4}\|.$ \end{proposition} \begin{proof}[Proof of Proposition~\ref{theortetrcomp1}:] By applying theorem~\ref{theortetr} for $B_{1}=B_{2}<0,$ $B_{3}=B_{4}<0$ we derive the weighted floating equilibrium condition (see fig.~\ref{fig3}): \begin{figure} \caption{The complementary weighted Fermat-Torricelli point $A_{0} \label{fig3} \end{figure} \begin{equation}\label{compl1} \vec{B_{1}}+\vec{B_{2}}+\vec{B_{3}}+\vec{B_{4}}=\vec{0} \end{equation} or \begin{equation}\label{compl2} (-\vec{B_{1}})+(-\vec{B_{2}})+(-\vec{B_{3}})+(-\vec{B_{4}})=\vec{0}. \end{equation} From (\ref{compl1}) and (\ref{compl2}), we derive that the complementary weighted Fermat-Torricelli point $A_{0}^{\prime}$ coincides with the weighted Fermat-Torricelli point $A_{0}.$ The difference between the figures~\ref{fig2} and ~\ref{fig3} is that the vectors $\vec{B}_{i}$ change direction from $A_{i}$ to $A_{0},$ for $i=1,2,3,4.$ \end{proof} \begin{proposition}\label{theortetrcomp2} The location of the complementary weighted Fermat-Torricelli point $A_{0}^{\prime}$ (solution of Problem~\ref{sym2complementary}) of $A_{1}A_{2}A_{3}A_{4}$ for $B_{1}=B_{2}<0,$ $B_{3}=B_{4}>0$ or $B_{1}=B_{2}>0,$ $B_{3}=B_{4}<0$ and $\|B_{1}\|>\|B_{4}\|$ is given by: \begin{eqnarray}\label{analsoltetragoncom} &&y=\frac{\sqrt{d}}{2}+\frac{1}{2} \nonumber\\ &&{}\sqrt{-\frac{\frac{2\ 2^{1/3} w}{\left(\sqrt{s}+z\right)^{1/3}}+2^{2/3} \left(\sqrt{s}+z\right)^{1/3}+32 a \left(2+a \left(-2-\frac{3}{\sqrt{d}}\right)\right) B_1^2+32 a \left(-2+2 a-\frac{3 a}{\sqrt{d}}\right) B_4^2}{96 \left(B_1^2-B_4^2\right)}}.\nonumber\\ \end{eqnarray} where \begin{eqnarray}\label{analsoltetragon1com} &&z=-1024 \left(-a B_1^2+a^2 B_1^2+a B_4^2-a^2 B_4^2\right){}^3+27648 \left(B_1^2-B_4^2\right) \left(a^2 B_1^2+a^2 B_4^2\right){}^2+\nonumber\\ &&{}+9216 \left(B_1^2-B_4^2\right) \left(-a B_1^2+a^2 B_1^2+a B_4^2-a^2 B_4^2\right) \left(2 a^3 B_1^2+a^4 B_1^2-2 a^3 B_4^2-a^4 B_4^2\right)\nonumber\\ \end{eqnarray} \begin{eqnarray}\label{analsoltetragon2com} w=64 \left(-a B_1^2+a^2 B_1^2+a B_4^2-a^2 B_4^2\right){}^2+192 \left(B_1^2-B_4^2\right) \left(2 a^3 B_1^2+a^4 B_1^2-2 a^3 B_4^2-a^4 B_4^2\right),\nonumber\\ \end{eqnarray} \begin{eqnarray}\label{analsoltetragon3com} &&s=-4 w^3+(-1024 \left(-a B_1^2+a^2 B_1^2+a B_4^2-a^2 B_4^2\right){}^3+27648 \left(B_1^2-B_4^2\right) \left(a^2 B_1^2+a^2 B_4^2\right){}^2+\nonumber\\ &&{}+9216 \left(B_1^2-B_4^2\right) \left(-a B_1^2+a^2 B_1^2+a B_4^2-a^2 B_4^2\right) \left(2 a^3 
B_1^2+a^4 B_1^2-2 a^3 B_4^2-a^4 B_4^2\right)){}^2 \end{eqnarray} and \begin{eqnarray}\label{analsoltetragon4com} &&d=\frac{1}{2} \left(-a+a^2\right)+\frac{w}{24\ 2^{2/3} \left(\sqrt{s}+z\right)^{1/3} \left(B_1^2-B_4^2\right)}+\frac{\left(\sqrt{s}+z\right)^{1/3}}{48\ 2^{1/3} \left(B_1^2-B_4^2\right)}-\nonumber\\ &&{}-\frac{-a B_1^2+a^2 B_1^2+a B_4^2-a^2 B_4^2}{6 \left(B_1^2-B_4^2\right)}. \end{eqnarray} \end{proposition} \begin{proof}[Proof of Proposition~\ref{theortetrcomp2}:] Taking into account (\ref{obj13}) for $B_{1}=B_{2}<0,$ $B_{3}=B_{4}>0$ or $B_{1}=B_{2}>0,$ $B_{3}=B_{4}<0$ and $\|B_{1}\|>\|B_{4}\|$ and differentiating (\ref{obj13}) with respect to $y\equiv OA_{0}^{\prime},$ and by squaring both parts of the derived equation, we obtain (\ref{fourth2}) which is a fourth order equation with respect to $y.$ The second real solution of $y$ gives (\ref{analsoltetragoncom}). From (\ref{analsoltetragoncom}) and the vector equilibrium condition $\vec{B_{1}}+\vec{B_{2}}+\vec{B_{3}}+\vec{B_{4}}=\vec{0}$ we obtain that the complementary weighted Fermat-Torricelli point $A_{0}^{\prime}$ for $B_{1}=B_{2}<0,$ $B_{3}=B_{4}>0$ coincides with the complementary weighted Fermat-Torricelli point $A_{0}^{\prime\prime}$ for $B_{1}=B_{2}>0,$ $B_{3}=B_{4}<0$ ( Fig.~\ref{fig4} and ~\ref{fig5}). Furthermore, the solution (\ref{analsoltetragoncom}) yields that the complementary $A_{0}^{\prime}$ is located outside the tetragon $A_{1}A_{2}A_{3}A_{4}$ (Fig.~\ref{fig4} and ~\ref{fig5}). \begin{figure} \caption{The complementary weighted Fermat-Torricelli point $A_{0} \label{fig4} \end{figure} \begin{figure} \caption{The complementary weighted Fermat-Torricelli point for a tetragon $B_{1} \label{fig5} \end{figure} \end{proof} \begin{example}\label{tetr1} Given a tetragon $A_{1}A_{2}A_{3}A_{4}$ in $\mathbb{R}^{2},$ $a=2, B_{1}=B_{2}=1.5,$ $B_{3}=B_{4}=1$ from \ref{analsoltetragon} and (\ref{analsoltetragoncom}) we get $y=0.36265$ and $y=1.80699,$ respectively, with five digit precision. The weighted Fermat-Torricelli point $A_{0}$ and the complementary weighted Fermat-Torricelli point $A_{0^{\prime}}\equiv A_{0}$ for $B_{1}=B_{2}=-1.5$ and $B_{3}=B_{4}=-1$ corresponds to $y=0.36265.$ The complementary weighted Fermat-Torricelli point $A_{0}^{\prime}$ for $B_{1}=B_{2}=-1.5$ and $B_{3}=B_{4}=1$ or $B_{1}=B_{2}=1.5$ and $B_{3}=B_{4}=-1$ lies outside the tetragon $A_{1}A_{2}A_{3}A_{4}$ and corresponds to $y=1.80699$ \end{example} We denote by $A_{12}$ the intersection point of the midperpendicular of $A_{1}A_{2}$ and $A_{3}A_{4}$ with $A_{1}A_{2}$ and by $A_{14}$ the intersection point of the perpendicular from $A_{0}$ to the line defined by $A_{1}A_{4}.$ We shall calculate the angles $\alpha_{102}, \alpha_{203}, \alpha_{304}$ and $\alpha_{401}.$ \begin{proposition}\label{anglestetragon} The angles $\alpha_{102},$ $\alpha_{203},$ $\alpha_{304}$ and $\alpha_{401}$ are given by: \begin{equation}\label{alpha102} \alpha_{102}=2\arccos{\frac{\frac{a}{2}-y(B_{1},B_{4},a)}{\sqrt{\left(\frac{a}{2}\right)^{2}+\left(\frac{a}{2}-y\right)^{2}}}}, \end{equation} \begin{equation}\label{alpha304} \alpha_{304}=2\arccos{\left(\frac{B_{1}}{B_{4}}\cos\frac{\alpha_{102}}{2}\right)} \end{equation} and \begin{equation}\label{alpha401} \alpha_{401}=\alpha_{203}=\pi-\frac{\alpha_{102}}{2}-\frac{\alpha_{304}}{2}. \end{equation} \end{proposition} \begin{proof}[Proof of Proposition~\ref{anglestetragon}:] From $\triangle A_{1}A_{12}A_{0}$ and taking into account (\ref{analsoltetragon}), we get (\ref{alpha102}). 
From the right angled triangles $\triangle A_{1}A_{12}A_{0},$ $\triangle A_{1}A_{14}A_{0}$ and $\triangle A_{4}A_{14}A_{0},$ we obtain: \begin{equation}\label{sin102bis} a_{01}=\frac{a}{2\sin\frac{\alpha_{102}}{2}}, \end{equation} \begin{equation}\label{sin304bis} a_{04}=\frac{a}{2\sin\frac{\alpha_{304}}{2}}, \end{equation} and \begin{equation}\label{sin102304bis} a_{01}\cos\frac{\alpha_{102}}{2}+a_{04}\cos\frac{\alpha_{304}}{2}=a, \end{equation} By dividing both members of (\ref{sin102304bis}) by (\ref{sin102bis}) or (\ref{sin304bis}), we get: \begin{equation}\label{cot102304bis} \cot\frac{\alpha_{102}}{2}=2-\cot\frac{\alpha_{304}}{2}. \end{equation} From (\ref{cot102304bis}) the angle $\alpha_{102}$ is expressed as a function of $\alpha_{304}:$ $\alpha_{102}=\alpha_{102}(\alpha_{304}).$ By replacing (\ref{sin102bis}) and (\ref{sin304bis}) in (\ref{obj12}) we get: \begin{equation}\label{objbis} \frac{B_{1}}{\sin\frac{\alpha_{102}}{2}}+\frac{B_{4}}{\sin\frac{\alpha_{304}}{2}}\to min. \end{equation} By differentiating (\ref{cot102304bis}) with respect to $\alpha_{304},$ we derive: \begin{equation}\label{dercot102304bis} \frac{d\alpha_{102}}{d\alpha_{304}}=-\frac{\sin^{2}\frac{\alpha_{102}}{2}}{\sin^{2}\frac{\alpha_{304}}{2}}. \end{equation} By differentiating (\ref{objbis}) with respect to $\alpha_{304}$ and replacing in the derived equation (\ref{dercot102304bis}) we obtain (\ref{alpha304}). From the equality of triangles $\triangle A_{1}A_{0}A_{4}$ and $A_{2}A_{0}A_{3},$ we get $\alpha_{401}=\alpha_{203}$ which yields (\ref{alpha401}). \end{proof} \section{The weighted Fermat-Torricelli problem for convex quadrilaterals: The case $B_{1}=B_{2}$ and $B_{3}=B_{4}.$ } We need the following lemma, in order to find the weighted Fermat-Torricelli point for a given convex quadrilateral $A_{1}^{\prime}A_{2}^{\prime}A_{3}^{\prime}A_{4}^{\prime}$ in $\mathbb{R}^{2},$ which has been proved in \cite[Proposition~3.1,pp.~414]{Zachos/Zou:88} for convex polygons in $\mathbb{R}^{2}.$ \begin{lemma}{\cite[Proposition~3.1,pp.~414]{Zachos/Zou:88} }\label{tetragonnn} Let $A_1A_2A_{3}A_4$ be a tetragon in $\mathbb{R}^{2}$ and each vertex $A_{i}$ has a non-negative weight $B_{i}$ for $i=1,2,3,4.$ Assume that the floating case of the weighted Fermat-Torricelli point $A_{0}$ is valid: \begin{equation}\label{floatingcasetetr1} \|{\sum_{j=1, i\ne j}^{4}B_{j}\vec u(A_i,A_j)}\|>B_i. 
\end{equation}
If $A_0$ is connected with every vertex $A_i$ for $i=1,2,3,4$, a point $A_{i}^{\prime}$ with corresponding non-negative weight $B_{i}$ is selected on the ray defined by the line segment $A_0A_i$, and the convex quadrilateral $A_{1}^{\prime}A_{2}^{\prime}A_{3}^{\prime}A_{4}^{\prime}$ is constructed such that
\begin{equation}\label{floatingcasequad2}
\|{\sum_{j=1, i\ne j}^{4}B_{j}\vec u(A_{i}^{\prime},A_{j}^{\prime})}\|>B_i,
\end{equation}
then the weighted Fermat-Torricelli point $A_{0}^{\prime}$ of $A_{1}^{\prime}A_{2}^{\prime}A_{3}^{\prime}A_{4}^{\prime}$ is identical to $A_{0}.$
\end{lemma}
Let $A_{1}^{\prime}A_{2}^{\prime}A_{3}^{\prime}A_{4}^{\prime}$ be a convex quadrilateral with corresponding non-negative weights $B_{1}=B_{2}$ at the vertices $A_{1}^{\prime}, A_{2}^{\prime}$ and $B_{3}=B_{4}$ at the vertices $A_{3}^{\prime}, A_{4}^{\prime}.$ We select $B_{1}$ and $B_{4}$ which satisfy the inequalities (\ref{floatingcasetetr1}), (\ref{floatingcasequad2}) and $B_{1}>B_{4},$ which correspond to the weighted floating case of the tetragon $A_{1}A_{2}A_{3}A_{4}$ and of $A_{1}^{\prime}A_{2}^{\prime}A_{3}^{\prime}A_{4}^{\prime}.$ Furthermore, we assume that $A_{0}$ is located in the interior of $\triangle A_{1}^{\prime}A_{2}^{\prime}A_{3}^{\prime}.$ We denote by $a_{ij}^{\prime}$ the length of the linear segment $A_{i}^{\prime}A_{j}^{\prime}$ and by $\alpha_{ikj}^{\prime}$ the angle $\angle A_{i}^{\prime}A_{k}^{\prime}A_{j}^{\prime}$ for $i,j,k=0,1,2,3,4, i\neq j\neq k$ (see fig.~\ref{fig6}).
\begin{figure}
\caption{The weighted Fermat-Torricelli point of a convex quadrilateral for $B_{1}=B_{2}$ and $B_{3}=B_{4}$.}
\label{fig6}
\end{figure}
\begin{theorem}\label{theorquadnn}
The location of the weighted Fermat-Torricelli point $A_{0}$ of $A_{1}^{\prime}A_{2}^{\prime}A_{3}^{\prime}A_{4}^{\prime}$ for $B_{1}=B_{2}$ and $B_{3}=B_{4}$ under the conditions (\ref{floatingcasetetr1}), (\ref{floatingcasequad2}) and $B_{1}>B_{4},$ is given by:
\begin{equation}\label{a02prime}
a_{02}^{\prime}=a_{12}^{\prime}\frac{\sin(\alpha_{213}^{\prime}-\alpha_{013}^{\prime})}{\sin\alpha_{102}}
\end{equation}
and
\begin{equation}\label{alpha120prime}
\alpha_{120}^{\prime}=\pi-\alpha_{102}-(\alpha_{123}^{\prime}-\alpha_{013}^{\prime}),
\end{equation}
where
\begin{equation} \label{eq:evquad3}
\alpha_{013}^{\prime}=\frac{\sin(\alpha_{213}^{\prime})-\cos(\alpha_{213}^{\prime}) \cot(\alpha_{102})- \frac{a_{31}^{\prime}}{a_{12}^{\prime} }\cot(\alpha_{304}+\alpha_{401})} {-\cos(\alpha_{213}^{\prime})-\sin(\alpha_{213}^{\prime}) \cot(\alpha_{102})+ \frac{a_{31}^{\prime}}{a_{12}^{\prime}}}
\end{equation}
and
\begin{equation} \label{eq:evquad1}
\cot(\alpha_{304}+\alpha_{401})= \frac{B_1+B_2\cos(\alpha_{102})+B_4\cos(\alpha_{401})}{B_4\sin(\alpha_{401})-B_2\sin(\alpha_{102})}.
\end{equation}
\end{theorem}
\begin{proof}[Proof of Theorem~\ref{theorquadnn}:]
From Lemma~\ref{tetragonnn}, the weighted Fermat-Torricelli point $A_{0}$ of $A_{1}A_{2}A_{3}A_{4}$ coincides with the weighted Fermat-Torricelli point $A_{0}^{\prime}\equiv A_{0}$ of $A_{1}^{\prime}A_{2}^{\prime}A_{3}^{\prime}A_{4}^{\prime},$ for the weights $B_{1}=B_{2}$ and $B_{3}=B_{4},$ under the conditions (\ref{floatingcasetetr1}), (\ref{floatingcasequad2}) and $B_{1}>B_{4}.$ Thus, we derive that: $\alpha_{102}=\alpha_{102}^{\prime},$ $\alpha_{203}=\alpha_{203}^{\prime},$ $\alpha_{304}=\alpha_{304}^{\prime}$ and $\alpha_{401}=\alpha_{401}^{\prime}.$ By applying the same technique that was used in \cite[Solution~2.2, pp.~412-414]{Zachos/Zou:88}, we express $a_{02}^{\prime},$ $a_{03}^{\prime},$ $a_{04}^{\prime}$ as functions of $a_{01}^{\prime}$ and $\alpha_{013}^{\prime},$ by applying the cosine law in the corresponding triangles $\triangle A_{2}^{\prime}A_{1}^{\prime}A_{0}^{\prime},$ $\triangle A_{3}^{\prime}A_{1}^{\prime}A_{0}^{\prime}$ and $\triangle A_{4}^{\prime}A_{1}^{\prime}A_{0}^{\prime}.$ By differentiating the objective function (\ref{obj1}) with respect to $a_{01}^{\prime}$ and $\alpha_{013}^{\prime}$ and applying the sine law in $\triangle A_{2}^{\prime}A_{1}^{\prime}A_{0}^{\prime},$ $\triangle A_{3}^{\prime}A_{1}^{\prime}A_{0}^{\prime}$ and $\triangle A_{4}^{\prime}A_{1}^{\prime}A_{0}^{\prime},$ we derive (\ref{eq:evquad1}), and solving with respect to $\alpha_{013}^{\prime}$ we derive (\ref{eq:evquad3}). By applying the sine law in $\triangle A_{2}^{\prime}A_{1}^{\prime}A_{0}^{\prime},$ we get (\ref{a02prime}). Finally, $\alpha_{120}^{\prime}=\pi-\alpha_{102}-(\alpha_{123}^{\prime}-\alpha_{013}^{\prime}).$
\end{proof}
\section{The weighted Fermat-Torricelli problem for convex quadrilaterals: The case $B_{1}=B_{3}$ and $B_{2}=B_{4}.$ }
Let $A_{1}^{\prime}A_{2}^{\prime}A_{3}^{\prime}A_{4}^{\prime}$ be a convex quadrilateral with corresponding non-negative weights $B_{1}=B_{3}$ at the vertices $A_{1}^{\prime}, A_{3}^{\prime}$ and $B_{2}=B_{4}$ at the vertices $A_{2}^{\prime}, A_{4}^{\prime}.$ We select $B_{1}$ and $B_{4}$ which satisfy the inequalities (\ref{floatingcasetetr1}), such that $A_{0}$ is an interior point of $A_{1}^{\prime}A_{2}^{\prime}A_{3}^{\prime}A_{4}^{\prime}.$
\begin{theorem}\label{diagonalquad}
The location of the weighted Fermat-Torricelli point $A_{0}$ of $A_{1}^{\prime}A_{2}^{\prime}A_{3}^{\prime}A_{4}^{\prime}$ for $B_{1}=B_{3}$ and $B_{2}=B_{4}$ under the conditions (\ref{floatingcasetetr1}), (\ref{floatingcasequad2}) is the intersection point of the diagonals $A_{1}^{\prime}A_{3}^{\prime}$ and $A_{2}^{\prime}A_{4}^{\prime}.$
\end{theorem}
\begin{proof}[Proof of Theorem~\ref{diagonalquad}:]
From the weighted floating equilibrium condition (\ref{floatingequlcond}) of Theorem~\ref{theor1} we get:
\begin{equation}\label{fltcond1}
\vec{B_{1}}+\vec{B_{2}}=-(\vec{B_{3}}+\vec{B_{4}})
\end{equation}
and
\begin{equation}\label{fltcond2}
\vec{B_{1}}+\vec{B_{4}}=-(\vec{B_{2}}+\vec{B_{3}}).
\end{equation}
Taking the inner product of the first part of (\ref{fltcond1}) with $\vec{B_{1}}+\vec{B_{2}}$ and of the second part of (\ref{fltcond1}) with $-(\vec{B_{3}}+\vec{B_{4}}),$ we obtain $\|\vec{B_{1}}+\vec{B_{2}}\|=\|\vec{B_{3}}+\vec{B_{4}}\|.$ Since $\|\vec{B_{i}}\|=B_{i},$ $B_{1}=B_{3}$ and $B_{2}=B_{4},$ we derive that:
\[\alpha_{102}=\alpha_{304}.\]
Similarly, taking the inner product of the first part of (\ref{fltcond2}) with $\vec{B_{1}}+\vec{B_{4}}$ and of the second part of (\ref{fltcond2}) with $-(\vec{B_{2}}+\vec{B_{3}}),$ we derive that:
\[\alpha_{104}=\alpha_{203}.\]
Since $\alpha_{102}+\alpha_{203}+\alpha_{304}+\alpha_{401}=2\pi$ and $\alpha_{401}=\alpha_{104},$ the two equalities yield $\alpha_{102}+\alpha_{203}=\pi$ and $\alpha_{203}+\alpha_{304}=\pi.$ Hence $A_{1}^{\prime},$ $A_{0},$ $A_{3}^{\prime}$ are collinear and $A_{2}^{\prime},$ $A_{0},$ $A_{4}^{\prime}$ are collinear, so $A_{0}$ is the intersection point of the diagonals $A_{1}^{\prime}A_{3}^{\prime}$ and $A_{2}^{\prime}A_{4}^{\prime}.$
\end{proof}
\begin{proposition}\label{diagonalquadcom} The location of the complementary weighted Fermat-Torricelli point $A_{0}$ of $A_{1}^{\prime}A_{2}^{\prime}A_{3}^{\prime}A_{4}^{\prime}$ for $B_{1}=B_{3}<0$ and $B_{2}=B_{4}<0$ under the conditions (\ref{floatingcasetetr1}), (\ref{floatingcasequad2}) is the intersection point of the diagonals $A_{1}^{\prime}A_{3}^{\prime}$ and $A_{2}^{\prime}A_{4}^{\prime}.$ \end{proposition} \begin{proof}[Proof of Proposition~\ref{diagonalquadcom}:] Taking into account (\ref{obj1}) for $B_{1}=B_{3}<0,$ $B_{2}=B_{4}<0$ we derive the same vector equilibrium condition $\vec{B_{1}}+\vec{B_{2}}+\vec{B_{3}}+\vec{B_{4}}=\vec{0}.$ Therefore, we obtain that the complementary weighted Fermat-Torricelli point $A_{0}^{\prime}$ for $B_{1}=B_{3}<0,$ $B_{2}=B_{4}<0$ coincides with the weighted Fermat-Torricelli point $A_{0}$ of $A_{1}^{\prime}A_{2}^{\prime}A_{3}^{\prime}A_{4}^{\prime}$ for $B_{1}=B_{3}>0,$ $B_{2}=B_{4}>0.$ \end{proof} The author acknowledges Professor Dr. Vassilios G. Papageorgiou for many fruitful discussions and for his valuable comments. \end{document}
\begin{document} \title{Toeplitz algebras in quantum Hopf fibrations} \begin{abstract} The paper presents applications of Toeplitz algebras in Noncommutative Geometry. As an example, a quantum Hopf fibration is given by gluing trivial U(1) bundles over quantum discs (or, synonymously, Toeplitz algebras) along their boundaries. The construction yields associated quantum line bundles over the generic Podle\'s spheres which are isomorphic to those from the well-known Hopf fibration of quantum SU(2). The relation between these two versions of quantum Hopf fibrations is made precise by giving an isomorphism in the category of right U(1)-comodules and left modules over the C*-algebra of the generic Podle\'s spheres. It is argued that the gluing construction yields a significant simplification of index computations by obtaining elementary projections as representatives of K-theory classes. \end{abstract} \sigma}\def\hs{\sigmaection{Introduction} In Noncommutative Geometry, the Toeplitz algebra has a fruitful interpretation as the algebra of continuous function on the quantum disc \cite{KL}. In this picture, the description of the Toeplitz algebra as the C*-algebra extension of continuous functions on the circle by the compact operators corresponds to an embedding of the circle into the quantum disc. Analogous to the classical case, one can construct ``topologically'' non-trivial quantum spaces by taking trivial fibre bundles over two quantum discs and gluing them along their boundaries. Here, the gluing procedure is described by a fibre product in an appropriate category (C*-algebras, finitely generated projective modules, etc.). This approach has been applied successfully to the construction of line bundles over quantum 2-spheres \cite{CM1,HMS,W} and to the description of quantum Hopf fibrations \cite{BaumHMW,CM2,HMS06,HW}. One of the advantages of the fibre product approach is that it provides an effective tool for simplifying index computations. This has been discussed in \cite{W} on the example of the Hopf fibration of quantum SU(2) over the generic Podle\'s spheres \cite{P}. Whilst earlier index computations for quantum 2-spheres relied heavily on the index theorem \cite{H,HMS03}, the fibre product approach in \cite{W} allowed to compute the index pairing directly by producing simpler representatives of K-theory classes. The description of quantum line bundles in \cite{W} bears a striking analogy to the classical case: the same transition functions are used to glue the trivial bundles over the (quantum) disc along their boundaries. However, the link between the fibre product approach of quantum line bundles and the Hopf fibration of quantum SU(2) has been established only at a ``K-theoretic level'', i.~e., it has been shown that the corresponding projective modules are Murray-von Neumann equivalent. The present work will give a more geometrical picture of the quantum Hopf fibration. Analogous to the classical case, we will construct a non-trivial U(1) quantum principal bundle over the generic Podle\'s spheres such that the associated line bundles are the previously obtained quantum line bundles. Here, a quantum principal bundle is described by a Hopf-Galois extension (see the preliminaries). It turns out that our $\U$ quantum principal bundle is isomorphic to a quantum 3-sphere from \cite{CM2}. 
As an application of the fibre product approach, we will show that the associated quantum line bundles are isomorphic to projective modules given by completely elementary 1-dimensional projections which leads to a significant simplification of index computations. It is known that the Hopf fibration of quantum SU(2) over the generic Podle\'s spheres is not given by a Hopf-Galois extension but only by a so-called coalgebra Galois extension (that is, U(1) is only considered as a coalgebra). In the present paper, we will establish a relation between both versions of a quantum Hopf fibration by describing an explicit isomorphism in the category of right U(1)-comodules and left modules over the C*-algebra of the generic Podle\'s spheres. Clearly, this isomorphism cannot be turned into an algebra isomorphism of quantum 3-spheres since otherwise the Hopf fibration of quantum SU(2) over the generic Podle\'s spheres would be a Hopf-Galois extension. \sigma}\def\hs{\sigmaection{Preliminaries} \sigma}\def\hs{\sigmaubsection{Coalgebras and Hopf algebras} \label{qg} A coalgebra is a vector space $C$ over a field ${\mathbb K}$ equipped with two linear maps ${\mathrm{co}}p :C\rightarrow C\otimes C$ and $\varepsilon : S\rightarrow {\mathbb K}$, called the comultiplication and the counit, respectively, such that \begin{align} &({\mathrm{co}}p\otimes \mathrm{id}) \circ {\mathrm{co}}p = (\mathrm{id}\otimes{\mathrm{co}}p ) \circ {\mathrm{co}}p,\\ & (\varepsilon\otimes \mathrm{id}) \circ {\mathrm{co}}p = \mathrm{id} =(\mathrm{id}\otimes\varepsilon ) \circ {\mathrm{co}}p. \end{align} A (right) corepresentation of a coalgebra $C$ on a ${\mathbb K}$-vector space $V$ is a linear mapping ${\mathrm{co}}p_V :V\rightarrow V\otimes C$ satisfying \[ ({\mathrm{co}}p_V\otimes \mathrm{id}) \circ {\mathrm{co}}p_V = (\mathrm{id}\otimes{\mathrm{co}}p ) \circ {\mathrm{co}}p_V, \quad (\mathrm{id}\otimes\varepsilon ) \circ{\mathrm{co}}p_V =\mathrm{id}. \] We then refer to $V$ as a right $C$-comodule. The corepresentation is said to be irreducible if $\{0\}$ and $V$ are the only invariant subspaces. A linear mapping $\phi$ between right $C$-comodules $V$ and $W$ is called colinear, if ${\mathrm{co}}p_W\circ \phi=(\phi\otimes\mathrm{id})\circ {\mathrm{co}}p_V$. A Hopf algebra $A$ is a unital algebra and coalgebra such that ${\mathrm{co}}p$ and $\varepsilon$ are algebra homomorphism, together with a linear mapping $\kappa: A\rightarrow A$, called the antipode, such that \[ m \circ (\kappa\otimes \mathrm{id}) \circ {\mathrm{co}}p(a) = \varepsilon(a)=m\circ (\mathrm{id}\otimes\kappa ) \circ {\mathrm{co}}p(a), \quad a\in A, \] where $m: A\otimes A\rightarrow A$ denotes the multiplication map. We say that $C$ and $A$ are a *-coalgebra and a *-Hopf algebra, respectively, if $C$ and $A$ carry an involution such that ${\mathrm{co}}p$ becomes a *-morphism. This immediately implies that $\varepsilon(x^*)= \overline{\varepsilon(x)}$. A finite dimensional corepresentation ${\mathrm{co}}p_V :V\rightarrow V\otimes A$ is called unitary, if there exists a linear basis $\{e_1, \ldots e_n\}$ of $V$ such that ${\mathrm{co}}p_V(e_i)=\sigma}\def\hs{\sigmaum_{j=1}^n e_j\otimes v_{ji}$ and $\sigma}\def\hs{\sigmaum_{j=1}^n v_{jk}^* v_{ji}=\delta_{ki}$, where $\delta_{kj}$ denotes the Kronecker symbol. The elements $v_{ij}$ are called matrix coefficients. A Hopf *-algebra $A$ is called a compact quantum group algebra if it is the linear span of all matrix coefficients of irreducible finite dimensional unitary corepresentations. 
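For illustration, consider a 1-dimensional unitary corepresentation, that is, $V={\mathbb K}e_1$ and ${\mathrm{co}}p_V(e_1)=e_1\otimes v$ with a single matrix coefficient $v$ satisfying $v^*v=1$. Comparing $({\mathrm{co}}p_V\otimes \mathrm{id}) \circ {\mathrm{co}}p_V$ with $(\mathrm{id}\otimes{\mathrm{co}}p ) \circ {\mathrm{co}}p_V$ and applying the counit axiom gives
\[
{\mathrm{co}}p(v)=v\otimes v, \qquad \varepsilon(v)=1,
\]
so $v$ is a group-like element. This is precisely the situation we will encounter for ${\mathcal O}(\mathrm{U}(1))$ below, where all irreducible unitary corepresentations are 1-dimensional.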
It can be shown that then $A$ admits a C*-algebra completion $H$ in the universal C*-norm (that is, the supremum of the norms of all bounded irreducible Hilbert space *-representations). We call $H$ also a compact quantum group and refer to the dense subalgebra $A$ as its Peter-Weyl algebra. The counit of $A$ has then a unique extension to $H$, and ${\mathrm{co}}p$ has a unique extension to a *-homomorphism ${\mathrm{co}}p: H\rightarrow H\,\bar\otimes\, H$, where $H\,\bar\otimes\, H$ denotes the least C*-completion of the algebraic tensor product. The main example in this paper will be $H={\mathbb C}S$, the C*-algebra of continuous functions on the unit circle $\mathrm{S}^1$. It is a compact quantum group with comultiplication ${\mathrm{co}}p(f)(p,q)=f(pq)$, counit $\varepsilon(f)=f(1)$ and antipode $\kappa(f)(p)=f(p^{-1})$. Note that ${\mathrm{co}}p$, $\varepsilon$ and $\kappa$ are given by pullbacks of the group operations of $\mathrm{S}^1=\mathrm{U}(1)$. Let $U\in{\mathbb C}S$, $U(\mathrm{e}^{\mathrm{i}\phi})=\mathrm{e}^{\mathrm{i}\phi }$, denote the unitary generator of ${\mathbb C}S$. Then the Peter-Weyl algebra of $H$ is given by ${\mathcal O}(\mathrm{U}(1))=\mbox{$\mathrm{span}$}\{\, U^N:N\in{\mathbb Z}\,\}$ with ${\mathrm{co}}p(U^N)=U^N\otimes U^N$, $\varepsilon(U^N)=1$ and $\kappa(U^N)=U^{-1}$. Note also that the irreducible unitary corepresentations of ${\mathcal O}(\mathrm{U}(1))$ are all 1-dimensional and are given by ${\mathrm{co}}p_{\mathbb C}(1)=1\otimes U^N$. From the previous paragraph, it becomes clear why noncommutative compact quantum groups are regarded as generalizations of function algebras on compact groups. We give now the definition for a quantum analogue of principal bundles. First we remark that a group action on a topological space corresponds to a coaction of a quantum group or, more generally, to a coaction of a coalgebra. Now let $A$ be a Hopf algebra, $P$ a unital algebra, and ${\mathrm{co}}p_P : P\rightarrow P\otimes A$ a corepresentation which is also an algebra homomorphism (one says that $P$ is a right $A$-comodule algebra). Then the space of coinvariants $$ P^{{\mathrm{co}} A}:=\{ b\in P : {\mathrm{co}}p(b)=b\otimes 1\} $$ is an algebra considered as a function algebra on the base space, and $P$ plays the role of a function algebra on the total space. If $A$ is a Hopf *-algebra and $P$ is a *-algebra, we require ${\mathrm{co}}p$ to be a *-homomorphism so that $B$ becomes a unital *-subalgebra of $P$. If ${\mathrm{co}}p : P\rightarrow P\otimes C$ is a corepresentation of a coalgebra $C$, then we set $$ P^{{\mathrm{co}} C}:=\{\, b\in P : {\mathrm{co}}p(bp)=b{\mathrm{co}}p(p)\ \, \text{for all}\ \, p\in P \,\} $$ with multiplication $b(p\otimes c)= bp\otimes c$ on the left tensor factor. Again, $P^{{\mathrm{co}} C}$ is a subalgebra of $P$. In our examples, there will be a group like element $e\in C$ (that is, ${\mathrm{co}}p(e)=e\otimes e$) such that ${\mathrm{co}}p(1)= 1\otimes e$ and $$ P^{{\mathrm{co}} C}=B:=\{\, b\in P : {\mathrm{co}}p(b)=b\otimes e\, \}. $$ If $P$ and $C$ carry an involution, ${\mathrm{co}}p_P$ is a *-morphism and $e^*=e$, then $B$ is a *-subalgebra of $P$. Analogous to right corepresentations, one defines left corepresentations ${}_V{\mathrm{co}}p: V\rightarrow C\otimes V$. The associated (quantum) vector bundles are given by the cotensor product $P\,\mathcal{B}ox_C\, V$, where $$ P\,\mathcal{B}ox_C\, V :=\{\, x\in P\otimes V : ({\mathrm{co}}p_P\otimes \mathrm{id})(x)=(\mathrm{id}\otimes{}_V {\mathrm{co}}p)(x)\,\}. 
$$ Obviously, $P\,\mathcal{B}ox_C\, V$ is a left $P^{{\mathrm{co}} C}$-module. For the 1-dimensional representation ${}_{\mathbb C}{\mathrm{co}}p(1)=U^N\otimes 1$, this module is equivalent to $$ P_N:= \{\, p\in P: {\mathrm{co}}p_P(p)=p\otimes U^N\, \} $$ and is considered as a (quantum) line bundle. \sigma}\def\hs{\sigmaubsection{Pullback diagrams and fibre products} \label{sec-fp} The purpose of this section is to collect some elementary facts about fibre products. For simplicity, we start by considering the category of vector spaces. Let $\pi_0: A_0 \rightarrow A_{01}$ and $\pi_1:A_1\rightarrow A_{01}$ be vector spaces morphisms. Then the fibre product $A:=A_0{\times}_{(\pi_0,\pi_1)} A_1$ is defined by the pullback diagram \begin{equation} \label{A_is_fibre_product} \begin{CD} {A} @ >{\mathrm{pr}_1}>> {A_1} @.\\ @ V{\mathrm{pr}_0} VV @ V{\pi_1} VV @.\\ {A_0} @ >{\pi_0} >> {A_{01}} @. \ .\\ \end{CD} \end{equation} Up to a unique isomorphism, $A$ is given by \begin{equation} \label{A} A=\left\lbrace (a_0,a_1)\in A_0\times A_1 : \pi_0(a_0)=\pi_1(a_1) \right\rbrace, \end{equation} where the morphisms $\mathrm{pr}_0:A\rightarrow A_0$ and $\mathrm{pr}_1:A\rightarrow A_1$ are the left and right projections, respectively. In this paper, we will consider fibre products in the following categories: \begin{itemize} \item If $\pi_0: A_0 \rightarrow A_{01}$ and $\pi_1:A_1\rightarrow A_{01}$ are morphisms of *-al\-ge\-bras, then the fibre product $A_0{\times}_{(\pi_0,\pi_1)} A_1$ is a *-al\-ge\-bra with componentwise multiplication and involution. \item If we consider the pullback diagram \eqref{A_is_fibre_product} in the category of unital $C^*$-algebras, then $A_0{\times}_{(\pi_0,\pi_1)} A_1$ will be a unital $C^*$-algebra. \item If $B$ is an algebra and $\pi_0: A_0 \rightarrow A_{01}$ and $\pi_1:A_1\rightarrow A_{01}$ are morphisms of left $B$-modules, then the fibre product $A:=A_0{\times}_{(\pi_0,\pi_1)} A_1$ is a left $B$-module with left action $b.(a_0,a_1)=(b. a_0,b. a_1)$, where $b\in B$ and the dot denotes the left action. \item If we consider the pullback diagram \eqref{A_is_fibre_product} in the category of right $C$-comodules (or right $H$-comodule algebras), then $A:=A_0{\times}_{(\pi_0,\pi_1)} A_1$ will be a right $C$-comodule (or a right $H$-comodule algebra) with the coaction given by ${\mathrm{co}}p_A(a_1,a_2)=({\mathrm{co}}p_{A_1}(a_1), 0)+ (0,{\mathrm{co}}p_{A_2}(a_2))$. \end{itemize} Finally we remark that if $B_0$, $B_1$ and $B_{01}$ are dense subalgebras of $C^*$-algebras $A_0$, $A_1$ and $A_{01}$, respectively, and $\pi_0$ and $\pi_1$ restrict to morphisms $\pi_0: B_0 \rightarrow B_{01}$ and $\pi_1:B_1\rightarrow B_{01}$, then $B_0{\times}_{(\pi_0,\pi_1)} B_1$ is not necessarily dense in $A_0{\times}_{(\pi_0,\pi_1)} A_1$. A useful criterion for this to happen can be found in \cite[Theorem 1.1]{HW}. It suffices that $\pi_1\!\!\upharpoonright_{B_1} : B_1 \rightarrow B_{01}$ is surjective and $\ker(\pi_1) \cap B_1$ is dense in $\ker(\pi_1)$. \sigma}\def\hs{\sigmaubsection{Disc-type quantum 2-spheres} \label{sec-dq} From now on we will work over the complex numbers and $q$ will denote a real number from the interval $(0,1)$. The *-algebra $\Dq$ of polynomial functions on the quantum disc is generated by two generators $z$ and $z^{*}$ with relation \[ \label{Dq} z^* z -q z z^* = 1- q. \] A complete list of bounded irreducible *-representations of $\Dq$ can be found in \cite{KL}. First, there is a faithful representation on the Hilbert space $\lN$. 
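As a classical orientation for the constructions that follow, recall that the 2-sphere is obtained by gluing two closed discs along their boundary circles, and that on the level of continuous functions this gluing is exactly a fibre product: with $\pi_0,\pi_1: C(\mathrm{D}^2)\rightarrow C(\mathrm{S}^1)$ both denoting the restriction to the boundary,
\[
C(\mathrm{S}^2)\cong C(\mathrm{D}^2){\times}_{(\pi_0,\pi_1)} C(\mathrm{D}^2)=\left\lbrace (f_0,f_1)\in C(\mathrm{D}^2)\times C(\mathrm{D}^2) : f_0\!\upharpoonright_{\mathrm{S}^1}=f_1\!\upharpoonright_{\mathrm{S}^1} \right\rbrace .
\]
In the next subsection, the Toeplitz algebra plays the role of $C(\mathrm{D}^2)$ and the symbol map plays the role of the restriction to the boundary circle.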
On an orthonormal basis $\{ e_n:n\in{\mathbb N}_0\}$, the action of the generators reads as \[ \label{z} ze_n=\sigma}\def\hs{\sigmaqrt{1-q^{n+1}}Se_n, \quad z^*e_n=\sigma}\def\hs{\sigmaqrt{1-q^{n}}S^*e_n, \] where $$ Se_n=e_{n+1}, $$ denotes the shift operator on $\lN$. Next, there is a 1-parameter family of irreducible *-representations $\rho_u$ on ${\mathbb C}$, where $u\in \mathrm{S}^1=\{x\in{\mathbb C}:|x|=1\}$. They are given by assigning $$ \rho_u(z)=u,\qquad \rho_u(z^*)= \bar u. $$ The set of these representations is considered as the boundary $\mathrm{S}^1$ of the quantum disc consisting of ``classical points''. The universal C*-algebra of $\Dq$ is well known. It has been discussed by several authors (see, e.g., \cite{KL,MNW,s-a91}) that it is isomorphic to the Toeplitz algebra $\mathcal{T}$. Here, it is convenient to view the Toeplitz algebra $\mathcal{T}$ as the universal C*-algebra generated by $S$ and $S^*$ in $\mathrm{B}(\lN)$. Then above *-representation on $\lN$ becomes simply an embedding. Another characterization is given by the C*-extension $$ 0\longrightarrow \mathcal{K}(\lN) \longrightarrow\mathcal{T}\sigma}\def\hs{\sigmatackrel{\sigma}\def\hs{\sigmaigma}{\longrightarrow} {\mathbb C}S \longrightarrow 0, $$ where $\sigma}\def\hs{\sigmaigma : \mathcal{T} \rightarrow {\mathbb C}S$ is the so-called symbol map and corresponds, in the classical case, to an embedding of $\mathrm{S}^1$ into the complex unit disc. Let again $U(\mathrm{e}^{\mathrm{i}\phi})=\mathrm{e}^{\mathrm{i}\phi }$ denote the unitary generator of ${\mathbb C}S$. Then the symbol map is completely determined by setting $\sigma}\def\hs{\sigma(z)=U$. We can now construct a quantum 2-sphere ${\mathbb C}Sq$ by gluing two quantum discs along their boundaries. The gluing procedure is described by the fibre product $\mathcal{T}\times_{(\sigma}\def\hs{\sigma,\sigma}\def\hs{\sigma)}\mathcal{T}$, where $\mathcal{T}\times_{(\sigma}\def\hs{\sigma,\sigma}\def\hs{\sigma)}\mathcal{T}$ is defined by the following pullback diagram in the category of C*-algebras: \begin{equation} \label{TtimesT} \begin{CD} {\mathcal{T}\underset{(\sigma}\def\hs{\sigma,\sigma}\def\hs{\sigma)}{\times}\mathcal{T}} @ >{\mathrm{pr}_1}>> {\mathcal{T}} @.\\ @ V{\mathrm{pr}_0} VV @ V{\sigma}\def\hs{\sigma} VV @.\\ {\mathcal{T}} @ >{\sigma}\def\hs{\sigma} >> {{\mathbb C}S}. @.\\ \end{CD} \end{equation} Up to isomorphism, the C*-algebra ${\mathbb C}Sq:= \mathcal{T}\times_{(\sigma}\def\hs{\sigma,\sigma}\def\hs{\sigma)}\mathcal{T}$ is given by \begin{equation} \label{CSq} {\mathbb C}Sq=\{\, (a_1,a_2)\in \mathcal{T}\times\mathcal{T} : \sigma}\def\hs{\sigma(a_1)=\sigma}\def\hs{\sigma(a_2) \,\}. \end{equation} In the classical case, complex line bundles with winding number $N\in{\mathbb Z}$ over the 2-sphere can be constructed by taking trivial bundles over the northern and southern hemispheres and gluing them together along the boundary via the map $U^{N}:\mathrm{S}^1 \rightarrow \mathrm{S}^1$,\, $U^{N}(\mathrm{e}^{\mathrm{i}\phi})=\mathrm{e}^{\mathrm{i}\phi N}$. In \cite{W}, the same construction has been applied to to the quantum 2-sphere ${\mathbb C}Sq$. The roles of the northern and southern hemispheres are played by two copies of the quantum disc, and the transition function along the boundaries remains the same. 
This construction can be expressed by the following pullback diagram: \begin{equation} \label{TT} \xymatrix{ & \mathcal{T} \,{\underset{(U^N\sigma}\def\hs{\sigmaigma ,\sigma}\def\hs{\sigmaigma)}{\times}}\, \mathcal{T} \ar[dl]_{\mathrm{pr}_0} \ar[dr]^{\mathrm{pr}_1}& \\ \mathcal{T} \ar[d]_{\sigma}\def\hs{\sigmaigma} & & \mathcal{T} \ar[d]^{\sigma}\def\hs{\sigmaigma}\\ {\mathbb C}S \ar[rr]_{f\mapsto U^Nf} & & {\mathbb C}S. } \end{equation} So, up to isomorphism, we have \begin{equation} \label{fpLB} \mathcal{T} \times_{(U^N\sigma}\def\hs{\sigmaigma ,\sigma}\def\hs{\sigmaigma)} \mathcal{T} {\mathrm{co}}ng \{\,(a_0,a_1)\in \mathcal{T}\times \mathcal{T} \,:\, U^{N}\sigma}\def\hs{\sigma(a_0)=\sigma}\def\hs{\sigma(a_1)\,\}. \end{equation} It follows directly from Equation \eqref{CSq} that $\mathcal{T} \times_{(U^N\sigma}\def\hs{\sigmaigma ,\sigma}\def\hs{\sigmaigma)} \mathcal{T}$ is a ${\mathbb C}Sq$-(bi)module. This can also be seen from the general pullback construction by equipping $\mathcal{T}$ and ${\mathbb C}S$ with the structure of a left ${\mathbb C}Sq$-module. Explicitly, for $(a_0,a_1)\in{\mathbb C}Sq$, one defines $(a_0,a_1).a= a_0 a$ for $a\in\mathcal{T}$ on the left side, $(a_0,a_1).a= a_1 a$ for $a\in \mathcal{T}$ on the right side, and $(a_0,a_1).a= \sigma}\def\hs{\sigma(a_0) b=\sigma}\def\hs{\sigma(a_1)b$ for $b\in{\mathbb C}S$. To determine the K-theory and K-homology of ${\mathbb C}Sq$, we may use the results of \cite{MNW}. There it is shown that $\Kn({\mathbb C}podl){\mathrm{co}}ng {\mathbb Z} \oplus {\mathbb Z}$ and $\KN({\mathbb C}podl){\mathrm{co}}ng {\mathbb Z} \oplus {\mathbb Z}$. The two generators of the $\Kn$-group can be chosen to be the class $[1]$ of the unit element of ${\mathbb C}Sq$, and the class $[(0,1-SS^{*})]$. Describing an even Fredholm module by a pair of representations on the same Hilbert space such that the difference is a compact operator, one generator of $\KN({\mathbb C}podl)$ is obviously given by the class $[(\mathrm{pr}_1,\mathrm{pr}_0)]$ on the Hilbert space $\lN$. A second generator is $[(\pi_+\circ\hs\,,\,\pi_-\circ\hs)]$, where $\sigma}\def\hs{\sigma$ denotes the symbol map and $\pi_\pm: {\mathbb C}S\rightarrow\mathrm{B}(\lZ)$ is given by \begin{align} \begin{split} \label{pi} &\pi_+(U)e_n=e_{n+1}, \quad n\in{\mathbb Z},\\ &\pi_-(U)e_n=e_{n+1},\quad n\in{\mathbb Z}\sigma}\def\hs{\sigmaetminus \{-1,0\},\quad \pi_-(U)e_{-1}=e_1, \quad \pi_-(U)e_{0}=0 \end{split} \end{align} on an orthonormal basis $\{e_n\,:\, n\in{\mathbb Z}\}$ of $\lZ$. Note that the representation $\pi_-$ is non-unital: $\pi_-(1)$ is the projection onto $\mbox{$\mathrm{span}$}\{e_n\,:\, n\in{\mathbb Z}\sigma}\def\hs{\sigmaetminus \{0\}\,\}$. \sigma}\def\hs{\sigmaubsection{Quantum 3-spheres and quantum Hopf fibrations} \label{qhf} First we follow \cite{HMS06} and introduce the coordinate ring of a Heegaard-type quantum 3-sphere $\mathcal{O}(\mathrm{S}^3_{pq})$, $p,q\in (0,1)$ as the *-algebra generated by $a$, $a^*$, $b$, $b^*$ subjected to the relations \begin{align} \begin{split} \label{ab} & a^*a-qaa^*= 1-q, \quad b^*b-p b b^* = 1-p, \\ & (1-aa^*)(1-bb^*)=0,\quad ab=ba,\quad a^*b = ba^*. \end{split} \end{align} Its universal C*-algebra (i.e., the closure of $\mathcal{O}(\mathrm{S}^3_{pq})$ in the universal C*-norm given by the supremum over all bounded Hilbert space representations) will be denoted by ${\mathbb C}Spq$. 
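For completeness, let us verify that the pair $(\pi_+,\pi_-)$ differs by compact operators, as required for the second generator to define an even Fredholm module. By \eqref{pi},
\[
\bigl(\pi_+(U)-\pi_-(U)\bigr)e_{-1}=e_0-e_1,\qquad \bigl(\pi_+(U)-\pi_-(U)\bigr)e_{0}=e_1,\qquad \bigl(\pi_+(U)-\pi_-(U)\bigr)e_{n}=0\ \ \text{for}\ \, n\notin\{-1,0\},
\]
so $\pi_+(U)-\pi_-(U)$ has rank at most two. Since the set of all $f\in{\mathbb C}S$ for which $\pi_+(f)-\pi_-(f)$ is compact is a C*-subalgebra of ${\mathbb C}S$ containing the generator $U$, it follows that $\pi_+(f)-\pi_-(f)$ is compact for every $f\in{\mathbb C}S$.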
One can easily verify that the coaction ${\mathrm{co}}p_{\mathcal{O}(\mathrm{S}^3_{pq})}: \mathcal{O}(\mathrm{S}^3_{pq})\rightarrow \mathcal{O}(\mathrm{S}^3_{pq})\otimes {\mathcal O}(\mathrm{U}(1))$ given by $$ {\mathrm{co}}p_{\mathcal{O}(\mathrm{S}^3_{pq})}(a)=a\otimes U^*,\quad {\mathrm{co}}p_{\mathcal{O}(\mathrm{S}^3_{pq})}(b)=b\otimes U $$ turns $\mathcal{O}(\mathrm{S}^3_{pq})$ into a ${\mathcal O}(\mathrm{U}(1))$-comodule *-algebra. Its *-subalgebra of ${\mathcal O}(\mathrm{U}(1))$-coinvariants $\mathcal{O}(\mathrm{S}^2_{pq}):=\mathcal{O}(\mathrm{S}^3_{pq})^{{\mathrm{co}} {\mathcal O}(\mathrm{U}(1))}$ is generated by $$ A:= 1-aa^* , \quad B:= 1-bb^*,\quad R:= ab $$ with involution $A^*=A$, $B^*=B$ and commutation relations \begin{equation*} R^*R=1-qA-pB, \ \ RR^*=1-A-B, \ \ AR=qRA, \ \ BR =pRB,\ \ AB=0. \end{equation*} Note that $\mathcal{O}(\mathrm{S}^2_{pq})$ can also be considered as a *-subalgebra of ${\mathbb C}Sq$ from \eqref{CSq} by setting $$ A=( 1-zz^*,0), \quad B=(0,1-yy^*), \quad R=(z,y), $$ where $y$ and $z$ denote the generators of the quantum discs ${\mathcal O}(\mathrm{D}^2_{p})$ and ${\mathcal O}(\mathrm{D}^2_{q})$, respectively, satisfying the defining relation \eqref{Dq}. Using the fact that ${\mathcal O}(\mathrm{D}^2_{q})$ is dense in the Toeplitz algebra $\mathcal{T}$ for all $q\in(0,1)$, and the final remark of Section \ref{sec-fp}, one easily proves that ${\mathbb C}Sq= \mathcal{T}\times_{(\sigma}\def\hs{\sigma,\sigma}\def\hs{\sigma)}\mathcal{T}$ is the universal C*-algebra of $\mathcal{O}(\mathrm{S}^2_{pq})$. For $N\in {\mathbb Z}$, let \[ \label{LN} L_N:= \{\, p\in \mathcal{O}(\mathrm{S}^3_{pq})\,:\, {\mathrm{co}}p_{\mathcal{O}(\mathrm{S}^3_{pq})}(p)=p\otimes U^N\,\} \] denote the associated quantum line bundles. It has been shown in \cite{HMS06} that $L_N$ is isomorphic to $\mathcal{O}(\mathrm{S}^2_{pq})^{|N|+1} E_N$, where \[ \label{EN} E_N = X_N\, Y_N^\mathrm{tr}ans\in \mathrm{Mat}_{|N|+1,|N|+1}(\mathcal{O}(\mathrm{S}^2_{pq})) \] and, for $n\in{\mathbb N}$, \begin{align*} & X_{-n}=(b^{*n},ab^{*n-1},\ldots,a^n)^\mathrm{tr}ans,\qquad X_{n}=(a^{*n},ba^{*n-1},\ldots,b^n)^\mathrm{tr}ans,\\ &Y_{-n}=\mathcal{B}ig(\mbox{$\binom{n}{0}$}_{\!p}\,p^nA^nb^n\,,\, \mbox{$\binom{n}{1}$}_{\!p}p^{n-1}A^{n-1}b^{n-1}a^*\,,\,\ldots\,,\, \mbox{$\binom{n}{n}$}_{\!p}a^{*n}\mathcal{B}ig)^\mathrm{tr}ans, \\ &Y_{n}=\mathcal{B}ig(\mbox{$\binom{n}{0}$}_{\!q}q^nB^na^n\,,\, \mbox{$\binom{n}{1}$}_{\!q}q^{n-1}B^{n-1}a^{n-1}b^*\,,\,\ldots\,,\, \mbox{$\binom{n}{n}$}_{\!q}b^{*n}\mathcal{B}ig)^\mathrm{tr}ans, \end{align*} with $$ \mbox{$\binom{n}{0}$}_{\!x}= \mbox{$\binom{n}{n}$}_{\!x}:=1, \quad \mbox{$\binom{n}{k}$}_{\!x}:=\mbox{$ \frac{(1-x)\dots (1-x^n)}{(1-x)\dots (1-x^k)(1-x)\dots (1-x^{n-k})}$}, \ \, 0<k<n, \ \, x\in (0,1). $$ That $E_N$ is indeed an idempotent follows from $Y_N^\mathrm{tr}ans \, X_N= 1$ which can be verified by direct computations. Now we consider a much more prominent example of a quantum Hopf fibration. The *-algebra $\SUq$ of polynomial functions on the quantum group $\mathrm{SU}_q(2)$ is generated by $\ha$, $\hb$, $\hc$, $\hd$ with relations \begin{align*} & \ha \hb =q \hb \ha,\quad \ha \hc =q\hc \ha,\quad \hb \hd = q \hd \hb,\quad \hc \hd = q \hd \hc, \quad \hb \hc =\hc \hb,\\ & \ha \hd - q \hb \hc = 1, \quad \hd \ha - q^{-1} \hb \hc = 1, \end{align*} and involution $\ha^*=\hd$, $\hb^*=-q\hc$. This is actually a Hopf *-algebra with the Hopf structure $\Delta$, $\varepsilon$, $\kappa$. 
Here, we will only need explicit formulas for the homomorphism $\varepsilon : \SUq\rightarrow {\mathbb C}$ given by $$ \varepsilon(\ha)=\varepsilon(\hd)=1,\quad \varepsilon(\hb)=\varepsilon(\hc)=0. $$ For $s\in (0,1]$, the *-subalgebra generated by $$ \eta_s:=(\hd+q^{-1}s\hb)(\hb-s\hd),\quad \zeta_s:=1-(\ha-qs\hc)(\hd+s\hb). $$ is known as the generic Podle\'s sphere $\podl$ \cite{P}. Its generators satisfy the defining relations $$ \zeta_s \eta_s = q^2 \eta_s \zeta_s,\quad \!\! \eta_s^*\eta_s=(1-\zeta_s)(s^2+\zeta_s),\quad \!\! \eta_s\eta_s^*=(1-q^{-2}\zeta_s)(s^2+q^{-2}\zeta_s), $$ and $\zeta_s^*=\zeta_s$. For all $s\in (0,1]$ and $q\in(0,1)$, the universal C*-algebra of $\podl$ is isomorphic to ${\mathbb C}Sq$ \cite{MNW,s-a91}. With $x$ the generator of ${\mathcal O}(\mathrm{D}^2_{q^2})$, set $t:=1-xx*\in \mathcal{T}$. An embedding of $\podl$ into ${\mathbb C}Sq$ as a dense *-subalgebra is given by \[ \zeta_s =(-s^2 q^2 t, q^2 t), \quad \label{etaT} \eta_s=\left(s\sigma}\def\hs{\sigmaqrt{(1-q^2t)(1+s^2 q^2t)}\,S \,,\, \sigma}\def\hs{\sigmaqrt{(1-q^2 t )(s^2+q^2 t)}\,S\right). \] Let $\podl^+:=\{x\in\podl\,:\,\varepsilonpsilon (x)=0\}$. It has been shown in \cite{MS} that the quotient space $\SUq/\podl^+\SUq$ with coaction $(\mathrm{pr}_s\otimes \mathrm{pr}_s)\circ \Delta$ is a coalgebra isomorphic to ${\mathcal O}(\mathrm{U}(1))$. Here $\mathrm{pr}_s$ denotes the canonical projection and $\Delta$ the coaction of $\SUq$. We emphasize that this isomorphism holds only in the category of coalgebras, that is, $\SUq/\podl^+\SUq$ is a linear space (not an algebra!) spanned by basis elements $U^N$, $N\in {\mathbb Z}$, with coaction $\Delta( U^N)= U^N\otimes U^N$. The composition $(\mathrm{id}\otimes\mathrm{pr}_s)\circ \Delta$ turns $\SUq$ into an ${\mathcal O}(\mathrm{U}(1))$-comodule and the associated line bundles are given by $$ M_N:=\{\, p\in \SUq\,:\, (\mathrm{id}\otimes\mathrm{pr}_s)\circ \Delta (p)= p\otimes U^N\,\},\quad N\in {\mathbb Z}. $$ Moreover, $\podl=M_0=\SUq^{{\mathrm{co}} {\mathcal O}(\mathrm{U}(1))}$ and $\SUq=\oplus_{N\in{\mathbb Z}} M_N$. In contrast to quantum line bundles $L_N$ defined above, $M_N$ is only a \emph{left} $\podl$-module but not a \emph{bi}module. This is also due to the fact that $\SUq$ with above coaction is only an ${\mathcal O}(\mathrm{U}(1))$-comodule but not an ${\mathcal O}(\mathrm{U}(1))$-comodule algebra. Explicit descriptions of idempotents representing $M_N$ have been given in \cite{HMS03,SW}. Analogous to $L_N$, there are elements $v^N_{0},v^N_{1}, \ldots, v^N_{|N|} \in \SUq$ such that $M_N{\mathrm{co}}ng \podl^{|N|+1}P_N$, where \[ \label{PN} P_N:= (v^N_{0},v^N_{1}, \ldots, v^N_{|N|})^\mathrm{tr}ans\, (v^{N*}_{0},v^{N*}_{1}, \ldots, v^{N*}_{|N|})\in \mathrm{Mat}_{|N|+1,|N|+1}(\podl) \] with \[ \label{1} (v^{N*}_{0},v^{N*}_{1}, \ldots, v^{N*}_{|N|})\,(v^N_{0},v^N_{1}, \ldots, v^N_{|N|})^\mathrm{tr}ans=1. \] For a definition of $v^N_{k}$, see \cite{SW}. A description of the universal C*-algebra ${\mathbb C}SU$ of $\SUq$ as a fibre product can be found in \cite{HW}. 
There it is shown that ${\mathbb C}SU$ is isomorphic to the fibre product C*-algebra of the following pullback diagram: \begin{equation} \label{CSUq} \xymatrix{ & \makebox[48pt][c]{$\mathcal{T}\,\bar\otimesimes\,{\mathbb C}S \,{\underset{(W\circ \sigma}\def\hs{\sigmaigma\bar\otimesimes\mathrm{id} ,\pi_2)}{\times}}\, {\mathbb C}S$} \ar[dl]_{\mathrm{pr}_1} \ar[dr]^{\mathrm{pr}_2}& \\ \mathcal{T} \,\bar\otimesimes\,{\mathbb C}S\ar[d]_{\sigma}\def\hs{\sigmaigma\bar\otimesimes\mathrm{id} } & & {\mathbb C}S \ar[d]^{\pi_2}\\ {\mathbb C}S\,\bar\otimesimes\,{\mathbb C}S \ar[rr]_{W} & & {\mathbb C}S\,\bar\otimesimes\,{\mathbb C}S\;. } \end{equation} Here, $\pi_2: {\mathbb C}S\rightarrow {\mathbb C}S\,\bar\otimesimes\,{\mathbb C}S$ is defined by $\pi_2(f)(x,y)=f(y)$, and \[ \label{W} W: {\mathbb C}S\,\bar\otimesimes\, {\mathbb C}S\rightarrow {\mathbb C}S\,\bar\otimesimes\, {\mathbb C}S,\quad W(f)(x,y)=f(x,xy), \] is the so-called multiplicative unitary. In the next section, we will frequently use that $W(g\otimes U^N)(x,y)=g(x)x^Ny^N=(gU^N\otimes U^N)(x,y)$, that is, \[ \label{WU} W(g\otimes U^N) = gU^N\otimes U^N \] for all $g\in {\mathbb C}S$ and $N\in{\mathbb Z}$. As above, $U$ denotes the unitary generator of ${\mathbb C}S$ given by $U(\mathrm{e}^{\mathrm{i}\phi})=\mathrm{e}^{\mathrm{i}\phi }$ for $\mathrm{e}^{\mathrm{i}\phi}\in\mathrm{S^1}$. \sigma}\def\hs{\sigmaection{Fibre product approach to quantum Hopf fibrations} \sigma}\def\hs{\sigmaubsection{C*-algebraic construction of a quantum Hopf fibration} \label{Cfp} The aim of this section is to construct a $\mathrm{U}(1)$ quantum principal bundle over a quantum 2-sphere such that the associated quantum line bundles are given by \eqref{TT}. Our strategy will be to start with trivial $\mathrm{U}(1)$-bundles over two quantum discs and to glue them together along their boundaries by a non-trivial transition function. Working in the category of C*-algebras, an obvious quantum analogue of a trivial bundle $D\times \mathrm{S}^1$ is given by the completed tensor product $\mathcal{T}\,\bar\otimes\, {\mathbb C}S$, where we regard $\mathcal{T}$ as the algebra of continuous functions on the quantum disc. Since ${\mathbb C}S$ is nuclear, there is no ambiguity about the tensor product completion. Recall from Section \ref{qg} that a group action on a principal bundle gets translated to a Hopf algebra coaction (or, slightly weaker, coalgebra coaction). As our group is $\mathrm{U}(1)=\mathrm{S}^1$, we take the Hopf *-algebra ${\mathbb C}S$ introduced in Section \ref{qg}. On the trivial bundle $\mathcal{T}\,\bar\otimes\,{\mathbb C}S$, we consider the ``trivial'' coaction given by applying the coproduct of ${\mathbb C}S$ to the second tensor factor. The gluing of the trivial bundles $\mathcal{T}\,\bar\otimes\,{\mathbb C}S$ will be accomplished by a fibre product over the ``boundary'' ${\mathbb C}S\,\bar\otimes\,{\mathbb C}S$. To obtain a non-trivial fibre bundle, we impose a non-trivial transition function. From the requirement that the associated quantum line bundles should be given by \eqref{TT}, the transition function is easily guessed: We use the multiplicative unitary $W$ from \eqref{W}. The result is described by the following pullback diagram. 
\begin{equation} \label{CSfp} \xymatrix{ & \makebox[48pt][c]{$ \mathcal{T}\,\bar\otimesimes\,{\mathbb C}S {\underset{(W\circ \pi_1,\pi_2)}{\times}} \mathcal{T}\,\bar\otimesimes\,{\mathbb C}S$} \ar[dl]_{\mathrm{pr}_1} \ar[dr]^{\mathrm{pr}_2}& \\ \mathcal{T} \,\bar\otimesimes\,{\mathbb C}S\ar[d]_{\pi_1:=\sigma}\def\hs{\sigmaigma\bar\otimesimes\mathrm{id} } & & \mathcal{T} \,\bar\otimesimes\,{\mathbb C}S\ar[d]^{\pi_2:=\sigma}\def\hs{\sigmaigma\bar\otimesimes\mathrm{id}}\\ {\mathbb C}S\,\bar\otimesimes\,{\mathbb C}S \ar[rr]_{W} & & {\mathbb C}S\,\bar\otimesimes\,{\mathbb C}S\;. } \end{equation} For brevity, we set ${\mathbb C}Sfp:=\mathcal{T}\,\bar\otimesimes\,{\mathbb C}S {\times}_{(W\circ \pi_1 ,\pi_2)} \mathcal{T}\,\bar\otimesimes\,{\mathbb C}S$. Note that $\sigma}\def\hs{\sigmaigma\bar\otimesimes\mathrm{id}$ and $W$ are morphisms of right ${\mathbb C}S$-comodule algebras. Thus ${\mathbb C}Sfp$ is a right ${\mathbb C}S$-comodule algebra (cf.\ Section \ref{qg}) or, in the terminology of Section \ref{qg}, a ${\mathbb C}S$ quantum principal bundle. Its relation to the (algebraic) Hopf fibration of $\mathcal{O}(\mathrm{S}^3_{pq})$ and to the quantum line bundles from Equation \ref{fpLB} will be established in the next proposition. \begin{prop} \label{prop1} ${\mathbb C}Sfp$ is the universal C*-algebra of $\mathcal{O}(\mathrm{S}^3_{pq})$, the associated quantum line bundles \[ \label{CN} {\mathbb C}Sfp_N:=\{\, p\in {\mathbb C}Sfp \, :\, {\mathrm{co}}p_{{\mathbb C}Sfp}(p)=p\otimes U^N\,\}, \quad N\in{\mathbb Z}, \] are isomorphic to $\mathcal{T} \times_{(U^N\sigma}\def\hs{\sigmaigma ,\sigma}\def\hs{\sigmaigma)} \mathcal{T}$ from \eqref{fpLB}, and $L_N\sigma}\def\hs{\sigmaubset {\mathbb C}Sfp_N$. Here, $L_N$ denotes the quantum line bundle defined in \eqref{LN}, and $U$ is the unitary generator of ${\mathbb C}S$. \end{prop} \begin{proof} Let $z$ and $y$ be the generators of the quantum discs ${\mathcal O}(\mathrm{D}^2_{q})$ and ${\mathcal O}(\mathrm{D}^2_{p})$, respectively. Consider the *-algebra homomorphism $\iota:\mathcal{O}(\mathrm{S}^3_{pq})\rightarrow {\mathbb C}Sfp$ given by \[ \label{i} \iota(a)= (z\otimes U^*, 1\otimes U^*), \quad \iota(b)= (1\otimes U, y\otimes U). \] Choosing a Poincar\'e-Birkhoff-Witt basis of $\mathcal{O}(\mathrm{S}^3_{pq})$, for instance all ordered polynomials in $a$, $a^*$, $b$, $b^*$, and using the embedding $\Dq\sigma}\def\hs{\sigmaubset \mathcal{T}$, one easily verifies that $\iota$ is injective. Moreover, since the operators $\pi(a)$ and $\pi(b)$ satisfy the quantum disc relation \eqref{Dq} for any bounded representation $\pi$, the *-representation $\iota$ is actually an isometry if we equip $\mathcal{O}(\mathrm{S}^3_{pq})$ with the universal C*-norm. Therefore it suffices to prove that $\iota(\mathcal{O}(\mathrm{S}^3_{pq}))$ is dense in ${\mathbb C}Sfp$. For this, consider the image of $\iota(\mathcal{O}(\mathrm{S}^3_{pq}))$ under the projections $\mathrm{pr}_1$ and $\mathrm{pr}_2$. Since $1\otimes U =\mathrm{pr}_1(\iota(b))\in \mathrm{pr}_1(\iota(\mathcal{O}(\mathrm{S}^3_{pq})))$ and $z\otimes 1= \mathrm{pr}_1(\iota(ab)) \in \mathrm{pr}_1(\iota(\mathcal{O}(\mathrm{S}^3_{pq})))$, we get $\mathrm{pr}_1(\iota(\mathcal{O}(\mathrm{S}^3_{pq})))=\Dq\otimes {\mathcal O}(\mathrm{U}(1))$, and similarly $\mathrm{pr}_2(\iota(\mathcal{O}(\mathrm{S}^3_{pq})))=\Dq\otimes {\mathcal O}(\mathrm{U}(1))$. Note that the latter is a dense *-subalgebra of $\mathcal{T}\otimes{\mathbb C}S$. 
Moreover, $(\hs\bar\otimes\mathrm{id})(\Dq\otimes {\mathcal O}(\mathrm{U}(1)))={\mathcal O}(\mathrm{U}(1))\otimes{\mathcal O}(\mathrm{U}(1))$ is dense in ${\mathbb C}S\,\bar\otimes\,{\mathbb C}S$, and $W:{\mathcal O}(\mathrm{U}(1))\otimes{\mathcal O}(\mathrm{U}(1))\rightarrow {\mathcal O}(\mathrm{U}(1))\otimes{\mathcal O}(\mathrm{U}(1))$ is an isometry. Since $W(U^n\otimes U^m)=U^{n+m}\otimes U^m$ for all $n,m\in{\mathbb Z}$ by \eqref{WU}, it is a bijection of ${\mathcal O}(\mathrm{U}(1))\otimes{\mathcal O}(\mathrm{U}(1))$ onto itself. From the foregoing, it follows that $ \iota(\mathcal{O}(\mathrm{S}^3_{pq}))= \Dq\otimesimes{\mathcal O}(\mathrm{U}(1)) {\times}_{(W\circ \sigma}\def\hs{\sigmaigma\otimesimes\mathrm{id} ,\pi_2)} \Dq\otimesimes{\mathcal O}(\mathrm{U}(1))$. By considering the ideal generated by the compact operator $1-zz^*\in \Dq$ (or $1-yy\in{\mathcal O}(\mathrm{D}^2_{p})$), one easily shows that $\ker(\hs\bar\otimes\mathrm{id})\cap (\Dq\otimes {\mathcal O}(\mathrm{U}(1)))$ is dense in $\ker(\hs\bar\otimes\mathrm{id})$. From the final remark in Section \ref{sec-fp}, we conclude that $\iota(\mathcal{O}(\mathrm{S}^3_{pq}))$ is dense in ${\mathbb C}Sfp$. To determine ${\mathbb C}Sfp_N$, recall that the coaction is given by the coproduct on the second tensor factor ${\mathbb C}S$. Assume that $f\in{\mathbb C}S$ satisfies ${\mathrm{co}}p(f)=f\otimes U^N$. Then it follows from $f=(\varepsilon\otimes\mathrm{id})\circ{\mathrm{co}}p(f)= f(1)U^N$ that $(\mathrm{id}\otimes{\mathrm{co}}p)(x)=x\otimes U^N$ for $x\in \mathcal{T}\,\bar\otimes\,{\mathbb C}S$ if and only if $x=t\otimes U^N$ with $t\in \mathcal{T}$. Since the morphisms in the pullback diagram \eqref{CSfp} are right colinear, we get $p\in {\mathbb C}Sfp_N$ if and only if $p=(t_1\otimes U^N, t_2\otimes U^N)$ and $(W\circ \sigma}\def\hs{\sigmaigma\bar\otimesimes\mathrm{id})(t_1\otimes U^N)=(\sigma}\def\hs{\sigmaigma\bar\otimesimes\mathrm{id})(t_2\otimes U^N)$. By \eqref{WU}, $W(\sigma}\def\hs{\sigmaigma(t_1)\otimesimes U^N)=\sigma}\def\hs{\sigmaigma(t_1)U^N\otimesimes U^N$. Therefore $(t_1\otimes U^N, t_2\otimes U^N)\in {\mathbb C}Sfp_N$ if and only if $\sigma}\def\hs{\sigma(t_1)U^N=\sigma}\def\hs{\sigma(t_2)$. This shows that an isomorphism between ${\mathbb C}Sfp_N$ and $\mathcal{T} \times_{(U^N\sigma}\def\hs{\sigmaigma ,\sigma}\def\hs{\sigmaigma)} \mathcal{T}$ is given by \[ \label{isom} {\mathbb C}Sfp_N\ni (t_1\otimes U^N, t_2\otimes U^N) \mapsto (t_1,t_2)\in \mathcal{T} \times_{(U^N\sigma}\def\hs{\sigmaigma ,\sigma}\def\hs{\sigmaigma)} \mathcal{T}. \] From \eqref{i} and ${\mathrm{co}}p(U^N)=U^N\otimes U^N$, it follows that ${\mathrm{co}}p_{{\mathbb C}Sfp}(\iota(a))=\iota(a)\otimes U^*$ and ${\mathrm{co}}p_{{\mathbb C}Sfp}(\iota(b))=\iota(b)\otimes U$. Hence $\iota$ is right colinear. Since $\iota$ is also an isometry, we can view $\mathcal{O}(\mathrm{S}^3_{pq})$ as a subalgebra of ${\mathbb C}Sfp$. Then $L_N\sigma}\def\hs{\sigmaubset {\mathbb C}Sfp_N$ by the definitions of $L_N$ and ${\mathbb C}Sfp_N$ in \eqref{LN} and \eqref{CN}, respectively. \end{proof} We remark that the universal C*-algebra of $\mathcal{O}(\mathrm{S}^3_{pq})$ has been studied in \cite{HMS06}, the K-theory of ${\mathbb C}Sfp$ has been determined in \cite{BaumHMW}; and from the last example in \cite{HKMZ}, it follows that ${\mathbb C}Sfp$ behaves well under the ${\mathbb C}S$-coaction (it is a principal Hopf-Galois extension). 
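As a small illustration of Proposition \ref{prop1} and of the isomorphism \eqref{isom}, consider the generators from \eqref{i}. For $\iota(b)=(1\otimes U, y\otimes U)$ we have $\sigma(1)U=U=\sigma(y)$, so $\iota(b)\in{\mathbb C}Sfp_1$, and under \eqref{isom} it corresponds to $(1,y)\in \mathcal{T} \times_{(U\sigma ,\sigma)} \mathcal{T}$. Similarly, $\iota(a)=(z\otimes U^*, 1\otimes U^*)$ satisfies $\sigma(z)U^{-1}=1=\sigma(1)$, so $\iota(a)\in{\mathbb C}Sfp_{-1}$, corresponding to $(z,1)\in \mathcal{T} \times_{(U^{-1}\sigma ,\sigma)} \mathcal{T}$.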
\subsection{Index computation for quantum line bundles}
The aim of this section is to illustrate that the fibre product approach may lead to a significant simplification of index computations. First we remark that, in (algebraic) quantum group theory, algebras are frequently defined by generators and relations similar to those in \eqref{ab} for $\mathcal{O}(\mathrm{S}^3_{pq})$ (more examples can be found, e.g., in \cite{KS}). A pair of *-representations on the same Hilbert space such that the difference yields compact operators gives rise to an even Fredholm module and can be used for index computations by pairing it with $K_0$-classes. If we want to compute for instance the index pairing with the $K_0$-class of the projective modules $L_N$ from \eqref{LN} by using the idempotents given in \eqref{EN}, then we face difficulties because of the growing size of the matrices. It is therefore desirable to find simpler representatives of the K-theory classes of the projective modules $L_N$. This section shows that the fibre product approach provides us with an effective tool for obtaining more suitable projections. In our example, the index pairing will reduce to its simplest possible form: it remains to calculate the trace of a projection onto a finite-dimensional subspace.

We start by proving that the projective modules ${\mathbb C}Spq_N$ can be represented by elementary 1-dimensional projections. Because of the isomorphism between ${\mathbb C}Spq_N$ and $\mathcal{T} \times_{(U^N\sigma ,\sigma)} \mathcal{T}$ in Proposition \ref{prop1}, this result has already been obtained in \cite{W}. For the convenience of the reader, we include the proof here. It uses essentially the same ``bra-ket'' argument that was used in \cite{HMS03,SW} to prove $M_N\cong \podl^{|N|+1}P_N$ for the Hopf fibration of $\SUq$.
\begin{prop} \label{isoEchi}
For $N\in{\mathbb Z}$, define
\begin{align}
&\chi_N:= (1\,,\,S^{|N|}S^{*|N|})\in {\mathbb C}Sq,~~\mbox{for}\ \, N <0,\\
&\chi_N:=(S^{N}S^{*N}\,,\,1)\in {\mathbb C}Sq,~~\mbox{for}\ \, N\geq 0.
\end{align}
Then the left ${\mathbb C}Sq$-modules ${\mathbb C}Spq_N$ and ${\mathbb C}Sq\chi_N$ are isomorphic.
\end{prop}
\begin{proof}
Since $\sigma(S^{n}S^{*n})=U^{n}U^{*n}=1$ for all $n\in{\mathbb N}$, the projections $\chi_N$ belong to ${\mathbb C}Sq=\mathcal{T} \times_{(\sigma ,\sigma)} \mathcal{T}$. We will use the isomorphism of Proposition \ref{prop1} and prove that ${\mathbb C}Sq\chi_N$ is isomorphic to $\mathcal{E}_{N}:=\mathcal{T} \times_{(U^N\sigma ,\sigma)} \mathcal{T}$. Let $N\geq 0$. From \eqref{CSq} and \eqref{fpLB}, it follows that $(fS^{*N}\,,\,g )\in{\mathbb C}Sq$ for all $(f,g)\in \mathcal{E}_N$. Therefore we can define a ${\mathbb C}Sq$-linear map $\Psi_N\,:\, \mathcal{E}_{N}\rightarrow {\mathbb C}Sq\chi_N$ by
\[ \label{Psi}
\Psi_N(f,g) := (fS^{*N} \,,\, g)\chi_N= (fS^{*N} \,,\, g),
\]
where we used $S^*S=\mathrm{id}$ in the second equality. Since $S^*$ is right invertible, we have $(f S^{*N}\,,\, g)=0$ if and only if $(f \,,\, g)=0$, hence $\Psi_N$ is injective. Now let $(f,g)\chi_N\in{\mathbb C}Sq\chi_N$. Then one has $(fS^{N},g)\in \mathcal{E}_{N}$ and $\Psi_N(fS^{N},g)=(fS^{N}S^{*N},g )=(f,g)\chi_N$, thus $\Psi_N$ is also surjective. This proves the claim of Proposition \ref{isoEchi} for $N\geq 0$. The proof for $N<0$ runs analogously with $\Psi_N$ defined by $\Psi_N(f,g) := (f\,,\, gS^{*|N|} )\chi_N$.
\end{proof}
Clearly, the (left) multiplication by elements of the C*-algebra ${\mathbb C}Sq$ turns $L_N\cong\mathcal{O}(\mathrm{S}^2_{pq})^{|N|+1} E_N$ into a (left) ${\mathbb C}Sq$-module. With a slight abuse of notation, we set ${\mathbb C}Sq L_N :=\mbox{$\mathrm{span}$}\{xv: x\in {\mathbb C}Sq,\ v\in L_N\}$. (Later it turns out that this module is generated by one element in $L_N$ so that the notation is actually correct.) If we show that ${\mathbb C}Sq L_N$ is isomorphic to ${\mathbb C}Spq_N$, then the elementary projections $\chi_N$ and the $(|N|+1)\times (|N|+1)$-matrices $E_N$ define the same $K_0$-class. The desired isomorphism will be established in the next proposition by using the embedding $L_N\subset {\mathbb C}Spq_N$ from Proposition \ref{prop1}.
\begin{prop} \label{prop2}
The left ${\mathbb C}Sq$-modules ${\mathbb C}Sq L_N\cong {\mathbb C}Sq^{|N|+1} E_N$ and ${\mathbb C}Spq_N$ are isomorphic.
\end{prop}
\begin{proof}
Using the embedding \eqref{i} and the inclusion from Proposition \ref{prop1}, we can view ${\mathbb C}Sq L_N =\mbox{$\mathrm{span}$}\{xv: x\in {\mathbb C}Sq,\ v\in L_N\}\subset{\mathbb C}Spq_N $ as a submodule of ${\mathbb C}Spq_N$. Let $N\in{\mathbb N}_0$. It follows from the isomorphism $\Psi_N$ defined in \eqref{Psi} that the left ${\mathbb C}Sq$-module ${\mathbb C}Spq_N=\{(fS^{*N},g):(f,g)\in{\mathbb C}Sq\}$ is generated by the element $(S^{*N},1)$. Therefore, to prove ${\mathbb C}Sq L_N ={\mathbb C}Spq_N$, it suffices to show that $(S^{*N},1)\in {\mathbb C}Sq L_N$. Since $\sigma(z^{*N})=U^{-N}$, we have $(z^{*N},1)\in \mathcal{T} \times_{(U^N\sigma ,\sigma)} \mathcal{T}$. Since $(z^{*N},1)$ is the image of $\iota(a^{*N})= (z^{*N}\otimes U^N,1\otimes U^N)$ under the isomorphism \eqref{isom}, we can view $(z^{*N},1)$ as an element of $L_N$. Let $t:=1-z z^*\in \mathcal{T}$. Note that $t$ is a self-adjoint operator with spectrum $\mathrm{spec}(t)=\{q^n: n\in{\mathbb N}_0\}\cup\{0\}$ (see Equation \eqref{z}). Applying the commutation relations \eqref{Dq}, one easily verifies that $z^{*N}z^{N}= \prod_{k=1}^N(1-q^k t)$. Since $\mathrm{spec}(t)\subset [0,1]$, the operator $z^{*N}z^{N}$ is strictly positive. Hence $|z^{N}|^{-1}=(z^{*N}z^{N})^{-1/2}$ belongs to the C*-algebra $\mathcal{T}$. Moreover, $\sigma(|z^{N}|^{-1})=1$ since $\sigma(z^{*N}z^{N})=1$. Therefore $(|z^{N}|^{-1},1)\in\mathcal{T} \times_{(\sigma ,\sigma)} \mathcal{T}= {\mathbb C}Sq$ and thus $(S^{*N},1)= (|z^{N}|^{-1},1) (z^{*N},1)\in {\mathbb C}Sq L_N$. This completes the proof for $N\geq 0$. The case $N<0$ is treated analogously.
\end{proof}
Recall that an (even) Fredholm module of a *-algebra $\mathcal{A}$ can be given by a pair of *-representations $(\rho_+,\rho_-)$ of $\mathcal{A}$ on a Hilbert space $\mathcal{H}$ such that the difference $\rho_+(a)-\rho_-(a)$ yields a compact operator for all $a\in\mathcal{A}$. In this case, for any projection $P\in \mathrm{Mat}_{n, n}(\mathcal{A})$, the operator $\rho_+(P)\rho_-(P): \rho_-(P)\mathcal{H}^{n} \rightarrow \rho_+(P)\mathcal{H}^{n}$ is a Fredholm operator, and its Fredholm index depends only on the $K_0$-class of $P$ and on the class of $(\rho_+,\rho_-)$ in K-homology. This pairing between K-theory and K-homology is referred to as the index pairing.
If it happens that $\rho_+(a)-\rho_-(a)$ yields trace class operators, then the index pairing can be computed by a trace formula, namely
\begin{equation} \label{CCpair}
\langle [(\rho_+,\rho_-)],[P]\rangle=\mathrm{tr}_\mathcal{H} (\mathrm{tr}_{\mathrm{Mat}_{n,n}} (\rho_+-\rho_-)(P)).
\end{equation}
In general, the computation of the traces gets more involved with increasing size of the matrix $P$. This will especially be the case if one works only with the polynomial algebras $\mathcal{O}(\mathrm{S}^3_{pq})$ and $\mathcal{O}(\mathrm{S}^2_{pq})$, and uses the $(|N|+1)\times(|N|+1)$-projections $E_N$ from \eqref{EN} with entries belonging to $\mathcal{O}(\mathrm{S}^2_{pq})$. In our example, the C*-algebraic fibre product approach improves the situation considerably since Propositions \ref{isoEchi} and \ref{prop2} provide us with the equivalent 1-dimensional projections $\chi_N$. As the index computation is one of our main objectives, we state the result in the following theorem.
\begin{thm} \label{T1}
Let $N\in{\mathbb Z}$. The isomorphic projective left ${\mathbb C}Sq$-modules ${\mathbb C}Spq_N$, ${\mathbb C}Sq L_N$, ${\mathbb C}Sq^{|N|+1} E_N$ and ${\mathbb C}Sq\chi_N$ define the same class in $K_0({\mathbb C}Sq)$, say $[\chi_N]$, and the pairing with the generators of the K-homology $K^0({\mathbb C}Sq)$ from the end of Section \ref{sec-dq} is given by
\[ \label{ic}
\langle\, [(\mathrm{pr}_1,\mathrm{pr}_0)] \,, [\chi_N]\,\rangle= N, \qquad \langle\, [(\pi_+\circ\sigma\,,\,\pi_-\circ\sigma)] \,, [\chi_N]\,\rangle=1.
\]
\end{thm}
\begin{proof}
The equivalences of the left ${\mathbb C}Sq$-modules have been shown in Propositions \ref{isoEchi} and \ref{prop2}. In particular, we are allowed to choose $\chi_N$ as a representative. For all $N\in{\mathbb Z}$, the operator $\pi_+\circ\sigma(\chi_N)-\pi_-\circ\sigma(\chi_N) =\pi_+(1)-\pi_-(1)$ is the projector onto the 1-dimensional subspace ${\mathbb C} e_0$, see Equation \eqref{pi}. In particular, it is of trace class so that Equation \eqref{CCpair} applies. Since the trace of a 1-dimensional projection is 1, we get
$$
\langle\, [(\pi_+\circ\sigma\,,\,\pi_-\circ\sigma)] \,, [\chi_N]\,\rangle =\mathrm{tr}_{\lZ}(\pi_+(1)-\pi_-(1))=1.
$$
Now let $N\geq 0$. Then $(\mathrm{pr}_1-\mathrm{pr}_0)(\chi_N)=(\mathrm{pr}_1-\mathrm{pr}_0)(S^NS^{*N},1)=1-S^NS^{*N}$ is the projection onto the subspace $\mbox{$\mathrm{span}$}\{e_0,\ldots,e_{N-1}\}$. Since it is of trace class with trace equal to the dimension of its image, we can apply Equation \eqref{CCpair} and get
$$
\langle\, [(\mathrm{pr}_1,\mathrm{pr}_0)] \,, [\chi_N]\,\rangle=\mathrm{tr}_{\lN}(1-S^NS^{*N})=N.
$$
Analogously, for $N<0$,
$$
\langle\, [(\mathrm{pr}_1,\mathrm{pr}_0)] \,, [\chi_N]\,\rangle=\mathrm{tr}_{\lN}(S^{|N|}S^{*|N|}-1)=-|N|=N,
$$
which completes the proof.
\end{proof}
Since the C*-algebra ${\mathbb C}Sq$ is isomorphic to the universal C*-algebra of the Podle\'s spheres $\podl$, the indices in Equation \eqref{ic} have also been obtained in \cite{HMS03} and \cite{W}. In the first paper, the computations relied heavily on the index theorem, whereas in \cite{W} and Theorem \ref{T1} the traces were computed directly by using elementary projections.
Note that Equation \eqref{ic} has a geometrical interpretation: The pairing with the K-homology class $[(\pi_+\circ\hs\,,\,\pi_-\circ\hs)]$ detects the rank of the projective module, and the pairing $\langle\, [(\mathrm{pr}_1,\mathrm{pr}_0)] \,, [\chi_N]\,\rangle= N$ coincides with the power of $U$ in \eqref{TT} and thus computes the ``winding number'', that is, the number of rotations of the transition function along the equator. \sigma}\def\hs{\sigmaubsection{Equivalence to the generic Hopf fibration of quantum SU(2)} Recall from Section \ref{qhf} that $\SUq=\oplus_{N\in{\mathbb Z}} M_N$, where $$ M_N:=\{\, p\in \SUq\,:\, \Delta_{\SUq} (p)= p\otimes U^N\,\}{\mathrm{co}}ng \podl^{|N|+1} P_N $$ with $P_N\in \mathrm{Mat}_{|N|+1,|N|+1}(\podl) $ given in Equation \eqref{PN}. For the definition of the ${\mathcal O}(\mathrm{U}(1))$-coaction $\Delta_{\SUq}= (\mathrm{id}\otimes\mathrm{pr}_s)\circ \Delta$, see Section \ref{qhf}. Since ${\mathcal O}(\mathrm{U}(1))$ can be embedded into its universal C*-algebra, which is isomorphic to ${\mathbb C}Sq$, we can turn $M_N$ into a left ${\mathbb C}Sq$-module by considering $\overline{M}_{\!N}:={\mathbb C}Sq^{|N|+1} P_N$. It has been shown in \cite{W}, that this left ${\mathbb C}Sq$-module is isomorphic to ${\mathbb C}Sq \chi_N$, and therefore to ${\mathbb C}Spq_N$. The aim of this section is to define a left ${\mathbb C}Sq$-module and right ${\mathcal O}(\mathrm{U}(1))$-comodule $P$ such that, for all $N\in{\mathbb Z}$, the line bundle associated to the 1-di\-men\-sional left corepresentation ${}_{\mathbb C}{\mathrm{co}}p(1)=U^N\otimes 1$ is isomorphic to $\overline{M}_{\!N}$. A natural idea would be to consider the embedding of $\SUq$ into ${\mathbb C}SU$ and to extend the right coaction $\Delta_{\SUq}$ to the C*-algebra closure. But then we face the problem that $\Delta_{\SUq}$ is merely a coaction and not an algebra homomorphism. If we impose at ${\mathcal O}(\mathrm{U}(1))$ the obvious multiplicative structure given by $U^NU^K=U^{N+K}$, and turn $\oplus_{N\in{\mathbb Z}} \overline{M}_{\!N}$ into a *-algebra such that the right ${\mathcal O}(\mathrm{U}(1))$-coaction becomes an algebra homomorphism, then the C*-closure of $\oplus_{N\in{\mathbb Z}} \overline{M}_{\!N}{\mathrm{co}}ng \oplus_{N\in{\mathbb Z}} {\mathbb C}Spq_N$ would be isomorphic to ${\mathbb C}Spq$ and not to ${\mathbb C}SU$. Note that there cannot be an isomorphism between ${\mathbb C}Spq$ and ${\mathbb C}SU$ since otherwise, by the pullback diagrams \eqref{CSUq} and \eqref{CSfp}, ${\mathbb C}S{\mathrm{co}}ng \ker(\mathrm{pr_1}){\mathrm{co}}ng \mathcal{T}\,\bar\otimes\,{\mathbb C}S$, a contradiction. Instead of extending the coaction $\Delta_{\SUq}$ to some closure of $\SUq$, we turn $\SUq$ into a left ${\mathbb C}Sq$-module by setting $P={\mathbb C}Sq \otimesimes_{\podl}\SUq$ and keeping the ${\mathcal O}(\mathrm{U}(1))$-coaction, now acting on the second tensor factor. Then it follows immediately that $$ P=\mathop{\oplus}_{N\in{\mathbb Z}} {\mathbb C}Sq\!\underset{\podl}{\otimesimes}\! M_N \ \ \text{and} \ \ {\mathbb C}Sq\underset{\podl}{\otimesimes} M_N=\{ p\in P\,:\, {\mathrm{co}}p_P(p)=p\otimes U^N\}. $$ Thus our aim will be achieved if we show that $\overline{M}_{\!N}{\mathrm{co}}ng {\mathbb C}Sq\otimesimes_{\podl} M_N$. 
For this, we prove that $P$, as a left ${\mathbb C}Sq$-module and right ${\mathcal O}(\mathrm{U}(1))$-comodule, is isomorphic to the following fibre product \begin{equation} \label{SUqfp} \xymatrix{ & \makebox[48pt][c]{$ \mathcal{T}\, \otimesimes\,{\mathcal O}(\mathrm{U}(1)) {\underset{(\Phi\circ \pi_1,\pi_2)}{\times}} \mathcal{T}\, \otimesimes\,{\mathcal O}(\mathrm{U}(1))$} \ar[dl]_{\mathrm{pr}_1} \ar[dr]^{\mathrm{pr}_2}& \\ \mathcal{T} \, \otimesimes\,{\mathcal O}(\mathrm{U}(1))\ar[d]_{\pi_1:=\sigma}\def\hs{\sigmaigma \otimesimes\mathrm{id} } & & \mathcal{T} \, \otimesimes\,{\mathcal O}(\mathrm{U}(1))\ar[d]^{\pi_2:=\sigma}\def\hs{\sigmaigma \otimesimes\mathrm{id}}\\ {\mathbb C}S\, \otimesimes\,{\mathcal O}(\mathrm{U}(1)) \ar[rr]_{\Phi} & & {\mathbb C}S\, \otimesimes\,{\mathcal O}(\mathrm{U}(1))\,. } \end{equation} Here $\Phi$ is defined by $\Phi(f\otimes U^N)=fU^N\otimes U^N$. Then, by comparing the pullback diagrams \eqref{CSfp} and \eqref{SUqfp} in the category of left ${\mathbb C}Sq$-modules and right ${\mathbb C}S$-comodules, it follows that \[ \label{cong} \overline{M}_{\! N}{\mathrm{co}}ng {\mathbb C}Spq_N{\mathrm{co}}ng P\,\mathcal{B}ox_{{\mathbb C}Sq}\, {\mathbb C}{\mathrm{co}}ng {\mathbb C}Sq\otimesimes_{\podl} M_N \] with the 1-dimensional corepresentation ${}_{{\mathbb C}Sq}{\mathrm{co}}p(1)=U^N\otimes 1$ on ${\mathbb C}$. For simplicity of notation, we set $$ \mathcal{A}:=\SUq,\quad \mathcal{B}:=\podl,\quad \ov{\B}} \def\CB{\ov{\B}:=\mathcal{T}\!\underset{(\sigma}\def\hs{\sigma,\sigma}\def\hs{\sigma)}{\times}\!\mathcal{T}{\mathrm{co}}ng {\mathbb C}Sq,\quad C:={\mathcal O}(\mathrm{U}(1)). $$ Recall that $\mathcal{B}$ can be embedded in $\mathcal{A}$ as well as in $\ov{\B}} \def\CB{\ov{\B}$, so both are $\mathcal{B}$-bimodules with respect to the multiplication. Moreover, the pullback diagram \eqref{TtimesT} provides us with *-algebra homomorphism $\mathrm{pr}_0:\ov{\B}} \def\CB{\ov{\B}\rightarrow\mathcal{T}$ and $\mathrm{pr}_1:\ov{\B}} \def\CB{\ov{\B}\rightarrow\mathcal{T}$ by projecting onto the left and right component, respectively. Perhaps it should here also be mentioned that $C$ is only considered as a coalgebra, not as an algebra. Let $v^N_{0},v^N_{1}, \ldots, v^N_{|N|} \in \mathcal{A}$ denote the matrix elements from the definition of $P_N$ in \eqref{PN}. Since the entries of $P_N$ belong to $\mathcal{B}$, we have $v^N_{j}v^{*N}_{k}\in\mathcal{B}$ for all $j,k=0,\ldots |N|$. The following facts are proven in \cite[Lemma 6.5]{SW}. \begin{lem} \label{L1} Let $l\in{\mathbb Z}$ and $k,m\in \{0,\ldots |l|\}$. \begin{enumerate} \item[(i)] For $l\geq 0$, the elements $\mathrm{pr}_1(v^{l}_{l}v^{l*}_{l})$ and $\mathrm{pr}_0(v^{l}_{0}v^{l*}_{0})$ are invertible in $\mathcal{T}$. \sigma}\def\hs{\sigmamallskip \item[(ii)] For $l< 0$, the elements $\mathrm{pr}_1(v^{l}_{0}v^{l*}_{0})$ and $\mathrm{pr}_0(v^{l}_{|l|}v^{l*}_{|l|})$ are invertible in $\mathcal{T}$. 
\smallskip
\item[(iii)] $\mathrm{pr}_1(v^{l}_{k}v^{l\ast}_{l})\, \mathrm{pr}_1(v^{l}_{l}v^{l\ast}_{l})^{-1}\, \mathrm{pr}_1(v^{l}_{l}v^{l\ast}_{m})=\mathrm{pr}_1(v^{l}_{k}v^{l\ast}_{m})$ \ \,and \smallskip\\
$\mathrm{pr}_0(v^{l}_{k}v^{l\ast}_{0}) \mathrm{pr}_0(v^{l}_{0}v^{l\ast}_{0})^{-1} \mathrm{pr}_0(v^{l}_{0}v^{l\ast}_{m})=\mathrm{pr}_0(v^{l}_{k}v^{l\ast}_{m})$\ \,for $l\geq 0$.\smallskip
\item[(iv)] $\mathrm{pr}_1(v^{l}_{k}v^{l\ast}_{0})\, \mathrm{pr}_1(v^{l}_{0}v^{l\ast}_{0})^{-1}\, \mathrm{pr}_1(v^{l}_{0}v^{l\ast}_{m})=\mathrm{pr}_1(v^{l}_{k}v^{l\ast}_{m})$\ \,and \smallskip\\
$\mathrm{pr}_0(v^{l}_{k}v^{l\ast}_{|l|}) \mathrm{pr}_0(v^{l}_{|l|}v^{l\ast}_{|l|})^{-1} \mathrm{pr}_0(v^{l}_{|l|}v^{l\ast}_{m})=\mathrm{pr}_0(v^{l}_{k}v^{l\ast}_{m})$\ \,for $l< 0$.
\end{enumerate}
\end{lem}
We can turn $\mathcal{T}$ into a $\overline{\mathcal{B}}$-bimodule in two ways, by setting either $a.t.b := \mathrm{pr}_0(a)\,t\,\mathrm{pr}_0(b)$ or $a.t.b := \mathrm{pr}_1(a)\,t\,\mathrm{pr}_1(b)$, where $a,b\in \overline{\mathcal{B}}$ and $t\in\mathcal{T}$. To distinguish between the two bimodules, we denote $\mathcal{T}$ equipped with the first action by $\mathcal{T}_-$, and write $\mathcal{T}_+$ if we use the second action. Clearly, as a left or right $\overline{\mathcal{B}}$-module, each is generated by $1\in\mathcal{T}$. The next proposition is the key step in proving \eqref{cong}.
\begin{prop} \label{TBM}
For every $l\in{\mathbb Z}$, the left $\overline{\mathcal{B}}$-modules $\mathcal{T}_\pm$ and $\mathcal{T}_\pm\,\otimes_\mathcal{B}\, M_l$ are isomorphic. The corresponding isomorphisms are given by
\begin{align*}
&\psi_{l,+}:\mathcal{T}_+\rightarrow\mathcal{T}_+\otimes_\mathcal{B} M_l,\quad \psi_{l,+}(t)= t\, \mathrm{pr}_0(v^{l}_{0}v^{l*}_{0})^{-1/2}\otimes_\mathcal{B} v^{l}_{0}, \quad l\geq 0,\\
&\psi_{l,-}:\mathcal{T}_-\rightarrow\mathcal{T}_-\otimes_\mathcal{B} M_l,\quad \psi_{l,-}(t)= t\, \mathrm{pr}_1(v^{l}_{l}v^{l*}_{l})^{-1/2}\otimes_\mathcal{B} v^{l}_{l}, \quad l\geq 0,\\
&\psi_{l,+}:\mathcal{T}_+\rightarrow\mathcal{T}_+\otimes_\mathcal{B} M_l,\quad \psi_{l,+}(t)= t\, \mathrm{pr}_0(v^{l}_{|l|}v^{l*}_{|l|})^{-1/2}\otimes_\mathcal{B} v^{l}_{|l|}, \quad l< 0,\\
&\psi_{l,-}:\mathcal{T}_-\rightarrow\mathcal{T}_-\otimes_\mathcal{B} M_l,\quad \psi_{l,-}(t)= t\, \mathrm{pr}_1(v^{l}_{0}v^{l*}_{0})^{-1/2}\otimes_\mathcal{B} v^{l}_{0}, \quad l< 0.
\end{align*}
The inverse isomorphisms satisfy, for all $k=0,1,\dots,|l|$,
\begin{align}
\label{psi+}
& \psi_{l,+}^{-1} (1\otimes_\mathcal{B} v^{l}_{k}) = \mathrm{pr}_0(v^{l}_{k}v^{l\ast}_{0}) \mathrm{pr}_0(v^{l}_{0}v^{l\ast}_{0})^{-1/2}, \quad l\geq 0,\\
& \psi_{l,-}^{-1} (1\otimes_\mathcal{B} v^{l}_{k}) =\mathrm{pr}_1(v^{l}_{k}v^{l\ast}_{l}) \mathrm{pr}_1(v^{l}_{l}v^{l\ast}_{l})^{-1/2}, \quad l\geq 0,\\
& \psi_{l,+}^{-1} (1\otimes_\mathcal{B} v^{l}_{k}) = \mathrm{pr}_1(v^{l}_{k}v^{l\ast}_{|l|})\, \mathrm{pr}_1(v^{l}_{|l|}v^{l\ast}_{|l|})^{-1/2}, \quad l< 0,\\
& \psi_{l,-}^{-1} (1\otimes_\mathcal{B} v^{l}_{k})=\mathrm{pr}_1(v^{l}_{k}v^{l\ast}_{0})\, \mathrm{pr}_1(v^{l}_{0}v^{l\ast}_{0})^{-1/2}, \quad l<0.
\end{align}
\end{prop}
\begin{proof}
We prove the proposition for $\psi_{l,+}$ with $l\geq 0$; the other cases are treated analogously. Since $\mathrm{pr}_0(v^{l}_{0}v^{l*}_{0})$ is positive and invertible, $\mathrm{pr}_0(v^{l}_{0}v^{l*}_{0})^{-1/2}\in\mathcal{T}$ is invertible, and thus $\psi_{l,+}$ is injective.
The left $\overline{\mathcal{B}}$-module $\mathcal{T}_+\,\otimes_\mathcal{B}\, M_l$ is generated by $1\otimes_\mathcal{B} v^{l}_{k}$, $k=0,1,\dots,l$ (cf.\ \cite[Theorem 4.1]{SW}). As $\psi_{l,+}$ is left $\overline{\mathcal{B}}$-linear, it suffices to prove that the elements $1\otimes_\mathcal{B} v^{l}_{k}$ belong to the image of $\psi_{l,+}$. Applying \eqref{1} and Lemma \ref{L1}(iii), we get
\begin{align*}
\nonumber
1\otimes_\mathcal{B} v^{l}_{k}&=\mbox{$\sum$}_j 1\otimes_\mathcal{B} v^{l}_{k} v^{l*}_{j} v^{l}_{j} =\mbox{$\sum$}_j \mathrm{pr}_0(v^{l}_{k} v^{l*}_{j} )\otimes_\mathcal{B} v^{l}_{j} \\
\nonumber
&=\mbox{$\sum$}_j \mathrm{pr}_0(v^{l}_{k}v^{l\ast}_{0}) \mathrm{pr}_0(v^{l}_{0}v^{l\ast}_{0})^{-1} \mathrm{pr}_0(v^{l}_{0} v^{l*}_{j})\otimes_\mathcal{B} v^{l}_{j}\\
\nonumber
&= \mbox{$\sum$}_j \mathrm{pr}_0(v^{l}_{k}v^{l\ast}_{0}) \mathrm{pr}_0(v^{l}_{0}v^{l\ast}_{0})^{-1} \otimes_\mathcal{B} v^{l}_{0} v^{l*}_{j}v^{l}_{j}\\
\nonumber
&= \mathrm{pr}_0(v^{l}_{k}v^{l\ast}_{0}) \mathrm{pr}_0(v^{l}_{0}v^{l\ast}_{0})^{-1} \otimes_\mathcal{B} v^{l}_{0} =\psi_{l,+}\big(\mathrm{pr}_0(v^{l}_{k}v^{l\ast}_{0}) \mathrm{pr}_0(v^{l}_{0}v^{l\ast}_{0})^{-1/2}\big).
\end{align*}
This proves the surjectivity of $\psi_{l,+}$ and Equation \eqref{psi+}.
\end{proof}
Using the last proposition and the decomposition $\mathcal{A}= \oplus_{N\in{\mathbb Z}} M_N$, we can define left $\overline{\mathcal{B}}$-linear, right $C$-colinear isomorphisms
$$
\Psi_- : \mathcal{T}_-\otimes_\mathcal{B} \mathcal{A} \rightarrow \mathop{\oplus}_{N\in{\mathbb Z}}\mathcal{T}_-\otimes U^N,\quad \Psi_+ : \mathcal{T}_+\otimes_\mathcal{B} \mathcal{A} \rightarrow \mathop{\oplus}_{N\in{\mathbb Z}}\mathcal{T}_+\otimes U^N
$$
by setting
\[ \label{tm}
\Psi_\pm (t\otimes_\mathcal{B} m_N)= \psi_{N,\pm}^{-1}(t\otimes_\mathcal{B} m_N)\,\otimes\, U^N,\qquad t\in\mathcal{T},\ \,m_N\in M_N.
\]
Next we define left $\overline{\mathcal{B}}$-linear, right $C$-colinear surjections
$$\mathrm{pr}_\pm : \overline{\mathcal{B}}\otimes_\mathcal{B} \mathcal{A} \rightarrow \mathcal{T}_\pm\otimes_\mathcal{B} \mathcal{A}, $$
by
\[ \label{prpm}
\mathrm{pr}_-((t_1,t_2)\otimes_\mathcal{B} a) := t_1\otimes_\mathcal{B} a, \quad \mathrm{pr}_+((t_1,t_2)\otimes_\mathcal{B} a) := t_2\otimes_\mathcal{B} a.
\]
Furthermore, we turn ${\mathbb C}S$ into a left $\overline{\mathcal{B}}$-module by defining $b.f:=\sigma(b)f$ for all $b\in\overline{\mathcal{B}}$ and $f\in {\mathbb C}S$. Now consider the following diagram in the category of left $\overline{\mathcal{B}}$-modules, right $C$-comodules:
\[ \label{cd}
\xymatrix{
\overline{\mathcal{B}}\otimes_\mathcal{B} \mathcal{A} \ar[d]_{\Psi_-\circ\mathrm{pr}_-} \ar[rr]^{\Psi_+\circ\mathrm{pr}_+ } & & \mathcal{T}_+\otimes C \ar[d]^{\sigma\otimes \mathrm{id}} \\
\mathcal{T}_-\otimes C \ar[rr]_{\Phi\circ(\sigma\otimes \mathrm{id})}& &{\mathbb C}S\otimes C, }
\]
where $\Phi$ is the same as in \eqref{SUqfp}.
\begin{lem} \label{L2}
The diagram \eqref{cd} is commutative, $\Psi_-\circ\mathrm{pr}_-$ and $\Psi_+\circ\mathrm{pr}_+ $ are sur\-jec\-tive and $\ker( \Psi_-\circ\mathrm{pr}_-)\cap \ker(\Psi_+\circ\mathrm{pr}_+ )=\{0\}$.
\end{lem}
\begin{proof}
Since all maps are left $\overline{\mathcal{B}}$-linear, it suffices to prove the lemma for generators of the left $\overline{\mathcal{B}}$-module $\overline{\mathcal{B}}\otimes_\mathcal{B} \mathcal{A}$.
Moreover, since $\mathcal{A}=\oplus_{N\in{\mathbb Z}}M_N$, we can restrict ourselves to the generators of the left $\mathcal{B}$-modules $M_N$. Let $l\geq 0$. Since $\sigma(\mathrm{pr}_0(f))=\sigma(f)=\sigma(\mathrm{pr}_1(f))$ for all $f\in\overline{\mathcal{B}}$ by \eqref{CSq}, we get from Equation \eqref{tm} and Lemma \ref{L1}
\begin{align}
\label{RX}
(\hs\otimes \mathrm{id})\circ \Psi_+\circ\mathrm{pr}_+ \big(1\otimes_\mathcal{B} v^{l}_{k} \big) &= \hs(v^{l}_{k}v^{l\ast}_{0}) \hs(v^{l}_{0}v^{l\ast}_{0})^{-1/2} \otimes U^l,\\
\label{L}
\Phi\circ(\hs\otimes \mathrm{id})\circ \Psi_-\circ\mathrm{pr}_-\big(1\otimes_\mathcal{B} v^{l}_{k} \big) &= \hs(v^{l}_{k}v^{l\ast}_{l}) \hs(v^{l}_{l}v^{l\ast}_{l})^{-1/2}U^l\otimes U^l.
\end{align}
By Lemma \ref{L1} (iii) (with $m=0$), we have
\[ \label{k0}
\hs(v^{l}_{k}v^{l\ast}_{0})= \hs(v^{l}_{k}v^{l\ast}_{l})\, \hs(v^{l}_{l}v^{l\ast}_{l})^{-1}\, \hs(v^{l}_{l}v^{l\ast}_{0}).
\]
Inserting the latter equation into \eqref{RX} and comparing with \eqref{L} shows that it suffices to prove
\[ \label{ul}
\hs(v^{l}_{l}v^{l\ast}_{l})^{-1/2}\, \hs(v^{l}_{l}v^{l\ast}_{0})\,\hs(v^{l}_{0}v^{l\ast}_{0})^{-1/2} = U^l.
\]
It follows from \cite[Lemma 2.2]{W} (with $v^{l}_{l}\sim u_l$ and $v^{l}_{0}\sim w_l$), or can be computed directly by using explicit expressions for $v^{l}_{0}$ and $v^{l}_{l}$, that $v^{l}_{l}v^{l\ast}_{0}\sim \eta_s^l$. From the embedding \eqref{etaT}, we deduce that $\eta_s^l$ has polar decomposition $\eta_s^l=(S^l,S^l) |\eta_s^l|$. Therefore we can write $v^{l}_{l}v^{l\ast}_{0}= (S^l,S^l) |v^{l}_{l}v^{l\ast}_{0}|$, which implies
$$
\hs(v^{l}_{l}v^{l\ast}_{0})= \hs(|v^{l}_{l}v^{l\ast}_{0}|)\, U^l.
$$
By comparing with \eqref{ul}, we see that it now suffices to verify
\[ \label{|v|}
\hs(v^{l}_{l}v^{l\ast}_{l})^{-1/2}\, \hs(|v^{l}_{l}v^{l\ast}_{0}|)\,\hs(v^{l}_{0}v^{l\ast}_{0})^{-1/2} = 1.
\]
Multiplying both sides of Equation \eqref{k0} (with $k=0$) by $\hs(v^{l}_{l}v^{l\ast}_{l})$ gives
$$
\hs(v^{l}_{0}v^{l\ast}_{0})\, \hs(v^{l}_{l}v^{l\ast}_{l})= \hs(v^{l}_{0}v^{l\ast}_{l})\, \hs(v^{l}_{l}v^{l\ast}_{0}).
$$
Thus
$$
\hs(|v^{l}_{l}v^{l\ast}_{0}|)= \hs\big((v^{l}_{0}v^{l\ast}_{l}v^{l}_{l}v^{l\ast}_{0})^{1/2}\big) =\big(\hs(v^{l}_{0}v^{l\ast}_{l})\hs(v^{l}_{l}v^{l\ast}_{0})\big)^{1/2} =\big(\hs(v^{l}_{0}v^{l\ast}_{0}) \hs(v^{l}_{l}v^{l\ast}_{l}) \big)^{1/2},
$$
which proves \eqref{|v|}. This concludes the proof of the commutativity of \eqref{cd} for $l\geq 0$. The case $l<0$ is treated analogously.
The surjectivity of $\Psi_-\circ\mathrm{pr}_-$ and $\Psi_+\circ\mathrm{pr}_+ $ follows from the bijectivity of $\Psi_\pm$ and the surjectivity of $\mathrm{pr}_\pm$. Suppose that $\sum_{k=1}^n(r_k,s_k)\otimes_\mathcal{B} a_k\in \ker( \Psi_-\circ\mathrm{pr}_-)\cap \ker(\Psi_+\circ\mathrm{pr}_+)$. Since $\Psi_+$ and $\Psi_-$ are isomorphisms, we get $\sum_{k=1}^n r_k\otimes_\mathcal{B} a_k=0$ and $\sum_{k=1}^n s_k\otimes_\mathcal{B} a_k=0$ by \eqref{prpm}. Hence $\sum_{k=1}^n(r_k,s_k)\otimes_\mathcal{B} a_k= \sum_{k=1}^n (r_k,0)\otimes_\mathcal{B} a_k+\sum_{k=1}^n (0,s_k)\otimes_\mathcal{B} a_k=0$, which proves the last claim of the lemma.
\end{proof}
We are now in a position to prove the main theorem of this section.
\begin{thm}
There is an isomorphism of left ${\mathbb C}Sq$-modules and right ${\mathcal O}(\mathrm{U}(1))$-comodules between the fibre product $\mathcal{T}\, \otimes\,{\mathcal O}(\mathrm{U}(1)) {\times}_{(\Phi\circ \pi_1,\pi_2)}\mathcal{T}\, \otimes\,{\mathcal O}(\mathrm{U}(1))$ from \eqref{SUqfp} and ${\mathbb C}Sq \otimes_{\podl}\SUq$. Moreover, the chain of isomorphisms in \eqref{cong} holds.
\end{thm}
\begin{proof}
Lemma \ref{L2} shows that ${\mathbb C}Sq \otimes_{\podl}\SUq$ is a universal object of the pullback diagram \eqref{cd}. Comparing \eqref{cd} and \eqref{SUqfp} shows that both pullback diagrams define, up to isomorphism, the same universal object, which proves the first part of the theorem. The first isomorphism in \eqref{cong} follows from the Murray-von Neumann equivalence of the corresponding projections, see \cite{W}. The second isomorphism follows from the above equivalence of pullback diagrams, and the last one from the fact that all mappings in \eqref{cd} are right ${\mathcal O}(\mathrm{U}(1))$-colinear.
\end{proof}
\subsection*{Acknowledgment}
The author thanks Piotr M.\ Hajac for many discussions on the subject. This work was financially supported by the CIC of the Universidad Michoacana and the European Commission grant PIRSES-GA-2008-230836.
\end{document}
\begin{document}
\title{Applying computational complexity\\to the emergence of classicality}
\author{Arkady Bolotin\footnote{Email: [email protected]} \\ \textit{Ben-Gurion University of the Negev, Beersheba (Israel)}}
\maketitle
\begin{abstract}\noindent Can the computational complexity theory of computer science and mathematics say something new about unresolved problems in quantum physics? In particular, can the \textbf{P} versus \textbf{NP} question of computational complexity theory be a factor in elucidating the emergence of classicality in quantum mechanics? The paper compares two different ways of deriving classicality from the quantum formalism, resulting from two differing hypotheses regarding the \textbf{P} versus \textbf{NP} question -- the approach of the quantum decoherence theory, which implies that \textbf{P} = \textbf{NP}, and the computational complexity approach, which assumes that \textbf{P} is not equal to \textbf{NP}.\\
\noindent \textbf{Keywords:} Computational complexity · Quantum measurements · Schrödinger equation · Decoherence · \textbf{P} versus \textbf{NP} question
\end{abstract}
\section{Introduction}
\noindent Let us consider a system of $N$ qubits (i.e. a quantum system comprised of $N$ entangled spin-½ particles). Such a system can be in an arbitrary superposition of up to $2^N$ different states simultaneously; therefore, in general, a quantum state of this system is specified by $2^N$ complex numbers, that is, probability amplitudes of the states (one for every possible outcome of measuring the spins in the \{0, 1\} basis). Hence, even for the modest value of, say, $N=500$ (this may be a system containing just a few hundred atoms), the number of amplitudes will be larger than the estimated number of atoms in the whole Universe. On the other hand, it is the time-dependent Schrödinger equation that describes how the system's quantum state (a vector in a space of dimension $2^{500}$) evolves with time. So the question becomes, ``How can Nature manipulate such enormous amounts of data that fast -- i.e. in parallel with the system's evolution?'' As Nielsen and Chuang put it, ``It is as if Nature were keeping $2^{500}$ hidden pieces of scratch paper on the side, on which she performs her calculations as the system evolves'' \cite{Nielsen}.\\
\noindent If Nature can do it, so can we. This was the essence of the visionary idea of a quantum computer suggested by Feynman -- to take advantage of Nature's (allegedly) enormous computational power in order to perform simulations of quantum mechanical systems that are extremely difficult to simulate on a classical computer \cite{Feynman}.\\
\noindent But let us for a while leave aside the questions of whether a large-scale quantum computer can ever be built or what the reason for the quantum computational speedup is (which are usually asked in connection with quantum computation) and instead ask this. What if Nature could not manipulate such enormous quantities of data in parallel with the system's evolution (or within any reasonable time) at all?
What if her computational power was actually limited in such a way that solving the Schrödinger equation for a system with a huge number of variables needed to describe any possible state of the system (like $2^{500}$ amplitudes) would be infeasible even for Nature, and thus this equation could be resolved in a short time only approximately?\\
\noindent Indeed, what evidence do we have that the Schrödinger equation is exactly solvable within the period of observation for any given system? As a matter of fact, there is plenty of evidence to the contrary. For one, fast and practically realizable exact analytical or numerical solutions to this equation, applicable to any given physical system and its associated potential energy, are unknown (see, for example, \cite{Ohya} or \cite{Popelier}). As a result, at present the only truly predictive algorithms for solving the Schrödinger equation are ones built on brute force. Inevitably, the explosion in computational work they lead to permits only approximate solutions. Moreover, even if there were a superfast algorithm able to exactly solve the Schrödinger equation for any system, including that of 500 qubits, by manipulating all $2^{500}$ classical bits in a short time $t<T$, any measurement performed after the system has advanced forward by time $T$ will with some probability retrieve a state specified by no more than 500 classical bits, just as in the case of some inexact algorithm which ignores most of the system's variables and in this way produces an approximate solution to the Schrödinger equation within period $t$.\\
\noindent So, since the statement about the vast computational resources of Nature is based on neither a provable proposition nor empirical data but on a mere conjecture \cite{Bernstein}, the what-if questions asked above should be considered at least reasonable to think about.\\
\noindent Obviously, what-if questions will only have answers if one makes some assumptions. This paper makes the assumption that there are no such vast computational resources -- in particular, that solving the Schrödinger equation for any given system is an intractable problem -- and investigates the role of this assumption in resolving the quantum measurement problem (also known in more recent literature as the problem of the emergence of classicality from quantum systems).\\
\noindent The paper is structured as follows. In Section 2 we discuss the computational complexity of the Schrödinger equation and show (using elementary arguments) that solving this equation for any given Hamiltonian is a problem at least as hard as the hardest problems in the \textbf{NP} complexity class, which in turn implies that this problem has no efficient algorithm unless the \textbf{P} complexity class is equal to \textbf{NP}. In Section 3 we present two different ways of resolving the quantum measurement problem based on differing assumptions regarding the \textbf{P} versus \textbf{NP} question -- the approach of the quantum decoherence theory, based on the assumption that \textbf{P} = \textbf{NP}, and our approach (called the computational complexity approach), based on the assumption that \textbf{P} $\neq$ \textbf{NP}.
Section 4 concludes the paper.\\
\section{Quantum computational reductionism}
\noindent To make our discussion of computational complexity more tangible, let us frame the following ``practical'' question:
\begin{quotation}
\noindent \textit{Given a potential} $V\!\!=\!\!V\!\!\left({{\mathbf r}}_1,\dots ,{{\mathbf r}}_N\right)$ \textit{of a system comprised of} $N$ \textit{spin-0 particles of masses} $m_1,\dots ,m_N$, \textit{and a certain level} $E_B$\textit{, is there a state of the system with energy }$E$ \textit{less than or equal to this level }$E_B$\textit{?}
\end{quotation}
\noindent In terms of computational complexity this yes-or-no question is \textit{the} \textit{decision} \textit{problem} (we will refer to it as the problem ${\Pi }_E$) that can be equivalently defined as the set $S_E$ of all inputs for which ${\Pi }_E$ returns 1 (i.e. `yes')
\begin{equation} \label{1}
S_E=\!\left\{
\begin{array}{cl}
\!\!\! N,m_1,\dots ,m_N,V,E_B: & {\Pi }_E\left(\! \displaystyle -\frac{{\hbar}^2}{2} \sum^N_{n=1} {\frac{{{\nabla }_n}^2}{m_n}}\left.\left|{\psi }_E\!\right.\right\rangle +V\!\!\left.\left|{\psi }_E\!\right.\right\rangle =E\!\!\left.\left|{\psi }_E\!\right.\right\rangle \wedge E\le E_B\!\right)\!\!=1 \!\!\!
\end{array}
\right\},
\end{equation}
\noindent where ${{\nabla }_n}^2$ denotes the Laplace operator and $\left.\left|{\psi }_E\!\right.\right\rangle $ represents the state of the system with the energy $E$. We will assume that the problem ${\Pi }_E$ is \textit{decidable} (that is, there is an algorithm for ${\Pi }_E$ that, instead of looping indefinitely, terminates after a finite amount of time and correctly returns a Boolean true value 1 or false value 0) and weigh up how difficult the problem ${\Pi }_E$ is to solve.\\
\noindent To begin with, let us notice that the solutions to the problem ${\Pi }_E$ can be quickly verified; namely, one can prove that the state $\left.\left|{\psi }_E\!\right.\right\rangle $ is indeed a solution to ${\Pi }_E$ for a particular instance by substituting $\left.\left|{\psi }_E\right.\right\rangle $ into the expression for ${\Pi }_E$ and then calculating the second derivative values of $\left.\left|{\psi }_E\right.\right\rangle $ (say, with automatic differentiation techniques \cite{Neidinger}) on a deterministic computing device in a polynomial number of steps $T\in O\!\left(N^c\right)$, where $c>0$ is a constant (that does not depend on the particular instance).\\
\noindent Such a property of the problem ${\Pi }_E$ -- verifiability by a deterministic computing device in a polynomial number of steps (i.e. \textit{in polynomial time}) -- suggests that ${\Pi }_E$ belongs to the \textbf{NP} complexity class containing all decision problems for which the instances where the answer is `yes' have efficiently verifiable proofs of the fact that the answer is indeed `yes' (``efficiently verifiable proof'' means a proof by a method each step of which is precisely predetermined and which is certain to produce the answer in a polynomial number of steps).\\
\noindent But can the problem ${\Pi }_E$ be not only efficiently verifiable but also efficiently solvable? Is there a polynomial-time algorithm for ${\Pi }_E$?
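\noindent As an aside, the verification step just described is easy to sketch numerically. The following toy example is an illustration only and is not part of the original argument: the one-dimensional harmonic potential, the grid, the candidate state and the bound $E_B$ are all assumptions chosen for demonstration.
\begin{verbatim}
import numpy as np

# Toy instance of Pi_E: one particle in 1-D, hbar = m = 1, V(x) = x^2 / 2.
x = np.linspace(-8.0, 8.0, 4001)
dx = x[1] - x[0]
V = 0.5 * x**2

# Candidate state |psi_E>: the Gaussian ground state, expected energy 1/2.
psi = np.exp(-x**2 / 2.0)
psi /= np.sqrt(np.trapz(psi**2, x))          # normalize

# Apply H = -1/2 d^2/dx^2 + V with a finite-difference Laplacian.
lap = (np.roll(psi, -1) - 2.0 * psi + np.roll(psi, 1)) / dx**2
lap[0] = lap[-1] = 0.0                       # crude boundary handling
H_psi = -0.5 * lap + V * psi

E = np.trapz(psi * H_psi, x)                 # energy of the candidate state
residual = np.trapz((H_psi - E * psi)**2, x) # how well H psi = E psi holds

E_B = 0.6
print(f"E = {E:.4f}, residual = {residual:.2e}, E <= E_B: {E <= E_B}")
\end{verbatim}
\noindent The sketch merely illustrates that checking a proposed state against the eigenvalue equation and the bound $E\le E_B$ takes work proportional to the size of the state's description, in line with the polynomial-time verifiability discussed above.\\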
Before answering that query, let us pose an alternative, more abstract, yes-or-no question:
\begin{quotation}
\noindent \textit{Given a Schrödinger Hamiltonian and an arbitrary condition, is there a solution to the Schrödinger equation subject to this condition?}
\end{quotation}
\noindent It is clear that this problem (we will call it the problem ${\Pi }_P$) is closely related to another one, namely, the \textit{function problem} ${\Phi }_{\psi }$, which is this:
\begin{quotation}
\noindent \textit{Given a Schrödinger Hamiltonian, what is a solution to the Schrödinger equation?}
\end{quotation}
\noindent Accordingly, the problem ${\Pi }_P$ can be defined as the set $S_P$ of inputs (i.e. various Hamiltonians and conditions) for which ${\Pi }_P$ returns 1
\begin{equation} \label{2}
S_P=\left\{
\begin{array}{cl}
H\!\left(t\right),\ P: & {\rm \ }{\Pi }_P\left(H\!\left(t\right)\!\left.\left|\psi \!\left(t\right)\!\right.\right\rangle =i\hbar \displaystyle \frac{\partial }{\partial t}\left.\left|\psi \!\left(t\right)\!\right.\right\rangle \ \wedge \ P\left(\left.\left|\psi \!\left(t\right)\!\right.\right\rangle \right)\right)=1
\end{array}
\right\}\;\;\;\; ,
\end{equation}
\noindent where $H\!\left(t\right)$ stands for the (in general, time-dependent) Hamiltonian operator and $\!\left.\left|\psi \!\left(t\right)\!\right.\right\rangle$ denotes the Schrödinger equation solution for which the condition $P\left(\left.\left|\psi \!\left(t\right)\!\right.\right\rangle \right)$ holds.\\
\noindent The relationship between the decision problem ${\Pi }_P$ and the function problem ${\Phi }_{\psi }$ is such that if ${\Phi }_{\psi }$ were computable (according to the Church-Turing thesis \cite{Amram}, this means ``if the function $\!\left.\left|\psi \!\left(t\right)\!\right.\right\rangle$ had an algorithm''), then ${\Pi }_P$ would be decidable. Furthermore, if the problem ${\Phi }_{\psi }$ were efficiently solvable, then the problem ${\Pi }_P$ would be as well.\\
\noindent Let us assume that the decision problem ${\Pi }_P$ is decidable. Then solving an instance of this problem would mean setting up the Hamiltonian for a system (accounting for the kinetic and potential energies of the particles constituting the system), inserting $H\left(t\right)$ into the Schrödinger equation, solving the resulting (in general, time-dependent) partial differential equation for the quantum state $\!\left.\left|\psi \!\left(t\right)\!\right.\right\rangle$, and deciding whether the ensuing solution $\!\left.\left|\psi \!\left(t\right)\!\right.\right\rangle$ satisfies the condition $P\left(\left.\left|\psi \!\left(t\right)\!\right.\right\rangle \right)$ imposed on the positions, momenta or other physical properties of the system's constituent particles.\\
\noindent Clearly, the problem ${\Pi }_P$ can be quickly modified into the problem ${\Pi }_E$, since the modifying procedure $f:\ S_P\to S_E$, which straightforwardly transforms the problem ${\Pi }_P$ into the problem ${\Pi }_E$
\begin{equation} \label{3}
\begin{array}{rcl}
{f:\ S}_P & \to & S_E \\
H\left(t\right) & \mapsto & H= \displaystyle -\frac{{\hbar}^2}{2} \sum^N_{n=1}{\frac{{{\nabla }_n}^2}{m_n}}+V \\
P\left(\left.\left|{\psi }_E\right.\right\rangle \right) & \mapsto & E\le E_B
\end{array}
\;\;\;\; ,
\end{equation}
\noindent can obviously be executed in a polynomial number of steps.
This means that the problem ${\Pi }_E$ can be solved using the algorithm for solving ${\Pi }_P$, and for this reason the problem ${\Pi }_E$ is \textit{reducible} to ${\Pi }_P$ (which is intuitively understandable since ${\Pi }_E$ is no more difficult than ${\Pi }_P$). As a consequence, if the problem ${\Pi }_P$ had a polynomial-time algorithm, the problem ${\Pi }_E$ would be efficiently solvable too.\\
\noindent At this point, let us invoke the possibility of encoding a specific instance of a given decision problem in a Hamiltonian. Explicitly, let us consider an adiabatically evolving system characterized by the Hamiltonian $H\!\left(t\right)$, which is slowly varying, so that at any instant of time $t$ the system remains in the state $\!\left.\left|\psi \!\left(t\right)\!\right.\right\rangle$ close to the instantaneous ground state $\!\left.\left|{\psi }_g\!\left(t\right)\!\right.\right\rangle$ of the Hamiltonian $H\!\left(t\right)$. Suppose we choose $H\!\left(t\right)$ so that at time $t=0$ the ground state $\!\left.\left|{\psi }_g\!\left(0\right)\!\right.\right\rangle $ of $H\!\left(0\right)$ encodes an input of some decision problem ${\Pi }_C$ (so $\!\left.\left|{\psi }_g\!\left(0\right)\!\right.\right\rangle $ is known in advance and the system can be easily prepared in $\!\left.\left|{\psi }_g\!\left(0\right)\!\right.\right\rangle $), whereas at time $t=T$ the system's Hamiltonian $H\!\left(T\right)$ coincides with the Hamiltonian $H_C$ whose ground state $\!\left.\left|{\psi }_g\!\left(T\right)\!\right.\right\rangle $ is unknown and encodes the solution to the problem ${\Pi }_C$. If $H\!\left(0\right)$ and $H_C$ are easy to specify, then using the modifying procedure $g:\ S_P\to S_C$
\begin{equation} \label{4}
\begin{array}{rcl}
g:\ S_P & \to & S_C \\
H\!\left(t\right) & \mapsto & \displaystyle \tilde{H}\!\left(t\right)\cong \!\left(1-\frac{t}{T}\right)\!H\!\left(0\right)+\frac{t}{T}H_C \\
P\!\left(\left.\left|\psi \!\left(0\right)\!\right.\right\rangle \right) & \mapsto & \left.\left|\psi \!\left(0\right)\!\right.\right\rangle =\left.\left|{\psi }_g\!\left(0\right)\!\right.\right\rangle
\end{array}
\end{equation}
\noindent (where $S_C$ is the set of inputs of ${\Pi }_C$ for which ${\Pi }_C$ returns 1) the algorithm for solving ${\Pi }_P$ can be quickly modified to solve ${\Pi }_C$ (to be precise, to solve the Schrödinger equation $\tilde{H}\!\left(t\right)\!\left.\left|\psi \!\left(t\right)\!\right.\right\rangle ={\mathcal E}\!\left(t\right)\left.\left|\psi \!\left(t\right)\!\right.\right\rangle $ for the system's state $\left.\left|\psi \!\left(t\right)\!\right.\right\rangle $, which at time $t=T$ will be close to the ground state $\left.\left|{\psi }_g\!\left(T\right)\!\right.\right\rangle $ encoding the solution to the given problem ${\Pi }_C$, under the condition that at the initial time $t=0$ the ground state $\left.\left|{\psi }_g\left(0\right)\!\right.\right\rangle $ is known).\\
\noindent As was demonstrated in \cite{Farhi}, the Hamiltonians $H\!\left(0\right)$ and $H_C$ are straightforward to construct if the decision problem ${\Pi }_C$ is the \textit{Exact Cover}, an \textbf{NP}-complete problem.
Since this particular \textbf{NP}-complete problem can be solved using the algorithm for ${\Pi }_P$, it immediately implies that \textit{every problem} in the \textbf{NP} complexity class is reducible (in a polynomial number of steps) to ${\Pi }_P$; in other words, it implies that the decision problem ${\Pi }_P$ is \textbf{NP}-complete.\\
\noindent What is more, this implies that the function problem ${\Phi }_{\psi }$ of solving the Schrödinger equation is \textbf{NP}-hard, which means that if we had a polynomial-time algorithm (on a deterministic computing device) for finding the solutions to the Schrödinger equation for any given Hamiltonian, we could solve all problems in the \textbf{NP} complexity class in polynomial time.\\
\noindent If such a \textit{quantum computational reductionism} did really take place (i.e. if there were a polynomial algorithm for the \textbf{NP}-hard problem ${\Phi }_{\psi }$), then the complexity class \textbf{P} (which is the set of decision problems solvable on a deterministic computing device within polynomial time) would be equal to the class \textbf{NP}; otherwise, \textbf{P} $\neq$ \textbf{NP}.\\
\noindent In the next section, we will see what role this \textbf{P} versus \textbf{NP} question might play in the resolution of the measurement problem.\\
\section{Two approaches to the measurement problem}
\noindent We now proceed to present, one by one, two approaches to the measurement problem that are based (tacitly or explicitly) on the opposing assumptions regarding the \textbf{P} versus \textbf{NP} question -- the first is that \textbf{P} = \textbf{NP}, and the next is that \textbf{P} $\neq$ \textbf{NP}.\\
\subsection{Quantum decoherence approach}
\noindent Let us consider a Stern-Gerlach experiment, in which an initial spin eigenstate $\left.\left|{\psi }_0\!\right.\right\rangle $ with the eigenvalue $s_x=½$ along the x-axis is separated (by means of a magnetic field that is inhomogeneous along the z-axis) into two orthonormal states $\left.\left|0\right.\right\rangle $ and $\left.\left|1\right.\right\rangle $ that have the respective eigenvalues $s_z=½$ and $s_z=-½$ along the z-axis:
\begin{equation} \label{5}
\left.\left|{\psi }_0\!\right.\right\rangle =\frac{1}{\sqrt{2}}\left(\left.\left|0\!\right.\right\rangle +\left.\left|1\!\right.\right\rangle \right)\;\;\;\; ;
\end{equation}
\noindent thus, at the detector one sees either $\left.\left|0\!\right.\right\rangle $ or $\left.\left|1\!\right.\right\rangle $, with the probability of observing the eigenvalue $s_x=½$ again equal to
\begin{equation} \label{6}
P\left(s_x=½\right)=\frac{1}{2}\;\;\;\; .
\end{equation}
\noindent The quantum decoherence approach to the measurement problem goes as follows: Since there is no consistent formalism to describe the interaction between a quantum and a classical system, and since quantum mechanics is a universally applicable theory, every system is basically quantum mechanical. Therefore, to have a consistent theory of measurement, we must treat the detector ${\mathcal A}$ (the measurement apparatus) quantum mechanically.
Accordingly, we introduce a Hilbert space ${{\mathcal H}}_{{\mathcal A}{\rm \ }}$ for the detector ${\mathcal A}$ and assume that the orthonormal basis vectors for ${\mathcal A}$ are represented by the exact solutions $\left.\left|\epsilon_k\!\right.\right\rangle $ to the many-body Schrödinger equation with the Hamiltonian $H_{{\mathcal A}}$ describing different configurations $k$ of the detector's $N$ constituent microscopic particles (like different sets of their spatial positions ${{\mathbf r}}_1,{{\mathbf r}}_2,\dots ,{{\mathbf r}}_N$ at the instant of time $t$):
\begin{equation} \label{7}
i\hbar \frac{\partial }{\partial t}\left.\left|\epsilon_k\!\right.\right\rangle =\left[\sum^N_{n=1}{\frac{{{{\mathbf p}}_n}^2}{2m_n}}+\frac{1}{2M}\sum^N_{j\ne l}{{{\mathbf p}}_j{{\mathbf p}}_l}+{\widehat{{\rm H}}}_{{\rm s}}+V\!\left({{\mathbf r}}_1,{{\mathbf r}}_2,\dots ,{{\mathbf r}}_N,t\right)\right]\!\left.\left|\epsilon_k\!\right.\right\rangle \equiv H_{{\mathcal A}}\!\left.\left|\epsilon_k\!\right.\right\rangle \;\;\;\; ,
\end{equation}
\noindent where the terms of the form ${{\mathbf p}}_j{{\mathbf p}}_l$ (known as mass polarization terms) are due to the dependence of the kinetic energy on the spatial configuration of the mutually interacting constituent particles ($M$ denotes the mass of the collection of the particles resulting in this extra kinetic energy), ${\widehat{{\rm H}}}_{{\rm s}}$ is the term accounting for the presence of the constituent particles' spins (this term may include spin-orbit coupling, spin-rotation coupling and spin-spin coupling), and the index $k$ can be continuous, a discrete one, or a mixture of continuous and discrete indexes (in which case the meaning of orthonormality of the basis vectors $\left.\left|\epsilon_k\!\right.\right\rangle $ may turn out to be ambiguous; however, that detail is inessential for the purposes of this section).\\
\noindent The assumption that the possible states of the macroscopic detector ${\mathcal A}$ are represented by the Hilbert space ${{\mathcal H}}_{{\mathcal A}{\rm \ }}$, whose unit vectors are the exact solutions $\left.\left|\epsilon_k\!\right.\right\rangle $ to the detector's Schrödinger equation, is dictated by the analogy with the Hilbert space ${{\mathcal H}}_{{\mathcal S}{\rm \ }}$ for the observed quantum microscopic system ${\mathcal S}$ -- the spin-$½$ test-particle of mass $m$ and charge $q$ -- spanned by the orthonormal basis vectors $\left.\left|0\!\right.\right\rangle $ and $\left.\left|1\!\right.\right\rangle $
\begin{equation} \label{8}
\begin{array}{c}
\left[\!\!
\begin{array}{c}
1 \\
0
\end{array}
\!\!\right]=\left.\left|0\right.\right\rangle \\
\left[\!\!
\begin{array}{c}
0 \\
1
\end{array}
\!\!\right]=\left.\left|1\right.\right\rangle
\end{array}
\;\;\;\; ,
\end{equation}
\noindent which are the exact solutions to the Schrödinger equation with the Hamiltonian $H_{{\mathcal S}}$ describing the test-particle flowing through the external inhomogeneous magnetic field ${\mathbf B}$ of the Stern-Gerlach device
\begin{equation} \label{9}
i\hbar \frac{\partial }{\partial t}
\left( \!\!\!\!
\begin{array}{c}
\left.\left|0\right.\right\rangle \\
\left.\left|1\right.\right\rangle
\end{array}
\!\!\!\! \right)=
\left[\left(\frac{{\left({\mathbf p}-q{\mathbf A}\right)}^2}{2m}+q\phi\right){\hat{1}}_{\left[2×2\right]}-\frac{q\hbar }{2m}{\mathbf \sigma }\cdot {\mathbf B}\right]
\left( \!\!\!\!
\begin{array}{c}
\left.\left|0\right.\right\rangle \\
\left.\left|1\right.\right\rangle
\end{array}
\!\!\!\! \right)
\equiv H_{{\mathcal S}}
\left( \!\!\!\!
\begin{array}{c}
\left.\left|0\right.\right\rangle \\
\left.\left|1\right.\right\rangle
\end{array}
\!\!\!\! \right) \;\;\;\; ,
\end{equation}
\noindent where the electromagnetic field is defined by the three-component vector potential ${\mathbf A}$ and scalar electric potential $\phi$, while ${\mathbf \sigma }$ is the three-component vector of the Pauli $2×2$ matrices and ${\hat{1}}_{\left[2×2\right]}$ is the $2×2$ identity matrix.\\
\noindent In the conventional treatment, one treats the combined system ${\mathcal S}+{\mathcal A}$ as a closed quantum system (ignoring the environment) with the Hilbert space
\begin{equation} \label{10}
{\mathcal H}={{\mathcal H}}_{{\mathcal S}{\rm \ }}\!\!\otimes\!\! \ {{\mathcal H}}_{{\mathcal A}{\rm \ }} \;\;\;\; .
\end{equation}
\noindent Writing the total Hamiltonian of the particle-detector combined system ${\mathcal S}+{\mathcal A}$ as
\begin{equation} \label{11}
H=H_{{\mathcal S}}+H_{{\mathcal A}}+H_{{\rm int}} \;\;\;\; ,
\end{equation}
\noindent a standard way to express the interaction term $H_{{\rm int}}$ in the Hamiltonian $H$ is to employ the interaction of the von Neumann form
\begin{equation} \label{12}
H_{{\rm int}}= \left(\!\left.\left|0\!\right.\right\rangle \!\left\langle \left.\!0\right|\right.\!\right)\!\otimes \!\left(\sum_k{A_k\!\left.\left|\epsilon_k\!\right.\right\rangle \!\left\langle \left.\!\epsilon_k\right|\right.}\!\right) + \left(\!\left.\left|1\!\right.\right\rangle \!\left\langle \left.\!1\right|\right.\!\right)\!\otimes \!\left(\sum_k{B_k\!\left.\left|\epsilon_k\!\right.\right\rangle \!\left\langle \left.\!\epsilon_k\right|\right.}\!\right)
\end{equation}
\noindent (in which $\left.\left|\epsilon_k\!\right.\right\rangle \!\left\langle \left.\!\epsilon_k\right|\right.$ are the operators acting on ${{\mathcal H}}_{{\mathcal A}{\rm \ }}$, while $A_k$ and $B_k$ are the definite interaction energies of the $k^{th}$ configuration of the detector's constituent particles for the test-particle's eigenstates $\left.\left|0\!\right.\right\rangle $ and $\left.\left|1\right.\right\rangle $, respectively) and stipulate that during the interval $[t_i,t_f]$ of the interaction, the interaction term $H_{{\rm int}}$ in the Hamiltonian $H$ dominates over the other two terms, so that, effectively, $H\approx H_{{\rm int}}$.\\
\noindent Let $\left.\left|\epsilon_0\right.\right\rangle $
\begin{equation} \label{13}
\left.\left|\epsilon_0\right.\right\rangle =\sum_k{a_k\!\left.\left|\epsilon_k\right.\right\rangle }
\end{equation}
\noindent be the initial state of the detector and $U\left(t_f,t_i\right)$
\begin{equation} \label{14}
U\left(t_f,t_i\right)\ ={\rm exp}\left(-\frac{i\tau }{\hbar }H_{{\rm int}}\right)\approx I-\frac{i\tau }{\hbar }H_{{\rm int}}
\end{equation}
\noindent be the evolution operator for the macroscopic system ${\mathcal S}+{\mathcal A}$ for the short duration $\tau =t_f-t_i$ of the interaction. If at time $t_i$, i.e. before the interaction takes place, the state of the combined system ${\mathcal S}+{\mathcal A}$ is the direct product of the test-particle state and the detector state
\begin{equation} \label{15}
\left.\left|{\Psi }_i\right.\right\rangle = \left.\left|{\psi }_0\right.\right\rangle \!\otimes\! \left.\left|\epsilon_0\right.\right\rangle = \frac{1}{\sqrt{2}}\left(\left.\left|0\right.\right\rangle +\left.\left|1\right.\right\rangle \right)\!\otimes\! \sum_k{a_k\!\left.\left|\epsilon_k\!\right.\right\rangle } \;\;\;\; ,
\end{equation}
\noindent then linearity of the evolution operator implies that at time $t_f$, i.e. after the interaction has happened, we must get the equation
\begin{equation} \label{16}
U\left(t_f,t_i\right)\!\left.\left|{\Psi }_i\right.\right\rangle = \sum_k{\left( \frac{1}{\sqrt{2}}\,a_k\!\left.\left|0\right.\right\rangle \!\otimes\! \left.\left|\epsilon_k\right.\right\rangle e^{-\frac{i\tau }{\hbar }A_k} + \frac{1}{\sqrt{2}}\,a_k\!\left.\left|1\right.\right\rangle \!\otimes\! \left.\left|\epsilon_k\right.\right\rangle e^{-\frac{i\tau }{\hbar }B_k} \!\right)} \;\;\;\; ,
\end{equation}
\noindent where the right hand side is a superposition of the quantum states of the macroscopic system ${\mathcal S}+{\mathcal A}$ (in which the detector ``sees'' the test-particle in both $\left.\left|0\right.\right\rangle $ and $\left.\left|1\right.\right\rangle $ states at the same time).\\
\noindent According to the Born probability rule, to compute the probability $P\left(s_x=½\right)$ of observing the spin eigenvalue $s_x=½$ for the test-particle that has made a quantum leap from the initial state $\left.\left|{\psi }_0\right.\right\rangle $ to the final state $\left.\left|{\Psi }_f\right.\right\rangle {\rm =}U\left(t_f,t_i\right)\!\left.\left|{\Psi }_i\right.\right\rangle $, we have to calculate the modulus squared of the scalar product of these two states:
\begin{equation} \label{17}
P\left(s_x=½\right)= {\left|\left\langle {\psi }_0\!\mathrel{\left|\!\vphantom{{\psi }_0 {\Psi }_f}\right.\kern-\nulldelimiterspace}{\Psi }_f\right\rangle \right|}^2= \sum_k{{\left|\left\langle \left.{\psi }_0\right|\right.\!\otimes\! \left\langle \left.\epsilon_k\right|\!\left.\left|{\Psi }_f\right.\right\rangle \right.\right|}^2}= \sum_k{{\left|a_k\right|}^2{{\cos }^2 \left(\frac{\left(A_k-B_k\right)}{2\hbar }\tau \right)\ }} \;\;\;\; .
\end{equation}
\noindent Now, let us recall that as a macroscopic object, the detector has an enormous ``volume'' available to it in the Hilbert space ${{\mathcal H}}_{{\mathcal A}{\rm \ }}$ corresponding to the detector's microscopic degrees of freedom (which -- due to the interaction between the detector's $N$ internal microscopic particles -- even with the most coarse-grained discretization would be of the same magnitude as a double exponential of $N$). This fact might be seen as implying the practical impossibility of accurately keeping track of the superposition coefficients $a_k$ as well as the interaction energies $A_k$ and $B_k$. So, if initially the detector was in the superposition (\ref{13}) of states such that $a_k\ne 0$ for many values of $k$, then -- \textit{on the assumption that the coefficients }$a_k$\textit{ and the interaction energies }$A_k$\textit{ and }$B_k$\textit{ are distributed randomly} -- after a very short period of time the argument of the cosine squared would likely take on several essentially random values.
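\noindent The effect of such random phases can be checked with a short numerical experiment. The following minimal sketch is an illustration only: the distributions of $a_k$, $A_k$ and $B_k$, the number of configurations $K$ and the value of $\tau$ are assumptions made for demonstration, not quantities taken from the text.
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(0)
hbar, tau, K = 1.0, 1.0, 10_000

# Random initial detector coefficients a_k, normalized so that sum |a_k|^2 = 1.
a = rng.normal(size=K) + 1j * rng.normal(size=K)
w = np.abs(a)**2
w /= w.sum()

# Random interaction energies; all that matters is (A_k - B_k) * tau / hbar >> 1.
A = rng.uniform(0.0, 1e3, size=K)
B = rng.uniform(0.0, 1e3, size=K)

P = np.sum(w * np.cos((A - B) * tau / (2.0 * hbar))**2)
print(f"P(s_x = 1/2) ~ {P:.4f}")   # close to 1/2
\end{verbatim}
\noindent For essentially any choice of broad distributions the printed value is close to $1/2$, in agreement with the replacement made below.\\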
Hence, afterward the weighted average of the cosine squared (with normalized weights $\sum_k{{\left|a_k\right|}^2}=1$) can be replaced by the overall average of the squared sinusoid
\begin{equation} \label{18}
\sum_k{{\left|a_k\right|}^2{{\cos }^2 \left(\frac{\left(A_k-B_k\right)}{2\hbar }\tau \right)\ }}\ {{\underset{\left(\frac{\left(A_k-B_k\right)}{2\hbar }\tau \right)\ \to \ \infty }{\longrightarrow}}}\ \frac{1}{2}
\end{equation}
\noindent giving approximately the classical value to the probability $P\left(s_x=½\right)$
\begin{equation} \label{19}
P\left(s_x=½\right)=\overline{{{\cos }^2 \left(\frac{\left(A_k-B_k\right)}{2\hbar }\tau \right)\ }}\approx \frac{1}{2} \;\;\;\; .
\end{equation}
\noindent The procedure of averaging the cosine-squared random values over all the possible configurations $k$ (i.e. all the possible sets of the spatial positions) of the detector's microscopic constituent particles can be interpreted as ignoring the detector's microscopic degrees of freedom (which are uncontrolled and unmeasured). Seemingly, this is comparable to the procedure of deriving probability $½$ for `heads' (as well as for `tails') in the experiment of tossing a fair coin by averaging over the uncontrolled and unmeasured degrees of freedom of the environment of the coin \cite{Bub}. However, these two procedures are substantially different. In the coin toss experiment, if we take appropriate environmental parameters into consideration, a definite outcome can be predicted. In the case of the Stern-Gerlach experiment, by contrast, we cannot claim that taking the detector's microscopic degrees of freedom into consideration would allow a definite outcome of the experiment to be predicted. In fact, by doing so we will only get back the non-classical probability (\ref{17}) resulting from the superposition (\ref{16}) of the quantum states of the combined macroscopic system ${\mathcal S}+{\mathcal A}$.\\
\subsection{Computational complexity approach}
\noindent Unlike the equation (\ref{9}) describing the single microscopic test-particle, the Schrödinger equation (\ref{7}) describing $N$ mutually interacting microscopic particles (which constitute a many-body situation) is neither known nor believed to have exact generic \textit{analytical} solutions (i.e. exact solutions, applicable to an arbitrary many-body system, constructed using well-known operations that lend themselves readily to the calculation of outputs within the short interaction time $\tau $) \cite{Mattis}. Moreover, if the \textbf{NP}-hard problem ${\Phi }_{\psi }$ of finding the solutions to the Schrödinger equation for an arbitrary Hamiltonian were intractable (i.e., if \textbf{P} $\neq$ \textbf{NP}), then the computational effort to solve the Schrödinger equation for an arbitrary system would, in general, scale exponentially with the system's constituent particle number. It would then follow that, due to the huge number $N$ of the constituent microscopic particles comprising the macroscopic detector, exact generic \textit{numerical} solutions to the equation (\ref{7}) would be impossible to reach not only within the interaction time $\tau $ but within any reasonable amount of time at all (this task would necessarily require vast computational resources that even Nature does not have).
Hence, in the case of \textbf{P} $\neq$ \textbf{NP} we cannot treat the macroscopic detector quantum mechanically, since in that case the orthonormal basis vectors $\left.\left|\epsilon_k\right.\right\rangle $ that span the Hilbert space ${{\mathcal H}}_{{\mathcal A}{\rm \ }}$ for the detector ${\mathcal A}$ cannot be obtained by any practical means. (This inference may explain why there is a limitation in the application of quantum mechanics to a macroscopic world constituted by small particles obeying the quantum laws.)\\
\noindent On the other hand, an intractable problem can surely be solved in a short time, but only if the problem's input is small. This might happen if a system has a small number of degrees of freedom (like a toy model or a microscopic system completely isolated from the environment) or if the system has just a few effective -- i.e. controlled or measured -- degrees of freedom among many others that are completely ignored (and thus uncontrolled and unmeasured); in the latter case, however, the solution might be only inexact (i.e. with a degree of uncertainty) since the description of the system would be incomplete.\\
\noindent From here, we can infer that to be able to explain the interaction of the microscopic test-particle with the macroscopic detector quantum mechanically, we must ignore the detector's microscopic degrees of freedom in the interacting system ${\mathcal S}+{\mathcal A}$, because only then would the Schrödinger equation with the interaction Hamiltonian $H_{{\rm int}}$ be able to quickly (i.e., in a time not longer than $\tau $) produce the solution $\left.\left|{\psi }_{\tau }\right.\right\rangle $, albeit an inexact one, which would describe (approximately) the final state of the test-particle in practical terms.\\
\noindent An obvious way to do this -- while remaining within the von Neumann measurement scheme -- is to allow significant uncertainties in the interaction energies $A_k$ and $B_k$ associated with the configurations of the detector's microscopic constituent particles such that
\begin{equation} \label{20}
\forall k:\ \ \ \ A_k\equiv \tilde{A}+{\alpha }_k\left(\omega \right)\ \ \ ,\ \ \ \ B_k\equiv \tilde{B}+{\beta }_k\left(\omega \right) \;\;\;\; ,
\end{equation}
\noindent where $\tilde{A}$ and $\tilde{B}$ are the assigned ``best guess'' estimates for these interaction energies (roughly calculated as proportional to the number of electrons in the detector and inversely proportional to the distance between the test-particle and the detector, since the interaction is assumed to be due to the Coulomb force), while ${\alpha }_k\left(\omega \right)$ and ${\beta }_k\left(\omega \right)$ are their uncertainties -- real-valued random (stochastic) functions of equal distribution
\begin{equation} \label{21}
\forall k:\ \ \ \ {\alpha }_k\left(\omega \right)\ \sim \ \alpha \left(\omega \right){\rm \ ,\ \ \ \ }\ {\beta }_k\left(\omega \right)\ \sim \ \beta \left(\omega \right)
\end{equation}
\noindent defined on a set of possible outcomes, the sample space $\Omega $, as
\begin{equation} \label{22}
\left\{\ \omega \in \Omega :\ \ \ \left|\alpha \left(\omega \right)\right|\le \tilde{A}{\rm \ ,\ \ \ }\left|\beta \left(\omega \right)\right|\le \tilde{B}\ \right\} \;\;\;\; .
\end{equation}
\noindent Indeed, permitting such uncertainties in the measurement theory would mean that the interaction energies -- $A_k$ and $A_j$, or $B_k$ and $B_j$ -- identified with the different $k\ne j$ sets of spatial positions of the detector's constituent particles would be impossible to differentiate in practical terms. In other words, it would mean that the probability of a certain interaction energy $E_{{\rm int}}$ would be the same for the different configurations $k\ne j$:
\begin{equation} \label{23}
\forall E_{{\rm int}},k\ne j:\ \ \ \ \ P\left(A_k\le E_{{\rm int}}\right)=P\left(A_j\le E_{{\rm int}}\right){\rm \ \ ,\ }\ \ P\left(B_k\le E_{{\rm int}}\right)=P\left(B_j\le E_{{\rm int}}\right) \;\;\;\; .
\end{equation}
\noindent This implies that for all practical purposes in the theory of measurement the random variables $A_k$ and $\tilde{A}+\alpha \left(\omega \right)$ would be equal in distribution, i.e., $A_k\ \sim \ \tilde{A}+\alpha \left(\omega \right)$; the same holds for the random variables $B_k$ and $\tilde{B}+\beta \left(\omega \right)$: $B_k\ \sim \ \tilde{B}+\beta \left(\omega \right)$. This gives the following stochastic equalities
\begin{equation} \label{24}
\sum_k{A_k\left.\left|\epsilon_k\right.\right\rangle \!\left\langle \left.\epsilon_k\right|\right.}\ \sim \ \left(\tilde{A}+\alpha \left(\omega \right)\right)\hat{1}{\rm \ \ \ ,}\ \ \ \sum_k{B_k\left.\left|\epsilon_k\right.\right\rangle \!\left\langle \left.\epsilon_k\right|\right.}\ \sim \ \left(\tilde{B}+\beta \left(\omega \right)\right)\hat{1} \;\;\;\; ,
\end{equation}
\noindent where $\sum_k{\left.\left|\epsilon_k\right.\right\rangle \!\left\langle \left.\epsilon_k\right|\right.=\hat{1}}$ is the identity operator. When substituted into the von Neumann form (\ref{12}), these equalities bring out a stochastic expression for the interaction Hamiltonian $H_{{\rm int}}$ that does not contain the detector's microscopic degrees of freedom
\begin{equation} \label{25}
H_{{\rm int}}\!\left(\omega \right)\ \sim \ \left[\left(\tilde{A}+\alpha \!\left(\omega \right)\right)\!\left.\left|0\!\right.\right\rangle \!\left\langle \left.0\right|\right.\!\otimes \!\hat{1} +\left(\tilde{B}+\beta \!\left(\omega \right)\right)\!\left.\left|1\!\right.\right\rangle \!\left\langle \left.1\right|\right.\!\otimes \!\hat{1}\right] \;\;\;\; .
\end{equation}
\noindent This feature renders the information about the spatial arrangements of the detector's microscopic particles, determined by the exact solutions $\left.\left|\epsilon_k\right.\right\rangle $ (unreachable in reasonable time, unless \textbf{P} = \textbf{NP}), immaterial to the inexact (stochastic) solution $\left.\left|{\psi }_{\tau }\!\left(\omega \right)\!\right.\right\rangle $ to the Schrödinger equation with the interaction Hamiltonian $H_{{\rm int}}$
\begin{equation} \label{26}
U\!\left(\tau \right)\!\left.\left|{\psi }_0\right.\right\rangle \!\otimes\! \left.\left|\epsilon_0\right.\right\rangle = \left(I-\frac{i\tau }{\hbar }\,H_{{\rm int}}\!\left(\omega \right)\right) \frac{1}{\sqrt{2}}\left(\left.\left|0\right.\right\rangle +\left.\left|1\right.\right\rangle \right)\!\otimes\! \left.\left|\epsilon_0\right.\right\rangle \ \sim \ \left.\left|{\psi }_{\tau }\left(\omega \right)\right.\right\rangle \!\otimes \!\left.\left|\epsilon_0\right.\right\rangle \;\;\;\; ,
\end{equation}
\noindent where the solution $\left.\left|{\psi }_{\tau }\left(\omega \right)\right.\right\rangle $ is
\begin{equation} \label{27}
\left\{\omega \in \Omega :\ \ \ \left.\left|{\psi }_{\tau }\left(\omega \right)\right.\right\rangle \equiv \frac{1}{\sqrt{2}}\left.\left|0\right.\right\rangle e^{-\frac{i\tau}{\hbar }\left(\tilde{A}+\alpha \left(\omega \right)\right)} +\frac{1}{\sqrt{2}}\left.\left|1\right.\right\rangle e^{-\frac{i\tau}{\hbar }\left(\tilde{B}+\beta \left(\omega \right)\right)} \right\} \;\;\;\; .
\end{equation}
\noindent At this juncture, let us call to mind that a random variable conceptually does not have a single, fixed value (even if unknown); more exactly, it takes on a set of possible different values (each with an associated probability). Thus, the solution $\left.\left|{\psi }_{\tau }\left(\omega \right)\right.\right\rangle $ obtained in (\ref{27}) -- which is a complex-valued function of the real-valued random variables $\alpha \left(\omega \right)$ and $\beta \left(\omega \right)$ -- does not represent a single, unique state of the test-particle (or a linear combination, a superposition, of fixed states); rather, it represents a set of possible states of the test-particle after its interaction with the detector in a yet-to-be-performed experiment. That is, the solution (\ref{27}) is the function that associates a possible final state of the test-particle with every instance $\omega $ of the experiment, so that $\left.\left|{\psi }_{\tau }\left(\omega \right)\right.\right\rangle $ will vary from instance to instance as the experiment is repeated.
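\noindent This instance-to-instance variation is straightforward to simulate. The following minimal sketch is an illustration only (the values of $\tilde{A}$, $\tilde{B}$, $\tau$ and the number of simulated runs are arbitrary assumptions): it draws $\alpha(\omega)$ and $\beta(\omega)$ as in (\ref{22}), forms the state (\ref{27}) for each run, and averages the squared overlap with the initial state over all runs.
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(1)
hbar, tau = 1.0, 1.0
A_t, B_t = 500.0, 300.0        # "best guess" energies A~, B~ (illustrative values)
n = 100_000                    # number of simulated instances omega

# Uncertainties with |alpha| <= A~ and |beta| <= B~, drawn uniformly.
alpha = rng.uniform(-A_t, A_t, size=n)
beta  = rng.uniform(-B_t, B_t, size=n)

# Phases of the two components of |psi_tau(omega)>.
phase0 = np.exp(-1j * tau * (A_t + alpha) / hbar)
phase1 = np.exp(-1j * tau * (B_t + beta) / hbar)

overlap = 0.5 * (phase0 + phase1)      # <psi_0 | psi_tau(omega)>
P = np.mean(np.abs(overlap)**2)        # average over the sample
print(f"P(s_x = 1/2) ~ {P:.4f}")       # close to 1/2 when (A~+B~) tau / hbar >> 1
\end{verbatim}
\noindent The averaging carried out analytically in the remainder of this subsection corresponds to the sample mean computed here.\\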
Performing the experiment many times, one will find the probability $P\left(s_x=½\right)$ of observing the test-particle's spin eigenvalue $s_x=½$ by calculating in each instance $\omega $ the modulus squared ${\left|\left\langle {\psi }_0\!\mathrel{\left|\vphantom{{\psi }_0 {\psi }_{\tau }\!\left(\omega \right)}\right.\kern-\nulldelimiterspace}{\psi }_{\tau }\left(\omega \right)\right\rangle \right|}^2$ of the scalar product of the initial state $\left.\left|{\psi }_0\right.\right\rangle $ and a possible final state $\left.\left|{\psi }_{\tau }\left(\omega \right)\right.\right\rangle $ \begin{equation} \label{28} \begin{array}{r} {\left|\left\langle {\psi }_0\mathrel{\left|\vphantom{{\psi }_0 {\psi }_{\tau }\left(\omega \right)}\right.\kern-\nulldelimiterspace}{\psi }_{\tau }\left(\omega \right)\right\rangle \right|}^2 \sim \left[\displaystyle \frac{1}{2} + \frac{1}{2}{\rm cos}\!\left(\frac{\tilde{A}-\tilde{B}}{\hbar }\tau \right){\rm cos}\!\left(\frac{\alpha \left(\omega \right)-\beta \left(\omega \right)}{\hbar }\tau \right)\right.{\rm } \\ \left.{\rm -} \displaystyle \frac{1}{2}{\sin \!\left(\frac{\tilde{A}-\tilde{B}}{\hbar }\tau \right)\ }{\sin \!\left(\frac{\alpha \left(\omega \right)-\beta \left(\omega \right)}{\hbar }\tau \right) }\right] \end{array} \end{equation} \noindent and afterwards averaging the ensuing real-valued random function ${\left|\left\langle {\psi }_0\!\mathrel{\left|\!\vphantom{{\psi }_0 {\psi }_{\tau }\left(\omega \right)}\right.\kern-\nulldelimiterspace}{\psi }_{\tau }\left(\omega \right)\right\rangle \right|}^2$ over the whole sample space $\Omega $.\\ \noindent To find this average value, let us first find the total span of the random argument of the cosine and sine functions in (\ref{28}) by assessing its min ${\xi }_{{\rm min}}$ and max ${\xi }_{{\rm max}}$ values: \begin{equation} \label{29} {\xi }_{{\rm max}}=-{\xi }_{{\rm min}}= {\mathop{\max }_{\Omega } \left[\frac{\alpha \left(\omega \right)-\beta \left(\omega \right)}{\hbar }\tau \right]\ } \approx \frac{\tilde{A}+\tilde{B}}{\hbar }\tau \;\;\;\; . \end{equation} \noindent Assigning probabilities to possible outcomes of this random argument, we choose a uniform probability distribution as there is no reason to favor any one of the propositions regarding the argument's outcomes over the others (in such a case the only reasonable probability distribution would be uniform \cite{Park}, and then the information entropy would be equal to its maximum possible value). Subsequently, the average of the cosine function of the random uniformly distributed argument over $\Omega $ can be estimated as \begin{equation} \label{30} \overline{{\cos \left(\frac{\alpha \left(\omega \right)-\beta \left(\omega \right)}{\hbar }\tau \right)\ }}\approx \frac{\hbar }{\left(\tilde{A}+\tilde{B}\right)\tau }\ {\rm sin}\frac{\tilde{A}+\tilde{B}}{\hbar }\tau \end{equation} \noindent (whereas the estimated average of the sine function of the same argument ought to be zero due to the symmetry of the assessed limits ${\xi }_{{\rm min}}$ and ${\xi }_{{\rm max}}$ with respect to 0). Given that the limit ${\mathop{\lim }_{\theta \to \infty } \left({\theta }^{-1}{\sin \theta \ }\right)\ }$ exists and is equal to 0, one can conclude from (\ref{30}) that \begin{equation} \label{31} \overline{{\cos \left(\frac{\alpha \left(\omega \right)-\beta \left(\omega \right)}{\hbar }\tau \right)\ }}{\rm \ }{{\underset{\left(\frac{\tilde{A}+\tilde{B}}{\hbar }\tau \right)\ \to \ \infty }{\longrightarrow}}}\ 0 \;\;\;\; . 
\end{equation}
\noindent Since the estimated interaction energies $\tilde{A}$ and $\tilde{B}$ for the macroscopic detector are values of classical magnitude, we finally find that after a very short period of time the probability $P\left(s_x=½\right)$ will be of classical form
\begin{equation} \label{32}
P\left(s_x=½\right)=\overline{{\left|\left\langle {\psi }_0\mathrel{\left|\vphantom{{\psi }_0 {\psi }_{\tau }\left(\omega \right)}\right.\kern-\nulldelimiterspace}{\psi }_{\tau }\left(\omega \right)\right\rangle \right|}^2}\approx \frac{1}{2} \;\;\;\; .
\end{equation}
\subsection{Comparing the approaches}
\noindent Comparing (\ref{32}) with the analogous expression (\ref{19}) readily points out where the basic distinction between the approaches lies. In the quantum decoherence theory, the interaction between a particular microscopic system and a related macroscopic system is described at first (i.e. \textit{before} decoherence) deterministically, by way of exactly solving the Schrödinger equation for the interacting systems, and only then (i.e. \textit{after} decoherence) is random sampling brought in to simulate uncertainties (caused by the unknown way in which the microscopic system is entangled with the macroscopic one) in the combined interacting system. In contrast, the computational complexity approach reverses this tactic by solving the Schrödinger equation stochastically from the outset (using a stochastic Hamiltonian, which turns the Schrödinger equation into a stochastic differential equation) when it specifies the interaction of the microscopic system with the macroscopic one.\\
\noindent Clearly, such dissimilarity is caused by the different attitudes towards the \textbf{P} versus \textbf{NP} question -- particularly, the question of the computational hardness of the Schrödinger equation -- adopted by these two approaches. In fact, the quantum decoherence theory tacitly assumes that the quantum computational reductionism holds -- i.e. that \textbf{P} = \textbf{NP} and thus for any system, including a macroscopic detector, the exact solutions to the Schrödinger equation can be deterministically computed either in an instant or in a time so short (in comparison with the interaction time $\tau $) that it could be ignored in the theory.\\
\noindent By contrast, the computational complexity approach presumes that the problem ${\Phi }_{\psi }$ of solving the Schrödinger equation for any given Hamiltonian is intractable (i.e. \textbf{P} $\neq$ \textbf{NP}), which implies that a calculation synchronized in parallel with the experiment (using standard quantum theory) is only possible by stochastically solving the Schrödinger equation for a macroscopic interacting system.\\
\section{Conclusion}
\noindent But then, an objection can be made that, in fact, the results of quantum theory, when applied to measurements, by no means depend on the status of the \textbf{P} versus \textbf{NP} question, because there are simplified apparatus models which are solvable (e.g., a von Neumann measurement model of a pointer interacting with a microscopic system) and which give excellent agreement with experiment.
So, the objection goes, the inability to exactly solve the Schrödinger equation for an arbitrary macroscopic system in reasonable time (which might or might not be the case) has no consequences for the foundations of the theory.\\
\noindent In order to meet this objection, let us recall that a hard, or intractable, problem is not necessarily a problem for which there is no solution; rather, it is a problem for which there are no efficient means of solution. In other words, even though some instances of a hard problem can be guessed (and then verified) in reasonable time, there is no general rule for efficiently solving any other instance of the problem. (For example, even if you have guessed the solutions to various instances of the Sudoku puzzle, an \textbf{NP}-complete problem \cite{Yato}, you still will not have an efficient algorithm for solving any new instance of this puzzle.) So, despite the fact that various instances of the Schrödinger equation have been successfully solved, we still do not have an efficient algorithm for solving this equation for an arbitrary Hamiltonian (and hence for an arbitrary system), nor any assurance that this equation can always be solved exactly in reasonable time (such an assurance could only be offered by a proof that \textbf{P} = \textbf{NP}).\\
\noindent In an analogous manner, a number of simple (and for that reason exactly solvable) apparatus models (or, for that matter, models of any macroscopic system) cannot guarantee that the mutual orthogonality of the apparatus's state vectors, needed to provide the loss of coherence in the modulus squared of the scalar product, will always arise in all experiments. But it is quite clear that without such a guarantee the approach of the quantum decoherence theory cannot be stated as a set of general rules applicable to any physical system. However, to obtain just such a guarantee one would first need an efficient algorithm capable of solving the Schrödinger equation exactly for any possible system. Obviously, this would only be achievable if \textbf{P} were equal to \textbf{NP}.\\
\noindent Thus, the choice between the two presented approaches to the problem of the emergence of classicality ultimately depends on the status of the \textbf{P} versus \textbf{NP} question, a major unsolved problem in computer science.\\
\noindent If, for instance, the equality \textbf{P} = \textbf{NP} were to prove correct, then, indeed, Nature would be able to solve the Schrödinger equation for a truly macroscopic system in a moment, but soon after the solution was reached, decoherence would make the superposition of the quantum states of the macroscopic system (following from the linearity of the Schrödinger equation) unavailable for inspection by local observers. However, if \textbf{P} were to turn out not to be equal to \textbf{NP}, then there would be no physical means to solve this equation for the macroscopic system within a reasonable amount of time; hence, the superposition of the macroscopic system's quantum states, as a linear combination of the exact solutions to the Schrödinger equation for this system, would never exist in the first place and would thus be unavailable for inspection by any observer.\\
\end{document}
\begin{document} \begin{center} {\Large\bf Universal Test for Quantum One-Way Permutations}\\[7mm]
\large Akinori Kawachi$^{\dag,\S}$ \ \ Hirotada Kobayashi$^\ddag$ \ \ Takeshi Koshiba$^{\dag,\P}$ \ \ Raymond H. Putra$^{\dag,\S}$\\[7mm]
\normalsize \begin{tabular}{l@{\hskip 1mm}l} $^\dag$ & \sl Quantum Computation and Information Project,\\ & \sl ERATO, Japan Science and Technology Agency\\ & 406 Iseya-cho, Kawaramachi-Marutamachi, Kamigyo-ku, Kyoto 602-0873, Japan.\\ & \tt \verb+{kawachi,koshiba,raymond}@qci.jst.go.jp+\\[2mm] $^\ddag$ & \sl Quantum Computation and Information Project,\\ & \sl ERATO, Japan Science and Technology Agency\\ & 5-28-3 Hongo, Bunkyo-ku, Tokyo 113-0033, Japan.\\ & \tt \verb+[email protected]+\\[2mm] $^\S$ & \sl Graduate School of Informatics, Kyoto University\\ & Yoshida-Honmachi, Sakyo-ku, Kyoto 606-8501, Japan.\\[2mm] $^\P$ & \sl Secure Computing Laboratory, Fujitsu Laboratories Ltd.\\ & 4-1-1 Kamikodanaka, Nakahara-ku, Kawasaki 211-8588, Japan.\\ \end{tabular} \end{center} \vspace*{1mm}
\begin{abstract} The next bit test was introduced by Blum and Micali and proved by Yao to be a universal test for cryptographic pseudorandom generators. On the other hand, no universal test for the cryptographic one-wayness of functions (or permutations) is known, though the existence of cryptographic pseudorandom generators is equivalent to that of cryptographic one-way functions. In the quantum computation model, Kashefi, Nishimura and Vedral gave a sufficient condition of (cryptographic) quantum one-way permutations and conjectured that the condition would also be necessary. In this paper, we affirmatively settle their conjecture and complete a necessary and sufficient condition for quantum one-way permutations. The necessary and sufficient condition can be regarded as a universal test for quantum one-way permutations, since the condition is described as a collection of stepwise tests similar to the next bit test for pseudorandom generators. \end{abstract}
\section{Introduction}
One-way functions are functions $f$ such that, for each $x$, $f(x)$ is efficiently computable, but $f^{-1}(y)$ is computationally tractable only for a negligible fraction of $y$. While modern cryptography depends heavily on one-way functions, the existence of one-way functions is one of the most important open problems in theoretical computer science. On the other hand, Shor \cite{shor97} showed that famous candidates for one-way functions such as the RSA function or the discrete logarithm function are no longer one-way in the quantum computation model. Nonetheless, some cryptographic applications based on quantum one-way functions have been considered (see, e.g., \cite{ac02,dms00}). As a cryptographic primitive other than one-way functions, pseudorandom generators have also been studied extensively. Blum and Micali \cite{bm84} proposed how to construct pseudorandom generators from one-way permutations and introduced the next bit test for pseudorandom generators. (They actually constructed a pseudorandom generator assuming the hardness of the discrete logarithm problem.) Once Yao \cite{yao82} proved that the next bit test is a universal test for pseudorandom generators, the Blum--Micali construction paradigm of pseudorandom generators from one-way permutations was complete. In the case of pseudorandom generators based on one-way permutations, the next bit unpredictability can be proved by using hard-core predicates for one-way permutations.
After that, Goldreich and Levin \cite{gl89} showed that there exists a hard-core predicate for any one-way function (and also permutation), and H{\aa}stad {\em et al.} \cite{hill99} showed that the existence of pseudorandom generators is equivalent to that of one-way functions. Yao's result on the universality of the next bit test assumes that all bits appearing in the pseudorandom output are computationally unbiased. Schrift and Shamir \cite{ss93} extended Yao's result to the biased case and proposed universal tests for nonuniform distributions. On the other hand, no universal test for the one-wayness of a function (or a permutation) is known, although pseudorandom generators and one-way functions (or permutations) are closely related. In the quantum computation model, Kashefi, Nishimura and Vedral \cite{knv02} gave a necessary and sufficient condition for the existence of {\em worst-case\/} quantum one-way permutations. They also considered {\em cryptographic (i.e., average-case)\/} quantum one-way permutations and gave a sufficient condition of (cryptographic) quantum one-way permutations. They conjectured that the condition would also be necessary. Their conditions are based on the efficient implementability of reflection operators about some class of quantum states. Note that reflection operators are successfully used in Grover's algorithm \cite{gro96} and the quantum amplitude amplification technique \cite{bhmt00}. To obtain a sufficient condition of cryptographic quantum one-way permutations, a notion of ``pseudo identity'' operators was introduced \cite{knv02}. Since the worst-case hardness of the reflection operators corresponds to the worst-case hardness of inverting the permutation $f$, we need some technical tool that makes the inversion process of $f$ tolerant of some computational errors in order to obtain a sufficient condition of cryptographic quantum one-way permutations. Actually, pseudo identity operators permit {\em exponentially\/} small errors during the inversion process \cite{knv02}. In this paper, we complete the necessary and sufficient condition of cryptographic quantum one-way permutations conjectured in \cite{knv02}. We combine their basic ideas with a probabilistic argument in order to obtain a technical tool that permits {\em polynomially\/} small errors during the inversion process. Roughly speaking, pseudo identity operators are close to the identity operator in a certain sense. The similarity is defined by an intermediate notion between the statistical distance and the computational distance. In \cite{knv02}, it is by upper-bounding this similarity that the sufficient condition of cryptographic quantum one-way permutations was obtained. By using a probabilistic argument, we can estimate the expectation of the similarity and then handle polynomially small errors during the inversion of the permutation $f$. Moreover, the necessary and sufficient condition of quantum one-way permutations can be regarded as a universal test for the quantum one-wayness of permutations. To discuss universal tests for the one-wayness of permutations, we briefly review the universality of the next bit test for pseudorandom generators. Let $g(x)$ be a length-regular deterministic function such that $g(x)$ is of length $\ell(n)$ for any $x$ of length $n$.
The universality of the next bit test says that we have only to check a collection of stepwise polynomial-time tests $T_1,...,T_{\ell(n)}$ instead of considering all the polynomial-time tests that try to distinguish truly random bits from the output bits of $g$, where each $T_i$ is the test of whether, given the $(i-1)$-bit prefix of $g(x)$ (and the value of $\ell(|x|)$), the $i$-th bit of $g(x)$ can be predicted with probability non-negligibly higher than 1/2. Our necessary and sufficient condition of quantum one-way permutations says that the quantum one-wayness of a given permutation $f$ can be checked by a collection of stepwise tests $T_1',...,T_n'$ instead of considering all tests by polynomial-size quantum circuits, where each $T_i'$ is the test of whether, given some quantum state $q_{i-1}$ that can be defined by using the $(i-1)$-bit prefix of $f(x)$, some other quantity $t_i$ is computable with a polynomial-size quantum circuit and the next state $q_i$ can be determined from $q_{i-1}$ and $t_i$. In this sense, our universal test for quantum one-way permutations is analogous to the universal test (i.e., the next bit test) for pseudorandom generators. \section{Preliminaries} We say that a unitary operator $U$ (on $n$ qubits) is {\em easy\/} if there exists a quantum circuit of size polynomial in $n$ implementing $U$, and that a set $\cal F$ of unitary operators is {\em easy\/} if every $U\in\cal F$ is easy. Throughout this paper, we assume that $f:\{0,1\}^\ast \rightarrow \{0,1\}^\ast$ is a length-preserving permutation unless otherwise stated. Namely, for any $x\in\{0,1\}^n$, $f(x)$ is an $n$-bit string and the set $\{f(x): x\in\{0,1\}^n\}$ is of cardinality $2^n$ for every $n$. First, we mention some operators that are useful in describing the previous results and ours. The tagging operators $O_j$ are defined as follows: \[ O_j\ket{x}\ket{y} = \begin{cases} - \ket{x}\ket{y} & {\rm if~} \fixbitE{f(y)}{x}{2j}{2j+1}\\ \ket{x}\ket{y} & {\rm if~} \fixbitN{f(y)}{x}{2j}{2j+1} \end{cases} \] where $y_{(i,j)}$ denotes the substring from the $i$-th bit to the $j$-th bit of the bit string $y$. Note that these unitary operators $O_j$ are easy. Next, we consider the reflection operators $Q_j(f)$ defined as follows: \[ Q_j(f) = \sum_{\udn{x}}\kb{x}{x}\otimes (2\kb{\psi_{j,x}}{\psi_{j,x}}-I) \] where \[ \ket{\psi_{j,x}} = \frac{1}{\sqrt{2^{n-2j}}} \sum_{y:\fixbitE{f(y)}{x}{1}{2j}} \ket{y}. \] (See Fig.\ 1 for the reflection operator.) We sometimes use the notation $Q_j$ instead of $Q_j(f)$. \begin{center} \mbox{}\\ \scalebox{0.4}{\includegraphics{ref.eps}}\\ Fig.~1: Reflection operator\\ \mbox{} \end{center} Actually, these reflection operators are somewhat special for our purpose. In general, reflection operators are commonly and successfully used in Grover's algorithm \cite{gro96} and the quantum amplitude amplification technique \cite{bhmt00}. \begin{theorem}\label{thm:wc}{\rm (Kashefi, Nishimura and Vedral \cite{knv02})} Let $f:\{0,1\}^n\rightarrow\{0,1\}^n$ be a permutation. Then $f$ is worst-case quantum one-way if and only if the set ${\cal F}_n = \{Q_j(f)\}_{j=0,1,...,\frac{n}{2}-1}$ of unitary operators is not easy. \end{theorem} As a part of the proof of Theorem \ref{thm:wc}, Kashefi, Nishimura and Vedral \cite{knv02} give a quantum algorithm (which we call Algorithm {\sf INV} in what follows) that computes $f^{-1}$ by using the unitary operators $O_j$ and $Q_j$.
The initial input state to {\sf INV} is assumed to be \[ \frac{1}{\sqrt{2^n}}\ket{x}\sum_{\udn{y}}\ket{y}, \] where {\sf INV} tries to compute $f^{-1}(x)$. Then {\sf INV} performs the following steps: \begin{quote} {\bf foreach} $j=0$ to $\frac{n}{2}-1$\\ \hspace*{7mm} ({\sf step W.j.1}) Apply $O_j$ to the first and the second registers;\\ \hspace*{7mm} ({\sf step W.j.2}) Apply $Q_j$ to the first and the second registers. \end{quote} After each step, we have the following: \begin{eqnarray*} \mbox{(the state after {\sf step W.j.1})} & = & \frac{2^j}{\sqrt{2^n}}\ket{x}\left( \sqrt{2^{n-2j}}\ket{\psi_{j,x}} - 2\sum_{y:\fixbitE{f(y)}{x}{1}{2j+2}}\ket{y}\right).\\ \mbox{(the state after {\sf step W.j.2})} & = & \frac{2^{j+1}}{\sqrt{2^n}}\ket{x}\sum_{y:\fixbitE{f(y)}{x}{1}{2j+2}}\ket{y}. \end{eqnarray*} Before reviewing a known sufficient condition of cryptographic quantum one-way permutations, we define two types of cryptographic ``one-wayness'' in the quantum computational setting. \begin{definition}\rm A permutation $f$ is {\em weakly quantum one-way\/} if the following conditions are satisfied: \begin{enumerate} \item $f$ can be computed by a polynomial-size quantum circuit (and whenever inputs are classical the corresponding outputs must be classical). \item There exists a polynomial $p(\cdot)$ such that for every polynomial-size quantum circuit $A$ and all sufficiently large $n$'s, \[ \Pr[A(f(U_n))\ne U_n] > \frac{1}{p(n)}, \] where $U_n$ is the uniform distribution over $\{0,1\}^n$. \end{enumerate} \end{definition} \begin{definition}\rm A permutation $f$ is {\em strongly quantum one-way\/} if the following conditions are satisfied: \begin{enumerate} \item $f$ can be computed by a polynomial-size quantum circuit (and whenever inputs are classical the corresponding outputs must be classical). \item For every polynomial-size quantum circuit $A$, every polynomial $p(\cdot)$, and all sufficiently large $n$'s, \[ \Pr[A(f(U_n))= U_n] < \frac{1}{p(n)}. \] \end{enumerate} \end{definition} As for classical one-way permutations, one can show that the existence of weakly quantum one-way permutations is equivalent to that of strongly quantum one-way permutations (see, e.g., \cite{goldreich}). Thus, we consider weakly quantum one-way permutations in this paper. While Theorem \ref{thm:wc} is a necessary and sufficient condition of {\em worst-case\/} quantum one-way permutations, Kashefi, Nishimura and Vedral \cite{knv02} also gave a sufficient condition of {\em cryptographic\/} quantum one-way permutations by using the following notion. \begin{definition}\rm Let $d(n)\ge n$ be a polynomial in $n$ and $J_n$ be a $d(n)$-qubit unitary operator. $J_n$ is called an $(a(n),b(n))$-pseudo identity if there exists a set $X_n\subseteq \{0,1\}^n$ such that $|X_n|/2^n \le b(n)$ and for any $\udn{z}\setminus X_n$ \[ | 1 - (\bra{z}_1\bra{0}_2)J_n(\ket{z}_1\ket{0}_2)| \le a(n), \] where $\ket{z}_1$ is the $n$-qubit basis state for each $z$ and $\ket{0}_2$ corresponds to the ancillae of $d(n)-n$ qubits. \end{definition} The closeness between a pseudo identity operator and the identity operator is measured by the pair of parameters $a(n)$ and $b(n)$. The first parameter $a(n)$ is a measure of a statistical property and the second one $b(n)$ is a measure of a computational property. Note that we do not care where each $z\in X_n$ is mapped by the pseudo identity operator $J_n$.
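As a small illustration of this definition (our own numerical sketch, not part of \cite{knv02}; the function name, the ancilla layout, and the example operator are ours), one can compute, for a given unitary $J_n$ and a threshold $a(n)$, the smallest admissible $b(n)$ by scanning the diagonal overlaps $\langle z,0|J_n|z,0\rangle$:
\begin{verbatim}
import numpy as np

def smallest_b(J, n, a):
    """For a unitary J on d(n) qubits (a 2^{d(n)} x 2^{d(n)} matrix whose first n
    qubits form the data register), return the smallest b such that J can be an
    (a, b)-pseudo identity: the fraction of z with |1 - <z,0|J|z,0>| > a."""
    anc = J.shape[0] // 2 ** n               # dimension of the ancilla register
    bad = 0
    for z in range(2 ** n):
        overlap = J[z * anc, z * anc]        # <z, 0...0 | J | z, 0...0>
        if abs(1.0 - overlap) > a:
            bad += 1
    return bad / 2 ** n

# Example: the identity except that |0>|0> and |0>|1> are swapped, so exactly
# one z (namely z = 0) has to be placed in the exceptional set X_n.
n, d = 3, 4
J = np.eye(2 ** d, dtype=complex)
J[0, 0] = J[1, 1] = 0.0
J[0, 1] = J[1, 0] = 1.0
print(smallest_b(J, n, a=2.0 ** -20))        # -> 0.125, i.e. |X_n| / 2^n = 1/8
\end{verbatim}
Any $z$ whose overlap falls below the threshold is simply placed in $X_n$, mirroring the fact that the definition does not constrain where such $z$ are mapped.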
While we will give a necessary and sufficient condition of quantum one-way permutations by using the notion of pseudo identity, we introduce a new notion, which may be helpful to understand intuitions of our and previous conditions, in the following. \begin{definition}\rm Let $d'(n)\ge n$ be a polynomial in $n$ and $P_n$ be a $d'(n)$-qubit unitary operator. $P_n$ is called $(a(n),b(n))$-pseudo reflection (with respect to $\ket{\psi(z)}$) if there exists a set $X_n\subseteq \{0,1\}^n$ such that $|X_n|/2^n \le b(n)$ and for any $\udn{z}\setminus X_n$ \[ \left| 1 - \biggl(\bra{z}_1\bra{w}_2 \Bigl(\sum_{\udn{y}}\ket{y}\bra{y}_1 \otimes (2\ket{\psi(y)}\bra{\psi(y)}-I)_2\Bigr) \bra{0}_3\biggr)P_n (\ket{z}_1\ket{w}_2\ket{0}_3)\right| \le a(n). \] \end{definition} The above definition of pseudo reflection operators is somewhat complicated. Since Fig.\ 2 illustrates a geometrical intuition, it may be helpful to understand the idea of pseudo reflection operators. Let $J_n$ be a $d(n)$-qubit $(a(n),b(n))$-pseudo identity operator. Then $(I_n\otimes J_n)^{\dag}(Q_j\otimes I_{d(n)-n}) (I_n\otimes J_n)$ is a $(d(n)+n)$-qubit $(a'(n),b'(n))$-pseudo reflection operator with respect to $\ket{\psi_{j,x}}$, where $a'(n)\le 2a(n)$ and $b'(n)\le 2b(n)$. These estimations of $a'(n)$ and $b'(n)$ are too rough to obtain a necessary and sufficient condition. Rigorously estimating these parameters is a main technical issue in this paper. \begin{theorem}\label{thm:avc} {\rm (Kashefi, Nishimura and Vedral \cite{knv02})} Let $f$ be a permutation that can be computed by a polynomial-size quantum circuit. If $f$ is not (weakly) quantum one-way, then for any polynomial $p$ and infinitely many $n$, there exist a polynomial $r_p(n)$ and a $r_p(n)$-qubit $(1/2^{p(n)},1/p(n))$-pseudo identity operator $J_{n}$ such that the family of pseudo reflection operators \[ {\cal F}_{p,n}(f) = \{ (I_n\otimes J_{n})^{\dag} (Q_j(f)\otimes I_{r_p(n) -n}) (I_n\otimes J_{n})\}_{j=0,1,...,\frac{n}{2}-1} \] is easy. \end{theorem} Kashefi, Nishimura and Vedral \cite{knv02} conjectured that the converse of Theorem \ref{thm:avc} should still hold and proved a weaker version of the converse as follows. \begin{theorem} {\rm (Kashefi, Nishimura and Vedral \cite{knv02})} Let $f$ be a permutation that can be computed by a polynomial-size quantum circuit. If for any polynomial $p$ and infinitely many $n$ there exist a polynomial $r_p(n)$ and a $r_p(n)$-qubit $(1/2^{p(n)},p(n)/2^n)$-pseudo identity operator $J_n$ such that the family of pseudo reflection operators \[ {\cal F}_{p,n}(f) = \{ (I_n\otimes J_{n})^{\dag} (Q_j(f)\otimes I_{r_p(n) -n}) (I_n\otimes J_{n})\}_{j=0,1,...,\frac{n}{2}-1} \] is easy, then $f$ is not (weakly) quantum one-way. \end{theorem} \begin{center} \mbox{}\\ \scalebox{0.6}{\includegraphics{pseudo-ref.eps}}\\ Fig.~2: Pseudo reflection operator\\ \mbox{} \end{center} We mention why it is difficult to show the converse of Theorem \ref{thm:avc}. To prove it by contradiction, all we can assume is the existence of a pseudo identity operator. This means that we cannot know how the pseudo identity operator is close to the identity operator. To overcome this difficulty, we introduce a probabilistic technique and estimate the expected behavior of the pseudo identity operator. Eventually, we give a necessary and sufficient condition of the existence of quantum one-way permutations in terms of reflection operators. This says that we affirmatively settle their conjecture. 
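Before stating the main result, it may help to see the reflection-based inversion of Algorithm {\sf INV} at work on a toy instance. The following classical Python simulation is our own illustration (the helper names are ours, the toy permutation is of course easy to invert directly, and bit pairs are indexed from the most significant bit downwards); it tracks only the second register for a fixed classical $x$ and applies the tagging and exact reflection steps, after which the register holds $\ket{f^{-1}(x)}$:
\begin{verbatim}
import numpy as np

def inv_by_reflections(f, x, n):
    """Simulate the second register of Algorithm INV with exact reflections
    (no pseudo identity).  f is an array with f[y] in {0, ..., 2^n - 1}
    describing a permutation, x is the value to invert, and n is even."""
    dim = 2 ** n
    fy = np.asarray(f)
    v = np.full(dim, 1.0 / np.sqrt(dim))     # uniform superposition over all y
    for j in range(n // 2):
        # O_j: flip the sign of y whose image agrees with x on the first 2j+2 bits
        tag = (fy >> (n - 2 * j - 2)) == (x >> (n - 2 * j - 2))
        v[tag] *= -1.0
        # Q_j: reflect about |psi_{j,x}>, uniform over y agreeing on the first 2j bits
        agree = (fy >> (n - 2 * j)) == (x >> (n - 2 * j))
        psi = agree / np.sqrt(agree.sum())
        v = 2.0 * (psi @ v) * psi - v
    return int(np.argmax(np.abs(v)))         # index of f^{-1}(x)

rng = np.random.default_rng(0)
n = 6
perm = rng.permutation(2 ** n)               # a toy (certainly not one-way) permutation
x = 37
assert perm[inv_by_reflections(perm, x, n)] == x
\end{verbatim}
After round $j$ the register is the uniform superposition over the $2^{n-2j-2}$ remaining preimage candidates, exactly as in the state after {\sf step W.j.2}; Algorithm {\sf av-INV} below replaces the exact reflections $Q_j$ by the pseudo reflections $\tilde{Q}_j$.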
\section{Necessary and Sufficient Condition of Quantum One-way Permutations} We have a necessary and sufficient condition of cryptographic quantum one-way permutations as follows. \begin{theorem}\label{thm:main} The following statements are equivalent. \begin{enumerate} \item There exists a weakly quantum one-way permutation. \item There exists a polynomial-time computable permutation $f$ for which there exists a polynomial $p$ such that, for all sufficiently large $n$'s, for any polynomial $r_p(n)$ and any $r_p(n)$-qubit $(1/2^{p(n)},1/p(n))$-pseudo identity operator $J_{n}$, the family of pseudo reflection operators \[ {\cal F}_{n,p}(f)=\{(I_n\otimes J_{n})^{\dag} (Q_j(f)\otimes I_{r_p(n)-n}) (I_n\otimes J_{n})\}_{j=0,1,...,\frac{n}{2}-1} \] is not easy. \end{enumerate} \end{theorem} To grasp the intuition of Theorem \ref{thm:main}, Fig.~3 may be helpful. Theorem \ref{thm:main} can be proved as the combination of Theorem \ref{thm:avc} and the following theorem. \begin{theorem}\label{thm:new2} Let $f$ be a permutation that can be computed by a polynomial-size quantum circuit. If for any polynomial $p$ and infinitely many $n$ there exist a polynomial $r_p(n)$ and a $r_p(n)$-qubit $(1/2^{p(n)},1/p(n))$-pseudo identity operator $J_{n}$ such that the family of pseudo reflection operators \[ {\cal F}_{n,p}(f)=\{\tilde{Q}_j(f)\} = \{(I_n\otimes J_{n})^{\dag} (Q_j(f)\otimes I_{r_p(n)-n}) (I_n\otimes J_{n})\}_{j=0,1,...,\frac{n}{2}-1} \] is easy, then $f$ is not (weakly) quantum one-way. \end{theorem} \begin{center} \mbox{}\\ \scalebox{0.4}{\includegraphics{Qtilde3.eps}}\\ Fig.~3: Basic operations for the inversion\\ \mbox{} \end{center} \begin{proof} Suppose that for any polynomial $p(n)$, infinitely many $n$, and some $(1/2^{p(n)},1/p(n))$-pseudo identity operator $J_{n}$, the family ${\cal F}_{p,n}$ of unitary operators is easy. Moreover, let $f$ be a weakly quantum one-way permutation. By a probabilistic argument, we show that a contradiction follows from this assumption. In more detail, we construct an efficient inverter for $f$ using ${\cal F}_{p,n}$ and then show that, if we choose a polynomial $p(n)$ appropriately, this efficient inverter can compute $x$ from $f(x)$ for a large fraction of inputs, which violates the assumption that $f$ is a weakly quantum one-way permutation. We first construct a polynomial-size algorithm {\sf av-INV} to invert $f$ by using unitary operations in ${\cal F}_{p,n}$. Algorithm {\sf av-INV} is almost identical to Algorithm {\sf INV} except for the following change: the operator $Q_j$ is now replaced with $\tilde{Q}_j$. The initial input state to {\sf av-INV} is also assumed to be \[ \frac{1}{\sqrt{2^n}}\ket{x}_1\sum_{\udn{y}}\ket{y}_2\ket{0}_3, \] where $\ket{z}_1$ (resp., $\ket{z}_2$ and $\ket{z}_3$) denotes the first $n$-qubit (resp., the second $n$-qubit and the last $(r_p(n)-n)$-qubit) register. Algorithm {\sf av-INV} performs the following steps: \begin{quote} {\bf foreach} $j=0$ to $\frac{n}{2}-1$\\ \hspace*{7mm} ({\sf step j.1}) Apply $O_j$ to the first and the second registers;\\ \hspace*{7mm} ({\sf step j.2}) Apply $\tilde{Q}_j$ to all the registers. \end{quote} For the analysis of Algorithm {\sf av-INV}, we use the following functionally equivalent description. (Note that the following procedure may not be efficient, though its behavior is equivalent to that of Algorithm {\sf av-INV}.)
\begin{quote} {\bf foreach} $j=0$ to $\frac{n}{2}-1$\\ \hspace*{7mm} ({\sf step A.j.1}) Apply $O_j$ to the first and the second registers;\\ \hspace*{7mm} ({\sf step A.j.2}) Apply $J_n$ to the second and third registers;\\ \hspace*{7mm} ({\sf step A.j.3}) Apply ${Q}_j$ to the first and the second registers;\\ \hspace*{7mm} ({\sf step A.j.4}) Apply $J_n^{\dag}$ to the second and third registers. \end{quote} Then, we can prove the following two claims. \begin{claim}\label{claim:qowp} Suppose that $f$ is a weakly quantum one-way permutation, i.e., there exists a polynomial $r(n)\ge 1$ such that for every polynomial size quantum circuit $A$ and all sufficiently large $n$'s, $\Pr[A(f(U_n))\ne U_n] > 1/r(n)$. Then, there are at least $2^n(1/r(n)-1/q^2(n))/(1-1/q^2(n))$ $x$'s such that $A$ cannot compute $x$ from $f(x)$ with probability at least $1-1/q^2(n)$. \end{claim} \begin{claim}\label{claim:av-INV} Let $q(n) = p^{1/4}(n)/\sqrt{2n}$. There are at most $2^n/q(n)$ $x$'s such that Algorithm {\sf av-INV} cannot compute $x$ from $f(x)$ with probability at least $1-1/q^2(n)$. \end{claim} The proof of Claim \ref{claim:av-INV} is delayed and that of Claim \ref{claim:qowp} follows immediately from the definition of a weakly quantum one-way permutation by a counting argument. Recall that we assume that $f$ is a weakly quantum one-way permutation at the beginning of this proof. Now, we can set $p(n) = 4n^2(r(n)+1)^4$, that is, $q(n)=r(n)+1\ge 2$. It follows that $(1/r(n)-1/q^2(n))/(1-1/q^2(n)) > 1/q(n)$, which is a contradiction since {\sf av-INV} is an inverter violating the assumption of a weakly quantum one-way permutation $f$. This implies that $f$ is not weakly quantum one-way. In what follows, we present a proof of Claim \ref{claim:av-INV} to complete the proof of this theorem. \begin{proofof}{Claim \ref{claim:av-INV}} From the definition of pseudo identity operators, there exists a set $X_n\subseteq\{0,1\}^n$ with $|X_n| \le 2^n/p(n)$ such that for any $y\in Y_n = \{0,1\}^n\setminus X_n$, \[ J_{n}\ket{y}_2\ket{0}_3 = \alpha_y\ket{y}_2\ket{0}_3 + \ket{\psi_y}_{23}, \] where $\ket{\psi_y}_{23} \bot \ket{y}_2\ket{0}_3$ and $|1-\alpha_y|\le \frac{1}{2^{p(n)}}$. In Algorithm {\sf av-INV}, we apply $J_{n}$ before and after {\sf step A.j.3} for each $j$. The application of $J_{n}$ makes an error in computation of $f^{-1}$. We call the vector $J_{n}\ket{\psi}-\ket{\psi}$ the {\em error\/} associated to $\ket{\psi}$. To measure the effect of this error, we use the following lemmas. (Lemma \ref{lem:keep} itself was stated in \cite{knv02}.) We note, in the sequel, the norm over vectors is Euclidean. \begin{lemma}\label{lem:bound} Assume that $T\subseteq S\subseteq \{0,1\}^n$. Then length $l(S,T)$ of the error associated to the state \[ \ket{\psi(S,T)} = \frac{1}{\sqrt{|S|}}\left( \sum_{y\in S\setminus T}\ket{y}\ket{0} - \sum_{y\in T}\ket{y}\ket{0}\right) \] satisfies that \[ l(S,T) \le 2\sqrt{\frac{|S\cap X_n|}{|S|}}+\gamma(n), \] where $\gamma(n)$ is a negligible function in $n$. \end{lemma} \begin{proof} First, we restate the property of the length of the error associated to the state $\ket{y}\ket{0}$ which was shown in \cite{knv02}. The property is that the length is at most $\frac{2}{2^{p(n)/2}}$ if $y\in Y_n$ and at most 2 if $y\in X_n$. 
Using this property more carefully, we have a more tight bound of $l(S,T)$ as follows: \begin{eqnarray*} l(S,T) & = & |J_{n}\ket{\psi(S,T)}-\ket{\psi(S,T)}|\\ & = & \frac{1}{\sqrt{|S|}}\left| (J_{n}-I)\left( \sum_{y\in Y_n\cap (S\setminus T)}\ket{y}\ket{0} - \sum_{y\in Y_n\cap T}\ket{y}\ket{0} + \sum_{y\in X_n\cap (S\setminus T)}\ket{y}\ket{0} - \sum_{y\in X_n\cap T}\ket{y}\ket{0} \right)\right|\\ & \le & \frac{1}{\sqrt{|S|}}\left| (J_{n}-I)\left( \sum_{y\in Y_n\cap (S\setminus T)}\ket{y}\ket{0} - \sum_{y\in Y_n\cap T}\ket{y}\ket{0}\right)\right| \\ & & + \frac{1}{\sqrt{|S|}}\left| (J_{n}-I)\left( \sum_{y\in X_n\cap (S\setminus T)}\ket{y}\ket{0} - \sum_{y\in X_n\cap T}\ket{y}\ket{0} \right)\right|\\ & \le & \frac{1}{\sqrt{|S|}}\left( \sum_{y\in Y_n\cap (S\setminus T)} |J_{n}\ket{y}\ket{0}-\ket{y}\ket{0}| + \sum_{y\in Y_n\cap T} |J_{n}\ket{y}\ket{0}-\ket{y}\ket{0}|\right)\\ & & + \frac{1}{\sqrt{|S|}}\left(\left| J_{n}\left( \sum_{y\in X_n\cap (S\setminus T)}\ket{y}\ket{0} - \sum_{y\in X_n\cap T}\ket{y}\ket{0}\right)\right| + \left| \sum_{y\in X_n\cap (S\setminus T)}\ket{y}\ket{0} - \sum_{y\in X_n\cap T}\ket{y}\ket{0}\right|\right)\\ & \le & \frac{2}{2^{p(n)/2}}\frac{|S\cap Y_n|}{\sqrt{|S|}} + \frac{2}{\sqrt{|S|}} \sqrt{ (|X_n\cap (S\setminus T)| + |X_n\cap T|) }\\ & = & \frac{2}{2^{p(n)/2}} \frac{|S\cap Y_n|}{\sqrt{|S|}} + 2\sqrt{\frac{|S\cap X_n|}{|S|}}. \end{eqnarray*} Let $\gamma(n)$ be the former term in the above inequality. Then \[ \gamma(n) = \frac{2}{2^{p(n)/2}}\frac{|S\cap Y_n|}{\sqrt{|S|}} < \frac{2^{n+1}}{2^{p(n)/2}} < \frac{1}{2^n} \] and is negligible. \end{proof} \begin{lemma}\label{lem:keep} Let $J_{n}\ket{\psi(S,T)}=\alpha \ket{\psi(S,T)} + \ket{\psi(S,T)^{\bot}}$, where $\ket{\psi(S,T)}\bot \ket{\psi(S,T)^{\bot}}$. Then, $|\ket{\psi(S,T)^{\bot}}|\le l(S,T)$. \end{lemma} By using Lemma \ref{lem:bound} and Lemma \ref{lem:keep}, we consider the effect of the additional applications of pseudo identity operators to {\sf INV} in order to analyze Algorithm {\sf av-INV}. For each $j$, we let $S_{x,j}=\{y:f(y)_{(1,2j)}=x_{(1,2j)}\}$ and $T_{x,j}=\{y:f(y)_{(1,2j+2)}=x_{(1,2j+2)}\}$. We assume that the state before {\sf step A.j.2} is \[ \ket{x}_1\ket{\psi(S_{x,j},T_{x,j})}_{23} = \ket{x}_1\frac{2^j}{\sqrt{2^n}} \left(\sum_{y\in S_{x,j}\setminus T_{x,j}}\ket{y}_2 - \sum_{y\in T_{x,j}}\ket{y}_2\right)\ket{0}_3. \] Note that the above state is the same as the one before W.$j$.2 in Algorithm {\sf INV}. In {\sf step A.j.2}, $J_n$ is applied to the state. From Lemma \ref{lem:bound} and a probabilistic argument, we have the following. \begin{lemma}\label{lem:eval} For each $j$, \[ {\bf E}[l(S_{x,j},T_{x,j})] \le \frac{2}{\sqrt{p(n)}}+\gamma(n), \] where the expectation is over $x\in\{0,1\}^n$ and $\gamma(n)$ is a negligible function in $n$. \end{lemma} \begin{proof} Since $f$ is a permutation, by the definition of $S_{x,j}$, $|S_{x,j}| = 2^{n-2j}$. Also, $y\in S_{x,j}$ for some $x$ if and only if $y_{(1,2j)}=x_{(1,2j)}$. Then, \[ \Pr\left[ y\in S_{x,j} \right] = \frac{2^{n-2j}}{2^n} = \frac{1}{2^{2j}}, \] where the probability is taken over $x\in\{0,1\}^n$ uniformly. Since, for any $(1/2^{p(n)}, 1/p(n))$-pseudo identity, \[ {\bf E}[|X_n \cap S_{x,j}|] = \frac{|X_n|}{2^{2j}}, \quad |S_{x,j}| = 2^{n-2j}, \quad\mbox{and}\quad \frac{|X_n|}{2^n} = \frac{1}{p(n)}, \] it holds that \[ {\bf E}\left[\frac{|X_n \cap S_{x,j}|}{|S_{x,j}|}\right] = \frac{1}{p(n)}, \] where the expectation is over $x\in\{0,1\}^n$. 
By Lemma \ref{lem:bound}, \[ {\bf E}\left[l(S_{x,j},T_{x,j})\right] \le 2{\bf E}\left[\sqrt{\frac{|X_n \cap S_{x,j}|}{|S_{x,j}|}}\right] + \gamma(n) \le 2\sqrt{{\bf E}\left[\frac{|X_n \cap S_{x,j}|}{|S_{x,j}|}\right]} + \gamma(n) = \frac{2}{\sqrt{p(n)}} + \gamma(n) \] for some negligible function $\gamma$. \end{proof} From Lemma \ref{lem:keep} and Lemma \ref{lem:eval}, we obtain a vector $v=v_1+v_2$ where $v_1/|v_1|$ is the unit vector corresponding to the state before {\sf step W.j.2} in Algorithm {\sf INV} and $v_2$ is a vector of expected length at most $2/\sqrt{p(n)}$ orthogonal to $v_1$. (For simplicity, we neglect a negligible term $\gamma(n)$.) The vector $v_2$ corresponds to an error that occurs when $J_{n}$ is applied before {\sf step A.j.3}. Next, we consider the state after {\sf step A.j.3}. We assume that the state after {\sf step A.j.3} is \[ \ket{x}_1\ket{\psi(S_{x,j+1},\varnothing)}_{23} = \ket{x}_1\frac{2^{j+1}}{\sqrt{2^n}} \left(\sum_{y\in S_{x,j+1}}\ket{y}_2\right)\ket{0}_3. \] Note that the above state is the same as the one after {\sf step W.j.2} in Algorithm {\sf INV}. In order to analyze the effect of the application of $J_{n}^{\dag}$ after {\sf step A.j.3}, we need another lemma similar to Lemma \ref{lem:eval}. (The proof is omitted since it is similar.) \begin{lemma}\label{lem:eval2} For each $j$, \[ {\bf E}[l(S_{x,j+1},\varnothing)] \le \frac{2}{\sqrt{p(n)}} + \gamma(n), \] where the expectation is over $x\in\{0,1\}^n$ and $\gamma(n)$ is a negligible function in $n$. \end{lemma} By an argument similar to the above, we obtain a vector $v=v_1+v_2$ where $v_1/|v_1|$ is the unit vector corresponding to the state after {\sf step W.j.2} in Algorithm {\sf INV} and $v_2$ is a vector of expected length at most $2/\sqrt{p(n)}$ orthogonal to $v_1$. (For simplicity, we neglect a negligible term $\gamma(n)$.) The vector $v_2$ corresponds to an error that occurs when $J_{n}^{\dag}$ is applied after {\sf step A.j.3}. From the above analysis, we can see that after the completion of Algorithm {\sf av-INV} on input $x$, the final state becomes $v(x)=v_1(x)+v_2(x)$, where $v_1(x)$ is parallel to \[ \ket{x}_1\ket{f^{-1}(x)}_2\ket{0}_3 \] and $v_2(x)$ is a vector orthogonal to $v_1$. By Lemma \ref{lem:eval2} and the linearity of expectation, we have \[ {\bf E}[| v_2(x) |] \le 2 \cdot \frac{n}{2} \cdot \frac{2}{\sqrt{p(n)}} = \frac{2n}{\sqrt{p(n)}} \le \frac{1}{q^2(n)} \] for $q(n) = p^{1/4}(n)/\sqrt{2n}$, where the expectation is over $x\in\{0,1\}^n$. It follows that the number of $x$ such that $|v_2(x)| > 1/q(n)$ is at most $2^n/q(n)$, i.e., {\sf av-INV} can invert $f(x)$ for at least $2^n(1-1/q(n))$ $x$'s with probability at least $1-1/q^2(n)$. \end{proofof} \end{proof} \section{Conclusion} By giving a proof of the conjecture left by Kashefi, Nishimura and Vedral \cite{knv02}, we have completed in this paper a necessary and sufficient condition of cryptographic quantum one-way permutations in terms of pseudo identity and reflection operators. The necessary and sufficient condition of quantum one-way permutations can be regarded as a universal test for the quantum one-wayness of permutations. As far as the authors know, this is the first result, classical or quantum, on universality for the one-wayness of permutations, although the next bit test is a universal test for pseudorandom generators in the classical computation model. We believe that our universal test for quantum one-way permutations may help to find good candidates for them, which are currently not known.
\subsubsection*{Acknowledgments.} We are grateful for valuable comments from anonymous referees. AK would like to acknowledge the financial support of the 21st COE for Research and Education of Fundamental Technologies in Electrical and Electronic Engineering, Kyoto University. \end{document}
\begin{document} \title{ Production of genuine multimode entanglement in circular waveguides with long-range interactions} \author{T Anuradha$^{1}$, Ayan Patra$^{1}$, Rivu Gupta$^{1}$, Amit Rai$^{2}$, Aditi Sen(De)$^{1}$} \affiliation{$^{1}$ Harish-Chandra Research Institute, A CI of Homi Bhabha National Institute, Chhatnag Road, Jhunsi, Prayagraj - 211019, India } \affiliation{$^{2}$ School of Physical Sciences, Jawaharlal Nehru University, New Delhi 110067, India} \begin{abstract} Starting with a product initial state, squeezed (coherent squeezed) state in one of the modes, and vacuum in the rest, we report that a circular waveguide comprising modes coupled with varying interaction strength is capable of producing genuine multimode entanglement (GME), quantified via the generalized geometric measure (GGM). We demonstrate that for a fixed interaction and squeezing strength, the GME content of the resulting state increases as the range of interactions between the waveguides increases, although the GGM collapses and revives with the variation of interaction strength and time. To illustrate the advantage of long-range interactions, we propose a quantity, called accumulated GGM, measuring the area under the GGM curve, which clearly illustrates the growing trends with the increasing range of interactions. We analytically determine the exact expression of GGM for systems involving arbitrary number of modes, when all the modes interact with each other equally. The entire analysis is performed in the phase-space formalism. We manifest the constructive effect of disorder in the coupling parameter, which promises a steady production of GME, independent of the interaction strength. \end{abstract} \maketitle \section{Introduction} \label{sec:intro} \begin{comment} Entanglement \cite{Horodecki_RMP_2009} is the most well-studied form of correlation in quantum systems, which has no classical analog. Several quantum protocols such as dense coding \cite{Bennett_PRL_1992}, teleportation \cite{Bennett_PRL_1993}, randomness certification \cite{Pironio_Nature_2010}, one-way quantum computation \cite{Raussendorf_PRL_2001}, and secure key distribution \cite{Ekert_PRL_1991, Bennett_TAQC_2014} depend solely on the presence of entangled resources. As such, the detection and quantification of entanglement are fundamental problems in quantum information theory. Several criteria exist to detect whether a state is entangled or not, viz. the partial transpose criterion \cite{Peres_PRL_1996,Horodecki_PLA_1996}, entanglement witnesses \cite{Guhne_JMO_2003,Guhne_PR_2009}, majorization \cite{Nilesen_PRL_2001}, entropic relations \cite{Horodecki_PRL_1994}, and Bell inequalities \cite{Bell_CUP_2002, Seevink_PRL_2002, Laskowski_PRA_2005}, to name a few. On the other hand, the problem of quantifying the amount of entanglement present in a state becomes non-trivial when one moves away from the paradigm of pure bipartite states. Bipartite entanglement can be estimated using concurrence \cite{Hill_PRL_1997}, negativity \cite{Vidal_PRA_2002}, distillable entanglement \cite{Bennett_PRA_1996}, entanglement of formation \cite{Wootters_PRL_1998}, and relative entropy \cite{Vedral_PRL_1997}. However, when trying to quantify entanglement across multiple partitions, one encounters several possibilities e.g., when the state is entangled in some of the parties but separable in the rest. 
For pure multipartite states, genuine entanglement i.e., when each party shares entanglement with every other, can be quantified using a geometric distance-based measure \cite{Shimony_ANYAS_1995, Barnum_JPA_2001} known as Generalized Geometric Measure (GGM) \cite{SenDe_PRA_2010}. GGM can be used to quantify genuine multiparty entanglement in a wide range of pure quantum systems. In the case of mixed states, however, GGM can effectively quantify entanglement only for certain specific classes of states based on convex roof extension \cite{Das_PRA_2016}.\\ Going beyond qudit systems, continuous variable (CV) systems, have proved to be the testing ground for several quantum information processing protocols. Such systems are characterized by position and momentum quadrature variables, and they possess an infinite spectrum \cite{Serafini_2017}. They have been successfully used for the experimental realization of a plethora of tasks, notably dense coding \cite{li2002,mizuno2005}, teleportation \cite{furusawa1998,bowen2003}, cloning \cite{andersen2005}, key distribution \cite{lance2005}, and have also been used to design quantum thermal machines \cite{andolina2018,yong2021,friis2018}. Therefore, the generation and quantification of entanglement in CV systems form a cornerstone in the implementation of quantum protocols. In this regard, optical photonic lattices are a viable option. They are a highly prospective field of research that has the capacity to transform the conventional approach of using light for multiple applications and to usher in a new era of progress in quantum information processing \cite{O_Brien_2009,Wang_2019,moody_2022} and communication. \end{comment} Continuous variable systems, characterized by position and momentum quadratures \cite{Serafini_2017}, are one of the potential platforms for the experimental realization of a wide range of quantum information processing tasks. Notable ones include quantum communication protocols \cite{furusawa1998, bowen2003, mizuno2005} with or without security \cite{li2002, lance2005}, quantum cloning machine \cite{andersen2005}, and the preparation of cluster states \cite{Yoshikawa_APL_2016} essential for building one-way quantum computer \cite{Raussendorf_PRL_2001}. One of the key resources required to design these quantum protocols is multimode entanglement \cite{Horodecki_RMP_2009}. Therefore, the generation of entanglement in physical substrates \cite{Masada_NP_2015, Lenzini_SA_2018, Larsen_NPJ_2019}, its detection \cite{Simon_PRL_2000, Duan_PRL_2000, Giedke_PRA_2001, vanLoock_PRA_2003, Armstrong_NP_2015, Qin_NPJ_2019}, and quantification \cite{Adesso_PRA_2004, Braunstein_RMP_2005, Guhne_PR_2009} have attracted lots of attention . Coupled optical waveguides in a one-dimensional array turn out to be an efficient method to manipulate light \cite{Takesue_Optica_2008, Camacho_Optica_2012, Das_PRL_2017, Kannan_SA_2020, Zhang_Nature_2021} or to simulate quantum spin models via optics \cite{Hunh_PNAS_2016, Bello_PRX_2022}. A periodic arrangement of waveguide arrays can be fabricated using femtosecond laser techniques\cite{pertsch_2004_discrete,itoh_2006_ultrafast,szameit_2010, meany_2015} and nanofabrication methods \cite{Rafizadeh_CLE_1997, Belarouci_JL_2001}, having minimal decoherence \cite{Perets_PRL_2008, Dreeben_QST_2018}. 
Thus they have emerged as suitable candidates for performing continuous time random walks \cite{perets_2008,peruzzo_2010_quantum}, Bloch oscillation \cite{Morandotti_PRL_1999, Pertsch_PRL_1999, Sapienza_PRL_2003, Dreisow_OL_2011}, Anderson localization \cite{Martin_OE_2011}, quantum computation \cite{Fu_SPIE_2003, Politi_S_2008, Paulisch_IOP_2016}, optical simulation \cite{keil_2015_optical} and generation of entangled states \cite{rai_2010_quantum}. \begin{comment} The output photons have unique quantum properties, such as non-local behavior and quantum interference, which are important for a range of quantum technologies. In linear waveguide arrays, entanglement can be produced through processes such as spontaneous parametric down-conversion \cite{Zhang_Nature_2021}, four-wave mixing \cite{Takesue_Optica_2008,Camacho_Optica_2012}, and superconducting circuit-based interfaces \cite{Das_PRL_2017, Kannan_SA_2020}. The entangled quantum states can be represented using either discrete or continuous variables, with the polarization of photons or spin of electrons representing discrete variables, and the quadrature of light beams such as squeezed light or coherent states representing continuous variables.\\ Nonclassical states, utilized as input to the waveguides, have been extensively studied in quantum optics. They have gained significant attention due to their impact on fundamental questions in quantum electrodynamics as well as their practical applications, which are largely based on the ability to lower light quantum fluctuations below the standard quantum limit set by classical electrodynamics in optical measurement processes \cite{gerry_knight_2004,Walls_Millburn_2010}. Here specifically, we are using an input squeezed state \cite{Walls_Nature_1983,Andersen_PS_2016}, the most commonly used type of non-classical light, that exhibits quadrature fluctuations below the zero-point level. Yuen and Shapiro \cite{Yuen_IEEE_1978,Shapiro_IEEE_1979,Yuen_IEEE_1980} proposed the use of such light signals with phase-sensitive quantum noise in optical communication systems. Squeezed states can also be used as an important resource in continuous-variable quantum computing \cite{Menicucci_PRL_2006,Rev_ModPhys_.77.513}, to enhance the sensitivity of quantum sensors \cite{Lawrie_ACS_2019} and to improve the precision of quantum measurements in areas such as gravitational wave detection \cite{schnabel_2017}, illumination \cite{Tan_PRL_2008}, and metrology \cite{giovannetti_2011}. \end{comment} Various studies have utilized different linear waveguide array models to detect continuous variable entanglement e.g., via the van Loock and Furusawa inequalities \cite{vanLoock_PRA_2003, Rai_PRA_2012}, and quantifying entanglement between two modes using logarithmic negativity \cite{Barral_PRA_2017, Barral_PRA_2018, Asjad_PRAl_2021}. More recently, the transfer of quantum states of light between modes in circular waveguide arrays has also been explored \cite{rai_2022_transfer}. A majority of these works are based on Hamiltonians involving interactions only between neighboring modes, popularly known as nearest-neighbor (NN) interactions although non-nearest-neighbor interaction is essential in some situations. For instance, in quantum information and quantum computation applications of optical waveguides, it is necessary to fabricate compact waveguide circuits to reduce the footprints of such circuits \cite{meany_2015}. 
When the separation between the waveguides in such circuits would keep on decreasing, or when the waveguide is long, the higher-order coupling must be taken into account. Note that the long waveguides are necessary for the study of quantum walk in optical waveguide systems \cite{perets_2008, peruzzo_2010_quantum, Poulios_PRL_2014}. In this paper, our system involves interactions between the modes that are not adjacent to each other. \begin{comment} Such long-range (LR) interactions occur naturally in several physical systems such as self-gravitating systems \cite{Padmanabhan_PR_1990, Dauxois_2008}, dipolar ferromagnets \cite{Bitko_PRL_1996, Chakraborty_PRB_2004} and spin-ice materials \cite{Bramwell_Science_2001, Castelnovo_Nature_2008}. Moreover, these interactions are much easier to prepare than NN interactions, as demonstrated in \cite{Islam_Nature_2011, Richerme_Nature_2014}. \end{comment} Benefits of non-nearest-neighbor interactions have been shown in molecular excitation transfer \cite{Gaididei_PRE_1997}, the study of Bloch oscillations in photonic waveguide lattices \cite{Morandotti_PRL_1999, Pertsch_PRL_1999, Sapienza_PRL_2003, Dreisow_OL_2011}, the dynamics of bio-molecules \cite{Mingaleev_JBP_1999} and polymer chains \cite{Hennig_EPJB_2001}. Moreover, long-range (LR) interactions play a vital role in localization \cite{Lopez_NP_2008}, simulations \cite{Aspuru_Nature_2012} and quantum walks in waveguide systems. More importantly, such LR interactions can be simulated and manipulated in laboratories with several physical systems including photonic waveguides \cite{Davis_OL_1996, Kevrekidis_PD_2003, Iyer_OE_2007, Szameit_OL_2009, Longhi_LPR_2009, Garanovich_PR_2012, Golshani_PRA_2013} (c.f. \cite{Jones_JOSM_1965, Estes_PR_1968}), trapped ions \cite{Porras_PRL_2004, Islam_Nature_2011} etc. Furthermore, with the development of fabrication techniques \cite{itoh_2006_ultrafast, szameit_2010}, it is now possible to synthesize waveguide arrays consisting of a large number of waveguides with minimal decoherence. However, all the previous studies on optical waveguides only include a small number of modes and quantify quantum correlations between pairs of modes, despite the fact that multimode entangled states are crucial for several quantum information protocols \cite{Hillery_PRA_1999, Cleve_PRL_1999, Gottesman_PRA_2000, Bruss_PRL_2004, Ishizaka_PRL_2008, Bennett_TAQC_2014}. \begin{comment} Interactions acting over arbitrary lengths have been analyzed in \cite{Szameit_PRA_2008} for waveguide lattices and have been proved to be essential in the miniaturization of integrated optical components \cite{Dreisow_OL_2008} and for the study of multiparty quantum correlations \cite{Gilead_PRA_2017}. Furthermore, cold-atomic systems have been used to control and manipulate such interactions to a high level of accuracy \cite{Friedenauer_Nature_2008, Schauß_Nature_2012, Borish_PRL_2020, Monroe_RMP_2021}. However, the creation of LR interactions presents serious challenges in the optical setup, since the attenuation of evanescent fields causes the correlations to die out exponentially with the waveguide separation \cite{Jones_JOSM_1965, Estes_PR_1968}. They are also theoretically complicated to solve \cite{Szameit_OE_2007}. In spite of such difficulties, long-range interactions can be mimicked by building compact waveguide circuits to reduce the waveguide spacing, as suggested in \cite{Bonneau_NJP_2012, Xu_OE_2013, Silverstone_NP_2014, Qi_JOP_2014}. 
Moreover, photonic waveguides arranged in a zigzag manner allow for precise manipulation of long-range coupling strengths - as studied theoretically in \cite{Kevrekidis_PD_2003} and realized experimentally in \cite{Szameit_OL_2009, Garanovich_PR_2012}. Another well-known method of fabricating waveguides comprises the femtosecond laser writing technique \cite{Davis_OL_1996, Longhi_LPR_2009}. Using the aforementioned processes, experimental simulation of coupling between non-adjacent waveguide modes has been achieved \cite{Iyer_OE_2007, Szameit_PRA_2008, Golshani_PRA_2013}, including imaginary next-nearest-neighbour interaction \cite{Bell_Opt_2017}. Successful applications of long-range waveguide coupling have been demonstrated in Klein tunneling \cite{Longhi_PRB_2010, Dreisow_EL_2012}, driven lattice systems \cite{Dunlap_PRB_1986, Zhu_JPCM_1999, Jivulescu_JPCM_2006}. Superlattice with long-range interactions has been proposed in \cite{Zhao_PRB_1997} and arbitrary long-range coupling has been studied both theoretically \cite{Martínez__JPA_2012} and also been proposed to be accessible experimentally \cite{Stockhofe_PRA_2015}. \end{comment} Here, we provide a technique that uses circular waveguide arrays that are evanescently coupled to produce genuine multimode entangled states (GME) from product ones. We point out that our work is novel since most of the earlier research works relating to continuous variable (CV) multimode entanglement involves the use of bulk optical elements, which are large and inherently sensitive to decoherence resulting in a reduction of entanglement content. In this article, we focus on integrated photonic waveguides which provide novel tools and expanded capabilities for quantum information technology \cite{meany_2015}. This is because these waveguides are compact and can be precisely manufactured using a femtosecond laser direct writing method. These platforms guarantee a very low loss factor and are interferometrically stable, scalable, and less susceptible to decoherence, thereby ensuring robustness against noise. We quantify genuine multimode entanglement by computing the generalized geometric measure (GGM) \cite{Shimony_ANYAS_1995, Barnum_JPA_2001, Wei_PRA_2003, SenDe_PRA_2010, Das_PRA_2016, Buchholz_AP_2016} for CV Gaussian system by using phase-space formalism \cite{Roy_PRA_2020}. \begin{comment} To determine the GGM, we utilize the phase space formalism \cite{Adesso_OSID_2014}. Our calculation begins with three, four, five, and six modes where we consider long-range interactions along with the nearest-neighbor coupling. We then extend this calculation to $N$-modes and present a general formalism for quantifying genuine multimode entanglement generation through long-range interactions. When disorder is incorporated in the parameters of the waveguide Hamiltonian, we demonstrate how it helps provide a stable supply of entanglement in the output state. We further study the block entropy of entanglement and investigate the effect of long-range interaction through the behavior of the Renyi-$2$ entropy of the reduced block with its length. \end{comment} In particular, the multimode entangled state is generated using waveguides organized in a circular way and coupled with varying interaction strengths, where a squeezed state of light is given as input in one mode and vacuum in the other modes. Notice that our work is based on linear waveguides arranged in circular configuration and does not require nonlinear process which are relatively more difficult to work with. 
We first observe that, irrespective of the range of interactions, the GGM collapses and revives with the variation of the coupling constant and time. By exploiting the symmetry of the system, we analytically arrive at a compact form of the GGM when the dynamics are driven by LR interactions having equal strengths. We illustrate that the time-varying GME content can be higher for the LR model than for the NN model for a fixed coupling and squeezing strength, although the maximum GGM produced with NN coupling coincides with the one generated by waveguides having LR interactions. In order to illustrate the advantage of LR interactions, we introduce a quantity, referred to as accumulated GGM (AcGGM), which measures the area under the GGM curve over a fixed range of the interaction strength and for a given squeezing parameter. We report that the AcGGM increases with the increase of the range of interactions while it decreases with the increase of the number of modes in the circular waveguides, thereby demonstrating a competition between the range of interactions and the number of modes involved in the circular waveguide circuit. We also show that if disorder is introduced in the couplings, the fluctuations in the generated quenched averaged GGM decrease at the expense of the maximum GGM content. This indicates that the generation of non-fluctuating genuine multimode entanglement can only be accomplished when there are some imperfections in the coupling strength, which naturally arise during the implementation of the waveguide system. Additionally, the quenched averaged GGM increases with the increase of the range of interactions involved in the evolution process. Our paper has the following structure: Sec. \ref{sec:pre} provides a brief overview of the theoretical model for a circular array of linear waveguides, including the Hamiltonian and the input state. In Sec. \ref{sec:LR}, we explain the benefits of taking long-range interactions for creating genuine multimode entanglement in four, five, and six modes and further extend the analysis to $N$ modes. The advantage of LR interactions is exhibited by introducing the quantity AcGGM in Sec. \ref{subsubsec:GGM_net}. Sec. \ref{sec:disorder} explores the impact of disorder present in the coupling strength on multimode entanglement. In Sec. \ref{sec:entropy}, we analyze the block entropy of entanglement. Finally, we conclude in Sec. \ref{sec:conclu}. \section{Design of the Waveguide setup} \label{sec:pre} \begin{figure} \caption{Circular waveguide setup for the generation of genuine multimode entanglement between eight optical modes. The dark circle represents the mode in which the squeezed state $|\psi_s\rangle$ is given as input, whereas the light circles denote the vacuum $|0\rangle$ modes. The dark green curved lines correspond to the nearest-neighbor (NN) interaction. Long-range interactions are shown as follows: next-nearest-neighbor (NNN) interaction with light yellow straight lines and next-to-next-nearest-neighbor (NNNN) interaction with very light dashed blue lines. For a waveguide with a large number of modes, higher levels of long-range interaction have to be incorporated. The interaction strengths of the NN and long-range interactions, in general, can be different.} \label{fig:schematic} \end{figure} Let us first introduce the model that describes the evolution of the product input state into a genuinely entangled multimode state.
The system comprises $N$ identical waveguides arranged in a circular configuration and coupled to each other, with varying interaction strength (for a schematic description of the system, see Fig. \ref{fig:schematic} for $N = 8$). The Hamiltonian that governs the interactions of the $N$ modes within the system is represented by \begin{equation} \begin{split} \hat{H}=&\sum_{i=1}^{[\frac{N}{2}]-1}\hbar J'_i\sum_{j=1}^{N}( \hat{a}^\dagger_j\hat{a}_{j+i} +H.c.)+\\&\frac{1}{1+\frac{1}{2}(1+(-1)^N)}\hbar J'_{[\frac{N}{2}]}\sum_{j=1}^{N}( \hat{a}^\dagger_j \hat{a}_{j+[\frac{N}{2}]} + \text{H.c.}), \label{eq:H_n} \end{split} \end{equation} where an increasing $i$ indicates an increasing range of interactions. Here $N+j\equiv j(mod~N)$, $\hat{a}_j$ and $\hat{a}_j^\dagger$ are the bosonic annihilation and creation operators respectively, corresponding to the $j$-th mode, H.c. stands for the Hermitian conjugate, $J'_i$ denotes interaction strength or coupling constants between waveguide modes with $J'_1 = J'$ and $J'_i = n_i J'$ for $i \geq 2$ and we consider $\hbar=1$. Thus $J'$ represents the strength of the nearest-neighbor (NN) coupling. The long-range interaction is introduced by making $n_i >0$ for $i \geq 2$. We must note that $n_i \neq 0$, if and only if $n_j \neq 0$ $\forall j < i$, with the condition $0< n_i \leq 2$ \cite{Dreisow_OL_2008}. The second term in Eq. \eqref{eq:H_n} takes care of the longest range of interactions between modes. \textbf{Note $\mathbf{1}$.} The time evolution operator corresponding to the Hamiltonian in Eq. \eqref{eq:H_n} is given by $\exp(-i \hat{H}t)$. Therefore, upon evolution, the final state of the waveguide system contains terms of the form $J' t$. We relabel such parameters as $J$, representing the interaction strength or the coupling parameter and the range of the interactions is tuned with \(n_i\). Thus, the variation with respect to $J$ also represents the variation in time. Moreover, note that $t=z\mu/c$, where $\mu$ is the refractive index for the waveguide mode, which relates the time duration $t$ to the propagation distance $z$. In order to create a genuine multimode entangled state from a fully product state, we study the dynamics induced by the aforementioned interactions to identify the optimal configuration of the waveguide system. In particular, one of the modes, say, the first mode, is chosen to be a single-mode squeezed state, $\ket{\psi_{\text{s}}}=\exp(\frac{1}{2}(\xi^* \hat{a}_j^2-\xi \hat{a}_j^{\dagger^2}))\ket{0}$ with $j$ being the input site, the squeezing parameter is $\xi= se^{i\theta}$, where $s$ is the squeezing strength and $\theta$ represents the squeezing angle. The rest of the modes are in the vacuum state, $|0\rangle$, i.e., the $N$-mode initial state takes the form as \begin{equation} |\psi\rangle_{\text{in}} = \ket{\psi_{\text{s}}} \otimes |0\rangle^{\otimes N - 1}. \label{eq:initial_state} \end{equation} The covariance matrix corresponding to the above initial state has the form, \begin{eqnarray} \nonumber \Xi_i=\!\!\!\!\!&&\frac{1}{2}\Bigg[\begin{pmatrix} \cosh 2s + \cos \theta \sinh 2s & \sin \theta \sinh 2s \\ \sin \theta \sinh 2s & \cosh 2s - \cos \theta \sinh 2s \end{pmatrix} \\ &&\oplus \mathbb{I}^{\oplus N - 1}\Bigg], \label{eq:initial_cov-mat} \end{eqnarray} where $\mathbb{I} = \text{diag}(1,1)$ is the $2 \times 2$ identity matrix. 
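For concreteness, the following minimal Python sketch (our own illustration, not part of the formalism; the helper names, the mode-wise quadrature ordering $(x_1,p_1,\dots,x_N,p_N)$, and the convention $\hbar = 1$ with vacuum covariance $\frac{1}{2}\mathbb{I}$ are assumptions) assembles the net coupling matrix entering Eq. \eqref{eq:H_n} and the initial covariance matrix of Eq. \eqref{eq:initial_cov-mat}:
\begin{verbatim}
import numpy as np

def coupling_matrix(N, J, n_range):
    """Matrix K with K[j, k] equal to the net coefficient of a_j^dag a_k in Eq. (1).
    n_range[i] is the relative strength n_{i+2} of the range-(i+2) coupling; the
    prefactor of the longest-range term for even N is absorbed automatically."""
    strengths = [J] + [n * J for n in n_range]      # J'_1, J'_2, ..., J'_{[N/2]}
    K = np.zeros((N, N))
    for i, Ji in enumerate(strengths, start=1):
        if 2 * i == N:                              # longest range for even N:
            Ji /= 2.0                               # the sum over j counts each pair twice
        for j in range(N):
            K[j, (j + i) % N] += Ji
            K[(j + i) % N, j] += Ji
    return K

def initial_covariance(N, s, theta):
    """Covariance matrix of Eq. (3): squeezed vacuum in mode 1, vacuum elsewhere,
    in the mode-wise quadrature ordering (x_1, p_1, ..., x_N, p_N)."""
    xi = 0.5 * np.eye(2 * N)
    c, sh = np.cosh(2 * s), np.sinh(2 * s)
    xi[:2, :2] = 0.5 * np.array([[c + np.cos(theta) * sh, np.sin(theta) * sh],
                                 [np.sin(theta) * sh, c - np.cos(theta) * sh]])
    return xi

K = coupling_matrix(N=4, J=1.0, n_range=[0.5])      # NN plus NNN with n_2 = 0.5
Xi_in = initial_covariance(N=4, s=0.8, theta=0.0)
\end{verbatim}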
Note that due to the periodicity present in the model, the position of the mode in which the input squeezed state is taken cannot alter the multimode entanglement content of the final state. The symplectic formalism is used to analyze the evolution of the Gaussian input state and to characterize its entanglement (see Appendix \ref{app:CV} for details of the analytical formalism). The covariance matrix corresponding to the initial state of the system is denoted as $\Xi_{\text{in}}$. The final state of the system, upon evolution, is characterized by ${\Xi_f}=S_H {\Xi_{in}} S_H^T$, where $S_H$ is the symplectic transformation of the waveguide Hamiltonian, as defined in Appendix \ref{app:CV}, for which the generalized geometric measure is computed (see Appendix. \ref{app:CV_GGM} for the computation of GGM for a pure CV Gaussian state). In Appendix \ref{sec:app_3-mode}, we present the simplest model involving a state with three modes propagating through circularly coupled waveguide modes that have only the nearest-neighbor interaction. It is important to emphasize here that such treatment provides the possibility to address this problem involving an arbitrary number of modes. \textbf{Remark $\mathbf{1}$}. Instead of the squeezed state, if one considers a coherent state as the input, such a generation of multimode entanglement is not possible. This can be explained by considering the covariance matrix of the coherent state, which is nothing but $\frac{1}{2} \mathbb{I}$. Thus, in this scenario, the input covariance matrix reduces to $\Xi_{i} = \frac{1}{2} \mathbb{I}^{\oplus N}$ and the final state of the system is denoted by a covariance matrix proportional to the identity matrix (since, $S_H S_H^T = \mathbb{I}^{\oplus N}$). Thus, starting from a product state, we again end up with a product state after evolution and the entanglement generation cannot occur. \textbf{Remark $\mathbf{2}$}. With a squeezed coherent state as input in one of the modes (and vacuum in the rest) of Eq. \eqref{eq:initial_state}, the entanglement generated among the $N$-modes is the same as that obtained via an input squeezed state. \section{Advantage of long-range interaction in Entanglement creation} \label{sec:LR} In typical waveguide systems studied in the literature, only the NN interactions are considered, while higher-order couplings lead to bosonic Hamiltonians with LR interactions as in Eq. \eqref{eq:H_n} which will be the main focus of this work. The motivation behind such consideration is the fact that in several physical systems, especially quantum spin models, LR interactions have been shown to typically create highly multimode entangled states, which serve as resources for quantum information processing tasks. \begin{figure*} \caption{Generation of genuine multimode entanglement in a four-mode circularly coupled waveguide setup. (a). Variation of GGM, $\mathcal{G} \label{fig:4-mode} \end{figure*} Before going into the results concerning circular waveguides with an arbitrary number of modes, let us first investigate the situation involving a small number of modes. Such analysis can also illustrate the benefit of LR interactions for producing genuine multimode entanglement (quantified by the generalized geometric measure), with the addition of higher-order couplings one by one. 
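Throughout, the GGM of the evolved pure Gaussian state can be obtained directly from its covariance matrix by scanning the symplectic eigenvalues of all reduced states, following the measure recalled in Appendix \ref{app:CV_GGM}. The following brute-force sketch (assuming NumPy; function names are ours) illustrates this; for larger $N$ the enumeration can of course be pruned using the circular symmetry of the setup:
\begin{verbatim}
import numpy as np
from itertools import combinations

def symplectic_eigenvalues(Xi):
    """Symplectic eigenvalues of a covariance matrix in (x1,p1,x2,p2,...) ordering."""
    m = Xi.shape[0] // 2
    Omega = np.kron(np.eye(m), np.array([[0.0, 1.0], [-1.0, 0.0]]))
    ev = np.abs(np.linalg.eigvals(1j * Omega @ Xi))
    return np.sort(ev)[::2]          # eigenvalues of i*Omega*Xi come in +/- pairs

def ggm(Xi_f):
    """GGM of a pure N-mode Gaussian state with covariance matrix Xi_f:
    1 - max over all m-mode reductions (m <= N/2) of prod_i 2/(1 + 2 nu_i)."""
    N = Xi_f.shape[0] // 2
    best = 0.0
    for m in range(1, N // 2 + 1):
        for modes in combinations(range(N), m):
            idx = [2 * k + r for k in modes for r in (0, 1)]
            nus = symplectic_eigenvalues(Xi_f[np.ix_(idx, idx)])
            best = max(best, np.prod(2.0 / (1.0 + 2.0 * nus)))
    return 1.0 - best

# usage: Xi_f = S_H @ Xi_in @ S_H.T, followed by ggm(Xi_f)
\end{verbatim}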
\subsection{Circular waveguide with four modes} \label{subsec:4-mode_LR} Let us consider a four-mode circular arrangement of waveguides, where, in addition to the NN interaction, the next-nearest-neighbor (NNN) interaction is also introduced. Before considering the situation with both NN and NNN interactions, let us first concentrate on the dynamics of multimode entanglement in the model with only NN interaction. \subsubsection{Waveguide with nearest-neighbor interactions} \label{subsubsec:4-mode_NN} Let us consider the Hamiltonian for the four-mode waveguide system given in Eq. \eqref{eq:H_n} with $N = 4$ by setting $n_i = 0$ (for $i \geq 2$) which simulates only the NN interactions. By taking the initial state of the system as $\ket{\psi_s}\bigotimes\ket{0}^{\otimes3}$, whose corresponding covariance matrix is given by Eq. \eqref{eq:initial_cov-mat}, the GGM is determined by finding the symplectic eigenvalues of the reduced covariance matrices - (i) single mode: $\Xi_f^i$ with $i =1, \dots, 4$ and (ii) two-mode: $\Xi_f^{1j}$ with $2\leq j\leq 4$. It is observed that for each such reduced covariance matrix, there is only one symplectic eigenvalue which is not equal to $1/2$. We represent such symplectic eigenvalues of the bipartitions as $\mathbf{v} = \{\nu_i, \dots, \nu_{1j}, \dots\}$. Therefore, the GGM reduces to \begin{equation} \mathcal{G}_4^{\text{int}} = \mathcal{G}_4^{\text{NN}} = 1 - \max_{\mathbf v} \Big[\frac{2}{1 + 2 \nu_k} \Big], \label{eq:ggm_4_NN} \end{equation} where the superscript, $\text{int}$, represents the maximum LR interaction considered, while the subscript is for the total number of modes, and $k$ runs over the elements of the set $\mathbf{v}$. \textit{Notice first that the above formalism holds for any number of modes and range of interactions (e.g., NN, NNN, etc.) as we will show in the succeeding section.} In the four-mode scenario, we find that the GGM is not affected by the squeezing angle, $\theta$. Additionally, as the squeezing strength increases, so does the GGM, and it is periodic with respect to $J$, with the period being $\pi$ (see Fig. \ref{fig:4-mode} (a)). In this scenario, it is important to note that the $\nu_k$ values are dependent on both $J$ and $s$. \subsubsection{Model with next-nearest-neighbor interactions} \label{subsubsec:4-mode_NNN} The Hamiltonian for simulating both the NN and the NNN interaction in a four-waveguide system can be obtained from Eq. \eqref{eq:H_n} by setting $N = 4$, $J_1 = J$, and $J_2 = n_2 J_1 = nJ$. The method for calculating the GGM is similar to that for the nearest-neighbor case and it is dependent on $s$, $J$, and $n$. The strength of the next nearest-neighbor interaction can be greater than ($n > 1$), or equal to ($n = 1$), or less than ($n<1$) that of the NN interaction. Let us now analyze the behavior of the genuine multimode entanglement with time and compare it with the scenario involving only NN interactions. To study it, we compute $\mathcal{G}_{4}^{\text{NNN}}$. The juxtaposition of $\mathcal{G}_{4}^{\text{NN}}$ and $\mathcal{G}_{4}^{\text{NNN}}$ reveals the following facts: \begin{enumerate} \item Like with NN interactions, $\mathcal{G}_{4}^{\text{NNN}}$ increases with $s$ and is $\pi$-periodic with $J$.\ \item On the other hand, with non-vanishing $n$, we find that $\mathcal{G}_{4}^{\text{NNN}} \geq \mathcal{G}_{4}^{\text{NN}}$ for a fixed value of $J$, although they coincide at the point where both of them reach their maximum as well as when they both are minimum. 
\item Furthermore, the difference $(\mathcal{G}_{4}^{\text{NNN}} - \mathcal{G}_{4}^{\text{NN}})$ becomes maximum irrespective of $J$ when $n = 1$, i.e., when the strength of the NNN interaction coincides with that of the NN case (see Fig. \ref{fig:4-mode}(b)). In other words, the enhancement of genuine multimode entanglement through LR over NN interaction is more pronounced when all the modes interact with each other equally. \end{enumerate} \subsection{Accumulated GGM} \label{subsubsec:GGM_net} The question that naturally arises from the observations of systems having NN and NNN interaction is, how to manifest the beneficial role of LR interactions from the pattern of the GGM. Towards that aim, we introduce a figure of merit which we call \textit{accumulated GGM} (AcGGM), defined as the area under the GGM curve over a given range of the coupling constant $J$. It can be interpreted as the entanglement assembled among the modes, during a particular time interval when the interaction is switched on. Mathematically, AcGGM can be defined as \begin{equation} \langle \mathcal{G}_{N}^{\text{int}} \rangle_{J_0} = \frac{1}{J_0}\int_{J = 0}^{J_0} \mathcal{G}_{N}^{\text{int}} d J, \label{eq:GGM_avg} \end{equation} where $J_0$ is the interval of $J$ over which the area under the GGM curve is calculated. The higher the value of $\langle \mathcal{G}_{N}^{\text{int}} \rangle_{J_0}$, the better the arrangement for creating genuine multimode entanglement within that range of $J$. \subsubsection{Accumulated GGM highlighting the power of LR interactions} \label{subsubsec:4-mode_ACGGM} We now focus on the impact of LR interactions on AcGGM. A few prominent features emerge -- \begin{itemize} \item \textbf{Input squeezing:} For a given type of interaction, $ \langle \mathcal{G}_{N}^{\text{int}} \rangle_{J_0}$ increases with the increase in the input squeezing strength $s$. \item \textbf{LR interaction:} For a given $s$, we clearly observe that $\langle \mathcal{G}_{N}^{\text{NNN}} \rangle_{J_0} > \langle \mathcal{G}_{N}^{\text{NN}} \rangle_{J_0}$ (see Fig. \ref{fig:4-mode} (c)). \item \textbf{Interaction strength:} Another interesting aspect arises - although the GGM collapses and revives with $J$, AcGGM for a reasonably high $J_0$ value can be obtained without significant fluctuations. \end{itemize} \textbf{Note $\mathbf{2}$.} \textit{Five-mode circular waveguide system} - The GGM for the five-mode waveguide exhibits qualitatively similar properties to $\mathcal{G}_{4}^{\text{int}}$. By taking the same kind of initial state, i.e., by choosing $|\psi_s\rangle \otimes |0\rangle^{\otimes 4}$, which evolves according to $\hat{H}$ in Eq. \eqref{eq:H_n} with $J_1 = J_2 = J$, no periodicity in GGM with $J$ is observed for the nearest-neighbor case, while $\mathcal{G}^{\text{NNN}}_5$ exhibits a period of $\frac{4\pi}{5}$. Similar to the case of the four-mode waveguide, the NNN interaction, with strength equal to that of NN, furnishes a higher AcGGM as compared to the case with only nearest-neighbor coupling. \subsection{Six-mode circular waveguide} \label{subsec:6-mode_LR} \begin{figure} \caption{Circularly coupled waveguide involving $6$-modes. (Upper panel (a).) The genuine six-mode entanglement $\mathcal{G} \label{fig:6-mode} \end{figure} We now proceed to carry out the investigation when the waveguide arrangement comprises six modes, thereby incorporating a higher level of long-range interaction like the next-to-next-nearest-neighbor (NNNN) interaction. 
It is interesting to find out whether LR interactions are indeed responsible for creating genuine multimode entanglement even in the presence of weak coupling strengths. The Hamiltonian for the evolution, in this case, can be realized according to Eq. \eqref{eq:H_n} with $N = 6$, $J_1 = J_2 = J$ and $J_3 = n_3 J = n J$, where the same interaction strength is taken for the NN and NNN couplings, based on the observations for the four-mode waveguides. Interestingly, the maximum GGM in the NNNN interaction case is again obtained when the coupling strength is equal to that of the short-range interactions, NN and NNN. Again the GGM oscillates with $J$, irrespective of short- and long-range interactions, although, unlike the four-mode scenario, the pattern of the GGM changes with the introduction of LR interactions. In particular, the GGM vanishes with NNNN and NNN interactions when $J$ is a multiple of $\frac{4 \pi}{6}$, which is not the case for NN interactions; moreover, the maximal value of the GGM is obtained more frequently with respect to $J$ in the presence of both LR interactions as compared to the NN interaction. Akin to the four- and five-mode waveguide scenarios, $\mathcal{G}_{6}^{\text{NNNN}} \geq \mathcal{G}_{6}^{\text{NNN}} \geq \mathcal{G}_{6}^{\text{NN}}$ (see Fig. \ref{fig:6-mode} (a)), although the maximum value of the GGM cannot be increased by the LR interactions for a fixed $J$ value. Quantitatively, this feature can be captured by considering the AcGGM, i.e., $\langle \mathcal{G}_{6}^{\text{NNNN}}\rangle_{J_0} > \langle \mathcal{G}_{6}^{\text{NNN}}\rangle_{J_0} > \langle \mathcal{G}_{6}^{\text{NN}}\rangle_{J_0}$, as depicted in Fig. \ref{fig:6-mode} (b). We observe that LR interactions with low coupling strength can indeed create more GGM than in the NN case, as is illustrated in Fig. \ref{fig:6-mode} (a) for $0 \leq J \leq 1$. \textit{The results of circular waveguide setups with four, five, and six modes strongly indicate that incorporating long-range interactions is beneficial and that the same interaction strength for all kinds of coupling provides the best genuine multimode entanglement.} \subsection{GME produced with $N$-mode circular waveguide} \label{subsec:N-mode_LR} Motivated by the results obtained in the previous subsections, we compute the genuine multimode entanglement produced for an arbitrary number of modes, say $N$, arranged in a circle. The Hamiltonian is that of an $N$-mode circularly coupled waveguide system, with the input state specified by $\ket{\psi}_{in}^{1 2 \cdots N}=\ket{\psi_s}\otimes\ket{0}^{\otimes N-1}$. Since the studies in the previous subsections display the preferable role of equal short- and long-range interaction strengths, we take all the modes to be interacting equally with each other. Moreover, this configuration produces the maximum amount of AcGGM, as discussed beforehand. Let us analyze the dynamics of GGM in this situation. Since the definition of GGM involves the Schmidt coefficients in an arbitrary number of bipartitions, the computation of GGM is hard for systems involving an arbitrary number of modes, unless some symmetry present in the system is identified. Previous configurations with four, five, and six modes indicate that there is a symmetry under permutation of the modes in the evolved state, $\ket{\psi}_{f}^{1 2 \cdots N}$, due to the circular configuration. As a consequence of this symmetry, only $(N-1)$ distinct bipartitions need to be considered. 
For an even number of modes i.e., $N = 2m$, the contributing bipartitions, pertaining to a given number of submodes, can be divided into two sets - one set which involves the mode in which the squeezed input state is taken, and another set that does not include the mode with the squeezed input state. Without loss of generality, if we start with a state in which the input state is plugged in the first mode, the bipartitions among the modes under study are $1:\text{rest}$, $2:\text{rest}$, $12:\text{rest}$, $23:\text{rest}$, $\cdots$, $12\dots[N/2]-1:\text{rest}$, $23\dots[N/2]:\text{rest}$ and $12\dots[N/2]:\text{rest}$, while if $N = 2m + 1$, we must consider $23\dots[N/2]+1:\text{rest}$, as an additional bipartition (here $``\text{rest}"$ in $i:\text{rest}$ denotes all the modes except $i$). We observe that the largest symplectic eigenvalue in $2:\text{rest}$ bipartition ultimately leads to the GGM, given by \begin{widetext} \begin{eqnarray} \nu = \frac{\sqrt{\frac{1}{4} f_1(N)-2 \sinh ^2s \left[f_2(N) \cos \left(\frac{J N}{2}\right)+\cos\left( J N\right)\right]+ f_3(N) \cosh 2 s}}{N^2}, \end{eqnarray} \end{widetext} where $f_1(N) = \left(N^4-4 N^2+12\right)$, $f_2(N) = \left(N^2-4\right)$, and $f_3(N) = \left(N^2-3\right)$. The expression of the GGM then takes the form as \begin{equation} \label{N-mode_GGM} \mathcal{G}^{LR}_N=1-\frac{2}{2 \nu +1},~~~~~~~\text{for}~~N\geq4. \end{equation}\\ \begin{figure} \caption{ Accumulated GGM, $\langle{\mathcal{G} \label{fig:GGM_N} \end{figure} It can be easily seen that $\mathcal{G}^{LR}_N$ is a periodic function of $J$ with a period of $\frac{4\pi}{N}$. Hence, we calculate the accumulated GGM over a period, defined in Eq. \eqref{eq:GGM_avg} with $J_0 = \frac{4 \pi}{N}$, to see how it behaves with an increasing number of modes. $\langle{\mathcal{G}^{LR}_N}\rangle_{\frac{4 \pi}{N}}$ decreases monotonically with an increasing $N$ at a fixed initial squeezing strength as shown in Fig. \ref{fig:GGM_N} with $s = 1.0$. This indicates that for a given initial squeezing strength of the input mode, creating genuine multimode entanglement becomes more difficult with an increase in the number of interacting modes. Moreover, the maximum $\mathcal{G}^{LR}_N$ achievable also decreases monotonically with increasing modes. Both the results demonstrate that there is a trade-off between the increment in the number of modes and the LR interactions involved during the dynamics. \section{Creation of constant GGM in waveguides with disorder} \label{sec:disorder} \begin{figure*} \caption{Quenched averaged genuine multimode entanglement in circular waveguides coupled with disordered interaction strength. (a). The variation of the four-mode quenched GGM, $\langle \mathcal{G} \label{fig:GGM_dis} \end{figure*} Due to the periodic nature of multimode entanglement as described in the preceding section, the method can be argued to have a limitation. In particular, since it collapses and revives with the variation of the interaction strength $J$, we may end up with almost vanishing entanglement among the modes for certain values of $J$. Note that, since $J$ contains an implicit factor of time ($t$), this implies that the genuine multimode entanglement fluctuates with time, thereby creating entanglement that can be used only at certain instants. A natural question at this point is how one can circumvent this feature. We indeed show that a stable (fluctuation-free) multimode entangled state can be produced when the system has some imperfections. 
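The extent of these collapses and revivals can be checked directly from the closed-form expression obtained above. The following minimal sketch (assuming NumPy; function names are ours) evaluates Eq. \eqref{N-mode_GGM} and its accumulated value over one period, Eq. \eqref{eq:GGM_avg} with $J_0 = 4\pi/N$:
\begin{verbatim}
import numpy as np

def nu_LR(N, J, s):
    """Symplectic eigenvalue entering the closed-form GGM of the fully
    connected ring (expression for nu given in the text)."""
    f1 = N**4 - 4 * N**2 + 12
    f2 = N**2 - 4
    f3 = N**2 - 3
    arg = (0.25 * f1
           - 2.0 * np.sinh(s)**2 * (f2 * np.cos(J * N / 2.0) + np.cos(J * N))
           + f3 * np.cosh(2.0 * s))
    return np.sqrt(arg) / N**2

def ggm_LR(N, J, s):
    """Closed-form GGM for N >= 4 with all couplings equal."""
    return 1.0 - 2.0 / (2.0 * nu_LR(N, J, s) + 1.0)

def acggm_one_period(N, s, samples=2001):
    """Accumulated GGM over one period J0 = 4*pi/N (trapezoidal rule)."""
    J0 = 4.0 * np.pi / N
    J = np.linspace(0.0, J0, samples)
    g = ggm_LR(N, J, s)
    dJ = J[1] - J[0]
    return (g.sum() - 0.5 * (g[0] + g[-1])) * dJ / J0

# sanity checks: ggm_LR(N, 0.0, s) = 0, and the curve repeats after 4*pi/N
\end{verbatim}
For $s = 1.0$, this procedure should reproduce the monotonic decrease of $\langle\mathcal{G}^{LR}_N\rangle_{4\pi/N}$ with $N$ shown in Fig. \ref{fig:GGM_N}.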
Given the experimental challenges in implementing interactions of a fixed strength, it is quite natural to consider that $J$ does not remain constant but fluctuates around the desired value. Typically, disorder in the system parameters has a detrimental effect on the system properties, although there are certain instances in which imperfections can enhance physical characteristics \cite{Aharony_PRB_1978, Feldman_JPA_1998, Abanin_PRL_2007, Niederberger_PRL_2008, Prabhu_PRA_2011, Sadhukhan_NJP_2015, Mishra_NJP_2016, Sadhukhan_PRE_2016} like magnetization and entanglement in the modes \cite{Bera_PRB_2016, Bera_PRB_2017, Mishra_NJP_2016}. We will illustrate here other aspects of the disordered model. To simulate such behavior, we consider a disordered model, in which $J$ comes from a Gaussian distribution of mean $J_m$ and standard deviation $\sigma$. Here, $J_m$ is the desired interaction strength to be tuned, and a higher $\sigma$ indicates a larger fluctuation around the value $J_m$, thereby measuring the strength of the disorder. We assume that the time scale taken by the disordered interaction strength to attain its equilibrium value is much larger than the implementation time, which allows us to define the \textit{quenched average GGM} over the Gaussian distribution as $$\langle{\mathcal{G}^{LR}_N}\rangle_G=\frac{1}{\sqrt{2\pi}\sigma}\int_{-\infty}^{+\infty}\mathcal{G}^{LR}_N~\exp(-\frac{(J-J_m)^2}{2\sigma^2})~dJ.$$ It is observed that $\langle \mathcal{G}^{LR}_N \rangle_G$ fluctuates with respect to $J_m$ with the same period $4\pi/N$, although the amplitude of the fluctuations decreases with increasing $\sigma$, as shown for a four-mode disordered waveguide setup in Fig. \ref{fig:GGM_dis}(a). For a high enough standard deviation, e.g., $\sigma \geq 2$, the entanglement fluctuation is negligible and an almost constant value is obtained over the entire range of $J_m$. Therefore, our findings show that although $\langle \mathcal{G}_{N}^{LR} \rangle_G$ decreases in comparison to the maximum GGM achieved in the ordered model, a constant GGM with lower fluctuations can only be obtained when the evolution occurs according to the disordered model. 
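Numerically, the Gaussian average defining $\langle\mathcal{G}^{LR}_N\rangle_G$ can be evaluated efficiently by Gauss--Hermite quadrature. The following minimal sketch (assuming NumPy and reusing \texttt{ggm\_LR} from the sketch above; the function name is ours) illustrates this:
\begin{verbatim}
import numpy as np

def quenched_ggm(N, J_m, sigma, s, nodes=80):
    """Quenched average GGM: Gaussian average of the closed-form GGM over
    J ~ Normal(J_m, sigma^2), evaluated by Gauss-Hermite quadrature."""
    x, w = np.polynomial.hermite.hermgauss(nodes)
    J = J_m + np.sqrt(2.0) * sigma * x
    return np.sum(w * ggm_LR(N, J, s)) / np.sqrt(np.pi)

# example: for N = 4 the residual oscillation of <G>_G with J_m shrinks as sigma grows
# for J_m in np.linspace(0.0, np.pi, 50): print(quenched_ggm(4, J_m, 2.0, 1.0))
\end{verbatim}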
Furthermore, it is observed that the constructive effect of long-range interactions persists even in the presence of disorder, which implies that the Hamiltonian comprising all modes interacting with equal strength provides a much higher quenched average GGM than that with NN interactions. The decreasing fluctuations in the quenched average GGM can be quantified by the variance of $\langle \mathcal{G}^{LR}_{N} \rangle_G$ over a cycle, which is defined as \begin{equation} \Delta^2 \mathcal{G} = \langle \langle \mathcal{G}^{LR}_{N} \rangle_{G}^{2} \rangle_{4\pi/N} - \langle \langle{\mathcal{G}^{LR}_N}\rangle_{G}\rangle_{4\pi/N}^{2}, \label{eq:fluctuating} \end{equation} where the average is taken with respect to $J_m$ over a full cycle. We call this quantity the \textit{breached GGM}, whose low value implies the generation of stable quenched genuine multimode entanglement. We find that this is indeed the case, i.e., the presence of disorder reduces the fluctuations in the quenched average accumulated GGM. Moreover, $\Delta^2 \mathcal{G}$ decreases with the increase of $N$, as illustrated in Fig. \ref{fig:GGM_dis} (b). Our studies demonstrate that the oscillations in the quenched average GGM disappear with the increase of the disorder strength and the number of modes, although, as in the ordered system, increasing the system size has a destructive effect on the creation of GGM. \section{Block entanglement entropy} \label{sec:entropy} We have already established that the genuine multimode entanglement content can be enhanced with the addition of LR interactions. Instead of quantifying the multimode entanglement geometrically, let us study the entanglement production in bipartitions, i.e., we compare the block entropy of entanglement produced through dynamics with the nearest-neighbor interaction as well as with the long-range interactions. In particular, we look into the scaling of the entropy \cite{Wehrl_RMP_1978} for the reduced density matrices of the final state, $\ket{\psi}_f^{12...N}$, with respect to the number of subsystems comprising the reduced state. Note that we need to consider $[N/2]-1$ reduced density matrices for an $N$-mode system, which are $\rho_2$, $\rho_{23}$, ..., $\rho_{23...[N/2]}$ where $\rho_{23...i}=\tr_{1,i+1,...N}\ket{\psi}_f^{12...N}\bra{\psi}_f^{12...N}$. For a fixed system size, we compute the R\'enyi-$2$ block entropy defined as $S(\rho_L) = -\ln [\Tr(\rho_L^2)]$ \cite{Renyi_1961, nielsen_2010} by varying the block size $L$, where $\rho_L = \Tr_{\bar{L}} |\psi\rangle_{f}^{12...N}\langle\psi|_{f}^{12...N}$, with $\bar{L}$ being the rest of the modes which are not included in the block, $L$. In the covariance matrix formalism, it can be simplified to $S(\rho_L)=\frac{1}{2}\ln(2^{2L} \det \Xi_L)$, where $\det \Xi_L$ is the determinant of the covariance matrix corresponding to the $L$-mode reduced state $\rho_L$ \cite{Adesso_OSID_2014}. In the case of NN interaction (see Fig. \ref{fig:block_ent}(a)), it can be observed that $S(\rho_{L})$ increases steadily with $L$ up to a certain block size and then saturates; the block size at which the saturation sets in increases with $J$. Moreover, the saturation value of $S(\rho_L)$ is the same for all interaction strengths with $J > 1$, whereas when $J \leq 1$, the block entanglement entropy saturates to different values, which again increase with $J$. On the other hand, for long-range interactions with all interaction strengths being equal, as depicted in Fig. 
\ref{fig:block_ent}(b), $S(\rho_L)$ always increases monotonically with $L$, and the behavior of the block entropy does not follow any order with respect to $J$, contrary to the NN-interaction regime. We recall that $2^{2L} \det \Xi_L = 1$ for a pure Gaussian state while it is greater than unity for a Gaussian mixed state. Thus, in the case of LR interactions, the increase in $S(\rho_{L})$ indicates that the reduced subsystems involving a larger number of modes tend towards more mixed states. It has also been established that the symplectic eigenvalues of pure Gaussian states are all equal to $1/2$ while they are greater for mixed states \cite{Adesso_OSID_2014}. Since reduced subsystems of larger size have lower purity, and hence larger symplectic eigenvalues, it is the single-mode reduced state that contributes to the GGM (we take the maximum of $\frac{2}{1 + 2 \nu}$, which favors the smallest $\nu$), as shown in Sec. \ref{subsec:N-mode_LR}, thereby shedding light on the computation of the GGM. \begin{figure} \caption{Block entanglement entropy, $S(\rho_L)$ vs the reduced system size, $L$, when the initial squeezing strength is fixed to $s = 1.0$. (a). The interaction is considered to be nearest-neighbor (NN) while in (b), interactions among all the modes are long range. Here $N =40$, i.e., $40$ waveguide modes are coupled in a circular arrangement. From dark to light, the lines represent $J = 0.1$, $J = 0.5$, $J = 1.0$, $J = 10.0$, $J = 15.0$, and $J = 20.0$ respectively. All axes are dimensionless.} \label{fig:block_ent} \end{figure} \section{Conclusion} \label{sec:conclu} Entangled continuous variable (CV) systems are of fundamental importance in realizing a host of quantum information protocols. Additionally, it has been demonstrated that entangled CV systems provide a key route for resolving issues with other photonic devices, such as challenges with Bell-state measurements. Therefore, designing a scheme to generate multimode-entangled states is of paramount interest. We demonstrated that multiple circularly coupled interacting optical waveguide modes have the potential to create highly entangled genuine multimode states. Specifically, the interacting circular waveguide can create a genuinely multimode entangled (GME) state, measured by using the generalized geometric measure (GGM), from a squeezed or squeezed coherent state in a single mode, taken as a product with vacuum states in the other modes. We point out that we have considered an experimentally feasible configuration for our study. The waveguide arrays proposed in this work can be fabricated using direct femtosecond laser inscription. Waveguide configurations are appropriate because, unlike bulk optical elements, the propagation losses in these systems can be quite low. Additionally, the parametric down-conversion process can be used to generate the squeezed state that we have considered as the input. We analyzed the impact of different ranges of interaction on the generation of a GME state from the product initial state. We illustrated how the incorporation of long-range interactions constructively affects the process. Specifically, long-range interactions help in generating higher genuine entanglement for a fixed value of the coupling constant, compared to the circular waveguide setup with only nearest-neighbor interaction, even though the maximum value of the GGM remains constant for both long-range and nearest-neighbor interactions. 
When the order of the long-range interaction is such that all the modes interact equally with each other, we analytically obtained the GGM, which varies periodically with the coupling strength. We noticed that the GGM content can be increased with an increase of the squeezing strength in the input mode. To assess the benefit of LR interactions quantitatively, we investigated the area under the GGM curve, which clearly furnishes a higher value for LR interactions than for waveguide modes coupled only through short-range couplings. We noted, however, that the genuine multimode entanglement generated decreases with an increase in the number of interacting modes, thereby indicating a complementary relation between the system size and the range of interactions. One of the drawbacks of generating multimode entanglement via such a setup is that its magnitude fluctuates with time and thus is unsuitable for utilization in protocols that require states with a certain value of entanglement. To circumvent this unwanted characteristic, we showed that the presence of disorder in the couplings between the waveguide modes can be useful. Starting from a product state, when the system evolves according to the circular waveguide Hamiltonian in which mode-couplings are chosen randomly from a Gaussian distribution of a fixed mean and standard deviation, with a higher standard deviation representing greater disorder in the setup, we calculated the quenched average GGM. Our results indicated that for a sufficient strength of disorder, the multimode entanglement ceases to fluctuate and saturates to a fixed quenched average value. Although the quenched average GGM can never reach the maximum possible value, which can be achieved in the absence of disorder, its constant magnitude can help in its utilization in information processing tasks. In summary, our results add the disordered evolution considered here to the list of physical systems, possibly for the first time in photonic waveguides, in which disorder is reported to be beneficial for generating genuine multimode entanglement. Apart from the generation of genuine multimode entanglement, we showed that such a process is able to create entanglement in each bipartition. With nearest-neighbor interactions of moderate strength, the block entanglement entropy increases with the block length and then saturates, while in the case of long-range interactions it keeps increasing monotonically. The observation is also in good agreement with the way the GGM expression is obtained. Looking at the possibility of realizing waveguide setups in laboratories, our method opens up the possibility to build quantum devices which require multimode entanglement. Although we have concentrated on photonic waveguides, our findings also apply to coupled-cavity arrays and micro-ring resonator devices \cite{Hartmann_Nat_2006}. \section{Primer on CV-systems} \label{app:CV} A continuous variable system is characterized by quadrature variables, such as $\hat{X}$ and $\hat{P}$, which are canonically conjugate with each other \cite{Serafini_2017, Braunstein_RMP_2005}. Such observables possess an infinite spectrum and their eigenstates constitute a basis of the infinite-dimensional Hilbert space. 
For an $N$-mode system, described by the $2N$ quadrature operators $\{\hat{X}_k, \hat{P}_k\}$ (with $k = 1,2,\dots, N$), the free Hamiltonian is defined as \begin{equation} \hat{H} = \frac{1}{2} \sum_{k = 1}^N (\hat{X}_k^2 + \hat{P}_k^2) = \sum_{k = 1}^N \Big(\hat{a}_k^\dagger \hat{a}_k + \frac{1}{2} \Big), \label{eq:CV_hamiltonian} \end{equation} where $\hat{a}_k^\dagger$ and $\hat{a}_k$ are the creation and annihilation operators respectively for the mode $k$ and are given in terms of the quadrature variables as \begin{equation} \hat{a}_k = \frac{\hat{X}_k + i \hat{P}_k}{\sqrt{2}}, ~~~~~~~ \text{and} ~~~~~~ \hat{a}_k^\dagger = \frac{\hat{X}_k - i \hat{P}_k}{\sqrt{2}}, \label{eq:creation-annihilation_op} \end{equation} with $i = \sqrt{-1}$. The creation and annihilation operators corresponding to a given mode satisfy the bosonic commutation relation, $[\hat{a}_k, \hat{a}_k^\dagger] = 1$. We can define a quadrature vector, $\hat{R} = (\hat{X}_1, \hat{P}_1, \dots, \hat{X}_N, \hat{P}_N)^T$, to rewrite the commutation relation more succinctly as \begin{equation} \left[\hat{R}_k,\hat{R}_l\right]=i \mathcal{M}_{kl}\quad \text{with} ~~ \mathcal{M} = \bigoplus\limits_{j=1}^{N} \Omega_j. \label{eq:CV_commutation} \end{equation} Here, $\mathcal{M}$ represents the $N$-mode symplectic form, and $\Omega_j$, for a single mode, is given by \begin{equation} \quad \Omega_j=\begin{pmatrix} 0 & 1\\ -1 & 0 \end{pmatrix} \forall j. \label{eq:CV_omega} \end{equation} Out of the plethora of CV quantum states, Gaussian states constitute the most widely studied class of states \cite{ferraro2005,Weedbrook_RMP_2012}. Such states are the ground and thermal states of Hamiltonians which are at most quadratic functions of the quadrature variables. As the name suggests, Gaussian states can be completely characterized by their first and second moments, encapsulated respectively by the displacement vector $\bold{d}$ and the covariance matrix $\Xi$, in the following way: \begin{eqnarray} && d_k=\expval{\hat{R}_k}_{\rho}, \label{eq:CV_disp} \\ \Xi_{kl}&&= \frac{1}{2}\expval{\hat{R}_k\hat{R}_l+\hat{R}_l\hat{R}_k}_{\rho}-\expval{\hat{R}_k}_{\rho}\expval{\hat{R}_l}_{\rho}. \label{eq:CV_cov} \end{eqnarray} Here, $\rho$ denotes the $N$-mode Gaussian state under consideration and $\Xi$ is a real, symmetric, and positive definite $2N$-dimensional square matrix. Gaussian dynamics are likewise generated by Hamiltonians that are at most quadratic in the quadrature variables. For analytical simplicity, we can resort to the symplectic formalism. Given any $N$-mode quadratic Hamiltonian $\hat{\mathcal{H}}$ which can be written as $\hat{\mathcal{H}}=\hat{\xi}^\dagger H \hat{\xi}$ with $\hat{\xi}=\left(\hat{a}_1, \hat{a}_2,..., \hat{a}_N, \hat{a}_1^\dagger,..., \hat{a}_N^\dagger\right)^T$, we can construct its corresponding symplectic matrix $S_H$ as \cite{Luis_QSO_1995, Arvind_Pramana_1995, Adesso_OSID_2014} \begin{eqnarray} S_H = T^\dagger L^\dagger \exp(- i K H) L T, \label{eq:H_symp} \end{eqnarray} where $K, L$ and $T$ are $2N \times 2N$ matrices given by \begin{eqnarray} && K = \begin{pmatrix} \mathbb{I}_N & \mathbb{O}_N \\ \mathbb{O}_N & -\mathbb{I}_N \end{pmatrix}, \label{eq:K_mat} \\ && L = \frac{1}{\sqrt{2}}\begin{pmatrix} \mathbb{I}_N & i \mathbb{I}_N \\ \mathbb{I}_N & -i \mathbb{I}_N \end{pmatrix}, \label{eq:L_mat} \\ && T_{jk} = \delta_{k,2j - 1} + \delta_{k + 2N, 2j}. \label{eq:T_mat} \end{eqnarray} Here, $\mathbb{I}_N$ is the $N$-dimensional identity and $\mathbb{O}_N$ is the null matrix. 
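A direct transcription of Eq. \eqref{eq:H_symp} with the matrices of Eqs. \eqref{eq:K_mat}--\eqref{eq:T_mat} is given below as a minimal sketch (assuming NumPy and SciPy; the function name is ours). How the $2N \times 2N$ matrix $H$ is assembled from the mode couplings depends on the ordering and normalisation convention adopted for $\hat{\mathcal{H}}=\hat{\xi}^\dagger H \hat{\xi}$, which we do not fix here.
\begin{verbatim}
import numpy as np
from scipy.linalg import expm

def symplectic_from_H(H):
    """Symplectic matrix S_H = T^dag L^dag exp(-i K H) L T for a 2N x 2N
    Hermitian matrix H in the basis xi = (a_1,...,a_N, a_1^dag,...,a_N^dag)."""
    twoN = H.shape[0]
    N = twoN // 2
    I, O = np.eye(N), np.zeros((N, N))
    K = np.block([[I, O], [O, -I]])
    L = np.block([[I, 1j * I], [I, -1j * I]]) / np.sqrt(2.0)
    T = np.zeros((twoN, twoN))
    for j in range(1, twoN + 1):        # T_{jk} = delta_{k,2j-1} + delta_{k+2N,2j}
        for k in range(1, twoN + 1):
            T[j - 1, k - 1] = float(k == 2 * j - 1) + float(k + twoN == 2 * j)
    S = T.conj().T @ L.conj().T @ expm(-1j * K @ H) @ L @ T
    # for a physical quadratic Hamiltonian, S should be real up to numerical noise
    return np.real_if_close(S)
\end{verbatim}
The covariance matrix of the output state then evolves as $\Xi' = S_H\,\Xi\,S_H^T$, cf. the relations given below.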
Thereafter, the evolution of the Gaussian state in terms of its displacement vector and covariance matrix is defined as \cite{Adesso_OSID_2014} \begin{eqnarray} \rho' = e^{-i \hat{\mathcal{H}}t} \rho e^{i \hat{\mathcal{H}} t} \equiv && \bold{d}' = S_H \bold{d}, \label{eq:evolved_disp} \\ && \Xi' = S_H \Xi S_H^T. \label{eq:evolved_cov} \end{eqnarray} \section{Genuine multimode entanglement for CV systems} \label{app:CV_GGM} In the discrete variable regime, a pure multipartite state, $|\psi \rangle_{1,2,\dots,N}$, is said to be genuinely entangled if it has a non-vanishing value of the generalized geometric measure (GGM) \cite{Shimony_ANYAS_1995,Barnum_JPA_2001} defined as follows \begin{equation} \mathbb{G}(|\psi \rangle_{1,2,\dots,N}) = 1 - \max_{|\phi\rangle \in \mathcal{S}}|\langle \phi | \psi \rangle_{1,2,\dots,N}|^2, \label{eq:GGM_def} \end{equation} where $|\phi\rangle$ is an $N$-party pure state which is not genuinely entangled, and the Fubini Study metric is used as the distance measure \cite{Arnold_1978, Kobayashi_1996}. A simpler canonical form of the GGM was derived \cite{SenDe_PRA_2010} which reads as \begin{eqnarray} \nonumber \mathbb{G}(|\psi \rangle_{1,2,\dots,N}) = 1 - \max[\lambda_{A:B} | A \cup B && = \{1,\dots,N\}, \\ A \cap B = \varnothing], \label{eq:GGM_simple} \end{eqnarray} where $\lambda_{A:B}$ is the maximum eigenvalue of the reduced density matrix in the $A:B$ split of the state $|\psi\rangle_{1,2,\dots,N}$. The maximization is performed over all such possible bipartitions. In the case of pure CV Gaussian systems, the genuine multimode entanglement is quantified using a similar measure \cite{Roy_PRA_2020}, defined as \begin{equation} \mathcal{G}(|\psi \rangle_{1,2,\dots,N}) = 1 - \max \mathcal{P}_m \Big[\prod_{i = 1}^m \frac{2}{1 + 2 \nu_i} \Big]_{m = 1}^{[N/2]}, \label{eq:CV_ggm} \end{equation} where $\mathcal{P}_m$ represents all the $m$-mode reduced states corresponding to the $N$-mode pure state $|\psi \rangle_{1,2,\dots,N}$ and $\nu_i$ stand for the symplectic eigenvalues of the $m$-th reduced state. The number of such bipartitions considered is $[N/2]$ with $[x]$ denoting the integer part of $x$. \section{GGM for the three-mode waveguide} \label{sec:app_3-mode} The simplest Hamiltonian corresponding to Eq. \eqref{eq:H_n} is for the three-mode circular waveguide consisting of only nearest-neighbor (NN) interaction, \begin{equation} \hat{H} = \hbar J (\hat{a}_1^{\dagger} \hat{a}_2 + \hat{a}_2^{\dagger} \hat{a}_3 + \hat{a}_3^{\dagger} \hat{a}_1 + H.c.), \label{eq:H_3} \end{equation} where we have considered the interaction strength as $J_1 = J$ and $\hbar=1$. The symplectic eigenvalues corresponding to the evolved three-mode input state, $|\psi\rangle_{\text{in}} = |\psi_\text{s}\rangle \otimes |0\rangle^{\otimes 2}$ are given by \begin{widetext} \begin{eqnarray} && \mathbf{v}_1 = \left| \frac{1}{18} i \sqrt{16 \sinh ^2s \left(\cos \frac{3 J}{2}+2 \cos 3 J\right)-24 \cosh 2 s-57}\right|, \label{eq:3-mode_vA}\\ && \text{and} \,\, \mathbf{v}_2 = \mathbf{v}_3 = \left|\frac{1}{18} i \sqrt{8 \sinh ^2s \left(5\cos \frac{3 h J}{2}+ \cos 3 h J\right)-24 \cosh 2 s-57} \label{eq:3-mode_vB}\right|, \end{eqnarray} \end{widetext} where $\mathbf{v}_i$ represents the symplectic eigenvalue of the single-mode reduced states corresponding to the $i : jk$ bipartition (for $j,k \neq i$ and $i, j, k = 1, 2, 3$). The GGM, in this case, exhibits periodic behavior with variation in $J$ at a period of $4 \pi/3$. 
As the initial squeezing strength of the input state increases, so does the GGM. For $s = 1.0$, $\mathcal{G}_3^{\max} \approx 0.2$ at $J \approx 1.4$. In this setup, no long-range interaction is possible, since in a circular arrangement of three modes every pair of modes is already nearest-neighbor. \end{document}
\begin{document} \begin{abstract} We introduce a notion of normalised oplax $3$-functor suitable for the elementary homotopy theory of strict $3$-categories, following the combinatorics of orientals. We show that any such morphism induces a morphism of simplicial sets between the Street nerves and we characterise those morphisms of simplicial sets coming from normalised oplax $3$-functors. This allows us to prove that normalised oplax $3$-functors compose. Finally we construct a strictification for normalised oplax $3$-functors whose source is a $1$-category without split-monos or split-epis. \end{abstract} \maketitle \tableofcontents \setcounter{MaxMatrixCols}{20} \makeatletter \def\labelstylecode#1{ \pgfkeys@split@path \edef\label@key{/pentagon/label/\pgfkeyscurrentname} \edef\style@key{\pgfkeyscurrentkey/.@val} \def#1{#1} \def\pgfkeysnovalue{\pgfkeysnovalue} \ifx#1\pgfkeysnovalue \pgfkeysgetvalue{\label@key}#1 \ifx#1\pgfkeysnovalue\else \pgfkeysalso{commutative diagrams/.cd, \style@key} \fi \else \pgfkeys{\style@key/.code = \pgfkeysalso{#1}} \fi} \def\arrowstylecode#1{ \edef\style@key{\pgfkeyscurrentkey/.@val} \def#1{#1} \def\pgfkeysnovalue{\pgfkeysnovalue} \ifx#1\pgfkeysnovalue \pgfkeysalso{commutative diagrams/.cd, \style@key} \else \pgfkeys{\style@key/.code = \pgfkeysalso{#1}} \fi} \pgfkeys{ /pentagon/label/.cd, 0/.initial = {$\bullet$}, 1/.initial = {$\bullet$}, 2/.initial = {$\bullet$}, 3/.initial = {$\bullet$}, 4/.initial = {$\bullet$}, 01/.initial, 12/.initial, 23/.initial, 34/.initial, 04/.initial, 02/.initial, 03/.initial, 13/.initial, 14/.initial, 24/.initial, 012/.initial, 013/.initial, 014/.initial, 023/.initial, 024/.initial, 034/.initial, 123/.initial, 124/.initial, 134/.initial, 234/.initial, 0123/.initial, 0124/.initial, 0134/.initial, 0234/.initial, 1234/.initial, 01234/.initial, /pentagon/labelstyle/.cd, 01/.@val/.initial, 12/.@val/.initial, 23/.@val/.initial, 34/.@val/.initial, 04/.@val/.initial=\pgfkeysalso{swap}, 02/.@val/.initial=\pgfkeysalso{description}, 03/.@val/.initial=\pgfkeysalso{description}, 13/.@val/.initial=\pgfkeysalso{description}, 14/.@val/.initial=\pgfkeysalso{description}, 24/.@val/.initial=\pgfkeysalso{description}, 012/.@val/.initial=\pgfkeysalso{swap, above}, 013/.@val/.initial=\pgfkeysalso{below = 1pt}, 014/.@val/.initial=\pgfkeysalso{below}, 023/.@val/.initial=\pgfkeysalso{swap, above = 1pt}, 024/.@val/.initial=\pgfkeysalso{below left = -1pt and -1pt}, 034/.@val/.initial=\pgfkeysalso{swap, right}, 123/.@val/.initial=\pgfkeysalso{below left = -1pt and -1pt}, 124/.@val/.initial=\pgfkeysalso{swap, right}, 134/.@val/.initial=\pgfkeysalso{left}, 234/.@val/.initial=\pgfkeysalso{swap, below right = -1pt and -1pt}, 0123/.@val/.initial, 0124/.@val/.initial=\pgfkeysalso{swap}, 0134/.@val/.initial, 0234/.@val/.initial=\pgfkeysalso{swap}, 1234/.@val/.initial, 01234/.@val/.initial, 01/.code=\labelstylecode{#1}, 02/.code=\labelstylecode{#1}, 03/.code=\labelstylecode{#1}, 04/.code=\labelstylecode{#1}, 12/.code=\labelstylecode{#1}, 13/.code=\labelstylecode{#1}, 14/.code=\labelstylecode{#1}, 23/.code=\labelstylecode{#1}, 24/.code=\labelstylecode{#1}, 34/.code=\labelstylecode{#1}, 012/.code=\labelstylecode{#1}, 013/.code=\labelstylecode{#1}, 014/.code=\labelstylecode{#1}, 023/.code=\labelstylecode{#1}, 024/.code=\labelstylecode{#1}, 034/.code=\labelstylecode{#1}, 123/.code=\labelstylecode{#1}, 124/.code=\labelstylecode{#1}, 134/.code=\labelstylecode{#1}, 234/.code=\labelstylecode{#1}, 0123/.code=\labelstylecode{#1}, 0124/.code=\labelstylecode{#1}, 
0134/.code=\labelstylecode{#1}, 0234/.code=\labelstylecode{#1}, 1234/.code=\labelstylecode{#1}, 01234/.code=\labelstylecode{#1}, /pentagon/arrowstyle/.cd, 01/.@val/.initial, 12/.@val/.initial, 23/.@val/.initial, 34/.@val/.initial, 04/.@val/.initial, 02/.@val/.initial, 03/.@val/.initial, 13/.@val/.initial, 14/.@val/.initial, 24/.@val/.initial, 012/.@val/.initial=\pgfkeysalso{Rightarrow}, 013/.@val/.initial=\pgfkeysalso{Rightarrow}, 014/.@val/.initial=\pgfkeysalso{Rightarrow}, 023/.@val/.initial=\pgfkeysalso{Rightarrow}, 024/.@val/.initial=\pgfkeysalso{Rightarrow}, 034/.@val/.initial=\pgfkeysalso{Rightarrow}, 123/.@val/.initial=\pgfkeysalso{Rightarrow}, 124/.@val/.initial=\pgfkeysalso{Rightarrow}, 134/.@val/.initial=\pgfkeysalso{Rightarrow}, 234/.@val/.initial=\pgfkeysalso{Rightarrow}, 0123/.@val/.initial=\pgfkeysalso{triple}, 0124/.@val/.initial=\pgfkeysalso{triple}, 0134/.@val/.initial=\pgfkeysalso{triple}, 0234/.@val/.initial=\pgfkeysalso{triple}, 1234/.@val/.initial=\pgfkeysalso{triple}, 01234/.@val/.initial=\pgfkeysalso{quadruple}, 01/.code=\arrowstylecode{#1}, 02/.code=\arrowstylecode{#1}, 03/.code=\arrowstylecode{#1}, 04/.code=\arrowstylecode{#1}, 12/.code=\arrowstylecode{#1}, 13/.code=\arrowstylecode{#1}, 14/.code=\arrowstylecode{#1}, 23/.code=\arrowstylecode{#1}, 24/.code=\arrowstylecode{#1}, 34/.code=\arrowstylecode{#1}, 012/.code=\arrowstylecode{#1}, 013/.code=\arrowstylecode{#1}, 014/.code=\arrowstylecode{#1}, 023/.code=\arrowstylecode{#1}, 024/.code=\arrowstylecode{#1}, 034/.code=\arrowstylecode{#1}, 123/.code=\arrowstylecode{#1}, 124/.code=\arrowstylecode{#1}, 134/.code=\arrowstylecode{#1}, 234/.code=\arrowstylecode{#1}, 0123/.code=\arrowstylecode{#1}, 0124/.code=\arrowstylecode{#1}, 0134/.code=\arrowstylecode{#1}, 0234/.code=\arrowstylecode{#1}, 1234/.code=\arrowstylecode{#1}, 01234/.code=\arrowstylecode{#1} } \def\pent@abc{ \draw [/pentagon/arrowstyle/012] (198:0.45) -- node [/pentagon/labelstyle/012] { \pgfkeysvalueof{/pentagon/label/012}} (198:0.8); } \def\pent@bcd{ \draw [/pentagon/arrowstyle/123] (126:0.45) -- node [/pentagon/labelstyle/123] { \pgfkeysvalueof{/pentagon/label/123}} (126:0.8); } \def\pent@cde{ \draw [/pentagon/arrowstyle/234] (54:0.45) -- node [/pentagon/labelstyle/234] { \pgfkeysvalueof{/pentagon/label/234}} (54:0.8); } \def\pent@ade{ \draw [/pentagon/arrowstyle/034] (-40:0.6) -- node [/pentagon/labelstyle/034] { \pgfkeysvalueof{/pentagon/label/034}} (-5:0.5); } \def\pent@abe{ \draw [/pentagon/arrowstyle/014] (-70:0.55) -- node [/pentagon/labelstyle/014] { \pgfkeysvalueof{/pentagon/label/014}} (-110:0.55); } \def\pent@acd{ \draw [/pentagon/arrowstyle/023] (55:0.3) -- node [/pentagon/labelstyle/023] { \pgfkeysvalueof{/pentagon/label/023}} (125:0.3); } \def\pent@bde{ \draw [/pentagon/arrowstyle/134] (-5:0.4) -- node [/pentagon/labelstyle/134] { \pgfkeysvalueof{/pentagon/label/134}} (35:0.5); } \def\pent@ace{ \draw [/pentagon/arrowstyle/024] (-45:0.45) -- node [/pentagon/labelstyle/024] { \pgfkeysvalueof{/pentagon/label/024}} (-45:0.1); } \def\pent@abd{ \draw [/pentagon/arrowstyle/013] (-90:0.22) -- node [/pentagon/labelstyle/013] { \pgfkeysvalueof{/pentagon/label/013}} (-150:0.46); } \def\pent@bce{ \draw [/pentagon/arrowstyle/124] (188:0.4) -- node [/pentagon/labelstyle/124] { \pgfkeysvalueof{/pentagon/label/124}} (150:0.55); } \def\pent@#1#2{ \begin{scope}[shift=#2, commutative diagrams/every diagram] \foreach \i in {0,1,2,3,4} { \tikzmath{\a = 270 - (72 * \i);} \node (n{#1}\i) at (\a:1) { \pgfkeysvalueof{/pentagon/label/\i}}; } \node (p#1) at (0,0) 
[circle, inner sep = 0pt, fit = (n{#1}0.center)(n{#1}1.center)(n{#1}2.center) (n{#1}3.center)(n{#1}4.center)] {}; \begin{scope}[commutative diagrams/.cd, every arrow, every label] \ifcase #1 \def0/1, 1/2, 2/3, 1/3, 0/3{0/1, 1/2, 2/3, 3/4, 0/4, 0/2, 0/3}\or \def0/1, 1/2, 2/3, 1/3, 0/3{0/1, 1/2, 2/3, 3/4, 0/4, 1/3, 1/4}\or \def0/1, 1/2, 2/3, 1/3, 0/3{0/1, 1/2, 2/3, 3/4, 0/4, 0/2, 2/4}\or \def0/1, 1/2, 2/3, 1/3, 0/3{0/1, 1/2, 2/3, 3/4, 0/4, 0/3, 1/3}\or \def0/1, 1/2, 2/3, 1/3, 0/3{0/1, 1/2, 2/3, 3/4, 0/4, 1/4, 2/4}\else \def0/1, 1/2, 2/3, 1/3, 0/3{}\fi \foreach \s / \e in 0/1, 1/2, 2/3, 1/3, 0/3 { \draw [/pentagon/arrowstyle/\s\e] (n{#1}\s) -- node [/pentagon/labelstyle/\s\e] { \pgfkeysvalueof{/pentagon/label/\s\e}} (n{#1}\e); } \ifcase #1 \pent@abc\pent@acd\pent@ade\or \pent@bcd\pent@bde\pent@abe\or \pent@cde\pent@ace\pent@abc\or \pent@ade\pent@abd\pent@bcd\or \pent@abe\pent@bce\pent@cde \else\fi \end{scope} \end{scope} } \def\pentagon#1{ \pgfkeys{#1} \pent@{2}{(270:3)}\pent@{0}{(198:3)}\pent@{3}{(126:3)} \pent@{1}{(54:3)}\pent@{4}{(342:3)} \begin{scope}[commutative diagrams/.cd, every arrow, every label] \draw [/pentagon/arrowstyle/0123] (p0) -- node [/pentagon/labelstyle/0123] { \pgfkeysvalueof{/pentagon/label/0123}} (p3); \draw [/pentagon/arrowstyle/0134] (p3) -- node [/pentagon/labelstyle/0134] { \pgfkeysvalueof{/pentagon/label/0134}} (p1); \draw [/pentagon/arrowstyle/1234] (p1) -- node [/pentagon/labelstyle/1234] { \pgfkeysvalueof{/pentagon/label/1234}} (p4); \draw [/pentagon/arrowstyle/0234] (p0) -- node [/pentagon/labelstyle/0234] { \pgfkeysvalueof{/pentagon/label/0234}} (p2); \draw [/pentagon/arrowstyle/0124] (p2) -- node [/pentagon/labelstyle/0124] { \pgfkeysvalueof{/pentagon/label/0124}} (p4); \draw [/pentagon/arrowstyle/01234] (270:0.75) -- node [/pentagon/labelstyle/01234] { \pgfkeysvalueof{/pentagon/label/01234}} (90:0.75); \end{scope} } \makeatother \makeatletter \def\labelstylecodes#1{ \pgfkeys@split@path \edef\label@key{/square/label/\pgfkeyscurrentname} \edef\style@key{\pgfkeyscurrentkey/.@val} \def#1{#1} \def\pgfkeysnovalue{\pgfkeysnovalue} \ifx#1\pgfkeysnovalue \pgfkeysgetvalue{\label@key}#1 \ifx#1\pgfkeysnovalue\else \pgfkeysalso{commutative diagrams/.cd, \style@key} \fi \else \pgfkeys{\style@key/.code = \pgfkeysalso{#1}} \fi} \def\arrowstylecodes#1{ \edef\style@key{\pgfkeyscurrentkey/.@val} \def#1{#1} \def\pgfkeysnovalue{\pgfkeysnovalue} \ifx#1\pgfkeysnovalue \pgfkeysalso{commutative diagrams/.cd, \style@key} \else \pgfkeys{\style@key/.code = \pgfkeysalso{#1}} \fi} \pgfkeys{ /square/label/.cd, 0/.initial = {$\bullet$}, 1/.initial = {$\bullet$}, 2/.initial = {$\bullet$}, 3/.initial = {$\bullet$}, 01/.initial, 12/.initial, 23/.initial, 02/.initial, 03/.initial, 13/.initial, 012/.initial, 013/.initial, 023/.initial, 123/.initial, 0123/.initial, /square/labelstyle/.cd, 01/.@val/.initial, 12/.@val/.initial, 23/.@val/.initial, 03/.@val/.initial=\pgfkeysalso{swap}, 02/.@val/.initial=\pgfkeysalso{description}, 13/.@val/.initial=\pgfkeysalso{description}, 012/.@val/.initial=\pgfkeysalso{below left = -1pt and -1pt}, 013/.@val/.initial=\pgfkeysalso{swap, right}, 023/.@val/.initial=\pgfkeysalso{left}, 123/.@val/.initial=\pgfkeysalso{swap, below right = -1pt and -1pt}, 0123/.@val/.initial, 01/.code=\labelstylecodes{#1}, 02/.code=\labelstylecodes{#1}, 03/.code=\labelstylecodes{#1}, 12/.code=\labelstylecodes{#1}, 13/.code=\labelstylecodes{#1}, 23/.code=\labelstylecodes{#1}, 012/.code=\labelstylecodes{#1}, 013/.code=\labelstylecodes{#1}, 023/.code=\labelstylecodes{#1}, 
123/.code=\labelstylecodes{#1}, 0123/.code=\labelstylecodes{#1}, /square/arrowstyle/.cd, 01/.@val/.initial, 12/.@val/.initial, 23/.@val/.initial, 02/.@val/.initial, 03/.@val/.initial, 13/.@val/.initial, 012/.@val/.initial=\pgfkeysalso{Rightarrow}, 013/.@val/.initial=\pgfkeysalso{Rightarrow}, 023/.@val/.initial=\pgfkeysalso{Rightarrow}, 123/.@val/.initial=\pgfkeysalso{Rightarrow}, 0123/.@val/.initial=\pgfkeysalso{triple}, 01/.code=\arrowstylecodes{#1}, 02/.code=\arrowstylecodes{#1}, 03/.code=\arrowstylecodes{#1}, 12/.code=\arrowstylecodes{#1}, 13/.code=\arrowstylecodes{#1}, 23/.code=\arrowstylecodes{#1}, 012/.code=\arrowstylecodes{#1}, 013/.code=\arrowstylecodes{#1}, 023/.code=\arrowstylecodes{#1}, 123/.code=\arrowstylecodes{#1}, 0123/.code=\arrowstylecodes{#1} } \def\sq@abc{ \draw [/square/arrowstyle/012] (126:0.25) -- node [/square/labelstyle/012] { \pgfkeysvalueof{/square/label/012}} (126:0.6); } \def\sq@bcd{ \draw [/square/arrowstyle/123] (54:0.25) -- node [/square/labelstyle/123] { \pgfkeysvalueof{/square/label/123}} (54:0.6); } \def\sq@acd{ \draw [/square/arrowstyle/023] (-55:0.55) -- node [/square/labelstyle/023] { \pgfkeysvalueof{/square/label/023}} (-15:0.4); } \def\sq@abd{ \draw [/square/arrowstyle/013] (235:0.55) -- node [/square/labelstyle/013] { \pgfkeysvalueof{/square/label/013}} (195:0.4); } \def\sq@#1#2{ \begin{scope}[shift=#2, commutative diagrams/every diagram] \foreach \i in {0,1,2,3} { \tikzmath{\a = 225 - (90 * \i);} \node (n{#1}\i) at (\a:1) { \pgfkeysvalueof{/square/label/\i}}; } \node (s#1) at (0,0) [circle, inner sep = 0pt, fit = (n{#1}0.center)(n{#1}1.center)(n{#1}2.center) (n{#1}3.center)] {}; \begin{scope}[commutative diagrams/.cd, every arrow, every label] \ifcase #1 \def0/1, 1/2, 2/3, 1/3, 0/3{0/1, 1/2, 2/3, 0/2, 0/3}\or \def0/1, 1/2, 2/3, 1/3, 0/3{0/1, 1/2, 2/3, 1/3, 0/3}\else \def0/1, 1/2, 2/3, 1/3, 0/3{}\fi \foreach \s / \e in 0/1, 1/2, 2/3, 1/3, 0/3 { \draw [/square/arrowstyle/\s\e] (n{#1}\s) -- node [/square/labelstyle/\s\e] { \pgfkeysvalueof{/square/label/\s\e}} (n{#1}\e); } \ifcase #1 \sq@abc\sq@acd\or \sq@abd\sq@bcd \else\fi \end{scope} \end{scope} } \def\square#1{ \pgfkeys{#1} \sq@{0}{(180:1.8)}\sq@{1}{(0:1.8)} \begin{scope}[commutative diagrams/.cd, every arrow, every label] \draw [shorten >=10pt, shorten <=10pt, /square/arrowstyle/0123] (s0) -- node [/square/labelstyle/0123] { \pgfkeysvalueof{/square/label/0123}} (s1); \end{scope} } \makeatother \makeatletter \def\labelstylecode@square#1{ \pgfkeys@split@path \edef\label@key{/squares/label/\pgfkeyscurrentname} \edef\style@key{\pgfkeyscurrentkey/.@val} \def#1{#1} \def\pgfkeysnovalue{\pgfkeysnovalue} \ifx#1\pgfkeysnovalue \pgfkeysgetvalue{\label@key}#1 \ifx#1\pgfkeysnovalue\else \pgfkeysalso{commutative diagrams/.cd, \style@key} \fi \else \pgfkeys{\style@key/.code = \pgfkeysalso{#1}} \fi} \def\arrowstylecode@square#1{ \edef\style@key{\pgfkeyscurrentkey/.@val} \def#1{#1} \def\pgfkeysnovalue{\pgfkeysnovalue} \ifx#1\pgfkeysnovalue \pgfkeysalso{commutative diagrams/.cd, \style@key} \else \pgfkeys{\style@key/.code = \pgfkeysalso{#1}} \fi} \pgfkeys{ /squares/label/.cd, 0/.initial = {$\bullet$}, 1/.initial = {$\bullet$}, 2/.initial = {$\bullet$}, 3/.initial = {$\bullet$}, 01/.initial, 12/.initial, 23/.initial, 02/.initial, 03/.initial, 13/.initial, 012/.initial, 013/.initial, 023/.initial, 123/.initial, 0123/.initial, /squares/labelstyle/.cd, 01/.@val/.initial=\pgfkeysalso{swap}, 12/.@val/.initial=\pgfkeysalso{swap}, 23/.@val/.initial=\pgfkeysalso{swap}, 03/.@val/.initial, 
02/.@val/.initial=\pgfkeysalso{description}, 13/.@val/.initial=\pgfkeysalso{description}, 012/.@val/.initial=\pgfkeysalso{above left = -1pt and -1pt}, 013/.@val/.initial=\pgfkeysalso{swap, right}, 023/.@val/.initial=\pgfkeysalso{left}, 123/.@val/.initial=\pgfkeysalso{swap, above right = -1pt and -1pt}, 0123/.@val/.initial, 01/.code=\labelstylecode@square{#1}, 02/.code=\labelstylecode@square{#1}, 03/.code=\labelstylecode@square{#1}, 12/.code=\labelstylecode@square{#1}, 13/.code=\labelstylecode@square{#1}, 23/.code=\labelstylecode@square{#1}, 012/.code=\labelstylecode@square{#1}, 013/.code=\labelstylecode@square{#1}, 023/.code=\labelstylecode@square{#1}, 123/.code=\labelstylecode@square{#1}, 0123/.code=\labelstylecode@square{#1}, /squares/arrowstyle/.cd, 01/.@val/.initial, 12/.@val/.initial, 23/.@val/.initial, 02/.@val/.initial, 03/.@val/.initial, 13/.@val/.initial, 012/.@val/.initial=\pgfkeysalso{Rightarrow}, 013/.@val/.initial=\pgfkeysalso{Rightarrow}, 023/.@val/.initial=\pgfkeysalso{Rightarrow}, 123/.@val/.initial=\pgfkeysalso{Rightarrow}, 0123/.@val/.initial=\pgfkeysalso{triple}, 01/.code=\arrowstylecode@square{#1}, 02/.code=\arrowstylecode@square{#1}, 03/.code=\arrowstylecode@square{#1}, 12/.code=\arrowstylecode@square{#1}, 13/.code=\arrowstylecode@square{#1}, 23/.code=\arrowstylecode@square{#1}, 012/.code=\arrowstylecode@square{#1}, 013/.code=\arrowstylecode@square{#1}, 023/.code=\arrowstylecode@square{#1}, 123/.code=\arrowstylecode@square{#1}, 0123/.code=\arrowstylecode@square{#1} } \def\sqs@abc{ \draw [/squares/arrowstyle/012] (235:0.25) -- node [/squares/labelstyle/012] { \pgfkeysvalueof{/squares/label/012}} (235:0.6); } \def\sqs@bcd{ \draw [/squares/arrowstyle/123] (-54:0.25) -- node [/squares/labelstyle/123] { \pgfkeysvalueof{/squares/label/123}} (-54:0.6); } \def\sqs@acd{ \draw [/squares/arrowstyle/023] (55:0.55) -- node [/squares/labelstyle/023] { \pgfkeysvalueof{/squares/label/023}} (15:0.45); } \def\sqs@abd{ \draw [/squares/arrowstyle/013] (125:0.55) -- node [/squares/labelstyle/013] { \pgfkeysvalueof{/squares/label/013}} (165:0.45); } \def\sqs@#1#2{ \begin{scope}[shift=#2, commutative diagrams/every diagram] \foreach \i in {0,1,2,3} { \tikzmath{\a = 135 + (90 * \i);} \node (n{#1}\i) at (\a:1) { \pgfkeysvalueof{/squares/label/\i}}; } \node (s#1) at (0,0) [circle, inner sep = 0pt, fit = (n{#1}0.center)(n{#1}1.center)(n{#1}2.center) (n{#1}3.center)] {}; \begin{scope}[commutative diagrams/.cd, every arrow, every label] \ifcase #1 \def0/1, 1/2, 2/3, 1/3, 0/3{0/1, 1/2, 2/3, 0/2, 0/3}\or \def0/1, 1/2, 2/3, 1/3, 0/3{0/1, 1/2, 2/3, 1/3, 0/3}\else \def0/1, 1/2, 2/3, 1/3, 0/3{}\fi \foreach \s / \e in 0/1, 1/2, 2/3, 1/3, 0/3 { \draw [/squares/arrowstyle/\s\e] (n{#1}\s) -- node [/squares/labelstyle/\s\e] { \pgfkeysvalueof{/squares/label/\s\e}} (n{#1}\e); } \ifcase #1 \sqs@abc\sqs@acd\or \sqs@abd\sqs@bcd \else\fi \end{scope} \end{scope} } \def\squares#1{ \pgfkeys{#1} \sqs@{0}{(180:1.8)}\sqs@{1}{(0:1.8)} \begin{scope}[commutative diagrams/.cd, every arrow, every label] \draw [shorten >=10pt, shorten <=10pt, /squares/arrowstyle/0123] (s0) -- node [/squares/labelstyle/0123] { \pgfkeysvalueof{/squares/label/0123}} (s1); \end{scope} } \makeatother \tikzset{ between/.style args={#1 and #2}{ at = ($(#1)!0.5!(#2)$) }, betweenl/.style args={#1 and #2}{ at = ($(#1)!0.35!(#2)$) } } \tikzset{pics/.cd, Pent/.style n args={4}{code={ \begin{scope}[font=\footnotesize] \foreach \XX [count=\r starting from 0] in {#3} \node (\r) at (162 + \r * 72:#2) {$\XX$}; \draw[->] (0) -- node[midway,left] (ab) {$\ab$} 
(1); \draw[->] (1) -- node[midway,below] (bc) {$\bc$} (2); \draw[->] (2) -- node[midway,right] (cd) {$\cd$} (3); \draw[->] (3) -- node[midway,above right] (de) {$\de$} (4); \draw[->] (0) -- node[midway,above left] (ea) {$\ae$} (4); \ifcase#1 \draw[->] (0) -- node[midway,fill=white] (ac) {$\ac$} (2); \draw[->] (0) -- node[midway,fill=white] (ad) {$\ad$} (3); \node[between=1 and ac] {$\abc$}; \node[betweenl=ad and 2] {$\acd$}; \node[betweenl=ad and 4] {$\ade$}; \or \draw[->] (0) -- node[midway,fill=white] (ad) {$\ad$} (3); \draw[->] (1) -- node[midway,fill=white] (bd) {$\bd$} (3); \node[betweenl=ad and 1] {$\abd$}; \node[between=bd and 2] {$\bcd$}; \node[betweenl=ad and 4] {$\ade$}; \or \draw[->] (1) -- node[midway,fill=white] (bd) {$\bd$} (3); \draw[->] (1) -- node[midway,fill=white] (be) {$\be$} (4); \node[between=0 and be] {$\abe$}; \node[betweenl=de and 1] {$\bde$}; \node[between=bd and 2] {$\bcd$}; \or \draw[->] (1) -- node[midway,fill=white] (be) {$\be$} (4); \draw[->] (2) -- node[midway,fill=white] (ce) {$\ce$} (4); \node[between=0 and be] {$\abe$}; \node[betweenl=bc and 4] {$\bce$}; \node[between=ce and 3] {$\cde$}; \or \draw[->] (0) -- node[midway,fill=white] (ac) {$\ac$} (2); \draw[->] (2) -- node[midway,fill=white] (ce) {$\ce$} (4); \node[between=1 and ac] {$\abc$}; \node[betweenl=ea and 2] {$\ace$}; \node[between=ce and 3] {$\cde$}; \fi \end{scope} }}} \tikzset{pics/.cd, Pentscript/.style n args={4}{code={ \begin{scope}[font=\scriptsize] \foreach \XX [count=\r starting from 0] in {#3} \node (\r) at (162 + \r * 72:#2) {$\XX$}; \draw[->] (0) -- node[midway,left] (ab) {$\ab$} (1); \draw[->] (1) -- node[midway,below] (bc) {$\bc$} (2); \draw[->] (2) -- node[midway,right] (cd) {$\cd$} (3); \draw[->] (3) -- node[midway,above right] (de) {$\de$} (4); \draw[->] (0) -- node[midway,above left] (ea) {$\ae$} (4); \ifcase#1 \draw[->] (0) -- node[midway,fill=white] (ac) {$\ac$} (2); \draw[->] (0) -- node[midway,fill=white] (ad) {$\ad$} (3); \node[between=1 and ac] {$\abc$}; \node[betweenl=ad and 2] {$\acd$}; \node[betweenl=ad and 4] {$\ade$}; \or \draw[->] (0) -- node[midway,fill=white] (ad) {$\ad$} (3); \draw[->] (1) -- node[midway,fill=white] (bd) {$\bd$} (3); \node[betweenl=ad and 1] {$\abd$}; \node[between=bd and 2] {$\bcd$}; \node[betweenl=ad and 4] {$\ade$}; \or \draw[->] (1) -- node[midway,fill=white] (bd) {$\bd$} (3); \draw[->] (1) -- node[midway,fill=white] (be) {$\be$} (4); \node[between=0 and be] {$\abe$}; \node[betweenl=de and 1] {$\bde$}; \node[between=bd and 2] {$\bcd$}; \or \draw[->] (1) -- node[midway,fill=white] (be) {$\be$} (4); \draw[->] (2) -- node[midway,fill=white] (ce) {$\ce$} (4); \node[between=0 and be] {$\abe$}; \node[betweenl=bc and 4] {$\bce$}; \node[between=ce and 3] {$\cde$}; \or \draw[->] (0) -- node[midway,fill=white] (ac) {$\ac$} (2); \draw[->] (2) -- node[midway,fill=white] (ce) {$\ce$} (4); \node[between=1 and ac] {$\abc$}; \node[betweenl=ea and 2] {$\ace$}; \node[between=ce and 3] {$\cde$}; \fi \end{scope} }}} \section*{Introduction} The homotopy theory of small categories was born with the introduction by Grothendieck of the nerve functor \[ N \colon {\mathcal{C}\mspace{-2.mu}\it{at}} \to {\mathcal{S}\mspace{-2.mu}\it{et}}Simp \] in~\cite{grothendieck_techniques_III}, where ${\mathcal{C}\mspace{-2.mu}\it{at}}$ is the category of small categories and ${\mathcal{S}\mspace{-2.mu}\it{et}}Simp$ is the category of simplicial sets, allowing us to define a class of weak equivalences in ${\mathcal{C}\mspace{-2.mu}\it{at}}$: a functor is a weak equivalence precisely 
when its nerve is a simplicial weak homotopy equivalence. We call these functors \ndef{Thomason equivalences} of ${\mathcal{C}\mspace{-2.mu}\it{at}}$. The nerve functor preserves weak equivalences by definition, \ie it maps Thomason equivalences to simplicial weak equivalences, and therefore there is an induced functor \[\bar{N} \colon \text{Ho}({\mathcal{C}\mspace{-2.mu}\it{at}}) \longto \text{Ho}({\mathcal{S}\mspace{-2.mu}\it{et}}Simp)\] at the level of the homotopy categories. The first striking result of this theory appears in Illusie's thesis~\cite{cotangent} (where it is credited to Quillen) and states that this induced functor $\bar{N} \colon \text{Ho}({\mathcal{C}\mspace{-2.mu}\it{at}}) \longto \text{Ho}({\mathcal{S}\mspace{-2.mu}\it{et}}Simp)$ is an equivalence of categories. The homotopy inverse of the nerve functor $N \colon {\mathcal{C}\mspace{-2.mu}\it{at}} \to {\mathcal{S}\mspace{-2.mu}\it{et}}Simp$ is not induced by its left adjoint $c \colon {\mathcal{S}\mspace{-2.mu}\it{et}}Simp \to {\mathcal{C}\mspace{-2.mu}\it{at}}$, \ie the \ndef{categorical realisation} functor, which behaves poorly homotopically, but instead by the \ndef{category of elements} functor $i_{\Delta} \colon {\mathcal{S}\mspace{-2.mu}\it{et}}Simp \to {\mathcal{C}\mspace{-2.mu}\it{at}}$, mapping a simplicial set $X$ to its category of elements $i_\Delta(X) = \cDelta/X$. A careful study of the subtle homotopy theory of small categories by Thomason~\cite{Cat_closed} led him to show another important result: the existence of a model category structure on ${\mathcal{C}\mspace{-2.mu}\it{at}}$ which is Quillen equivalent to the Kan--Quillen model category structure on simplicial sets. This important result implies that small categories and simplicial sets are not only equivalent as homotopy categories, but actually as weak $(\infty, 1)$-categories, \ie as homotopy theories. One drawback of working with small categories as a preferred model for homotopy types is that they do not offer geometric models as simple as those given by simplicial complexes. For instance, the homotopy type of the two-dimensional sphere~$S^2$ is often modelled by the poset \[ \begin{tikzcd} \bullet \ar[r] & \bullet \ar[r] & \bullet \\ \bullet \ar[r] \ar[ur] & \bullet \ar[r] \ar[ur] & \bullet \ar[from=1-1, to=2-2, crossing over] \ar[from=1-2, to=2-3, crossing over] \end{tikzcd}\,, \] see for instance~\cite{FiniteSpaces}. This is mainly due to the intrinsic $1$-dimensional shape of categories. On the other hand, the homotopy type of $S^2$ can be easily modelled in a geometric fashion by a small $2$-category, namely \begin{center} \begin{tikzpicture}[scale=1.5] \node (0) at (180:1) {$a$}; \node (1) at (0:1) {$b$}; \draw[->] (0) to [bend left=50] node [above] {$f$} (1); \draw[->] (0) to [bend right=50] node [below] {$g$} (1); \draw[double equal sign distance,>={Implies},->] (65:.4) to [bend left] node [right] {$\alpha$} (-65:.4); \draw[dashed, double equal sign distance, >={Implies}, ->] (117:.38) to [bend right] node [left] {$\beta$} (-117:.38); \end{tikzpicture} \end{center} This suggests that strict higher categories may provide a more convenient framework for setting up a categorical model for homotopy types, and it is a source of motivation for generalising the homotopy theory of ${\mathcal{C}\mspace{-2.mu}\it{at}}$ to strict higher categories. In fact, Ara and Maltsiniotis construct a functor $\Or$ assigning to any ordered simplicial complex an \oo-category, see~\cite[\S 9]{AraMaltsiCondE}.
In a seminal article~\cite{Street}, Street introduced a nerve functor \[ N_\infty \colon \nCat{\infty} \to {\mathcal{S}\mspace{-2.mu}\it{et}}Simp \] for strict $\infty$-categories, allowing one to define and study the homotopy theory of $\nCat{\infty}$, the category of small strict $\infty$-categories, as well as of $\nCat{n}$, the category of strict $n$-categories, for every positive integer $n$. The class of weak equivalences of $\nCat{n}$ pulled back via the Street nerve shall still be called \ndef{Thomason equivalences}. This functor is homotopically meaningful, since for instance it sends the above $2$\nbd-cat\-egorical model of~$S^2$ to a simplicial set with the homotopy type of~$S^2$. The particular case of small $2$-categories was studied by Bullejos and Cegarra~\cite{BullejosCegarra}, Cegarra~\cite{Cegarra}, Chiche~\cite{chiche_homotopy} and del~Hoyo~\cite{del-Hoyo}. Their approach stresses the importance of (normalised) oplax $2$-functors. In fact, it was already noticed that oplax $2$-functors are geometrically meaningful, see for instance~\cite[Section 10]{StreetCatStructures}. This is a consequence of the fact that the Street nerve for $2$\nbd-cat\-egories $N_2 \colon \nCat{2} \to {\mathcal{S}\mspace{-2.mu}\it{et}}Simp$ is faithful but \emph{not} full; the set of morphisms of simplicial sets between the nerves $N_2(A)$ and $N_2(B)$ of two small $2$-categories $A$ and $B$ is in fact in bijection with the set of \ndef{normalised oplax $2$-functors} from $A$ to $B$. Ara and Maltsiniotis~\cite{AraMaltsiNThom} provide an abstract framework in which to transfer the Kan--Quillen model category structure on simplicial sets to strict $n$-categories, and show that it applies to small $2$-categories. Their strategy makes use of normalised oplax $2$-functors in order to define some maps needed for a homotopy cobase change property. It is therefore natural to study a notion of normalised oplax $n$-functor which could be used to generalise the results listed above, thus establishing a satisfactory homotopy theory of $n$-categories. By this we mean showing that $\nCat{n}$ can be equipped with a Quillen model category structure which is Quillen equivalent to the Kan--Quillen model category structure on simplicial sets. Providing a sensible definition of such a normalised oplax $n$-functor for the case $n = 3$, which is the first one not well understood, is the aim of the present paper. A normalised oplax $n$-functor $F \colon A \to B$, with $n\ge 1$, should roughly be a morphism of $n$\nbd-graphs which respects the identities on the nose, but respects compositions of arrows only up to oriented coherences. For example, given a composition $a \xto{f} b \xto{g} c$ of two $1$-arrows of $A$, the normalised oplax $n$-functor $F$ should provide: \begin{itemize} \item a $1$-arrow $F(a) \xto{F(gf)} F(c)$ of $B$, \item two composable $1$-arrows $F(a) \xto{F(f)} F(b)$ and $F(b) \xto{F(g)} F(c)$ of $B$, \item a $2$-arrow $F(g, f) \colon F(gf) \Rightarrow F(g)F(f)$, which represents the coherence for the composition of $f$ and $g$. \end{itemize} As observed above, a central tool for the elementary homotopy theory of $2$-categories is the notion of normalised oplax $2$-functor. Moreover, this turns out to be a crucial ingredient in establishing the model category structure \emph{à la} Thomason on $\nCat{2}$, too. Normalised oplax $2$-functors can be composed and hence form a category $\widetilde{\nCat{2}}$.
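Before going further, let us recall for convenience the construction underlying all the nerves appearing below; this is simply a restatement of Street's definition from~\cite{Street}, and the notations $O_n$ for the $n$-th oriental and $\nCat{\infty}(-, -)$ for the set of strict $\infty$-functors are used here only for this reminder (the orientals are recalled in the appendix). For a strict \oo-category $A$ and $n \geq 0$, one sets \[ N_\infty(A)_n = \nCat{\infty}(O_n, A)\,, \] the simplicial structure being induced by the cosimplicial object $[n] \mapsto O_n$ in $\nCat{\infty}$.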
There is a canonical nerve functor $\widetilde N_2 \colon \widetilde{\nCat{2}} \to {\mathcal{S}\mspace{-2.mu}\it{et}}Simp$ extending the Street nerve for $2$-categories, that is, there is a commutative triangle \[ \begin{tikzcd}[column sep=small] & {\mathcal{S}\mspace{-2.mu}\it{et}}Simp & \\ \nCat{2} \ar[ur, "N_2"] \ar[rr] && \widetilde{\nCat{2}} \ar[lu, "\widetilde N_2"'] \end{tikzcd} \] of functors, where the functor $\nCat{2} \to \widetilde{\nCat{2}}$ is simply the embedding given by the fact that any $2$-functor is in particular a normalised oplax $2$-functor. The Street nerve $N_n$ is a faithful functor but not full for $n>1$. In the $2$-categorical case, this deficiency is solved by normalised oplax $2$-functors: the nerve $\widetilde N_2 \colon \widetilde{\nCat{2}} \to {\mathcal{S}\mspace{-2.mu}\it{et}}Simp$ is fully faithful. Following this idea, Street proposes in~\cite{Conspectus} to define a normalised oplax $3$-functor from $A$ to $B$ as a simplicial morphism from $N_3(A)$ to $N_3(B)$. A careful investigation of such a morphism shows that this might not be an optimal definition since in general simplicial morphisms between Street nerves of $3$\nbd-cat\-egories fail to preserve the underlying $3$-graph. Indeed, we analyse the case where $A$ is the ``categorical $2$-disk'' \[ \begin{tikzcd}[column sep=4.5em] a \ar[r, bend left, "f", ""'{name=f}] \ar[r, bend right, "g"', ""{name=g}] \ar[Rightarrow, from=f, to=g, "\disk"] & a' \end{tikzcd}\ , \] \ie the $2$-category with two parallel $1$-cells and a single $2$-cell between them, and $B$ is the ``invertible categorical $3$-disk'' \[ \begin{tikzcd}[column sep=4.5em] \bullet \ar[r, bend left=60, ""'{name=f}] \ar[r, bend right=60, ""{name=g}] \ar[Rightarrow, from=f, to=g, shift right=0.5em, bend right, shorten <=1mm, shorten >=1mm, ""{name=al}] \ar[Rightarrow, from=f, to=g, shift left=0.5em, bend left, shorten <=1mm, shorten >=1mm, ""'{name=ar}] \arrow[triple, from=al, to=ar, "\cong"]{} & \bullet \end{tikzcd} , \] \ie the $3$-category with two parallel $1$-cells, two parallel $2$-cells between them and a single invertible $3$-cell between these $2$-cells, and we show that there are more simplicial morphisms than expected between the respective Street nerves. On the one hand, the $2$-category $A$ has no compositions and so the normalised oplax $3$-functors from $A$ to $B$ should coincide with the strict $3$-functors. On the other hand, there are simplicial morphisms from $N_3(A)$ to $N_3(B)$ which do not come from the nerve of any strict $3$-functors. 
This is a consequence of the fact that, for instance, there are two ways to capture the $2$-cell $\disk$ of $A$ with a $2$-simplex of $N_3(A)$, namely \[ \begin{tikzcd}[column sep=small] & a' \ar[dr, equal, "1_{a'}"] & \\ a \ar[rr, "f"', ""{name=f}] \ar[ru, "g"] && a' \ar[Rightarrow, from=f, to=1-2, shorten >=3pt, "\disk"{near start}] \end{tikzcd} \quadet \begin{tikzcd}[column sep=small] & a \ar[dr, "g"] & \\ a \ar[rr, "f"', ""{name=f}] \ar[ru, equal, "1_a"] && a' \ar[Rightarrow, from=f, to=1-2, shorten >=3pt, "\disk"{near start}] \end{tikzcd} \ , \] and these two different ways are related by $3$-simplices, for instance \begin{center} \begin{tikzpicture}[scale=1.6, font=\footnotesize] \squares{ /squares/label/.cd, 0={$\bullet$}, 1={$\bullet$}, 2={$\bullet$}, 3=$\bullet$, 01={}, 12=$g$, 23={}, 02=$g$, 03=$f$, 13=$g$, 012={$=$}, 023=$\disk$, 123={$=$}, 013=$\disk$, 0123={$=$}, /squares/arrowstyle/.cd, 01={equal}, 23={equal}, 012={phantom, description}, 123={phantom, description}, 0123={phantom, description}, /squares/labelstyle/.cd, 012={anchor=center}, 123={anchor=center}, 0123={anchor=center} } \end{tikzpicture}\ , \end{center} which are sent by any simplicial morphism to $3$-simplices of $N_3(B)$ for which the principal $3$-cell is invertible, but non necessarily trivial. Said otherwise, the different ways of encoding cells, or simple compositions of cells, with simplices are linked together by higher simplices having the property that the cell of greatest dimension is invertible; these higher simplices act as invertible constraints for morphisms between Street nerves of $3$-categories and it is therefore natural to imagine that a normalised oplax $3$-functor would correspond to a simplicial morphism for which all these higher simplices acting as constraints have \emph{trivial} greatest cell, instead of only invertible. In order to determine a substantial set of these constraints, we analyse the simplicial morphism canonically associated to our notion of oplax normalised $3$-functor. This provides a simplicial notion of oplax $3$-functor preserving the underlying $3$-graph, that we call \ndef{simplicial oplax $3$-morphisms}. We show that they compose and thus form a category whose objects are small $3$-categories. The standard definition of a normalised oplax $2$-functor $F \colon A \to B$ has objects, $1$-cells, $2$-cells and composition of $1$-cells as datum, that is, to any object, $1$-arrow or $2$-arrow $x$ of $A$, we associate an object, a $1$-arrow or a $2$-arrow, respectively, $F(x)$ of $B$ and for any pair $a \xto{f} b \xto{g} c$ of composable $1$-arrows of $A$, we associate a $2$\nbd-arrow $F(g, f)$ of $B$ going from $F(gf)$ to $F(g)F(f)$. These data must satisfy some normalisation conditions, for instance $F(1_a) = 1_{F(a)}$, for any object $a$ of $A$, and $F(1_b, f) = 1_{F(f)}$, for any $1$-cell $f \colon a \to b$ of $A$, and a cocycle condition, which is a coherence for the composition of three $1$-arrows of $A$, and the vertical and horizontal compatibility for $2$-arrows as coherences. Another take on this notion is to see the data, the normalisations and the coherences indexed by Joyal's cellular cat\-e\-gory~$\Theta_3$, whose objects are trees of height at most $3$. 
Any tree has a dimension, given by the number of its edges, and a normalised oplax $2$-functor can be defined as a set of maps index on the trees $\treeDot$, $\treeL$, $\treeLL$ and $\treeV$ of dimension at most $2$ for the data, which represents precisely objects, $1$-arrows, $2$-arrows and compositions of two $1$-arrows; the same trees are the indices for the normalisation conditions. Finally the four trees $\treeW$, $\treeY$, $\treeVLeft$ and $\treeVRight$ of dimension $3$, representing the composition of three composable $1$-arrows, the vertical composition of two $2$-arrows and the two possible whiskerings of a $2$-arrow with a $1$-arrow, are the indices for the coherences. More precisely, a normalised oplax $2$-functor $F \colon A \to B$ will consist of a map $F_{\treeDot}$ from the objects of $A$ to the objects of~$B$, a map $F_{\treeL}$ from the $1$-arrows of $A$ to the $1$-arrows of~$B$ respecting source and target, \ie for $f \colon a \to b$ in $A$ we get $F_{\treeL}(f) \colon F_{\treeDot}(a) \to F_{\treeDot}(b)$ in~$B$, a map $F_{\treeLL}$ from the $2$-arrows of $A$ to the $2$-arrows of~$B$ similarly respecting source and target and a map $F_{\treeV}$ that for any pair of composable $1$-arrows $a \xto{f} b \xto{g} c$ of $A$ associates a $2$-cell~$F_{\treeV}(g, f)$ \[ \begin{tikzcd}[column sep=small] F_{\treeDot}(a) \ar[rd, "F_{\treeLog}(f)"'] \ar[rr, "F_{\treeLog}(g f)"{name=gf}] && F_{\treeDot}(a'') \\ & F_{\treeDot}(a') \ar[ru, "F_{\treeL}(g)"'] & \ar[Rightarrow, shorten <=1.5mm, from=gf, to=2-2] \end{tikzcd} \] as explained above. The coherence $F_{\treeW}(h, g, f)$ for a triple of composable $1$-arrows of~$A$, say $a \xto{f} b \xto{g} c \xto{h} d$, can be represented by the following diagram \begin{center} \centering \begin{tikzpicture}[scale=2, font=\footnotesize, every label/.style={fill=white}] \squares{ /squares/label/.cd, 0=$F_{\treeDot}(a)$, 1=$F_{\treeDot}(a')$, 2=$F_{\treeDot}(a'')$, 3=$F_{\treeDot}(a''')$, 01=${F_{\treeLog}(f)}$, 12=${F_{\treeL}(g)}$, 23=${F_{\treeL}(h)}$, 02=${F_{\treeL}(g\comp_0 f)}$, 03=${F_{\treeLog}(h\comp_0 g f)}$, 13=${F_{\treeL}(h g)}$, 012=${F_{\treeV}(g, f)}$, 023={${F_{\treeV}(h, g f)}$}, 123=${F_{\treeV}(h, g)}$, 013={${F_{\treeV}(h g, f)}$}, 0123={$F_{\treeW}(h, g, f)$}, /squares/arrowstyle/.cd, 0123={equal}, /squares/labelstyle/.cd, 012={below right = -1pt and -1pt}, 123={below left = -1pt and -1pt}, 023={swap, above left = -1pt and 3pt}, 013={swap, above right = -1pt and 3pt} } \end{tikzpicture}\ . \end{center} The tree $\treeY$ representing the vertical composition of $2$-cells plays a key role, which in this low dimensional case is hidden but becomes much clearer in the $3$-dimensional case. We take this latter definition of normalised oplax $2$-functor, and the ``cellular'' point of view behind it, as the starting point for a generalisation of this notion for the case of $3$\nbd-cat\-e\-gories. Indeed, a normalised oplax $3$-functor shall consist of a family of maps, the data, indexed by the trees of dimension at most $3$ \emph{except for $\treeY$}, subject to normalisation conditions indexed by these same trees as well as to a set of coherences indexed by the trees of dimension $4$ joint with the tree $\treeY$. 
Listing the tree $\treeY$, which is the only tree of dimension $3$ representing a composition of cells which does not ``branch'' at height $0$, among the coherences is essential, since the datum associated to such a tree must consist of a \emph{trivial cell} (or an invertible one, but we do not follow this path) for the composition of two such normalised oplax $3$-functors to be defined. It can be read as a condition of local strictness. It is already crucial in showing that a normalised oplax $3$\nbd-func\-tor $F$ induces a canonical morphism of simplicial sets $\SNn{l}(F)$, \ie that it can be pre-composed with normalised oplax $3$-functors with source a simplex; we show that this induced morphism of simplicial sets is in fact a simplicial oplax $3$-morphism. Nevertheless, showing directly that the composition of two normalised oplax $3$-functors is still a normalised oplax $3$-functor is a very hard task. Indeed, the proof that a normalised oplax $3$-functor induces a canonical simplicial oplax $3$-morphism boils down to showing that the coherence for the tree $\treeVV$, representing the composition of four $1$-arrows, is satisfied by the ``obvious'' representative for the composition of normalised oplax $3$-functors; this proof is highly non-trivial and involves a long and careful study of pastings of all the coherences of the two composed normalised oplax $3$-functors. This is just one of the 14 coherences that one would need to check for the composition of normalised oplax $3$-functors to be well-defined. As remarked above, a careful examination of $\SNn{l}(F) \colon N_3(A) \to N_3(B)$, the simplicial morphism associated to a normalised oplax $3$-functor $F \colon A \to B$, reveals that certain non-trivial $3$-simplices of $N_3(A)$ with trivial principal $3$-cell are sent via $\SNn{l}(F)$ to $3$-simplices of $N_3(B)$ where the principal cell is also trivial. Such simplices were called constraints above, and if these constraints are taken as a property, they allow us to give a simplicial definition of a normalised oplax morphism between nerves of $3$-categories which preserves their underlying $3$-graph. We call such morphisms of simplicial sets \emph{simplicial oplax $3$-morphisms}. Hence, we come equipped with two notions of normalised oplax morphisms for $3$-categories: one that is cellular in spirit and the other that is simplicial. The latter has the advantage that it is easy to see that such morphisms compose and form a category, while the advantage of the former is that it allows us to reason with cells, or simple compositions of cells, in order to define complicated morphisms, instead of having to describe them in every dimension, as one must for a morphism of simplicial sets. We show how to associate a normalised oplax $3$-functor $c_l(F)$ to any simplicial oplax $3$-morphism $F$. In order to do this, we have to check that the ``obvious'' data that we can associate to a simplicial oplax $3$-morphism satisfy the normalisation conditions and the coherences of a normalised oplax $3$-functor. The normalisations are simply encoded in the degeneracies, while the coherences are non-trivially encoded by appropriate $4$-simplices. As one might expect, the coherence for the tree representing the horizontal composition of $2$-cells is the hardest to prove. We then show that this assignment, going from simplicial to cellular, is inverse to the map associating a simplicial morphism to any normalised oplax $3$-functor. Contrary to what happens in the $2$-categorical case, this too is a non-trivial task.
For $F \colon N_3(A) \to N_3(B)$ a simplicial oplax $3$-morphism, we are led to provide, for any $3$-simplex $x$ of $N_3(A)$, an explicit description of the principal $3$-cell of the $3$-simplex $\SNn{l}c_l(F)(x)$ in terms of the normalised oplax $3$-functor $c_l(F)$. This gives a bijection between the normalised oplax $3$\nbd-functors from $A$ to $B$ and the simplicial oplax $3$\nbd-morphisms from $N_3(A)$ to $N_3(B)$. We can then deduce that normalised oplax $3$-functors compose and form a category and that this category is isomorphic to that of simplicial oplax $3$\nbd-mor\-phisms. The notion of oplax $3$-morphism already appears in the literature. Indeed, Gordon--Power--Street in~\cite{GordonPowerStreet} and Gurski in~\cite{GurskiCoherence} provide similar, although slightly different, general definitions for trimorphisms between tricategories, with oplax variants. However, if we specialise to strict $3$-categories we see that these notions are different from ours. In fact, the main difference lies in the oriented coherence, by which we mean the datum associated to the horizontal composition of $2$-cells: in our case it expresses a relation between the two pieces of data associated to the two possible whiskerings of a $2$-cell with a $1$-cell. The data of these two whiskerings are symmetric, as imposed by the algebra of the orientals, and this is incompatible with the choice of a prescribed lax/oplax orientation for the horizontal composition of $2$-cells, as required in the definition of trimorphism. An important and motivating example of normalised oplax $3$-functor is given by the ``sup'' morphism $\sup \colon \cDelta/N_3(A) \to A$, where $A$ is a $3$-category and $\cDelta/N_3(A)$ is the category of elements of its Street nerve. In the $1$-categorical case, this morphism is actually a $1$-functor mapping an object $(\Deltan{n}, x)$ of $\cDelta/N(A)$, where $x \colon \Deltan{n} \to A$ is a sequence of $n$ composable arrows of $A$, to the object $x(n)$ of $A$ and a morphism $f \colon (\Deltan{n}, x) \to (\Deltan{p}, y)$ of $\cDelta/N(A)$, that we can depict by the triangle \[ \begin{tikzcd}[column sep=small] \Deltan{n} \ar[rr, "f"] \ar[rd, "x"'] && \Deltan{p} \ar[ld, "y"] \\ & A & \end{tikzcd}\ , \] to the arrow $y_{\{f(n), p\}} \colon y(f(n)) \to y(p)$. This arrow can be seen as a functor \[ \begin{tikzcd} \Deltan{1} \ar[rr, "{\{f(n), p\}}"] &&\Deltan{p} \ar[r, "y"] & A\ . \end{tikzcd} \] The sup $1$-functor is always a Thomason equivalence and it plays an important role in the elementary homotopy theory of $1$-categories. Del Hoyo~\cite{del-Hoyo} and Chiche~\cite{chiche_homotopy} generalised and studied this $\sup$ morphism for the case of $2$-categories. For instance, to a pair of composable $1$-arrows $(\Deltan{m}, x) \xto{f} (\Deltan{n}, y) \xto{g} (\Deltan{p}, z)$ of~$\cDelta/N_2(A)$, one assigns the $2$-cell of $A$ given by the principal $2$-arrow of the $2$-functor \[ \begin{tikzcd} c_2N_2(\Deltan{2}) \ar[rrr, "{\{gf(m), g(n), p\}}"] &&& c_2N_2(\Deltan{p}) \ar[r, "z"] & A \end{tikzcd}\ . \] This normalised oplax $2$-functor proved to be crucial for the elementary homotopy theory of $2$-categories and we provide a $3$-dimensional definition with our notion of normalised oplax $3$-functor. We also study the ``strictification'' of such a morphism.
By this we mean the following general procedure: given a $1$-category $A$ and a $3$-category $B$, there exists a $3$\nbd-cat\-e\-gory $\tilde A$ and a normalised oplax $3$-functor $\eta_A \colon A \to \tilde A$ such that the set of strict $3$-functors from $\tilde A$ to $B$ is in bijection with the set of normalised oplax $3$-functors from $A$ to $B$; moreover, this bijection is obtained by pre-composing by~$\eta_A$. By the correspondence described above between normalised oplax $3$-functors and simplicial oplax $3$-morphisms, the $3$-category $\tilde A$ is given by $c_3N_3(A)$ and the morphism $\eta_A$ is just the unit $\eta_A \colon N_3(A) \to N_3c_3N_3(A)$. A nice description for the $2$-categorical case has been given by del Hoyo~\cite{del-Hoyo}, so in particular we already know how to describe the $1$-cells of $\tilde A$. We tackle this problem more generally and we provide a complete description of the \oo-category $c_\infty N_\infty(A)$, for any $1$-category $A$ without split monomorphisms or split epimorphisms; all posets and the subdivision $c \Sd N(C)$ of any $1$-category $C$ have this property. The objects of $c_\infty N(A)$ are the same as those of $A$ and, as we observed above, we already know the $1$-cells, which are given by non-degenerate simplices of $N(A)$. We define an \oo-category $c_\infty N(A)(f, g)$, for any pair of parallel $1$-arrows $f$ and $g$ of $c_\infty N(A)$, as well as ``vertical compositions'' \[ c_\infty N(A)(g, h) \times c_\infty N(A)(f, g) \to c_\infty N(A)(f, h) \] of $2$-cells and ``horizontal compositions'' \[ c_\infty N(A)(y, z) \times c_\infty N(A)(x, y) \to c_\infty N(A)(x, z)\,, \] where $x$, $y$ and $z$ are objects of $c_\infty N(A)$, and we check that they satisfy all the axioms of an \oo-category. Finally, we prove that our definition of $c_\infty N(A)$ indeed satisfies the expected universal property. In an appendix, we recall some technicalities that we need in order to develop this last section about strictifications. In fact, we shall need some precise properties about the internal Hom-$\infty$-categories of the orientals. We begin by recalling a few elements of Steiner's theory of augmented directed complexes. We then take a brief glance at Joyal's category~$\Theta$ and finally we introduce the orientals together with some results about them that we need in section~\ref{sec:tilde}. \begin{notations}\label{notation} The category of small strict $n$-categories will be denoted by $\nCat{n}$, for any $1\leq n\leq \infty$. For a definition of $\nCat{n}$, see Appendix~\ref{app:higher_cats}. The simplex category shall be denoted by $\cDelta$ and ${\mathcal{S}\mspace{-2.mu}\it{et}}Simp$ shall denote the category of simplicial sets. The functor ${\mathcal{S}\mspace{-2.mu}\it{et}}Simp \to {\mathcal{C}\mspace{-2.mu}\it{at}}$ that to any simplicial set $X$ associates its category of elements $\cDelta/X$ shall be denoted by $i_\cDelta$. All the \oo-categories will be strict, with the composition operations denoted by $\comp_i$ and the identity of a cell $x$ denoted by $1_x$. That is, if $A$ is an \oo-category, $0\leq i < j$ are integers and $x$ and $y$ are $j$-cells of $A$ such that the $i$-target $t_i(x)$ of $x$ is equal to the $i$-source $s_i(y)$ of $y$, then there exists a unique $j$-cell $y \comp_i x$ of $A$ which is the $i$-composition of $x$ and $y$; similarly, there is a $(j+1)$-cell $1_x$ which is the identity of $x$. We shall often call \emph{trivial} the identity cells of $A$.
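Although it is part of the axioms of strict \oo-categories, let us also record here, purely as a reminder for the pastings performed below, the exchange law satisfied by these composition operations: for all integers $0 \leq i < j$ and all cells $x$, $x'$, $y$, $y'$ of $A$ for which both sides are defined, \[ (y' \comp_i x') \comp_j (y \comp_i x) = (y' \comp_j y) \comp_i (x' \comp_j x)\,. \]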
We shall say that two $i$-cells $x$ and $y$ of $A$ are \emph{parallel} if they have the same source and target; if this is the case, we shall denote by $\operatorname{\mathsf{Hom}}^i_A(x, y)$ the \oo-category whose $j$-cells, for $j \geq 0$, are the $(i+j+1)$-cells of $A$ having $x$ as $i$-source and $y$ as $i$-target. The Street nerve from $n$-categories to simplicial sets shall always be denoted by $N_\infty \colon \nCat{n} \to {\mathcal{S}\mspace{-2.mu}\it{et}}Simp$, for any $1 \leq n \leq \infty$. This is justified by the fact that we can embed $n$-categories in \oo-categories. The left adjoint to the Street nerve shall be denoted by $c_n \colon {\mathcal{S}\mspace{-2.mu}\it{et}}Simp \to \nCat{n}$. \end{notations} \begin{remerciements} I would like to thank Dimitri Ara, who supervised my Ph.D.~thesis from which this work originated, for his support and guidance. I also want to thank Steve Lack for many interesting comments that overall improved the readability of this paper. I heartily thank Fosco Loregian and Dominic Verity for their invaluable \TeX{}nical help. Finally, I gratefully acknowledge the support of GA\v{C}R EXPRO 19-28628X. \end{remerciements} \section{Normalised oplax 3-functors} \subsection{Recall: normalised oplax 2-functors} We begin this section by recalling the classical notion of \ndef{normalised oplax $2$\nbd-func\-tor}. \begin{paragr} Let $A$ and $B$ be two $2$-categories. A \ndef{normalised oplax $2$-functor}\index{normalised oplax $2$-functor} $F \colon A \to B$ is given by: \begin{itemize} \item[-] a map $\operatorname{\mathsf{Ob}}(A) \to \operatorname{\mathsf{Ob}}(B)$ that to any object $x$ of $A$ associates an object $F(x)$ of $B$; \item[-] a map $\operatorname{\mathsf{Cell}}_1(A) \to \operatorname{\mathsf{Cell}}_1(B)$ that to any $1$-cell $f \colon x \to y$ of $A$ associates a $1$-cell $F(f) \colon F(x) \to F(y)$ of $B$; \item[-] a map $\operatorname{\mathsf{Cell}}_2(A) \to \operatorname{\mathsf{Cell}}_2(B)$ that to any $2$-cell $\alpha \colon f \to g$ of $A$ associates a $2$-cell $F(\alpha) \colon F(f) \to F(g)$ of $B$; \item[-] a map that to any composable $1$-cells $x \xrightarrow{f} y \xrightarrow{g} z$ of $A$ associates a $2$-cell \[ F(g, f) \colon F(g\comp_0 f) \to F(g) \comp_0 F(f) \] of $B$.
\end{itemize} These data are subject to the following coherences: \begin{description} \item[normalisation] for any object $x$ of $A$ (resp.~any $1$-cell $f$ of $A$) we have $F(1_x) = 1_{F(x)}$ (resp.~$F(1_f) = 1_{F(f)}$); moreover for any $1$-cell $f\colon x \to y$ of $A$ we have \[F(1_y, f) = 1_{F(f)} = F(f, 1_x)\,;\] \item[cocycle] for any triple $x \xto{f} y \xto{g} z \xto{h} t$ of composable $1$-cells of $A$ we have \[ \bigl(F(h) \comp_0 F(g, f)\bigr) \comp_1 F(h, g\comp_0 f) = \bigl(F(h, g) \comp_0 F(f)\bigr) \comp_1 F(h\comp_0 g, f)\,; \] \item[vertical compatibility] for any pair \[ \begin{tikzcd}[column sep=4.5em] a\phantom{'} \ar[r, bend left=50, looseness=1.2, "f", ""{below, name=f}] \ar[r, "g" description, ""{name=gu}, ""{below, name=gd}] \ar[r, bend right=50, looseness=1.2, "h"', ""{name=h}] \ar[Rightarrow, from=f, to=gu, "\alpha"] \ar[Rightarrow, from=gd, to=h, "\beta"]& a' \end{tikzcd} \] of $1$-composable $2$-cells $\alpha$ and $\beta$ of $A$, we have $F(\beta\comp_1 \alpha) = F(\beta) \comp_1 F(\alpha)$; \item[horizontal compatibility] for any pair \[ \begin{tikzcd}[column sep=4.5em] \bullet \ar[r, bend left, "f", ""{below, name=f1}] \ar[r, bend right, "f'"', ""{name=f2}] \ar[Rightarrow, from=f1, to=f2, "\alpha"] & \bullet \ar[r, bend left, "g", ""{below, name=g1}] \ar[r, bend right, "g'"', ""{name=g2}] \ar[Rightarrow, from=g1, to=g2, "\beta"] & \bullet \end{tikzcd} \] of $0$-composable $2$-cells $\alpha$ and $\beta$ of $A$, we have \[ F(g', f') \comp_1 F(\beta \comp_0 \alpha) = \bigl( F(\beta) \comp_0 F(\alpha)\bigr) \comp_1 F(g, f)\,. \] \end{description} \end{paragr} \begin{paragr} The coherence given by the compatibility with respect to ``horizontal composition'' can equivalently be decomposed into two coherences, which correspond to the two possible ``whiskerings'' of a $2$-cell with a $1$-cell: \begin{description} \item[$\TreeVRight\ $] for any diagram \[ \begin{tikzcd}[column sep=4.5em] a \ar[r, bend left, "f", ""{below, name=f}] \ar[r, bend right, "f'"', ""{name=fp}] \ar[Rightarrow, from=f, to=fp, "\alpha"] & a' \ar[r,"g"] & a'' \end{tikzcd} \] of $A$, we have \[ F(g) \comp_0 F(\alpha) \comp_1 F(g, f) = F(g, f') \comp_1 F(g \comp_0 \alpha)\,; \] \item[$\treeVLeft\ $] for any diagram \[ \begin{tikzcd}[column sep=4.5em] a \ar[r, "f"] & a' \ar[r, bend left, "g", ""{below, name=g}] \ar[r, bend right, "g'"', ""{name=gp}]& a'' \ar[Rightarrow, from=g, to=gp, "\beta"] \end{tikzcd} \] of $A$, we have \[ F(g', f) \comp_1 F(\beta \comp_0 f) = F(\beta) \comp_0 F(f) \comp_1 F(g, f)\,. \] \end{description} These two coherences are particular cases of the horizontal coherence of the previous paragraph; conversely, one checks immediately that these two coherences, together with the vertical coherence, imply the horizontal coherence (it suffices to factor $\beta \comp_0 \alpha$ as $(\beta \comp_0 f') \comp_1 (g \comp_0 \alpha)$, apply the vertical coherence and then the two whiskering coherences, and conclude by the exchange law). The advantage of this latter reformulation is that now the coherence conditions of a normalised oplax $2$-functor are indexed by \begin{itemize} \item the trees of dimension $\le 2$ for the normalisation; \item the trees of dimension $3$ for the other coherences. \end{itemize} This will be the starting point in our generalisation towards a notion of normalised oplax $3$-functor. In order to clarify what we mean, we restate the definition of normalised oplax $2$-functor with data and coherences indexed by trees. \end{paragr} \begin{paragr} A normalised oplax $2$-functor $F \colon A \to B$ consists of the following data: \begin{description} \item[$\TreeDot\:$] a map $F_{\treeDot}$ that to each object $a$ of $A$ assigns an object $F_{\treeDot}(a)$ of $B$.
\item[ \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [[]] \end{forest} }] a map $F_{\treeLog}$ that to each $1$-cell $f \colon a \to a'$ of $A$ assigns a $1$-cell $F_{\treeLog}(f) \colon F_{\treeDot}(a) \to F_{\treeDot}(a')$ of $B$. \item[ \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [[][]] \end{forest} }] a map $F_{\treeV}$ that to to each pair of $0$\hyp{}composable $1$\hyp{}cells \[ \begin{tikzcd} a \ar[r, "f"] & a' \ar[r, "g"] & a'' \end{tikzcd} \] of $A$ assigns a $2$-cell $F_{\treeV}(g, f)$ \[ \begin{tikzcd}[column sep=small] F_{\treeDot}(a) \ar[rd, "F_{\treeLog}(f)"'] \ar[rr, "F_{\treeLog}(g\comp_0 f)"{name=gf}] && F_{\treeDot}(a'') \\ & F_{\treeDot}(a') \ar[ru, "F_{\treeL}(g)"'] & \ar[Rightarrow, shorten <=1.5mm, from=gf, to=2-2] \end{tikzcd} \] of $B$, that is \[ F_{\treeV}(g, f) \colon F_{\treeLog}(g \comp_0 f) \to F_{\treeLog}(g) \comp_0 F_{\treeLog}(f) \] \item[ \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [ [] ] ] \end{forest} }] a map $F_{\treeLL}$ that to each $2$-cell $\alpha \colon f \to g$ of $A$ associates a $2$-cell $F_{\treeLL}(\alpha) \colon F_{\treeLog}(f) \to F_{\treeLog}(g)$ of $B$. \end{description} These data are subject to the following conditions of normalisation: \begin{description} \item[ \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [] \end{forest} } ] for any object $a$ of $A$, we have \[ F_{\treeLog}(1_a) = 1_{F_{\treeDot}(a)}\,; \] \item[ \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [] ] \end{forest} } ] for any $1$-cell $f$ of $A$ we have \[ F_{\treeLL}(1_f) = 1_{F_{\treeLog}(f)}\,; \] \item[ \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [] [] ] \end{forest} } ] for any $1$\nbd-cell $f \colon a \to a'$ of $A$, we have \[ F_{\treeV}(1_{a'}, f) = F_{\treeV}(f, 1_a) = 1_{F_{\treeL}(f)}\,. 
\] \end{description} Finally, we impose the following coherences: \begin{description} \item[ \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [] [] [] ] \end{forest} }] for any triple of $0$-composable $1$ cells \[ \begin{tikzcd}[column sep=small] a \ar[r, "f"] & a' \ar[r, "g"] & a'' \ar[r, "h"] & a''' \end{tikzcd} \] of $A$ we have \begin{center} \centering \begin{tikzpicture}[scale=2, font=\footnotesize, every label/.style={fill=white}] \squares{ /squares/label/.cd, 0=$F_{\treeDot}(a)$, 1=$F_{\treeDot}(a')$, 2=$F_{\treeDot}(a'')$, 3=$F_{\treeDot}(a''')$, 01=${F_{\treeLog}(f)}$, 12=${F_{\treeL}(g)}$, 23=${F_{\treeL}(h)}$, 02=${F_{\treeL}(g\comp_0 f)}$, 03=${F_{\treeLog}(h\comp_0 g f)}$, 13=${F_{\treeL}(h g)}$, 012=${F_{\treeV}(g, f)}$, 023={${F_{\treeV}(h, g f)}$}, 123=${F_{\treeV}(h, g)}$, 013={${F_{\treeV}(h g, f)}$}, 0123={$F_{\treeW}(h, g, f)$}, /squares/arrowstyle/.cd, 0123={equal}, /squares/labelstyle/.cd, 012={below right = -1pt and -1pt}, 123={below left = -1pt and -1pt}, 023={above left = 1pt and 3pt}, 013={above right = 1pt and 3pt} } \end{tikzpicture}\ . \end{center} that is we impose the equality \[ F_{\treeLog}(h) \comp_0 F_{\treeV}(g, f) \comp_1 F_{\treeV}(h, g \comp_0 f) = F_{\treeV}(h, g) \comp_0 F_{\treeLog}(f) \comp_1 F_{\treeV}(h \comp_0 g, f)\,; \] \item[ \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [[][]] ] \end{forest} }] for any pair \[ \begin{tikzcd}[column sep=4.5em] a\phantom{'} \ar[r, bend left=50, looseness=1.2, "f", ""{below, name=f}] \ar[r, "g" description, ""{name=gu}, ""{below, name=gd}] \ar[r, bend right=50, looseness=1.2, "h"', ""{name=h}] \ar[Rightarrow, from=f, to=gu, "\alpha"] \ar[Rightarrow, from=gd, to=h, "\beta"]& a' \end{tikzcd} \] of $1$-composable $2$-cells $\alpha$ and $\beta$ of $A$, we impose \[F(\beta\comp_1 \alpha) = F(\beta) \comp_1 F(\alpha)\,;\] \item[ \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [] [[]] ] \end{forest} }] for any whiskering \[ \begin{tikzcd}[column sep=4.5em] a \ar[r, bend left, "f", ""{below, name=f}] \ar[r, bend right, "f'"', ""{name=fp}] \ar[Rightarrow, from=f, to=fp, "\alpha"] & a' \ar[r,"g"] & a'' \end{tikzcd} \] of $A$ we impose \[ F_{\treeLog}(g) \comp_0 F_{\treeLL}(\alpha) \comp_1 F_{\treeV}(g, f) = F_{\treeV}(g, f') \comp_1 F_{\treeLL}(g \comp_0 \alpha)\,; \] \item[ \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [[]] [] ] \end{forest} }] for any whiskering \[ \begin{tikzcd}[column sep=4.5em] a \ar[r, "f"] & a' \ar[r, bend left, "g", ""{below, name=g}] \ar[r, bend right, "g'"', ""{name=gp}]& a'' \ar[Rightarrow, from=g, to=gp, "\beta"] \end{tikzcd} \] of $A$ we impose \[ F_{\treeV}(g', f) \comp_1 F_{\treeLL}(\beta \comp_0 f) = F_{\treeLL}(\beta) \comp_0 F_{\treeLog}(f) \comp_1 F_{\treeV}(g, f)\,. \] \end{description} \end{paragr} \begin{paragr} Given two normalised oplax $2$-functors $F \colon A \to B$ and $G \colon B \to C$, there is an obvious candidate for the composition $GF \colon A \to C$ and one checks that this is still a normalised oplax $2$-functor; furthermore, the identity functor on a category is clearly an identity element for normalised oplax $2$-functor too. 
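To fix ideas, here is a sketch of the candidate we have in mind, in the tree-indexed notation just introduced (the notation $(GF)_{\treeV}$ is ad hoc and only used for this remark): on objects, $1$-cells and $2$-cells one simply composes the corresponding maps of $F$ and $G$, and for a pair of $0$-composable $1$-cells $a \xto{f} a' \xto{g} a''$ of $A$ one sets \[ (GF)_{\treeV}(g, f) = G_{\treeV}\bigl(F_{\treeL}(g), F_{\treeL}(f)\bigr) \comp_1 G_{\treeLL}\bigl(F_{\treeV}(g, f)\bigr)\,, \] a $2$-cell from $GF(g \comp_0 f)$ to $GF(g) \comp_0 GF(f)$; with this choice the normalisation conditions for $GF$ follow immediately from those of $F$ and $G$.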
Hence, there is a category $\widetilde{\nCat{2}}$ with small $2$-categories as objects and normalised oplax $2$-functors as morphisms. The cosimplicial object $\Delta \to \nCat{2} \to \widetilde{\nCat{2}}$ of $\widetilde{\nCat{2}}$ induces a nerve functor $\widetilde{N_2} \colon \widetilde{\nCat{2}} \to {\mathcal{S}\mspace{-2.mu}\it{et}}Simp$. For any $n \ge 0$, the normalised oplax $2$-functors $\Deltan{n} \to A$ correspond precisely to $2$\nbd-func\-tors $\Onm{n}{2} \to A$ (see, for instance, \cite[\href{https://kerodon.net/tag/00BE}{Tag 00BE}]{kerodon}). Hence, we get a triangle diagram of functors \[ \begin{tikzcd} & {\mathcal{S}\mspace{-2.mu}\it{et}}Simp & \\ \nCat{2} \ar[ru, "N_2"] \ar[rr, hook] && \widetilde{\nCat{2}} \ar[lu, "\widetilde{N_2}"'] \end{tikzcd} \] which is commutative (up to a canonical isomorphism). Moreover, it is a standard fact that the functor $\widetilde{N_2}$ is fully faithful (see, for instance~\cite{BullejosFaroBlanco}, \cite{LackPaoli} or~\cite[\href{https://kerodon.net/tag/00AU}{Tag 00AU}]{kerodon}). \end{paragr} \subsection{Definition of normalised oplax 3-functor} Let $A$ and $B$ be two $3$-cat\-e\-go\-ries. We now give the definition of \ndef{normalised oplax $3$-functor} \index{normalised oplax $3$-functor} $F \colon A \to B$, which amounts to giving the structure of a family of maps indexed by the objects of $\Theta$ of dimension at most $3$ subject to a family of relations indexed by the objects of $\Theta$ of dimension $4$ as well as normalisation for identities of every dimension. A quick description of~$\Theta$ is provided in Appendix~\ref{app:theta}. The choice of the orientation for the structural maps is strongly guided by the algebra of the orientals and presents therefore a symmetry (or better, a duality) for symmetric trees of $\Theta$, see for instance the structural cells for the trees $\TreeVLeft$ and $\TreeVRight$ denoting the two possible whiskerings of a $2$-cell with a $1$-cell. The arboreal rule for indexing structure, normalisation and coherence that we have stated right above has an exception. In fact, we are forced to list the tree $\TreeY$ denoting the vertical composition of two $2$-cells among the coherences. This imposes a local strictness on the lax $3$-functor, meaning that as a result a normalised $3$-functor will induce a strict $2$-functor on the hom-$2$-categories, and it is necessary in order to provide a reasonable coherence for the tree $\treeVLR$ representing the horizontal composition of two $2$-cells; we shall say more about this in a remark right after the definition. Another explanation for this choice, more simplicial in spirit, will be offered in section~\ref{sec:simplicial} (see in particular Remark~\ref{rem:treeY}). \begin{paragr}[Data]\label{paragr:lax_3functor_cellular_data} A \ndef{normalised oplax $3$-functor} $F$ from $A$ to $B$ consists of: \begin{description} \item[$\TreeDot\ $] a map $F_{\treeDot}$ that to each object $a$ of $A$ assigns an object $F_{\treeDot}(a)$ of $B$. 
\item[ \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [[]] \end{forest} }] a map $F_{\treeLog}$ that to each $1$-cell $f \colon a \to a'$ of $A$ assigns a $1$-cell $F_{\treeLog}(f) \colon F_{\treeDot}(a) \to F_{\treeDot}(a')$ of $B$; \item[ \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [[][]] \end{forest} }] a map $F_{\treeV}$ that to to each pair of $0$\hyp{}composable $1$\hyp{}cells \[ \begin{tikzcd} a \ar[r, "f"] & a' \ar[r, "g"] & a'' \end{tikzcd} \] of $A$ assigns a $2$-cell $F_{\treeV}(g, f)$ \[ \begin{tikzcd}[column sep=small] F_{\treeDot}(a) \ar[rd, "F_{\treeLog}(f)"'] \ar[rr, "F_{\treeLog}(g\comp_0 f)"{name=gf}] && F_{\treeDot}(a'') \\ & F_{\treeDot}(a') \ar[ru, "F_{\treeL}(g)"'] & \ar[Rightarrow, shorten <=1.5mm, from=gf, to=2-2] \end{tikzcd} \] of $B$, that is \[ F_{\treeV}(g, f) \colon F_{\treeLog}(g \comp_0 f) \to F_{\treeLog}(g) \comp_0 F_{\treeLog}(f)\,; \] \item[ \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [ [] ] ] \end{forest} }] a map $F_{\treeLL}$ that to each $2$-cell $\alpha \colon f \to g$ of $A$ associates a $2$-cell $F_{\treeLL}(\alpha) \colon F_{\treeLog}(f) \to F_{\treeLog}(g)$ of $B$; \item[ \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [] [] [] ] \end{forest} }] a map $F_{\treeW}$, that to each triple of $0$-composable $1$ cells \[ \begin{tikzcd}[column sep=small] a \ar[r, "f"] & a' \ar[r, "g"] & a'' \ar[r, "h"] & a''' \end{tikzcd} \] of $A$ associates a $3$-cell $F_{\treeW}(h, g, f)$ \begin{center} \centering \begin{tikzpicture}[scale=1.8, font=\footnotesize, every label/.style={fill=white}] \squares{ /squares/label/.cd, 0=$F_{\treeDot}(a)$, 1=$F_{\treeDot}(a')$, 2=$F_{\treeDot}(a'')$, 3=$F_{\treeDot}(a''')$, 01=${F_{\treeLog}(f)}$, 12=${F_{\treeL}(g)}$, 23=${F_{\treeL}(h)}$, 02=${F_{\treeL}(g\comp_0 f)}$, 03=${F_{\treeLog}(h\comp_0 g f)}$, 13=${F_{\treeL}(h g)}$, 012=${F_{\treeV}(g, f)}$, 023={${F_{\treeV}(h, g f)}$}, 123=${F_{\treeV}(h, g)}$, 013={${F_{\treeV}(h g, f)}$}, 0123={$F_{\treeW}(h, g, f)$}, /squares/labelstyle/.cd, 012={below right = -1pt and -1pt}, 123={below left = -1pt and -1pt}, 023={above left = 1pt and 3pt}, 013={above right = 1pt and 3pt} } \end{tikzpicture}\ , \end{center} that is the $3$-cell $F_{\treeW}(h, g, f)$ has \[ F_{\treeLog}(h) \comp_0 F_{\treeV}(g, f) \comp_1 F_{\treeV}(h, g \comp_0 f) \] as source and \[ F_{\treeV}(h, g) \comp_0 F_{\treeLog}(f) \comp_1 F_{\treeV}(h \comp_0 g, f) \] as target; \item[ \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [] [[]] ] \end{forest} }] a map $F_{\treeVRight}$ that to any whiskering \[ \begin{tikzcd}[column sep=4.5em] a \ar[r, bend left, "f", ""{below, name=f}] \ar[r, bend right, "f'"', ""{name=fp}] \ar[Rightarrow, from=f, to=fp, "\alpha"] & a' \ar[r,"g"] & a'' \end{tikzcd} \] of $A$ associates a $3$-cell \[ F_{\treeVRight}(g, \alpha) \colon F_{\treeLog}(g) \comp_0 F_{\treeLL}(\alpha) \comp_1 F_{\treeV}(g, f) \to F_{\treeV}(g, f') \comp_1 F_{\treeLL}(g \comp_0 \alpha) \] of $B$; \item[ \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, 
minimum size=3pt, inner sep=0pt, s sep+=15, } [ [[]] [] ] \end{forest} }] a map $F_{\treeVLeft}$, that to each whiskering \[ \begin{tikzcd}[column sep=4.5em] a \ar[r, "f"] & a' \ar[r, bend left, "g", ""{below, name=g}] \ar[r, bend right, "g'"', ""{name=gp}]& a'' \ar[Rightarrow, from=g, to=gp, "\beta"] \end{tikzcd} \] of $A$ associates a $3$-cell \[ F_{\treeVLeft}(\beta, f) \colon F_{\treeV}(g', f) \comp_1 F_{\treeLL}(\beta \comp_0 f) \to F_{\treeLL}(\beta) \comp_0 F_{\treeLog}(f) \comp_1 F_{\treeV}(g, f) \] of $B$; \item[ \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [[[]]] ] \end{forest} }] a map $F_{\treeLLL}$, that to any $3$-cell $\gamma \colon \alpha \to \alpha'$ of $A$, \ie any tree of $A_{\treeLLL}$, associates a $3$-cell \[F_{\treeLLL}(\gamma) \colon F_{\treeLL}(\alpha) \to F_{\treeLL}(\alpha')\] of $B$. \end{description} \end{paragr} \begin{paragr}[Normalisation]\label{paragr:lax_3functor_cellular_norm} The normalisation requires the following constraints: \begin{description} \item[ \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [] \end{forest} } ] for any object $a$ of $A$, we have \[ F_{\treeLog}(1_a) = 1_{F_{\treeDot}(a)}\,; \] \item[ \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [] ] \end{forest} } ] for any $1$-cell $f$ of $A$ we have \[ F_{\treeLL}(1_f) = 1_{F_{\treeLog}(f)}\,; \] \item[ \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [] [] ] \end{forest} } ] for any $1$\nbd-cell $f \colon a \to a'$ of $A$, we have \[ F_{\treeV}(1_{a'}, f) = F_{\treeV}(f, 1_a) = 1_{F_{\treeL}(f)}\,; \] \item[ \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [ [] ] ] \end{forest} } ] for any $2$-cell $\alpha$ of $A$ we have \[ F_{\treeLLL}(1_\alpha) = 1_{F_{\treeLL}(\alpha)}\,. 
\] \item[ \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [] [] [] ] \end{forest} } ] for any pair $a \xto{f} a' \xto{g} a''$ of composable $1$-cell of $A$, we have \[ F_{\treeW}(g, f, 1_a) = F_{\treeW}(g, 1_{a'}, f) = F_{\treeW}(1_{a''}, g, f) = 1_{F_{\treeV}(g, f)}\,; \] \item[ \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [] [[]] ] \end{forest} }] for any pair $a \xto{f} a' \xto{g} a''$ of composable $1$\nbd-cells of~$A$, we have \[ F_{\treeVRight}(g, 1_f) = 1_{F_{\treeV}(g, f)}\,, \] and for any $2$-cell $\alpha \colon f \to f'$ of $A$, we have \[ F_{\treeVRight}(1_{a'}, \alpha) = 1_{F_{\treeLL}(\alpha)}\,; \] \item[ \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [[]] [] ] \end{forest} }] for any pair $a \xto{f} a' \xto{g} a''$ of composable $1$\nbd-cells of~$A$, we have \[ F_{\treeVLeft}(1_g, f) = 1_{F_{\treeV}(g, f)}\,, \] and for any $2$-cell $\beta \colon g \to g'$ of $A$, we have \[ F_{\treeVLeft}(\beta, 1_{a'}) = 1_{F_{\treeLL}(\beta)}\,; \] \end{description} \end{paragr} \begin{paragr}[Coherences]\label{paragr:lax_3functor_cellular_coherences} These maps are subject to the following relations: \begin{description} \item[ \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [[][]] ] \end{forest} }] for any pair of $1$-composable $2$-cells \[ \begin{tikzcd}[column sep=4.5em] a\phantom{'} \ar[r, bend left=50, looseness=1.2, "f", ""{below, name=f}] \ar[r, "g" description, ""{name=gu}, ""{below, name=gd}] \ar[r, bend right=50, looseness=1.2, "h"', ""{name=h}] \ar[Rightarrow, from=f, to=gu, "\alpha"] \ar[Rightarrow, from=gd, to=h, "\beta"]& a' \end{tikzcd} \] of $A$, \ie to any tree of $A_{\treeY}$, we have an equality \[ F_{\treeLL}(\beta) \comp_1 F_{\treeLL}(\alpha) = F_{\treeLL}(\beta \comp_1 \alpha) \] in $B$. We shall sometimes write $F_{\treeY}(\beta, \alpha)$ for the identity $3$-cell of this $2$-cell above. \item[ \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [] [] [] [] ] \end{forest} } ] For any quadruple \[ \begin{tikzcd} \bullet \ar[r, "f"] & \bullet \ar[r, "g"] & \bullet \ar[r, "h"] & \bullet \ar[r, "i"] & \bullet \end{tikzcd} \] of $0$-composable $1$-cells of $A$ we impose that the $3$-cells \begin{gather*} F_{\treeV}(i, h) \comp_0 F_{\treeLog}(g) \comp_0 F_{\treeLog}(f) \comp_1 F_{\treeW}(ih, g, h) \\ \comp_2\\ F_{\treeLog}(i) \comp_0 F_{\treeLog}(h) \comp_0 F_{\treeV}(g, f) \comp_1 F_{\treeW}(i, h, gf) \end{gather*} and \begin{gather*} F_{\treeW}(i, h, g) \comp_0 F_{\treeLog}(f) \comp_1 F_{\treeV}(i\comp_0 h \comp_0 g, f)\\ \comp_2\\ F_{\treeLog}(i) \comp_0 F_{\treeV}(h, g) \comp_0 F_{\treeLog}(f) \comp_1 F_{\treeW}(i, h \comp_0 g, f)\\ \comp_2 \\ F_{\treeLog}(i) \comp_0 F_{\treeW}(h, g, f) \comp_1 F_{\treeV}(i, h\comp_0 g \comp_0 f) \end{gather*} of $B$ are equal. 
\item[ \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [ [] ] [] [] ] \end{forest} } ] For any triple \[ \begin{tikzcd}[column sep=4.5em] \bullet \ar[r, "f"] & \bullet \ar[r, "g"] & \bullet \ar[r, bend left, "h", ""{below, name=h}] \ar[r, bend right, "h'"', ""{name=h2}] \ar[Rightarrow, from=h, to=h2, "\alpha"] & \bullet \end{tikzcd} \] of $0$-composable cells $f$, $g$ and $\alpha$ of $A$ we impose the $3$-cells \begin{gather*} F_{\treeLL}(\alpha) \comp_0 F_{\treeLog}(g) \comp_0 F_{\treeLog}(f) \comp_1 F_{\treeW}(h, g, f)\\ \comp_2\\ F_{\treeLog}(h') \comp_0 F_{\treeV}(g, f) \comp_1 F_{\treeVLeft}(\alpha, g \comp_0 f) \end{gather*} and \begin{gather*} F_{\treeVLeft}(\alpha, g) \comp_0 F_{\treeLog}(f) \comp_1 F_{\treeV}(h\comp_0 g, f)\\ \comp_2\\ F_{\treeV}(h', g) \comp_0 F_{\treeLog}(f) \comp_1 F_{\treeVLeft}(\alpha \comp_0 g, f)\\ \comp_2\\ F_{\treeW}(h', g, f) \comp_1 F_{\treeLL}(\alpha \comp_0 g \comp_0 f) \end{gather*} of $B$ to be equal. \item[ \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [] [ [] ] [] ] \end{forest} } ] For any triple \[ \begin{tikzcd}[column sep=4.5em] \bullet \ar[r, "f"] & \bullet \ar[r, bend left, "g", ""{below, name=g}] \ar[r, bend right, "g'"', ""{name=g2}] \ar[Rightarrow, from=g, to=g2, "\alpha"] & \bullet \ar[r, "h"] & \bullet \end{tikzcd} \] of $0$-composable cells $f$, $\alpha$ and $h$ of $A$, we impose that the $3$-cells \begin{gather*} F_{\treeV}(h, g') \comp_0 F_{\treeLog}(f) \comp_1 F_{\treeVLeft}(h\comp_0 \alpha, f)\\ \comp_2\\ F_{\treeW}(h, g', f) \comp_1 F_{\treeLL}(h\comp_0 \alpha \comp_0 f)\\ \comp_2\\ F_{\treeLog}(h) \comp_0 F_{\treeV}(g', f) \comp_1 F_{\treeVRight}(h, \alpha \comp_0 f) \end{gather*} and \begin{gather*} F_{\treeVRight}(h, \alpha) \comp_0 F_{\treeLog}(f) \comp_1 F_{\treeV}(h \comp_0 g, f)\\ \comp_2\\ F_{\treeLog}(h) \comp_0 F_{\treeLL}(\alpha) \comp_0 F_{\treeLog}(f) \comp_1 F_{\treeW}(h, g, f)\\ \comp_2\\ F_{\treeLog}(h) \comp_0 F_{\treeVLeft}(\alpha, f) \comp_1 F_{\treeV}(h, g\comp_0 f) \end{gather*} of $B$ to be equal. \item[ \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [] [] [ [] ] ] \end{forest} } ] For any triple \[ \begin{tikzcd}[column sep=4.5em] \bullet \ar[r, bend left, "f", ""{below, name=f}] \ar[r, bend right, "f'"', ""{name=f2}] \ar[Rightarrow, from=f, to=f2, "\alpha"] & \bullet \ar[r, "g"] & \bullet \ar[r, "h"] & \bullet \end{tikzcd} \] of $0$-composable cells $\alpha$, $g$ and $h$ of $A$ we impose the $3$-cells \begin{gather*} F_{\treeV}(h, g) \comp_0 F_{\treeLog}(f') \comp_1 F_{\treeVRight}(h\comp_0 g, \alpha)\\ \comp_2\\ F_{\treeLog}(h) \comp_0 F_{\treeLog}(g) \comp_0 F_{\treeLL}(\alpha) \comp_1 F_{\treeW}(h, g, f) \end{gather*} and \begin{gather*} F_{\treeW}(h, g, f') \comp_1 F_{\treeLL}(h \comp_0 g \comp_0 \alpha)\\ \comp_2\\ F_{\treeLog}(h) \comp_0 F_{\treeV}(g, f') \comp_1 F_{\treeVRight}(h, g \comp_0 \alpha)\\ \comp_2\\ F_{\treeLog}(h) \comp_0 F_{\treeVRight}(g, \alpha) \comp_1 F_{\treeV}(h, g \comp_0 f) \end{gather*} of $B$ to be equal. 
\item[ \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [ [] [] ] [] ] \end{forest} } ] For any triple \[ \begin{tikzcd}[column sep=4.5em] \bullet \ar[r, bend left=55, looseness=1.3, "f", ""{below, name=f1}] \ar[r, "f'"{description}, ""{name=f2u}, ""{below, name=f2d}] \ar[r, bend right=50, looseness=1.3, "f''"', ""{name=f3}] \ar[Rightarrow, from=f1, to=f2u, "\alpha"] \ar[Rightarrow, from=f2d, to=f3, "\beta"] & \bullet \ar[r, "g"] & \bullet \end{tikzcd} \] of cells $\alpha$, $\beta$ and $g$ of $A$ we impose the equality of the $3$-cells \[ F_{\treeV}(g, f'') \comp_1 F_{\treeVRight}(g, \beta \comp_1 \alpha) \] and \[ F_{\treeVRight}(g, \beta) \comp_1 F_{\treeLL}(g \comp_0 \alpha)\ \comp_2\ F_{\treeLog}(g) \comp_0 F_{\treeLL}(\beta) \comp_1 F_{\treeVRight}(g, \alpha) \] of $B$. \item[ \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [ [] [] ] [] ] \end{forest} } ] For any triple \[ \begin{tikzcd}[column sep=4.5em] \bullet \ar[r, "f"] & \bullet \ar[r, bend left=55, looseness=1.3, "g", ""{below, name=g1}] \ar[r, "f'"{description}, ""{name=g2u}, ""{below, name=g2d}] \ar[r, bend right=50, looseness=1.3, "g''"', ""{name=g3}] \ar[Rightarrow, from=g1, to=g2u, "\alpha"] \ar[Rightarrow, from=g2d, to=g3, "\beta"] & \bullet \end{tikzcd} \] of cells $\alpha$, $\beta$ and $g$ of $A$ we impose that the $3$-cells \[ F_{\treeVLeft}(\beta \comp_1 \alpha, f) \] and \[ F_{\treeLL}(\beta) \comp_0 F_{\treeLog}(f) \comp_1 F_{\treeVLeft}(\alpha, f)\ \comp_2\ F_{\treeVLeft}(\beta, f) \comp_1 F_{\treeLL}(\alpha \comp_0 f) \] of $B$ to be equal. \item[ \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [ [] ] [ [] ] ] \end{forest} } ] Notice first that for any pair \[ \begin{tikzcd}[column sep=4.5em] \bullet \ar[r, bend left, "f", ""{below, name=f1}] \ar[r, bend right, "f'"', ""{name=f2}] \ar[Rightarrow, from=f1, to=f2, "\alpha"] & \bullet \ar[r, bend left, "g", ""{below, name=g1}] \ar[r, bend right, "g'"', ""{name=g2}] \ar[Rightarrow, from=g1, to=g2, "\beta"] & \bullet \end{tikzcd} \] of $0$-composable $2$-cells $\alpha$ and $\beta$ of $A$, we have an equality of $2$-cells \[ \begin{tikzcd}[column sep=-5.5em] \null & F_{\treeLL}(g' \comp_0 \alpha \comp_1 \beta \comp_0 f) \ar[ldd, equal, "{\treeY}"'] = F_{\treeLL}(\beta \comp_0 f' \comp_1 g \comp_0 \alpha) \ar[rdd, equal, "{\treeY}"] & \null \\ \\ F_{\treeLL}(g' \comp_0 \alpha) \comp_1 F_{\treeLL}(\beta \comp_0 f) & \null & F_{\treeLL}(\beta \comp_0 f') \comp_1 F_{\treeLL}(g \comp_0 \alpha) \end{tikzcd} \] of $B$, where the equality in the higher row is just the exchange law. We shall denote by $F_{\text{ex}}(\beta, \alpha)$ the identity $3$-cell going from $F_{\treeLL}(g' \comp_0 \alpha) \comp_1 F_{\treeLL}(\beta \comp_0 f)$ to $F_{\treeLL}(\beta \comp_0 f') \comp_1 F_{\treeLL}(g \comp_0 \alpha)$. 
For any pair of $2$-cells $\alpha$ and $\beta$ of $A$ as above, we impose the $3$-cells \[ F_{\treeLL}(\beta) \comp_0 F_{\treeLog}(f') \comp_1 F_{\treeVRight}(g, \alpha)\ \comp_2\ F_{\treeLog}(g') \comp_0 F_{\treeLL}(\alpha) \comp_1 F_{\treeVLeft}(\beta, f) \] and \[ F_{\treeVLeft}(\beta, f') \comp_1 F_{\treeLL}(g\comp_0 \alpha)\ \comp_2\ F_{\treeV}(g', f') \comp_1 F_{\text{ex}}(\beta, \alpha)\ \comp_2\ F_{\treeVRight}(g', \alpha) \comp_1 F_{\treeLL}(\beta \comp_0 f) \] of $B$ to be equal. Since $F_{\text{ex}}(\beta, \alpha)$ is a trivial $3$-cell, this coherence is actually imposing the equality between the $3$-cells \[ F_{\treeLL}(\beta) \comp_0 F_{\treeLog}(f') \comp_1 F_{\treeVRight}(g, \alpha)\ \comp_2\ F_{\treeLog}(g') \comp_0 F_{\treeLL}(\alpha) \comp_1 F_{\treeVLeft}(\beta, f) \] and \[ F_{\treeVLeft}(\beta, f') \comp_1 F_{\treeLL}(g\comp_0 \alpha)\ \comp_2\ F_{\treeVRight}(g', \alpha) \comp_1 F_{\treeLL}(\beta \comp_0 f) \] of $B$. \item[ \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [ [] [] [] ] ] \end{forest} } ] For any triple \[ \begin{tikzcd}[column sep=4.7em] \bullet \ar[r, bend left=80, looseness=1.6, ""{below, name=1}] \ar[r, bend left, ""{name=2u}, ""{below, name=2d}] \ar[r, bend right, ""{name=3u}, ""{below, name=3d}] \ar[r, bend right=80, looseness=1.6, ""{name=4}] \ar[Rightarrow, from=1, to=2u, "\alpha"] \ar[Rightarrow, from=2d, to=3u, "\beta"] \ar[Rightarrow, from=3d, to=4, "\gamma"] & \bullet \end{tikzcd} \] of $1$-composable $2$-cells $\alpha$, $\beta$ and $\gamma$ of $A$ we have the equalities between the identity $3$-cell \[ F_{\treeY}(\gamma, \beta\comp_1 \alpha) \comp_2 F_{\treeLL}(\gamma) \comp_1 F_{\treeY}(\beta, \alpha) \] and the identity $3$-cell \[ F_{\treeY}(\gamma\comp_1 \beta, \alpha) \comp_2 F_{\treeY}(\gamma, \beta) \comp_1 F_{\treeLL}(\alpha) \] of $B$. \item[ \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [ [ [] [] ] ] ] \end{forest} } ] For any pair \[ \begin{tikzcd}[column sep=7em] \bullet \ar[r, bend left=60, looseness=1.2, "\phantom{bullet}"{below, name=1}] \ar[r, bend right=60, looseness=1.2, "\phantom{bullet}"{name=3}] \ar[Rightarrow, from=1, to=3, shift right=4ex, bend right, ""{name=beta1}] \ar[Rightarrow, from=1, to=3, ""'{name=beta2d}, ""{name=beta2u}] \ar[Rightarrow, from=1, to=3, shift left=4ex, bend left, ""'{name=beta3}] \arrow[triple, from=beta1, to=beta2d, "\gamma"]{} \arrow[triple, from=beta2u, to=beta3, "\delta"]{} & \bullet \end{tikzcd} \] of $2$-composable $3$-cells $\gamma$ and $\delta$ of $A$ we impose the equality \[ F_{\treeLLL}(\delta \comp_2 \gamma) = F_{\treeLLL}(\delta) \comp_2 F_{\treeLLL}(\gamma) \] between these two $3$-cells of $B$. 
\item[ \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [ [] [ [] ] ] ] \end{forest} } ] For any pair \[ \begin{tikzcd}[column sep=5em] \bullet \ar[r, bend left=60, looseness=1.2, ""{below, name=1}] \ar[r, ""{name=2u}, ""{below, name=2d}] \ar[r, bend right=60, looseness=1.2, ""{name=3}] \ar[Rightarrow, from=1, to=2u, "\alpha"] \ar[Rightarrow, from=2d, to=3, shift right=2.6ex, ""{name=beta1}] \ar[Rightarrow, from=2d, to=3, shift left=2.6ex, ""'{name=beta2}] \arrow[triple, from=beta1, to=beta2, "\gamma"]{} & \bullet \end{tikzcd} \] of $1$-composable cells $\alpha$ and $\gamma$ of $A$, we impose that the $3$-cells \[ F_{\treeLLL}(\gamma) \comp_1 F_{\treeLL}(\alpha) \] and \[ F_{\treeLLL}(\gamma \comp_1 \alpha) \] of $B$ are equal. \item[ \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [ [ [] ] [] ] ] \end{forest} } ] For any pair \[ \begin{tikzcd}[column sep=5em] \bullet \ar[r, bend left=60, looseness=1.2, ""{below, name=1}] \ar[r, ""{name=2u}, ""{below, name=2d}] \ar[r, bend right=60, looseness=1.2, ""{name=3}] \ar[Rightarrow, from=2d, to=3, "\beta"] \ar[Rightarrow, from=1, to=2u, shift right=2.6ex, ""{name=beta1}] \ar[Rightarrow, from=1, to=2u, shift left=2.6ex, ""'{name=beta2}] \arrow[triple, from=beta1, to=beta2, "\gamma"]{} & \bullet \end{tikzcd} \] of $1$-composable cells $\gamma$ and $\beta$ of $A$, we impose that the $3$-cells \[ F_{\treeLL}(\beta) \comp_1 F_{\treeLLL}(\gamma) \] and \[ F_{\treeLLL}(\beta \comp_1 \gamma) \] of $B$ are equal. \item[ \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [] [ [ [] ] ] ] \end{forest} } ] For any pair \[ \begin{tikzcd}[column sep=5em] \bullet \ar[r, "f"] & \bullet \ar[r, bend left=60, looseness=1.2, "g", "\phantom{bullet}"'{name=1}] \ar[r, bend right=60, looseness=1.2, "g'"', "\phantom{bullet}"{name=3}] \ar[Rightarrow, from=1, to=3, shift right=2ex, bend right, ""{name=beta1}] \ar[Rightarrow, from=1, to=3, shift left=2ex, bend left, ""'{name=beta3}] \arrow[triple, from=beta1, to=beta3, "\Gamma"]{} & \bullet \end{tikzcd} \] of $0$-composable cells $f$ and $\Gamma$ of $A$, we impose the equality \[ F_{\treeV}(g', f) \comp_1 F_{\treeLLL}(\Gamma \comp_0 f) = F_{\treeLLL}(\Gamma) \comp_0 F_{\treeLog}(f) \comp_1 F_{\treeV}(g, f) \] between these two $3$-cells of $B$. \item[ \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [ [ [] ] ] [] ] \end{forest} } ] For any pair \[ \begin{tikzcd}[column sep=5em] \bullet \ar[r, bend left=60, looseness=1.2, "f", "\phantom{bullet}"'{name=1}] \ar[r, bend right=60, looseness=1.2, "f'"', "\phantom{bullet}"{name=3}] \ar[Rightarrow, from=1, to=3, shift right=2ex, bend right, ""{name=beta1}] \ar[Rightarrow, from=1, to=3, shift left=2ex, bend left, ""'{name=beta3}] \arrow[triple, from=beta1, to=beta3, "\Gamma"]{} & \bullet \ar[r, "g"] & \bullet \end{tikzcd} \] of $0$-composable cells $\Gamma$ and $g$ of $A$, we impose the equality \[ F_{\treeV}(g, f') \comp_1 F_{\treeLLL}(g \comp_0 \Gamma) = F_{\treeLog}(g) \comp_0 F_{\treeLLL}(\Gamma) \comp_1 F_{\treeV}(g, f) \] between these two $3$-cells of $B$. 
\end{description}
\begin{rem}
Let us explain in more detail why we need the data for the tree $\treeY$ representing the vertical composition of two $2$-cells to be trivial. In fact, one would expect that for any pair of $1$-composable $2$-cells $x$ and $y$ of $A$, a (normalised) oplax $3$-functor would associate a $3$-cell
\[
F_{\treeY}(x, y) \colon F_{\treeLL}(x \comp_1 y) \to F_{\treeLL}(x) \comp_1 F_{\treeLL}(y).
\]
At the same time, for any pair
\[
\begin{tikzcd}[column sep=4.5em] \bullet \ar[r, bend left, "f", ""{below, name=f1}] \ar[r, bend right, "f'"', ""{name=f2}] \ar[Rightarrow, from=f1, to=f2, "\alpha"] & \bullet \ar[r, bend left, "g", ""{below, name=g1}] \ar[r, bend right, "g'"', ""{name=g2}] \ar[Rightarrow, from=g1, to=g2, "\beta"] & \bullet \end{tikzcd}
\]
of $0$-composable $2$-cells $\alpha$ and $\beta$ of $A$, the coherence for the tree $\treeVLR$ should express a relationship among $F_{\treeVRight}(g, \alpha)$, $F_{\treeVLeft}(\beta, f)$, $F_{\treeVRight}(g', \alpha)$ and $F_{\treeVLeft}(\beta, f')$. On the one hand, we can compose
\[
\bigl(F_{\treeLL}(\beta) \comp_0 F_{\treeLog}(f') \comp_1 F_{\treeVRight}(g, \alpha)\bigr)\ \comp_2\ \bigl(F_{\treeLog}(g') \comp_0 F_{\treeLL}(\alpha) \comp_1 F_{\treeVLeft}(\beta, f)\bigr),
\]
getting a $3$-cell from
\[
F_{\treeL}(g') \comp_0 F_{\treeLL}(\alpha) \comp_1 F_{\treeV}(g', f) \comp_1 F_{\treeLL}(\beta \comp_0 f)
\]
to
\[
F_{\treeLL}(\beta)\comp_0 F_{\treeL}(f') \comp_1 F_{\treeV}(g, f') \comp_1 F_{\treeLL}(g \comp_0 \alpha).
\]
On the other hand, we have the $3$-cell
\begin{equation}\label{cell:VR} F_{\treeVRight}(g', \alpha) \comp_1 F_{\treeLL}(\beta \comp_0 f), \tag{A} \end{equation}
with source
\[
F_{\treeL}(g') \comp_0 F_{\treeLL}(\alpha) \comp_1 F_{\treeV}(g', f) \comp_1 F_{\treeLL}(\beta \comp_0 f)
\]
and target
\[
F_{\treeV}(g', f') \comp_1 F_{\treeLL}(g'\comp_0 \alpha) \comp_1 F_{\treeLL}(\beta \comp_0 f),
\]
as well as the $3$-cell
\begin{equation}\label{cell:VL} F_{\treeVLeft}(\beta, f') \comp_1 F_{\treeLL}(g\comp_0 \alpha), \tag{B} \end{equation}
with source
\[
F_{\treeV}(g', f') \comp_1 F_{\treeLL}(\beta\comp_0 f') \comp_1 F_{\treeLL}(g \comp_0 \alpha)
\]
and target
\[
F_{\treeLL}(\beta)\comp_0 F_{\treeL}(f') \comp_1 F_{\treeV}(g, f') \comp_1 F_{\treeLL}(g \comp_0 \alpha).
\]
The $3$-cells \eqref{cell:VR} and \eqref{cell:VL} are \emph{not} composable, since the target of the first one and the source of the second one are respectively the bottom left and the bottom right $2$-cells of the following diagram
\[
\begin{tikzcd}[column sep=-13.5em] \null & F_{\treeV}(g', f') \comp_1 F_{\treeLL}(g' \comp_0 \alpha \comp_1 \beta \comp_0 f) \ar[ldd, triple, "{\treeY}"'] = F_{\treeV}(g', f') \comp_1 F_{\treeLL}(\beta \comp_0 f' \comp_1 g \comp_0 \alpha) \ar[rdd, triple, "{\treeY}"] & \null \\ \\ F_{\treeV}(g', f') \comp_1 F_{\treeLL}(g' \comp_0 \alpha) \comp_1 F_{\treeLL}(\beta \comp_0 f) & \null & F_{\treeV}(g', f') \comp_1 F_{\treeLL}(\beta \comp_0 f') \comp_1 F_{\treeLL}(g \comp_0 \alpha) \end{tikzcd}
\]
of $B$, where the equality in the higher row is just the exchange law. Unless the data for the tree $\treeY$ is trivial, it is impossible to provide a relationship among the $3$-cells listed above involving the whiskerings.
\end{rem}
\begin{rem}
Gurski defines in~\cite{GurskiCoherence} a notion of lax trimorphism between tricategories, see Definition~4.11 of~\loccit; one can easily adapt and suitably dualise the definition and get a notion of normalised oplax trimorphism between tricategories.
Consider two strict $3$-categories $A$ and $B$, a normalised oplax $3$-functor $F \colon A \to B$ and a normalised oplax trimorphism $G \colon A \to B$. There are two main differences between Gurski's notion of normalised oplax trimorphism and the notion of normalised oplax $3$-functor that we presented above.
\begin{itemize}
\item The first difference concerns the tree~$\treeY$. Gurski's notion requires that, for any pair $(\beta, \alpha)$ of $1$-composable $2$-cells of~$A$, there is a $3$-cell of~$B$ going from $G(\beta \comp_1 \alpha)$ to $G(\beta) \comp_1 G(\alpha)$ which is \emph{not} invertible in general. It is essential in our definition of normalised oplax $3$-functor that the tree~$\treeY$ appears in the coherences. This is slightly unnatural even from our arboreal point of view and it may be reasonable to actually impose this condition on Gurski's normalised oplax trimorphisms, in light of a comparison with normalised oplax $3$-functors. Gurski calls this condition \emph{local strictness}. Notice that it implies in particular that~$F$ as well as~$G$ induce strict $2$-functors on the hom-$2$-categories.
\item The second difference is deeper and somewhat irreconcilable. For any pair $(\beta, \alpha)$ of $0$-composable $2$-cells of~$A$, the normalised oplax trimorphism provides a $3$-cell
\[
G_{\treeVLR}(\beta, \alpha) \colon G(t_1(\beta), t_1(\alpha)) \comp_1 G(\beta \comp_0 \alpha) \to \bigl(G(\beta) \comp_0 G(\alpha)\bigr) \comp_1 G(s_1(\beta), s_1(\alpha))\,.
\]
This is incompatible with the algebra of the orientals, as we shall explain in more detail in the following section. In fact, a normalised oplax $3$-functor has the tree~$\treeVLR$ as a coherence and instead the trees~$\treeVLeft$ and~$\treeVRight$ as part of the data. But these pieces of data are symmetric (or better, dual), hence they cannot fit as a particular case of a single lax or oplax datum for~$\treeVLR$.
\end{itemize}
\end{rem}
\begin{exem}\label{exem:sup}
Let $C$ be a $3$-category. We now define a normalised oplax $3$-functor $\sup \colon i_{\cDelta}(N_{3}(C)) \to C$ (cf.~\hyperref[notation]{Notations and Terminology}).
\begin{description}
\item[$\TreeDot\ \ $] The map $\sup_{\treeDot}$ is defined by mapping an object $(a, x)$, where $x \colon \On{m} \to C$, to $x(\atom{m})$.
\item[$\TreeLog\ \ \:$] The map $\sup_{\treeLog}$ assigns to any morphism $f \colon (a, x) \to (b, y)$ of $i_{\cDelta}(N_{3}(C))$, where $x \colon \On{m} \to C$ and $y \colon \On{n} \to C$, the $1$-cell $y(\atom{f(m), n})$ of $C$.
\item[$\TreeV\ $] The map $\sup_{\treeV}$ assigns to any pair of composable morphisms
\[
\begin{tikzcd} (a, x) \ar[r, "f"] & (b, y) \ar[r, "g"] & (c, z) \end{tikzcd}
\]
of $i_{\cDelta}(N_{3}(C))$, with $x \colon \On{m} \to C$, $y \colon \On{n} \to C$ and $z \colon \On{p} \to C$, the $2$-cell $\sup_{\treeV}(g, f)$ of $C$ given by
\[
z\bigl(\atom{gf(m), g(n), p}\bigr)\,.
\]
\item[$\TreeW\ $] The map $\sup_{\treeW}$ assigns to any triple of composable morphisms
\[
\begin{tikzcd}[column sep=small] (a,x) \ar[r, "f"] & (b, y) \ar[r, "g"] & (c, z) \ar[r, "h"] & (d, t) \end{tikzcd}
\]
of $i_{\cDelta}(N_{3}(C))$, with $x \colon \On{m} \to C$, $y \colon \On{n} \to C$, $z \colon \On{p} \to C$ and $t \colon \On{q} \to C$, the $3$-cell $\sup_{\treeW}(h, g, f)$ of $C$ given by
\[
t\bigl(\atom{hgf(m), hg(n), h(p), q}\bigr)\,.
\]
\end{description}
Notice that by definition we have that $1_{\sup_{\treeDot}(a, x)}$ is precisely $\sup_{\treeL}(1_{(a, x)})$ and that the other conditions of normalisation are equally trivial by definition.
We now check the coherence for the tree $\treeVV$. Consider four composable morphisms of~$i_{\cDelta}(N_{3}(C))$ \[ \begin{tikzcd}[column sep=small] (a, x) \ar[r, "f"] & (b, y) \ar[r, "g"] & (c, z) \ar[r, "h"] & (d, t) \ar[r, "i"] & (e, w) \end{tikzcd}\ , \] with $x \colon \On{m} \to C$, $y \colon \On{n} \to C$, $z \colon \On{p} \to C$, $t \colon \On{q} \to C$ and $w \colon \On{r} \to C$. We have to show that the $3$-cells \begin{gather*} \textstyle \sup_{\treeW}(i, h, g) \comp_0 \sup_{\treeLog}(f) \comp_1\sup_{\treeV}(i\comp_0 h \comp_0 g, f)\\ \comp_2\\ \textstyle \sup_{\treeLog}(i) \comp_0 \sup_{\treeV}(h, g) \comp_0 \sup_{\treeLog}(f) \comp_1 \sup_{\treeW}(i, h \comp_0 g, f)\\ \comp_2 \\ \textstyle \sup_{\treeLog}(i) \comp_0 \sup_{\treeW}(h, g, f) \comp_1 \sup_{\treeV}(i, h\comp_0 g \comp_0 f) \end{gather*} and \begin{gather*} \textstyle \sup_{\treeV}(i, h) \comp_0 \sup_{\treeLog}(g) \comp_0 \sup_{\treeLog}(f) \comp_1 \sup_{\treeW}(ih, g, f) \\ \comp_2\\ \textstyle \sup_{\treeLog}(i) \comp_0 \sup_{\treeLog}(h) \comp_0 \sup_{\treeV}(g, f) \comp_1 \sup_{\treeW}(i, h, gf)\,. \end{gather*} But these two $3$-cells are precisely the target and the source of the main $4$-cell of $\On{4}$ via the \oo-functor $\On{4} \xto{\phi} \On{r} \xto{w} C$, where \[ \phi = \On{\{ihgf(m), ihg(n), ih(p), i(q), r\}}\,. \] \end{exem} \end{paragr} \subsection{From cellular to simplicial}\label{section:cellular-to-simplicial} In this subsection we shall show that to any normalised oplax $3$-functor $F \colon B \to C$ there is a canonically associated morphism $\SNn{l}(F) \colon N_\infty(B) \to N_\infty(C)$ of simplicial sets. Throughout this section we shall sometimes write oplax $3$-functor for normalised oplax $3$-functor, since no confusion is possible. \begin{paragr}\label{paragr:def_cellular_to_simplicial} Let $A$ be a $1$-category and $B$ and $C$ be two $3$\nbd-cat\-egories. Fix two oplax $3$\nbd-func\-tors $F \colon A \to B$ and $G \colon B \to C$. We now define a candidate $GF$ for the composition of $F$ and $G$ and we dedicate the rest of the subsection to prove the coherences. Since $A$ is a $1$-category, the amount of data that we have to provide in order to define an oplax $3$-functor, \ie the trees of dimension less than~$3$, is limited to the trees $\TreeDot$, $\TreeLog$, $\TreeV$ and $\TreeW$. The $3$-functor $GF$ is defined as follows: \begin{description} \item[ \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ ] \end{forest} } ] The map $GF_{\treeDot}$ assigns to any object $a$ of $A$ the object $G_{\treeDot}(F_{\treeDot}(a))$ of $C$. \item[ \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [] ] \end{forest} } ] The map $GF_{\treeLog}$ assigns to any $1$-cell $f \colon a \to a'$ of $A$, \ie any tree of $A_{\treeLog}$, the $1$-cell \[G_{\treeLog}(F_{\treeLog}(f)) \colon GF_{\treeDot}(a) \to GF_{\treeDot}(a')\] of $C$. 
\item[ \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [][] ] \end{forest} } ] The map $GF_{\treeV}$ assigns to any pair of $0$\hyp{}composable $1$\hyp{}cells
\[
\begin{tikzcd} a \ar[r, "f"] & a' \ar[r, "g"] & a'' \end{tikzcd}
\]
of $A$ the $2$-cell $GF_{\treeV}(g, f)$
\[
\begin{tikzcd}[row sep=1.35em] a^{\phantom\prime} \ar[rr, bend left=75, "{GF_{\treeLog}(gf)}", ""'{name=f}] \ar[rr, "{G_{\treeLog}(F_{\treeLog}(g)F_{\treeLog}(f))}"{description, name=g}] \ar[rd, bend right, "{GF_{\treeLog}(f)}"'] && a'' \\ & a' \ar[ru, bend right, "{GF_{\treeLog}(g)}"'] & \ar[Rightarrow, from=f, to=g, shorten <=1mm, shorten >=2mm, "\alpha"] \ar[Rightarrow, from=g, to=2-2, shorten <=1mm, pos=0.4, "\beta" near end] \end{tikzcd}
\]
of $C$, where $\alpha = G_{\treeLL}(F_{\treeV}(g, f))$ and $\beta = G_{\treeV}(F_{\treeLog}(g), F_{\treeLog}(f))$, that is
\[
G_{\treeV}(F_{\treeLog}(g), F_{\treeLog}(f)) \comp_1 G_{\treeLL}(F_{\treeV}(g, f))\,.
\]
\item[ \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [][][] ] \end{forest} } ] The map $GF_{\treeW}$ assigns to any triple of $0$-composable $1$-cells
\[
\begin{tikzcd}[column sep=small] a \ar[r, "f"] & a' \ar[r, "g"] & a'' \ar[r, "h"] & a''' \end{tikzcd}
\]
of $A$ the $3$-cell $GF_{\treeW}(h, g, f)$ defined as
\begin{gather*}
G_{\treeV}(F_{\treeLog}h, F_{\treeLog}g) \comp_0 GF_{\treeLog}(f) \comp_1 G_{\treeVLeft}(F_{\treeV}(h, g), F_{\treeLog}f) \comp_1 G_{\treeLL}(F_{\treeV}(hg, f))\\
\comp_2\\
G_{\treeW}(F_{\treeLog}h, F_{\treeLog}g, F_{\treeLog}f) \comp_1 G_{\treeLLL}(F_{\treeW}(h, g, f)) \\
\comp_2\\
GF_{\treeLog}(h) \comp_0 G_{\treeV}(F_{\treeLog}g, F_{\treeLog}f) \comp_1 G_{\treeVRight}(F_{\treeLog}h, F_{\treeV}(g, f)) \comp_1 G_{\treeLL}(F_{\treeV}(h, gf))\,.
\end{gather*}
Notice that for the $3$-cell $G_{\treeLLL}(F_{\treeW}(h, g, f))$ in the second line we are implicitly using the coherence $G_{\treeY}$, as we use the equality
\[
G_{\treeLL}(F_{\treeLog}h \comp_0 F_{\treeV}(g, f)) \comp_1 G_{\treeLL}(F_{\treeV}(h, gf)) = G_{\treeLL}(F_{\treeLog}h \comp_0 F_{\treeV}(g, f) \comp_1 F_{\treeV}(h, gf))
\]
as well as the equality
\[
G_{\treeLL}(F_{\treeV}(h, g) \comp_0 F_{\treeLog}f) \comp_1 G_{\treeLL}(F_{\treeV}(hg, f)) = G_{\treeLL}(F_{\treeV}(h, g) \comp_0 F_{\treeLog}f \comp_1 F_{\treeV}(hg, f))
\]
of $2$-cells of $C$, which are respectively the source and target of the $3$-cell $G_{\treeLLL}(F_{\treeW}(h, g, f))$.
\end{description}
\end{paragr}
\begin{paragr}
The conditions of normalisation for the composite $GF$ are tedious but straightforward. We give here a few examples and we leave the other similar verifications to the reader.
\begin{description}
\item[ \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [] ] \end{forest} } ] For any $0$-cell $a$ of $A$ we have
\[
GF_{\treeLL}(1_a) = s^0_1(GF_{\treeL}(a)) = 1_{GF_{\treeL}(a)}\,.
\]
\item[ \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [][] ] \end{forest} } ] For any $1$-cell $f \colon a \to a'$ of $A$, we have
\[
G_{\treeV}\bigl(F_{\treeL}(f), F_{\treeL}(1_a)\bigr) = G_{\treeV}\bigl(F_{\treeL}(f), 1_{F_{\treeDot}(a)}\bigr) = 1_{G_{\treeL}(F_{\treeL}(f))} = 1_{GF_{\treeL}(f)}
\]
and also
\[
G_{\treeLL}\bigl(F_{\treeV}(f, 1_a)\bigr) = G_{\treeLL}(1_{F_{\treeL}(f)}) = 1_{G_{\treeL}(F_{\treeL}(f))} = 1_{GF_{\treeL}(f)}\,,
\]
so that
\[
GF_{\treeV}(f, 1_a) = 1_{GF_{\treeL}(f)} \comp_1 1_{GF_{\treeL}(f)} = 1_{GF_{\treeL}(f)}\,.
\]
\item[ \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [][][] ] \end{forest} } ] For any pair $a \xto{f} a' \xto{g} a''$ of $1$-cells of $A$, we have
\[
G_{\treeVLeft}\bigl(F_{\treeV}(g, f), F_{\treeL}(1_a)\bigr) = G_{\treeVLeft}\bigl(F_{\treeV}(g, f), 1_{F_{\treeDot}(a)}\bigr) = 1_{G_{\treeLL}(F_{\treeV}(g, f))}
\]
and
\[
G_{\treeW}\bigl(F_{\treeL}(g), F_{\treeL}(f), F_{\treeL}(1_a)\bigr) = G_{\treeW}\bigl(F_{\treeL}(g), F_{\treeL}(f), 1_{F_{\treeDot}(a)}\bigr) = 1_{G_{\treeV}(F_{\treeL}(g), F_{\treeL}(f))}
\]
and
\[
G_{\treeLLL}\bigl(F_{\treeW}(g, f, 1_a)\bigr) = G_{\treeLLL}(1_{F_{\treeV}(g, f)}) = 1_{G_{\treeLL}(F_{\treeV}(g, f))}
\]
and
\[
G_{\treeVRight}\bigl(F_{\treeL}(g), F_{\treeV}(f, 1_a)\bigr) = G_{\treeVRight}\bigl(F_{\treeL}(g), 1_{F_{\treeL}(f)}\bigr) = 1_{G_{\treeV}(F_{\treeL}(g), F_{\treeL}(f))}\,.
\]
Hence, we get that
\[
GF_{\treeW}(g, f, 1_a) = 1_{G_{\treeV}(F_{\treeL}(g), F_{\treeL}(f))} \comp_1 1_{G_{\treeLL}(F_{\treeV}(g, f))} = 1_{GF_{\treeV}(g, f)}\,.
\]
\end{description}
\end{paragr}
\begin{paragr}\label{paragr:pentagon_coherence}
Since $A$ is a $1$-category, the only coherence we have to prove is the coherence associated to the tree \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [] [] [] [] ] \end{forest} }. Consider four composable $1$\nbd-cells of~$A$
\[
\begin{tikzcd}[column sep=small] \bullet \ar[r, "f"] & \bullet \ar[r, "g"] & \bullet \ar[r, "h"] & \bullet \ar[r, "i"] & \bullet \end{tikzcd}\ .
\]
We have to show that the $3$-cells
\begin{gather*}
GF_{\treeW}(i, h, g) \comp_0 GF_{\treeLog}(f) \comp_1 GF_{\treeV}(i\comp_0 h \comp_0 g, f)\\
\comp_2\\
GF_{\treeLog}(i) \comp_0 GF_{\treeV}(h, g) \comp_0 GF_{\treeLog}(f) \comp_1 GF_{\treeW}(i, h \comp_0 g, f)\\
\comp_2 \\
GF_{\treeLog}(i) \comp_0 GF_{\treeW}(h, g, f) \comp_1 GF_{\treeV}(i, h\comp_0 g \comp_0 f)
\end{gather*}
and
\begin{gather*}
GF_{\treeV}(i, h) \comp_0 GF_{\treeLog}(g) \comp_0 GF_{\treeLog}(f) \comp_1 GF_{\treeW}(ih, g, f) \\
\comp_2\\
GF_{\treeLog}(i) \comp_0 GF_{\treeLog}(h) \comp_0 GF_{\treeV}(g, f) \comp_1 GF_{\treeW}(i, h, gf)
\end{gather*}
of $C$ are equal.
The five $3$-cells involved in these compositions are:
\begin{enumerate}
\item\label{item:h-g-f} the $3$-cell $GF_{\treeW}(h, g, f)$ of $C$, which is defined as
\begin{gather*}
G_{\treeV}(F_{\treeLog}h, F_{\treeLog}g) \comp_0 GF_{\treeLog}(f) \comp_1 G_{\treeVLeft}(F_{\treeV}(h, g), F_{\treeLog}f) \comp_1 G_{\treeLL}(F_{\treeV}(hg, f))\\
\comp_2\\
G_{\treeW}(F_{\treeLog}h, F_{\treeLog}g, F_{\treeLog}f) \comp_1 G_{\treeLLL}(F_{\treeW}(h, g, f)) \\
\comp_2\\
GF_{\treeLog}(h) \comp_0 G_{\treeV}(F_{\treeLog}g, F_{\treeLog}f) \comp_1 G_{\treeVRight}(F_{\treeLog}h, F_{\treeV}(g, f)) \comp_1 G_{\treeLL}(F_{\treeV}(h, gf))\,,
\end{gather*}
which is a suitably whiskered $1$-composition of the $3$-cells
\begin{enumerate}
\item\label{item:h-g-f-1} $G_{\treeVRight}(F_{\treeLog}h, F_{\treeV}(g, f))$,
\item\label{item:h-g-f-2} $G_{\treeW}(F_{\treeLog}h, F_{\treeLog}g, F_{\treeLog}f) \comp_1 G_{\treeLLL}(F_{\treeW}(h, g, f))$,
\item\label{item:h-g-f-3} $G_{\treeVLeft}(F_{\treeV}(h, g), F_{\treeLog}f)$,
\end{enumerate}
of $C$;
\item\label{item:i-hg-f} the $3$-cell $GF_{\treeW}(i, h \comp_0 g, f)$ of $C$, which is defined as
\begin{gather*}
G_{\treeV}(F_{\treeLog}i, F_{\treeLog}hg) \comp_0 GF_{\treeLog}(f) \comp_1 G_{\treeVLeft}(F_{\treeV}(i, hg), F_{\treeLog}f) \comp_1 G_{\treeLL}(F_{\treeV}(ihg, f))\\
\comp_2\\
G_{\treeW}(F_{\treeLog}i, F_{\treeLog}hg, F_{\treeLog}f) \comp_1 G_{\treeLLL}(F_{\treeW}(i, hg, f)) \\
\comp_2\\
GF_{\treeLog}(i) \comp_0 G_{\treeV}(F_{\treeLog}hg, F_{\treeLog}f) \comp_1 G_{\treeVRight}(F_{\treeLog}i, F_{\treeV}(hg, f)) \comp_1 G_{\treeLL}(F_{\treeV}(i, hgf))\,,
\end{gather*}
which is a suitably whiskered $1$-composition of the $3$-cells
\begin{enumerate}
\item\label{item:i-hg-f-1}\label{item:b1} $G_{\treeVRight}(F_{\treeLog}i, F_{\treeV}(hg, f))$,
\item\label{item:i-hg-f-2}\label{item:b2} $G_{\treeW}(F_{\treeLog}i, F_{\treeLog}hg, F_{\treeLog}f) \comp_1 G_{\treeLLL}(F_{\treeW}(i, hg, f))$,
\item\label{item:i-hg-f-3}\label{item:b3} $G_{\treeVLeft}(F_{\treeV}(i, hg), F_{\treeLog}f)$,
\end{enumerate}
of $C$;
\item\label{item:i-h-g} the $3$-cell $GF_{\treeW}(i, h, g)$ of $C$, which is defined as
\begin{gather*}
G_{\treeV}(F_{\treeLog}i, F_{\treeLog}h) \comp_0 GF_{\treeLog}(g) \comp_1 G_{\treeVLeft}(F_{\treeV}(i, h), F_{\treeLog}g) \comp_1 G_{\treeLL}(F_{\treeV}(ih, g))\\
\comp_2\\
G_{\treeW}(F_{\treeLog}i, F_{\treeLog}h, F_{\treeLog}g) \comp_1 G_{\treeLLL}(F_{\treeW}(i, h, g)) \\
\comp_2\\
GF_{\treeLog}(i) \comp_0 G_{\treeV}(F_{\treeLog}h, F_{\treeLog}g) \comp_1 G_{\treeVRight}(F_{\treeLog}i, F_{\treeV}(h, g)) \comp_1 G_{\treeLL}(F_{\treeV}(i, hg))\,,
\end{gather*}
which is a suitably whiskered $1$-composition of the $3$-cells
\begin{enumerate}
\item\label{item:i-h-g-1} $G_{\treeVRight}(F_{\treeLog}i, F_{\treeV}(h, g))$,
\item\label{item:i-h-g-2} $G_{\treeW}(F_{\treeLog}i, F_{\treeLog}h, F_{\treeLog}g) \comp_1 G_{\treeLLL}(F_{\treeW}(i, h, g))$,
\item\label{item:i-h-g-3} $G_{\treeVLeft}(F_{\treeV}(i, h), F_{\treeLog}g)$,
\end{enumerate}
of $C$;
\item\label{item:i-h-gf} the $3$-cell $GF_{\treeW}(i, h, gf)$ of $C$, which is defined as
\begin{gather*}
G_{\treeV}(F_{\treeLog}i, F_{\treeLog}h) \comp_0 GF_{\treeLog}(gf) \comp_1 G_{\treeVLeft}(F_{\treeV}(i, h), F_{\treeLog}gf) \comp_1 G_{\treeLL}(F_{\treeV}(ih, gf))\\
\comp_2\\
G_{\treeW}(F_{\treeLog}i, F_{\treeLog}h, F_{\treeLog}gf) \comp_1 G_{\treeLLL}(F_{\treeW}(i, h, gf)) \\
\comp_2\\
GF_{\treeLog}(i) \comp_0 G_{\treeV}(F_{\treeLog}h, F_{\treeLog}gf) \comp_1 G_{\treeVRight}(F_{\treeLog}i, F_{\treeV}(h, gf)) \comp_1
G_{\treeLL}(F_{\treeV}(i, hgf))\,,
\end{gather*}
which is a suitably whiskered $1$-composition of the $3$-cells
\begin{enumerate}
\item\label{item:i-h-gf-1} $G_{\treeVRight}(F_{\treeLog}i, F_{\treeV}(h, gf))$,
\item\label{item:i-h-gf-2} $G_{\treeW}(F_{\treeLog}i, F_{\treeLog}h, F_{\treeLog}gf) \comp_1 G_{\treeLLL}(F_{\treeW}(i, h, gf))$,
\item\label{item:i-h-gf-3} $G_{\treeVLeft}(F_{\treeV}(i, h), F_{\treeLog}gf)$,
\end{enumerate}
of $C$;
\item\label{item:ih-g-f} the $3$-cell $GF_{\treeW}(ih, g, f)$ of $C$, which is defined as
\begin{gather*}
G_{\treeV}(F_{\treeLog}ih, F_{\treeLog}g) \comp_0 GF_{\treeLog}(f) \comp_1 G_{\treeVLeft}(F_{\treeV}(ih, g), F_{\treeLog}f) \comp_1 G_{\treeLL}(F_{\treeV}(ihg, f))\\
\comp_2\\
G_{\treeW}(F_{\treeLog}ih, F_{\treeLog}g, F_{\treeLog}f) \comp_1 G_{\treeLLL}(F_{\treeW}(ih, g, f)) \\
\comp_2\\
GF_{\treeLog}(ih) \comp_0 G_{\treeV}(F_{\treeLog}g, F_{\treeLog}f) \comp_1 G_{\treeVRight}(F_{\treeLog}ih, F_{\treeV}(g, f)) \comp_1 G_{\treeLL}(F_{\treeV}(ih, gf))\,,
\end{gather*}
which is a suitably whiskered $1$-composition of the $3$-cells
\begin{enumerate}
\item\label{item:ih-g-f-1} $G_{\treeVRight}(F_{\treeLog}ih, F_{\treeV}(g, f))$,
\item\label{item:ih-g-f-2} $G_{\treeW}(F_{\treeLog}ih, F_{\treeLog}g, F_{\treeLog}f) \comp_1 G_{\treeLLL}(F_{\treeW}(ih, g, f))$,
\item\label{item:ih-g-f-3} $G_{\treeVLeft}(F_{\treeV}(ih, g), F_{\treeLog}f)$,
\end{enumerate}
of $C$.
\end{enumerate}
In summary, we have to show that the pentagon
\begin{center}
\begin{tikzpicture}[scale=1.5] \foreach \i in {0,1,2,3,4} { \tikzmath{\a = 270 - (72 * \i);} \node (\i) at (\a:2) {$\bullet$}; } \draw[->, >=latex] (1) -- node [left] {\ref{item:h-g-f}} (2); \draw[->, >=latex] (2) -- node [above] {\ref{item:i-hg-f}} (3); \draw[->, >=latex] (3) -- node [right] {\ref{item:i-h-g}} (4); \draw[->, >=latex] (1) -- node [below left] {\ref{item:i-h-gf}} (0); \draw[->, >=latex] (0) -- node [below right] {\ref{item:ih-g-f}} (4); \end{tikzpicture}
\end{center}
of $2$-compositions of $3$-cells of $C$ is commutative.
Using the decomposition of each of these $3$-cells as suitably whiskered $1$-composition of other $3$-cells of $C$, we have to show that the following diagram \begin{center} \begin{tikzpicture}[scale=1.2] \foreach \i in {0,1,2,3,4} { \tikzmath{\a = 270 - (72 * \i);} \node (e\i) at (\a:3) {$\bullet$}; } \foreach \j in {1,2,3,4} { \tikzmath{\a = 270 - (72 * \j);} \pgfmathtruncatemacro\bj{\j-1} \node (e\j-a) at ($(e\bj)!0.33!(e\j)$) {$\bullet$}; \node (e\j-b) at ($(e\bj)!0.67!(e\j)$) {$\bullet$}; } \node (e0-a) at ($(e0)!0.33!(e4)$) {$\bullet$}; \node (e0-b) at ($(e0)!0.66!(e4)$) {$\bullet$}; \draw[->, >=latex] (e1) to node [left] {\ref{item:h-g-f-1}} (e2-a); \draw[->, >=latex] (e2-a) to node [left] {\ref{item:h-g-f-2}} (e2-b); \draw[->, >=latex] (e2-b) to node [left] {\ref{item:h-g-f-3}} (e2); \draw[->, >=latex] (e2) to node [above] {\ref{item:i-hg-f-1}} (e3-a); \draw[->, >=latex] (e3-a) to node [above] {\ref{item:i-hg-f-2}} (e3-b); \draw[->, >=latex] (e3-b) to node [above] {\ref{item:i-hg-f-3}} (e3); \draw[->, >=latex] (e3) to node [right] {\ref{item:i-h-g-1}} (e4-a); \draw[->, >=latex] (e4-a) to node [right] {\ref{item:i-h-g-2}} (e4-b); \draw[->, >=latex] (e4-b) to node [right] {\ref{item:i-h-g-3}} (e4); \draw[->, >=latex] (e1) to node [below left] {\ref{item:i-h-gf-1}} (e1-b); \draw[->, >=latex] (e1-b) to node [below left] {\ref{item:i-h-gf-2}} (e1-a); \draw[->, >=latex] (e1-a) to node [below left] {\ref{item:i-h-gf-3}} (e0); \draw[->, >=latex] (e0) to node [below right] {\ref{item:ih-g-f-1}} (e0-a); \draw[->, >=latex] (e0-a) to node [below right] {\ref{item:ih-g-f-2}} (e0-b); \draw[->, >=latex] (e0-b) to node [below right] {\ref{item:ih-g-f-3}} (e4); \end{tikzpicture} \end{center} of $2$-compositions of $3$-cells of $C$ commutes; notice that in the latter diagram the referenced $3$-cells of $C$ are not $2$-composable: we are making the abuse of denoting each arrow of the diagram with the reference to a particular $3$-cell, without the suitable whiskerings making all these $3$-cells $2$-composable. These whiskerings are written explicitly above. \end{paragr} \begin{paragr} In order to show that the diagram of the previous paragraph is commutative, we shall decompose it in several smaller diagrams and we shall show that each of them is commutative. This decomposition is displayed in figure~\ref{fig:diagram_composition}. There is a duality involving the diagram numbered with ($n$) and with ($n'$) and the one commutes if and only if the other one does. We shall illustrate this phenomenon with the diagrams (1) and (1') and (2) and (2'), but then we will limit ourself to prove the commutativity of the diagrams of the type ($n$), leaving the diagrams of type ($n'$) to the reader. 
\end{paragr}
\begin{figure}
\caption{The diagram for the coherence $\protect\treeVV$.}
\label{fig:diagram_composition}
\end{figure}
\begin{paragr}[1]
Consider the diagram (1), where again we abuse notation by forgetting the suitable whiskerings making these $3$-cells of $C$ actually $2$-composable:
\begin{center}
\begin{tikzpicture} \node (e1) at (180:2) {e1}; \node (e2-a) at (90:2) {e2-a}; \node (m1) at (0:2) {m1}; \node (e1-b) at (270:2) {e1-b}; \draw[->, >=latex] (e1) -- node [left] {\ref{item:h-g-f-1}} (e2-a); \draw[->, >=latex] (e1) -- node [left] {\ref{item:i-h-gf-1}} (e1-b); \draw[->, >=latex] (e2-a) -- node [right] {\ref{item:i-h-gf-1}} (m1); \draw[->, >=latex] (e1-b) -- node [right] {\ref{item:h-g-f-1}} (m1); \end{tikzpicture}
\end{center}
To be precise, the $3$-cell from e1 to e2-a is
\begin{gather*}
GF_{\treeLog}(i) \comp_0 GF_{\treeLog}(h) \comp_0 G_{\treeV}(F_{\treeLog}g, F_{\treeLog}f) \comp_1 G_{\treeVRight}(F_{\treeLog}h, F_{\treeV}(g, f)) \\ \comp_1 \\
GF_{\treeLog}(i) \comp_0 G_{\treeLL}(F_{\treeV}(h, gf)) \comp_1 GF_{\treeV}(i, h\comp_0 g \comp_0 f)
\end{gather*}
and the $3$-cell from e1-b to m1 is
\begin{gather*}
GF_{\treeLog}(i) \comp_0 GF_{\treeLog}(h) \comp_0 G_{\treeV}(F_{\treeLog}g, F_{\treeLog}f) \comp_1 G_{\treeVRight}(F_{\treeLog}h, F_{\treeV}(g, f)) \\ \comp_1 \\
G_{\treeV}(F_{\treeLog}i, F_{\treeLog}(h)\comp_0 F_{\treeLog}(gf)) \comp_1 G_{\treeLL}(F_{\treeLog}i \comp_0 F_{\treeV}(h, gf)) \comp_1 G_{\treeLL}(F_{\treeV}(i, hgf))\,,
\end{gather*}
while the $3$-cell from e1 to e1-b is
\begin{gather*}
GF_{\treeLog}(i) \comp_0 \bigl(GF_{\treeLog}(h) \comp_0 GF_{\treeV}(g, f) \comp_1 G_{\treeV}(F_{\treeLog}h, F_{\treeLog}gf)\bigr) \\ \comp_1 \\
G_{\treeVRight}(F_{\treeLog}i, F_{\treeV}(h, gf)) \comp_1 G_{\treeLL}(F_{\treeV}(i, hgf))
\end{gather*}
and the $3$-cell from e2-a to m1 is
\begin{gather*}
GF_{\treeLog}(i) \comp_0 \bigl(GF_{\treeLog}(h) \comp_0 G_{\treeV}(F_{\treeLog}g, F_{\treeLog}f) \comp_1 G_{\treeV}(F_{\treeLog}h, F_{\treeLog}g\comp_0 F_{\treeLog}f) \comp_1 G_{\treeLL}(F_{\treeLog}h \comp_0 F_{\treeV}(g, f))\bigr) \\ \comp_1 \\
G_{\treeVRight}(F_{\treeLog}i, F_{\treeV}(h, gf)) \comp_1 G_{\treeLL}(F_{\treeV}(i, hgf))\,.
\end{gather*}
The commutativity of the diagram is simply an instance of the interchange law. Indeed, the source and the target of the $3$-cell of $C$
\[
GF_{\treeLog}(h) \comp_0 G_{\treeV}(F_{\treeLog}g, F_{\treeLog}f) \comp_1 G_{\treeVRight}(F_{\treeLog}h, F_{\treeV}(g, f))
\]
are the $2$-cells of $C$
\[
GF_{\treeLog}(h) \comp_0 GF_{\treeV}(g, f) \comp_1 G_{\treeV}(F_{\treeLog}h, F_{\treeLog}gf)
\]
and
\[
GF_{\treeLog}(h) \comp_0 G_{\treeV}(F_{\treeLog}g, F_{\treeLog}f) \comp_1 G_{\treeV}(F_{\treeLog}h, F_{\treeLog}g\comp_0 F_{\treeLog}f) \comp_1 G_{\treeLL}(F_{\treeLog}h \comp_0 F_{\treeV}(g, f))
\]
respectively; while the source and target of the $3$-cell
\[
G_{\treeVRight}(F_{\treeLog}i, F_{\treeV}(h, gf))\comp_1 G_{\treeLL}(F_{\treeV}(i, hgf))
\]
of $C$ are the $2$-cells
\[
GF_{\treeLog}(i) \comp_0 G_{\treeLL}(F_{\treeV}(h, gf)) \comp_1 GF_{\treeV}(i, h\comp_0 g \comp_0 f)
\]
and
\[
G_{\treeV}(F_{\treeLog}i, F_{\treeLog}(h)\comp_0 F_{\treeLog}(gf)) \comp_1 G_{\treeLL}(F_{\treeLog}i \comp_0 F_{\treeV}(h, gf)) \comp_1 G_{\treeLL}(F_{\treeV}(i, hgf))
\]
respectively.
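For the reader's convenience, let us spell out the general identity that is being invoked; this is just the standard interchange law between $\comp_1$ and $\comp_2$, valid in any strict $3$-category (the letters $\Lambda$, $\Xi$, $u$, $u'$, $v$, $v'$ used in this sentence are our own temporary notation and not part of the data of $F$ and $G$). Given $3$-cells $\Lambda \colon u \to u'$ and $\Xi \colon v \to v'$ of $C$ such that the vertical composite $v \comp_1 u$ is defined, one has
\[
(\Xi \comp_1 u') \comp_2 (v \comp_1 \Lambda) \;=\; \Xi \comp_1 \Lambda \;=\; (v' \comp_1 \Lambda) \comp_2 (\Xi \comp_1 u)\,.
\]
The two paths of diagram (1) are obtained from the two outer members of this chain of equalities, taking for $\Lambda$ and $\Xi$ the two whiskered $3$-cells displayed above; the same identity also accounts for the commutativity of the diagrams (1') and (5) below.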
\end{paragr} \begin{paragr}[1'] Consider the diagram (1'), where we abuse of notation by forgetting the suitable whiskerings making these $3$-cells of $C$ actually $2$-composable: \begin{center} \begin{tikzpicture} \node (m4) at (180:2) {m4}; \node (e4-b) at (90:2) {e4-b}; \node (e4) at (0:2) {e4}; \node (e0-b) at (270:2) {e0-b}; \draw[->, >=latex] (m4) -- node [left] {\ref{item:ih-g-f-3}} (e4-b); \draw[->, >=latex] (m4) -- node [left] {\ref{item:i-h-g-3}} (e0-b); \draw[->, >=latex] (e4-b) -- node [right] {\ref{item:ih-g-f-3}} (e4); \draw[->, >=latex] (e0-b) -- node [right] {\ref{item:i-h-g-3}} (e4); \end{tikzpicture} \end{center} To be precise, the $3$-cell from e4-b to e4 is \begin{gather*} G_{\treeV}(F_{\treeLog}i, F_{\treeLog}h) \comp_0 GF_{\treeLog}(g) \comp_0 GF_{\treeLog}(f) \comp_1 G_{\treeVLeft}(F_{\treeV}(i, h), F_{\treeLog}g) \\\comp_1\\ G_{\treeLL}(F_{\treeV}(ih, g))\bigr) \comp_0 GF_{\treeLog}(f) \comp_1 GF_{\treeV}(i \comp_0 h \comp_0 g, f) \end{gather*} and the $3$-cell from m4 to e0-b is \begin{gather*} G_{\treeV}(F_{\treeLog}i, F_{\treeLog}h) \comp_0 GF_{\treeLog}(g) \comp_0 GF_{\treeLog}(f) \comp_1 G_{\treeVLeft}(F_{\treeV}(i, h), F_{\treeLog}g) \\ \comp_1 \\ G_{\treeV}(F_{\treeLog}i \comp_0 F_{\treeLog}(hg), F_{\treeLog}(f)) \comp_1 G_{\treeLL}(F_{\treeV}(ih, g) \comp_0 F_{\treeLog}f) \comp_1 G_{\treeLL}(F_{\treeLL}(ihg, f))\,, \end{gather*} while the $3$-cell from e0-b to e4 is \begin{gather*} \bigl(GF_{\treeV}(i, h) \comp_0 GF_{\treeLog}(g) \comp_1 G_{\treeV}(F_{\treeLog}ih, F_{\treeLog}g)\bigr) \comp_0 GF_{\treeLog}(i) \\ \comp_1 \\ G_{\treeVLeft}(F_{\treeV}(ih, g), F_{\treeLog}f) \comp_1 G_{\treeLL}(F_{\treeV}(ihg, f)) \end{gather*} and the $3$-cell from m4 to e4-b is \begin{gather*} \bigl(G_{\treeV}(F_{\treeLog}i, F_{\treeLog}h) \comp_0 GF_{\treeLog}(g) \comp_1 G_{\treeV}(F_{\treeLog}i \comp_0 F_{\treeLog}h, F_{\treeLog}g) \comp_1 G_{\treeLL}(F_{\treeV}(i, h) \comp_0 F_{\treeLog}g)\bigr) \comp_0 GF_{\treeLog}(f) \\\comp_1\\ G_{\treeVLeft}( F_{\treeV}(ih, g), F_{\treeLog}f) \comp_1 G_{\treeLL}(F_{\treeV}(ihg, f))\,. \end{gather*} Analogously to the previous case, the commutativity of the diagram is simply an instance of the interchange law. \end{paragr} \begin{paragr}[2] Consider the diagram (2), where we always adopt the same notational abuse: \begin{center} \begin{tikzpicture}[scale=2] \node (e2-a) at (135:1.5) {e2-a}; \node (m1) at (0:0) {m1}; \node (i1) at (0:1.5) {i1}; \draw[->, >=latex] (e2-a) -- node [left] {\ref{item:i-h-gf-1}} (m1); \draw[->, >=latex] (m1) -- node [below] {$(f1)$} (i1); \draw[->, >=latex] (e2-a) -- node [above right] {$(f2)$} (i1); \end{tikzpicture} \end{center} where \begin{equation} \tag*{$(f1)$} G_{\treeVRight}(F_{\treeLog}i, F_{\treeLog} h \comp_0 F_{\treeV}(g, f)) \label{item:f1} \end{equation} is the principal $3$-cell of ($f1$) and \begin{equation} \tag*{$(f2)$} G_{\treeVRight}(F_{\treeLog}i, F_{\treeLog}h \comp_0 F_{\treeV}(g, f) \comp_1 F_{\treeV}(h, gf) ) \label{item:f2} \end{equation} is the principal $3$-cell of ($f2$). 
More precisely, the $3$-cell of $C$ from e2-a to m1 is \begin{gather*} GF_{\treeLog}(i) \comp_0 \bigl(GF_{\treeLog}(h) \comp_0 G_{\treeV}(F_{\treeLog}g, F_{\treeLog}f) \comp_1 G_{\treeV}(F_{\treeLog}h, F_{\treeLog}g\comp_0 F_{\treeLog}f) \bigr) \\\comp_1\\ GF_{\treeLog}(i) \comp_0 G_{\treeLL}(F_{\treeLog}h \comp_0 F_{\treeV}(g, f)) \comp_1 G_{\treeVRight}(F_{\treeLog}i, F_{\treeV}(h, gf)) \\\comp_1\\ G_{\treeLL}(F_{\treeV}(i, hgf))\,, \end{gather*} the $3$-cell from m1 to i1 is \begin{gather*} GF_{\treeLog}(i) \comp_0 \bigl(GF_{\treeLog}(h) \comp_0 G_{\treeV}(F_{\treeLog}g, F_{\treeLog}f) \comp_1 G_{\treeV}(F_{\treeLog}h, F_{\treeLog}g\comp_0 F_{\treeLog}f)\bigr) \\\comp_1\\ G_{\treeVRight}(F_{\treeLog}i, F_{\treeLog} h \comp_0 F_{\treeV}(g, f)) \comp_1 G_{\treeLL}(F_{\treeLog}i \comp_0 F_{\treeV}(h, gf)) \\\comp_1\\ G_{\treeLL}(F_{\treeV}(i, hgf)) \end{gather*} and the $3$-cell from e2-a to i1 is \begin{gather*} GF_{\treeLog}(i) \comp_0 \bigl(GF_{\treeLog}(h) \comp_0 G_{\treeV}(F_{\treeLog}g, F_{\treeLog}f) \comp_1 G_{\treeV}(F_{\treeLog}h, F_{\treeLog}g\comp_0 F_{\treeLog}f)\bigr) \\\comp_1\\ G_{\treeVRight}\bigl(F_{\treeLog}i, F_{\treeLog}h \comp_0 F_{\treeV}(g, f) \comp_1 F_{\treeV}(h, gf)\bigr) \\\comp_1\\ G_{\treeLL}(F_{\treeV}(i, hgf))\,. \end{gather*} The $3$-cells appearing in the middle lines of these $1$-compositions are precisely the $3$-cells of the coherence for $G$ for the tree \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [ [] [] ] [] ] \end{forest} } for the pasting diagram \[ \begin{tikzcd}[column sep=4.5em] \bullet \ar[r, bend left=55, looseness=1.3, "", ""{below, name=f1}] \ar[r, ""{name=f2u}, ""{below, name=f2d}] \ar[r, bend right=50, looseness=1.3, ""', ""{name=f3}] \ar[Rightarrow, from=f1, to=f2u, "\alpha"] \ar[Rightarrow, from=f2d, to=f3, "\beta"] & \bullet \ar[r, "l"] & \bullet \end{tikzcd} \] where $\alpha = F_{\treeV}(h, gf)$, $\beta = F_{\treeLog}(h)\comp_0 F_{\treeV}(g, f)$ and $l = F_{\treeLog}(i)$. Hence the diagram commutes. 
\end{paragr} \begin{paragr}[2'] Consider the diagram (2'), where we always adopt the same notational abuse: \begin{center} \begin{tikzpicture}[scale=2] \node (e4-b) at (45:1.5) {e4-b}; \node (m4) at (0:0) {m4}; \node (i4) at (0:1.5) {i4}; \draw[->, >=latex] (m4) -- node [left] {\ref{item:ih-g-f-3}} (e4-b); \draw[->, >=latex] (i4) -- node [below] {$(f1')$} (m4); \draw[->, >=latex] (i4) -- node [above right] {$(f2')$} (e4-b); \end{tikzpicture} \end{center} where the $3$-cell of $C$ from m4 to e4-b is \begin{gather*} \bigl(G_{\treeV}(F_{\treeLog}i, F_{\treeLog}h) \comp_0 GF_{\treeLog}(g)\comp_1 G_{\treeV}(F_{\treeLog}i \comp_0 F_{\treeLog}h, F_{\treeLog}g) \bigr) \comp_0 GF_{\treeLog}(f) \\\comp_1\\ G_{\treeLL}(F_{\treeV}(i, h) \comp_0 F_{\treeLog}g) \comp_0 GF_{\treeLog}(f) \comp_1 G_{\treeVLeft}(F_{\treeV}(ih, g), F_{\treeLog}f) \\\comp_1\\ G_{\treeLL}(F_{\treeV}(ihg, f))\,, \end{gather*} the $3$-cell from i4 to m4 is \begin{gather*} \bigl(G_{\treeV}(F_{\treeLog}i, F_{\treeLog}h) \comp_0 GF_{\treeLog}(g) \comp_1 G_{\treeV}(F_{\treeLog}i \comp_0 F_{\treeLog}h, F_{\treeLog}g)\bigr) \comp_0 GF_{\treeLog}(f) \\\comp_1\\ G_{\treeVLeft}(F_{\treeV}(i, h) \comp_0 F_{\treeLog}g, F_{\treeLog}f) \comp_1 G_{\treeLL}(F_{\treeV}(ih, g) \comp_0 F_{\treeLog}f) \\\comp_1\\ G_{\treeLL}(F_{\treeV}(ihg, f)) \end{gather*} and the $3$-cell from i4 to e4-b is \begin{gather*} \bigl(G_{\treeV}(F_{\treeLog}i, F_{\treeLog}h) \comp_0 GF_{\treeLog}(g)\comp_1 G_{\treeV}(F_{\treeLog}i \comp_0 F_{\treeLog}h, F_{\treeLog}g)\bigr) \comp_0 GF_{\treeLog}(f) \\\comp_1\\ G_{\treeVLeft}\bigl(F_{\treeV}(ih, g) \comp_1 F_{\treeV}(i, h) \comp_0 F_{\treeLog}g, F_{\treeLog}f\bigr) \\\comp_1\\ G_{\treeLL}(F_{\treeV}(ihg, f))\,. \end{gather*} The $3$-cells appearing in the middle lines of these $1$-compositions are precisely the $3$-cells of the coherence for $G$ for the tree \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [] [ [] [] ] ] \end{forest} } for the pasting diagram \[ \begin{tikzcd}[column sep=4.5em] \bullet \ar[r, "r"] & \bullet \ar[r, bend left=55, looseness=1.3, "", ""{below, name=f1}] \ar[r, ""{name=f2u}, ""{below, name=f2d}] \ar[r, bend right=50, looseness=1.3, ""', ""{name=f3}] \ar[Rightarrow, from=f1, to=f2u, "\alpha"] \ar[Rightarrow, from=f2d, to=f3, "\beta"] & \bullet \end{tikzcd} \] where $\alpha = F_{\treeV}(ih, g)$, $\beta = F_{\treeV}(i, h)\comp_0 F_{\treeLog}(g)$ and $r = F_{\treeLog}(f)$. Hence the diagram commutes. \end{paragr} \begin{paragr}[3] Consider the diagram (3): \begin{center} \begin{tikzpicture} \node (e2-b) at (150:2) {e2-b}; \node (e2-a) at (210:2) {e2-a}; \node (i2) at (30:2) {i2}; \node (i1) at (-30:2) {i1}; \draw[->, >=latex] (e2-a) -- node [left] {\ref{item:h-g-f-2}} (e2-b); \draw[->, >=latex] (e2-a) -- node [below] {\ref{item:f2}} (i1); \draw[->, >=latex] (e2-b) -- node [above] {$(f3)$} (i2); \draw[->, >=latex] (i1) -- node [right] {$(g3)$} (i2); \end{tikzpicture} \end{center} where \begin{equation} \tag*{$(f3)$} G_{\treeVRight}(F_{\treeLog}i, F_{\treeV}(h, g)\comp_0F_{\treeLog}f \comp_1 F_{\treeV}(hg, f)) \label{item:f3} \end{equation} is the principal $3$-cell of ($f3$) and \begin{equation} \tag*{$(g3)$} GF_{\treeLog}(i)\comp_0 G_{\treeW}(F_{\treeLog}h, F_{\treeLog}g, F_{\treeLog}f) \comp_1 G_{\treeV}(F_{\treeLog}i, F_{\treeLog}h\,F_{\treeLog}g\,F_{\treeLog}f) \comp_1 G_{\treeLLL}(F_{\treeLog}(i) \comp_0 F_{\treeW}(h, g, f)) \label{item:g3} \end{equation} is the principal $3$-cell of ($g3$). 
More precisely, the $3$-cell from e2-a to e2-b is
\begin{gather*}
G_{\treeW}(F_{\treeLog}h, F_{\treeLog}g, F_{\treeLog}f) \comp_1 G_{\treeLLL}(F_{\treeW}(h, g, f)) \\ \comp_1 \\
GF_{\treeV}(i, hgf)\,;
\end{gather*}
the $3$-cell from e2-a to i1 is
\begin{gather*}
GF_{\treeLog}(i) \comp_0 \bigl(GF_{\treeLog}(h) \comp_0 G_{\treeV}(F_{\treeLog}g, F_{\treeLog}f) \comp_1 G_{\treeV}(F_{\treeLog}h, F_{\treeLog}g\comp_0 F_{\treeLog}f)\bigr) \\ \comp_1 \\
G_{\treeVRight}(F_{\treeLog}i, F_{\treeLog}h \comp_0 F_{\treeV}(g, f) \comp_1 F_{\treeV}(h, gf)) \\ \comp_1 \\
G_{\treeLL}(F_{\treeV}(i, hgf))\,;
\end{gather*}
the $3$-cell from e2-b to i2 is
\begin{gather*}
GF_{\treeLog}(i) \comp_0 \bigl(G_{\treeV}(F_{\treeLog}h, F_{\treeLog}g)\comp_0 GF_{\treeLog}(f) \comp_1 G_{\treeV}(F_{\treeLog}h\,F_{\treeLog}g, F_{\treeLog}f)\bigr) \\ \comp_1 \\
G_{\treeVRight}\bigl(F_{\treeLog}(i), F_{\treeV}(h, g) \comp_0 F_{\treeLog}(f) \comp_1 F_{\treeV}(hg, f)\bigr) \\ \comp_1 \\
G_{\treeLL}(F_{\treeV}(i, hgf))\,;
\end{gather*}
the $3$-cell from i1 to i2 is
\begin{gather*}
GF_{\treeL}(i) \comp_0 G_{\treeW}\bigl(F_{\treeL}(h), F_{\treeL}(g), F_{\treeL}(f)\bigr) \\ \comp_1 \\
G_{\treeV}\bigl(F_{\treeL}(i), F_{\treeL}(h) \comp_0 F_{\treeL}(g) \comp_0 F_{\treeL}(f)\bigr) \\ \comp_1 \\
G_{\treeLLL}\bigl(F_{\treeL}(i) \comp_0 F_{\treeW}(h, g, f)\bigr) \\ \comp_1 \\
G_{\treeLL}\bigl(F_{\treeV}(i, hgf)\bigr)\,.
\end{gather*}
The coherence for the tree \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [ [ [] ] ] [ ] ] \end{forest} } applied to the pasting diagram
\[
\begin{tikzcd}[column sep=5em] \bullet \ar[r, bend left=60, looseness=1.2, "", "\phantom{bullet}"'{name=1}] \ar[r, bend right=60, looseness=1.2, ""', "\phantom{bullet}"{name=3}] \ar[Rightarrow, from=1, to=3, shift right=2ex, bend right, ""{name=beta1}] \ar[Rightarrow, from=1, to=3, shift left=2ex, bend left, ""'{name=beta3}] \arrow[triple, from=beta1, to=beta3, "\Gamma"]{} & \bullet \ar[r, "l"] & \bullet \end{tikzcd}
\]
where $\Gamma = F_{\treeW}(h, g, f)$ and $l = F_{\treeL}(i)$ gives the equality
\begin{gather*}
G_{\treeVRight}\bigl(F_{\treeL}(i), F_{\treeV}(h, g) \comp_0 F_{\treeL}(f) \comp_1 F_{\treeV}(hg, f)\bigr) \\ \comp_2 \\
GF_{\treeL}(i) \comp_0 G_{\treeLLL}\bigl(F_{\treeW}(h, g, f)\bigr) \\ = \\
G_{\treeLLL}\bigl(F_{\treeL}(i) \comp_0 F_{\treeW}(h, g, f)\bigr) \\ \comp_2 \\
G_{\treeVRight}\bigl(F_{\treeL}(i), F_{\treeL}(h) \comp_0 F_{\treeV}(g, f) \comp_1 F_{\treeV}(h, gf) \bigr)
\end{gather*}
of $3$-cells of $C$; $1$-precomposing, \ie whiskering by $\comp_1$, this equality on both sides by the $2$-cell $G_{\treeLL}(F_{\treeV}(i, hgf))$ and $1$-post-composing, \ie whiskering by $\comp_1$ again, both sides by the $3$-cell
\[
GF_{\treeL}(i) \comp_0 G_{\treeW}\bigl(F_{\treeL}(h), F_{\treeL}(g), F_{\treeL}(f)\bigr)\,,
\]
the two sides of the equality are precisely the two paths of diagram (3), which is therefore commutative.
\end{paragr}
\begin{paragr}[4]
Consider the diagram (4)
\begin{center}
\begin{tikzpicture}[scale=2] \node (m2) at (0:0) {m2}; \node (e2-b) at (180:1.5) {e2-b}; \node (i2) at (-45:1.5) {i2}; \draw [->, >=latex] (e2-b) -- node [below left] {\ref{item:f3}} (i2); \draw [->, >=latex] (e2-b) -- node [above] {\ref{item:b1}} (m2); \draw [->, >=latex] (m2) -- node [above right] {$(g4)$} (i2); \end{tikzpicture}
\end{center}
where the principal $3$-cell of ($g4$) is
\begin{equation} \tag*{$(g4)$} \label{item:g4} G_{\treeVRight}\bigl(F_{\treeL}(i), F_{\treeV}(h, g) \comp_0 F_{\treeL}(f)\bigr)\,. \end{equation}
More precisely, we have already seen that the $3$-cell from e2-b to i2 is
\begin{gather*}
GF_{\treeLog}(i) \comp_0 \bigl(G_{\treeV}(F_{\treeLog}(h), F_{\treeLog}(g))\comp_0 GF_{\treeLog}(f) \comp_1 G_{\treeV}(F_{\treeLog}(h)\,F_{\treeLog}(g), F_{\treeLog}(f))\bigr) \\ \comp_1 \\
G_{\treeVRight}\bigl(F_{\treeLog}(i), F_{\treeV}(h, g) \comp_0 F_{\treeLog}(f) \comp_1 F_{\treeV}(hg, f)\bigr) \\ \comp_1 \\
G_{\treeLL}(F_{\treeV}(i, hgf))\,;
\end{gather*}
the $3$-cell from m2 to i2 is
\begin{gather*}
GF_{\treeLog}(i) \comp_0 \bigl(G_{\treeV}(F_{\treeL}(h), F_{\treeL}(g))\comp_0 GF_{\treeL}(f) \comp_1 G_{\treeV}(F_{\treeL}(h)\,F_{\treeL}(g), F_{\treeL}(f))\bigr) \\ \comp_1 \\
G_{\treeVRight}\bigl(F_{\treeL}(i), F_{\treeV}(h, g)\comp_0 F_{\treeL}(f)\bigr) \\ \comp_1 \\
G_{\treeLL}\bigl(F_{\treeL}(i) \comp_0 F_{\treeV}(hg, f)\bigr) \\ \comp_1 \\
G_{\treeLL}\bigl(F_{\treeV}(i, hgf)\bigr)\,;
\end{gather*}
the $3$-cell of $C$ from e2-b to m2 is
\begin{gather*}
GF_{\treeLog}(i) \comp_0 \bigl(G_{\treeV}(F_{\treeL}(h), F_{\treeL}(g))\comp_0 GF_{\treeL}(f) \comp_1 G_{\treeV}(F_{\treeL}(h)\,F_{\treeL}(g), F_{\treeL}(f))\bigr) \\ \comp_1 \\
GF_{\treeLog}(i) \comp_0 G_{\treeLL}\bigl(F_{\treeV}(h, g)\comp_0 F_{\treeL}(f)\bigr) \\ \comp_1 \\
G_{\treeVRight}(F_{\treeLog}(i), F_{\treeV}(hg, f)) \\ \comp_1 \\
G_{\treeLL}(F_{\treeV}(i, hgf))\,.
\end{gather*} The coherence for the tree \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [ [] [] ] [] ] \end{forest} } applied to the pasting diagram \[ \begin{tikzcd}[column sep=4.5em] \bullet \ar[r, bend left=55, looseness=1.3, ""{below, name=f1}] \ar[r, ""{name=f2u}, ""{below, name=f2d}] \ar[r, bend right=50, looseness=1.3, ""{name=f3}] \ar[Rightarrow, from=f1, to=f2u, "\alpha"] \ar[Rightarrow, from=f2d, to=f3, "\beta"] & \bullet \ar[r, "l"] & \bullet \end{tikzcd} \] of $C$, where $\alpha = F_{\treeV}(hg, f)$, $\beta = F_{\treeV}(h, g)\comp_0 F_{\treeL}(f)$ and $l = F_{\treeL}(i)$ gives the equality \begin{gather*} G_{\treeVRight}\bigl(F_{\treeL}(i), F_{\treeV}(h, g) \comp_0 F_{\treeL}(f) \comp_1 F_{\treeV}(hg, f)\bigr) \\ = \\ G_{\treeV}\bigl(F_{\treeL}(i), F_{\treeV}(h, g)\comp_0 F_{\treeL}(f)\bigr) \comp_1 G_{\treeLL}\bigl(F_{\treeL}(i)\comp_0 F_{\treeV}(hg, f)\bigr) \\ \comp_2 \\ GF_{\treeL}(i) \comp_0 G_{\treeLL}\bigl(F_{\treeV}(hg, f)\bigr) \comp_1 G_{\treeVRight}\bigl(F_{\treeL}(i), F_{\treeV}(h, g) \comp_0 F_{\treeL}(f)\bigr) \end{gather*} of $3$-cells of $C$; $1$-precomposing,\ie whiskering by $\comp_1$, both members of the equality by the $2$-cell $G_{\treeLL}\bigl(F_{\treeV}(i, hgf)\bigr)$ and $1$-post-composing, \ie whiskering by $\comp_1$, by the $2$-cell \[ GF_{\treeLog}(i) \comp_0 \bigl(G_{\treeV}(F_{\treeL}(h), F_{\treeL}(g))\comp_0 GF_{\treeL}(f) \comp_1 G_{\treeV}(F_{\treeL}(h)\,F_{\treeL}(g), F_{\treeL}(f))\bigr) \] we get precisely the $3$-cells of diagram (4), which therefore commutes. \end{paragr} \begin{paragr}[5] Consider the diagram (5) \begin{center} \begin{tikzpicture} \node (e2) at (150:2) {e2}; \node (e2-b) at (210:2) {e2-b}; \node (e3-a) at (30:2) {e3-a}; \node (m2) at (-30:2) {m2}; \draw[->, >=latex] (e2-b) -- node [left] {\ref{item:h-g-f-3}} (e2); \draw[->, >=latex] (e2-b) -- node [below] {\ref{item:b1}} (m2); \draw[->, >=latex] (e2) -- node [above] {\ref{item:b1}} (e3-a); \draw[->, >=latex] (m2) -- node [right] {\ref{item:h-g-f-3}} (e3-a); \end{tikzpicture}\ . 
\end{center} More precisely, we already know that the $3$-cell from e2 to e3-a is \begin{gather*} GF_{\treeLog}(i) \comp_0 \bigl(GF_{\treeV}(h, g)\comp_0 GF_{\treeL}(f)\bigr) \\ \comp_1 \\ GF_{\treeLog}(i) \comp_0 G_{\treeV}\bigl(F_{\treeL}(hg), F_{\treeL}(f)\bigr) \\ \comp_1 \\ G_{\treeVRight}(F_{\treeLog}(i), F_{\treeV}(hg, f)) \\ \comp_1 \\ G_{\treeLL}(F_{\treeV}(i, hgf))\,, \end{gather*} the $3$-cell from e2-b to e2 is \begin{gather*} GF_{\treeL}(i) \comp_0 G_{\treeV}\bigl(F_{\treeLog}(h), F_{\treeLog}(g)\bigr) \comp_0 GF_{\treeLog}(f) \\ \comp_1 \\ GF_{\treeL}(i) \comp_0 G_{\treeVLeft}(F_{\treeV}(h, g), F_{\treeLog}f) \\ \comp_1 \\ G_{\treeLL}\bigl(F_{\treeV}(h, g) \comp_0 F_{\treeL}(f)\bigr) \\ \comp_1 \\ GF_{\treeV}(i, hgf) \end{gather*} and the $3$-cell from e2-b to m2 is \begin{gather*} GF_{\treeLog}(i) \comp_0 \bigl(G_{\treeV}(F_{\treeL}(h), F_{\treeL}(g))\comp_0 GF_{\treeL}(f)\bigr) \\ \comp_1 \\ GF_{\treeLog}(i) \comp_0 \bigl(G_{\treeV}(F_{\treeL}(h)\,F_{\treeL}(g), F_{\treeL}(f)) \comp_1 G_{\treeL}(F_{\treeV}(h, g)\comp_0 F_{\treeL}(f)\bigr) \\ \comp_1 \\ G_{\treeVRight}(F_{\treeLog}(i), F_{\treeV}(hg, f)) \\ \comp_1 \\ G_{\treeLL}(F_{\treeV}(i, hgf))\,; \end{gather*} the $3$-cell from m2 to e3-a is \begin{gather*} GF_{\treeL}(i) \comp_0 G_{\treeV}\bigl(F_{\treeLog}(h), F_{\treeLog}(g)\bigr) \comp_0 GF_{\treeLog}(f) \\ \comp_1 \\ GF_{\treeL}(i) \comp_0 G_{\treeVLeft}(F_{\treeV}(h, g), F_{\treeLog}f) \\ \comp_1 \\ G_{\treeV}\bigl(F_{\treeL}(i), F_{\treeL}(hg) \comp_0 F_{\treeL}(f)\bigr) \\ \comp_1 \\ G_{\treeLL}\bigl(F_{\treeL}(i) \comp_0 F_{\treeV}(hg, f) \bigr) \comp_1 G_{\treeLL}\bigl(F_{\treeV}(i, hgf)\bigr)\,. \end{gather*} It is clear from this explicit description of the $3$-cells involved that diagram (5) is commutative by virtue of the interchange law. \end{paragr} \begin{paragr}[6] Consider the diagram (6) \begin{center} \begin{tikzpicture}[scale=2] \node (m3) at (0:1) {m3}; \node (e3-b) at (60:1) {e3-b}; \node (e3-a) at (120:1) {e3-a}; \node (m2) at (180:1) {m2}; \node (i2) at (240:1) {i2}; \node (i3) at (300:1) {i3}; \draw[->, >=latex] (m2) -- node [left] {\ref{item:h-g-f-3}} (e3-a); \draw[->, >=latex] (e3-a) -- node [above] {\ref{item:b2}} (e3-b); \draw[->, >=latex] (e3-b) -- node [right] {\ref{item:i-h-g-1}} (m3); \draw[->, >=latex] (m2) -- node [left] {\ref{item:g4}} (i2); \draw[->, >=latex] (i2) -- node [below] {\ref{item:b2}} (i3); \draw[->, >=latex] (i3) -- node [right] {$(g4')$} (m3); \end{tikzpicture} \end{center} where the principal $3$-cell of ($g4'$) is \begin{equation} \tag*{$(g4')$} \label{item:g4p} G_{\treeVLeft} \bigl(F_{\treeL}(i) \comp_0 F_{\treeV}(h, g), F_{\treeL}(f)\bigr)\,. 
\end{equation} More precisely, the $3$-cell from i3 to m3 is \begin{gather*} GF_{\treeL}(i) \comp_0 G_{\treeV}\bigl(F_{\treeL}(h), F_{\treeL}(g)\bigr) \comp_0 GF_{\treeL}(f) \\ \comp_1 \\ G_{\treeV}\bigl(F_{\treeL}(i), F_{\treeL}(h) \comp_0 F_{\treeL}(g)\bigr) \comp_0 GF_{\treeL}(f) \comp_1 G_{\treeVLeft} \bigl(F_{\treeL}(i) \comp_0 F_{\treeV}(h, g), F_{\treeL}(f)\bigr) \\ \comp_1 \\ G_{\treeLL}\bigl(F_{\treeV}(i, hg)\comp_0F_{\treeL}(f)\bigr) \comp_1 G_{\treeLL}\bigl(F_{\treeV}(ihg, f)\bigr)\,; \end{gather*} the $3$-cell from i2 to i3 is \begin{gather*} GF_{\treeL}(i) \comp_0 G_{\treeV}\bigl(F_{\treeL}(h), F_{\treeL}(g)\bigr) \comp_0 GF_{\treeL}(f) \\ \comp_1 \\ G_{\treeW}\bigl(F_{\treeL}(i), F_{\treeL}(h)\comp_0 F_{\treeL}(g), F_{\treeL}(f)\bigr) \comp_1 G_{\treeLL}\bigl(F_{\treeL}(h) \comp_0 F_{\treeL}(h, g)\comp_0 F_{\treeL}(f)\bigr) \\ \comp_1 \\ G_{\treeLLL}\bigl(F_{\treeW}(i, hg, f)\bigr)\,; \end{gather*} we already know that the $3$-cell from m2 to i2 is \begin{gather*} GF_{\treeLog}(i) \comp_0 G_{\treeV}\bigl(F_{\treeL}(h), F_{\treeL}(g)\bigr)\comp_0 GF_{\treeL}(f) \\ \comp_1 \\ GF_{\treeLog}(i) \comp_0 G_{\treeV}\bigl(F_{\treeL}(h)\comp_0 F_{\treeL}(g), F_{\treeL}(f)\bigr) \comp_1 G_{\treeVRight}\bigl(F_{\treeL}(i), F_{\treeV}(h, g)\comp_0 F_{\treeL}(f)\bigr) \\ \comp_1 \\ G_{\treeLL}\bigl(F_{\treeL}(i) \comp_0 F_{\treeV}(hg, f)\bigr) \comp_1 G_{\treeLL}\bigl(F_{\treeV}(i, hgf)\bigr)\,; \end{gather*} the $3$-cell from e3-b to m3 is \begin{gather*} GF_{\treeLog}(i) \comp_0 G_{\treeV}\bigl(F_{\treeL}(h), F_{\treeL}(g)\bigr)\comp_0 GF_{\treeL}(f) \\ \comp_1 \\ G_{\treeVRight}\bigl(F_{\treeL}(i), F_{\treeV}(h, g)\bigr) \comp_0 GF_{\treeL}(f) \comp_1 G_{\treeV}\bigl(F_{\treeL}(i) \comp_0 F_{\treeL}(h, g), F_{\treeL}(f)\bigr) \\ \comp_1 \\ G_{\treeLL}\bigl(F_{\treeV}(i, hg)\comp_0F_{\treeL}(f)\bigr) \comp_1 G_{\treeLL}\bigl(F_{\treeV}(ihg, f)\bigr)\,; \end{gather*} we already know that the $3$-cell from e3-a to e3-b is \begin{gather*} GF_{\treeL}(i) \comp_0 G_{\treeV}\bigl(F_{\treeL}(h), F_{\treeL}(g)\bigr) \comp_0 GF_{\treeL}(f) \\ \comp_1 \\ GF_{\treeL}(i) \comp_0 G_{\treeLL}\bigl(F_{\treeL}(h, g)\bigr) \comp_0 F_{\treeL}(f) \comp_1 G_{\treeW}\bigl(F_{\treeL}(i), F_{\treeL}(hg), F_{\treeL}(f)\bigr) \\ \comp_1 \\ G_{\treeLLL}\bigl(F_{\treeW}(i, hg, f)\bigr)\,; \end{gather*} we also know that the $3$-cell from m2 to e3-a is \begin{gather*} GF_{\treeL}(i) \comp_0 G_{\treeV}\bigl(F_{\treeLog}(h), F_{\treeLog}(g)\bigr) \comp_0 GF_{\treeLog}(f) \\ \comp_1 \\ GF_{\treeL}(i) \comp_0 G_{\treeVLeft}\bigl(F_{\treeV}(h, g), F_{\treeLog}(f)\bigr) \comp_1 G_{\treeV}\bigl(F_{\treeL}(i), F_{\treeL}(hg) \comp_0 F_{\treeL}(f)\bigr) \\ \comp_1 \\ G_{\treeLL}\bigl(F_{\treeL}(i) \comp_0 F_{\treeV}(hg, f) \bigr) \comp_1 G_{\treeLL}\bigl(F_{\treeV}(i, hgf)\bigr)\,. \end{gather*} Notice that there is a complete duality between the $3$-cell from m2 to e3-a and the $3$-cell from e3-b to m3 and also between the $3$-cell from m2 to i2 and the $3$-cell from i3 to m3. 
The coherence for the tree \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [] [ [] ] [] ] \end{forest} } applied to the pasting diagram \[ \begin{tikzcd}[column sep=4.5em] \bullet \ar[r, "k"] & \bullet \ar[r, bend left, ""{below, name=g}] \ar[r, bend right, ""{name=g2}] \ar[Rightarrow, from=g, to=g2, "\alpha"] & \bullet \ar[r, "l"] & \bullet \end{tikzcd} \] of $C$ with $k= F_{\treeL}(f)$, $\alpha = F_{\treeV}(h, g)$ and $l = F_{\treeL}(i)$ gives the equality between the $3$\nbd-cell \begin{gather*} G_{\treeV}\bigl(F_{\treeL}(i), F_{\treeL}(h) \comp_0 F_{\treeL}(g)\bigr) \comp_0 GF_{\treeL}(f) \comp_1 G_{\treeVLeft} \bigl(F_{\treeL}(i) \comp_0 F_{\treeV}(h, g), F_{\treeL}(f)\bigr) \\ \comp_1 \\ G_{\treeW}\bigl(F_{\treeL}(i), F_{\treeL}(h)\comp_0 F_{\treeL}(g), F_{\treeL}(f)\bigr) \comp_1 G_{\treeLL}\bigl(F_{\treeL}(h) \comp_0 F_{\treeL}(h, g)\comp_0 F_{\treeL}(f)\bigr) \\ \comp_1 \\ GF_{\treeLog}(i) \comp_0 G_{\treeV}\bigl(F_{\treeL}(h)\comp_0 F_{\treeL}(g), F_{\treeL}(f)\bigr) \comp_1 G_{\treeVRight}\bigl(F_{\treeL}(i), F_{\treeV}(h, g)\comp_0 F_{\treeL}(f)\bigr) \end{gather*} of $C$ and the $3$-cell \begin{gather*} G_{\treeVRight}\bigl(F_{\treeL}(i), F_{\treeV}(h, g)\bigr) \comp_0 GF_{\treeL}(f) \comp_1 G_{\treeV}\bigl(F_{\treeL}(i) \comp_0 F_{\treeL}(h, g), F_{\treeL}(f)\bigr) \\ \comp_1 \\ GF_{\treeL}(i) \comp_0 G_{\treeLL}\bigl(F_{\treeL}(h, g)\bigr) \comp_0 F_{\treeL}(f) \comp_1 G_{\treeW}\bigl(F_{\treeL}(i), F_{\treeL}(hg), F_{\treeL}(f)\bigr) \\ \comp_1 \\ GF_{\treeL}(i) \comp_0 G_{\treeVLeft}\bigl(F_{\treeV}(h, g), F_{\treeLog}(f)\bigr) \comp_1 G_{\treeV}\bigl(F_{\treeL}(i), F_{\treeL}(hg) \comp_0 F_{\treeL}(f)\bigr) \end{gather*} of $C$. We get the diagram (6) by $1$-precomposing, \ie whiskering by $\comp_1$, both terms of this equality with the $2$-cell \[ GF_{\treeL}(i) \comp_0 G_{\treeV}\bigl(F_{\treeLog}(h), F_{\treeLog}(g)\bigr) \comp_0 GF_{\treeLog}(f) \] of $C$ and by $1$-post-composing with the $3$-cell $G_{\treeLLL}\bigl(F_{\treeW}(i, hg, f)\bigr)$, \ie a ``vertical composition'' of $3$-cells. Hence, the diagram is commutative. \end{paragr} \begin{paragr}[7] Consider the diagram (7) \begin{center} \begin{tikzpicture} \node (e1-a) at (270:2) {e1-a}; \node (e1-b) at (198:2) {e1-b}; \node (m1) at (126:2) {m1}; \node (i1) at (54:2) {i1}; \node (i0) at (-18:2) {i0}; \draw[->,>=latex] (e1-b) -- node [below left] {\ref{item:i-h-gf-2}} (e1-a); \draw[->,>=latex] (e1-a) -- node [below right] {$(g1)$} (i0); \draw[->,>=latex] (e1-b) -- node [left] {\ref{item:h-g-f-1}} (m1); \draw[->,>=latex] (m1) -- node [above] {\ref{item:f1}} (i1); \draw[->,>=latex] (i1) -- node [right] {$(g2)$} (i0); \end{tikzpicture} \end{center} where \begin{equation} \tag*{$(g1)$} \label{item:g1} G_{\treeVRight}\bigl(F_{\treeL}(i)\comp_0 F_{\treeL}(h), F_{\treeV}(g, f)\bigr) \end{equation} is the principal $3$-cell of ($g1$) and \begin{equation} \tag*{$(g2)$} \label{item:g2} G_{\treeW}\bigl(F_{\treeL}(i), F_{\treeL}(h), F_{\treeL}(g)\comp_0 F_{\treeL}(f)\bigr) \end{equation} is the principal $3$-cell of ($g2$). 
More precisely, the $3$-cell of $C$ from e1-a to i0 is \begin{gather*} GF_{\treeL}(i)\comp_0 GF_{\treeL}(h) \comp_0 G_{\treeV}\bigl(F_{\treeL}(g), F_{\treeL}(f)\bigr) \\ \comp_1 \\ G_{\treeV}\bigl(F_{\treeL}(i), F_{\treeL}(h)\bigr) \comp_0 G_{\treeL}\bigl(F_{\treeL}(g)\comp_0 F_{\treeL}(f)\bigr) \comp_1 G_{\treeVRight}\bigl(F_{\treeL}(i)\comp_0 F_{\treeL}(h), F_{\treeV}(g, f)\bigr) \\ \comp_1 \\ G_{\treeLL}\bigl(F_{\treeV}(i, h)\comp_0 F_{\treeL}(gf)\bigr) \comp_1 G_{\treeLL}\bigl(F_{\treeV}(ih, gf)\bigr)\,; \end{gather*} the $3$-cell from e1-b to e1-a, as we know, is \begin{gather*} GF_{\treeL}(i)\comp_0 GF_{\treeL}(h) \comp_0 G_{\treeV}\bigl(F_{\treeL}(g), F_{\treeL}(f)\bigr) \\ \comp_1 \\ GF_{\treeL}(i) \comp_0 GF_{\treeL}(h) \comp_0 G_{\treeLL}\bigl(F_{\treeV}(g, f)\bigr) \comp_1 G_{\treeW}\bigl(F_{\treeL}(i), F_{\treeL}(h), F_{\treeL}(gf)\bigr) \\ \comp_1 \\ G_{\treeLLL}\bigl(F_{\treeW}(i, h, gf)\bigr)\,; \end{gather*} the $3$-cell from i1 to i0 is \begin{gather*} GF_{\treeL}(i)\comp_0 GF_{\treeL}(h) \comp_0 G_{\treeV}\bigl(F_{\treeL}(g), F_{\treeL}(f)\bigr) \\ \comp_1 \\ G_{\treeW}\bigl(F_{\treeL}(i), F_{\treeL}(h), F_{\treeL}(g)\comp_0 F_{\treeL}(f)\bigr) \comp_1 G_{\treeLL}\bigl(F_{\treeL}(i)\comp_0 F_{\treeL}(h) \comp_0 F_{\treeV}(g, f)\bigr) \\ \comp_1 \\ G_{\treeLLL}\bigl(F_{\treeW}(i, h, gf)\bigr)\,; \end{gather*} the $3$-cell from m1 to i1, as we know, is \begin{gather*} GF_{\treeL}(i)\comp_0 GF_{\treeL}(h) \comp_0 G_{\treeV}\bigl(F_{\treeL}(g), F_{\treeL}(f)\bigr) \\ \comp_1 \\ GF_{\treeL}(i)\comp_0 G_{\treeV}\bigl(F_{\treeLog}(h), F_{\treeLog}(g)\comp_0 F_{\treeLog}(f)\bigr) \comp_1 G_{\treeVRight}\bigl(F_{\treeLog}(i), F_{\treeLog}(h) \comp_0 F_{\treeV}(g, f)\bigr) \\ \comp_1 \\ G_{\treeLL}\bigl(F_{\treeV}(i, h)\comp_0 F_{\treeL}(gf)\bigr) \comp_1 G_{\treeLL}\bigl(F_{\treeV}(ih, gf)\bigr)\,; \end{gather*} finally we know that the $3$-cell from e1-b to m1 is \begin{gather*} GF_{\treeL}(i)\comp_0 GF_{\treeL}(h) \comp_0 G_{\treeV}\bigl(F_{\treeL}(g), F_{\treeL}(f)\bigr) \\ \comp_1 \\ GF_{\treeL}(i) \comp_0 G_{\treeVRight}\bigl(F_{\treeLog}(h), F_{\treeV}(g, f)\bigr) \comp_1 G_{\treeV}\bigl(F_{\treeLog}(i), F_{\treeLog}(h)\comp_0 F_{\treeLog}(gf)\bigr) \\ \comp_1 \\ G_{\treeLL}\bigl(F_{\treeV}(i, h)\comp_0 F_{\treeL}(gf)\bigr) \comp_1 G_{\treeLL}\bigl(F_{\treeV}(ih, gf)\bigr)\,. 
\end{gather*} The coherence for the tree \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [] [] [ [] ] ] \end{forest} } applied to the pasting diagram \[ \begin{tikzcd}[column sep=4.5em] \bullet \ar[r, bend left, ""{below, name=f}] \ar[r, bend right, ""{name=f2}] \ar[Rightarrow, from=f, to=f2, "\alpha"] & \bullet \ar[r, "k"] & \bullet \ar[r, "l"] & \bullet \end{tikzcd} \] of $C$, with $\alpha = F_{\treeV}(g, f)$, $k = F_{\treeL}(h)$ and $l = F_{\treeL}(i)$ give the equality \begingroup \allowdisplaybreaks \begin{gather*} G_{\treeV}\bigl(F_{\treeL}(i), F_{\treeL}(h)\bigr) \comp_0 G_{\treeL}\bigl(F_{\treeL}(g)\comp_0 F_{\treeL}(f)\bigr) \comp_1 G_{\treeVRight}\bigl(F_{\treeL}(i)\comp_0 F_{\treeL}(h), F_{\treeV}(g, f)\bigr) \nobreak \\ \comp_1 \nobreak \\ GF_{\treeL}(i) \comp_0 GF_{\treeL}(h) \comp_0 G_{\treeLL}\bigl(F_{\treeV}(g, f)\bigr) \comp_1 G_{\treeW}\bigl(F_{\treeL}(i), F_{\treeL}(h), F_{\treeL}(gf)\bigr) \nobreak \\ = \nobreak \\ G_{\treeW}\bigl(F_{\treeL}(i), F_{\treeL}(h), F_{\treeL}(g)\comp_0 F_{\treeL}(f)\bigr) \comp_1 G_{\treeLL}\bigl(F_{\treeL}(i)\comp_0 F_{\treeL}(h) \comp_0 F_{\treeV}(g, f)\bigr) \nobreak \\ \comp_1 \nobreak \\ GF_{\treeL}(i)\comp_0 G_{\treeV}\bigl(F_{\treeLog}(h), F_{\treeLog}(g)\comp_0 F_{\treeLog}(f)\bigr) \comp_1 G_{\treeVRight}\bigl(F_{\treeLog}(i), F_{\treeLog}(h) \comp_0 F_{\treeV}(g, f)\bigr) \nobreak \\ \comp_1 \nobreak \\ GF_{\treeL}(i) \comp_0 G_{\treeVRight}\bigl(F_{\treeLog}(h), F_{\treeV}(g, f)\bigr) \comp_1 G_{\treeV}\bigl(F_{\treeLog}(i), F_{\treeLog}(h)\comp_0 F_{\treeLog}(gf)\bigr) \end{gather*} \endgroup of $3$-cells of $C$. If we $1$-post-compose, \ie we whisker with $\comp_1$, with the $2$-cell \[ GF_{\treeL}(i)\comp_0 GF_{\treeL}(h) \comp_0 G_{\treeV}\bigl(F_{\treeL}(g), F_{\treeL}(f)\bigr) \] of $C$ and we $1$-precompose with the $3$-cell $G_{\treeLLL}\bigl(F_{\treeW}(i, h, gf)\bigr)$, \ie we perform a ``vertical composition'' of $3$-cells, both members of the equality above, then we get precisely diagram (7), which therefore commutes. \end{paragr} \begin{paragr}[8] Consider the diagram (8) \begin{center} \begin{tikzpicture} \node (e1-a) at (180:2) {e1-a}; \node (i0) at (90:2) {i0}; \node (e0-a) at (0:2) {e0-a}; \node (e0) at (270:2) {e0}; \draw[->, >=latex] (e1-a) -- node [left] {\ref{item:g1}} (i0); \draw[->, >=latex] (e1-a) -- node [left] {\ref{item:i-h-g-3}} (e0); \draw[->, >=latex] (i0) -- node [right] {$(g1')$} (e0-a); \draw[->, >=latex] (e0) -- node [right] {\ref{item:ih-g-f-1}} (e0-a); \end{tikzpicture} \end{center} where the principal $3$-cell of ($g1'$) is \begin{equation} G_{\treeVLeft}\bigl(F_{\treeV}(i, h), F_{\treeL}(g)\comp_0 F_{\treeL}(f)\bigr)\,. 
\end{equation} More precisely, the $3$-cell of $C$ from e0 to e0-a, as we know, is \begin{gather*} GF_{\treeL}(i)\comp_0 GF_{\treeL}(h) \comp_0 G_{\treeV}\bigl(F_{\treeL}(g), F_{\treeL}(f)\bigr) \\ \comp_1 \\ G_{\treeLL}\bigl(F_{\treeV}(i, h)\bigr) \comp_0 G_{\treeL}\bigl(F_{\treeL}(g) \comp_0 F_{\treeL}(f)\bigr) \comp_1 G_{\treeVLeft}\bigl(F_{\treeV}(i, h), F_{\treeL}(g)\comp_0 F_{\treeL}(f)\bigr) \\ \comp_1 \\ G_{\treeLL}\bigl(F_{\treeV}(ih, gf)\bigr)\,; \end{gather*} the $3$-cell from e1-a to e0 is, as we know, \begin{gather*} GF_{\treeL}(i)\comp_0 GF_{\treeL}(h) \comp_0 G_{\treeV}\bigl(F_{\treeL}(g), F_{\treeL}(f)\bigr) \\ \comp_1 \\ G_{\treeL}\bigl(F_{\treeL}(i) \comp_0 F_{\treeL}(h)\bigr) \comp_0 G_{\treeLL}\bigl(F_{\treeV}(g, f)\bigr) \comp_1 G_{\treeVLeft}\bigl(F_{\treeV}(i, h) \comp_0 F_{\treeL}(gf)\bigr) \\ \comp_1 \\ G_{\treeLL}\bigl(F_{\treeV}(ih, gf)\bigr)\,; \end{gather*} the $3$-cell from e0 to e0-a is \begin{gather*} GF_{\treeL}(i)\comp_0 GF_{\treeL}(h) \comp_0 G_{\treeV}\bigl(F_{\treeL}(g), F_{\treeL}(f)\bigr) \\ \comp_1 \\ G_{\treeVLeft}\bigl(F_{\treeV}(i, h), F_{\treeL}(g) \comp_0 F_{\treeL}(f)\bigr) \comp_1 G_{\treeLL}\bigl(F_{\treeL}(ih) \comp_0 F_{\treeV}(g, f)\bigr) \\ \comp_1 \\ G_{\treeLL}\bigl(F_{\treeV}(ih, gf)\bigr)\,; \end{gather*} finally the $3$-cell from e1-a to e0, as we know, is \begin{gather*} GF_{\treeL}(i)\comp_0 GF_{\treeL}(h) \comp_0 G_{\treeV}\bigl(F_{\treeL}(g), F_{\treeL}(f)\bigr) \\ \comp_1 \\ G_{\treeVRight}\bigl(F_{\treeL}(i) \comp_0 F_{\treeL}(h), F_{\treeV}(g, f)\bigr) \comp_1 G_{\treeLL}\bigl(F_{\treeV}(i, h) \comp_0 F_{\treeL}(gf)\bigr) \\ \comp_1 \\ G_{\treeLL}\bigl(F_{\treeV}(ih, gf)\bigr)\,. \end{gather*} The coherence for the tree \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [ [] ] [ [] ] ] \end{forest} } applied to the pasting diagram \[ \begin{tikzcd}[column sep=4.5em] \bullet \ar[r, bend left, ""{below, name=f1}] \ar[r, bend right, ""{name=f2}] \ar[Rightarrow, from=f1, to=f2, "\alpha"] & \bullet \ar[r, bend left, ""{below, name=g1}] \ar[r, bend right, ""{name=g2}] \ar[Rightarrow, from=g1, to=g2, "\beta"] & \bullet \end{tikzcd} \] of $C$, where $\alpha = F_{\treeV}(g, f)$ and $\beta = F_{\treeV}(h, i)$, gives us the equality \begingroup \allowdisplaybreaks \begin{gather*} G_{\treeLL}\bigl(F_{\treeV}(i, h)\bigr) \comp_0 G_{\treeL}\bigl(F_{\treeL}(g) \comp_0 F_{\treeL}(f)\bigr) \comp_1 G_{\treeVLeft}\bigl(F_{\treeV}(i, h), F_{\treeL}(g)\comp_0 F_{\treeL}(f)\bigr) \nobreak \\ \comp_1 \nobreak\\ G_{\treeL}\bigl(F_{\treeL}(i) \comp_0 F_{\treeL}(h)\bigr) \comp_0 G_{\treeLL}\bigl(F_{\treeV}(g, f)\bigr) \comp_1 G_{\treeVLeft}\bigl(F_{\treeV}(i, h) \comp_0 F_{\treeL}(gf)\bigr) \\ = \\ G_{\treeVLeft}\bigl(F_{\treeV}(i, h), F_{\treeL}(g) \comp_0 F_{\treeL}(f)\bigr) \comp_1 G_{\treeLL}\bigl(F_{\treeL}(ih) \comp_0 F_{\treeV}(g, f)\bigr) \nobreak \\ \comp_1 \nobreak \\ G_{\treeVRight}\bigl(F_{\treeL}(i) \comp_0 F_{\treeL}(h), F_{\treeV}(g, f)\bigr) \comp_1 G_{\treeLL}\bigl(F_{\treeV}(i, h) \comp_0 F_{\treeL}(gf)\bigr) \end{gather*} \endgroup of $3$-cells of $C$. By $1$-post-composing with the $2$-cell \[ GF_{\treeL}(i)\comp_0 GF_{\treeL}(h) \comp_0 G_{\treeV}\bigl(F_{\treeL}(g), F_{\treeL}(f)\bigr) \] of $C$ and $1$-precomposing with the $2$-cell $G_{\treeLL}\bigl(F_{\treeV}(ih, gf)\bigr)$ we get the commutativity of the diagram (8). 
\end{paragr} \begin{paragr}[9] Consider the diagram (9) \begin{center} \begin{tikzpicture} \node (i0) at (270:2) {i0}; \node (i1) at (198:2) {i1}; \node (i2) at (126:2) {i2}; \node (i3) at (54:2) {i3}; \node (i4) at (-18:2) {i4}; \draw[->,>=latex] (i1) -- node [below left] {\ref{item:g2}} (i0); \draw[->,>=latex] (i0) -- node [below right] {$(g2')$} (i4); \draw[->,>=latex] (i1) -- node [left] {\ref{item:g3}} (i2); \draw[->,>=latex] (i2) -- node [above] {\ref{item:b2}} (i3); \draw[->,>=latex] (i3) -- node [right] {$(g3')$} (i4); \end{tikzpicture} \end{center} where \begin{gather*} G_{\treeW}\bigl(F_{\treeL}(i)\comp_0 F_{\treeL}(h), F_{\treeL}(g), F_{\treeL}(f)\bigr) \\\comp_1 \\ \notag G_{\treeLLL}\bigl(F_{\treeW}(ih, g, f)\bigr) \end{gather*} is the principal $3$-cell of ($g2'$) and \begin{gather*} G_{\treeW}\bigl(F_{\treeL}(i), F_{\treeL}(h)\comp_0 F_{\treeL}(g), F_{\treeL}(f)\bigr) \\ \comp_1 \\ \notag G_{\treeLLL}\bigl(F_{\treeW}(i, hg, f)\bigr) \end{gather*} is the principal $3$-cell of ($g3'$). More precisely, the $3$-cell of $C$ from i0 to i4 is \begin{gather*} G_{\treeV}\bigl(F_{\treeL}(i), F_{\treeL}(h)\bigr) \comp_0 GF_{\treeL}(g) \comp_0 GF_{\treeL}(f) \comp_1 G_{\treeW}\bigl(F_{\treeL}(i)\comp_0 F_{\treeL}(h), F_{\treeL}(g), F_{\treeL}(f)\bigr) \\ \comp_1 \\ G_{\treeLL}\bigl(F_{\treeV}(i, h) \comp_0 F_{\treeL}(g) \comp_0 F_{\treeL}(f) \bigr) \comp_1 G_{\treeLLL}\bigl(F_{\treeW}(ih, g, f)\bigr)\,; \end{gather*} the $3$-cell from i1 to i0, as we know, is \begin{gather*} GF_{\treeL}(i)\comp_0 GF_{\treeL}(h) \comp_0 G_{\treeV}\bigl(F_{\treeL}(g), F_{\treeL}(f)\bigr) \comp_1 G_{\treeW}\bigl(F_{\treeL}(i), F_{\treeL}(h), F_{\treeL}(g)\comp_0 F_{\treeL}(f)\bigr) \\ \comp_1 \\ G_{\treeLL}\bigl(F_{\treeL}(i)\comp_0 F_{\treeL}(h) \comp_0 F_{\treeV}(g, f)\bigr) \comp_1 G_{\treeLLL}\bigl(F_{\treeW}(i, h, gf)\bigr)\,; \end{gather*} the $3$-cell from i3 to i4 is \begin{gather*} G_{\treeW}\bigl(F_{\treeL}(i), F_{\treeL}(h), F_{\treeL}(g)\bigr) \comp_0 GF_{\treeL}(f) \comp_1 G_{\treeV}\bigl(F_{\treeL}(i)\comp_0 F_{\treeL}(h) \comp_0 F_{\treeL}(g), F_{\treeL}(f)\bigr) \\ \comp_1 \\ G_{\treeLLL}\bigl(F_{\treeW}(i, h, g) \comp_0 F_{\treeL}(f)\bigr) \comp_1 G_{\treeLL}\bigl(F_{\treeV}(ihg, f)\bigr)\,; \end{gather*} the $3$-cell from i2 to i3, as we know, is \begin{gather*} GF_{\treeL}(i) \comp_0 G_{\treeV}\bigl(F_{\treeL}(h), F_{\treeL}(g)\bigr) \comp_0 GF_{\treeL}(f) \comp_1 G_{\treeW}\bigl(F_{\treeL}(i), F_{\treeL}(h)\comp_0 F_{\treeL}(g), F_{\treeL}(f)\bigr) \\ \comp_1 \\ G_{\treeLL}\bigl(F_{\treeL}(i) \comp_0 F_{\treeV}(h, g)\comp_0 F_{\treeL}(f)\bigr) \comp_1 G_{\treeLLL}\bigl(F_{\treeW}(i, hg, f)\bigr)\,; \end{gather*} finally we also already know that the $3$-cell from i1 to i2 is \begin{gather*} GF_{\treeL}(i) \comp_0 G_{\treeW}\bigl(F_{\treeL}(h), F_{\treeL}(g), F_{\treeL}(f)\bigr) \comp_1 G_{\treeV}\bigl(F_{\treeL}(i), F_{\treeL}(h) \comp_0 F_{\treeL}(g) \comp_0 F_{\treeL}(f)\bigr) \\ \comp_1 \\ G_{\treeLLL}\bigl(F_{\treeL}(i) \comp_0 F_{\treeW}(h, g, f)\bigr) \comp_1 G_{\treeLL}\bigl(F_{\treeV}(i, hgf)\bigr)\,. \end{gather*} Notice that diagram (9) is actually formed by ``vertical compositions'' of these $3$-cells; that is, the first lines of these $3$-cells are $2$-composable, and the same holds for their second lines.
Observe that in order to $2$-compose the $3$-cell from i1 to i0 with the $3$-cell from i0 to i4 we use the relations \begin{gather*} G_{\treeLL}\bigl(F_{\treeL}(i)\comp_0 F_{\treeL}(h) \comp_0 F_{\treeV}(g, f)\bigr) \comp_1 G_{\treeLL}\bigl(F_{\treeV}(i, h) \comp_0 F_{\treeL}(g\comp_0 f)\bigr) \\ = \\ G_{\treeLL}\bigl(F_{\treeL}(i)\comp_0 F_{\treeL}(h) \comp_0 F_{\treeV}(g, f) \comp_1 F_{\treeV}(i, h) \comp_0 F_{\treeL}(g \comp_0 f)\bigr) \\ = \\ G_{\treeLL}\bigl(F_{\treeV}(i, h) \comp_0 F_{\treeL}(g) \comp_0 F_{\treeL}(f) \comp_1 F_{\treeL}(i \comp_0 h) \comp_0 F_{\treeV}(g, f)\bigr) \\ = \\ G_{\treeLL}\bigl(F_{\treeV}(i, h) \comp_0 F_{\treeL}(g) \comp_0 F_{\treeL}(f)\bigr) \comp_1 G_{\treeLL}\bigl(F_{\treeL}(i \comp_0 h) \comp_0 F_{\treeV}(g, f)\bigr)\,, \end{gather*} where the first and the last equality are instances of the coherence for the tree \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [ [] ] [ [] ] ] \end{forest} }, \ie the $0$-composition of $2$-cells, and the equality in the middle is simply given by the interchange law. Now, the coherence for the tree \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [][][][] ] \end{forest} } applied to the pasting diagram \[ \begin{tikzcd} \bullet \ar[r, "f"] & \bullet \ar[r, "g"] & \bullet \ar[r, "h"] & \bullet \ar[r, "i"] & \bullet \end{tikzcd} \] of $0$-composable $1$-cells of $A$ gives us the equality \begingroup \allowdisplaybreaks \begin{gather*} F_{\treeV}(i, h) \comp_0 F_{\treeLog}(g) \comp_0 F_{\treeLog}(f) \comp_1 F_{\treeW}(ih, g, f) \nobreak \\ \comp_2 \nobreak \\ F_{\treeLog}(i) \comp_0 F_{\treeLog}(h) \comp_0 F_{\treeV}(g, f) \comp_1 F_{\treeW}(i, h, gf) \nobreak \\ = \\ F_{\treeW}(i, h, g) \comp_0 F_{\treeLog}(f) \comp_1 F_{\treeV}(i\comp_0 h \comp_0 g, f) \nobreak \\ \comp_2 \nobreak \\ F_{\treeLog}(i) \comp_0 F_{\treeV}(h, g) \comp_0 F_{\treeLog}(f) \comp_1 F_{\treeW}(i, h \comp_0 g, f) \nobreak \\ \comp_2 \nobreak \\ F_{\treeLog}(i) \comp_0 F_{\treeW}(h, g, f) \comp_1 F_{\treeV}(i, h\comp_0 g \comp_0 f) \end{gather*} \endgroup of $3$-cells of $B$.
Applying $G_{\treeLLL}$ to both terms of this equality and using the coherences for the trees \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [[[][]]] ] \end{forest} }, \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [[[]][]] ] \end{forest} } and \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [[][[]]] ] \end{forest} } we get the following equality \begingroup \allowdisplaybreaks \begin{gather*} G_{\treeLL}\bigl( F_{\treeV}(i, h) \comp_0 F_{\treeLog}(g) \comp_0 F_{\treeLog}(f)\bigr) \comp_1 G_{\treeLLL}\bigl(F_{\treeW}(ih, g, f) \bigr) \nobreak \\ \comp_2 \nobreak \\ G_{\treeLL}\bigl( F_{\treeLog}(i) \comp_0 F_{\treeLog}(h) \comp_0 F_{\treeV}(g, f) \bigr) \comp_1 G_{\treeLLL}\bigl(F_{\treeW}(i, h, gf) \bigr) \nobreak \\ = \\ G_{\treeLLL}\bigl( F_{\treeW}(i, h, g) \comp_0 F_{\treeLog}(f)\bigr) \comp_1 G_{\treeLL}\bigl(F_{\treeV}(i\comp_0 h \comp_0 g, f) \bigr) \nobreak \\ \comp_2 \nobreak\\ G_{\treeLL}\bigl( F_{\treeLog}(i) \comp_0 F_{\treeV}(h, g) \comp_0 F_{\treeLog}(f)\bigr) \comp_1 G_{\treeLLL}\bigl(F_{\treeW}(i, h \comp_0 g, f) \bigr) \nobreak \\ \comp_2 \nobreak \\ G_{\treeLLL}\bigl( F_{\treeLog}(i) \comp_0 F_{\treeW}(h, g, f)\bigr) \comp_1 G_{\treeLL} \bigl(F_{\treeV}(i, h\comp_0 g \comp_0 f)\bigr)\,; \end{gather*} \endgroup applying instead the coherence for the tree \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [][][][] ] \end{forest} } to the pasting scheme given by the four $0$-composable $1$-cells of $B$ \[ \begin{tikzcd} \bullet \ar[r, "F_{\treeL}(f)"] & \bullet \ar[r, "F_{\treeL}(g)"] & \bullet \ar[r, "F_{\treeL}(h)"] & \bullet \ar[r, "F_{\treeL}(i)"] & \bullet \end{tikzcd} \] we get the equality \begingroup \allowdisplaybreaks \begin{gather*} G_{\treeV}\bigl(F_{\treeL}(i), F_{\treeL}(h)\bigr) \comp_0 GF_{\treeL}(g) \comp_0 GF_{\treeL}(f) \comp_1 G_{\treeW}\bigl(F_{\treeL}(i)\comp_0 F_{\treeL}(h), F_{\treeL}(g), F_{\treeL}(f)\bigr) \nobreak \\ \comp_1 \nobreak\\ GF_{\treeL}(i)\comp_0 GF_{\treeL}(h) \comp_0 G_{\treeV}\bigl(F_{\treeL}(g), F_{\treeL}(f)\bigr) \comp_1 G_{\treeW}\bigl(F_{\treeL}(i), F_{\treeL}(h), F_{\treeL}(g)\comp_0 F_{\treeL}(f)\bigr) \nobreak \\ = \\ G_{\treeW}\bigl(F_{\treeL}(i), F_{\treeL}(h), F_{\treeL}(g)\bigr) \comp_0 GF_{\treeL}(f) \comp_1 G_{\treeV}\bigl(F_{\treeL}(i)\comp_0 F_{\treeL}(h) \comp_0 F_{\treeL}(g), F_{\treeL}(f)\bigr) \nobreak \\ \comp_2 \nobreak\\ GF_{\treeL}(i) \comp_0 G_{\treeV}\bigl(F_{\treeL}(h), F_{\treeL}(g)\bigr) \comp_0 GF_{\treeL}(f) \comp_1 G_{\treeW}\bigl(F_{\treeL}(i), F_{\treeL}(h)\comp_0 F_{\treeL}(g), F_{\treeL}(f)\bigr) \nobreak \\ \comp_1 \nobreak \\ GF_{\treeL}(i) \comp_0 G_{\treeW}\bigl(F_{\treeL}(h), F_{\treeL}(g), F_{\treeL}(f)\bigr) \comp_1 G_{\treeV}\bigl(F_{\treeL}(i), F_{\treeL}(h) \comp_0 F_{\treeL}(g) \comp_0 F_{\treeL}(f)\bigr)\,. \end{gather*} \endgroup Notice that the $1$-composition line by line of the $3$-cells of these two equalities gives precisely the $3$-cells defining diagram (9), which by the interchange law is therefore commutative. \end{paragr} The previous paragraph ends the proof of the coherence presented in paragraph~\ref{paragr:pentagon_coherence}, hence achieving the following proposition.
\begin{prop} Let $F \colon A \to B$ and $G \colon B \to C$ be two normalised oplax $3$-functors. If $A$ is a $1$-category, then the data of $GF$ defined in paragraph~\ref{paragr:def_cellular_to_simplicial} define a normalised oplax $3$-functor. \end{prop} \begin{rem} It is suggestive, in light of the coherences described above, to decorate the polygons of figure~\ref{fig:diagram_composition} with Stasheff trees. We give such a representation in figure~\ref{fig:diagram_composition_stasheff} (the unlabelled polygons being commutative by the exchange law), but we do not pursue this informal approach any further. \end{rem} \begin{figure} \caption{The diagram for the coherence $\protect\treeVV$, again.} \label{fig:diagram_composition_stasheff} \end{figure} \begin{prop} Let $u \colon A' \to A$, $F \colon A \to B$ and $G \colon B \to C$ be normalised oplax $3$-functors, where $A$ and $A'$ are $1$-categories. Then $G(Fu) = (GF)u$. \end{prop} \begin{proof} Notice that a normalised oplax $3$-functor $u \colon A' \to A$ between $1$\nbd-categories is simply a $1$-functor, and one immediately checks that the equality $G(Fu) = (GF)u$ of normalised oplax $3$-functors holds. \end{proof} \begin{lemme} Let $B$ be a $3$-category. The $n$-simplices of $N_\infty(B)$ are in bijection with the normalised oplax $3$-functors $\Deltan{n} \to B$. \end{lemme} \begin{proof} For any $3$-category $B$ the simplicial set $N_\infty(B)$ is $4$-coskeletal (see~\cite{Street}) and so we have to check that the set of normalised oplax $3$-functors $x \colon \Deltan{n} \to B$ is in bijection with the set of morphisms of simplicial sets $x \colon \text{Sk}_4(\Deltan{n}) \to N_\infty(B)$; it is enough to define the latter ones on the $i$-simplices of $\Deltan{n}$, with $i=0,1, \dots, 4$. But such a definition corresponds precisely to the data $\treeDot$, $\treeL$, $\treeV$ and $\treeW$ together with the coherence $\treeVV$ of a normalised oplax $3$-functor from the $1$-category $\Deltan{i}$ to the $3$-category $B$. \end{proof} \begin{thm}\label{thm:cellular-simplicial} Let $G \colon B \to C$ be a normalised oplax $3$-functor. Then there is a morphism of simplicial sets $\SNn{l}(G) \colon N_\infty(B) \to N_\infty(C)$, where, for any $n \ge 0$, an $n$\nbd-simplex of $N_\infty(B)$ corresponding to a normalised oplax $3$-functor $x \colon \Deltan{n} \to B$ is sent to the $n$-simplex of $N_\infty(C)$ corresponding to the normalised oplax $3$-functor $Gx \colon \Deltan{n}\to C$. \end{thm} \begin{proof} For any normalised oplax $3$-functor $G \colon B \to C$ and any $n$-simplex of $N_\infty(B)$, corresponding to a normalised oplax $3$-functor $x \colon \Deltan{n} \to B$, we have seen along this section that the composition $Gx \colon \Deltan{n} \to C$ is still a normalised oplax $3$\nbd-functor and so it canonically corresponds to an $n$-simplex $\SNn{l} G_n(x)$ of $N_\infty(C)$. The functoriality of this correspondence, given by the preceding proposition, implies the naturality of the functions $\SNn{l} G_n$, which assemble to a morphism of simplicial sets $\SNn{l}(G) \colon N_\infty(B) \to N_\infty(C)$. \end{proof} \begin{exem}\label{exem:sup_we} Let $C$ be a small $3$-category and consider the normalised oplax $3$-functor $\sup \colon i_{\cDelta}(N_3(C))\to C$ defined in Example~\ref{exem:sup}. One checks that the associated morphism of simplicial sets $\SNn{l}(\sup)\colon N i_{\cDelta}(N_3(C)) \to N_3(C)$ coincides with the morphism of simplicial sets called $\tau_{N_3(C)}$ in paragraph~7.3.14 of~\cite{CisinskiHigherCats}. Hence, Proposition~7.3.15 of~\loccit implies that $\SNn{l}(\sup)$ is a simplicial weak equivalence.
\end{exem} \section{The simplicial definition}\label{sec:simplicial} It is expected that a good notion of normalised oplax $3$-functor would satisfy the following property: for any $3$-categories $A$ and $B$, the set of normalised oplax $3$\nbd-func\-tors from $A$ to $B$ is in bijection with the set of simplicial morphisms from $N_3(A)$ to $N_3(B)$. Nevertheless, a careful investigation of this latter notion shows that it might not be optimal, as simplicial morphisms fail to preserve the underlying $3$-graph. Indeed, we will analyse the case where $A$ is the ``$2$-disk'', \ie the $2$-category with two parallel $1$-cells and a single $2$-cell between them, and $B$ is the ``invertible $3$-disk'', \ie the $3$-category with two parallel $1$-cells, two parallel $2$-cells between them and a single invertible $3$-cell between these $2$-cells, and we show that there are more simplicial morphisms than expected between the respective Street nerves. On the one hand, the $2$-category $A$ has no compositions and so the normalised oplax $3$-functors from $A$ to $B$ should coincide with the strict $3$-functors. On the other hand, there are simplicial morphisms from $N_3(A)$ to $N_3(B)$ which do not come from the nerve of strict $3$-functors. This is a consequence of the fact that, for instance, there are two ways to capture the $2$-cell of $A$ with a $2$-simplex of $N_3(A)$, and these two different ways are related by $3$-simplices which are sent by any simplicial morphism $N_3(A) \to N_3(B)$ to $3$-simplices of $N_3(B)$ for which the main $3$-cell is invertible. Said otherwise, the different ways to encode cells, or simple compositions of cells, with simplices are linked together by higher simplices with the property of having the cell of greatest dimension invertible; these higher simplices act as invertible constraints for morphisms between Street nerves of $3$-categories, and it is therefore natural to imagine that a normalised oplax $3$-functor would correspond to a simplicial morphism for which all these higher simplices acting as constraints have \emph{trivial} greatest cell, instead of merely invertible. In order to identify such constraints, we shall examine in further detail the nerve $\SNn{l}(F)$ of any normalised oplax $3$-functor $F$. \subsection{Case study: {$\Dn{2}$}} \begin{paragr} Consider the $2$-category $\Dn{2}$ \[ \begin{tikzcd}[column sep=4.5em] \disk^0_0 \ar[r, bend left, "\disk^0_1", ""'{name=f}] \ar[r, bend right, "\disk^1_1"', ""{name=g}] \ar[Rightarrow, from=f, to=g, "\disk"] & \disk^1_0 \end{tikzcd}\ . \] We know from paragraph~\ref{paragr:encode_2cell} that the simplicial set~$N_\infty(\Dn{2})$ has at least two non-degenerate $2$-simplices $\disk_l$ and $\disk_r$, at least two non-degenerate $3$\hyp{}simplices that we shall call $\tau_{\text{u}}$ and $\tau_{\text{d}}$, and also at least two non-degenerate $4$\hyp{}simplices that we shall name $x_\tau$ and $y_\tau$. In fact, Ozornova and Rovelli have shown in~\cite{OzornovaRovelliDisk} that these are the only non-degenerate $i$-simplices, for $i=2, 3, 4$.
Therefore we get an explicit description of the $3$\hyp{}category $\ti{3}\cON_\infty(\Dn{2})$ given by \[ \begin{tikzcd}[column sep=4.5em] \disk^0_0 \ar[r, bend left=60, "\disk^0_1", ""'{name=f}] \ar[r, bend right=60, "\disk^1_1"', ""{name=g}] \ar[Rightarrow, from=f, to=g, shift right=0.5em, bend right, shorten <=1mm, shorten >=1mm, "\disk_l"', ""{name=al}] \ar[Rightarrow, from=f, to=g, shift left=0.5em, bend left, shorten <=1mm, shorten >=1mm, "\disk_r", ""'{name=ar}] \arrow[triple, from=al, to=ar, "\tau_{\text{d}}", "\cong"']{} & \disk^1_0 \end{tikzcd} \] where the inverse of the $3$-cell $\tau_{\text{d}}$ is given by the $3$-cell $\tau_{\text{u}} \colon \disk_r \to \disk_l$. We shall call this $3$-category $\Dn{3}^\sharp$. This is motivated by the fact that the $2$-skeleton of this $3$-category is equal to that of~$\Dn{3}$, but the top-dimensional cell is invertible. \end{paragr} \begin{paragr} There are no compositions of cells in the $3$-category $\Dn{2}$ and therefore a good notion of oplax $3$-functor $F$ with source $\Dn{2}$ and target a $3$\hyp{}category $B$ should coincide with a strict $3$\hyp{}functor, since there is no composition to ``laxify'' in $\Dn{2}$. This is not the case if we define the set of oplax $3$-functors from $A$ to $B$, where $A$ and $B$ are two small $3$-categories, to be the set \[ \operatorname{\mathsf{Hom}}_{{\mathcal{S}\mspace{-2.mu}\it{et}}Simp}(N_\infty(A), N_\infty(B)) \cong \operatorname{\mathsf{Hom}}_{\nCat{3}}(\cOn{3}N_\infty (A), B)\,. \] Indeed, let $A = \Dn{2}$ and $B = \Dn{3}^\sharp$ and let us restrict our attention to the $3$\hyp{}functors mapping $\disk^\eps_1$ to $\disk^\eps_1$, for $\eps=0, 1$, \ie mapping the top cell $\disk$ of $\Dn 2$ to a non-trivial $2$-cell of $\Dn{3}^\sharp$. We have precisely two such $3$\hyp{}functors: one sends $\disk$ to $\disk_l$ and the other to $\disk_r$. Nevertheless, if we consider the $3$-functors in \[ \operatorname{\mathsf{Hom}}_{\nCat{3}}(\cOn{3}N_\infty (A), B) \cong \operatorname{\mathsf{Hom}}_{\nCat{3}}({\Dn{3}}^\sharp, {\Dn{3}}^\sharp) \] mapping $\disk^\eps_1$ to $\disk^\eps_1$, then we count four of them, and they are determined by their behaviour with respect to the $3$-cell $\tau_{\text{d}}$: there are two of them sending $\tau_{\text{d}}$ to the identity of $\disk_l$ and to the identity of $\disk_r$ respectively, which are the (mates of the) nerves of the $3$-functors from $\Dn 2$ to ${\Dn{3}}^\sharp$ we considered above; furthermore, there are two $3$-functors, corresponding to the automorphisms of the $3$-category $\Dn{3}^\sharp$, mapping $\tau_{\text{d}}$ to itself and to $\tau_{\text{u}}$ respectively. \end{paragr} \subsection{The nerve of a normalised oplax 3-functor}\label{subsec:constraints} Let $A$ and $B$ be two small $3$-categories and consider a morphism $F \colon N_\infty(A) \to N_\infty(B)$ of simplicial sets. In this subsection we shall study some of the constraints to which the morphism $F$ is subject. As explained above, here by \emph{constraint} we mean an invertible and not necessarily trivial cell of $B$, typically a $3$-cell, which is the principal cell of a $3$-simplex $F(x)$, whereas the $3$-cell of $A$ defined by~$x$ is a trivial cell of~$A$. The term \emph{constraint} is due to the fact that, when trying to extract a cellular form of oplax $3$-functor from such a morphism, these particular $3$-simplices act as additional data which do not respect the underlying $3$-graphs, or as invertible coherences.
\begin{paragr} Any object $a$ of $A$, that is, a $0$-simplex of $N_\infty(A)$, is mapped to an object $F(a)$ of $B$, and any $1$-cell $f \colon a \to a'$ of $A$, that is, a $1$-simplex of $N_\infty(A)$, is mapped to a $1$-cell $F(f) \colon F(a) \to F(a')$ of $B$. Encoding the behaviour of higher cells in a morphism between the nerves of two $3$\nbd-cat\-e\-gories requires choices and leads to a web of coherences which become increasingly hard to control. The prototypical example of such a phenomenon is given by the way a simplicial morphism between nerves of $3$-categories encodes a $2$-cell. There are two different ways of encoding a $2$-cell $\alpha$ of $A$ as a $2$-simplex. The main $2$-cells of $B$ of the images under $F$ of these two $2$-simplices are possibly two different $2$-cells; this can be read as the fact that simplicial morphisms do not respect the underlying $3$-graph in general. Nonetheless, these two different $2$-cells of $B$ can be proven to be linked to one another by an invertible $3$-cell of $B$. This is described in detail in the next paragraph. \end{paragr} \begin{paragr}\label{paragr:encode_2cell} Consider a $2$-cell \[ \begin{tikzcd}[column sep=4.5em] a \ar[r, bend left, "f", ""{below, name=f}] \ar[r, bend right, ""{name=g}, "g"'] \ar[Rightarrow, from=f, to=g, "\alpha"] & a' \end{tikzcd} \] of $A$. The simplicial set $N_\infty(A)$ encodes the cell $\alpha$ in two different $2$-simplices, namely \[ \alpha_l := \begin{tikzcd}[column sep=small] a \ar[rr, "f", ""{below, name=f}] \ar[rd, equal, "1_a"'] && a' \\ & a \ar[ur, "g"'] & \ar[Rightarrow, from=f, to=2-2, shorten >=3pt, "\alpha"] \end{tikzcd} \quadet \alpha_r := \begin{tikzcd}[column sep=small] a \ar[rr, "f", ""{below, name=f}] \ar[rd, "g"'] && a' \\ & a' \ar[ur, equal, "1_{a'}"'] & \ar[Rightarrow, from=f, to=2-2, shorten >=2pt, "\alpha"] \end{tikzcd}\ . \] These two $2$-simplices of $N_\infty(A)$ are linked together by the following two non-degenerate $3$\hyp{}simplices \begin{center} \begin{tikzpicture}[scale=1.6, font=\footnotesize] \squares{ /squares/label/.cd, 0=$\bullet$, 1=$\bullet$, 2=$\bullet$, 3=$\bullet$, 01={}, 12=$g$, 23={}, 02=$g$, 03=$f$, 13=$g$, 012=${=}$, 023=$\alpha$, 123=${=}$, 013=$\alpha$, 0123=${=}$, /squares/arrowstyle/.cd, 01={equal}, 23={equal}, 012={phantom, description}, 123={phantom, description}, 0123={phantom, description}, /squares/labelstyle/.cd, 012={anchor=center}, 123={anchor=center}, 0123={anchor=center} } \end{tikzpicture} \end{center} and \begin{center} \begin{tikzpicture}[scale=1.6, font=\footnotesize] \squares{ /squares/label/.cd, 0=$\bullet$, 1=$\bullet$, 2=$\bullet$, 3=$\bullet$, 01={}, 12=$g$, 23={}, 02=$f$, 03=$f$, 13=$f$, 012=$\alpha$, 023=${=}$, 123=$\alpha$, 013=${=}$, 0123=${=}$, /squares/arrowstyle/.cd, 01={equal}, 23={equal}, 023={phantom, description}, 013={phantom, description}, 0123={phantom, description}, /squares/labelstyle/.cd, 013={anchor=center}, 023={anchor=center}, 0123={anchor=center} } \end{tikzpicture}\ .
\end{center} The images under $F$ of these two~$3$-sim\-plices of $N_\infty(A)$ give the following two $3$-simplices of $N_\infty(B)$: \begin{center} \begin{tikzpicture}[scale=1.6, font=\footnotesize] \squares{ /squares/label/.cd, 0=$\bullet$, 1=$\bullet$, 2=$\bullet$, 3=$\bullet$, 01={}, 12=$F(g)$, 23={}, 02=$F(g)$, 03=$F(f)$, 13=$F(g)$, 012=${=}$, 023=$F(\alpha_r)$, 123=${=}$, 013=$F(\alpha_l)$, 0123=$\tau_{\text{u}}(\alpha)$, /squares/arrowstyle/.cd, 01={equal}, 23={equal}, 012={phantom, description}, 123={phantom, description}, /squares/labelstyle/.cd, 012={anchor=center}, 123={anchor=center} } \end{tikzpicture} \end{center} and \begin{center} \begin{tikzpicture}[scale=1.6, font=\footnotesize] \squares{ /squares/label/.cd, 0=$\bullet$, 1=$\bullet$, 2=$\bullet$, 3=$\bullet$, 01={}, 12=$F(g)$, 23={}, 02=$F(f)$, 03=$F(f)$, 13=$F(f)$, 012=$F(\alpha_l)$, 023=${=}$, 123=$F(\alpha_r)$, 013=${=}$, 0123=$\tau_{\text{d}}(\alpha)$, /squares/arrowstyle/.cd, 01={equal}, 23={equal}, 023={phantom, description}, 013={phantom, description}, /squares/labelstyle/.cd, 013={anchor=center}, 023={anchor=center}, 012={below right}, 123={below left} } \end{tikzpicture}\ . \end{center} \end{paragr} \begin{rem} If $B$ is a $2$-category, then the $2$-cells $F(\alpha_l)$ and $F(\alpha_r)$ coincide. \end{rem} \begin{paragr}\label{paragr:tau_invertible} The $3$-cells $\tau_\text{u}(\alpha)$ and $\tau_\text{d}(\alpha)$ of $B$ described in the preceding paragraph turn out to be connected by two non-degenerate $4$-simplices of $N_\infty(B)$, that we shall call $x_\tau$ and $y_\tau$. The first one, displayed in figure~\ref{fig:tau1}, witnesses the relation \[\tau_\text{d}(\alpha)\comp_2 \tau_\text{u}(\alpha) = 1_{F(\alpha_r)}\,.\] The second one, displayed in figure~\ref{fig:tau2}, witnesses instead the relation \[\tau_\text{u}(\alpha)\comp_2 \tau_\text{d}(\alpha) = 1_{F(\alpha_l)}\,,\] so that in fact $\tau_\text{u}(\alpha)$ and $\tau_\text{d}(\alpha)$ are two invertible $3$-cells of $B$. \begin{figure} \caption{The $4$-simplices governing $\tau_{\text{u}}(\alpha)$ and $\tau_{\text{d}}(\alpha)$.} \label{fig:tau1} \label{fig:tau2} \end{figure} \end{paragr} \begin{paragr}\label{paragr:cond-i} Let $F \colon A \to B$ be a normalised oplax $3$-functor. Consider a $2$-cell $\alpha \colon f \to g$ of $A$ and the two normalised oplax $3$-functors $L \colon \Deltan{2} \to A$ and $R \colon \Deltan{2} \to A$ defined by mapping \begin{align*} \atom{01} \mapsto 1_{s_0(\alpha)} = 1_a &,& \atom{01} \mapsto g,\\ \atom{12} \mapsto g &,& \atom{12} \mapsto 1_{t_0(\alpha)} = 1_{a'},\\ \atom{02} \mapsto f &,& \atom{02} \mapsto f,\\ \atom{12}\comp_0 \atom{01} \mapsto g &,& \atom{12}\comp_0 \atom{01} \mapsto g,\\ L_{\treeV}(\atom{12}, \atom{01}) = \alpha &,& R_{\treeV}(\atom{12}, \atom{01}) = \alpha, \end{align*} respectively, that we can depict as \[ \begin{tikzcd} \bullet \ar[r, "g", ""'{name=g}] & \bullet \\ \bullet \ar[u, equal] \ar[ur, "f"', ""{name=f}] \ar[Rightarrow, from=f, to=1-1, shorten >=1pt, "\alpha"'] \end{tikzcd} \quadet \begin{tikzcd} \bullet \ar[r, "g", ""'{name=g}] \ar[rd, "f"', ""{name=f}] & \bullet \ar[d,equal] \\ & \bullet \ar[Rightarrow, from=f, to=1-2, shorten >=1pt, "\alpha"] \end{tikzcd}\ .
\] Now, we can view $L$ and $R$ as the $3$-face and $0$-face respectively of the normalised oplax $3$-functor $T \colon \Deltan{3} \to A$ that we can represent as \begin{center} \begin{tikzpicture} \squares{ /squares/label/.cd, 0=$\bullet$, 1=$\bullet$, 2=$\bullet$, 3=$\bullet$, 12=$g$, 02=$f$, 03=$f$, 13=$f$, 012=$\alpha$, 023=${=}$, 123=$\alpha$, 013=${=}$, 0123=$1_{\alpha}$, /squares/arrowstyle/.cd, 01={equal}, 23={equal}, 023={phantom, description}, 013={phantom, description}, /squares/labelstyle/.cd, 023={anchor=center}, 013={anchor=center} } \end{tikzpicture} \end{center} The conditions of normalisation impose that the image under $F$ of such a diagram of $A$, \ie the image of the normalised oplax $3$-functor $FT \colon \Deltan{3} \to B$, must be \begin{center} \begin{tikzpicture}[scale=1.5] \squares{ /squares/label/.cd, 0=$\bullet$, 1=$\bullet$, 2=$\bullet$, 3=$\bullet$, 12=$F_{\treeL}(g)$, 02=$F_{\treeL}(f)$, 03=$F_{\treeL}(f)$, 13=$F_{\treeL}(f)$, 012=$F_{\treeLL}(\alpha)$, 023=${=}$, 123=$F_{\treeLL}(\alpha)$, 013=${=}$, 0123=${\Gamma}$, /squares/arrowstyle/.cd, 01={equal}, 23={equal}, 023={phantom, description}, 013={phantom, description}, /squares/labelstyle/.cd, 023={anchor=center}, 013={anchor=center} } \end{tikzpicture}\ , \end{center} where \[ \Gamma = FT_{\treeW}(\atom{23}, \atom{12}, \atom{01})\,. \] Now, the four $3$-cells of $B$ appearing in the definition of $\Gamma$ are (see~\ref{paragr:def_cellular_to_simplicial}) \[ F_{\treeVLeft}(\alpha, 1_a) = 1_{F_{\treeLL}(\alpha)}\,, \] \[ F_{\treeW}(1_{a'}, g, 1_a) = 1_{F_{\treeL}(g)}\,, \] \[ F_{\treeLLL}(1_\alpha) = 1_{F_{\treeLL}(\alpha)} \] and \[ F_{\treeVRight}(1_{a'}, \alpha) = 1_{F_{\treeLL}(\alpha)}\,. \] Hence, for any $2$-cell $\alpha$ of $A$, we have that the $3$-cells $\tau_{\text{d}}(\alpha)$ and $\tau_{\text{u}}(\alpha)$ of $B$ associated to the morphism of simplicial sets $\SNn{l}(F)\colon N_\infty(A) \to N_\infty(B)$ are both trivial. \end{paragr} \begin{paragr}\label{paragr:encode_comp_1cells} Consider two composable $1$-cells \[ \begin{tikzcd} a \ar[r, "f"] & a' \ar[r, "g"] & a'' \end{tikzcd} \] of $A$. The simplicial set $N_\infty(A)$ encodes the composition of $f$ and $g$ with the $2$-simplex \[ \begin{tikzcd}[column sep=small] a \ar[rr, "g\comp_0 f", ""{below, name=gf}] \ar[rd, "f"'] && a'' \\ & a' \ar[ur, "g"'] & \ar[phantom, from=gf, to=2-2, shorten >=3pt, "=" description] \end{tikzcd}\ . \] The morphism $F$ maps this $2$-simplex to a $2$-simplex \[ \begin{tikzcd}[column sep=small] Fa \ar[rr, "F(g\comp_0 f)", ""{below, name=gf}] \ar[rd, "Ff"'] && Fa'' \\ & Fa' \ar[ur, "Fg"'] & \ar[Rightarrow, from=gf, to=2-2, shorten >=3pt] \end{tikzcd}\ , \] where we call $F_{g, f}$ the $2$-cell of $B$ filling the triangle, \ie having $F(g \comp_0 f)$ as source and $Fg \comp_0 Ff$ as target; we shall often write $Fgf$ for the $1$-cell $F(g \comp_0 f)$ of $B$. If the morphism of simplicial sets $F$ is the nerve of a normalised oplax $3$-functor $G \colon A \to B$, then by definition $F_{g, f} = G_{\treeV}(g, f)$ (see~\ref{paragr:def_cellular_to_simplicial}). \end{paragr} \begin{paragr}\label{paragr:encode_triangle} Consider a $2$-cell \[ \begin{tikzcd}[column sep=small, row sep=2pt] a \ar[rr, bend left=30, "f", ""{below, name=f}] \ar[rd, bend right=20, "g"'] && a'' \\ & a' \ar[ru, bend right=20, "h"'] & \ar[Rightarrow, from=f, to=2-2, "\alpha"] \end{tikzcd} \] of $A$.
The simplicial set $N_\infty(A)$ can encode the cell $\alpha$ with the $2$-simplices $\alpha_l$ and $\alpha_r$ described above in paragraph~\ref{paragr:encode_2cell}, but also with the $2$-simplex \[ \bar{\alpha} := \begin{tikzcd}[column sep=small] a \ar[rr, "f", ""{below, name=f}] \ar[rd, "g"'] && a'' \\ & a' \ar[ur, "h"'] & \ar[Rightarrow, from=f, to=2-2, shorten >=3pt, "\alpha"] \end{tikzcd} \] of $N_\infty(A)$. These three $2$-simplices of $N_\infty(A)$ are tied together by the two $3$-simplices described in paragraph~\ref{paragr:encode_2cell}, but also by the following two $3$-simplices \begin{center} \begin{tikzpicture}[scale=1.6, font=\footnotesize] \squares{ /squares/label/.cd, 0=$\bullet$, 1=$\bullet$, 2=$\bullet$, 3=$\bullet$, 01={}, 12=$g$, 23=$h$, 02=$g$, 03=$f$, 13=$hg$, 012=${=}$, 023=$\alpha$, 123=${=}$, 013=$\alpha$, 0123=${=}$, /squares/arrowstyle/.cd, 01={equal}, 012={phantom, description}, 123={phantom, description}, 0123={phantom, description}, /squares/labelstyle/.cd, 012={anchor=center}, 123={anchor=center}, 0123={anchor=center} } \end{tikzpicture} \end{center} and \begin{center} \begin{tikzpicture}[scale=1.6, font=\footnotesize] \squares{ /squares/label/.cd, 0=$\bullet$, 1=$\bullet$, 2=$\bullet$, 3=$\bullet$, 01=$g$, 12=$h$, 23={}, 02=$hg$, 03=$f$, 13=$h$, 012=${=}$, 023=$\alpha$, 123=${=}$, 013=$\alpha$, 0123=${=}$, /squares/arrowstyle/.cd, 23={equal}, 012={phantom, description}, 123={phantom, description}, 0123={phantom, description}, /squares/labelstyle/.cd, 012={anchor=center}, 123={anchor=center}, 0123={anchor=center} } \end{tikzpicture} \end{center} of $N_\infty(A)$, whose image under $F$ gives the following two $3$-simplices of $N_\infty(B)$: \begin{center} \begin{tikzpicture}[scale=1.6, font=\footnotesize] \squares{ /squares/label/.cd, 0=$\bullet$, 1=$\bullet$, 2=$\bullet$, 3=$\bullet$, 01={}, 12=$Fg$, 23=$Fh$, 02=$Fg$, 03=$Ff$, 13=$Fhg$, 012=${=}$, 023=$F(\bar\alpha)$, 123=$F_{h, g}$, 013=$F(\alpha_l)$, 0123=$\gamma_{\text{l}}(\alpha)$, /squares/arrowstyle/.cd, 01={equal}, 012={phantom, description}, /squares/labelstyle/.cd, 012={anchor=center} } \end{tikzpicture} \end{center} and \begin{center} \begin{tikzpicture}[scale=1.6, font=\footnotesize] \squares{ /squares/label/.cd, 0=$\bullet$, 1=$\bullet$, 2=$\bullet$, 3=$\bullet$, 01=$Fg$, 12=$Fh$, 23={}, 02=$Fhg$, 03=$Ff$, 13=$Fh$, 012=$F_{h, g}$, 023=$F(\alpha_r)$, 123=${=}$, 013=$F(\bar\alpha)$, 0123=$\gamma_{\text{r}}(\alpha)$, /squares/arrowstyle/.cd, 23={equal}, 123={phantom, description}, /squares/labelstyle/.cd, 123={anchor=center} } \end{tikzpicture}\ . \end{center} \end{paragr} \begin{rem} If $B$ is a $2$-category, then the $2$-cells $F_{h, g}\comp_1 F(\alpha_l)$, $F_{h, g} \comp_1 F(\alpha_r)$ and $F(\bar\alpha)$ coincide. \end{rem} \begin{paragr}\label{paragr:gamma_invertible} The $3$-cells $\gamma_\text{l}(\alpha)$ and $\gamma_\text{r}(\alpha)$ of $B$ described in the preceding paragraph turn out to be connected by two non-degenerate $4$-simplices of $N_\infty(B)$, that we shall call $x_\gamma$ and $y_\gamma$. The first one, displayed in figure~\ref{fig:gamma1}, witnesses the relation \[ \gamma_\text{l}(\alpha) \comp_2 \gamma_\text{r}(\alpha) = F_{h, g} \comp_1 \tau_\text{u}(\alpha)\,. \] The second one, displayed in figure~\ref{fig:gamma2}, witnesses instead the relation \[ \gamma_\text{r}(\alpha) \comp_2 (F_{h, g} \comp_1 \tau_\text{d}(\alpha)) \comp_2 \gamma_\text{l}(\alpha) = 1_{F\bar\alpha}\,.
\] We already know by paragraph~\ref{paragr:tau_invertible} that $\tau_\text{u}(\alpha)$ and $\tau_\text{d}(\alpha)$ are two invertible $3$-cells of $B$, inverses of each other. Hence we obtain that $\gamma_\text{l}(\alpha)$ and $\gamma_\text{r}(\alpha)$ are invertible $3$-cells of $B$, with $F_{h, g}\comp_1 \tau_\text{d}(\alpha) \comp_2 \gamma_\text{l}(\alpha)$ as inverse of $\gamma_\text{r}(\alpha)$. \begin{figure} \caption{The $4$-simplices governing $\gamma_{\text{l}}(\alpha)$ and $\gamma_{\text{r}}(\alpha)$.} \label{fig:gamma1} \label{fig:gamma2} \end{figure} \end{paragr} \begin{paragr}\label{paragr:cond-ii} Let $F \colon A \to B$ be a normalised oplax $3$-functor. Consider the normalised oplax $3$-functor $T \colon \Deltan{3} \to A$ given by \begin{center} \begin{tikzpicture}[scale=1.3] \squares{ /squares/label/.cd, 01={}, 12={$g$}, 23={$h$}, 03={$f$}, 02={$g$}, 13={$hg$}, 012={$=$}, 023={$\alpha$}, 013={$\alpha$}, 123={$=$}, 0123={$1_{\alpha}$}, /squares/arrowstyle/.cd, 01={equal}, 012={phantom, description}, 123={phantom, description}, /squares/labelstyle/.cd, 012={anchor=center}, 123={anchor=center} } \end{tikzpicture}\ . \end{center} The conditions of normalisation impose that the image under $F$ of $T$ is \begin{center} \begin{tikzpicture}[scale=1.5] \squares{ /squares/label/.cd, 01={}, 12={$F_{\treeL}(g)$}, 23={$F_{\treeL}(h)$}, 03={$F_{\treeL}(f)$}, 02={$F_{\treeL}(g)$}, 13={$F_{\treeL}(hg)$}, 012={$=$}, 023={$F(\bar\alpha)$}, 013={$F_{\treeLL}(\alpha)$}, 123={$F_{\treeV}(h, g)$}, 0123={$\Gamma$}, /squares/arrowstyle/.cd, 01={equal}, 012={phantom, description}, 123={phantom, description}, /squares/labelstyle/.cd, 012={anchor=center}, 123={anchor=center} } \end{tikzpicture}\,, \end{center} Moreover, the four main $3$-cells of $\Gamma$ are by definition (see~\ref{paragr:def_cellular_to_simplicial}): \[ F_{\treeVLeft}(1_{hg}, 1_a) = 1_{F_{\treeLL}(hg)}\,, \] \[ F_{\treeW}(h, g, 1_a) = 1_{F_{\treeV}(h, g)}\,, \] \[ F_{\treeLLL}(1_{\alpha}) = 1_{F_{\treeLL}(\alpha)} \] and \[ F_{\treeVRight}(h, 1_g) = 1_{F_{\treeV}(h, g)}\,. \] Hence the $3$-cell $\Gamma$ is trivial. This is equivalent to saying that for any diagram \[ \begin{tikzcd}[column sep=small, row sep=2pt] a \ar[rr, bend left=30, "f", ""{below, name=f}] \ar[rd, bend right=20, "g"'] && a'' \\ & a' \ar[ru, bend right=20, "h"'] & \ar[Rightarrow, from=f, to=2-2, "\alpha"] \end{tikzcd} \] of $A$, the $3$-cells $\gamma_{\text{l}}(\alpha)$ and $\gamma_{\text{r}}(\alpha)$ associated to the morphism of simplicial sets $\SNn{l}(F) \colon N_\infty(A) \to N_\infty(B)$ are trivial. \end{paragr} \begin{paragr}\label{paragr:sigma} Consider two $1$-composable $2$-cells \[ \begin{tikzcd}[column sep=4.8em] a^{\phantom\prime} \ar[r, bend left=70, looseness=1.4, "f", ""'{name=f}] \ar[r, "g"{description, name=g}] \ar[r, bend right=70, looseness=1.4, ""{name=h}, "h"'] \ar[Rightarrow, from=f, to=g, shorten <=1mm, shorten >= 2mm, "\alpha"] \ar[Rightarrow, from=g, to=h, shorten <=2mm, shorten >= 1mm, "\beta"] & a' \end{tikzcd} \] of $A$.
We have a $3$-simplex $\sigma_{\alpha, \beta}$ \begin{center} \begin{tikzpicture}[scale=1.6] \squares{ /squares/label/.cd, 0=$\bullet$, 1=$\bullet$, 2=$\bullet$, 3=$\bullet$, 01={}, 12=$h$, 23={}, 03=$f$, 02=$g$, 13=$g$, 012=$\beta$, 023=$\alpha$, 013=$\alpha$, 123=$\beta$, 0123=${=}$, /squares/arrowstyle/.cd, 01={equal}, 23={equal}, 0123={phantom, description}, /squares/labelstyle/.cd, 0123={anchor=center} } \end{tikzpicture} \end{center} of $N_\infty(A)$ whose image under $F$ is given by the following $3$\hyp{}simplex $F(\sigma_{\alpha, \beta})$ of $N_\infty(B)$ \begin{center} \begin{tikzpicture}[scale=1.7] \squares{ /squares/label/.cd, 0=$\bullet$, 1=$\bullet$, 2=$\bullet$, 3=$\bullet$, 01={}, 12=$Fh$, 23={}, 03=$Ff$, 02=$Fg$, 13=$Fg$, 012=$F(\beta_l)$, 023=$F(\alpha_r)$, 013=$F(\alpha_l)$, 123=$F(\beta_r)$, 0123=${\sigma(\beta, \alpha)}$, /squares/arrowstyle/.cd, 01={equal}, 23={equal} } \end{tikzpicture}\ . \end{center} There is a close relationship between the $3$-cell $\sigma(\beta, \alpha)$ and the $3$-cells $\tau_{\text{u}}(\alpha)$ and $\tau_{\text{d}}(\beta)$ of $B$, as displayed by the $4$-simplex $x_\sigma$ in figure~\ref{fig:sigma}. In particular, $\sigma(\beta, \alpha)$ is an invertible $3$-cell of $B$. Being more precise, we have \[ \sigma(\beta, \alpha) = \tau_{\text{d}}(\beta) \comp_1 \tau_{\text{u}}(\alpha)\,. \] \begin{figure} \caption{The $4$-simplex $x_\sigma$} \label{fig:sigma} \end{figure} \end{paragr} \begin{rem}\label{rem:treeY} If $F \colon A\to B$ be a normalised oplax $3$-functor, then it follows by paragraph~\ref{paragr:cond-i} and by the relation of the previous paragraph that given any pair $(\beta, \alpha)$ of $1$-composable $2$-cells of $A$, the $3$-cell $\sigma(\beta, \alpha)$ of~$B$ associated to the morphism of simplicial sets $\SNn{l}(F) \colon N_\infty(A) \to N_\infty(B)$ is \emph{trivial}. This can be taken as a further justification for the choice of listing the datum associated to the tree $\treeY$, representing the vertical composition of $2$-cells, as a coherence and not as a structural cell in the definition of normalised oplax $3$-functor. Indeed, any preferred direction (lax/oplax) would be incompatible with the combinatorics dictated by the simplicial sets; more precisely, it would be irreconcilable with the combinatorics of the orientals and thus with the data encoded by morphisms of simplicial sets between Street nerve of $3$-categories. \end{rem} \begin{paragr}\label{paragr:encode_horizontal_comp} The images of vertical compositions of cells encode a great deal of information into the form of coherences, \ie invertible cells of $B$. An example of critical importance for the following sections is presented in this paragraph. 
Consider two $1$-composable $2$-cells \[ \begin{tikzcd}[row sep=1.35em] a^{\phantom\prime} \ar[rr, bend left=75, "f", ""'{name=f}] \ar[rr, "g"{description, name=g}] \ar[rd, bend right, "h"'] && a'' \\ & a' \ar[ru, bend right, "i"'] & \ar[Rightarrow, from=f, to=g, shorten <=1mm, shorten >=2mm, "\alpha"] \ar[Rightarrow, from=g, to=2-2, shorten <=1mm, pos=0.4, "\beta" near end] \end{tikzcd} \] of $A$, to which we can associate the following two $3$-simplices \begin{center} \begin{tikzpicture}[scale=1.5] \squares{ /squares/label/.cd, 01={$h$}, 12={$i$}, 23={}, 03={$f$}, 02={$g$}, 13={$i$}, 012={$\beta$}, 023={$\alpha$}, 013={$\beta\comp_1\alpha$}, 123={$=$}, 0123={$=$}, /squares/arrowstyle/.cd, 23={equal}, 123={phantom, description}, 0123={phantom, description}, /squares/labelstyle/.cd, 013={description, near start}, 123={anchor=center}, 0123={anchor=center} } \end{tikzpicture} \end{center} and \begin{center} \begin{tikzpicture}[scale=1.5] \squares{ /squares/label/.cd, 01={}, 12={$h$}, 23={$i$}, 03={$f$}, 02={$h$}, 13={$i\comp_0 h$}, 012={$=$}, 023={$\beta\comp_1 \alpha$}, 013={$\alpha$}, 123={$\beta$}, 0123={$=$}, /squares/arrowstyle/.cd, 01={equal}, 012={phantom, description}, 0123={phantom, description}, /squares/labelstyle/.cd, 023={description, near start}, 012={anchor=center}, 0123={anchor=center} } \end{tikzpicture} \end{center} of $N_\infty(A)$. These are mapped under $F$ to the following two $3$\hyp{}simplices of $N_\infty(B)$: \begin{center} \begin{tikzpicture}[scale=1.65] \squares{ /squares/label/.cd, 01={$Fh$}, 12={$Fi$}, 23={}, 03={$Ff$}, 02={$Fg$}, 13={$Fi$}, 012={$F(\bar\beta)$}, 023={$F(\alpha_r)$}, 013={$F(\overline{\beta\comp_1\alpha})$}, 123={$=$}, 0123={$\eps_\text{l}(\beta, \alpha)$}, /squares/arrowstyle/.cd, 23={equal}, 123={phantom, description}, /squares/labelstyle/.cd, 013={description, near start = 1pt and 1pt}, 123={anchor=center} } \end{tikzpicture} \end{center} and \begin{center} \begin{tikzpicture}[scale=1.65] \squares{ /squares/label/.cd, 01={}, 12={$Fh$}, 23={$Fi$}, 03={$Ff$}, 02={$Fh$}, 13={$F(i\comp_0 h)$}, 012={$=$}, 023={$F(\overline{\beta\comp_1 \alpha})$}, 013={$F(\alpha_l)$}, 123={$F(\bar\beta)$}, 0123={$\eps_\text{r}(\beta, \alpha)$}, /squares/arrowstyle/.cd, 01={equal}, 012={phantom, description}, /squares/labelstyle/.cd, 023={description, near start = 1pt and 1pt}, 012={anchor=center} } \end{tikzpicture}\ . \end{center} \end{paragr} \begin{figure} \caption{The $4$-simplices governing $\eps_\text{l}(\beta, \alpha)$ and $\eps_\text{r}(\beta, \alpha)$.} \label{fig:eps1} \label{fig:eps2} \label{fig:eps} \end{figure} \begin{paragr}\label{paragr:eps_invertible} Under the assumptions of the preceding paragraph, we can construct the two $4$-simplices $x_\eps$ and $y_\eps$ of $N_\infty(B)$ displayed in figure~\ref{fig:eps}.
We have the following equalities of $3$-cells of $B$: \[ \begin{split} & \gamma_{\text{r}}(\beta)\comp_1 F\alpha_l \comp_2 F_{i, h} \comp_1 \sigma(\beta, \alpha) \comp_2 \gamma_{\text{l}}(\beta)\comp_1 F\alpha_r\\ =\ & \gamma_{\text{r}}(\beta)\comp_1 F\alpha_l \comp_2 (F_{i, h} \comp_1 \tau_{\text{d}}(\beta) \comp_1 \tau_{\text{u}}(\alpha) ) \comp_2 \gamma_{\text{l}}(\beta)\comp_1 F\alpha_r \\ =\ & (\gamma_{\text{r}}(\beta)\comp_1 F_{i, h} \comp_2 \tau_{\text{d}}(\beta) \comp_2\gamma_{\text{l}}(\beta) ) \comp_1 \tau_{\text{u}}(\alpha) \\ =\ & 1_{F\beta_r\comp_1 F\alpha_l} \comp_1 \tau_{\text{u}}(\alpha) = F\bar\beta \comp_1 \tau_{\text{u}}(\alpha)\,, \end{split} \] where the first equality follows by paragraph~\ref{paragr:sigma}, the second one by the exchange law and the third one by paragraph~\ref{paragr:gamma_invertible}. Hence the $4$\hyp{}simplex $x_\eps$ depicted in figure~\ref{fig:eps1} witnesses the relation \[ \eps_{\text{r}}(\beta, \alpha) \comp_2 \eps_{\text{l}}(\beta, \alpha) = F\bar\beta \comp_1 \tau_{\text{u}}(\alpha)\,, \] that by paragraph~\ref{paragr:tau_invertible} is equivalent to saying that the $3$-cell \[ F\bar\beta \comp_1 \tau_{\text{d}}(\alpha) \comp_2 \eps_{\text{r}}(\beta, \alpha) \comp_2 \eps_{\text{l}}(\beta, \alpha) \] is precisely the identity of the $2$-cell \[ F_{i,h} \comp_1 F\bar\beta \comp_1 F\alpha_l\,. \] Moreover, the $4$-simplex $y_\eps$ depicted in figure~\ref{fig:eps2} gives us that the $3$-cell \[ \eps_{\text{l}}(\beta, \alpha) \comp_2 F\beta_r \comp_1 \tau_{\text{d}}(\alpha) \comp_2 \eps_{\text{r}}(\beta, \alpha) \] of $B$ is an identity cell, too. Therefore both the $3$-cells $\eps_{\text{l}}(\beta, \alpha)$ and $\eps_{\text{r}}(\beta, \alpha)$ are invertible. \end{paragr} \begin{paragr}\label{paragr:encode_simple_horizontal_comp} Consider two $1$-composable $2$-cells $\alpha$ and $\beta$ of $A$ as in paragraph~\ref{paragr:sigma} and the two $3$-simplices \begin{center} \begin{tikzpicture}[scale=1.5, font=\footnotesize] \squares{ /squares/label/.cd, 01={}, 12={$h$}, 23={}, 03={$f$}, 02={$g$}, 13={$h$}, 012={$\beta$}, 023={$\alpha$}, 013={$\beta\comp_1 \alpha$}, 123={$=$}, 0123={$=$}, /squares/arrowstyle/.cd, 01={equal}, 23={equal}, 123={phantom, description}, 0123={phantom, description}, /squares/labelstyle/.cd, 013={description, near start}, 123={anchor=center}, 0123={anchor=center} } \end{tikzpicture} \end{center} and \begin{center} \begin{tikzpicture}[scale=1.5, font=\footnotesize] \squares{ /squares/label/.cd, 01={}, 12={$h$}, 23={}, 03={$f$}, 02={$h$}, 13={$h$}, 012={$=$}, 023={$\beta\comp_1\alpha$}, 013={$\alpha$}, 123={$\beta$}, 0123={$=$}, /squares/arrowstyle/.cd, 01={equal}, 23={equal}, 012={phantom, description}, 0123={phantom, description}, /squares/labelstyle/.cd, 023={description, near start}, 012={anchor=center}, 0123={anchor=center} } \end{tikzpicture} \end{center} of $N_\infty(A)$. It follows immediately from the previous paragraph that the main $3$-cells of the images by $F$ of the two $3$-simplices above is trivial. 
There are two other $3$-simplices of $N_\infty(A)$ that it is natural to consider, namely \begin{center} \begin{tikzpicture}[scale=1.5, font=\footnotesize] \squares{ /squares/label/.cd, 01={}, 12={$h$}, 23={}, 03={$f$}, 02={$g$}, 13={$f$}, 012={$\beta$}, 023={$\alpha$}, 013={$=$}, 123={$\beta\comp_1 \alpha$}, 0123={$=$}, /squares/arrowstyle/.cd, 01={equal}, 23={equal}, 013={phantom, description}, 0123={phantom, description}, /squares/labelstyle/.cd, 123={description, near start}, 013={anchor=center}, 0123={anchor=center} } \end{tikzpicture} \end{center} and \begin{center} \begin{tikzpicture}[scale=1.5, font=\footnotesize] \squares{ /squares/label/.cd, 01={}, 12={$h$}, 23={}, 03={$f$}, 02={$h$}, 13={$f$}, 012={$\beta\comp_1 \alpha$}, 023={$=$}, 013={$\alpha$}, 123={$\beta$}, 0123={$=$}, /squares/arrowstyle/.cd, 01={equal}, 23={equal}, 023={phantom, description}, 0123={phantom, description}, /squares/labelstyle/.cd, 012={description, near start}, 023={anchor=center}, 0123={anchor=center} } \end{tikzpicture} \end{center} Let us denote by $\omega_{\text{r}}(\beta, \alpha)$ and $\omega_{\text{l}}(\beta, \alpha)$ respectively the main $3$-cells of $B$ of the images under $F$ of the $3$-simplices of $N_\infty(A)$ above. Both these $3$-cells of $B$ are invertible. The proof for $\omega_{\text{r}}(\beta, \alpha)$ is given by the $4$-simplex $x_{\omega}$ of $N_\infty(B)$ depicted in figure~\ref{fig:omega_r}; the proof for $\omega_{\text{l}}(\beta, \alpha)$ is completely similar and we leave it to the reader. One can actually check that the four $3$-cells $\eps_{\text{l}}(\beta, \alpha)$, $\eps_{\text{r}}(\beta, \alpha)$, $\omega_{\text{l}}(\beta, \alpha)$ and $\omega_{\text{r}}(\beta, \alpha)$ are all tied together by a $5$-simplex of $N_\infty(B)$, which we will not need and so are not going to describe. \begin{figure} \caption{The $4$-simplex $x_\omega$, showing that $\omega_{\text{r}}(\beta, \alpha)$ is invertible.} \label{fig:omega_r} \end{figure} \end{paragr} \begin{paragr}\label{paragr:cond-iii} Let $F \colon A \to B$ be a normalised oplax $3$-functor and consider the normalised oplax $3$-functor $T \colon \Deltan{3} \to A$ given by \begin{center} \begin{tikzpicture}[scale=1.3] \squares{ /squares/label/.cd, 01={$h$}, 12={$i$}, 23={}, 03={$f$}, 02={$g$}, 13={$i$}, 012={$\beta$}, 023={$\alpha$}, 013={$\beta\comp_1\alpha$}, 123={$=$}, 0123={$1_{\beta \comp_1 \alpha}$}, /squares/arrowstyle/.cd, 23={equal}, 013={phantom, description}, 123={phantom, description}, /squares/labelstyle/.cd, 013={anchor=center}, 123={anchor=center} } \end{tikzpicture}\ . \end{center} Following the definition given in paragraph~\ref{paragr:def_cellular_to_simplicial}, the conditions of normalisation impose that the image under $F$ of $T$ is \begin{center} \begin{tikzpicture}[scale=1.5] \squares{ /squares/label/.cd, 01={$F_{\treeL}(h)$}, 12={$F_{\treeL}(i)$}, 23={}, 03={$F_{\treeL}(f)$}, 02={$F_{\treeL}(g)$}, 13={$F_{\treeL}(i)$}, 012={$F(\bar\beta)$}, 023={$F_{\treeLL}(\alpha)$}, 013={$\phantom{O}F(\overline{\beta\comp_1\alpha})$}, 123={}, 0123={$\Gamma$}, /squares/arrowstyle/.cd, 23={equal}, 013={phantom, description}, 123={phantom, description}, /squares/labelstyle/.cd, 013={anchor=center}, 123={anchor=center} } \end{tikzpicture}\,, \end{center} where paragraph~\ref{paragr:cond-ii} and Remark~\ref{rem:treeY} give \[F(\bar\beta) = F_{\treeV}(i, h) \comp_1 F_{\treeLL}(\beta)\] as well as \[ F(\overline{\beta\comp_1\alpha}) = F_{\treeV}(i, h) \comp_1 F_{\treeLL}(\beta) \comp_1 F_{\treeLL}(\alpha)\,.
\] Moreover, the four main $3$-cells of $\Gamma$ are by definition \[ F_{\treeVLeft}(1_{i}, h) = 1_{F_{\treeV}(i, h)}\,, \] \[ F_{\treeW}(1_{a''}, i, h) = 1_{F_{\treeV}(i, h)}\,, \] \[ F_{\treeLLL}(1_{\beta\comp_1\alpha}) = 1_{F_{\treeLL}(\beta)\comp_1 F_{\treeLL}(\alpha)} \] and \[ F_{\treeVRight}(1_{a''}, \beta) = 1_{F_{\treeLL(\beta)}}\,. \] Hence the $3$-cell $\Gamma$ is trivial, which implies that the $3$-cells $\eps_{\text{l}}(\beta, \alpha)$, $\eps_{\text{r}}(\beta, \alpha)$, $\omega_{\text{l}}(\beta, \alpha)$ and $\omega_{\text{r}}(\beta, \alpha)$ associated to the morphism of simplicial sets $\SNn{l}(F)\colon N_\infty(A) \to N_\infty(B)$ are all trivial. \end{paragr} \begin{paragr}\label{paragr:encode_3cells} The last piece of information we want to analyse in this section is the behaviour of the morphism $F \colon N_\infty(A) \to N_\infty(B)$ with respect to the $3$\nbd-cells of $A$. Let \[ \begin{tikzcd}[column sep=4.5em] \bullet \ar[r, bend left=60, "f", ""'{name=f}] \ar[r, bend right=60, "g"', ""{name=g}] \ar[Rightarrow, from=f, to=g, shift right=0.5em, bend right, shorten <=1mm, shorten >=1mm, "\alpha"', ""{name=al}] \ar[Rightarrow, from=f, to=g, shift left=0.5em, bend left, shorten <=1mm, shorten >=1mm, "\beta", ""'{name=ar}] \arrow[triple, from=al, to=ar, "\Gamma"]{} & \bullet \end{tikzcd} \] be a $3$-cell of $A$. We have several ways of encoding $\Gamma$ as a $3$-simplex of $N_\infty(A)$. In particular, we have the following $3$-simplices: \begin{center} \begin{tikzpicture}[scale=1.5, font=\footnotesize] \squares{ /squares/label/.cd, 01={}, 12={$g$}, 23={}, 03={$f$}, 02={$g$}, 13={$g$}, 012={$\alpha$}, 023={$=$}, 013={$=$}, 123={$\beta$}, 0123={$\Gamma$}, /squares/arrowstyle/.cd, 01={equal}, 23={equal}, 023={phantom, description}, 013={phantom, description}, /squares/labelstyle/.cd, 023={anchor=center}, 013={anchor=center} } \end{tikzpicture} \end{center} \begin{center} \begin{tikzpicture}[scale=1.5, font=\footnotesize] \squares{ /squares/label/.cd, 01={}, 12={$g$}, 23={}, 03={$f$}, 02={$g$}, 13={$g$}, 012={$\alpha$}, 023={$=$}, 013={$\beta$}, 123={$=$}, 0123={$\Gamma$}, /squares/arrowstyle/.cd, 01={equal}, 23={equal}, 023={phantom, description}, 123={phantom, description}, /squares/labelstyle/.cd, 023={anchor=center}, 123={anchor=center} } \end{tikzpicture} \end{center} \begin{center} \begin{tikzpicture}[scale=1.5, font=\footnotesize] \squares{ /squares/label/.cd, 01={}, 12={$g$}, 23={}, 03={$f$}, 02={$g$}, 13={$g$}, 012={$=$}, 023={$\alpha$}, 013={$=$}, 123={$\beta$}, 0123={$\Gamma$}, /squares/arrowstyle/.cd, 01={equal}, 23={equal}, 012={phantom, description}, 013={phantom, description}, /squares/labelstyle/.cd, 012={anchor=center}, 013={anchor=center} } \end{tikzpicture} \end{center} \begin{center} \begin{tikzpicture}[scale=1.5, font=\footnotesize] \squares{ /squares/label/.cd, 01={}, 12={$g$}, 23={}, 03={$f$}, 02={$g$}, 13={$g$}, 012={$=$}, 023={$\alpha$}, 013={$\beta$}, 123={$=$}, 0123={$\Gamma$}, /squares/arrowstyle/.cd, 01={equal}, 23={equal}, 012={phantom, description}, 123={phantom, description}, /squares/labelstyle/.cd, 012={anchor=center}, 123={anchor=center} } \end{tikzpicture} \end{center} We claim that all these $3$-simplices of $N_\infty(A)$ are sent under $F$ to $3$-simplices of $N_\infty(B)$ such that their principal $3$-cell is invertible, principal cells that we shall call $F\Gamma_i$, $i=1, 2, 3, 4$, respectively. 
The $4$-simplex $z_\Gamma$ of $N_\infty(B)$ depicted in figure~\ref{fig:Gamma1} shows this claim for the last two $3$-simplices, while the $4$-simplex~$y_\Gamma$ depicted in figure~\ref{fig:Gamma2} proves the claim for the middle two. We leave to the reader the easy task of describing a $4$-simplex of $N_\infty(B)$ showing the claim for the first two $3$-simplices above. \end{paragr} \begin{figure} \caption{The $4$-simplices governing the images of $\Gamma$.} \label{fig:Gamma1} \label{fig:Gamma2} \label{fig:Gamma} \end{figure} \begin{rem} Notice that the $3$-cells $F\Gamma_i$, $i=1, 2, 3, 4$, of the preceding paragraph are linked together by composition with invertible $3$-cells studied in this subsection, such as $\tau_{\text{d}}$ and $\tau_{\text{u}}$. If $F$ is actually the nerve of a normalised oplax $3$-functor $G$, then we have shown that these $3$-cells are trivial and therefore in this case the four $3$-cells $\SNn{l}(G)\Gamma_i$ are in fact all equal to $G_{\treeLLL}(\Gamma)$. \end{rem} \section{Simplicial oplax 3-morphisms} The previous subsection shows that if we consider a normalised oplax $3$-functor $F \colon A \to B$, then its nerve $\SNn{l}(F)$ has the property that some particular (non-degenerate) $3$-simplices of $N_\infty(A)$ with trivial principal $3$-cell are sent to $3$-simplices of $N_\infty(B)$ where the principal $3$-cell is also trivial. In this section we consider the class of morphisms of simplicial sets between Street nerves of $3$-categories having precisely this property and we show that they form a subcategory of simplicial sets. In fact, we will prove that they are canonically equivalent to normalised oplax $3$-functors. As a first main step towards this correspondence, in the following subsection we shall show how to associate a normalised oplax $3$-functor to a simplicial morphism between nerves of $3$-categories satisfying these trivialising properties. Throughout this subsection, we shall make heavy use of the notations introduced in the preceding subsection. \begin{definition}\label{def:simpl_oplax} Let $A$ and $B$ be two small $3$-categories. We say that a morphism $F \colon N_\infty(A) \to N_\infty(B)$ is a \ndef{simplicial oplax $3$-morphism} \index{simplicial oplax $3$-morphism} if the following conditions are satisfied: \begin{enumerate} \item\label{cond:simpl_oplax-i} for any $2$-cell $\alpha$ of $A$, the $3$-cell $\tau_{\text{d}}(\alpha)$ of $B$ is trivial; \item\label{cond:simpl_oplax-ii} for any $2$-cell $\alpha \colon f \to h\comp_0 g$ of $A$, the $3$-cell $\gamma_{\text{l}}(\alpha)$ of $B$ is trivial; \item\label{cond:simpl_oplax-iii} for any pair of $1$-composable $2$-cells $\alpha$ and $\beta$ of $A$ as in~\ref{paragr:encode_horizontal_comp}, the $3$-cell $\eps_{\text{l}}(\beta, \alpha)$ of $B$ is trivial. \end{enumerate} \end{definition} \begin{rem} It is clear that the definition above can be framed within stratified simplicial sets. However, the author sees little or no advantage in pursuing this point of view, since the $3$-simplices involved are very particular and no lifting property is present. \end{rem} \begin{rem}\label{rem:trivial_cells} The relations described in paragraphs~\ref{paragr:tau_invertible} and~\ref{paragr:sigma} tell us that, under condition~\ref{cond:simpl_oplax-i} above, the $3$-cells $\tau_{\text{u}}(\gamma)$ and $\sigma(\beta, \alpha)$ of $B$ are also trivial, for any choice of $2$-cells $\alpha$, $\beta$ and $\gamma$ of $A$ such that the first two are $1$-composable.
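To spell this out, using only the relations already established: if $\tau_{\text{d}}(\gamma)$ is an identity $3$-cell, then the relation witnessed by the $4$-simplex $x_\tau$ in paragraph~\ref{paragr:tau_invertible}, together with the formula of paragraph~\ref{paragr:sigma}, gives
\[
\tau_{\text{u}}(\gamma) \;=\; \tau_{\text{d}}(\gamma) \comp_2 \tau_{\text{u}}(\gamma) \;=\; 1_{F(\gamma_r)}
\qquad\text{and}\qquad
\sigma(\beta, \alpha) \;=\; \tau_{\text{d}}(\beta) \comp_1 \tau_{\text{u}}(\alpha)\,,
\]
so that $\sigma(\beta, \alpha)$ is an identity as well, being a $\comp_1$-composite of identity $3$-cells.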
Assuming conditions~\ref{cond:simpl_oplax-i} and~\ref{cond:simpl_oplax-ii} and using what we just observed in the relations of paragraph~\ref{paragr:gamma_invertible} we get immediately that the $3$-cell $\gamma_{\text{r}}(\alpha)$ is trivial. If $F \colon N_\infty(A) \to N_\infty(B)$ is a simplicial oplax $3$-morphism, putting together all we have said right above and the relations analysed in paragraphs~\ref{paragr:eps_invertible} and~\ref{paragr:encode_simple_horizontal_comp} gives us that the $3$-cells $\eps_{\text{r}}(\beta, \alpha)$, $\omega_{\text{r}}(\beta, \alpha)$ and $\omega_{\text{l}}(\beta, \alpha)$ are trivial, for any appropriate choice of $3$-cells $\alpha$ and $\beta$ of $A$. In short, all the invertible $3$-cells of $B$ we described in the previous subsection are actually trivial whenever $F$ is a simplicial oplax $3$-morphism. \end{rem} \begin{paragr}\label{paragr:2-cell_B} Let $F \colon N_\infty(A) \to N_\infty(B)$ be a morphism of simplicial sets satisfying condition~\ref{cond:simpl_oplax-i}. It follows from the previous remark that for any $2$-cell $\alpha$ of $A$, the $2$-cells $F(\alpha_l)$ and $F(\alpha_r)$ of $B$ coincide. Whenever this happens we shall then simply write $F(\alpha)$, or more often just $F\alpha$ for this $2$-cell of $B$. Furthermore, the relations observed in paragraph~\ref{paragr:encode_3cells} give us that for any $3$-cell $\Gamma \colon\alpha \to \beta$ of $A$, the four ways we described in that paragraph to encode the image of $\Gamma$ via $F$ are all the same $3$-cell of $B$, that we shall then call $F(\Gamma)$ or simply $F\Gamma$. \end{paragr} \begin{exem} For any normalised oplax $3$-functor $F \colon A \to B$,the previous subsection shows that its nerve $\SNn{l}(F)$ is a simplicial oplax $3$-morphism. \end{exem} We now check that simplicial oplax $3$-morphisms are closed under composition. Let $F \colon A \to B$ and $G \colon B \to C$ be two simplicial oplax $3$-morphisms. \begin{paragr}\label{paragr:simpl_oplax_i} Let $\alpha \colon f \to g$ be a $2$-cell of $A$. By assumption, we have the $3$-simplex \begin{center} \begin{tikzpicture}[scale=1.5, font=\footnotesize] \squares{ /squares/label/.cd, 01={}, 12={$F(g)$}, 23={}, 03={$F(f)$}, 02={$F(f)$}, 13={$F(f)$}, 012={$F(\alpha_l)$}, 023={$=$}, 013={$=$}, 123={$F(\alpha_r)$}, 0123={$=$}, /squares/arrowstyle/.cd, 01={equal}, 23={equal}, 023={phantom, description}, 013={phantom, description}, 0123={phantom, description}, /squares/labelstyle/.cd, 012={below right= 1pt and 1pt}, 123={below left = 1pt and 1pt}, 023={anchor=center}, 013={anchor=center}, 0123={anchor=center} } \end{tikzpicture} \end{center} of $N_\infty(B)$ . Setting $\beta = F(\alpha_l) = F(\alpha_r)$ and applying $G$ to the two $3$-simplices above, we get the following $3$-simplex of $N_\infty(C)$: \begin{center} \begin{tikzpicture}[scale=1.5, font=\footnotesize] \squares{ /squares/label/.cd, 01={}, 12={$GFg$}, 23={}, 03={$GFf$}, 02={$GFf$}, 13={$GFf$}, 012={$G(\beta_l)$}, 023={$=$}, 013={$=$}, 123={$G(\beta_r)$}, 0123={$=$}, /squares/arrowstyle/.cd, 01={equal}, 23={equal}, 023={phantom, description}, 013={phantom, description}, 0123={phantom, description}, /squares/labelstyle/.cd, 012={below right= 1pt and 1pt}, 123={below left = 1pt and 1pt}, 023={anchor=center}, 013={anchor=center}, 0123={anchor=center} } \end{tikzpicture}\ . \end{center} Since $\beta_l = F(\alpha_l)$ and $\beta_r = F(\alpha_r)$ by definition, we have that $G(\beta_l) = GF(\alpha_l)$ and $G(\beta_r) = GF(\alpha_r)$. 
Thus the morphism $GF \colon N_\infty(A) \to N_\infty(C)$ of simplicial sets satisfies condition~\ref{cond:simpl_oplax-i} of the definition of simplicial oplax $3$-morphisms. \end{paragr} \begin{paragr}\label{paragr:simpl_oplax_ii} Let $\alpha \colon f \to h\comp_0 g$ be a $2$-cell of $A$. By assumption, we have a $3$-simplex \begin{center} \begin{tikzpicture}[scale=1.5, font=\footnotesize] \squares{ /squares/label/.cd, 01={}, 12={$F(g)$}, 23={$F(h)$}, 03={$F(f)$}, 02={$F(g)$}, 13={$F(hg)$}, 012={$=$}, 023={$F(\bar\alpha)$}, 013={$F(\alpha)$}, 123={$F_{h, g}$}, 0123={$\gamma_{\text{l}}(\alpha)$}, /squares/arrowstyle/.cd, 01={equal}, 012={phantom, description}, 0123={equal}, /squares/labelstyle/.cd, 012={below right= 1pt and 1pt}, 123={below left = 1pt and 1pt}, 012={anchor=center} } \end{tikzpicture} \end{center} of $N_\infty(B)$, where the main $3$-cell is trivial as the morphism $F$ verifies condition~\ref{cond:simpl_oplax-ii}. We have to show that the main $3$-cell of the $3$-simplex \begin{center} \begin{tikzpicture}[scale=1.5, font=\footnotesize] \squares{ /squares/label/.cd, 01={}, 12={$GFg$}, 23={$GFh$}, 03={$GFf$}, 02={$GFg$}, 13={$GFhg$}, 012={$=$}, 023={$GF(\bar\alpha)$}, 013={$GF(\alpha)$}, 123={$GF_{h, g}$}, 0123={$\gamma_{\text{l}}(\alpha)$}, /squares/arrowstyle/.cd, 01={equal}, 012={phantom, description}, /squares/labelstyle/.cd, 012={below right= 1pt and 1pt}, 123={below left = 1pt and 1pt}, 023={above left = -1pt and 1pt}, 013={swap, above right = -1pt and 1pt}, 012={anchor=center} } \end{tikzpicture} \end{center} of $N_\infty(C)$ is trivial. The $2$-cell $\beta = F_{h, g} \comp_1 F\alpha$ of $B$ has $Ff$ as source and $Fh \comp_0 Fg$ as target. So applying the morphism $G \colon N_\infty(B) \to N_\infty(C)$ we get a $3$-simplex \begin{center} \begin{tikzpicture}[scale=1.5, font=\footnotesize] \squares{ /squares/label/.cd, 01={}, 12={$GFg$}, 23={$GFh$}, 03={$GFf$}, 02={$GFg$}, 13={$GFhg$}, 012={$=$}, 023={$G\bar\beta$}, 013={$G(\beta)$}, 123={$G_{Fh, Fg}$}, 0123={$\gamma_{\text{l}}(\beta)$}, /squares/arrowstyle/.cd, 01={equal}, 012={phantom, description}, 0123={equal}, /squares/labelstyle/.cd, 123={below left = 1pt and 1pt}, 012={anchor=center} } \end{tikzpicture} \end{center} of $N_\infty(C)$ where the main $3$-cell is trivial by condition~\ref{cond:simpl_oplax-ii}. Notice that by definition $G\bar\beta = GF\bar\alpha$ and $G\beta = G(F_{h, g} \comp_1 F\alpha)$; this latter $2$-cell of~$C$ is equal to $G(F_{h, g}) \comp_1 GF\alpha$ by condition~\ref{cond:simpl_oplax-iii}. Moreover, condition~\ref{cond:simpl_oplax-ii} also entails that the following $3$-simplex \begin{center} \begin{tikzpicture}[scale=1.5, font=\footnotesize] \squares{ /squares/label/.cd, 01={$GFg$}, 12={$GFh$}, 23={}, 03={$GFhg$}, 02={$G(FhFg)$}, 13={$GFh$}, 012={$G_{Fh, Fg}$}, 023={$G(F_{h, g})$}, 013={$G(\overline{F_{h, g}})$}, 123={$=$}, 0123={$\gamma_{\text{r}}(F_{h, g})$}, /squares/arrowstyle/.cd, 23={equal}, 123={phantom, description}, 0123={equal}, /squares/labelstyle/.cd, 012={below right= 0pt and 1pt}, 123={anchor=center}, 023={above left}, 013={swap, above right = 1pt and 1pt} } \end{tikzpicture} \end{center} of $N_\infty(C)$ is trivial, as we have observed in Remark~\ref{rem:trivial_cells}. The $4$-simplex of $N_\infty(C)$ displayed in figure~\ref{fig:simpl_oplax_ii} allows us to conclude.
\begin{figure} \caption{The $3$-cell $\gamma_{\text{l}}(\alpha)$.} \label{fig:simpl_oplax_ii} \end{figure} \end{paragr} \begin{paragr}\label{paragr:simpl_oplax_iii} Consider two $1$-composable $2$-cells \[ \begin{tikzcd}[row sep=1.35em] a^{\phantom\prime} \ar[rr, bend left=75, "f", ""'{name=f}] \ar[rr, "g"{description, name=g}] \ar[rd, bend right, "h"'] && a'' \\ & a' \ar[ru, bend right, "i"'] & \ar[Rightarrow, from=f, to=g, shorten <=1mm, shorten >=2mm, "\alpha"] \ar[Rightarrow, from=g, to=2-2, shorten <=1mm, pos=0.4, "\beta" near end] \end{tikzcd} \] of $A$. The $3$-simplex \begin{center} \begin{tikzpicture}[scale=1.5, font=\footnotesize] \squares{ /squares/label/.cd, 01={$F(h)$}, 12={$F(i)$}, 23={}, 03={$F(f)$}, 02={$F(g)$}, 13={$F(i)$}, 012={$F(\bar\beta)$}, 023={$F(\alpha)$}, 013={$F(\overline{\beta\comp_1\alpha})$}, 123={$=$}, 0123={$\eps_{\text{l}}(\beta, \alpha)$}, /squares/arrowstyle/.cd, 23={equal}, 123={phantom, description}, 0123={equal}, /squares/labelstyle/.cd, 123={anchor=center}, 013={swap, above right = 2pt and 1pt} } \end{tikzpicture} \end{center} of $N_\infty(B)$ has trivial main $3$-cell $\eps_\text{l}(\beta, \alpha)$ by condition~\ref{cond:simpl_oplax-iii}. Thus $F\overline{\beta \comp_1 \alpha}$ is equal to $F\bar \beta \comp_1 F\alpha$ (which by the preceding paragraph we also know to be equal to the $2$\nbd-cell $F_{i, h} \comp_1 F\beta \comp_1 F\alpha$). Now, applying the morphism $G$ with condition~\ref{cond:simpl_oplax-iii} at hand we get the $3$-simplex \begin{center} \begin{tikzpicture}[scale=1.5, font=\footnotesize] \squares{ /squares/label/.cd, 01={$GFh$}, 12={$GFi$}, 23={}, 03={$GFf$}, 02={$GFg$}, 13={$GFi$}, 012={$G\overline{F\bar\beta}$}, 023={$GF\alpha$}, 013={$\;G\overline{F\overline{\beta\comp_1\alpha}}$}, 123={$=$}, 0123={$\eps_{\text{l}}(F\beta, F\alpha)$}, /squares/arrowstyle/.cd, 23={equal}, 123={phantom, description}, 013={phantom, description}, 0123={equal}, /squares/labelstyle/.cd, 123={anchor=center}, 013={anchor=center}, 012={below right} } \end{tikzpicture} \end{center} with trivial main $3$-cell $\eps_\text{l}(F\beta, F\alpha)$. Noticing that the $2$-simplex $G\overline{F\bar\beta}$ of $N_\infty(C)$ is precisely $GF\bar \beta$ and that the $2$-simplex $G\overline{F\overline{\beta\comp_1\alpha}}$ of $N_\infty(C)$ is equal to $GF\overline{\beta \comp_1 \alpha}$, we conclude that the morphism $GF \colon N_\infty(A) \to N_\infty(C)$ satisfies condition~\ref{cond:simpl_oplax-iii}. \end{paragr} \begin{thm}\label{thm:simpl_oplax_category} The class of simplicial oplax $3$-morphisms is stable under composition. \end{thm} \begin{proof} This follows immediately from paragraphs~\ref{paragr:simpl_oplax_i}, \ref{paragr:simpl_oplax_ii} and~\ref{paragr:simpl_oplax_iii}. \end{proof} \begin{definition}\label{def:simpl_oplax_category} We shall denote by $\nCat{3}_{\cDelta}$ the subcategory of the category of simplicial sets whose objects are the nerves of the small $3$-categories and whose morphisms are simplicial oplax $3$-morphisms. This shall be called the category of \ndef{small $3$-categories and simplicial oplax $3$-morphisms}. \end{definition} \begin{rem} The nerve of any $3$-functor $u \colon A \to B$ is clearly a simplicial oplax $3$-morphism, since the Street nerve of a $3$-functor sends $3$-simplices of $A$ with trivial principal $3$-cell to $3$-simplices of $B$ with trivial principal $3$-cell; hence, the nerve induces a faithful functor $\nCat{3} \hookrightarrow \nCat{3}_{\cDelta}$.
\end{rem} \subsection{Simplicial to cellular}\label{section:simplicial-to-cellular} Let $F \colon A \to B$ be a simplicial oplax $3$-morphism. \begin{paragr}[Data]\label{paragr:simplicial_to_cellular-data} We now associate to $F$ the data of a normalised oplax $3$-functor. \begin{description} \item[ \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ ] \end{forest} } ] The map $F_{\treeDot}$ that to each object $a$ of $A$, \ie any $0$-simplex of $N_\infty(A)_0$, assigns an object $F_{\treeDot}(a)$ of $B$, \ie a $0$-simplex of $N_\infty(B)$, is simply defined to be $F_0$. \item[ \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [] ] \end{forest} } ] The map $F_{\treeLog}$ that to each $1$-cell $f \colon a \to a'$ of $A$, \ie any $1$-simplex of $N_\infty(A)_1$, assigns a $1$-cell $F_{\treeLog}(f) \colon F_{\treeDot}(a) \to F_{\treeDot}(a')$, \ie a $1$-simplex of $N_\infty(B)$, is simply defined to be $F_1 \colon N_\infty(A)_1 \to N_\infty(B)_1$. \item[ \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [][] ] \end{forest} } ] The map $F_{\treeV}$ that to each pair of $0$\hyp{}composable $1$\hyp{}cells \[ \begin{tikzcd} a \ar[r, "f"] & a' \ar[r, "g"] & a'' \end{tikzcd} \] of $A$ assigns a $2$-cell $F_{\treeV}(g, f)$ \[ \begin{tikzcd}[column sep=small] F_{\treeDot}(a) \ar[rd, "F_{\treeLog}(f)"'] \ar[rr, "F_{\treeLog}(g\comp_0 f)"{name=gf}] && F_{\treeDot}(a'') \\ & F_{\treeDot}(a') \ar[ru, "F_{\treeV}(g)"'] & \ar[Rightarrow, shorten <=1.5mm, from=gf, to=2-2] \end{tikzcd} \] of $B$, that is \[ F_{\treeV}(g, f) \colon F_{\treeLog}(g \comp_0 f) \to F_{\treeLog}(g) \comp_0 F_{\treeLog}(f) \] is defined as $F_{\treeV}(g, f) := F_{g, f}$ (see paragraph~\ref{paragr:encode_comp_1cells}). \item[ \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [[]] ] \end{forest} } ] The map $F_{\treeLL}$ that to each $2$-cell $\alpha \colon f \to g$ of $A$ associates a $2$-cell \[F_{\treeLL}(\alpha) \colon F_{\treeLog}(f) \to F_{\treeLog}(g)\] of $B$ is defined to be $F_{\treeLL}(\alpha) = F(\alpha)$, with the notation of paragraph~\ref{paragr:2-cell_B}. 
\item[ \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [] [] [] ] \end{forest} } ] We define the map $F_{\treeW}$ by mapping any triple of $0$-composable $1$ cells \[ \begin{tikzcd}[column sep=small] a \ar[r, "f"] & a' \ar[r, "g"] & a'' \ar[r, "h"] & a''' \end{tikzcd} \] of $A$, \ie any $3$-simplex of $N_\infty(A)$ of the form \begin{center} \begin{tikzpicture}[scale=1.5] \squares{ /squares/label/.cd, 0=$a$, 1=$a'$, 2=$a''$, 3=$a'''$, 01=$f$, 12=$g$, 23=$h$, 02=$gf$, 03=$hgf$, 13=$hg$, 012=${=}$, 023=${=}$, 123=${=}$, 013=${=}$, /squares/arrowstyle/.cd, 012={phantom, description}, 023={phantom, description}, 123={phantom, description}, 013={phantom, description}, 0123={equal}, /squares/labelstyle/.cd, 012={anchor=center}, 023={anchor=center}, 123={anchor=center}, 013={anchor=center} } \end{tikzpicture}\ , \end{center} to the main $3$-cell $F_{\treeW}(h, g, f)$ of the $3$-simplex of $B$ image of the above $3$-simplex of $A$ by the morphism $F$: \begin{center} \begin{tikzpicture}[scale=1.5] \squares{ /squares/label/.cd, 0=$Fa$, 1=$Fa'$, 2=$Fa''$, 3=$Fa'''$, 01=$Ff$, 12=$Fg$, 23=$Fh$, 02=$F(gf)$, 03=$F(hgf)$, 13=$F(hg)$, 012=${F_{g, f}}$, 023=${F_{h, gf}}$, 123=${F_{h, g}}$, 013=${F_{hg, f}}$, 0123=${F(h, g, f)}$, /squares/arrowstyle/.cd, 012={phantom, description}, 023={phantom, description}, 123={phantom, description}, 013={phantom, description}, /squares/labelstyle/.cd, 012={anchor=center}, 023={anchor=center}, 123={anchor=center}, 013={anchor=center} } \end{tikzpicture}\ , \end{center} where $F_{h, gf} = F_{\treeV}(h, g \comp_0 f)$ and $F_{hg, f} = F_{\treeV}(h \comp_0 g, f)$, so that the $3$-cell $F_{\treeW}(h, g, f)$ has \[ F_{\treeLog}(h) \comp_0 F_{\treeV}(g, f) \comp_1 F_{\treeV}(h, g \comp_0 f) \] as source and \[ F_{\treeV}(h, g) \comp_0 F_{\treeLog}(f) \comp_1 F_{\treeV}(h \comp_0 g, f) \] as target. \item[ \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [] [[]] ] \end{forest} } ] Consider a whiskering \[ \begin{tikzcd}[column sep=4.5em] a \ar[r, bend left, "f", ""{below, name=f}] \ar[r, bend right, "f'"', ""{name=fp}] \ar[Rightarrow, from=f, to=fp, "\alpha"] & a' \ar[r,"g"] & a'' \end{tikzcd} \] of $A$ and the following associated $3$-simplex \begin{center} \begin{tikzpicture}[scale=1.5] \squares{ /squares/label/.cd, 0=$a$, 1=$a$, 2=$a'$, 3=$a''$, 12=$f'$, 23=$g$, 02=$f$, 03=$gf$, 13=$gf'$, 012=$\alpha$, 023=${=}$, 123=${=}$, 013={$g\alpha$}, /squares/arrowstyle/.cd, 01={equal}, 023={phantom, description}, 123={phantom, description}, 0123={equal}, /squares/labelstyle/.cd, 023={anchor=center}, 123={anchor=center} } \end{tikzpicture} \end{center} of $N_\infty(A)$, where we wrote $g\alpha$ for $g \comp_0 \alpha$. 
We define $F_{\treeVRight}(g, \alpha)$ to be the main $3$-cell of the image under $F$ of the above $3$-simplex: \begin{center} \begin{tikzpicture}[scale=1.5] \squares{ /squares/label/.cd, 0=$Fa$, 1=$Fa$, 2=$Fa'$, 3=$Fa''$, 12=$Ff'$, 23=$Fg$, 02=$Ff$, 03=$F(gf)$, 13=$F(gf')$, 012=$F\alpha$, 023=$F_{g, f}$, 123={$F_{g, f'}$}, 013={$Fg\alpha$}, 0123=${F(g, \alpha)}$, /squares/arrowstyle/.cd, 01={equal}, 012={phantom, description}, 023={phantom, description}, 123={phantom, description}, 013={phantom, description}, /squares/labelstyle/.cd, 012={anchor=center}, 023={anchor=center}, 123={anchor=center}, 013={anchor=center} } \end{tikzpicture}\ ; \end{center} so that \[ F_{\treeVRight}(\alpha, g) \colon F_{\treeLog}(g) \comp_0 F_{\treeLL}(\alpha) \comp_1 F_{\treeV}(g, f) \to F_{\treeV}(g, f') \comp_1 F_{\treeLL}(g \comp_0 \alpha) \] as a $3$-cell of $B$. \item[ \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [[]] [] ] \end{forest} } ] Consider a whiskering \[ \begin{tikzcd}[column sep=4.5em] a \ar[r, "f"] & a' \ar[r, bend left, "g", ""{below, name=g}] \ar[r, bend right, "g'"', ""{name=gp}]& a'' \ar[Rightarrow, from=g, to=gp, "\beta"] \end{tikzcd} \] of $A$ and the following associated $3$-simplex \begin{center} \begin{tikzpicture}[font=\footnotesize, scale=1.5] \squares{ /squares/label/.cd, 0=$a$, 1=$a'$, 2=$a''$, 3=$a''$, 01=$f$, 12=$g'$, 02=$gf$, 03=$gf$, 13=$g$, 012={$=$}, 023={$\beta f$}, 123=${\beta}$, 013=${=}$, /squares/arrowstyle/.cd, 23={equal}, 012={phantom, description}, 013={phantom, description}, 0123={equal}, /squares/labelstyle/.cd, 012={anchor=center}, 013={anchor=center} } \end{tikzpicture} \end{center} of $SN(A)$, where we wrote $\beta f$ for $\beta \comp_0 f$. We define $F_{\treeVLeft}(\beta, f)$ as the main $3$-cell of the image under $F$ of the above $3$-simplex: \begin{center} \begin{tikzpicture}[font=\footnotesize, scale=1.6] \squares{ /squares/label/.cd, 0=$Fa$, 1=$Fa'$, 2=$Fa''$, 3=$Fa''$, 01=$Ff$, 12=$Fg'$, 02=$F(gf)$, 03=$F(gf)$, 13=$Fg$, 012={$F_{g', f}$}, 023={$F(\beta f)$}, 123=${F\beta}$, 013=${F_{g, f}}$, 0123=${F(\beta, f)}$, /squares/arrowstyle/.cd, 23={equal} } \end{tikzpicture}\ ; \end{center} so that \[ F_{\treeVLeft}(\beta, f) \colon F_{\treeV}(g', f) \comp_1 F_{\treeLL}(\beta \comp_0 f) \to F_{\treeLL}(\beta) \comp_0 F_{\treeLog}(f) \comp_1 F_{\treeV}(g, f) \] as a $3$-cell of $B$. \item[ \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [[[]]] ] \end{forest} }] Consider a $3$-cell $\gamma \colon \alpha \to \alpha'$ of $A$, and the following associated $3$-simplex \begin{center} \begin{tikzpicture}[scale=1.6] \squares{ /squares/label/.cd, 12=$f'$, 02=$f'$, 03=$f$, 13=$f$, 012=${=}$, 023=$\alpha$, 123=$\alpha'$, 013=${=}$, 0123=$\gamma$, /squares/arrowstyle/.cd, 01={equal}, 23={equal}, 012={phantom, description}, 013={phantom, description}, /squares/labelstyle/.cd, 012={anchor=center}, 013={anchor=center} } \end{tikzpicture} \end{center} of $N_\infty(A)$. 
The $3$-cell \[F_{\treeLLL}(\gamma) \colon F_{\treeLL}(\alpha) \to F_{\treeLL}(\alpha')\] of $B$ is defined to be the main $3$-cell of the image under $F$ of the above $3$-simplex: \begin{center} \begin{tikzpicture}[scale=1.6] \squares{ /squares/label/.cd, 12=$Ff'$, 02=$Ff'$, 03=$Ff$, 13=$Ff$, 012=${=}$, 023=$F\alpha$, 123=$F\alpha'$, 013=${=}$, 0123=$F\gamma$, /squares/arrowstyle/.cd, 012={phantom, description}, 013={phantom, description}, /squares/labelstyle/.cd, 012={anchor=center}, 013={anchor=center} } \end{tikzpicture}\ . \end{center} \end{description} \end{paragr} These data satisfy the normalisation conditions. \begin{paragr}[Normalisation]\label{paragr:simplicial_to_cellular-norm} In this paragraph we shall commit the abuse of denoting the main cell of a simplex of $N_\infty(B)$ by the simplex itself. The normalisation conditions are an immediate consequence of the compatibility of $F$ with the degeneracies. \begin{description} \item[ \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [] \end{forest} } ] for any object $a$ of $A$, we have \[ F_{\treeLog}(1_a) = s^0_1(F_{\treeDot} a) = F_{\dgn{0}{1}}(a) = 1_{F_{\treeDot}(a)}\,; \] \item[ \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [] ] \end{forest} } ] for any $1$-cell $f$ of $A$, we have \[ F_{\treeLL}(1_f) = s^0_2(F_{\treeL} f) = F_{\dgn{0}{2}}(f) = 1_{F_{\treeLog}(f)}\,; \] \item[ \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [][] ] \end{forest} } ] for any $1$-cell $f \colon a \to a'$ of $A$, we have \[ F_{\treeV}(1_{a'}, f) = s^1_2(F_{\treeL} f) = 1_{F_{\treeL}(f)} = s^0_2(F_{\treeL}(f)) = F_{\treeV}(f, 1_a)\,; \] \item[ \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [ [] ] ] \end{forest} } ] for any $2$-cell $\alpha$ of $A$, we have \[ F_{\treeLLL}(1_\alpha) = s^0_3(\alpha_r) = F_{\dgn{0}{3}}(\alpha_r) = 1_{F_{\treeLL}(\alpha)}\,; \] \item[ \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [][][] ] \end{forest} } ] for any pair $a \xto{f} a' \xto{g} a''$ of $1$-cells of $A$, we have \begin{align*} F_{\treeW}(g, f, 1_a) &= s^0_3(F_{\treeV}(g, f)) = 1_{F_{\treeV}(g, f)}\,,\\ F_{\treeW}(g, 1_{a'}, f) &= s^1_3(F_{\treeV}(g, f)) = 1_{F_{\treeV}(g, f)}\,,\\ F_{\treeW}(1_{a''}, g, f) &= s^2_3(F_{\treeV}(g, f)) = 1_{F_{\treeV}(g, f)}\,; \end{align*} \item[ \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [][[]] ] \end{forest} } ] for any pair $a \xto{f} a' \xto{g} a''$ of $1$-cells of $A$, we have \[ F_{\treeVRight}(g, 1_f) = s^0_3(F_{\treeV}(g, f)) = 1_{F_{\treeV}(g, f)}\,, \] and for any $2$-cell $\alpha \colon f \to f'$ of $A$, we have \[ F_{\treeVRight}(1_{a'}, \alpha) = s^2_3(F_{\treeLL}(\alpha)) = 1_{F_{\treeLL}(\alpha)}\,; \] \item[ \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [[]][] ] \end{forest} } ] this normalisation is dual to the previous one. \end{description} \end{paragr} Checking the coherences is much more strenuous.
The next subsection is devoted to establishing that they hold, thus proving the following result. \begin{thm} Let $F \colon A \to B$ be a simplicial oplax $3$-morphism. With the data defined right above, $F$ is a normalised oplax $3$-functor from $A$ to $B$. \end{thm} \subsection{Coherences} Let $F \colon A \to B$ be a simplicial oplax $3$-morphism. In this subsection we are going to check that the data of~$F_{\treeDot}$, $F_{\treeLog}$, $F_{\treeV}$, $F_{\treeLL}$, $F_{\treeW}$, $F_{\treeVLeft}$, $F_{\treeVRight}$ and~$F_{\treeLLL}$ defined above satisfy the set of coherences for a normalised oplax $3$-functor defined in paragraph~\ref{paragr:lax_3functor_cellular_coherences}. We shall do so by showing that every coherence can be encoded in a particular $4$-simplex of~$N_\infty(B)$, image of a $4$\hyp{}simplex of~$N_\infty(A)$. We shall only draw the former, the latter being clear. Moreover, we shall omit the various whiskerings when denoting the $3$-cells of these $4$-simplices, which are nevertheless clear from the picture. \begin{description} \item[ \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [[][]] ] \end{forest} } ] For any pair of $1$-composable $2$-cells \[ \begin{tikzcd}[column sep=4.5em] a\phantom{'} \ar[r, bend left=50, looseness=1.2, "f", ""{below, name=f}] \ar[r, "g" description, ""{name=gu}, ""{below, name=gd}] \ar[r, bend right=50, looseness=1.2, "h"', ""{name=h}] \ar[Rightarrow, from=f, to=gu, "\alpha"] \ar[Rightarrow, from=gd, to=h, "\beta"]& a' \end{tikzcd} \] of $A$, all the images by $F$ of the related $3$-simplices have a trivial main $3$-cell of $B$; indeed, this is the case for the $3$-cells $\eps_{\text{l}}(\beta, \alpha)$, $\eps_{\text{r}}(\beta, \alpha)$, $\omega_{\text{l}}(\beta, \alpha)$ and $\omega_{\text{r}}(\beta, \alpha)$ of $B$. Thus we have an \emph{identity} $3$-cell \[ F_{\treeLL}(\beta) \comp_1 F_{\treeLL}(\alpha) \to F_{\treeLL}(\beta \comp_1 \alpha) \] of $B$, establishing the coherence for $\treeY$. \item[ \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [] [] [] [] ] \end{forest} } ] Consider a quadruple \[ \begin{tikzcd} \bullet \ar[r, "f"] & \bullet \ar[r, "g"] & \bullet \ar[r, "h"] & \bullet \ar[r, "i"] & \bullet \end{tikzcd} \] of $0$-composable $1$-cells of $A$. The $4$-simplex of $N_\infty(B)$ in figure~\ref{fig:coherence_ifgh}, \begin{figure} \caption{Establishing the coherence $ihgf$.} \label{fig:coherence_ifgh} \end{figure} where $F_{i, hgf} = F_{\treeV}(i, hgf)$ by definition, shows that the $3$-cells \begin{gather*} F_{\treeV}(i, h) \comp_0 F_{\treeLog}(g) \comp_0 F_{\treeLog}(f) \comp_1 F_{\treeW}(ih, g, f) \\ \comp_2\\ F_{\treeLog}(i) \comp_0 F_{\treeLog}(h) \comp_0 F_{\treeV}(g, f) \comp_1 F_{\treeW}(i, h, gf) \end{gather*} and \begin{gather*} F_{\treeW}(i, h, g) \comp_0 F_{\treeLog}(f) \comp_1 F_{\treeV}(i\comp_0 h \comp_0 g, f)\\ \comp_2\\ F_{\treeLog}(i) \comp_0 F_{\treeV}(h, g) \comp_0 F_{\treeLog}(f) \comp_1 F_{\treeW}(i, h \comp_0 g, f)\\ \comp_2 \\ F_{\treeLog}(i) \comp_0 F_{\treeW}(h, g, f) \comp_1 F_{\treeV}(i, h\comp_0 g \comp_0 f) \end{gather*} of $B$ are equal. This establishes the coherence \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [] [] [] [] ] \end{forest} }.
\item[ \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [ [] ] [] [] ] \end{forest} } ] Consider a triple \[ \begin{tikzcd}[column sep=4.5em] \bullet \ar[r, "f"] & \bullet \ar[r, "g"] & \bullet \ar[r, bend left, "h", ""{below, name=h}] \ar[r, bend right, "h'"', ""{name=h2}] \ar[Rightarrow, from=h, to=h2, "\alpha"] & \bullet \end{tikzcd} \] of $0$-composable cells $f$, $g$ and $\alpha$ of $A$. The $4$\hyp{}simplex of $N_\infty(B)$ depicted in figure~\ref{fig:coherence_alpha-g-f} ensures that the $3$-cells \begin{gather*} F_{\treeLL}(\alpha) \comp_0 F_{\treeLog}(g) \comp_0 F_{\treeLog}(f) \comp_1 F_{\treeW}(h, g, f)\\ \comp_2\\ F_{\treeLog}(h') \comp_0 F_{\treeV}(g, f) \comp_1 F_{\treeVLeft}(\alpha, g \comp_0 f) \end{gather*} and \begin{gather*} F_{\treeVLeft}(\alpha, g) \comp_0 F_{\treeLog}(f) \comp_1 F_{\treeV}(h\comp_0 g, f)\\ \comp_2\\ F_{\treeV}(h', g) \comp_0 F_{\treeLog}(f) \comp_1 F_{\treeVLeft}(\alpha \comp_0 g, f)\\ \comp_2\\ F_{\treeW}(h', g, f) \comp_1 F_{\treeLL}(\alpha \comp_0 g \comp_0 f) \end{gather*} of $B$ are equal. This establishes the coherence \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [ [] ] [] [] ] \end{forest} }. \begin{figure} \caption{Establishing the coherence $\alpha g f$.} \label{fig:coherence_alpha-g-f} \end{figure} \item[ \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [] [ [] ] [] ] \end{forest} } ] Consider a triple \[ \begin{tikzcd}[column sep=4.5em] \bullet \ar[r, "f"] & \bullet \ar[r, bend left, "g", ""{below, name=g}] \ar[r, bend right, "g'"', ""{name=g2}] \ar[Rightarrow, from=g, to=g2, "\alpha"] & \bullet \ar[r, "h"] & \bullet \end{tikzcd} \] of $0$-composable cells $f$, $\alpha$ and $h$ of $A$. The proof of the coherence for this cellular pasting diagram is quite involved and relies on the construction and analysis of four $4$-simplices of $N_\infty(B)$. The main such $4$-simplex is depicted in figure~\ref{fig:coherence_h-alpha-f}, where \[ \beta = F_{\treeV}(g', f) \comp_1 F_{\treeLL}(\alpha \comp_0 f) \quadet \gamma = F_{\treeV}(h\comp_0 g, f) \comp_1 F_{\treeLL}(h \comp_0 \alpha \comp_0 f)\,, \] and shows that the $3$-cell \begin{gather}\label{eq:coherence_h-alpha-f_up} F_{\treeVRight}(h, \alpha) \comp_0 F_{\treeLog}(f) \comp_1 F_{\treeV}(h \comp_0 g, f) \notag\\ \comp_2 \notag\\ F_{\treeLog}(h) \comp_0 F_{\treeLL}(\alpha) \comp_0 F_{\treeLog}(f) \comp_1 F_{\treeW}(h, g, f)\\ \comp_2\notag\\ F_{\treeLog}(h) \comp_0 \Omega \comp_1 F_{\treeV}(h, g\comp_0 f) \notag \end{gather} of $B$ is equal the following $3$-cell \begin{equation*} F_{\treeV}(h, g') \comp_0 F_{\treeLog}(f) \comp_1 \Phi\: \comp_2\: \Psi \end{equation*} of $B$. Now, the $4$-simplices depicted in figures~\ref{fig:Omega}, \ref{fig:Psi} and~\ref{fig:Phi} entail the equalities \[ \Omega = F_{\treeVLeft}(\alpha, f)\,, \] \[ \Phi = F_{\treeVLeft}(h\comp_0 \alpha , f) \] and \[ \Psi = F_{\treeW}(h, g', f) \comp_1 F_{\treeLL}(h\comp_0 \alpha \comp_0 f)\: \comp_2 \: F_{\treeLog}(h) \comp_0 F_{\treeV}(g', f) \comp_1 F_{\treeVRight}(h, \alpha \comp_0 f)\,. 
\] We can then conclude that the $3$-cell~\eqref{eq:coherence_h-alpha-f_up} of $B$ is equal to \begin{gather*} F_{\treeV}(h, g') \comp_0 F_{\treeLog}(f) \comp_1 F_{\treeVLeft}(h\comp_0 \alpha, f)\\ \comp_2\\ F_{\treeW}(h, g', f) \comp_1 F_{\treeLL}(h\comp_0 \alpha \comp_0 f)\\ \comp_2\\ F_{\treeLog}(h) \comp_0 F_{\treeV}(g', f) \comp_1 F_{\treeVRight}(h, \alpha \comp_0 f)\,, \end{gather*} thereby establishing the coherence of \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [] [ [] ] [] ] \end{forest} }. \begin{figure} \caption{Establishing the coherence $h\alpha f$.} \label{fig:coherence_h-alpha-f} \end{figure} \begin{figure} \caption{The $3$-cell $\Omega$ of $B$.} \label{fig:Omega} \end{figure} \begin{figure} \caption{The $3$-cell $\Psi$ of $B$.} \label{fig:Psi} \end{figure} \begin{figure} \caption{The $3$-cell $\Phi$ of $B$.} \label{fig:Phi} \end{figure} \item[ \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [] [] [ [] ] ] \end{forest} } ] Consider a triple \[ \begin{tikzcd}[column sep=4.5em] \bullet \ar[r, bend left, "f", ""{below, name=f}] \ar[r, bend right, "f'"', ""{name=f2}] \ar[Rightarrow, from=f, to=f2, "\alpha"] & \bullet \ar[r, "g"] & \bullet \ar[r, "h"] & \bullet \end{tikzcd} \] of $0$-composable cells $\alpha$, $g$ and $h$ of $A$. The $4$-simplex of $N_\infty(B)$ depicted in figure~\ref{fig:coherence_h-g-alpha}, totally symmetric to~\ref{fig:coherence_alpha-g-f}, shows that the $3$-cells \begin{gather*} F_{\treeW}(h, g, f') \comp_1 F_{\treeLL}(h \comp_0 g \comp_0 \alpha)\\ \comp_2\\ F_{\treeLog}(h) \comp_0 F_{\treeV}(g, f') \comp_1 F_{\treeVRight}(h, g \comp_0 \alpha)\\ \comp_2\\ F_{\treeLog}(h) \comp_0 F_{\treeVRight}(g, \alpha) \comp_1 F_{\treeV}(h, g \comp_0 f) \end{gather*} and \begin{gather*} F_{\treeV}(h, g) \comp_0 F_{\treeLog}(f') \comp_1 F_{\treeVRight}(h\comp_0 g, \alpha)\\ \comp_2\\ F_{\treeLog}(h) \comp_0 F_{\treeLog}(g) \comp_0 F_{\treeLL}(\alpha) \comp_1 F_{\treeV}(h, g \comp_0 f) \end{gather*} of $B$ are equal, therefore establishing the coherence \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [] [] [ [] ] ] \end{forest} }. \begin{figure} \caption{Establishing the coherence $hg\alpha$.} \label{fig:coherence_h-g-alpha} \end{figure} \begin{figure} \caption{Establishing the coherence $g\beta\alpha$.} \label{fig:coherence_g-beta-alpha} \end{figure} \item[ \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [ [] [] ] [] ] \end{forest} } ] Consider a triple \[ \begin{tikzcd}[column sep=4.5em] \bullet \ar[r, bend left=55, looseness=1.3, "f", ""{below, name=f1}] \ar[r, "f'"{description}, ""{name=f2u}, ""{below, name=f2d}] \ar[r, bend right=50, looseness=1.3, "f''"', ""{name=f3}] \ar[Rightarrow, from=f1, to=f2u, "\alpha"] \ar[Rightarrow, from=f2d, to=f3, "\beta"] & \bullet \ar[r, "g"] & \bullet \end{tikzcd} \] of cells $\alpha$, $\beta$ and $g$ of $A$ as in the drawing. 
The $4$-simplex of $N_\infty(B)$ depicted in figure~\ref{fig:coherence_g-beta-alpha} shows that the $3$-cells \begin{gather*} F_{\treeVRight}(g, \beta) \comp_1 F_{\treeLL}(g \comp_0 \alpha)\ \comp_2\\ F_{\treeLog}(g) \comp_0 F_{\treeLL}(\beta) \comp_1 F_{\treeVRight}(g, \alpha)\ \comp_2\\ F_{\treeLog}(g) \comp_0 F_{\treeY}(\beta, \alpha) \comp_1 F_{\treeV}(g, f) \end{gather*} and \[ F_{\treeV}(g, f'') \comp_1 F_{\treeY}( g\comp_0 \beta, g \comp_0 \alpha)\ \comp_2\ F_{\treeVRight}(g, \beta \comp_1 \alpha) \] of $B$ are equal. Since $F_{\treeY}(\beta, \alpha)$ and $F_{\treeY}( g\comp_0 \beta, g \comp_0 \alpha)$ are trivial by condition~\ref{cond:simpl_oplax-iii}, the $4$-simplex actually exhibits the equality of the $3$-cells \[ F_{\treeVRight}(g, \beta) \comp_1 F_{\treeLL}(g \comp_0 \alpha)\ \comp_2\ F_{\treeLog}(g) \comp_0 F_{\treeLL}(\beta) \comp_1 F_{\treeVRight}(g, \alpha) \] and \[ F_{\treeV}(g, f'') \comp_1 F_{\treeY}(\beta, \alpha) \] of $B$. \item[ \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [ [] [] ] [] ] \end{forest} } ] Consider a triple \[ \begin{tikzcd}[column sep=4.5em] \bullet \ar[r, "f"] & \bullet \ar[r, bend left=55, looseness=1.3, "g", ""{below, name=g1}] \ar[r, "f'"{description}, ""{name=g2u}, ""{below, name=g2d}] \ar[r, bend right=50, looseness=1.3, "g''"', ""{name=g3}] \ar[Rightarrow, from=g1, to=g2u, "\alpha"] \ar[Rightarrow, from=g2d, to=g3, "\beta"] & \bullet \end{tikzcd} \] of cells $\alpha$, $\beta$ and $g$ of $A$ as in the drawing. The $4$-simplex of $N_\infty(B)$ displayed in figure~\ref{fig:coherence_beta-alpha-f}, completely dual to the $4$-simplex~\ref{fig:coherence_g-beta-alpha}, shows that the $3$-cells \begin{gather*} \bigl(F_{\treeY}(\beta, \alpha) \comp_0 F_{\treeLog}(f) \comp_1 F_{\treeV}(g, f)\bigr) \comp_2\\ \bigl(F_{\treeLL}(\beta) \comp_0 F_{\treeLog}(f) \comp_1 F_{\treeVLeft}(\alpha, f) \bigr)\comp_2\\ \bigl(F_{\treeVLeft}(\beta, f) \comp_1 F_{\treeLL}(\alpha \comp_0 f) \bigr) \end{gather*} and \[ F_{\treeVLeft}(\beta \comp_1 \alpha, f)\ \comp_2\ F_{\treeV}(g'', f) \comp_1 F_{\treeY}(\beta \comp_0 f, \alpha \comp_0 f) \] of $B$ are equal. Since the $3$-cells $F_{\treeY}(\beta, \alpha)$ and $F_{\treeY}(\beta \comp_0 f, \alpha \comp_0 f)$ are trivial by condition~\ref{cond:simpl_oplax-iii}, the $4$-simplex is actually imposing the equality of the $3$-cells \[ F_{\treeLL}(\beta) \comp_0 F_{\treeLog}(f) \comp_1 F_{\treeVLeft}(\alpha, f)\ \comp_2\ F_{\treeVLeft}(\beta, f) \comp_1 F_{\treeLL}(\alpha \comp_0 f) \] and \[ F_{\treeVLeft}(\beta \comp_1 \alpha, f) \] of $B$. \begin{figure} \caption{Establishing the coherence $\beta\alpha f$.} \label{fig:coherence_beta-alpha-f} \end{figure} \item[ \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [ [] ] [ [] ] ] \end{forest} } ] \begin{figure} \caption{Establishing the coherence $\beta \alpha$.} \label{fig:coherence_beta-alpha} \end{figure} Consider a pair \[ \begin{tikzcd}[column sep=4.5em] \bullet \ar[r, bend left, "f", ""{below, name=f1}] \ar[r, bend right, "f'"', ""{name=f2}] \ar[Rightarrow, from=f1, to=f2, "\alpha"] & \bullet \ar[r, bend left, "g", ""{below, name=g1}] \ar[r, bend right, "g'"', ""{name=g2}] \ar[Rightarrow, from=g1, to=g2, "\beta"] & \bullet \end{tikzcd} \] of $0$-composable $2$-cells $\alpha$ and $\beta$ of $A$. 
The $4$-simplex of $N_\infty(B)$ depicted in figure~\ref{fig:coherence_beta-alpha} shows that the $3$-cells \[ F_{\treeVLeft}(\beta, f') \comp_1 F_{\treeLL}(g\comp_0 \alpha)\ \comp_2\ F_{\treeV}(g', f') \comp_1 F_{\text{ex}}(\beta, \alpha)\ \comp_2\ F_{\treeVRight}(g', \alpha) \comp_1 F_{\treeLL}(\beta \comp_0 f) \] and \[ F_{\treeLL}(\beta) \comp_0 F_{\treeLog}(f') \comp_1 F_{\treeVRight}(g, \alpha)\ \comp_2\ F_{\treeLog}(g') \comp_0 F_{\treeLL}(\alpha) \comp_1 F_{\treeVLeft}(\beta, f) \] of $B$ are equal. Here we denote by $F_{\text{ex}}(\beta, \alpha)$ the identity $3$-cell going from $F_{\treeLL}(g' \comp_0 \alpha) \comp_1 F_{\treeLL}(\beta \comp_0 f)$ to $F_{\treeLL}(\beta \comp_0 f') \comp_1 F_{\treeLL}(g \comp_0 \alpha)$ that we get from the composition of the following pair of trivial $3$-cells \[ \begin{tikzcd}[column sep=-6em] \null & F_{\treeLL}(g' \comp_0 \alpha \comp_1 \beta \comp_0 f) \arrow[triple, swap, "{F_{\treeY}(g'\comp_0 \alpha, \beta \comp_0 f)}"]{ldd} = F_{\treeLL}(\beta \comp_0 f' \comp_1 g \comp_0 \alpha) \arrow[triple, "{F_{\treeY}(\beta \comp_0 f', g \comp_0 \alpha)}"]{rdd} & \null \\ \\ F_{\treeLL}(g' \comp_0 \alpha) \comp_1 F_{\treeLL}(\beta \comp_0 f) & \null & F_{\treeLL}(\beta \comp_0 f') \comp_1 F_{\treeLL}(g \comp_0 \alpha) \end{tikzcd} \] of $B$, where the equality in the upper row is just the exchange law. Since $F_{\text{ex}}(\beta, \alpha)$ is a trivial $3$-cell, the $4$-simplex is actually imposing the equality between the $3$-cells \[ F_{\treeVLeft}(\beta, f') \comp_1 F_{\treeLL}(g\comp_0 \alpha)\ \comp_2\ F_{\treeVRight}(g', \alpha) \comp_1 F_{\treeLL}(\beta \comp_0 f) \] and \[ F_{\treeLL}(\beta) \comp_0 F_{\treeLog}(f') \comp_1 F_{\treeVRight}(g, \alpha)\ \comp_2\ F_{\treeLog}(g') \comp_0 F_{\treeLL}(\alpha) \comp_1 F_{\treeVLeft}(\beta, f) \] of $B$, thereby establishing the coherence \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [ [] ] [ [] ] ] \end{forest} }. \item[ \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [ [] [] [] ] ] \end{forest} } ] Consider a triple \[ \begin{tikzcd}[column sep=4.7em] \bullet \ar[r, bend left=80, looseness=1.6, ""{below, name=1}] \ar[r, bend left, ""{name=2u}, ""{below, name=2d}] \ar[r, bend right, ""{name=3u}, ""{below, name=3d}] \ar[r, bend right=80, looseness=1.6, ""{name=4}] \ar[Rightarrow, from=1, to=2u, "\alpha"] \ar[Rightarrow, from=2d, to=3u, "\beta"] \ar[Rightarrow, from=3d, to=4, "\gamma"] & \bullet \end{tikzcd} \] of $1$-composable $2$-cells $\alpha$, $\beta$ and $\gamma$ of $A$. The simplicial oplax $3$-morphism $F$ trivially satisfies the coherence associated to this tree, which is the trivial equality between the following identity $3$-cells \[ F_{\treeY}(\gamma\comp_1 \beta, \alpha) \comp_2 F_{\treeY}(\gamma, \beta) \comp_1 F_{\treeLL}(\alpha) \] and \[ F_{\treeY}(\gamma, \beta\comp_1 \alpha) \comp_2 F_{\treeLL}(\gamma) \comp_1 F_{\treeY}(\beta, \alpha) \] of $B$. This coherence is encoded in the $4$-simplex of $N_\infty(B)$ depicted in figure~\ref{fig:coherence_gamma-beta-alpha}.
\begin{figure} \caption{Representing the trivial coherence $\gamma\beta\alpha$} \label{fig:coherence_gamma-beta-alpha} \end{figure} \item[ \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [ [] [ [] ] ] ] \end{forest} } ] Consider a pair \[ \begin{tikzcd}[column sep=5em] \bullet \ar[r, bend left=60, looseness=1.2, ""{below, name=1}] \ar[r, ""{name=2u}, ""{below, name=2d}] \ar[r, bend right=60, looseness=1.2, ""{name=3}] \ar[Rightarrow, from=1, to=2u, "\alpha"] \ar[Rightarrow, from=2d, to=3, shift right=2.6ex, ""{name=beta1}] \ar[Rightarrow, from=2d, to=3, shift left=2.6ex, ""'{name=beta2}] \arrow[triple, from=beta1, to=beta2, "\gamma"]{} & \bullet \end{tikzcd} \] of $1$-composable cells $\alpha$ and $\gamma$ of $A$. The $4$-simplex of $N_\infty(B)$ depicted in figure~\ref{fig:coherence_gamma-alpha} shows that the $3$-cells \[ F_{\treeLLL}(\gamma) \comp_1 F_{\treeLL}(\alpha) \] and \[ F_{\treeLLL}(\gamma \comp_1 \alpha) \] of $B$ are equal, which establishes the coherence for this tree. \begin{figure} \caption{Establishing the coherence $\gamma \alpha$.} \label{fig:coherence_gamma-alpha} \end{figure} \item[ \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [ [ [] ] [] ] ] \end{forest} } ] For any pair \[ \begin{tikzcd}[column sep=5em] \bullet \ar[r, bend left=60, looseness=1.2, ""{below, name=1}] \ar[r, ""{name=2u}, ""{below, name=2d}] \ar[r, bend right=60, looseness=1.2, ""{name=3}] \ar[Rightarrow, from=2d, to=3, "\beta"] \ar[Rightarrow, from=1, to=2u, shift right=2.6ex, ""{name=beta1}] \ar[Rightarrow, from=1, to=2u, shift left=2.6ex, ""'{name=beta2}] \arrow[triple, from=beta1, to=beta2, "\gamma"]{} & \bullet \end{tikzcd} \] of $1$-composable cells $\gamma$ and $\beta$ of $A$, there is a $4$-simplex of $N_\infty(B)$ dual to the one depicted in~\ref{fig:coherence_gamma-alpha} showing the equality between the $3$-cells \[ F_{\treeLL}(\beta) \comp_1 F_{\treeLLL}(\gamma) \] and \[ F_{\treeLLL}(\beta \comp_1 \gamma) \] of $B$ and thus establishing the coherence for the tree \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [ [ [] ] [] ] ] \end{forest} }. \begin{figure} \caption{Establishing the coherence $\Gamma'\Gamma$.} \label{fig:coherence_Gamma-Gamma} \end{figure} \item[ \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [ [ [] [] ] ] ] \end{forest} } ] Consider a pair \[ \begin{tikzcd}[column sep=7em] \bullet \ar[r, bend left=60, looseness=1.2, "f", "\phantom{bullet}"'{name=1}] \ar[r, bend right=60, looseness=1.2, "g"', "\phantom{bullet}"{name=3}] \ar[Rightarrow, from=1, to=3, shift right=4ex, bend right, ""{name=beta1}] \ar[Rightarrow, from=1, to=3, ""'{name=beta2d}, ""{name=beta2u}] \ar[Rightarrow, from=1, to=3, shift left=4ex, bend left, ""'{name=beta3}] \arrow[triple, from=beta1, to=beta2d, "\Gamma"]{} \arrow[triple, from=beta2u, to=beta3, "\Gamma'"]{} & \bullet \end{tikzcd} \] of $2$-composable $3$-cells $\Gamma\colon \alpha \to \beta$ and $\Gamma'\colon \beta \to \delta$ of $A$. 
The $4$-simplex of $N_\infty(B)$ displayed in figure~\ref{fig:coherence_Gamma-Gamma} shows that we have the equality \[ F_{\treeLLL}(\Gamma' \comp_2 \Gamma) = F_{\treeLLL}(\Gamma') \comp_2 F_{\treeLLL}(\Gamma) \] between these two $3$-cells of $B$. \begin{figure} \caption{Establishing the coherence $\Gamma f$.} \label{fig:coherence_Gamma-f} \end{figure} \begin{figure} \caption{The $3$-cell $\Delta$ of $B$.} \label{fig:Delta} \end{figure} \item[ \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [] [ [ [] ] ] ] \end{forest} } ] Consider a pair \[ \begin{tikzcd}[column sep=5em] \bullet \ar[r, "f"] & \bullet \ar[r, bend left=60, looseness=1.2, "g", "\phantom{bullet}"'{ name=1}] \ar[r, bend right=60, looseness=1.2, "g'"', "\phantom{bullet}"{name=3}] \ar[Rightarrow, from=1, to=3, shift right=2ex, bend right, ""{name=beta1}] \ar[Rightarrow, from=1, to=3, shift left=2ex, bend left, ""'{name=beta3}] \arrow[triple, from=beta1, to=beta3, "\Gamma"]{} & \bullet \end{tikzcd} \] of $0$-composable cells $f$ and $\Gamma \colon \alpha \to \beta$ of $A$. The $4$-simplex of $N_\infty(B)$ depicted in figure~\ref{fig:coherence_Gamma-f} shows the equality \[ F_{\treeVLeft}(\beta, f) \comp_1 F_{\treeLLL}(\Gamma \comp_0 f) = \Delta \comp_0 F_{\treeLog}(f) \comp_1 F_{\treeVLeft}(\alpha, f)\,. \] The $3$-cell $\Delta$ is in fact equal to $F_{\treeLLL}(\Gamma)$, as the $4$-simplex of~$N_\infty(B)$ depicted in figure~\ref{fig:Delta} shows. Hence, the coherence for this tree is verified. \item[ \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [ [ [] ] ] [] ] \end{forest} } ] Consider a pair \[ \begin{tikzcd}[column sep=5em] \bullet \ar[r, bend left=60, looseness=1.2, "f", "\phantom{bullet}"'{name=1}] \ar[r, bend right=60, looseness=1.2, "f'"', "\phantom{bullet}"{name=3}] \ar[Rightarrow, from=1, to=3, shift right=2ex, bend right, ""{name=beta1}] \ar[Rightarrow, from=1, to=3, shift left=2ex, bend left, ""'{name=beta3}] \arrow[triple, from=beta1, to=beta3, "\Gamma"]{} & \bullet \ar[r, "g"] & \bullet \end{tikzcd} \] of $0$-composable cells $\Gamma\colon \alpha \to \beta$ and $g$ of $A$. There is a $4$-simplex of $N_\infty(B)$ dual to the one depicted in figure~\ref{fig:coherence_Gamma-f} showing that the following equality \[ F_{\treeVRight}(g, \beta) \comp_1 F_{\treeLLL}(g \comp_0 \Gamma) = F_{\treeLog}(g) \comp_0 F_{\treeLLL}(\Gamma) \comp_1 F_{\treeVRight}(g, \alpha) \] of $3$-cells of $B$ holds, thus establishing the coherence \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [ [ [] ] ] [] ] \end{forest} }. \end{description} \section{Correspondence} The results of the preceding sections give us an assignment that associates to a simplicial oplax $3$-morphism $F$ a normalised oplax $3$-functor, which we shall denote by $c_l(F)$, as well as an assignment in the opposite direction associating to each normalised oplax $3$-functor $G$ a simplicial oplax $3$-morphism $\SNn{l}(G)$. These two assignments are actually inverses of one another, so that we have a precise bijective correspondence between oplax $3$-functors and simplicial oplax $3$-morphisms. The aim of this section is to check this statement.
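Spelled out with the notation just introduced, the statement to be checked amounts to the two equalities
\[
c_l\bigl(\SNn{l}(F)\bigr) = F
\qquad\text{and}\qquad
\SNn{l}\bigl(c_l(G)\bigr) = G\,,
\]
for every normalised oplax $3$-functor $F$ and every simplicial oplax $3$-morphism $G$; the first equality is dealt with in the next paragraph, and the second one in the paragraphs that follow.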
\begin{paragr} It follows immediately from the definitions and from conditions~\ref{cond:simpl_oplax-i}, \ref{cond:simpl_oplax-ii} and~\ref{cond:simpl_oplax-iii} that given any normalised oplax $3$-functor $F \colon A \to B$, we have an equality $F = c_l\SNn{l}(F)$. For instance, for the tree \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [][][] ] \end{forest} } we have that to any triple $(h, g, f)$ of composable $1$-cells of $A$, the simplicial oplax $3$-morphism $\SNn{l}(F) \colon N_\infty(A) \to N_\infty(B)$ associates the $3$-simplex \begin{center} \begin{tikzpicture}[scale=1.4, font=\footnotesize] \squares{ /squares/label/.cd, 0=$\bullet$, 1=$\bullet$, 2=$\bullet$, 3=$\bullet$, 01=${F_{\treeL}(f)}$, 12=${F_{\treeL}(g)}$, 23=${F_{\treeL}(h)}$, 02=${F_{\treeL}(gf)}$, 03=${F_{\treeL}(hgf)}$, 13=${F_{\treeL}(hg)}$, 012=${F_{\treeV}(g, f)}$, 023=${F_{\treeV}(h, gf)\phantom{o}}$, 123=${F_{\treeV}(h, g)}$, 013=${\phantom{o}F_{\treeV}(hg, f)}$, 0123=${F_{\treeW}(h, g, f)}$, /squares/arrowstyle/.cd, 012={phantom, description}, 023={phantom, description}, 123={phantom, description}, 013={phantom, description}, /squares/labelstyle/.cd, 012={anchor=center}, 023={anchor=center}, 123={anchor=center}, 013={anchor=center} } \end{tikzpicture} \end{center} of $N_\infty(B)$; by definition, we set $c_l\SNn{l}(F)_{\treeW}(h, g, f)$ to be the main $3$-cell of this $3$-simplex, \ie $F_{\treeW}(h, g, f)$. \end{paragr} \begin{paragr} Let $F \colon N_\infty(A) \to N_\infty(B)$ be a simplicial oplax $3$-morphism. We have seen in subsection~\ref{section:simplicial-to-cellular} that there is a canonically associated normalised oplax $3$-functor $c_l(F) \colon A \to B$. Moreover, in subsection~\ref{section:cellular-to-simplicial} we have shown that given any $n$-simplex of $N_\infty(A)$ in the form of a normalised oplax $3$-functor $x \colon \Deltan{n} \to A$, for $n \ge 0$, we get an $n$-simplex $\SNn{l}c_l F(x)$ of $N_\infty(B)$ given by the composition $c_l(F)\circ x$. We want to check that $F(x) = \SNn{l}c_l F(x)$. Since $N_\infty(B)$ is $4$-coskeletal (see~\cite[Theorem 5.2]{Street}), it is enough to check that $F(x) = \SNn{l}c_l F(x)$ for all $n$-simplices $x$ of $N_\infty(A)$ with $0 \le n \le 4$. The result is trivially verified for $0$-simplices and $1$-simplices. Consider a $2$\nbd-sim\-plex~$x$ \[ \begin{tikzcd}[column sep = small] & \bullet \ar[dr, "g"]& \\ \bullet \ar[ur, "f"] \ar[rr, "gf"', ""{name=gf}] && \bullet \ar[Rightarrow, from=gf, to=1-2, shorten <=1mm, shorten >=1mm, "\alpha"] \end{tikzcd} \] of $N_\infty(A)$. The $1$-skeletons of $F(x)$ and $\SNn{l}c_l F(x)$ coincide, and the $2$-cell of $B$ filling the $2$-simplex $\SNn{l}c_l F(x)$ is defined by $c_l F_{\treeV}(g, f) \comp_1 c_l F_{\treeLL}(\alpha)$. Condition~\ref{cond:simpl_oplax-ii} states precisely that this is the $2$-cell of $B$ filling the $2$-simplex $F(x)$. \begin{figure} \caption{The $3$-simplex $x$ of $N_\infty(A)$.} \label{fig:Gamma-corr} \end{figure} Consider a $3$-simplex $x$ of $N_\infty(A)$ as depicted in figure~\ref{fig:Gamma-corr}.
By the definition given in paragraph~\ref{paragr:def_cellular_to_simplicial}, the main $3$-cell of the $3$-simplex $\SNn{l}c_l F(x)$ of $N_\infty(B)$ is defined as \begin{gather}\label{eq:3cell-3simplex} c_l F_{\treeV}(h, g) \comp_0 c_l F(f) \comp_1 c_l F_{\treeVLeft}(\delta, f) \comp_1 c_l F_{\treeLL}(\gamma) \notag\\ \comp_2\notag\\ c_l F_{\treeW}(h, g, f) \comp_1 c_l F_{\treeLLL}(\Gamma) \\ \comp_2 \notag\\ c_l F(h) \comp_0 F_{\treeV}(g, f) \comp_1 c_l F_{\treeVRight}(h, \beta) \comp_1 c_l F_{\treeLL}(\alpha)\,. \notag \end{gather} We already know that the $2$-skeletons of $F(x)$ and $\SNn{l}c_l F(x)$ coincide, so we are left with showing that the main $3$-cell $\Psi$ of the $3$-simplex $F(x)$ of $N_\infty(B)$ corresponds to the above $3$-cell. This demands a careful analysis of some $4$-simplices encoding valuable information for an explicit description of $F(x)$, which will be carried out in the next paragraph. We end this paragraph by remarking that once the correspondence for $3$-simplices is verified, the correspondence for $4$-simplices follows easily, since it is completely characterised by the $2$-composition of $3$-cells in $B$. \end{paragr} \begin{paragr}\label{paragr:image_3-simplex} \begin{figure} \caption{The $4$-simplex $y$ of $N_\infty(A)$.} \label{fig:Gamma1-corr} \end{figure} \begin{figure} \caption{The $3$-cells $(\bigstar)$ and $(\spadesuit)$.} \label{fig:Star} \label{fig:Spadesuit} \end{figure} \begin{figure} \caption{The $4$-simplex $z$ of $N_\infty(A)$.} \label{fig:Gamma3} \end{figure} \begin{figure} \caption{The $4$-simplex $z'$ of $N_\infty(A)$.} \label{fig:Gamma4} \end{figure} Consider a $3$-simplex $x$ of $N_\infty(A)$ as depicted in figure~\ref{fig:Gamma-corr} and call $\Psi$ the main $3$-cell of the $3$-simplex $F(x)$ of $N_\infty(B)$. The $4$-simplex of $N_\infty(A)$ depicted in figure~\ref{fig:Gamma1-corr} shows that $\Psi$ is given by the $2$-composition of the image of the $3$-cell denoted by $(\bigstar)$, whiskered with $c_l F(\alpha)$, followed by the image of the main $3$-cell of the following $3$-simplex \begin{center} \begin{tikzpicture}[scale=1.3] \squares{ /squares/label/.cd, 0=$\bullet$, 1=$\bullet$, 2=$\bullet$, 3=$\bullet$, 01=$f$, 12=$hg$, 02=$hj$, 03=$i$, 13=$k$, 23={}, 012=$h\beta$, 023=$\alpha$, 123=$\delta$, 013=$\gamma$, 0123=$\Gamma$, /squares/arrowstyle/.cd, 23={equal} } \end{tikzpicture} \end{center} of $N_\infty(A)$, which is a $3$-cell of $N_\infty(B)$ that we shall call $\Phi$, whiskered by $c_l F_{\treeV}(h, g)$. The $4$-simplices depicted in figure~\ref{fig:Star} show that the image under $F$ of the $3$-cell $(\bigstar)$ is given by \[ c_l F_{\treeLL}(\spadesuit) \comp_2 c_l F_{\treeV}(g, f) \comp_1 c_l F_{\treeVRight}(h, \beta) \] and in turn the $4$-simplex of $N_\infty(A)$ depicted in figure~\ref{fig:Spadesuit} entails that \[ c_l F_{\treeLL}(\spadesuit) = c_l F_{\treeW}(h, g, f) \comp_1 c_l F_{\treeLL}(h\beta)\,. \] Therefore we get that $\Psi$ is given by the composition \begin{gather*} c_l F_{\treeV}(h, g) \comp_0 c_l F_{\treeL}(f) \comp_1 \Phi \\ \comp_2\\ c_l F_{\treeW}(h, g, f) \comp_1 c_l F_{\treeLL}(h\comp_0 \beta) \comp_1 c_l F_{\treeLL}(\alpha) \\ \comp_2\\ c_l F(h) \comp_0 F_{\treeV}(g, f) \comp_1 c_l F_{\treeVRight}(h, \beta) \comp_1 c_l F_{\treeLL}(\alpha)\,.
\end{gather*} Finally, the $4$-simplex~$z$ of~$N_\infty(A)$ depicted in figure~\ref{fig:Gamma3}, together with the $4$-simplex~$z'$ of $N_\infty(A)$ depicted in figure~\ref{fig:Gamma4}, where we have denoted by $\beta + \alpha$ and $\delta + \gamma$ the evident whiskered compositions, and another one, totally similar but dual, show that the $3$-cell $\Phi$ of $B$ is in fact \begin{gather*} c_l F_{\treeV}(h, g) \comp_0 c_l F_{\treeL}(f) \comp_1 c_l F_{\treeVLeft}(\delta, f) \comp_1 c_l F_{\treeLL}(\gamma) \\ \comp_2 \\ c_l F_{\treeV}(h, g) \comp_0 c_l F_{\treeL}(f) \comp_1 c_l F_{\treeV}(h\comp_0 g, f) \comp_1 c_l F_{\treeLLL}(\Gamma)\,. \end{gather*} Using the interchange law we immediately deduce from the above that the $3$-cell $\Psi$ of $B$ is precisely the $3$-cell detailed in~\eqref{eq:3cell-3simplex}. \end{paragr} \begin{paragr} Given two normalised oplax $3$-functors $F \colon A \to B$ and $G \colon B \to C$, we now check that the ``obvious'' candidate for the composite oplax $3$-functor $G\circ F$ corresponds via the bijections established above to the composite of the simplicial oplax $3$-morphisms associated to $F$ and $G$. That is, we shall show that $G \circ F = c_l(\SNn{l} G \circ \SNn{l} F)$. Hence, we will deduce that normalised oplax $3$-functors admit a composition operation and that the category of $3$-categories and normalised oplax $3$-functors is isomorphic to the category of $3$-categories and simplicial oplax $3$-morphisms. We already know from section~\ref{section:cellular-to-simplicial} that: \begin{description} \item[\scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ ] \end{forest} }] for any object $a$ of $A$, we have $GF_{\treeDot}(a) = G_{\treeDot}\bigl(F_{\treeDot}(a)\bigr)$; \item[\scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [] ] \end{forest} }] for any $1$-cell $f \colon a \to a'$ of $A$, we have $GF_{\treeL}(f) = G_{\treeL}\bigl(F_{\treeL}(f)\bigr)$; \item[\scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [][] ] \end{forest} }] for any pair of $0$\hyp{}composable $1$\hyp{}cells \[ \begin{tikzcd} a \ar[r, "f"] & a' \ar[r, "g"] & a'' \end{tikzcd} \] of $A$, we have $GF_{\treeV}(g, f) = G_{\treeV}\bigl(F_{\treeL}(g), F_{\treeL}(f)\bigr) \comp_1 G_{\treeLL}\bigl(F_{\treeV}(g, f)\bigr)$; \item[\scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [[]] ] \end{forest} }] for any $2$-cell $\alpha \colon f \to g$ of $A$, we have $GF_{\treeLL}(\alpha) = G_{\treeLL}\bigl(F_{\treeLL}(\alpha)\bigr)$; \item[\scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [][][] ] \end{forest} }] for any triple of $0$-composable $1$-cells \[ \begin{tikzcd}[column sep=small] a \ar[r, "f"] & a' \ar[r, "g"] & a'' \ar[r, "h"] & a''' \end{tikzcd} \] of $A$, we have that $GF_{\treeW}(h, g, f)$ is the $3$-cell \begin{gather*} G_{\treeV}(F_{\treeLog}h, F_{\treeLog}g) \comp_0 G_{\treeLog}(F_{\treeLog}f) \comp_1 G_{\treeVLeft}(F_{\treeV}(h, g), F_{\treeLog}f)\\ \comp_2\\ G_{\treeW}(F_{\treeLog}h, F_{\treeLog}g, F_{\treeLog}f) \comp_1 G_{\treeLLL}(F_{\treeW}(h, g, f)) \\ \comp_2\\ GF_{\treeLog}(h) \comp_0 G_{\treeV}(F_{\treeLog}g, F_{\treeLog}f) \comp_1
G_{\treeVRight}(F_{\treeLog}h, F_{\treeV}(g, f)) \comp_1 G_{\treeLL}(F_{\treeV}(h, gf))\,. \end{gather*} \end{description} As for the remaining trees, we have: \begin{description} \item[\scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [][[]] ] \end{forest} }] for any whiskering \[ \begin{tikzcd}[column sep=4.5em] \bullet \ar[r, bend left, "f", ""{below, name=f}] \ar[r, bend right, "f'"', ""{name=fp}] \ar[Rightarrow, from=f, to=fp, "\alpha"] & \bullet \ar[r,"g"] & \bullet \end{tikzcd} \] of $A$, we define $GF_{\treeVRight}(g, \alpha)$ to be the $3$-cell \begin{gather*} G_{\treeV}\big(F_{\treeL}(g), F_{\treeL}(f')\bigr) \comp_1 G_{\treeLLL}\bigl(F_{\treeVRight}(g, \alpha)\bigr) \\ \comp_2 \\ G_{\treeVRight}\bigl(F_{\treeL}(g), F_{\treeLL}(\alpha)\bigr) \comp_1 G_{\treeLL}\bigl(F_{\treeV}(g, f)\bigr) \end{gather*} of $C$. \item[\scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [[]][] ] \end{forest} }] for any whiskering \[ \begin{tikzcd}[column sep=4.5em] \bullet \ar[r, "f"] & \bullet \ar[r, bend left, "g", ""{below, name=g}] \ar[r, bend right, "g'"', ""{name=gp}]& \bullet \ar[Rightarrow, from=g, to=gp, "\beta"] \end{tikzcd} \] of $A$, the $3$-cell $GF_{\treeVLeft}(\beta, f)$ is defined to be \begin{gather*} G_{\treeVLeft}\bigl(F_{\treeLL}(\beta), F_{\treeL}(f)\bigr) \comp_1 G_{\treeLL}\bigl(F_{\treeV}(g, f)\bigr) \\ \comp_2 \\ G_{\treeV}\big(F_{\treeL}(g'), F_{\treeL}(f)\bigr) \comp_1 G_{\treeLLL}\bigl(F_{\treeVLeft}(\beta, f)\bigr)\,. \end{gather*} \item[\scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [[[]]] ] \end{forest} }] for any $3$-cell $\gamma \colon \alpha \to \alpha'$ of $A$, we set \[ GF_{\treeLLL}(\gamma) = G_{\treeLLL}\bigl(F_{\treeLLL}(\gamma)\bigr)\,. \] \end{description} \end{paragr} \begin{paragr} We have to show that the definition we have given for the composition for the trees $\treeVLeft$, $\treeVRight$ and $\treeLLL$ agree with the data encoded by the composition of the associated simplicial morphisms. \begin{description} \item[\scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [][[]] ] \end{forest} }] given a whiskering \[ \begin{tikzcd}[column sep=4.5em] \bullet \ar[r, bend left, "f", ""{below, name=f}] \ar[r, bend right, "f'"', ""{name=fp}] \ar[Rightarrow, from=f, to=fp, "\alpha"] & \bullet \ar[r,"g"] & \bullet \end{tikzcd} \] of $A$, consider the $3$-simplex~$x$ \begin{center} \begin{tikzpicture}[scale=1.3, font=\footnotesize] \squares{ /squares/label/.cd, 0=$\bullet$, 1=$\bullet$, 2=$\bullet$, 3=$\bullet$, 01={}, 12=$f'$, 23=$g$, 02=$f$, 03=$gf$, 13=$gf'$, 012=$\alpha$, 023={}, 123={}, 013=$g\comp_0 \alpha$, 0123=$1_{g\comp_0 \alpha}$, /squares/arrowstyle/.cd, 01={equal}, 023={equal}, 123={equal} } \end{tikzpicture} \end{center} of $N_\infty(A)$. 
This is sent by $\SNn{l}(G)\SNn{l}(F)$ to the $3$-simplex \begin{center} \begin{tikzpicture}[scale=1.8, font=\footnotesize] \squares{ /squares/label/.cd, 0=$\bullet$, 1=$\bullet$, 2=$\bullet$, 3=$\bullet$, 01={}, 12=${GF_{\treeL}(f')}$, 23=${GF_{\treeL}(g)}$, 02=${GF_{\treeL}(f)}$, 03=${GF_{\treeL}(gf)}$, 13=${GF_{\treeL}(gf')}$, 012=${GF_{\treeLL}(\alpha)}$, 023=${GF_{\treeV}(g, f)\phantom{O}}$, 123=${GF_{\treeV}(g, f')}$, 013=${\phantom{OO}GF_{\treeLL}(g\comp_0 \alpha)}$, 0123=${\SNn{l}(G)\bigl(F_{\treeVRight}(g, \alpha)\bigr)}$, /squares/arrowstyle/.cd, 01={equal}, 012={phantom, description}, 023={phantom, description}, 123={phantom, description}, 013={phantom, description}, /squares/labelstyle/.cd, 012={anchor=center}, 023={anchor=center}, 123={anchor=center}, 013={anchor=center} } \end{tikzpicture} \end{center} of $N_\infty(C)$, where we have denoted by $\SNn{l}(G)\bigl(F_{\treeVRight}(g, \alpha)\bigr)$ the image under $\SNn{l}(G)$ of the main $3$-cell $F_{\treeVRight}(g, \alpha)$ of the $3$-simplex $\SNn{l}(F)(x)$. By paragraph~\ref{paragr:image_3-simplex}, we find that the $3$-cell $\SNn{l}(G)\bigl(F_{\treeVRight}(g, \alpha)\bigr)$ of $C$ is precisely \begin{gather*} G_{\treeV}\bigl(F_{\treeL}(g), F_{\treeL}(f')\bigr) \comp_1 G_{\treeLLL}\bigl(F_{\treeVRight}(g, \alpha)\bigr) \\ \comp_2 \\ G_{\treeVRight}\bigl(F_{\treeL}(g), F_{\treeLL}(\alpha)\bigr) \comp_1 G_{\treeLL}\bigl(F_{\treeV}(g, f)\bigr)\,; \end{gather*} \item[\scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [[]][] ] \end{forest} }] given a whiskering \[ \begin{tikzcd}[column sep=4.5em] \bullet \ar[r, "f"] & \bullet \ar[r, bend left, "g", ""{below, name=g}] \ar[r, bend right, "g'"', ""{name=gp}]& \bullet \ar[Rightarrow, from=g, to=gp, "\beta"] \end{tikzcd} \] of $A$, an argument dual to the previous point shows that this is indeed $\SNn{l}(G)\bigl(F_{\treeVLeft}(\beta, f)\bigr)$. \item[\scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [[[]]] ] \end{forest} }] they trivially agree by definition. \end{description} \end{paragr} Summing up the results of this chapter, we get the following theorem. \begin{thm}\label{thm:iso_oplax} Small $3$-categories and normalised oplax $3$-functors are organised in a category $\widetilde{\nCat{3}}$, which is isomorphic via the functor $\SNn{l}$ to the category $\nCat{3}_{\cDelta}$ of $3$-categories and simplicial oplax $3$-morphisms. \end{thm} \section{Strictification}\label{sec:tilde} In this section we are going to explicitly describe the \oo-category $c_\infty N_\infty(A)$, where $A$ is a $1$-category without split-monos or split-epis. \begin{paragr} We say that a $1$-category $A$ is \ndef{split-free} \index{split-free category} if it does not have any split-monos or split-epis. \end{paragr} \begin{exem} Any poset is a split-free category. Moreover, for any category $A$, the category $c\,\Sd N(A)$ is split-free. \end{exem} \emph{We fix a split-free category $A$.} \begin{paragr} We now define a reflexive $\infty$-graph $\tilde{A}$ associated to the $1$-category $A$. The objects of $\tilde A$ are precisely the objects of $A$.
For any pair of objects $(a, a')$ of $A$, we then define a reflexive $\infty$-graph $\tilde{A}(a, a')$ whose objects, \ie the $1$-cells of $\tilde{A}$ having $a$ as source and $a'$ as target, are given by the set of non-degenerate simplices $x \colon \Deltan{n} \to A$ of $N_1(A)$ such that $x_0 = a$ and $x_n = a'$, for $n \ge 0$; that is to say, the objects of $\tilde{A}(a, a')$ are the tuples $(f_1, \dots, f_n)$ of composable non-trivial arrows of $A$ such that $s(f_1) = a$ and $t(f_n) = a'$, with $n \ge 0$. The $0$-tuple, where necessarily $a = a'$, corresponds to the non-degenerate simplex $\Deltan{0} \to A$ pointing at $a$, and it is by definition the identity $1$-cell of the object $a$ of $\tilde{A}$. Consider two objects $x$ and $y$ of $\tilde{A}(a, a')$, \ie two tuples $x = (f_1, \dots, f_m)$ and $y = (g_1, \dots, g_n)$ as described above. We define $\tilde{A}(x, y)$ as follows: \begin{description} \item[$m=0$] if $x \colon \Deltan{0} \to A$ is a $0$-simplex of $A$, we set $\tilde{A}(x, y)$ to be the final \emph{\oo-category} $\On{0}$; \item[$m=1$] we define \[ \tilde{A}\bigl( (f), (g_1, \dots, g_p)\bigr)\,, \] where $s(f) = s(g_1)$ and $t(f) = t(g_p)$, to be the \emph{\oo-category} \[ \tilde{A}\bigl( (f), (g_1, \dots, g_p)\bigr) = \On{\omega}\bigl(\atom{0, p},\atom{0, 1} + \atom{1, 2} + \dots + \atom{p-1, p}\bigr)\,. \] \item[$m>1$] otherwise, we set $\tilde{A}(x, y)$ to be the \emph{\oo-category} \[ \coprod \tilde{A}\bigl((f_1), (g_1, \dots, g_{\phi(1)})\bigr) \times \dots \times \tilde{A}\bigl((f_m), (g_{\phi(m-1)+1}, \dots, g_n)\bigr)\,, \] where the sum runs over all the arrows $\phi \colon \Deltan{m} \to \Deltan{n}$ of $\cDelta$ which are: \begin{enumerate} \item\label{item:cells-tilde-i} strictly increasing; \item we have $\phi(0) = 0$ and $\phi(m) = n$; \item\label{item:cells-tilde-ii} such that, for all $1 < i \leq m$, we have \[ g_{\phi(i)} \comp_0 \dots \comp_0 g_{\phi(i-1)+1} = f_i \] in the category $A$. \end{enumerate} These conditions ensure that $s(f_i) = s(g_{\phi(i-1)+1})$ and $t(f_i) = t(g_{\phi(i)})$, for all $1 \le i \le m$, so that in particular we have $x_i = y_{\phi(i)}$ for every $0 \le i \le m$; notice that the condition imposing that $\phi$ is an active morphism, \ie $\phi(0) = 0$ and $\phi(m) = n$ is actually implied by the others. We shall sometimes write the above sum as \[ \coprod_{\phi} \tilde{A}_{\phi}(x, y)\,. \] Using the canonical isomorphism of \oo-categories described in Proposition~A.4 of~\cite{AraMaltsiCondE}, we shall often identify the \oo-category $\tilde{A}_{\phi}(x, y)$ with \[ \On{\omega}\bigl(\atom{0, \phi(1)}+ \dots + \atom{\phi(m-1), \phi(m)}, \atom{0, 1} + \dots + \atom{n-1, n}\bigr)\,. \] \end{description} Note that if the index of the sum above is empty, that is there is no arrow $\phi \colon \Deltan{m} \to \Deltan{n}$ satisfying conditions~\ref{item:cells-tilde-i} and~\ref{item:cells-tilde-ii}, then $\tilde{A}(x, y)$ is set to be the empty \oo-category. This happens in particular every time $m > n$. Observe also that condition~\ref{item:cells-tilde-ii} entails that if there is a cell between $(f_1, \dots, f_m)$ and $(g_1, \dots, g_n)$, then necessarily \[ f_m \comp_0 \dots \comp_0 f_1 = g_n \comp_0 \dots \comp_0 g_1\,. \] For any $1$-cell $x = (f_1, \dots, f_n)$ of $\tilde{A}$, the identity of $x$ is given by the only trivial $2$-cell of the \oo-category \[ \tilde{A}(f_1, f_1) \times \dots \times \tilde{A}(f_n, f_n)\,. 
\] Indeed, observe that for any arrow $f$ of $A$, the only morphism satisfying~\ref{item:cells-tilde-i} and~\ref{item:cells-tilde-ii} is $\phi = 1_{\Deltan{1}}$, so that $\tilde{A}(f, f)$ is isomorphic to $\On{\omega}(\atom{0, 1}, \atom{0, 1})$, which is the terminal \oo-category. \end{paragr} \begin{rem} Without the hypothesis on the category $A$, that is in the general situation in which we have split-monos and split-epis, the definition of the hom-\oo-category $\tilde{A}\bigl((f), (g_1, \dots, g_n)\bigr)$ is more complicated. This is due to the fact that, although the simplex $(g_1, \dots, g_n)$ is non-degenerate, there could be two consecutive arrows, say $g_i$ and $g_{i+1}$, which compose to an identity. When introducing the operations on the \oo-graph $\tilde{A}$, this becomes a serious issue. \end{rem} \begin{paragr} In this paragraph we want to endow the reflexive \oo-graph $\tilde{A}(a, a')$ with the structure of an \oo-category, for any pair $(a, a')$ of objects of $\tilde{A}$. In order to do so, for any $x = (f_1, \dots, f_\ell)$, $y = (g_1, \dots, g_m)$ and $z = (h_1, \dots, h_n)$ of $\tilde{A}(a, a')$, we want to define an \oo-functor \[ \tilde{A}(y, z) \times \tilde{A}(x, y) \to \tilde{A}(x, z)\,. \] Without any loss of generality, we can suppose $\ell \le m \le n$ (see the preceding paragraph) and consider the case $\ell >0$, since the other cases are trivial. Let us fix two morphisms $\phi \colon \Deltan{\ell} \to \Deltan{m}$ and $\psi \colon \Deltan{m} \to \Deltan{n}$ satisfying conditions~\ref{item:cells-tilde-i} and~\ref{item:cells-tilde-ii} of the previous paragraph. We set \[ \Phi(i) = \phi(i) - \phi(i-1) \quadet \Psi(j) = \psi(j) - \psi(j-1) \] for any $1 \le i \le \ell$ and $1 \le j \le m$. We have to give an \oo-functor which has \begin{gather}\label{eq:mapping-oo-category} \tilde{A}\bigl((g_1), (h_1, \dots, h_{\psi(1)})\bigr) \times \dots \times \tilde{A}\bigl((g_m), (h_{\psi(m-1)+1}, \dots, h_n)\bigr) \notag \\ \times \phantom{OOOO}\\ \tilde{A}\bigl((f_1), (g_1, \dots, g_{\phi(1)})\bigr) \times \dots \times \tilde{A}\bigl((f_\ell), (g_{\phi(\ell-1)+1}, \dots, g_m)\bigr) \notag \end{gather} as source, which by definition is the \oo-category \begin{gather*} \prod_{i=1}^{m} \On{\omega}\bigl(\atom{0, \Psi(i)}, \atom{0, 1} + \dots + \atom{\Psi(i)-1, \Psi(i)}\bigr) \\ \times \\ \prod_{i=1}^{\ell} \On{\omega}\bigl(\atom{0, \Phi(i)}, \atom{0, 1} + \dots + \atom{\Phi(i)-1, \Phi(i)}\bigr)\,. \end{gather*} Notice that for every $1 \le p \le \ell$ we have \[ f_p = g_{\phi(p)} \comp_0 g_{\phi(p) -1} \comp_0 \dots \comp_0 g_{\phi(p-1)+1}\,, \] and for every $1 \le q \le m$ we have \[ g_q = h_{\psi(q)} \comp_0 h_{\psi(q) -1} \comp_0 \dots \comp_0 h_{\psi(q-1)+1}\,, \] so that in fact \begin{equation*} \begin{split} f_p = & \phantom{\comp_0}\ h_{\psi(\phi(p))} \comp_0 h_{\psi(\phi(p)) -1} \comp_0 \dots \comp_0 h_{\psi(\phi(p)-1)+1} \\ &\comp_0 h_{\psi(\phi(p)-1)} \comp_0 h_{\psi(\phi(p)-1)-1} \comp_0 \dots \comp_0 h_{\psi(\phi(p)-2)+1} \\ &\comp_0 \dots \\ &\comp_0 h_{\psi(\phi(p-1)+1)} \comp_0 h_{\psi(\phi(p-1)+1)-1} \comp_0 \dots \comp_0 h_{\psi(\phi(p-1))+1}\,, \end{split} \end{equation*} for every $1 \le p \le \ell$. Now, for every $1 \le i \le m$, we have that the \oo-category \[ \On{\omega}\bigl(\atom{0, \Psi(i)}, \atom{0, 1} + \dots + \atom{\Psi(i)-1, \Psi(i)}\bigr) \] is canonically isomorphic by Corollary~\ref{coro:suboriental} to the \oo-category \[ \On{\omega}\bigl(\atom{\psi(i-1), \psi(i)}, \atom{\psi(i-1), \psi(i-1) + 1} + \dots + \atom{\psi(i) - 1, \psi(i)}\bigr)\,.
\] In order to simplify the notation, let us set \[ b_i = \atom{\psi(i-1), \psi(i)} \quad , \quad c_i = \sum_{k=0}^{\Psi(i)-1} \atom{\psi(i-1) + k, \psi(i-1) + k + 1}\,, \] for $1 \le i \le m$, and also \[ b = b_1 + b_2 + \dots + b_m \quadet c = c_1 + c_2 + \dots + c_m\,. \] There is a canonical \oo-functor \[ \prod_{i=1}^m \On{\omega}(b_i, c_i) \to \On{\omega}(b, c) \] given by ``horizontal composition'' $\comp_0$, \ie mapping a tuple $(x_1, \dots, x_m)$ of $p$-cells to the $p$-cell $x_1 \comp_0 x_2 \comp_0 \dots \comp_0 x_m$ of $\On{\omega}(b, c)$. Proposition~\ref{prop:2-cells_orientals} actually shows that this \oo-functor is an isomorphism of \oo-categories. The same argument entails that the \oo-category \[ \prod_{i=1}^{\ell} \On{\omega}\bigl(\atom{0, \phi(i)}, \atom{0, 1} + \dots + \atom{\phi(i)-1, \phi(i)}\bigr) \] is canonically isomorphic to $\On{\omega}(a', b')$ via the ``horizontal composition''~$\comp_0$, where we have set \[ a' = \sum_{i=1}^{\ell} \atom{\phi(i-1), \phi(i)} \quadet b' = \sum_{i=1}^{m} \atom{i-1, i}\,. \] Applying the increasing morphism $\psi$ and setting \[ a = \sum_{i=1}^{\ell} \atom{\psi\phi(i-1), \psi\phi(i)} \] we get, again by Corollary~\ref{coro:suboriental}, a canonical isomorphism of \oo-categories $\On{\omega}(a', b') \cong \On{\omega}(a, b)$. The \oo-cat\-e\-gory in~\eqref{eq:mapping-oo-category} is thus canonically isomorphic to the \oo-cat\-e\-go\-ry \[ \On{\omega}(b, c) \times \On{\omega}(a, b)\,. \] On the other hand, the target \oo-category of the \oo-functor we set out to construct is \[ \tilde{A}(x, z) = \prod_{i=1}^\ell \tilde{A}\bigl((f_i), (h_{\psi\phi(i-1)+1}, \dots, h_{\psi\phi(i)})\bigr)\,, \] which is by definition \[ \prod_{i=1}^\ell \On{\omega}\bigl(\atom{0, \psi\phi(i)}, \atom{0, 1} + \dots + \atom{\psi\phi(i)-1, \psi\phi(i)}\bigr). \] The same argument used above gives us that this \oo-category is canonically isomorphic to the \oo-category \[ \On{\omega}(a, c)\,. \] We then define the \oo-functor \[ \On{\omega}(b, c) \times \On{\omega}(a, b) \to \On{\omega}(a, c) \] to be the ``vertical composition'' $\comp_1$, \ie a pair of $p$-cells $(x, y)$ of the source is mapped to the $p$-cell $x \comp_1 y$ of $\On{\omega}(a, c)$. Alternatively, for any $1 \le i \le \ell$ we can consider the \oo-category \[ \tilde{A}\bigl((f_i), (g_{\phi(i-1)+1}, \dots, g_{\phi(i)-1}, g_{\phi(i)})\bigr)\,, \] which is defined as \[ \On{\omega}\bigl(\atom{0, \Phi(i)}, \atom{0, 1} + \dots + \atom{\Phi(i)-1, \Phi(i)}\bigr)\,. \] The latter is canonically isomorphic by Corollary~\ref{coro:suboriental} to \[ \On{\omega}\bigl(\atom{\phi(i-1), \phi(i)}, \atom{\phi(i-1), \phi(i-1)+1} + \dots + \atom{\phi(i)-1, \phi(i)}\bigr)\,, \] which in turn is isomorphic to the \oo-category \[ \On{\omega}\bigl(\atom{\psi\phi(i-1), \psi\phi(i)}, \atom{\psi(\phi(i-1)), \psi(\phi(i-1)+1)} + \dots + \atom{\psi(\phi(i)-1), \psi(\phi(i))}\bigr)\,. \] If we set $a_i = \atom{\psi\phi(i-1), \psi\phi(i)}$ for every $1 \le i \le \ell$, then we can write the above \oo-category as \[ \On{\omega}(a_i, b_{\phi(i-1)+1} + b_{\phi(i-1)+2} + \dots + b_{\phi(i)})\,. \] Similarly, for any $1 \le i \le m$ we have seen above that the \oo-category \[ \tilde{A}\bigl((g_i), (h_{\psi(i-1)+1}, \dots, h_{\psi(i)})\bigr) \] is canonically isomorphic to the \oo-category \[ \On{\omega}\bigl(\atom{\psi(i-1), \psi(i)}, \atom{\psi(i-1), \psi(i-1) + 1} + \dots + \atom{\psi(i) - 1, \psi(i)}\bigr)\,, \] which we can denote by $\On{\omega}(b_i, c_i)$.
Therefore, for a fixed $1 \le p \le \ell$ we have that the \oo-category \begin{gather*} \prod_{k=\phi(p-1)+1}^{\phi(p)} \tilde{A}\bigl((g_k), (h_{\psi(k-1)+1}, \dots, h_{\psi(k)})\bigr) \\ \times \\ \tilde{A}\bigl((f_p), (g_{\phi(p-1)+1}, \dots, g_{\phi(p)})\bigr) \end{gather*} is canonically isomorphic to the \oo-category \[ \prod_{k=\phi(p-1)+1}^{\phi(p)} \On{\omega}(b_k, c_k) \ \times\ \On{\omega}(a_p, b_{\phi(p-1)+1} + \dots + b_{\phi(p)})\,. \] Using Proposition~\ref{prop:2-cells_orientals}, we obtain \[ \prod_{k=\phi(p-1)+1}^{\phi(p)} \On{\omega}(b_k, c_k) \cong \On{\omega}(b_{\phi(p-1)+1} + \dots + b_{\phi(p)}, c_{\phi(p-1)+1} + \dots + c_{\phi(p)}) \] and hence the former \oo-category is canonically isomorphic to \begin{gather*} \On{\omega}(b_{\phi(p-1)+1} + \dots + b_{\phi(p)}, c_{\phi(p-1)+1} + \dots + c_{\phi(p)}) \\ \times \\ \On{\omega}(a_p, b_{\phi(p-1)+1} + \dots + b_{\phi(p)})\,. \end{gather*} We set \[ B_p = b_{\phi(p-1)+1} + \dots + b_{\phi(p)} \quadet C_p = c_{\phi(p-1)+1} + \dots + c_{\phi(p)} \] Applying the ``vertical composition'' $\comp_1$ to this product of \oo-categories we get an \oo-functor \[ \On{\omega}(B_p, C_p) \times \On{\omega}(a_p, B_p) \to \On{\omega}(a_p, C_p)\,. \] Finally, the ``horizontal composition'' $\comp_0$ provides us with an \oo-functor \[ \prod_{p=1}^\ell \On{\omega}(a_p, C_p) \to \On{\omega}(a, c)\,, \] since $a = a_1 + \dots + a_\ell$ and $c = C_1 + \dots + C_\ell$. These two approaches are equivalent by virtue of the exchange law between $\comp_0$ and~$\comp_1$. This endows the reflexive \oo-graph $\tilde{A}(a, a')$ with the structure of an \oo-category. \end{paragr} \begin{paragr}\label{paragr:tA_oo-category} In this paragraph we put an \oo-category structure on the reflexive \oo-graph~$\tilde{A}$. In order to do this, we shall define, for any objects $a$, $a'$ and $a''$ of $\tilde{A}$, an \oo-functor \[ \tilde{A}(a', a'') \times \tilde{A}(a, a') \to \tilde{A}(a, a'')\,. \] As \oo-categories are categories enriched in \oo-categories, an \oo-functor $F$ between two \oo-categories $C$ and $D$ can be given by a map $F_0 \colon C_0 \to D_0$ on objects and a family of \oo-functors $C(c, c') \to D(Fc, Fc')$, indexed by the pairs of objects $(c, c')$ of $C$, satisfying the axioms described in paragraph~\ref{def_enriched}. In light of the above, we have to provide a map \[ \tilde{A}(a', a'')_0 \times \tilde{A}(a, a')_0 \to \tilde{A}(a, a'')_0\,, \] that we define by sending a pair $(y, x)$ with $x \colon \Deltan{m} \to A$ and $y \colon \Deltan{n} \to A$ to the concatenation simplex \[ y \cdot x \colon \Deltan{m+n} \to A \quad , \quad \atom{i, i+1} \mapsto \begin{cases} x_{\{i, i+1\}}\,, & \text{if $i < m$,}\\ y_{\{i-m, i+1-m\}}\,, & \text{if $i \ge m$.} \end{cases} \] Furthermore, for any choice of objects $(y, x)$ and $(t, z)$ of $\tilde{A}(a', a'')_0 \times \tilde{A}(a, a')_0$, we have to provide an \oo-functor \[ \tilde{A}(y, t) \times \tilde{A}(x, z) \to \tilde{A}(y\cdot x, t \cdot z)\,. \] Notice that if either $\tilde{A}(x, z)$ or $\tilde{A}(y, t)$ are empty, then the same holds for $\tilde{A}(y\cdot x, t \cdot z)$. If $x$ (resp.~$y$) is a $0$-simplex, then so is $z$ (resp.~$t$) and the \oo-functor above is simply the identity on $\tilde{A}(y, t)$ (the identity on $\tilde{A}(x, z)$). We can therefore suppose that~$\tilde{A}(x, z)$ and $\tilde{A}(y, t)$ are non-empty and that $x$ and $y$ are not trivial. 
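To fix ideas, here is a small instance of the composition on objects, not needed for what follows: if $x = (f_1, f_2)$ and $y = (g_1)$ are non-trivial $1$-cells of $\tilde{A}$ with $t(f_2) = s(g_1)$, then the concatenation formula above gives $y \cdot x = (f_1, f_2, g_1)$, \ie the non-degenerate $3$-simplex of $N_1(A)$ determined by the three composable arrows $f_1$, $f_2$ and $g_1$.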
Following the reasoning of the previous paragraph, we know that there are integers \[ 0 = i_0 < i_1 < \dots < i_m \quadet 0 = j_0 < j_1 < \dots < j_n \] such that, if we set \begin{align*} a = \sum_{k=0}^{m-1} \atom{i_k, i_{k+1}}\,, && c = \sum_{p=0}^{i_m -1} \atom{p, p+1}\,, \\ b = \sum_{k=0}^{n-1} \atom{j_k, j_{k+1}}\,, && d = \sum_{p=0}^{j_n -1} \atom{p, p+1}\,, \end{align*} then we have canonical isomorphisms \[ \tilde{A}(x, z) \cong \On{\omega}(a, c) \quadet \tilde{A}(y, t) \cong \On{\omega}(b, d) \] of \oo-categories. Moreover, setting \[ b' = \sum_{k=0}^{n-1} \atom{i_m + j_k, i_m + j_{k+1}} \quadet d' = \sum_{p=0}^{j_n -1} \atom{i_m + p, i_m + p+1}\,, \] we have by Corollary~\ref{coro:suboriental} a canonical isomorphism \[ \On{\omega}(b, d) \cong \On{\omega}(b', d') \] and by the same argument we can build a further canonical isomorphism \[ \tilde{A}(y\cdot x, t \cdot z) \cong \On{\omega}(a + b', c + d') \] of \oo-categories. We are thus left to provide an \oo-functor \[ \On{\omega}(b', d') \times \On{\omega}(a, c) \to \On{\omega}(a + b', c + d')\,, \] which we set to be the ``horizontal composition'' by $\comp_0$. Notice that by Proposition~\ref{prop:2-cells_orientals}, this \oo-functor is in fact an isomorphism. The identity axioms are trivial from the definition and the associativity follows immediately from the associativity of the ``horizontal composition''~$\comp_0$ as an operation of the \oo-category~$\On{\omega}$. \end{paragr} \begin{lemme} Let $a$ and $a'$ be two objects of $A$ and consider two elements \[x = (f_1, \dots, f_m) \quadtext{and}\quad y = (g_1, \dots, g_n)\] of~$\tilde{A}(a, a')$. Then there is a zig-zag of $2$-cells linking $x$ to $y$ if and only if \[ f_m \comp_0 \dots \comp_0 f_1 = g_n \comp_0 \dots \comp_0 g_1 \] in $A$. \end{lemme} \begin{proof} This is trivially true if $x$, and hence also $y$, is a trivial $1$-cell of $\tilde{A}$. So let us suppose $m>0$ and $n>0$. On the one hand, condition~\ref{item:cells-tilde-ii} immediately implies that two $1$-cells $x$ and $y$ as above are connected by a zig-zag of $2$-cells only if \[ f_m \comp_0 \dots \comp_0 f_1 = g_n \comp_0 \dots \comp_0 g_1\,. \] On the other hand, let \[ h = f_m \comp_0 \dots \comp_0 f_1 = g_n \comp_0 \dots \comp_0 g_1 \] and consider the $1$-cell $z = (h)$ of $\tilde{A}$. It results immediately from the structure of the oriental $\On{\omega}$ that the \oo-categories \[ \tilde{A}\bigl((h), x\bigr) = \On{\omega}\bigl(\atom{0, m}, \atom{0, 1} + \dots + \atom{m-1, m}\bigr) \] and \[ \tilde{A}\bigl((h), y\bigr) = \On{\omega}\bigl(\atom{0, n}, \atom{0, 1} + \dots + \atom{n-1, n}\bigr) \] are non-empty; hence $x$ and $y$ are connected by a zig-zag of length two. \end{proof} \begin{coro} We have $\ti{1}(\tilde{A}) \cong A$. \end{coro} \begin{proof} We have a canonical \oo-functor $\eps_A \colon \tilde{A} \to A$ which is the identity on objects and which maps a $1$-cell $x = (f_1, \dots, f_n)$ to $f_n \comp_0 \dots \comp_0 f_1$ if $n>0$ and a $0$-simplex $a \colon \Deltan{0} \to A$ to the identity of $a$ in $A$. The identity is clearly preserved, the functoriality follows from the definition of $0$-composition of $1$-cells of $\tilde{A}$ by concatenation, and moreover the assignment is well-defined by the previous lemma. We are left with showing that for any pair of objects $(a, a')$ of $A$, the map \[ \ti{1}(\tilde{A})(a, a') \to A(a, a') \] is a bijection. It is clearly surjective, since for any morphism $f \colon a \to a'$ of $A$ we have $\eps_A\bigl((f)\bigr) = f$ (and similarly if $f$ is an identity cell of $A$).
It results from the previous lemma that this map is also injective, hence completing the proof of the corollary. \end{proof} \begin{paragr} We now turn to constructing a normalised oplax $3$-functor \[\eta_A \colon A \to \ti{3}(\tilde{A})\,.\] \begin{description} \item[$\TreeDot\ \ $] The map $(\eta_A)_{\treeDot}$ is defined to be the identity map on objects. \item[$\TreeLog\ \ \:$] The map $(\eta_A)_{\treeLog}$ assigns to any non-trivial morphism $f \colon a \to a'$ of $A$ the $1$-cell $(f) \colon \Deltan{1} \to A$ of $\tilde{A}$ and to any identity $1_a$ of $A$ the \emph{trivial} $1$-cell $a \colon \Deltan{0} \to A$ of~$\tilde{A}$. \item[$\TreeV\ $] The map $(\eta_A)_{\treeV}$ assigns to any pair of composable morphisms \[ \begin{tikzcd} a \ar[r, "f"] & a' \ar[r, "g"] & a'' \end{tikzcd} \] of $A$ the unique $2$-cell $(\eta_A)_{\treeV}(g, f)$ of $\tilde{A}$ with source $(g \comp_0 f)$ and target $(f, g)$, \ie the unique element $\atom{0, 1, 2}$ of the set \[ \tilde{A}\bigl((g\comp_0 f), (f, g)\bigr) = \On{\omega}\bigl(\atom{0, 2}, \atom{0, 1} + \atom{1, 2}\bigr)\,. \] \item[$\TreeW\ $] The map $(\eta_A)_{\treeW}$ assigns to any triple of composable morphisms \[ \begin{tikzcd}[column sep=small] a \ar[r, "f"] & a' \ar[r, "g"] & a'' \ar[r, "h"] & a''' \end{tikzcd} \] of $A$ the unique $3$-cell $(\eta_A)_{\treeW}(h, g, f)$ of $\tilde{A}$ with $1$-source $(h\comp_0 g \comp_0 f)$ and $1$-target $(f, g, h)$, \ie the unique arrow $\atom{0, 1, 2, 3}$ of the $1$-category \[ \tilde{A}\bigl((h\comp_0 g \comp_0 f), (f, g, h)\bigr) = \On{\omega}\bigl(\atom{0, 3}, \atom{0, 1} + \atom{1, 2} + \atom{2, 3}\bigr)\,. \] \end{description} Notice that by definition we have that $1_{(\eta_A)_{\treeDot}(a)}$, that is the $1$-cell $a \colon \Deltan{0} \to A$, is precisely $(\eta_A)_{\treeL}(1_a)$; the other conditions of normalisation are trivial. We are left with checking the coherence for the tree $\treeVV$. Consider four composable morphisms of~$A$ \[ \begin{tikzcd}[column sep=small] \bullet \ar[r, "f"] & \bullet \ar[r, "g"] & \bullet \ar[r, "h"] & \bullet \ar[r, "i"] & \bullet \end{tikzcd}\ . \] We have to show that the $3$-cells \begin{gather*} (\eta_A)_{\treeW}(i, h, g) \comp_0 (\eta_A)_{\treeLog}(f) \comp_1 (\eta_A)_{\treeV}(i\comp_0 h \comp_0 g, f)\\ \comp_2\\ (\eta_A)_{\treeLog}(i) \comp_0 (\eta_A)_{\treeV}(h, g) \comp_0 (\eta_A)_{\treeLog}(f) \comp_1 (\eta_A)_{\treeW}(i, h \comp_0 g, f)\\ \comp_2 \\ (\eta_A)_{\treeLog}(i) \comp_0 (\eta_A)_{\treeW}(h, g, f) \comp_1 (\eta_A)_{\treeV}(i, h\comp_0 g \comp_0 f) \end{gather*} and \begin{gather*} (\eta_A)_{\treeV}(i, h) \comp_0 (\eta_A)_{\treeLog}(g) \comp_0 (\eta_A)_{\treeLog}(f) \comp_1 (\eta_A)_{\treeW}(ih, g, f) \\ \comp_2\\ (\eta_A)_{\treeLog}(i) \comp_0 (\eta_A)_{\treeLog}(h) \comp_0 (\eta_A)_{\treeV}(g, f) \comp_1 (\eta_A)_{\treeW}(i, h, gf) \end{gather*} of $\ti{3}(\tilde{A})$ are equal, which is equivalent to exhibiting a zig-zag of $4$-cells connecting them. In fact, they are precisely the target and the source of the unique $2$-cell of the $2$-category \[\tilde{A}\bigl((i\comp_0 h \comp_0 g \comp_0 f), (f, g, h, i)\bigr)\,,\] \ie the cell $\atom{0, 1, 2, 3, 4}$ of the $2$-category \[ \On{\omega}\bigl(\atom{0, 4}, \atom{0, 1} + \atom{1, 2} + \atom{2, 3} + \atom{3, 4}\bigr)\,. \] \end{paragr} \begin{paragr} The construction of the preceding paragraph is in fact the cellular version of the truncation of a simplicial morphism $N_\infty(A) \to N_\infty(\tilde{A})$, which we shall still denote by $\eta_A$.
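As an illustration of what this simplicial morphism will do in low dimension (the general construction is given below), consider a $2$-simplex $x \colon \Deltan{2} \to A$ corresponding to a pair of composable non-trivial arrows $f$ and $g$ of $A$: it will be sent to the $2$-simplex $\On{2} \to \tilde{A}$ whose boundary $1$-cells are $(f)$, $(g)$ and $(g \comp_0 f)$ and whose main $2$-cell is the unique element of $\tilde{A}\bigl((g \comp_0 f), (f, g)\bigr)$, in accordance with the value $(\eta_A)_{\treeV}(g, f)$ described above.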
We shall dedicate the rest of the chapter to define such a map and moreover show that it is the unit map of the adjoint pair $(c_\infty, N_\infty)$ applied to the simplicial set $N_\infty(A)$, so that in particular $\tilde{A} \cong c_\inftyN_\infty(A)$. \end{paragr} \begin{comment} \begin{paragr}\label{paragr:sections} \color{blue} Let $x \colon \Deltan{n} \to A$ be an $n$-simplex of $N_\infty(A)$ and consider its Eilenberg–Zilber decomposition $(\pi, y)$, where $\pi \colon \Deltan{n} \to \Deltan{m}$ is a degenerate map and $y \colon \Deltan{m} \to A$ is a non-degenerate $m$-simplex of $N_\infty(A)$ such that $y \pi = x$. Notice that for any section $\iota \colon \Deltan{m} \to \Deltan{n}$ of $\pi$, we have $x \iota = y \pi \iota = y$. We shall always consider the section given as follows. Set $f_i = x_{\{i-1, i\}}$ for $0 < i \le n$ and consider the sequence $1 \le i_1 < \dots < i_{m} \le n$ of all integers such that $f_{i_\ell}$ is \emph{not} degenerate, for $1 \le \ell \le m$. We define $\iota(0) = 0$, $\iota(m) = n$ and $\iota(\ell) = i_{\ell}$ for all $1 \le \ell < m$. \end{paragr} \begin{paragr} \color{blue} Consider two simplices $a \colon \Deltan{p} \to A$ and $b \colon \Deltan{q} \to A$ of $N_\infty(A)$ and their Eilenberg--Zilber decompositions $(\pi, a')$ and $(\rho, b')$, where $\pi \colon \Deltan{p} \to \Deltan{p'}$, $\rho \colon \Deltan{q} \to \Deltan{q'}$ are morphisms of $\cDelta$ and $a' \colon \Deltan{p'} \to A$ and $b' \colon \Deltan{q'} \to A$ are two non-degenerate simplices of $N_\infty(A)$ such that $a' \pi = a$ and $b' \rho = b$. Suppose that $p \le q$ and that there is a strictly increasing morphism $\phi \colon \Deltan{p} \to \Deltan{q}$ of $\cDelta$ such that $b \phi = a$. We fix the following notation: \begin{itemize} \item $f_i$ for the arrow $a_{\{i-1, i\}}$ of $A$, for $1 \le i \le p$; \item $g_i$ for the arrow $b_{\{i-1, i\}}$ of $A$, for $1 \le i \le q$; \item $f'_i$ for the arrow $a'_{\{i-1, i\}}$ of $A$, for $1 \le i \le p'$; \item $g'_i$ for the arrow $b'_{\{i-1, i\}}$ of $A$, for $1 \le i \le q'$. \end{itemize} Consider the subfamily $(f_{i_k})_{k}$, with $k=1, \dots , p'$, of the family of arrows $(f_i)$ which are not identities, that is such that $f_{i_k} = f'_k$ for all $k=1, \dots, p'$. Similarly, we consider the subfamily $(g_{J_k})_k$, with $k=1, \dots , q'$ of the family of arrows $(g_j)$ which are not identities, that is such that $g_{j_k} = g'_k$ for all $k=1, \dots, q'$. By definition, we have \[ f_i = g_{\phi(i)} \comp_0 \dots \comp_0 g_{\phi(i-1)+1} \] for any $1 \le i \le p$ and so in particular \[ f_{i_k} = g_{\phi(i_k)} \comp_0 \dots \comp_0 g_{\phi(i_k-1)+1} \] for any $1 \le k \le p'$. We now let \[ q'' = \sum_{k=1}^{p'} \rho\phi(i_k) - (\rho\phi(i_k - 1) + 1) \] and consider the degenerate morphism $\rho' \colon \Deltan{q'} \to \Deltan{q''}$ of $\cDelta$ which collapses all the points $\ell$ such that $\rho\phi(i) \le \ell \le \rho\phi(i+1)$ whenever $i \neq i_k$ for all $k = 1 , \dots, p'$; equivalently, the morphism $\rho'$ collapses to identity all the arrows $\ell \to \ell+1$ of $\Deltan{q'}$ belonging to the image of $i \to i+1$ of $\Deltan{p}$ under $\rho\phi$ and such that $\rho(i) = \rho(i+1)$. We define a $q''$-simplex $b'' \colon \Deltan{q''} \to A$ as the unique simplex such that $a' \pi = b'' \rho' \rho \phi$, which is equivalent to ask that $b' = b'' \rho'$. 
Explicitly, if $\ell = \rho'\rho\phi(i_k) + j$, with $1 \le k \le p'$, $j \ge 1$ and $\ell < \rho'\rho\phi(i_k +1)$, then the arrow $\ell-1 \to \ell$ of $\Deltan{q''}$ is mapped to $g_{\rho\phi(i_k) + j}$ by $b''$. Then there exists a unique strictly increasing morphism $\phi' \colon \Deltan{p'} \to \Deltan{q''}$ of $\cDelta$ verifying $\phi' \pi = \rho'\rho \phi$. If we denote by $g''_i$ the morphisms $\b''_{\{i-1, i\}}$ of $A$, for $i = 1, \dots, q''$, then we have \begin{equation*} \begin{split} f'_k & = g_{\phi(i_k)} \comp_0 \dots \comp_0 g_{\phi(i_k-1)+1} \\ & = g''_{\phi'(k)} \comp_0 \dots \comp_0 g''_{\phi(k-1)+1} \end{split} \end{equation*} for any $k = 1, \dots, p'$. We observe that none all the morphisms of $A$ in the last line is degenerate, \ie an identity. \end{paragr} \begin{paragr}\label{paragr:induced_injection} Let $x \colon \Deltan{n} \to A$ be an $n$-simplex of $N_\infty(A)$ and consider its Eilenberg–Zilber decomposition $(\pi, y)$, where $\pi \colon \Deltan{n} \to \Deltan{m}$ is a degenerate map and $y \colon \Deltan{m} \to A$ is a non-degenerate $m$-simplex of $N_\infty(A)$ such that $y \pi = x$. We want to define an $n$-simplex $\tilde x \colon \On{n} \to \tilde{A}$ of $N_\infty(\tilde{A})$. We shall do so by first defining an $m$-simplex $\tilde y \colon \On{m} \to \A$ of $N_\infty(\A)$, that we shall work out in few steps, and then by setting $\tilde x = \tilde y \On{\pi}$. We shall denote by $\iota \colon \Deltan{m} \to \Deltan{n}$ the section of $\pi$ as defined in paragraph~\ref{paragr:sections}. \end{paragr} \end{comment} \begin{paragr} For any object $\atom{i}$ of $\On{m}$, with $0\le i \le m$, we set $\tilde y(\atom{i}) = y(i)$. For any $0 < i \le m$, we denote by $f_i$ the arrow $y_{\{i-1, i\}}$ of $A$ and consider the $1$-cell \[ a = \atom{i_0, i_1} + \atom{i_1, i_2} + \dots + \atom{i_{k-1}, i_k} \] of $\On{m}$, with $0 \le i_0 < i_1 < \dots < i_k \le m$, that we can see as a strictly increasing morphism $a \colon \Deltan{k} \to \Deltan{m}$. To this $1$-cell, it is canonically associated the $k$-simplex $z \colon \Deltan{k} \to A$ of $N_\infty(A)$ defined by \[ z_{\{p, p+1\}} = f_{i_{p+1}} \comp_0 \dots \comp_0 f_{i_p + 1}\,, \] that is to say $z = y a$. This is a non-degenerate $k$-simplex of $A$ and thus defines a $1$-cell of $\tilde{A}$. Hence, we set $\tilde y(a) = z = ya$. \end{paragr} \begin{paragr} Consider two $1$-cells $a$ and $b$ of $\On{m}$ that we can write as two non-degenerate, that is strictly increasing, simplices \[ a \colon \Deltan{p} \to \Deltan{m} \quadet b \colon \Deltan{q} \to \Deltan{m}\,, \] and suppose they are such that $a(0) = b(0)$ and $a(p) = b(q)$. More explicitly, the $1$-cells $a$ and $b$ of $\On{m}$ correspond respectively to the $1$-cells \[ a = \atom{a_0, a_1} + \dots + \atom{a_{p-1}, a_p} \] and \[ b = \atom{b_0, b_1} + \dots + \atom{b_{q-1}, b_{q})} \] such that $a_0 = b_0$ and $a_p = b_{q}$, where we have set $a_i = a(i)$, for $0 \le i \le p$ and $b_j = b(j)$ for $0\le j \le q$. It results from Lemma~10.4 of~\cite{AraMaltsiCondE} that there is a $2$-cell from $a$ to $b$ if and only if there exists a strictly increasing morphism $\phi \colon \Deltan{p} \to \Deltan{q}$ of $\cDelta$ such that $a = b \phi$. Notice that if such a morphism $\phi$ exists, than it is unique, as $b$ is a monomorphism. We suppose that this is the case and we define an \oo-functor \[ \tilde x_{a, b} \colon \On{m}(a, b) \longrightarrow \tilde{A}\bigl(\tilde y(a), \tilde y(b)\bigr)\,. 
\] The source of this \oo-functor is the \oo-category \[ \On{m}(a, b) = \On{m}\bigl(\atom{a_0, a_1} + \dots + \atom{a_{p-1}, a_p}, \atom{b_0, b_1} + \dots + \atom{b_{q-1}, b_q}\bigr)\,, \] which, by virtue of Proposition~\ref{prop:2-cells_orientals}, is canonically isomorphic to \[ \prod_{i=1}^{p} \On{m}\bigl(\atom{a_{i-1}, a_{i}}, \atom{b_{\phi(i-1)}, b_{\phi(i-1)+1}} + \dots + \atom{b_{\phi(i) -1}, b_{\phi(i)}}\bigr)\,, \] while the target \oo-category $\tilde{A}\bigl(\tilde y(a), \tilde y(b)\bigr) = \tilde{A}(ya, yb)$ is a sum of \oo-categories $\tilde{A}_\psi(ya, yb)$ indexed by strictly increasing morphisms $\psi \colon \Deltan{p} \to \Deltan{q}$ of $\cDelta$ such that $(ya)_{\{i-1, i\}} = (yb)_{\{\psi(i-1), \dots , \psi(i)\}}$ for every $1 \le i \le p$, that is to say verifying $ya = yb\psi$. Any strictly increasing morphism $\phi \colon \Deltan{p} \to \Deltan{q}$ such that $a = b \phi$ trivially verifies $ya = yb\phi$ and we observed above that there is at most one such morphism. Therefore, if such a morphism $\phi$ exists, then it appears as an index in the sum of \oo-categories defining $\tilde{A}(ya, yb)$ and we have \[ \tilde{A}_\phi(ya, yb) \cong \On{\omega}\bigl(\atom{0, \phi(1)} + \dots + \atom{\phi(p-1), \phi(p)}, \atom{0, 1} + \dots + \atom{q-1, q}\bigr)\,. \] Now, this \oo-category is equal to the \oo-category \[ \On{q}\bigl(\atom{0, \phi(1)} + \dots + \atom{\phi(p-1), \phi(p)}, \atom{0, 1} + \dots + \atom{q-1, q}\bigr) \] and the injective morphism $b \colon \Deltan{q} \to \Deltan{m}$ induces by Corollary~\ref{coro:suboriental} a canonical isomorphism between the latter \oo-category and $\On{m}(a, b)$. We set the \oo-functor \[ \tilde y_{a, b} \colon \On{m}(a, b) \to \tilde{A}(ya, yb) \] to be the composition $\On{m}(a, b) \to \tilde{A}_\phi(ya, yb)$ of the isomorphisms we have just described followed by the embedding $\tilde{A}_\phi(ya, yb) \to \tilde{A}(ya, yb)$. We have to check that for any triple $(a, b, c)$ of composable $1$-cells of $\On{m}$ we have a commutative diagram \begin{equation}\label{dia:functoriality_tilde} \begin{tikzcd} \On{m}(b, c) \times \On{m}(a, b) \ar[r, "\comp_1"] \ar[d, "\tilde y_{b, c} \times \tilde y_{a, b}"'] & \On{m}(a, c) \ar[d, "\tilde y_{a, c}"] \\ \tilde{A}\bigl(\tilde y(b), \tilde y(c)\bigr) \times \tilde{A}\bigl(\tilde y(a), \tilde y(b)\bigr) \ar[r, "\comp_1"] & \tilde{A}\bigl(\tilde y(a), \tilde y(c)\bigr) \end{tikzcd} \end{equation} of \oo-categories. Suppose that we have $a \colon \Deltan{p} \to \Deltan{m}$, $b \colon \Deltan{q} \to \Deltan{m}$ and $c \colon \Deltan{r} \to \Deltan{m}$, with $1 \le p \le q \le r \le m$, and that $\phi \colon \Deltan{p} \to \Deltan{q}$ and $\psi \colon \Deltan{q} \to \Deltan{r}$ are the unique morphisms of $\cDelta$ such that $a = b\phi$ and $b = c\psi$. Thus we get \[ \tilde{A}_\phi\bigl(\tilde y(a), \tilde y(b)\bigr) = \On{\omega}\bigl(\atom{0, \phi(1)} + \dots + \atom{\phi(p-1), \phi(p)}, \atom{0, 1} + \dots + \atom{q-1, q}\bigr) \] and \[ \tilde{A}_\psi\bigl(\tilde y(b), \tilde y(c)\bigr) = \On{\omega}\bigl(\atom{0, \psi(1)} + \dots + \atom{\psi(q-1), \psi(q)}, \atom{0, 1} + \dots + \atom{r-1, r}\bigr) \] and \[ \tilde{A}_{\psi\phi}\bigl(\tilde y(a), \tilde y(c)\bigr) = \On{\omega}\bigl(\atom{0, \psi\phi(1)} + \dots + \atom{\psi\phi(p-1), \psi\phi(p)}, \atom{0, 1} + \dots + \atom{r-1, r}\bigr)\,. \] We set \[ a' = \sum_{i=1}^p \atom{\psi\phi(i-1), \psi\phi(i)} \quadet b' = \sum_{i=1}^q \atom{\psi(i-1), \psi(i)}\,.
\] Remember that the \oo-functor $\tilde{A}_\psi\bigl(\tilde y(b), \tilde y(c)\bigr) \times \tilde{A}_\phi\bigl(\tilde y(a), \tilde y(b)\bigr) \to \tilde{A}_{\psi\phi}\bigl(\tilde y(a), \tilde y(c)\bigr)$ is defined by making use of the canonical isomorphism between $\tilde{A}_\phi\bigl(\tilde y(a), \tilde y(b)\bigr)$ and the \oo-category $\On{\omega}(a', b')$. We thus have canonical isomorphisms \[ \tilde{A}_\phi\bigl(\tilde y(a), \tilde y(b)\bigr) \cong \On{r}(a', b')\quad , \quad \tilde{A}_\psi\bigl(\tilde y(b), \tilde y(c)\bigr) \cong \On{r}(b', \atom{0, 1} + \dots + \atom{r-1, r}) \] and \[ \tilde{A}_{\psi\phi}\bigl(\tilde y(a), \tilde y(c)\bigr) \cong \On{r}(a', \atom{0, 1} + \dots + \atom{r-1, r})\,. \] Moreover, the morphism $c \colon \Deltan{r} \to \Deltan{m}$ induces by Corollary~\ref{coro:suboriental} canonical isomorphisms \[ \On{r}(a', b') \cong \On{m}(a, b) \quad , \quad \On{r}(b', \atom{0, 1} + \dots + \atom{r-1, r}) \cong \On{m}(b, c) \] and \[ \On{r}(a', \atom{0, 1} + \dots + \atom{r-1, r}) \cong \On{m}(a, c)\,. \] Under these isomorphisms, we claim that the square \[ \begin{tikzcd} \On{r}(b', c') \times \On{r}(a', b') \ar[r, "\comp_1"] \ar[d] & \On{r}(a', c') \ar[d] \\ \On{m}(b, c) \times \On{m}(a, b) \ar[r, "\comp_1"] & \On{m}(a, c) \end{tikzcd} \] of \oo-categories is commutative, where we have set $c' = \atom{0, 1} + \dots + \atom{r-1, r}$. Indeed, consider two $k$-cells $\alpha$ in $\On{r}(a', b')$ and $\beta$ in $\On{r}(b', c')$ and suppose that they are $1$-composable, with $k>0$. We can express $\alpha$ and $\beta$ as homogeneous elements of $\cC(\Deltan{r})_{k+1}$ and thus as sums of atoms, say \[ \alpha = \sum_{i=0}^s \alpha_i \quadet \beta = \sum_{i=0}^t \beta_i\,. \] The operation $\comp_1$ at this level is simply the sum $\alpha + \beta$ and the morphism $\cC(\Deltan{r})_{k+1} \to \cC(\Deltan{m})_{k+1}$ induced by $c \colon \Deltan{r} \to \Deltan{m}$ sends an atom $\atom{j_0, \dots, j_{k+1}}$ of the source to the atom $\atom{c(j_0), \dots, c(j_{k+1})}$ of the target. The morphism $\cC(c) \colon \cC(\Deltan{r}) \to \cC(\Deltan{m})$ respects sums, since it is a morphism of augmented directed complexes, and therefore the square above commutes. Observe that (up to canonical isomorphisms of the factors in the line below) taking the inverses of the vertical isomorphisms of \oo-categories of the commutative square above gives the following commutative square \[ \begin{tikzcd} \On{m}(b, c) \times \On{m}(a, b) \ar[r, "\comp_1"] \ar[d, "\tilde y_{b, c} \times \tilde y_{a, b}"'] & \On{m}(a, c) \ar[d, "\tilde y_{a, c}"] \\ \tilde{A}_\psi\bigl(\tilde y(b), \tilde y(c)\bigr) \times \tilde{A}_\phi\bigl(\tilde y(a), \tilde y(b)\bigr) \ar[r, "\comp_1"] & \tilde{A}_{\psi\phi}\bigl(\tilde y(a), \tilde y(c)\bigr) \end{tikzcd} \] of \oo-categories. Since the square of embeddings \[ \begin{tikzcd} \tilde{A}_\psi\bigl(\tilde y(b), \tilde y(c)\bigr) \times \tilde{A}_\phi\bigl(\tilde y(a), \tilde y(b)\bigr) \ar[r, "\comp_1"] \ar[d]& \tilde{A}_{\psi\phi}\bigl(\tilde y(a), \tilde y(c)\bigr) \ar[d]\\ \tilde{A}\bigl(\tilde y(b), \tilde y(c)\bigr) \times \tilde{A}\bigl(\tilde y(a), \tilde y(b)\bigr) \ar[r, "\comp_1"] & \tilde{A}\bigl(\tilde y(a), \tilde y(c)\bigr) \end{tikzcd} \] is obviously commutative, we obtain the commutativity of the square depicted in~\eqref{dia:functoriality_tilde}. Hence, we have checked that the assignment \[\tilde y \colon \On{m}(\atom{i}, \atom{j}) \to \tilde{A}(y(i), y(j))\] defines an \oo-functor for any objects $\atom{i}$ and $\atom{j}$ of $\On{m}$.
In order to conclude that $\tilde y \colon \On{m} \to \tilde{A}$ is an \oo-functor, it remains to show that for any $0 \le i < j < k \le m$ the square \[ \begin{tikzcd} \On{m}(\atom{j}, \atom{k}) \times \On{m}(\atom{i}, \atom{j}) \ar[r, "\comp_0"] \ar[d, "\tilde y_{j, k} \times \tilde y_{i, j}"'] & \On{m}(\atom{i}, \atom{k}) \ar[d, "\tilde y_{i, k}"] \\ \tilde{A}\bigl(y(j), y(k)\bigr) \times \tilde{A}\bigl(y(i), y(j)\bigr) \ar[r, "\comp_0"] & \tilde{A}\bigl(y(i), y(k)\bigr) \end{tikzcd} \] of \oo-categories is commutative. The proof uses the same strategy adopted in paragraph~\ref{paragr:tA_oo-category}. On the objects, that is for any choice of composable $1$-cells $a$ and $b$ of $\On{m}$, with $s(a) = i$, $t(a) = s(b) = j$ and $t(b) = k$, the commutativity of the above diagram is equivalent to the equality $yb \cdot ya = y(b\cdot a)$, which is clearly verified. Moreover, for any $(a, b)$ and $(c, d)$ in $\On{m}(\atom{i}, \atom{j}) \times \On{m}(\atom{j}, \atom{k})$ such that the image of $a$ is contained in the image of $c$ and the image of $b$ is contained in the image of $d$ (the other cases being trivial), one easily checks the commutativity of the square \[ \begin{tikzcd} \On{m}(b, d) \times \On{m}(a, c) \ar[r, "\comp_0"] \ar[d, "\tilde y_{b, c} \times \tilde y_{a, c}"'] & \On{m}(b\cdot a, d \cdot c) \ar[d, "\tilde y_{b\cdot a, d \cdot c}"] \\ \tilde{A}\bigl(yb, yd\bigr) \times \tilde{A}\bigl(ya, yc\bigr) \ar[r, "\comp_0"] & \tilde{A}\bigl(y(b\cdot a), y(d\cdot c)\bigr) \end{tikzcd} \] by reducing to the atoms, as we did for the square~\eqref{dia:functoriality_tilde}. \end{paragr} \begin{paragr} In this paragraph we show that the assignment sending a functor $x \colon \Deltan{n} \to A$ to the \oo-functor $\tilde x \colon \On{n} \to \tilde{A}$ defines a morphism of simplicial sets $N_\infty(A) \to N_\infty(\tilde{A})$. Let $f \colon \Deltan{p} \to \Deltan{q}$ be a morphism in $\cDelta$ and $x \colon \Deltan{q} \to A$ a functor. We have to show that the equality $\tilde x \On{f} = \widetilde{xf}$ holds true. Consider the Eilenberg--Zilber decompositions $(\pi, x')$ of $x$ and $(\rho, y)$ of $xf$, where $x' \colon \Deltan{q'} \to A$ and $y \colon \Deltan{p'} \to A$. We can depict the situation with the following diagram \[ \begin{tikzcd} \Deltan{p} \ar[rr, "f"] \ar[d, "\rho"'] && \Deltan{q} \ar[d, "\pi"] \\ \Deltan{p'} \ar[dr, "y"'] && \Deltan{q'} \ar[dl, "x'"] \\ & A & \end{tikzcd}\ . \] The morphism $\pi f$ of $\cDelta$ admits a decomposition of a degeneracy $e \colon \Deltan{p} \to \Deltan{\ell}$ followed by a face $g \colon \Deltan{\ell} \to \Deltan{q'}$. Now, the composition $x'g \colon \Deltan{\ell} \to A$ is a non-degenerate element of $N_\infty(A)$ and so we must have $\Deltan{\ell} = \Deltan{p'}$, $e = \rho$ and $x'g = y$, by the uniqueness of the Eilenberg--Zilber decomposition. We have to show that $\tilde y = \widetilde{x'}\On{g}$, so that the following triangle \[ \begin{tikzcd} \On{p'} \ar[rr, "\On{g}"] \ar[rd, "\tilde y"'] && \On{q'} \ar[ld, "\widetilde{x'}"] \\ & \tilde{A} & \end{tikzcd} \] of \oo-functors is commutative. It trivially commutes at the level of objects. For any injective map $a \colon \Deltan{n} \to \Deltan{p'}$, we clearly have $ya = x' g a$, so that by definition $\tilde y(a) = \widetilde{x'}\On{g}(a)$ and therefore the triangle is commutative on $1$-cells. Let $a$ and $b$ be two parallel $1$-cells of $\On{p'}$, say $a \colon \Deltan{m} \to \Deltan{p'}$ and $b \colon \Deltan{n} \to \Deltan{p'}$. 
Observe that $\On{p'}(a, b)$ is empty if and only if $\On{q'}(ma, mb)$ is empty if and only if $\tilde{A}(\tilde y(a), \tilde y(b))$ is so. Otherwise, there exists a unique monomorphism $\phi \colon \Deltan{m} \to \Deltan{n}$ of $\cDelta$, an integer $r \ge 0$, a unique injective morphism $h \colon \Deltan{r} \to \Deltan{p'}$ of $\cDelta$ and $1$-cells $a'$ and $b'$ of $\On{r}$ such that $b \phi = a$, $ha' = a$, $hb' = b$ and \[ b' = \atom{0, 1} + \atom{1, 2} + \dots + \atom{r-1, r}\,. \] By definition, $\tilde{A}(\tilde y(a), \tilde y(b)) = \On{r}(a', b')$ and we have a commutative triangle of isomorphisms \[ \begin{tikzcd} \On{p'}(a, b) \ar[rr, "(\On{g})_{a, b}"] && \On{q'}(ma, mb) \\ & \On{r}(a', b') \ar[ul, "(\On{h})_{a', b'}"] \ar[ur, "(\On{hg})_{a', b'}"'] & \end{tikzcd} \] of \oo-categories by Corollary~\ref{coro:suboriental} and this immediately implies that the triangle \[ \begin{tikzcd} \On{p'}(a, b) \ar[rr, "\On{g}"] \ar[rd, "\tilde y_{a, b}"'] && \On{q'}(ma, mb) \ar[ld, "\widetilde{x'}_{ma, mb}"] \\ & \tilde{A}_\phi(\tilde y(a), \tilde y(b)) & \end{tikzcd} \] of \oo-categories commutes, as $\tilde y_{a, b}$ is defined as the inverse of $(\On{h})_{a', b'}$ and $\widetilde{x'}_{ma, mb}$ as the inverse of $(\On{hg})_{a', b'}$. This concludes the proof, showing that the assignment $\eta_A \colon N_\infty(A) \to N_\infty(\tilde{A})$ is indeed a morphism of simplicial sets. \end{paragr} \begin{paragr} We now want to show that the morphism $\eta_A \colon N_\infty(A) \to N_\infty(\tilde{A})$ is the unit of the adjoint pair $(c_\infty, N_\infty)$ at the object $N_\infty(A)$. This is equivalent to saying that the precomposition by $\eta_A$ induces a bijection \[ \operatorname{\mathsf{Hom}}_{\nCat{\infty}}(\tilde{A}, B) \cong \operatorname{\mathsf{Hom}}_{{\mathcal{S}\mspace{-2.mu}\it{et}}Simp}(N_\infty(A), N_\infty(B)) \] of sets for any \oo-category $B$. In turn, this bijection means that for any morphism of simplicial sets $F \colon N_\infty(A) \to N_\infty(B)$ there exists a unique \oo-functor $\bar{F} \colon \tilde{A} \to B$ such that the triangle \begin{equation}\label{dia:lifting_unit} \begin{tikzcd} N_\infty(A) \ar[r, "F"] \ar[d, "\eta_A"] & N_\infty(B) \\ N_\infty(\tilde{A}) \ar[ru, "N_\infty(\bar{F})"'] \end{tikzcd} \end{equation} is commutative. This would show in particular that $\tilde{A} \cong c_\infty N_\infty(A)$. We shall first prove the uniqueness and then the existence of such an \oo-functor $\bar{F}$. \end{paragr} \begin{paragr}[Uniqueness] Suppose that there exists an \oo-functor $G \colon \tilde{A} \to B$ such that $F = N_\infty(G) \eta_A$. Object-wise, the functor $G$ must coincide with $F_0$. A $1$-cell of $\tilde{A}$ is a tuple $a = (f_1, \dots, f_n)$ of non-trivial composable arrows of $A$. Let $f$ be the composite $f_n \comp_0 \dots \comp_0 f_1$ of the arrows which are components of $a$; we can view $f$ as a non-degenerate $1$-simplex $f \colon \Deltan{1} \to N_\infty(A)$. Then $G(a)$ must be equal to $F(f)$. Indeed, observe that $N_\infty(\eps_A) \eta_A$ is the identity on $N_\infty(A)$ and $\eps_A(a) = f$. Let $a$ and $b$ be two $1$-cells of $\tilde{A}$, say $a \colon \Deltan{m} \to A$ and $b \colon \Deltan{n} \to A$. We can suppose that there is an injective morphism $\phi \colon \Deltan{m} \to \Deltan{n}$ of $\cDelta$ such that $b \phi = a$, otherwise $\tilde{A}(a, b)$ is empty; we fix such a morphism $\phi$.
By definition, \[ \tilde{A}_\phi(a, b) = \On{n}\bigl(\atom{\phi(0), \phi(1)} + \dots + \atom{\phi(m-1), \phi(m)}, \atom{0, 1} + \dots + \atom{n-1, n} \bigr) \] and we have an \oo-functor \[ F(b) \colon \On{n} \to B\,. \] Hence, the \oo-functor $\tilde{A}_\phi(a, b) \to B(Ga, Gb)$ is the composition of the following \oo-functors \[ \begin{tikzcd} \tilde{A}_\phi(a, b) \ar[r, "\cong"] & \On{n}(a', b') \ar[r, "F(b)"] & B(Ga, Gb) \end{tikzcd}\ , \] where we have set \[ a' = \atom{\phi(0), \phi(1)} + \dots + \atom{\phi(m-1), \phi(m)} \quadet b' = \atom{0, 1} + \dots + \atom{n-1, n}\,. \] Varying $\phi$, this gives a unique \oo-functor $\tilde{A}(a, b) \to B(Ga, Gb)$, thus proving the uniqueness of $G$. \end{paragr} \begin{paragr}[Existence] The previous paragraph already shows how the functor $\bar F \colon \tilde{A} \to B$ must be defined, if it exists. It remains to check that this assignment is indeed an \oo-functor. Let $x$ and $y$ be objects of $\tilde{A}$ and $a \colon \Deltan{\ell} \to A$, $b \colon \Deltan{m} \to A$ and $c \colon \Deltan{n} \to A$ be $1$-cells of $\tilde{A}(x, y)$. Without loss of generality, we can suppose that there are injective morphisms $\phi \colon \Deltan{\ell} \to \Deltan{m}$ and $\psi \colon \Deltan{m} \to \Deltan{n}$ such that $c\psi = b$ and $b \phi = a$. We set \begin{align*} a' &= \atom{\psi\phi(0), \psi\phi(1)} + \dots + \atom{\psi\phi(\ell-1), \psi\phi(\ell)}\,, \\ b' &= \atom{\psi(0), \psi(1)} + \dots + \atom{\psi(m-1), \psi(m)}\,, \\ c' &= \atom{0, 1} + \dots + \atom{n-1, n}\,. \end{align*} We have a diagram \[ \begin{tikzcd} \tilde{A}_\psi(b, c) \times \tilde{A}_\phi(a, b) \ar[r, "\comp_1"] \ar[d, "\cong"'] & \tilde{A}_{\psi\phi}(a, c) \ar[d, "\cong"] \\ \On{n}(b', c') \times \On{n}(a', b') \ar[d, "F(c)"'] \ar[r, "\comp_1"] & \On{n}(a', c') \ar[d, "F(c)"] \\ B(\bar{F}b, \bar{F}c) \times B(\bar{F}a, \bar{F}b) \ar[r, "\comp_1"] & B(\bar{F}a, \bar{F}c) \end{tikzcd}\ , \] where the upper square commutes by definition and the lower square commutes by the \oo-functoriality of $F(c) \colon \On{n} \to B$. Letting the morphisms $\phi$ and $\psi$ vary among the indices defining the sums $\tilde{A}(a, b)$ and $\tilde{A}(b, c)$, we get a commutative square \[ \begin{tikzcd} \tilde{A}(b, c) \times \tilde{A}(a, b) \ar[r, "\comp_1"] \ar[d, "\bar{F}_{b, c}\times \bar{F}_{a, b}"'] & \tilde{A}(a, c) \ar[d, "\bar{F}_{a, c}"] \\ B(\bar{F}b, \bar{F}c) \times B(\bar{F}a, \bar{F}b) \ar[r, "\comp_1"] & B(\bar{F}a, \bar{F}c) \end{tikzcd} \] of \oo-categories. Let $x$, $y$ and $z$ be three objects of $\tilde{A}$. We have to show that the square of \oo-categories \[ \begin{tikzcd} \tilde{A}(y, z) \times \tilde{A}(x, y) \ar[r, "\comp_0"] \ar[d, "\bar{F}_{y, z}\times \bar{F}_{x, y}"'] & \tilde{A}(x, z) \ar[d, "\bar{F}_{x, z}"] \\ B(\bar{F}y, \bar{F}z) \times B(\bar{F}x, \bar{F}y) \ar[r, "\comp_0"] & B(\bar{F}x, \bar{F}z) \end{tikzcd} \] is commutative. As for the objects, that is the $1$-cells of $\tilde{A}$ and $B$, it is clear: indeed, for any $a \colon x \to y$ and $b \colon y \to z$ of $\tilde{A}$, we have on the one hand that $b \comp_0 a$ is just the concatenation of the simplices $a$ and $b$ of $A$, while on the other hand $\bar{F}$ applied to a $1$-cell $c = (f_1, \dots, f_n)$ of $\tilde{A}$ gives the image under $F$ of the composite $f_n \comp_0 \dots \comp_0 f_1$ of its components.
Therefore we have to check that, for any choice $(b, a)$ and $(d, c)$ of elements of $\tilde{A}(y, z)\times \tilde{A}(x, y)$, the square \[ \begin{tikzcd} \tilde{A}(b, d) \times \tilde{A}(a, c) \ar[r, "\comp_0"] \ar[d, "\bar{F}_{b, d}\times \bar{F}_{a, c}"'] & \tilde{A}(b\cdot a, d \cdot c) \ar[d, "\bar{F}_{b\cdot a, d\cdot c}"] \\ B(\bar{F}b, \bar{F}d) \times B(\bar{F}a, \bar{F}c) \ar[r, "\comp_0"] & B(\bar{F}(b\cdot a), \bar{F}(d\cdot c)) \end{tikzcd} \] of \oo-categories is commutative. This is completely analogous to the case of the ``vertical composition'' $\comp_1$ that we showed above: we reduce to the components $\tilde{A}_\psi(b, d)$ and $\tilde{A}_\phi(a, c)$, for which there is a diagram \[ \begin{tikzcd} \tilde{A}_\psi(b, d) \times \tilde{A}_\phi(a, c) \ar[r, "\comp_0"] \ar[d, "\cong"'] & \tilde{A}_{\psi\cdot \phi}(b\cdot a, d \cdot c) \ar[d, "\cong"] \\ \On{n}(b', d') \times \On{n}(a', c') \ar[d, "F(d\cdot c)"'] \ar[r, "\comp_0"] & \On{n}(b'\cdot a', d'\cdot c') \ar[d, "F(d\cdot c)"] \\ B(\bar{F}b, \bar{F}d) \times B(\bar{F}a, \bar{F}c) \ar[r, "\comp_0"] & B(\bar{F}(b\cdot a), \bar{F}(d\cdot c)) \end{tikzcd} \] of \oo-categories in which the upper square commutes by definition and the lower square by \oo-functoriality of $F(d\cdot c) \colon \On{n} \to B$, and finally we conclude by varying among all the morphisms $\phi$ and $\psi$ indexing the coproducts $\tilde{A}(a, c)$ and $\tilde{A}(b, d)$. This completes the proof of the existence of the \oo-functor $\bar{F} \colon \tilde{A} \to B$ and thus solves the lifting problem depicted in~\eqref{dia:lifting_unit}. Equivalently, the precomposition by $\eta_A \colon N_\infty(A) \to N_\infty(\tilde{A})$ gives a bijection \[ \operatorname{\mathsf{Hom}}_{\nCat{\infty}}(\tilde{A}, B) \cong \operatorname{\mathsf{Hom}}_{{\mathcal{S}\mspace{-2.mu}\it{et}}Simp}(N_\infty(A), N_\infty(B))\,, \] from which we deduce the isomorphism $\tilde{A} \cong c_\infty N_\infty(A)$. \end{paragr} \begin{thm} Let $A$ be a split-free category. Then the \oo-category $\tilde{A}$ defined in paragraph~\ref{paragr:tA_oo-category} is isomorphic to the \oo-category $c_\infty N_\infty(A)$. \end{thm} \begin{exem} Let $C$ be a $3$-category and consider the normalised oplax $3$-functor $\sup \colon i_{\cDelta}(N_3(C)) \to C$ defined in example~\ref{exem:sup}. For any $1$-category $A$, the category $c\Sd N(A)$ is split-free and moreover it is shown in Theorem~32 of~\cite{delHoyo} that the canonical morphism $c\Sd N(A) \to A$ is a Thomason equivalence. Hence, we get a diagram \[ c\,\Sd N\bigl(i_{\cDelta}(N_3(C))\bigr) \to i_{\cDelta}(N_3(C)) \to C \] whose composition is still a normalised oplax $3$-functor by Theorem~\ref{thm:iso_oplax}. Now, the category $C' = c\,\Sd N\bigl(i_{\cDelta}(N_3(C))\bigr)$ is split-free and therefore we get a span \[ C' \leftarrow \ti{3}\widetilde{C'} \to C \] of $3$-functors. We conjecture that both the $3$-functors above are Thomason equivalences. Since we observed in Example~\ref{exem:sup_we} that the morphism $\SNn{l}(\sup)$ of simplicial sets is a simplicial weak equivalence and we observed above that the functor $C' \to i_{\cDelta}(N_3(C))$ is a Thomason equivalence, by a 2-out-of-3 argument one of these $3$-functors is a Thomason equivalence if and only if the other is so.
This is a partial generalisation to $3$-categories of the approach used by Chiche in~\cite{chiche_homotopy} to show that the minimal fundamental localiser of $\nCat{2}$ is given by the class of Thomason equivalences, thus showing that $2$-categories intrinsically model homotopy types. In order to generalise this result to higher category, one would need to prove that both the $3$-functors of the above span are aspherical, \ie they satisfy the $3$\nbd-cat\-egorical generalisation of Quillen's Theorem~A. The author does not even know if this is true for $2$-categories. In fact, Chiche avoids this problem introducing a notion of asphericity for oplax $2$-functors, that seems out of reach for higher dimension. We can nonetheless say something interesting about the homotopy theory of normalised $3$-functors, as pointed out in the following remark. \end{exem} \begin{rem} The nerve functor $\SNn{l} \colon \widetilde{\nCat{3}} \to {\mathcal{S}\mspace{-2.mu}\it{et}}Simp$ allows us to define a class of weak equivalences on $\widetilde{\nCat{3}}$, that we call \emph{Thomason equivalences}. More precisely, a normalised oplax $3$-functor is a Thomason equivalence if and only if its image via $\SNn{l}$ is a weak homotopy equivalence. Since the triangle \[ \begin{tikzcd}[column sep=small] & {\mathcal{S}\mspace{-2.mu}\it{et}}Simp & \\ {\mathcal{C}\mspace{-2.mu}\it{at}} \ar[ur, "N"] \ar[rr, hookrightarrow] && \widetilde{\nCat{3}} \ar[ul, "\SNn{l}"'] \end{tikzcd} \] commutes, a classical result of Illusie--Quillen tells us that the composite functor \[ \begin{tikzcd} {\mathcal{S}\mspace{-2.mu}\it{et}}Simp \ar[r, "i_\cDelta"] & \widetilde{\nCat{3}} \ar[r, "\SNn{l}"] & {\mathcal{S}\mspace{-2.mu}\it{et}}Simp \end{tikzcd} \] is weakly homotopy equivalent to the identity on simplicial sets. Moreover, Example~\ref{exem:sup_we} gives us that a normalised oplax $3$-functor $u \colon A \to B$ is a Thomason equivalence if and only if the functor $i_\cDelta(\SNn{l}(u)) \colon i_\cDelta(\SNn{l}(A)) \to i_\cDelta(\SNn{l}(B))$ is so. Hence, the composite functor \[ \begin{tikzcd} \widetilde{\nCat{3}} \ar[r, "\SNn{l}"] & {\mathcal{S}\mspace{-2.mu}\it{et}}Simp \ar[r, "i_\cDelta"] & \widetilde{\nCat{3}} \end{tikzcd} \] is homotopic to the identity functor on $\widetilde{\nCat{3}}$. We conclude that the nerve functor $\SNn{l} \colon \widetilde{\nCat{3}} \to {\mathcal{S}\mspace{-2.mu}\it{et}}Simp$ induces an equivalence at the level of the underlying homotopy categories. \end{rem} \begin{comment} \section{Non-regular version} \begin{paragr} We now define a reflexive $\infty$-graph $\tilde{A}$ associated to the $1$-category $A$. The objects of $\tilde A$ are precisely the objects of $A$. For any pair of objects $(a, a')$ of $A$, we then define a reflexive $\infty$-graph $\tilde{A}(a, a')$ whose objects, \ie the $1$-cells of $\tilde{A}$ having $a$ as source and $a'$ as target, are given by the set of non-degenerate simplices $x \colon \Deltan{n} \to A$ of $N_1(A)$ such that $x_0 = a$ and $x_n = a'$, for $n \ge 0$; that is to say, the objects of $\tilde{A}(a, a')$ are the tuples $(f_1, \dots, f_n)$ of composable, non-trivial arrows of $A$ such that $s(f_1) = a$ and $t(f_n) = a'$, with $n \ge 0$. The empty tuple $\Deltan{0} \to A$, where necessarily $a = a'$, corresponds to the object $a$ by definition and it is the identity $1$-cell of the object $a$ of $\tilde{A}$. 
\end{paragr} \begin{paragr} Consider two objects $x \colon \Deltan{m} \to A$ and $y \colon \Deltan{n} \to A$ of $\tilde{A}(a, a')$, \ie two tuples $x = (f_1, \dots, f_m)$ and $y = (g_1, \dots, g_n)$ as described above. Let $\operatorname{\mathsf{Par}}t_{x, y}$ denote the set of partitions $(I_1, \dots, I_k)$ of $\Deltan{n}$ such that: \begin{itemize} \item we have $m \leq k \leq n$; \item $\min(I_1) = 0$, $\max(I_k) = n$ and $\max(I_j) +1= \min(I_{j+1})$ for all $j = 1, \dots, k-1$; \item there is a subfamily $(I_{\nu(1)}, \dots, I_{\nu(m)})$ such that $y(I_{\nu(p)}) = x(\atom{p-1, p}) = f_p$, for all $p = 1, \dots, m$; \item for any $j$ which does not belong to $\{\nu(1), \dots , \nu(m)\}$, we have that $y(I_j)$ is a trivial arrow of $A$; \item for any $1 \le j < k$, at least one among $j$ and $j+1$ belongs to the family $\{\nu(1), \dots , \nu(m)\}$. \end{itemize} Equivalently, an element of $\operatorname{\mathsf{Par}}t_{x, y}$ is a degenerate map $\pi \colon \Deltan{n} \to \Deltan{\ell}$ and a non-degenerate $\ell$-simplex $z \colon \Deltan{\ell} \to A$ of $N_\infty(A)$ such that: \begin{itemize} \item we have $m \leq \ell \leq n$; \item we have $z \pi = y$; \item there is a unique strictly increasing active morphism $\alpha \colon \Deltan{m} \to \Deltan{\ell}$ of $\cDelta$ such that $z \alpha = x$ and such that the morphism $\pi \colon \Deltan{n} \to \Deltan{\ell}$ collapses the arrow $\atom{p, p+1}$ of $\Deltan{n}$ only if this belongs to the fibre of $\alpha(i)$, for some $0 \le i \le m$. \end{itemize} Indeed, given a partition $(I_1, \dots, I_k)$ and a subfamily $(\nu(p))_{p=1}^m$ as above, we set \[ i_p = |I_{\nu(p)}| \quad , \quad \ell_q = \sum_{p=1}^q i_p \quadet \ell = \ell_k \] and the morphism $\pi \colon \Deltan{n} \to \Deltan{\ell}$ to map $I_{\nu(p)}$ to $\{\ell_{p-1}, \ell_{p-1}+1, \dots , \ell_p\}$ and to collapse to a point the elements of $I_j$ with $j$ not in $\{\nu(1), \dots , \nu(m)\}$; the simplex $z$ is forced by the definition of $\pi$ and the fact that $z\pi = y$, so that, if we set \[ m_j = \min(I_j) \quadet M_j = \max(I_j)\,, \] we have that for every $1 \le p < m$ and any $1 \le q \le i_{p+1}$ \[ z(\atom{\ell_{p} + q-1, \ell_{p} + q}) = y(\atom{m_{\nu(p)} + q-1, m_{\nu(p)} + q}) = g_{m_{\nu(p)} + q}\,; \] finally, the morphism $\alpha \colon \Deltan{m} \to \Deltan{\ell}$ is defined by mapping $\atom{p}$ to $\atom{i_p}$, with $i_0 = 0$. Conversely, given a pair $(\pi, z)$ and $\alpha$ as above, we define the partition of $\Deltan{n}$ as follows: the subfamily $(I_{\nu(1)}, \dots, I_{\nu(\mu)})$ as the largest \emph{active} subsets of $\Deltan{n}$ under $\pi$, that is the largest subsets $S$ such that the restriction morphism $\pi_{|S}$ is a bijection onto its image; in particular, $\pi(I_{\nu(p)}) = \alpha(\atom{p-1, p})$. The other elements $I_j$ of the partitions are given by the fibres of the points $\alpha(i)$ over $\pi$. Notice that given a partition $(I_1, \dots, I_k)$ in $\operatorname{\mathsf{Par}}t_{x,y}$, corresponding to the pair $(\pi, z)$, the elements $I_j$ which are collapsed by $\pi$ are precisely those for which $y(I_j)$ is an identity of $A$. To wrap up, with notations as above, we are saying that \[ f_p = g_{M_{\nu(p)}} \comp_0 g_{M_{\nu(p)} -1} \comp_0 \dots \comp_0 g_{m_{\nu(p)} +1}\,, \] for $1 \le p \le m$, and that \[ g_{M_j} \comp_0 g_{M_{j}-1} \comp_0 \dots \comp_0 g_{m_j+1} \] is an identity arrow of $A$ for all $j$ not in $\{\nu(1), \dots , \nu(m)\}$. 
\end{paragr} \begin{paragr} We define $\tilde{A}(x, y)$ as follows: \begin{itemize} \item if $x \colon \Deltan{0} \to A$ is a $0$-simplex of $A$, we set $\tilde{A}(x, y)$ to be the final \emph{\oo-category} $\On{0}$; \item otherwise, $\tilde{A}(x, y)$ is an \emph{\oo-category} defined as a sum of \oo-categories $\tilde{A}_{(\pi, z)}$ indexed over the elements $(\pi, z)$ of $\operatorname{\mathsf{Par}}t_{x, y}$. Given such an element $(\pi, z)$, with morphism $\alpha \colon \Deltan{m} \to \Deltan{\ell}$ and corresponding partition $(I_1, \dots , I_k)$, the \oo-category $\tilde{A}_{(\pi, z)}$ is defined as the product of \oo-categories \[ \prod_{j = 1}^k \tilde{A}_{I_j}\,, \] where $\tilde{A}_{I_j}$ is just $\On{0}$ if $y(I_j)$ is a trivial arrow of $A$ and otherwise if $I_j = I_{\nu(p)}$, for some $1 \le p \le m$, we set \[ \tilde{A}_{I_{\nu(p)}} = \On{\omega}\bigl(\atom{0, i_p}, \atom{0, 1} + \atom{1, 2} + \dots + \atom{i_p-1, i_p}\bigr)\,. \] \end{itemize} If the index of the sum above is empty, that is there is no partition of $\Deltan{n}$ satisfying the conditions described in the preceding paragraph, then $\tilde{A}(x, y)$ is set to be the empty \oo-category. Notice that this happens in particular every time $m > n$. Observe also that if there is a cell between $x = (f_1, \dots, f_m)$ and $y = (g_1, \dots, g_n)$, then necessarily \[ f_m \comp_0 \dots \comp_0 f_1 = g_n \comp_0 \dots \comp_0 g_1\,. \] For any $1$-cell $x = (f_1, \dots, f_n)$ of $\tilde{A}$, the identity of $x$ is given by the only trivial $2$-cell of the \oo-category \[ \prod_{j=1}^n\tilde{A}_{\{f_j\}}\,, \] that is where the partition of $\Deltan{n}$ is given by $(\{f_1\}, \{f_2\}, \dots , \{f_n\})$, or equivalently the pair $(\pi, z)$ is given by $\pi = \id{\Deltan{n}}$ and $z = x$. Indeed, observe that for any arrow $f$ of $A$, we have that $\tilde{A}_{\{f\}}$ is defined to be $\On{\omega}(\atom{0, 1}, \atom{0, 1})$, which is the terminal \oo-category. This ends the definition of $\tilde{A}$ as a reflexive \oo-graph. \end{paragr} \begin{paragr} In this paragraph we want to endow the reflexive \oo-graph $\tilde{A}(a, a')$ with the structure of an \oo-category, for any pair $(a, a')$ of objects of $\tilde{A}$. In order to do so, for any $x = (f_1, \dots, f_\ell)$, $y = (g_1, \dots, g_m)$ and $t = (h_1, \dots, h_n)$ of $\tilde{A}(a, a')$, we want to define an \oo-functor \[ \tilde{A}(y, t) \times \tilde{A}(x, y) \to \tilde{A}(x, t)\,. \] Without any loss of generality, we can suppose $\ell \le m \le n$ (see the preceding paragraph) and consider the case $\ell >0$, since the other case is trivial. Let us fix an element $(I_1, \dots, I_k)$ of $\operatorname{\mathsf{Par}}t_{x, y}$, corresponding to the pair $(\pi, t)$ with map $\alpha \colon \Deltan{\ell} \to \Deltan{m'}$ and an element $(I'_1, \dots, I'_{k'})$ of $\operatorname{\mathsf{Par}}t_{y, t}$, corresponding to the pair $(\pi', t')$ with map $\alpha' \colon \Deltan{m} \to \Deltan{n'}$; we also set \[ \phi(p) = \min(I_{\nu(p)})+1\,,\quad \psi(p) = \max(I_{\nu(p)}) \] as well as \[ \phi'(q) = \min(I'_{\nu'(q)})+1\,,\quad \psi'(q) = \max(I'_{\nu'(q)})\,, \] for all $1 \le p \le \ell$ and $1 \le q \le m$. We have to give an \oo-functor which has \begin{equation}\label{eq:mapping-oo-category} \tilde{A}_{(\pi', z')} \times \tilde{A}_{(\pi, z)} = \prod_{j=1}^{k'} \tilde{A}_{I'_j} \times \prod_{j=1}^k \tilde{A}_{I_j} \end{equation} as source. 
Notice that for every $1 \le p \le \ell$ we have \[ f_p = g_{\psi(p)} \comp_0 g_{\psi(p) -1} \comp_0 \dots \comp_0 g_{\phi(p)}\,, \] and for every $1 \le q \le m$ we have \[ g_q = h_{\psi'(q)} \comp_0 h_{\psi'(q) -1} \comp_0 \dots \comp_0 h_{\phi'(q)}\,, \] so that in fact \begin{equation*} \begin{split} f_p &= h_{\psi'(\psi(p))} \comp_0 h_{\psi'(\psi(p)) -1} \comp_0 \dots \comp_0 h_{\phi'(\psi(p))} \\ &\ \comp_0 h_{\psi'(\psi(p)-1)} \comp_0 h_{\psi'(\psi(p)-1)-1} \comp_0 \dots \comp_0 h_{\phi'(\psi(p)-1)} \\ &\ \comp_0 \dots \\ &\ \comp_0 h_{\psi'(\phi(p))} \comp_0 h_{\psi'(\phi(p))-1} \comp_0 \dots \comp_0 h_{\phi'(\phi(p))}\,. \end{split} \end{equation*} for every $1 \le p \le \ell$. It is straightforward to check that the commutative diagram \[ \begin{tikzcd} \Deltan{m} \ar[rr, "\alpha'"] \ar[d, "\pi"'] && \Deltan{n'} \ar[ddl, bend left, "z'"] \\ \Deltan{m'} \ar[dr, "z"'] && \\ & A & \end{tikzcd} \] of functors can be uniquely completed to the diagram \[ \begin{tikzcd} \Deltan{m} \ar[rr, "\alpha'"] \ar[d, "\pi"'] && \Deltan{n'} \ar[d, "\pi''"] \\ \Deltan{m'} \ar[dr, "z"'] \ar[rr, "\alpha''"] && \Deltan{n''} \ar[dl, "z''"] \\ & A & \end{tikzcd}\,, \] where $\pi'' \colon \Deltan{n'} \to \Deltan{n''}$ is a degenerate morphism and $\alpha'' \colon \Deltan{m'} \to \Deltan{n''}$ is a strictly increasing morphism. We want the target of the \oo-functor we are going to define to be the \oo-category $\tilde{A}_{(\pi'', z'')}$, where $(\pi'', z'')$ is the element of $\operatorname{\mathsf{Par}}t_{x, t}$ constructed above, with associated morphism $\alpha''\alpha \colon \Deltan{\ell} \to \Deltan{n''}$. Let $(I_1, \dots, I_r)$ be the partition associated to $(\pi, z)$, with $\nu \colon \{1, \dots, \ell\} \to \{1, \dots, r\}$ defining the subfamily, and $(I'_1, \dots, I'_s)$ be the partition associated to $(\pi' z')$, with $\nu' \colon \{1, \dots, m\} \to \{1, \dots, s\}$ defining the subfamily. For any $1 \le j \le r$, there are unique integer $k_j$ and $J$ such that $y(I_j)$ is equal to the image in $A$ under $t$ of the subfamily $(I'_{\nu'(k_j)}, I'_{\nu'(k_j+1)}, \dots , I'_{\nu'(k_j + J)})$ and such that $\alpha'(I_j)$ as a subset of $\Deltan{n'}$ is equal to the image of this subfamily under $\pi'$; notice that if $j$ is of the form $\nu(q)$, then $J = i_q-1$. Let $1 \le j \le r$ be an integer not in the image of the function $\nu$. The \oo-category $\tilde{A}_{I_j}$ is defined to be the terminal \oo-category $\On{0}$. For any arrow $a$ of $\On{\omega}$, we have the isomorphism $\On{\omega}(a, a) \cong \On{0}$. Consider the subset $\alpha'(I_j)$ of $\Deltan{n'}$ and its preimage $P_j = (\pi')^{-1}\bigl(\alpha(I_j)\bigr)$ in $\Deltan{n}$. Denote $p_j = |P_j|$ the cardinality of $P_j$, $m_j$ the minimum element of $P_j$ and set $a_j$ to be the following arrow \[ a_j = \sum_{i=0}^{p_j-1} \atom{m_j+i, m_j+1+i} \] of $\On{\omega}$. In defining the \oo-functor we will have to identify the \oo-category $\tilde{A}_{I_j}$ with the \oo-category $\On{\omega}(a_j, a_j)$. This is because $P_j$ is a component of the partition defined by the element $(\pi'', z'')$ of $\operatorname{\mathsf{Par}}t_{x, t}$ and the \oo-functor restricted to the component $\tilde{A}_{I_j}$ is defined to be the identity $\tilde{A}_{I_j} \to \tilde{A}_{P_j}$. Let $1 \le q \le \ell$ be an integer. The \oo-category $\tilde{A}_{I_{\nu(q)}}$ is by definition \[ \On{\omega}(\atom{0, i_q}, \atom{0, 1} + \dots + \atom{i_q-1, i_q}). 
\] The image $\alpha'(I_{\nu(q)})$ is equal to the image under $\pi'$ of the union of the family \[ I'_{\nu'(k_{\nu(q)})}, I'_{\nu'(k_{\nu(q)})+1} , \dots , I'_{\nu'(k_{\nu(q) + Q})} \] of subsets of $\Deltan{n}$, where $Q = i_q -1$. Let $\tilde Q$ be the integer such that \[ \nu'(k_{\nu(q) + Q}) = \nu(k_{\nu(q)}) + \tilde Q\,; \] notice that clearly $Q \le \tilde Q$. For every $0 \le j \le \tilde Q$, we set $\nu'(k_{\nu(q)}) + j = \lambda(q, j)$ and \[ m'(j) = \min(I'_{\lambda(q, j)}) \quadet M'(j) = \max(I'_{\lambda(q, j)})\,. \] Obviously, $M'(j) + 1 = m(j+1)$ for $0 \le j < \tilde Q$. Consider the following arrows \[ b'_j = \atom{m'(j), m'(j)+1} + \dots + \atom{M'(j)-1, M'(j)} \] of $\On{\omega}$, so that \[ \tilde{A}_{I'_{\lambda(q, j)}} \cong \On{\omega}(\atom{m'(j), M'(j)}, b'_j) \] for every $0 \le j \le \tilde Q$. for every $1 \le i \le m$, we have that the \oo-category \[ \On{\omega}\bigl(\atom{0, \psi(i)}, \atom{0, 1} + \dots + \atom{\psi(i)-1, \psi(i)}\bigr) \] is canonically isomorphic to the \oo-category \[ \On{\omega}\bigl(\atom{\Psi(i), \Psi(i) + \psi(i)}, \atom{\Psi(i), \Psi(i)+ 1} + \dots + \atom{\Psi(i) + \psi(i)-1, \Psi(i) + \psi(i)}\bigr)\,, \] where we set \[ \Psi(1) = 0 \quadet \Psi(i) = \psi(1) + \psi(2) + \dots \psi(i-1)\,. \] In order to simplify the notations, let us set \[ b_i = \atom{\Psi(i), \Psi(i) + \psi(i)} \quad , \quad c_i = \sum_{k= 0}^{\phi(i)-1} \atom{\Psi(i) + k, \Psi(i) + k + 1} \quad \text{for }1 \le i \le m\,, \] and also \[ b = b_1 + b_2 + \dots + b_m \quadet c = c_1 + c_2 + \dots + c_m\,. \] There is a canonical \oo-functor \[ \prod_{i=1}^m \On{\omega}(b_i, c_i) \to \On{\omega}(b, c) \] given by ``horizontal composition'' $\comp_0$, \ie mapping a tuple $(x_1, \dots, x_m)$ of $p$-cells to the $p$-cell $x_1 \comp_0 x_2 \comp_0 \dots \comp_0 x_m$ of $\On{\omega}(b, c)$. Proposition~\ref{prop:2-cells_orientals} actually shows that this \oo-functor is an isomorphism of \oo-categories. The same argument entails that the \oo-category \[ \prod_{i=1}^{\ell} \On{\omega}\bigl(\atom{0, \phi(i)}, \atom{0, 1} + \dots + \atom{\phi(i)-1, \phi(i)}\bigr) \] is canonically isomorphic to \[ \On{\omega}(a, b) \] via the ``horizontal composition'' $\comp_0$, where we have set \[ a = \sum_{i=1}^{\ell} \atom{\Phi(i), \Phi(i) + \phi(i)} \quad , \quad \Phi(i) = \sum_{k= 1}^{i-1} \phi(k)\,. \] The \oo-category in~\eqref{eq:mapping-oo-category} is thus canonically isomorphic to the \oo-category \[ \On{\omega}(b, c) \times \On{\omega}(a, b)\,. \] On the other hand, the target \oo-category of the \oo-functor we are set to construct is \[ \tilde{A}(x, z) = \prod_{i=1}^\ell \tilde{A}\bigl((f_i), (h_{\psi\phi(i-1)+1}, \dots, h_{\psi\phi(i)} \bigr)\,, \] which by definition is \[ \prod_{i=1}^\ell \On{\omega}\bigl(\atom{0, \phi(i)}, \atom{0, 1} + \dots + \atom{\phi(i)-1, \phi(i)}\bigr). \] The same argument used above gives us that this \oo-category is canonically isomorphic to \[ \On{\omega}(a, c)\,. \] We then define the \oo-functor \[ \On{\omega}(b, c) \times \On{\omega}(a, b) \to \On{\omega}(a, c) \] to be the ``vertical composition'' $\comp_1$, \ie a pair of $p$-cells $(x, y)$ of the source is mapped to the $p$-cell $x \comp_1 y$ of $\On{\omega}(a, c)$. This endows the reflexive \oo-graph $\tilde{A}(a, a')$ with the structure of an \oo-category. \end{paragr} \begin{paragr} In this paragraph we put an \oo-category structure on the reflexive \oo-graph~$\tilde{A}$. 
In order to do this, we shall define, for any objects $a$, $a'$ and $a''$ of $\tilde{A}$, an \oo-functor \[ \tilde{A}(a', a'') \times \tilde{A}(a, a') \to \tilde{A}(a, a'')\,. \] As \oo-categories are categories enriched in \oo-categories, an \oo-functor $F \colon C \to D$ between two \oo-categories $C$ and $D$ can be given by a map $F_0 \colon C_0 \to D_0$ on objects and a family of \oo-functors $C(c, c') \to D(Fc, Fc')$, indexed by the pairs of objects $(c, c')$ of $C$, satisfying the axioms described in paragraph~\ref{def_enriched}. In light of the above, we have to provide a map \[ \tilde{A}(a', a'')_0 \times \tilde{A}(a, a')_0 \to \tilde{A}(a, a'')_0\,, \] that we define by sending a pair $(y, x)$ with $x \colon \Deltan{m} \to A$ and $y \colon \Deltan{n} \to A$ to the concatenation simplex \[ y \cdot x \colon \Deltan{m+n} \to A \quad , \quad \atom{i, i+1} \mapsto \begin{cases} x_{\{i, i+1\}}\,, & \text{if $i < m$,}\\ y_{\{i-m, i+1-m\}}\,, & \text{if $i \ge m$.} \end{cases} \] Furthermore, for any choice of objects $(y, x)$ and $(t, z)$ of $\tilde{A}(a', a'')_0 \times \tilde{A}(a, a')_0$, we have to provide an \oo-functor \[ \tilde{A}(y, t) \times \tilde{A}(x, z) \to \tilde{A}(y\cdot x, t \cdot z)\,. \] Following the reasoning of the previous paragraph, we know that there are integers \[ 0 = i_0 < i_1 < \dots < i_m \quadet 0 = j_0 < j_1 < \dots < j_n \] such that, if we set \begin{align*} a = \sum_{k=0}^{m-1} \atom{i_k, i_{k+1}}\,, && c = \sum_{p=0}^{i_m -1} \atom{p, p+1}\,, \\ b = \sum_{k=0}^{n-1} \atom{j_k, j_{k+1}}\,, && d = \sum_{p=0}^{j_n -1} \atom{p, p+1}\,, \end{align*} then we have canonical isomorphisms \[ \tilde{A}(x, z) \cong \On{\omega}(a, c) \quadet \tilde{A}(y, t) \cong \On{\omega}(b, d) \] of \oo-categories. Moreover, setting \[ b' = \sum_{k=0}^{n-1} \atom{i_m + j_k, i_m + j_{k+1}} \quadet d' = \sum_{p=0}^{j_n -1} \atom{i_m + p, i_m + p+1}\,, \] we have a canonical isomorphism \[ \On{\omega}(b, d) \cong \On{\omega}(b', d') \] and by the same argument we can build a further canonical isomorphism \[ \tilde{A}(y\cdot x, t \cdot z) \cong \On{\omega}(a + b', c + d') \] of \oo-categories. We are thus left to provide an \oo-functor \[ \On{\omega}(b', d') \times \On{\omega}(a, c) \to \On{\omega}(a + b', c + d')\,, \] which we set to be the ``horizontal composition'' by $\comp_0$. Notice that by Proposition~\ref{prop:2-cells_orientals}, this \oo-functor is in fact an isomorphism. \end{paragr} \todo{Check axioms for the functor} \begin{lemme} Let $a$ and $a'$ be two objects of $A$ and consider two $1$-cells $x = (f_1, \dots, f_m)$ and $y = (g_1, \dots, g_n)$ of $\tilde{A}(a, a')$. Then there is a zig-zag of $2$-cells linking $x$ to $y$ if and only if \[ f_m \comp_0 \dots \comp_0 f_1 = g_n \comp_0 \dots \comp_0 g_1 \] in $A$. \end{lemme} \begin{proof} Indeed, this is trivially true if $x$, and then also $y$, is a trivial cell of $A$. So let us suppose $m>0$ and $n>0$. On the one hand, condition~\ref{item:cells-tilde-ii} immediately implies that two $1$-cells $x$ and $y$ as above are connected by a zig-zag of $2$-cells only if \[ f_m \comp_0 \dots \comp_0 f_1 = g_n \comp_0 \dots \comp_0 g_1\,. \] On the other hand, let \[ h = f_m \comp_0 \dots \comp_0 f_1 = g_n \comp_0 \dots \comp_0 g_1 \] and consider the $1$-cell $z = (h)$ of $\tilde{A}$. 
It results immediately from the structure of the oriental $\On{\omega}$ that the \oo-categories \[ \tilde{A}\bigl((h), x\bigr) = \On{\omega}\bigl(\atom{0, m}, \atom{0, 1} + \dots + \atom{m-1, m}\bigr) \] and \[ \tilde{A}\bigl((h), y\bigr) = \On{\omega}\bigl(\atom{0, n}, \atom{0, 1} + \dots + \atom{n-1, n}\bigr) \] are non-empty; hence $x$ and $y$ are connected by a zig-zag of length two. \end{proof} \begin{coro} We have $\ti{1}(\tilde{A}) \cong A$. \end{coro} \begin{proof} We have a canonical \oo-functor $\eps_A \colon \tilde{A} \to A$ which is the identity on objects and that maps a $1$-cell $x = (f_1, \dots, f_n)$ to $f_n \comp_0 \dots \comp_0 f_1$ if $n>0$ and a $0$-simplex $a \colon \Deltan{0} \to A$ to the identity of $a$ in $A$. The identity is clearly preserved and the functoriality follows by the definition of $0$-composition of $1$-cells of $\tilde{A}$ by concatenation and moreover the assignment is well-defined by the previous lemma. We are left to show that for any pair of objects $(a, a')$ of $A$, the map \[ \ti{0}(A)(a, a') \to A(a, a') \] is a bijection. It is clearly surjective, since for any morphism $f \colon a \to a'$ of $A$ we have $\eps_A\bigl((f)) = f$ (and similarly if $f$ is an identity cell of $A$). It results from the previous lemma that this map is also injective, hence completing the proof of the corollary. \end{proof} \begin{paragr} We now turn to constructing a normalised oplax $3$-functor $\eta_A \colon A \to \ti{3}(\tilde{A})$. \begin{description} \item[$\TreeDot$] The map $(\eta_A)_{\treeDot}$ is defined to be the identity map on objects. \item[$\TreeLog$] The map $(\eta_A)_{\treeLog}$ assigns to any non-trivial morphism $f \colon a \to a'$ of $A$ the $1$-cell $(f) \colon \Deltan{1} \to A$ of $\tilde{A}$ and to any identity $1_a$ of $A$ the \emph{trivial} $1$-cell $a \colon \Deltan{0} \to A$ of $\tilde{A}$. \item[$\TreeV$] The map $(\eta_A)_{\treeV}$ assigns to any pair of composable morphisms \[ \begin{tikzcd} a \ar[r, "f"] & a' \ar[r, "g"] & a'' \end{tikzcd} \] of $A$ the unique $2$-cell $(\eta_A)_{\treeV}(g, f)$ of $\tilde{A}$ with source $(g \comp_0 f)$ and target $(f, g)$, \ie the unique element $\atom{0, 1, 2}$ of the set \[ \On{\omega}\bigl(\atom{0, 2}, \atom{0, 1} + \atom{1, 2})\,. \] \item[$\TreeW$] The map $(\eta_A)_{\treeW}$, assigns to any triple of composable morphisms \[ \begin{tikzcd}[column sep=small] a \ar[r, "f"] & a' \ar[r, "g"] & a'' \ar[r, "h"] & a''' \end{tikzcd} \] of $A$ the unique $3$-cell $(\eta_A)_{\treeW}(h, g, f)$ of $\tilde{A}$ with $1$-source $(h\comp_0 g \comp_0 f)$ and $1$-target $(h, g, f)$, \ie the unique arrow $\atom{0, 1, 2, 3}$ of the $1$-category \[ \On{\omega}\bigl(\atom{0, 3}, \atom{0, 1} + \atom{1, 2} + \atom{2, 3}\bigr)\,. \] \end{description} Notice that by definition we have that $1_{(\eta_A)_{\treeDot}(a)}$, that is the $1$-cell $a \colon \Deltan{0} \to A$, is precisely $(\eta_A)_{\treeL}(1_a)$; the other conditions of normalisation are trivial. We are left to check the coherence for the tree \scalebox{0.3}{ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [] [] [] [] ] \end{forest} }. Consider four composable morphisms of~$A$ \[ \begin{tikzcd}[column sep=small] \bullet \ar[r, "f"] & \bullet \ar[r, "g"] & \bullet \ar[r, "h"] & \bullet \ar[r, "i"] & \bullet \end{tikzcd}\ . 
\] We have to show that the $3$-cells \begin{gather*} (\eta_A)_{\treeW}(i, h, g) \comp_0 (\eta_A)_{\treeLog}(f) \comp_1 (\eta_A)_{\treeV}(i\comp_0 h \comp_0 g, f)\\ \comp_2\\ (\eta_A)_{\treeLog}(i) \comp_0 (\eta_A)_{\treeV}(h, g) \comp_0 (\eta_A)_{\treeLog}(f) \comp_1 (\eta_A)_{\treeW}(i, h \comp_0 g, f)\\ \comp_2 \\ (\eta_A)_{\treeLog}(i) \comp_0 (\eta_A)_{\treeW}(h, g, f) \comp_1 (\eta_A)_{\treeV}(i, h\comp_0 g \comp_0 f) \end{gather*} and \begin{gather*} (\eta_A)_{\treeV}(i, h) \comp_0 (\eta_A)_{\treeLog}(g) \comp_0 (\eta_A)_{\treeLog}(f) \comp_1 (\eta_A)_{\treeW}(ih, g, f) \\ \comp_2\\ (\eta_A)_{\treeLog}(i) \comp_0 (\eta_A)_{\treeLog}(h) \comp_0 (\eta_A)_{\treeV}(g, f) \comp_1 (\eta_A)_{\treeW}(i, h, gf) \end{gather*} of $\ti{3}(A)$ are equal, which is equivalent to exhibit a zig-zag of $4$-cells connecting them. In fact, they are precisely the target and the source of the unique $2$-cell of the $2$-category \[\ti{3}\tilde{A}\bigl((i\comp_0 h \comp_0 g \comp_0 f), (i, h, g, f)\bigr)\,,\] \ie the cell $\atom{0, 1, 2, 3, 4}$ of the $2$-category \[ \ti{3}\On{\omega}\bigl(\atom{0, 4}, \atom{0, 1} + \atom{1, 2} + \atom{2, 3} + \atom{3, 4})\,. \] \end{paragr} \begin{paragr} The construction of the preceding paragraph is in fact the cellular version of the truncation of a simplicial morphism, which we shall still denote by $\eta_A \colon N_\infty(A) \to N_\infty(\tilde{A})$. We shall dedicate the rest of the chapter to define such a map and moreover show that it is the unit map of the adjoint pair $(c_\infty, N_\infty)$, so that in particular $\tilde{A} \cong c_\infty(A)$. \end{paragr} \begin{paragr}\label{paragr:sections} Let $x \colon \Deltan{n} \to A$ be an $n$-simplex of $N_\infty(A)$ and consider its Eilenberg–Zilber decomposition $(\pi, y)$, where $\pi \colon \Deltan{n} \to \Deltan{m}$ is a degenerate map and $y \colon \Deltan{m} \to A$ is a non-degenerate $m$-simplex of $N_\infty(A)$ such that $y \pi = x$. Notice that for any section $\iota \colon \Deltan{m} \to \Deltan{n}$ of $\pi$, we have $x \iota = y \pi \iota = y$. We shall always consider the section given as follows. Set $f_i = x_{\{i-1, i\}}$ for $0 < i \le n$ and consider the sequence $1 \le i_1 < \dots < i_{n'} \le n$ of all integers such that $f_{i_\ell}$ is \emph{not} degenerate, for $1 \le \ell \le n'$. We define $\iota(0) = 0$, $\iota(n') = n$ and $\iota(\ell) = i_{\ell}$ for all $1 \le \ell < n'$. \end{paragr} \begin{paragr} Consider two simplices $a \colon \Deltan{p} \to A$ and $b \colon \Deltan{q} \to A$ of $N_\infty(A)$ and their Eilenberg--Zilber decompositions $(\pi, a')$ and $(\rho, b')$, where $\pi \colon \Deltan{p} \to \Deltan{p'}$, $\rho \colon \Deltan{q} \to \Deltan{q'}$ are morphisms of $\cDelta$ and $a' \colon \Deltan{p'} \to A$ and $b' \colon \Deltan{q'} \to A$ are two non-degenerate simplices of $N_\infty(A)$ such that $a' \pi = a$ and $b' \rho = b$. Suppose that $p \le q$ and that there is a strictly increasing morphism $\phi \colon \Deltan{p} \to \Deltan{q}$ of $\cDelta$ such that $b \phi = a$. We fix the following notation: \begin{itemize} \item $f_i$ for the arrow $a_{\{i-1, i\}}$ of $A$, for $1 \le i \le p$; \item $g_i$ for the arrow $b_{\{i-1, i\}}$ of $A$, for $1 \le i \le q$; \item $f'_i$ for the arrow $a'_{\{i-1, i\}}$ of $A$, for $1 \le i \le p'$; \item $g'_i$ for the arrow $b'_{\{i-1, i\}}$ of $A$, for $1 \le i \le q'$. 
\end{itemize} Consider the subfamily $(f_{i_k})_{k}$, with $k=1, \dots , p'$, of the family of arrows $(f_i)$ which are not identities, that is such that $f_{i_k} = f'_k$ for all $k=1, \dots, p'$. Similarly, we consider the subfamily $(g_{i_k})_k$, with $k=1, \dots , q'$ of the family of arrows $(g_i)$ which are not identities, that is such that $g_{i_k} = g'_k$ for all $k=1, \dots, q'$. By definition, we have \[ f_i = g_{\phi(i)} \comp_0 \dots \comp_0 g_{\phi(i-1)+1} \] for any $1 \le i \le p$ and so in particular \[ f_{i_k} = g_{\phi(i_k)} \comp_0 \dots \comp_0 g_{\phi(i_k-1)+1} \] for any $1 \le k \le p'$. We now let \[ q'' = \sum_{k=1}^{p'} \rho\phi(i_k) - (\rho\phi(i_k - 1) + 1) \] and consider the degenerate morphism $\rho' \colon \Deltan{q'} \to \Deltan{q''}$ of $\cDelta$ which collapses all the points $\ell$ such that $\rho\phi(i) \le \ell \le \rho\phi(i+1)$ whenever $i \neq i_k$ for all $k = 1 , \dots, p'$; equivalently, the morphism $\rho'$ collapses to identity all the arrows $\ell \to \ell+1$ of $\Deltan{q'}$ belonging to the image of $i \to i+1$ of $\Deltan{p}$ under $\rho\phi$ and such that $\rho(i) = \rho(i+1)$. We define a $q''$-simplex $b'' \colon \Deltan{q''} \to A$ as the unique simplex such that $a' \pi = b'' \rho' \rho \phi$, which is equivalent to ask that $b' = b'' \rho'$. Explicitly, if $\ell = \rho'\rho\phi(i_k) + j$, with $1 \le k \le p'$, $j \ge 1$ and $\ell < \rho'\rho\phi(i_k +1)$, then the arrow $\ell-1 \to \ell$ of $\Deltan{q''}$ is mapped to $g_{\rho\phi(i_k) + j}$ by $b''$. Then there exists a unique strictly increasing morphism $\phi' \colon \Deltan{p'} \to \Deltan{q''}$ of $\cDelta$ verifying $\phi' \pi = \rho'\rho \phi$. If we denote by $g''_i$ the morphisms $\b''_{\{i-1, i\}}$ of $A$, for $i = 1, \dots, q''$, then we have \begin{equation*} \begin{split} f'_k & = g_{\phi(i_k)} \comp_0 \dots \comp_0 g_{\phi(i_k-1)+1} \\ & = g''_{\phi'(k)} \comp_0 \dots \comp_0 g''_{\phi(k-1)+1} \end{split} \end{equation*} for any $k = 1, \dots, p'$. We observe that none all the morphisms of $A$ in the last line is degenerate, \ie an identity. \end{paragr} \begin{paragr}\label{paragr:induced_injection} Let $x \colon \Deltan{n} \to A$ be an $n$-simplex of $N_\infty(A)$ and consider its Eilenberg–Zilber decomposition $(\pi, y)$, where $\pi \colon \Deltan{n} \to \Deltan{m}$ is a degenerate map and $y \colon \Deltan{m} \to A$ is a non-degenerate $m$-simplex of $N_\infty(A)$ such that $y \pi = x$. We want to define an $n$-simplex $\tilde x \colon \On{n} \to \tilde{A}$ of $N_\infty(\tilde{A})$. We shall do so by first defining an $m$-simplex $\tilde y \colon \On{m} \to \A$ of $N_\infty(\A)$ and then by setting $\tilde x = \tilde y \On{\pi}$, that we shall work out in few steps. We shall denote by $\iota \colon \Deltan{m} \to \Deltan{n}$ the section of $\pi$ as defined in paragraph~\ref{paragr:sections}. \end{paragr} \begin{paragr} For any object $\atom{i}$ of $\On{m}$, with $0\le i \le m$, we set $\tilde y(\atom{i}) = y(i)$. For any $0 < i \le m$, we denote by $f_i$ the arrow $y_{\{i-1, i\}}$ of $A$ and consider the $1$-cell \[ a = \atom{i_0, i_1} + \atom{i_1, i_2} + \dots + \atom{i_{k-1}, i_k} \] of $\On{m}$, with $0 \le i_0 < i_1 < \dots < i_k \le m$, that we can see as a strictly increasing morphism $a \colon \Deltan{k} \to \Deltan{m}$. To this $1$-cell, it is canonically associated the $k$-simplex $z \colon \Deltan{k} \to A$ of $N_\infty(A)$ defined by \[ z_{\{p, p+1\}} = f_{i_{p+1}} \comp_0 \dots \comp_0 f_{i_p + 1}\,, \] that is to say $z = y a$. 
This $k$-simplex may be degenerate, so we consider its Eilenberg--Zilber decomposition $(\rho, z')$, with $\rho \colon \Deltan{k} \to \Deltan{k'}$ and $z' \colon \Deltan{k'} \to A$ such that $z' \rho = z$. We define $\tilde y(a)$ to be the $1$-cell $z'$ of $\tilde{A}$. If $\kappa \colon \Deltan{k'} \to \Deltan{k}$ is our preferred section of~$\rho$ as defined in paragraph~\ref{paragr:sections}, then we also have \[ z' = ya\kappa = x \iota a \kappa\,. \] \end{paragr} \begin{paragr} Consider two $1$-cells $a$ and $b$ of $\On{m}$ that we can write as two non-degenerate, that is strictly increasing, simplices \[ a \colon \Deltan{p} \to \Deltan{m} \quadet b \colon \Deltan{q} \to \Deltan{m}\,, \] and suppose they are such that $a(0) = b(0)$ and $a(p) = b(q)$. We shall first work under the hypothesis that $p= 1$. The simplices $ya$ and $yb$ of $N_\infty(A)$ may be degenerate, so by taking their Eilenberg--Zilber decompositions $(\rho, a')$ and $(\varpi, b')$ we get two non-degenerate simplicies $a' \colon \Deltan{p'} \to A$ and $b' \colon \Deltan{q'} \to A$ of $N_\infty(A)$, that also satisfy the relations \[ a' = y a \kappa \quadet b' = y b \lambda\,, \] if $\kappa \colon \Deltan{p'} \to \Deltan{p}$ and $\lambda \colon \Deltan{q'} \to \Deltan{q}$ are the preferred sections of $\rho$ and $\varpi$, respectively. There are just two options for $p'$ that we now describe. \begin{description} \item[$p'=0$] This happens precisely when $ya$ is a degenerate arrow of $A$, \ie an identity, and $\tilde{A}\bigl(\tilde y(a'), \tilde y(b')\bigr)$ is the terminal \oo-category. \item[$p'=1$] This happens if and only of the image of $ya$ is not an identity arrow of $A$. In this case, we have \begin{equation*} \begin{split} \tilde{A}\bigl(\tilde y(a'), \tilde y(b')\bigr) & = \On{\omega}\bigl(\atom{0, q'}, \atom{0, 1} + \dots + \atom{q'-1, q'}\bigr) \\ & = \On{q'}\bigl(\atom{0, q'}, \atom{0, 1} + \dots + \atom{q'-1, q'}\bigr) \end{split} \end{equation*} and the \oo-functor \[ \tilde y_{a, b} \colon \On{q}(a, b) \to \tilde{A}(\tilde y(a'), \tilde y(b')\bigr) \] is induced by $\On{\varpi} \colon \On{q} \to \On{q'}$. Observe that the injective morphism $\lambda$ induces an isomorphism \begin{equation*} \begin{split} \tilde{A}\bigl(\tilde y(a'), \tilde y(b')\bigr) & = \On{\omega}\bigl(\atom{0, q'}, \atom{0, 1} + \dots +\atom{q'-1, q'}\bigr)\\ & \cong\On{\omega}\bigl(\atom{0, q}, \atom{0, \lambda(1)} + \dots + \atom{\lambda(q'-1), q}\bigr)\,, \end{split} \end{equation*} which will turn useful in the following. \end{description} \end{paragr} \begin{paragr} Consider two $1$-cells $a$ and $b$ of $\On{m}$ that we can write as two non-degenerate, that is strictly increasing, simplices \[ a \colon \Deltan{p} \to \Deltan{m} \quadet b \colon \Deltan{q} \to \Deltan{m}\,, \] and suppose they are such that $a(0) = b(0)$ and $a(p) = b(q)$. The simplices $ya$ and $yb$ of $N_\infty(A)$ may be degenerate, so by taking their Eilenberg--Zilber decompositions $(\rho, a')$ and $(\varpi, b')$ we get two non-degenerate simplicies $a' \colon \Deltan{p'} \to A$ and $b' \colon \Deltan{q'} \to A$ of $N_\infty(A)$, that also satisfy the relations \[ a' = y a \kappa \quadet b' = y b \lambda\,, \] if $\kappa \colon \Deltan{p'} \to \Deltan{p}$ and $\lambda \colon \Deltan{q'} \to \Deltan{q}$ are the preferred sections of $\rho$ and $\varpi$, respectively. 
More explicitly, the $1$-cells $a$ and $b$ of $\On{m}$ correspond respectively to the $1$-cells \[ a = \atom{a_0, a_1} + \dots + \atom{a_{p-1}, a_p} \] and \[ b = \atom{b_0, b_1} + \dots + \atom{b_{q-1}, b_{q})} \] such that $a_0 = b_0$ and $a_p = b_{q}$, where we have set $a_i = a(i)$, for $0 \le i \le p$ and $b_j = b(j)$ for $0\le j \le q$. It results from Lemma~10.4 of~\cite{AraMaltsiCondE} that there is a $2$-cell from $a$ to $b$ if and only if there exists a strictly increasing morphism $\phi \colon \Deltan{p} \to \Deltan{q}$ of $\cDelta$ such that $a = b \phi$. Notice that if such a morphism $\phi$ exists, than it is unique, as $b$ is a monomorphism. We suppose that this is the case and we define an \oo-functor \[ \tilde x_{a, b} \colon \On{m}(a, b) \longrightarrow \tilde{A}\bigl(\tilde y(a), \tilde y(b)\bigr)\,. \] The source of this \oo-functor is the \oo-category \[ \On{m}(a, b) = \On{m}\bigl(\atom{a_0, a_1} + \dots + \atom{a_{p-1}, a_p}, \atom{b_0, b_1} + \dots + \atom{b_{q-1}, b_{q})}\bigr)\,, \] that by virtue of Proposition~\ref{prop:2-cells_orientals} is canonically isomorphic to \[ \prod_{i= 1}^{p} \On{m}\bigl(\atom{a_{i-1}, a_{i}}, \atom{b_{\phi(i-1)+1}, b_{\phi(i-1)+2}} + \dots + \atom{b_{\phi(i) -1}, b_{\phi(i)}}\bigr)\,, \] while the target \oo-category $\tilde{A}\bigl(\tilde y(a), \tilde y(b)\bigr) = \tilde{A}(a', b')$ is a sum of \oo-categories $\tilde{A}_\psi(a', b')$ indexed on strictly increasing morphisms $\psi \colon a' \to b'$ of $\cDelta$ such that $a' = b' \psi$. In particular, for the unique strictly increasing morphism $\phi' \colon \Deltan{p'} \to \Deltan{q'}$ satisfying $\phi' \rho = \varpi \phi$ (cf.~paragraph~\ref{paragr:induced_injection}), we have \[ \tilde{A}_{\phi'}(a', b') = \On{\omega}\bigl(\atom{0, \phi'(1)}, + \dots + \atom{\phi'(p'-1), \phi'(p')}, \atom{0, 1} + \dots + \atom{q'-1, q'}\bigr)\,. \] Now, this latter category is equal to the \oo-category \[ \On{\ell'}\bigl(\atom{0, \phi(1)}, + \dots + \atom{\phi(\ell-1), \phi(\ell)}, \atom{0, 1} + \dots + \atom{\ell'-1, \ell'}\bigr) \] and there is a canonical isomorphism between this \oo-category and the \oo-category $\On{m}(a, b)$. We define the \oo-functor $\tilde x_{a, b}$ to be this isomorphism of \oo-categories. We have to check that for any triple $(a, b, c)$ of composable $1$-cells of $\On{m}$ we have a commutative diagram \[ \begin{tikzcd} \On{m}(b, c) \times \On{m}(a, b) \ar[r, "\comp_1"] \ar[d, "\tilde x_{b, c} \times \tilde x_{a, b}"'] & \On{m}(a, c) \ar[d, "\tilde x_{a, c}"] \\ \tilde{A}\bigl(\tilde x(b), \tilde x(c)\bigr) \times \tilde{A}\bigl(\tilde x(a), \tilde x(b)\bigr) \ar[r, "\comp_1"] & \tilde{A}\bigl(\tilde x(a), \tilde x(c)\bigr) \end{tikzcd} \] of \oo-categories. Suppose that we have $a \colon \Deltan{p} \to A$, $b \colon \Deltan{q} \to A$ and $c \colon \Deltan{r} \to A$, with $1 \le p \le q \le r \le m$, and that $\phi \colon \Deltan{p} \to \Deltan{q}$ and $\psi \colon \Deltan{q} \to \Deltan{r}$ are the unique morphisms of $\cDelta$ such that $a = \phi b$ and $b = \psi c$. 
Thus we get \[ \tilde{A}\bigl(\tilde x(a), \tilde x(b)\bigr) = \On{q}\bigl(\atom{0, \phi(1)} + \dots + \atom{\phi(p-1), \phi(p)}, \atom{0, 1} + \dots + \atom{q-1, q}\bigr) \] and \[ \tilde{A}\bigl(\tilde x(b), \tilde x(c)\bigr) = \On{r}\bigl(\atom{0, \psi(1)} + \dots + \atom{\psi(q-1), \psi(q)}, \atom{0, 1} + \dots + \atom{r-1, r}\bigr) \] and \[ \tilde{A}\bigl(\tilde x(a), \tilde x(c)\bigr) = \On{r}\bigl(\atom{0, \psi\phi(1)} + \dots + \atom{\psi\phi(p-1), \psi\phi(p)}, \atom{0, 1} + \dots + \atom{r-1, r}\bigr)\,. \] Remember that the \oo-functor $\tilde{A}\bigl(\tilde x(b), \tilde x(c)\bigr) \times \tilde{A}\bigl(\tilde x(a), \tilde x(b)\bigr) \to \tilde{A}\bigl(\tilde x(a), \tilde x(c)\bigr)$ is defined by making use of the canonical isomorphism between $\tilde{A}\bigl(\tilde x(a), \tilde x(b)\bigr)$ and the \oo-category \[ \On{r}\bigl(\atom{0, \psi\phi(1)} + \dots + \atom{\psi\phi(p-1), \psi\phi(p)}, \atom{\psi(0), \psi(1)} + \dots + \atom{\psi(q-1), \psi(q)}\bigr)\,. \] \end{paragr} \end{comment} \appendix \section{Strict higher categories} \label{app:higher_cats} \begin{paragr}\label{def_enriched} Let \V~be a category. A \emph{\Vn-graph} $X$ is the data of a set $X_0$ of \emph{objects} and, for any $x, y$ in $X_0$, an object~$X(x, y)$ of~\V. A \emph{morphism of \Vn-graphs} $f \colon X \to Y$ is given by a function $f_0 \colon X_0 \to Y_0$ between the objects as well as morphisms \[ f_{x, y} \colon X(x, y) \to Y(fx, fy) \] of \V~for any $x, y$ in $X_0$. We denote by \Vn-$\pref{\G_1}$ the category of \Vn-graphs. Let $I$ be an object of \V. A \emph{reflexive (\V, $I$)\nbd-graph} $X$, or simply \emph{reflexive \V-graph} if the object $I$ is clear from the context, is a \V-graph endowed with a morphism $k_x \colon I \to X(x, x)$ for any element $x$ in $X_0$. A \emph{morphism of reflexive \Vn-graphs} $f \colon X \to Y$ is a morphism of \Vn-graphs such that $f_{x, x}\, k_x = k_{fx}$ for any $x$ in $X_0$. We denote by \Vn-$\pref{\Gr_1}$ the category of reflexive \Vn-graphs. Let $(\mathcal V, \otimes, I)$ be a monoidal category.
A $(\mathcal V, \otimes, I)$\nbd-category $A$, or simply a \Vn-category if the monoidal structure is clear, is a reflexive $(\mathcal V, I)$\nbd-graph endowed with morphisms \[ \operatorname{\mathsf{Hom}}_A(b, c) \otimes \operatorname{\mathsf{Hom}}_A(a, b) \to \operatorname{\mathsf{Hom}}_A(a, c) \] of \V, for any objects $a, b$ and $c$ in $A_0$, satisfying the associativity axioms \[ \begin{tikzcd} \operatorname{\mathsf{Hom}}_A(c, d) \otimes \operatorname{\mathsf{Hom}}_A(b, c) \otimes \operatorname{\mathsf{Hom}}_A(a, b) \ar[r] \ar[d] & \operatorname{\mathsf{Hom}}_A(c, d) \otimes \operatorname{\mathsf{Hom}}_A(a, c) \ar[d] \\ \operatorname{\mathsf{Hom}}_A(b, d) \otimes \operatorname{\mathsf{Hom}}_A(a, b) \ar[r] & \operatorname{\mathsf{Hom}}_A(a, d) \end{tikzcd} \] for any $a$, $b$, $c$ and $d$ in $A_0$, and the identity axioms \[ \begin{tikzcd}[column sep=6em] I \otimes \operatorname{\mathsf{Hom}}_A(a, b) \ar[r, "{k_b \otimes \operatorname{\mathsf{Hom}}_A(a, b)}"] & \operatorname{\mathsf{Hom}}_A(b, b) \otimes \operatorname{\mathsf{Hom}}_A(a, b) \ar[d] \\ \operatorname{\mathsf{Hom}}_A(a, b) \ar[u, "\cong"] \ar[r, equal]& \operatorname{\mathsf{Hom}}_A(a, b) \end{tikzcd} \] and \[ \begin{tikzcd}[column sep=6em] \operatorname{\mathsf{Hom}}_A(a, b) \otimes I \ar[r, "{\operatorname{\mathsf{Hom}}_A(a, b) \otimes k_a}"] & \operatorname{\mathsf{Hom}}_A(a, b) \otimes \operatorname{\mathsf{Hom}}_A(a, a) \ar[d] \\ \operatorname{\mathsf{Hom}}_A(a, b) \ar[u, "\cong"] \ar[r, equal]& \operatorname{\mathsf{Hom}}_A(a, b) \end{tikzcd} \] for any $a$ and $b$ in $A_0$. A \Vn-category is also widely known as a \Vn-enriched category or a category enriched in \V. A \emph{morphism of \Vn-categories} $\phi \colon A \to B$, also called a \Vn-enriched functor, is a morphism of the underlying reflexive \Vn-graphs which moreover commutes with the composition morphisms. We denote by \Vn-${\mathcal{C}\mspace{-2.mu}\it{at}}$ the category of \Vn-categories. It is easy to see that if the category \V{} has finite products, then the category \Vn-${\mathcal{C}\mspace{-2.mu}\it{at}}$ also has finite products. \end{paragr} \begin{paragr} For $n \ge 1$, the category $\nCat{(n+1)}$ of $(n+1)$-categories can be inductively defined as the category of $\nCat{n}$-categories, \ie of categories enriched in $\nCat{n}$ with respect to the cartesian monoidal structure. This provides a canonical functor $\nCat{n} \to \nCat{(n+1)}$ which admits a left adjoint $\tau^n$. The category $\nCat{\infty}$ can be defined as the limit of the tower \[ \begin{tikzcd} \dots \ar[r] & \nCat{(n+1)} \ar[r, "\tau^{n}"] & \nCat{n} \ar[r, "\tau^{n-1}"] & \nCat{(n-1)} \ar[r, "\tau^{n-2}"] & \dots \end{tikzcd} \] \end{paragr} \section{Steiner theory} In this section we present the theory of augmented directed complexes, introduced by Steiner in~\cite{Steiner1}. We follow closely the exposition given by Ara and Maltsiniotis in~\cite{AraMaltsiCondE} and~\cite{Joint}. \begin{paragr}\label{paragr:conv_comp} Unless explicitly stated, in this section we shall always write ``chain complex'' to mean ``chain complex of abelian groups in non-negative degrees with homological indexing''. We recall that a \ndef{homogeneous element}\index{homogeneous element} of a chain complex~$K$ is an element of a group $K_n$ for some $n \ge 0$. If $x$ is a homogeneous element of~$K$, we shall call the \ndef{degree}\index{homogeneous element!degree} of~$x$ the unique $n\ge 0$ for which $x$ belongs to $K_n$ and we shall denote it by~$|x|$.
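For instance, in the complex $\cC(\Deltan{2})$ described in paragraph~\ref{paragr:atoms_orientals} below, the element $(0, 1) + (1, 2)$ is homogeneous of degree $1$, whereas the sum $(0) + (0, 1)$ is not homogeneous.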
\end{paragr} \begin{paragr} An \ndef{augmented directed complex}\index{augmented directed complex} is a triple $(K, K^\ast, e)$ where \[ K = \begin{tikzcd}\cdots \ar[r, "d_{n+1}"] & K_n \ar[r, "d_n"] & K_{n-1} \ar[r, "d_{n-1}"] & \cdots \ar[r, "d_2"] & K_1 \ar[r, "d_1"] & K_0 \end{tikzcd} \] is a chain complex, $e \colon K_0 \to \Z$ is an augmentation (so that we have $e\, d_1 = 0$) and $K^\ast = (K^\ast_i)_{i \ge 0}$ is a graded set such that for any~$i \ge 0$ the set~$K^\ast_i$ is a submonoid of the abelian group~$K_i$. We shall call \ndef{positivity submonoids}\index{augmented directed complex!positivity submonoid} of $K$ the submonoids~$K^\ast_i$, with $i \ge 0$. We will often denote an augmented directed complex simply by its underlying chain complex, especially when the augmentation and the positivity structure are clear from the context. \end{paragr} \begin{rem} We warn the reader that we do not require any compatibility of the positivity submonoids with the differentials. \end{rem} \begin{paragr}\label{paragr:preorder_monoid} Let $(K, K^\ast, e)$ be an augmented directed complex. For any $i \ge 0$, the submonoid $K^\ast_i$ induces a preorder relation $\le$ on~$K_i$, compatible with the abelian group structure, defined by \[ x \le y \quaddefssi y - x \in K^\ast_i\ . \] In particular, we have \[ K^\ast_i = \{x \in K_i : x \ge 0\}\,. \] More precisely, for an augmented complex $(K, e)$ the additional structure given by the collection of submonoids $K^\ast$ is equivalent to endowing, for all $i \ge 0$, each abelian group~$K_i$ with a preorder relation compatible with the abelian group structure. \end{paragr} \begin{paragr} A \ndef{morphism of augmented directed complexes} $f$ from $(K, K^\ast, e)$ to $(K', K'^\ast, e')$ is a morphism of augmented chain complexes which moreover respects the submonoids of positivity. That is, $f \colon K \to K'$ is a morphism of chain complexes such that $e'\, f_0 = e$ and for any $i \ge 0$ the image $f(K^\ast_i)$ of the submonoid $K^\ast_i$ under $f$ is contained in~$K^{\prime\ast}_i$. This latter condition can be stated equivalently by saying that the morphism $f_i \colon K_i \to K'_i$ preserves the preorder $\le$ on~$K_i$ for all $i \ge 0$. We shall denote by~$\mathcal{C}_{\mathrm{ad}}$ the category of augmented directed complexes. \end{paragr} \begin{paragr}\label{paragr:def_basis} A \emph{basis}\index{augmented directed complex!with basis} of an augmented directed complex $K$ is a graded set $B = (B_i)_{i\geq 0}$ such that, for any $i\geq 0$, the set $B_i$ is both a basis for the $\Z$-module $K_i$ and a set of generators for the submonoid $K^\ast_i$ of $K_i$. We shall often identify a basis $B = (B_i)_{i \ge 0}$ with the set $\coprod_{i \ge 0} B_i$. If an augmented directed complex has a basis, then for any $i\geq 0$ the preorder relation of positivity on $K_i$ defined in paragraph~\ref{paragr:preorder_monoid} is a partial order relation and the elements of $B_i$ are the minimal elements of the poset $(K_i^\ast\setminus \{0\}, \le)$; in particular, if a basis of $K$ exists then it is unique. When an augmented directed complex has a basis, we shall say that the complex is \emph{with basis}. \end{paragr} \begin{paragr}\label{paragr:def_support} Let $K$ be an augmented directed complex with basis $B$. For any $i\geq 0$, an $i$\hyp{}homogeneous element~$x$ can be written uniquely as a linear combination of elements of $B_i$ \[ x = \sum_{b \in B_i} x_b\, b \] with integral coefficients.
The \emph{support}\index{support} of $x$, denoted by $\operatorname{supp}(x)$, is the (finite) set of elements of the basis appearing in this linear combination with non-zero coefficient. Every $i$\hyp{}homogeneous element can be written uniquely as the difference $x = x_+ - x_-$ of two positive $i$\hyp{}homogeneous elements with disjoint supports, where \[ x_+ = \sum_{\substack{b \in B_i \\ x_b > 0}} x_b\,b \qquad\text{and}\qquad x_- = -\sum_{\substack{b \in B_i \\ x_b < 0}} x_b\,b\,. \] \end{paragr} \begin{paragr}\label{paragr:def_atome} Let $K$ be an augmented directed complex with basis $B = (B_i)_{i\ge 0}$. For $i \ge 0$ and $x$ in $K_i$, we define a matrix \[ \atom{x}=\tabll{\atom{x}}{i}, \] where the elements $\atom{x}^\varepsilon_k$ are inductively defined by: \begin{itemize} \item $\atom{x}^0_i = x = \atom{x}^1_i$\,; \item $\atom{x}^0_{k - 1} = d(\atom{x}^0_k)_-$ and $\atom{x}^1_{k - 1} = d(\atom{x}^1_k)_+$\,, \,for $0 < k \leq i$\,. \end{itemize} We say that the basis $B$ of $K$ is \ndef{unital} \index{augmented directed complex!unital basis} if, for any $i\geq 0$ and any $x$ in $B_i$, we have the equality $e(\atom{x}^0_0) = 1 = e(\atom{x}^1_0)$. We shall say that an augmented directed complex $K$ is \ndef{with unital basis} if it is with basis and its unique basis is unital. \end{paragr} \begin{paragr}\label{paragr:def_Steiner} Let $K$ be an augmented directed complex with basis $B$. For $i\geq 0$, we denote by $\leq_i$ the smallest preorder relation on $B = \coprod_i B_i$ satisfying \[ x \leq_i y \quad\text{if}\quad |x|>i, |y|>i\text{ and }\operatorname{supp}{(\atom{x}^1_i)}\cap \operatorname{supp}{(\atom{y}^0_i)} \neq \vide. \] We say that the basis $B$ is \emph{loop-free} \index{augmented directed complex!loop-free basis} if, for any $i \ge 0$, the preorder relation $\leq_i$ is a partial order relation. We shall call \emph{Steiner complex}\index{Steiner complex} an augmented directed complex $K$ with unital and loop-free basis $B$. \end{paragr} \begin{paragr}\label{paragr:def_le_N} Let $K$ be an augmented directed complex with basis $B = \coprod_{i \ge 0} B_i$. We shall denote by~$\leN$ the smallest preorder relation on $B$ satisfying \[ x \leN y \quad\text{if}\quad x \in \operatorname{supp}(d(y)_-) \text{ or } y \in \operatorname{supp}(d(x)_+), \] where we set, by convention, $d(b) = 0$ if $b$ belongs to $B_0$. We shall say that a basis $B$ is \emph{strongly loop-free} \index{augmented directed complex!strongly loop-free basis} if the preorder relation $\leN$ is actually a partial order relation. We shall call an augmented directed complex $K$ a \emph{strong Steiner complex}\index{strong Steiner complex} if it is with basis and its unique basis is unital and strongly loop-free. \end{paragr} \begin{prop}[Steiner] Let $K$ be an augmented directed complex with basis $B$. If the basis $B$ is strongly loop-free, then it is loop-free. \end{prop} \begin{proof} See Proposition~3.7 of~\cite{Steiner1}. \end{proof} \begin{comment} \begin{paragr}\label{def:lambda} We define a functor \[ \lambda \colon \nCat{\infty} \to \mathcal{C}_{\mathrm{ad}} \] as follows. Let $A$ be a small $\infty$-category. For $i\geq 0$, the abelian group $\lambda(A)_i$ is generated by the elements of the form $[x]$, where $x$ is an $i$\nbd-cell of $A$, subject to the relations \[ [x \ast_j y] = [x] + [y], \] where $x$ and $y$ are two $j$\nbd-composable $i$\nbd-cells of $A$. The positivity submonoid $\lambda(A)_i^\ast$ is the submonoid generated by the elements $[x]$, for $x$ an $i$\nbd-cell of $A$.
For $i > 0$, the differential $d_i \colon \lambda(A)_i \to \lambda(A)_{i-1}$ of an element $[x]$, where $x$ is an $i$\nbd-cell of $A$, is defined by \[ d_i([x]) = [t(x)] - [s(x)]. \] The augmentation $e \colon \lambda(A)_0 \to \Z$ is the unique morphism of abelian groups sending, for any object $x$ of $C$, the generating element $[x]$ to $1$. If $u \colon A \to B$ is an $\infty$-functor, for $i\geq 0$ we define the morphism $\lambda(u)_i$ by sending the generating element $[x]$ of $\lambda(A)_i$ to the generating element $[u(x)]$ of $\lambda(B)_i$. It is easy to check that this defines a morphism of augmented directed complexes from $\lambda(A)$ to $\lambda(B)$ and that $\lambda$ is indeed a functor. \end{paragr} \begin{rem} Notice that if $A$ is an \oo-category and $x$ is a trivial $i$\hyp{}cell of $A$ for $i \ge 0$, \ie the cell $x$ is an identity of an $(i-1)$\hyp{}cell, then the element~$[x]$ of the abelian group~$\lambda(A)_i$ is zero. Indeed, the relations defining $\lambda(A)_i$ imply \[ [x] = [x \ast_{i-1} x] = [x] + [x]\,. \] \end{rem} \end{comment} \begin{paragr}\label{def:nu} We define a functor \[ \nu \colon \mathcal{C}_{\mathrm{ad}} \to \nCat{\infty} \] as follows. Let $K$ be an augmented directed complex. For $i \ge 0$, the $i$\hyp{}cells of $\nu(K)$ are the matrices \[ \tabld{x}{i} \] such that \begin{enumerate} \item $x^\epsilon_k$ belongs to $K^\ast_k$ for $\epsilon = 0, 1$ and $0 \le k \le i$ ; \item $d(x^\epsilon_k) = x^1_{k-1} - x^0_{k-1}$ for $\epsilon = 0, 1$ and $0 < k \le i$ ; \item $e(x^\epsilon_0) = 1$ for $\epsilon = 0, 1$ ; \item $x_i^0 = x_i^1$. \end{enumerate} Let us describe the \oo-categorical structure. Let \[ x = \tabld{x}{i} \] be an $i$\hyp{}cell of $\nu(K)$ for $i \ge 0$. If $i > 0$ we define the source and the target of $x$ to be respectively \[ s(x) = \begin{pmatrix} x^0_0 &\dots &x^0_{i-2} &x^0_{i-1}\cr\noalign{\vskip 3pt} x^1_0 &\dots &x^1_{i-2} &x^0_{i-1} \end{pmatrix} \quadet t(x) = \begin{pmatrix} x^0_0 &\dots &x^0_{i-2} &x^1_{i-1}\cr\noalign{\vskip 3pt} x^1_0 &\dots &x^1_{i-2} &x^1_{i-1} \end{pmatrix}\, . \] The identity of $x$ is given by the matrix \[ 1_x = \begin{pmatrix} x^0_0 &\dots &x^0_{i-1} &x^0_{i} & 0\cr\noalign{\vskip 3pt} x^1_0 &\dots &x^1_{i-1} &x^1_{i} & 0 \end{pmatrix}\, . \] Finally if \[ y = \tabld{y}{i} \] is another $i$\hyp{}cell which is $j$\hyp{}composable with $x$, with $i > j \ge 0$, then we set \[ x \ast_j y = \begin{pmatrix} y^0_0 &\dots & y^0_j & x^0_{j+1} + y^0_{j+1} & \dots & x^0_{i} + y^0_i \cr\noalign{\vskip 3pt} x^1_0 &\dots & x^1_j & x^1_{j+1} + y^1_{j+1} & \dots & x^1_{i} + y^1_i \end{pmatrix}\, . \] One checks that this indeed defines an \oo-category. If $x$ is an $i$\hyp{}cell of $\nu(K)$, $i \ge 0$, then we shall denote by $x^\eps_k$ the component of the matrix defining $x$, for $0 \le k \le i$ and $\eps = 0, 1$. We shall simply name by $x_i$ the element $x^0_i = x^1_i$ and for $k >i$ and $\eps = 0, 1$ we set $x^\eps_k = 0$. Let $f \colon K \to K'$ be a morphism of augmented directed complexes. The collection of functions \[ \tabld{x}{i} \mapsto \begin{pmatrix} f(x^0_0) &\dots &f(x^0_{i-1}) &f(x^0_{i})\cr\noalign{\vskip 3pt} f(x^1_0) &\dots &f(x^1_{i-1}) &f(x^1_{i}) \end{pmatrix} \] defines an \oo-functor $\nu(f) \colon \nu(K) \to \nu(K')$. \end{paragr} \begin{rem} Steiner shows in~\cite{Steiner1} that the functor $\nu$ admits a left adjoint $\lambda$, that we will not define since we will not need it. 
In particular, the composition $\nN = N_\infty \circ \nu \colon \mathcal{C}_{\mathrm{ad}} \to {\mathcal{S}\mspace{-2.mu}\it{et}}Simp$ defines a nerve functor for augmented directed complexes and this has a left adjoint given by $\cC = \lambda \circ c_\infty \colon {\mathcal{S}\mspace{-2.mu}\it{et}}Simp \to \mathcal{C}_{\mathrm{ad}}$. \end{rem} \begin{paragr}\label{paragr:def_atom_II} Let $K$ be an augmented directed complex with basis $B$. For any element $x$ of $K_i$, one easily checks that the matrix \[ \atom{x} = \tabll{\atom{x}}{i}\,, \] as defined in paragraph~\ref{paragr:def_atome}, is an $i$\hyp{}cell of $\nu(K)$ if and only if the element $x$ belongs to $K^\ast_i$ and we have the equalities $e(\atom{x}^0_0) = 1 = e(\atom{x}^1_0)$. Setting $\atom{x}_i = x$ and $\atom{x}^\eps_k = 0$ for all $k > i$ and $\eps= 0, 1$, these notations are compatible with those of paragraph~\ref{def:nu} whenever~$\atom{x}$ is an $i$\hyp{}cell of $\nu(K)$. If the basis $B$ of $K$ is unital, then for any element $x$ of the basis the matrix defined by $\atom{x}$ is a cell of $\nu(K)$. In this case, we call the cell $\atom{x}$ of $\nu(K)$ the \ndef{atom}\index{atom} associated to $x$. \end{paragr} \begin{thm}\label{thm:Steiner_adj} The functors \[ \lambda \colon \nCat{\infty} \to \mathcal{C}_{\mathrm{ad}} \quadet \nu \colon \mathcal{C}_{\mathrm{ad}} \to \nCat{\infty} \] define a pair of adjoint functors. \end{thm} \begin{proof} This is Theorem 2.11 of \cite{Steiner1}. \end{proof} \begin{thm}[Steiner]\label{thm:equivalence_Steiner} For any Steiner complex $K$, the counit morphism \[ \lambda(\nu(K)) \to K \] is an isomorphism. In particular, the restriction of the functor $\nu \colon \mathcal{C}_{\mathrm{ad}}\to \nCat{\infty}$ to the category of Steiner complexes is fully faithful. \end{thm} \begin{paragr}\label{paragr:def_Steiner_category} We shall call \ndef{Steiner \oo-category}\index{Steiner infty-category@Steiner \oo-category} (resp.~\ndef{strong Steiner \oo-category}\index{strong Steiner infty-category@strong Steiner \oo-category}) an \oo-category in the essential image of the restriction of the functor $\nu \colon \mathcal{C}_{\mathrm{ad}} \to \nCat{\infty}$ to the full subcategory of Steiner complexes (resp.~strong Steiner complexes). The preceding theorem states that the functor $\nu$ induces an equivalence of categories between the category of Steiner complexes and that of Steiner \oo-categories (resp.~between the category of strong Steiner complexes and that of strong Steiner \oo-cat\-e\-go\-ries). \end{paragr} \section{Joyal's {$\Theta$} category} \label{app:theta} \begin{paragr}\label{paragr:def_disks} For any $i \ge 0$, we shall denote by $\operatorname{\mathsf{Cell}}_i$ the set of $i$-cells of an \oo-category and by $\Dn i$ the \oo-category corepresenting the functor $\operatorname{\mathsf{Cell}}_i \colon \nCat{\infty} \to {\mathcal{S}\mspace{-2.mu}\it{et}}$ mapping an \oo-category $A$ to the set of its $i$\hyp{}cells. In fact, this \oo-category is an $i$\hyp{}category having a single non-trivial $i$\hyp{}cell that we shall call its \ndef{principal cell}. For any $0 \le k < i$, the $i$\hyp{}category $\Dn i$ has exactly two non-trivial $k$\hyp{}cells, which are the $k$\hyp{}dimensional iterated source and target of its principal cell.
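For instance, $\Dn 2$ has two objects, two non-trivial $1$-cells and a single non-trivial $2$-cell, and an \oo-functor $\Dn 2 \to A$ amounts precisely to the choice of a $2$-cell of $A$, namely the image of the principal cell.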
This is what the graphs of $\Dn i$ (without identities) look like for $i=0, 1, 2, 3$: \[ \Dn 0 = \begin{tikzcd} \bullet \end{tikzcd} \ ,\ \Dn 1 = \begin{tikzcd} \bullet \arrow[r] & \bullet \end{tikzcd} \ ,\ \Dn 2 = \begin{tikzcd} \bullet \arrow[r, bend left=50, ""{name=U, below}] \arrow[r, bend right=50, ""{name=D}] & \bullet \arrow[Rightarrow,from=U,to=D] \end{tikzcd} \ ,\ \Dn 3 = \begin{tikzcd} \bullet \arrow[r, bend left=50, ""'{name=U}] \arrow[r, bend right=50, ""{name=D}] & \bullet \arrow[Rightarrow,from=U,to=D, shift right=1ex, bend right=30, ""{name=L}] \arrow[Rightarrow, from=U, to=D, shift left=1ex, bend left=30, ""{name=R, left}] \arrow[triple, from=L, to=R]{} \end{tikzcd}\ . \] For $i>0$, the natural transformations source and target $\operatorname{\mathsf{Cell}}_i \to \operatorname{\mathsf{Cell}}_{i-1}$ induce \oo-functors $\Ths{i}, \Tht{i} \colon \Dn{i-1} \to \Dn i$. Explicitly, the \oo-functor $\Ths{i}$ (resp.~$\Tht{i}$) sends the principal cell of $\Dn{i-1}$ to the source (resp.~the target) of the principal cell of $\Dn i$. For $0 \le j < i$ we shall denote by $\Ths[i]{j}, \Tht[i]{j} \colon \Dn{j} \to \Dn{i}$ the \oo-functors corepresented by the natural transformations $\Gls[i]{j}$ and $\Glt[i]{j}$ respectively, \ie the \oo-functors \[ \Ths[i]{j} = \Ths{i}\dots\Ths{j+2}\Ths{j+1} \quadet \Tht[i]{j} = \Tht{i}\dots\Tht{j+2}\Tht{j+1}\,. \] \end{paragr} \begin{paragr}\label{paragr:def_tree} Let $\ell >0$ and $i_1, \dots, i_\ell$, $j_1, \dots, j_{\ell-1}$ be a collection of non-negative integers satisfying the inequalities \[ i_k > j_k < i_{k+1}\ ,\quad \text{for }0< k <\ell\,. \] We shall often organise these integers in a matrix, called \ndef{matrix of dimensions}, of the following form \[ \begin{pmatrix} i_1 && i_2 && \dots && i_{\ell-1} && i_\ell\cr\noalign{\vskip 3pt} & j_1 && j_2 && \dots && j_{\ell-1} & \end{pmatrix} \] and associate to it the diagram \[ \begin{tikzcd}[column sep=1em] \Dn{i_1} & & \Dn{i_2} & & \Dn{i_3} & & \dots & & \Dn{i_{\ell-1}} & & \Dn{i_\ell} \\ & \Dn{j_1} \arrow[lu, "{\Ths[i_1]{j_1}}"] \arrow[ru, "{\Tht[i_2]{j_1}}"'] & & \Dn{j_2} \arrow[lu, "{\Ths[i_2]{j_2}}"] \arrow[ru, "{\Tht[i_3]{j_2}}"'] & & \dots & & \dots & & \Dn{j_{\ell-1}} \arrow[lu, "{\Ths[i_{\ell-1}]{j_{\ell-1}}}"] \arrow[ru, "{\Tht[i_\ell]{j_{\ell-1}}}"'] & \end{tikzcd} \] in $\nCat{\infty}$. We shall call \ndef{globular sum} the colimit of such a diagram and we shall simply denote it by \[ \Dn{i_1} \amalg_{\Dn{j_1}} \Dn{i_2} \amalg_{\Dn{j_2}}\dots \amalg_{\Dn{j_{\ell-1}}} \Dn{i_\ell}\,. \] We shall call \ndef{globular pasting scheme} any \oo-category that we get this way. \end{paragr} \begin{paragr}\label{paragr:dimension_globular_scheme} Consider a matrix of dimensions \[ \begin{pmatrix} i_1 && i_2 && \dots && i_{\ell-1} && i_\ell\cr\noalign{\vskip 3pt} & j_1 && j_2 && \dots && j_{\ell-1} & \end{pmatrix}\ . \] The \ndef{dimension} of the globular pasting scheme $T$ \[ \Dn{i_1} \amalg_{\Dn{j_1}} \Dn{i_2} \amalg_{\Dn{j_2}}\dots \amalg_{\Dn{j_{\ell-1}}} \Dn{i_\ell} \] is given by the number \[ \sum_{1\le k \le \ell} i_k - \sum_{0 < k < \ell} j_k = i_1 - j_1 + i_2 - j_2 + \dots + i_{\ell-1} - j_{\ell -1} + i_\ell\,. \] The \ndef{height} of the globular pasting scheme $T$ is defined as the number \[ \text{ht}(T) = \max_{1\le k \le \ell}(i_k)\,. \] \end{paragr} \begin{paragr}\label{paragr:def_Theta} Joyal's $\Theta$ category is the full subcategory of $\nCat{\infty}$ spanned by the globular pasting schemes.
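For instance, the globular sum $\Dn{2} \amalg_{\Dn{1}} \Dn{2} \amalg_{\Dn{0}} \Dn{1}$ is an object of $\Theta$; its matrix of dimensions is \[ \begin{pmatrix} 2 && 2 && 1 \cr\noalign{\vskip 3pt} & 1 && 0 & \end{pmatrix}\,, \] so that, in the sense of paragraph~\ref{paragr:dimension_globular_scheme}, it has dimension $2 - 1 + 2 - 0 + 1 = 4$ and height $2$.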
We shall denote by $\Theta_+$ the full subcategory of $\nCat{\infty}$ obtained by adding the empty \oo-category to $\Theta$. \end{paragr} \begin{paragr}\label{paragr:def_Theta_n} The height defined in the previous paragraph defines a canonical grading on the objects of $\Theta$. For any integer $n \ge 0$, we denote by $\Theta_n$ the full subcategory of $\Theta$ spanned by the objects of height at most $n$. We observe that $\Theta_0$ is the category whose only object is $\Dn{0}$ and whose only morphism is the identity and that $\Theta_1$ is canonically isomorphic to the category $\cDelta$ of simplices; we thus get a canonical embedding $\cDelta \hookto \Theta$. \end{paragr} \begin{rem} The category $\Theta$ was first introduced by Joyal in~\cite{JoyalDisks}, with a definition more geometric in spirit. Berger~\cite{BergerNerve} and Makkai--Zawadowski~\cite{MakkaiZawadowskiDuality} later independently showed that the two definitions are actually equivalent. Another equivalent definition is due to Oury~\cite{Oury}. \end{rem} \begin{paragr} Another convenient and graphical description of the category $\Theta$ can be given in terms of planar rooted trees. Let $\mathcal T$ be the category of presheaves of finite linearly ordered sets on the poset of non-negative integers, \ie an element $X$ of $\mathcal T$ is a sequence of finite linearly ordered sets $(X_n)_{n\ge 0}$ equipped with order-preserving maps $X_n \to X_{n-1}$ for all $n>0$. A \ndef{planar rooted tree}, or simply \ndef{tree}, is an object $T$ of $\mathcal T$ such that $T_0$ is a singleton and $T_i$ is empty for all $i$ large enough. The greatest $i$ for which $T_i$ is non-empty will be called the \ndef{height} of the tree. Let us now sketch the correspondence between objects of $\Theta$ and trees. Instead of giving a formal framework, we are going to present some examples upon which one can easily build the intuition behind this bijection. For any $i \ge 0$, we associate to the object $\Dn i$ the linear tree $T$ of height $i$, that is, the tree for which $T_k$ is a singleton for all $0 \le k \le i$, and we depict it as \begin{center} \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, } [ [ [, edge=dotted [] ] ] ] \end{forest} \end{center} So for instance we have \begin{center} $\Dn 0 = \bullet$ \quad , \quad $\Dn 1$ = \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, } [ [ ] ] \end{forest} \quad , \quad $\Dn 2$ = \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, } [ [ [ ] ] ] \end{forest} \quad , \quad $\Dn 3$ = \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, } [ [ [ [] ] ] ] \end{forest} \end{center} The height $i$ of the tree determines the dimension of the principal cell.
The element of $\cDelta$ corresponds precisely to the trees of height at most 1, for instance \begin{center} $\Deltan{0} = \bullet$ \quad , \quad $\Deltan 1 =$ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, } [ [ ] ] \end{forest} \quad , \quad $\Deltan 2 =$ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [ ] [ ] ] \end{forest} \quad , \quad $\Deltan 3 =$ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [ ] [ ] [ ] ] \end{forest}\, . \end{center} More generally, given an object $S$ of $\Theta$ with matrix of dimensions \[ \begin{pmatrix} i_1 && i_2 && \dots && i_{\ell-1} && i_\ell\cr\noalign{\vskip 3pt} & j_1 && j_2 && \dots && j_{\ell-1} & \end{pmatrix}\,, \] the corresponding tree is drawn inductively as follows. The terms $i_k$ and $i_{k+1}$ correspond to the height of two linear trees that are glued together, \ie share the same root and then fork from height $j_k$. We proceed from right to left, so that the description agrees with the usual way of writing the composition of cells. As a first example, consider the following tables of dimensions \[ \begin{pmatrix} 2 && 2 \cr\noalign{\vskip 3pt} & 0 & \end{pmatrix} \quad , \quad \begin{pmatrix} 2 && 2 \cr\noalign{\vskip 3pt} & 1 & \end{pmatrix} \quad , \quad \begin{pmatrix} 2 && 2 && 2 \cr\noalign{\vskip 3pt} & 0 && 1 & \end{pmatrix} \, , \] and the associated objects of $\Theta$ \[ D_2 \amalg_{D_0} D_2 \quad , \quad D_2 \amalg_{D_1} D_2 \quad , \quad D_2 \amalg_{D_0} D_2 \amalg_{D_1} D_2\, , \] that is the globular pasting schemes \[ \begin{tikzcd} \bullet \arrow[r, leftarrow, bend left, ""{name=U1, below}] \arrow[r, bend right, ""{name=D1}] & \bullet \arrow[r, leftarrow, bend left, ""{name=U2, below}] \arrow[r, bend right, ""{name=D2}] & \bullet \ar[Rightarrow, from=U1, to=D1] \ar[Rightarrow, from=U2, to=D2] \end{tikzcd} \quad , \quad \begin{tikzcd} \bullet \arrow[r, leftarrow, bend left=70, ""'{name=U}] \arrow[r, leftarrow, bend right=70, ""{name=D}] \arrow[r, leftarrow, ""{name=M1}, ""'{name=M2}] & \bullet \ar[Rightarrow, from=U, to=M1] \ar[Rightarrow, from=M2, to=D] \end{tikzcd} \quadet \begin{tikzcd} \bullet \arrow[r, leftarrow, bend left, ""'{name=U1}] \arrow[r, leftarrow, bend right, ""{name=D1}] & \bullet \arrow[r, leftarrow, bend left=70, ""'{name=U2}] \arrow[r, leftarrow, bend right=70, ""{name=D2}] \arrow[r, leftarrow, ""{name=M1}, ""'{name=M2}] & \bullet \ar[Rightarrow, from=U1, to=D1] \ar[Rightarrow, from=U2, to=M1] \ar[Rightarrow, from=M2, to=D2] \end{tikzcd}\,. 
\] These objects correspond respectively to trees \begin{center} \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [ [ ] ] [ [ ] ] ] \end{forest} \qquad , \qquad \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [ [ ] [ ] ] ] \end{forest} \qquad\text{and}\qquad \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [ [ ] ] [ [ ] [ ] ] ] \end{forest} \end{center} As another more involved example, consider the matrix of dimensions \[ \begin{pmatrix} 2 && 2 && 2 && 2 && 2 && 2 && 3 && 2 && 2 \cr\noalign{\vskip 3pt} & 1 && 1 && 1 && 0 && 1 && 0 && 0 && 1 & \end{pmatrix}\,, \] and the corresponding globular pasting scheme \[ \begin{tikzcd} \bullet \ar[r, rightarrow, bend left=85, looseness=2.2, ""{name=D11, below}] \ar[r, leftarrow, bend left=70, looseness=1, ""{name=D12i}, ""{name=D12ii, below}] \ar[r, leftarrow, ""{name=D13i}, ""{name=D13ii, below}] \ar[r, leftarrow, bend right=70, looseness=1, ""{name=D14i}, ""{name=D14ii, below}] \ar[r, leftarrow, bend right=85, looseness=2.2, ""{name=D15}] \ar[Rightarrow, from=D11, to=D12i] \ar[Rightarrow, from=D12ii, to=D13i] \ar[Rightarrow, from=D13ii, to=D14i] \ar[Rightarrow, from=D14ii, to=D15] & \bullet \ar[r, leftarrow, bend left=70, ""{name=D21, below}] \ar[r, leftarrow, ""{name=D22i}, ""{name=D22ii, below}] \ar[r, leftarrow, bend right=70, ""{name=D23}] \ar[Rightarrow, from=D21, to=D22i] \ar[Rightarrow, from=D22ii, to=D23] & \bullet \ar[r, leftarrow, bend left=70, ""{name=D31, below}] \ar[r, leftarrow, bend right=70, ""{name=D32}] \arrow[Rightarrow,from=D31,to=D32, shift right=1ex, bend right=30, ""{name=L}] \arrow[Rightarrow, from=D31, to=D32, shift left=1ex, bend left=30, ""{name=R, left}] \arrow[triple, from=R, to=L]{} & \bullet \ar[r, leftarrow, bend left=70, ""{name=D41, below}] \ar[r, leftarrow, ""{name=D42i}, ""{name=D42ii, below}] \ar[r, leftarrow, bend right=70, ""{name=D43}] \ar[Rightarrow, from=D41, to=D42i] \ar[Rightarrow, from=D42ii, to=D43] & \bullet \end{tikzcd}\ . \] The associated tree is given by \[ \begin{forest} for tree={ label/.option=content, grow'=north, content=, circle, fill, minimum size=3pt, inner sep=0pt, s sep+=15, } [ [ [ ] [ ] [ ] [ ] ] [ [ ] [ ] ] [ [ [ ] ] ] [ [ ] [ ] ] ] \end{forest}\, . \] \end{paragr} \section{Orientals} In this short section we present a visual intuition of the first few orientals. Then for every poset $E$ we give a description of the \oo-category $c_\infty N(E)$, that we shall simply denote by $\On{E}$ and call the \ndef{oriental of $E$}, and we deduce some of its basic properties, following closely~\cite[ยง6]{AraMaltsiCondE}. \begin{paragr}\label{paragr:atoms_orientals} Fix an integer $n\ge 0$. For any $0\le i \le n$ the $i$-chains of the strong Steiner complex $\cC (\Deltan n)$ are the elements of the free abelian groups generated by the elements of the set $\nd{(\Deltan n)}{i}$, the set of non-degenerate $i$-simplices of the representable simplicial set $\Deltan{n}$; that is, the generators of $\cC (\Deltan n)$ are the $i$-tuples $(j_0, j_1, \dots, j_i)$ of non-negative integers such that $0 \le j_\ell < j_{\ell +1} \le n$ for all $\ell =0, \dots, i-1$. 
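For instance, for $n=2$ (the case used in the description of $\On 2$ below) these generators are $(0)$, $(1)$, $(2)$ in degree $0$, the pairs $(0, 1)$, $(0, 2)$, $(1, 2)$ in degree $1$, and the single triple $(0, 1, 2)$ in degree $2$; more generally, $\cC (\Deltan n)_i$ is free of rank $\binom{n+1}{i+1}$.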
For any such element of the basis of $\cC (\Deltan n)_i$, we shall write $\atom{j_0j_1\dots j_i}$ for the corresponding atom, instead of the more pedantic $\atom{(j_0, j_1, \dots, j_i)}$ (see paragraph~\ref{paragr:def_atome}). \end{paragr} \begin{paragr}\label{paragr:orientals} The \oo-categories $\On 0$ and $\Dn 0$ are isomorphic. They are both terminal objects for the category $\nCat{\infty}$ of small \oo-categories and they corepresent the functor mapping any \oo-category $A$ to the set $\operatorname{\mathsf{Ob}} A$ of its objects. The \oo-categories $\On 1$ and $\Dn 1$ are isomorphic, too. They are both generated as \oo-graphs by $\bullet \longrightarrow \bullet$ and they corepresent the functor mapping any \oo-category $A$ to the set $\operatorname{\mathsf{Cell}}_1(A)$ of its $1$-cells. The \oo-category $\On 2$ is a free \oo-category, generated by the \oo-graph \begin{center} \begin{tikzpicture} \foreach \i in {0, 1, 2}{ \tikzmath{\a = 210 + (120 * \i);} \node (n\i) at (\a:2) {$\atom{\i}$}; } \draw [->] (n0) -- node [below] {$\atom{01}$} (n1); \draw [->] (n1) -- node [right] {$\atom{12}$} (n2); \draw [->] (n0) -- node [left] {$\atom{02}$} (n2); \draw [double, double equal sign distance, -implies] (150:0.3) -- node [above] {$\atom{012}$} (-10:0.3); \end{tikzpicture}, \end{center} so that the $2$-cell $\atom{012}$ has $\atom{02}$ as source and $\atom{12}\comp_0 \atom{01}$ as target. With the notations as above and as in paragraph~\ref{paragr:def_atome}, we have \[ \atom{i} = \begin{pmatrix} (i) \\ (i) \end{pmatrix}\ , \quad\text{for }i=0, 1, 2 \] for the objects, \[ \atom{ij} = \begin{pmatrix}(i) & (i, j) \\ (j) & (i, j)\end{pmatrix}\ , \quad\text{for }i, j= 0, 1, 2\text{ and }i <j \] for the $1$-cells and \[ \atom{012} = \begin{pmatrix} (0) & (0, 2) & (0, 1, 2) \\ (2) & (0, 1) + (1, 2) & (0, 1, 2) \end{pmatrix} \] for the $2$-cell. The \oo-category $\On 3$ is a free \oo-category generated by the \oo-graph \begin{center} \begin{tikzpicture}[scale=2] \square{ /square/label/.cd, 0=$\atom{0}$, 1=$\atom{1}$, 2=$\atom{2}$, 3=$\atom{3}$, 01=$\atom{01}$, 12=$\atom{12}$, 23=$\atom{23}$, 02=$\atom{02}$, 03=$\atom{03}$, 13=$\atom{13}$, 012=$\atom{012}$, 023=$\atom{023}$, 123=$\atom{123}$, 013=$\atom{013}$, 0123=$\atom{0123}$ } \end{tikzpicture} \end{center} so that the $3$-cell $\atom{0123}$ has the $2$-cell \[\bigl(\atom{23}\comp_0 \atom{012}\bigr) \comp_1 \atom{023}\] as source and the $2$-cell \[\bigl(\atom{123}\comp_0\atom{01}\bigr) \comp_1 \atom{013}\] as target. Indeed we have \[ \atom{0123} = \begin{pmatrix} (0) & (01) + (12) + (23) & (012) + (023) & (0123) \\ (3) & (03) & (123) + (013) & (0123) \end{pmatrix}\ . \] The \oo-category $\On 4$ is freely generated by the diagram displayed in figure~\ref{fig:4-simplex}, where we omitted the brackets $\atom{\,\cdot\,}$ for reasons of space. \end{paragr} \begin{figure} \caption{The \oo-category $\On 4$.} \label{fig:4-simplex} \end{figure} \begin{paragr} Let $E$ be a poset. For any $p \ge 0$ the non-degenerated $p$-simplices of $N_\infty(E)$ are strictly increasing maps $\Deltan{p} \to E$, \ie the set $N_\infty(E)_p$ of non-degenerate $p$-simplices of the nerve of $E$ consists of $(p+1)$\hyp{}tuples \[(x_0, x_1,\dots, x_p)\] of elements of $E$ such that $x_i < x_{i+1}$ for all $i=0, 1, \dots, p-1$. The abelian group $(\cC N_\infty(E))_p$ (resp. abelian monoid $(\cC N_\infty(E))^*_p$) is freely generated by the set $\nd{N_\infty(E)}{p}$. 
The differential is defined by \[ d(i_0, i_1,\dots, i_p) = \sum_{k=0}^p (-1)^k (i_0, i_1, \dots, \widehat{i_k}, \dots, i_p)\,,\quad p>0\,, \] where $(i_0, \dots, \widehat{i_k}, \dots, i_p) = (i_0, \dots, i_{k-1}, i_{k+1}, \dots, i_p)$, and the augmentation by $e(i_0) = 1$. \end{paragr} \begin{thm} The augmented directed complex $\cC N_\infty(E)$ is a strong Steiner complex. \end{thm} \begin{proof} This follows immediately from Theorem~8.6 of~\cite{AraMaltsiCondE}. Indeed, using the notation of~\loccit, for any poset $E$ the simplicial set $N_\infty(E)$ is canonically isomorphic to $k^*(E, \xi E)$ (cf.~paragraph~8.4 of~\cite{AraMaltsiCondE}). \end{proof} \begin{paragr}\label{paragr:oriental_poset} The previous theorem shows that the \oo-category $\nu \cC N_\infty(E)$ is a strong Steiner category and it is more precisely freely generated by the atoms \[ \atom{x_0 x_1 \dots x_p}\ , \quad p\ge 0\text{ and } x_0 < x_1 < \dots < x_p\,. \] This \oo-category associated to the poset $E$ shall be called the \emph{oriental of $E$} and shall be denoted by $\On{E}$. \end{paragr} \begin{lemme} The functor $\Or \colon \mathcal{O}\mspace{-2.mu}\it{rd} \to \nCat{\infty}$ associating to any poset $E$ its oriental \oo-category $\On{E}$ preserves monomorphisms. \end{lemme} \begin{proof} This is Proposition 9.6 of \cite{AraMaltsiCondE}. \end{proof} We report here an important result of~\cite{AraMaltsiCondE} giving a very explicit description of the ``horizontal composition'' of cells of the orientals. \begin{prop}\label{prop:2-cells_orientals} Let $n \ge 1$, $m\ge 1$ and $i_0,i_1\dots,i_m$ be integers such that \[0=i_0<i_1<\cdots<i_{m-1}<i_m=n\,.\] Then, the \oo-functor \[\textstyle\prod\limits^m_{k=1}\operatorname{\mathsf{Hom}}_{\On{n}}(a_k,b_k)\longrightarrow\operatorname{\mathsf{Hom}}_{\On{n}}(a,b)\ ,\] where \[ a_k= \begin{pmatrix} (i_{k-1}) &(i_{k-1},i_k) \cr \noalign{\vskip 3pt} (i_k) &(i_{k-1},i_k) \end{pmatrix}\,,\quad b_k= \begin{pmatrix} (i_{k-1}) &\textstyle\sum\limits_{i_{k-1}<l\leq i_k}(l-1,l) \cr \noalign{\vskip 3pt} (i_k) &\sum\limits_{i_{k-1}<l\leq i_k}(l-1,l) \end{pmatrix}\,,\ \ 1\leq k\leq m\ , \] \[\kern -75pt a= \begin{pmatrix} (0) &\textstyle\sum\limits^m_{k=1}\kern -3pt(i_{k-1},i_k) \cr \noalign{\vskip 3pt} (n) &\sum\limits^m_{k=1}\kern -3pt(i_{k-1},i_k) \end{pmatrix}\,,\quad b= \begin{pmatrix} (0) &\sum\limits^n_{l=1}(l-1,l) \cr \noalign{\vskip 3pt} (n) &\sum\limits^n_{l=1}(l-1,l) \end{pmatrix}\,, \] defined by the ``horizontal composition'' $\comp_0$ of $\On{n}$ \[(x_1,x_2,\dots,x_m)\longmapsto x_1\comp_0x_2\comp_{0}\cdots\comp_{0}x_m\] is an isomorphism of \oo-categories. \end{prop} \begin{proof} This is Proposition~A.4 of~\cite{AraMaltsiCondE}. \end{proof} In section~\ref{sec:tilde} we shall need few more properties of the hom-\oo-categories of the oriental $\On{E}$ of a poset~$E$ that are proven in~\cite{AraMaltsiCondE}. \begin{prop} Let $E$ be a poset and $s \colon \Deltan{n} \to E$ a non-degenerate $n$-simplex of $N(E)$, with $n>0$. Consider the $1$-cell $S$ of $\On{E}$ defined by \[ S = \sum_{i=0}^{n-1} \atom{s_i, s_{i+1}}\,. \] Then the \oo-functor $\On{s} \colon \On{n} \to \On{E}$ induces an isomorphism \[ \operatorname{\mathsf{Hom}}i_{\On{n}} (\atom{0, n}, \atom{0, 1} + \dots + \atom{n-1, n}) \to \operatorname{\mathsf{Hom}}i_{\On{E}}(\atom{s_0, s_n}, S) \] of \oo-categories. 
\end{prop} \begin{proof} This is a particular case of Proposition 1.5 of \cite{AraMaltsiCondE}, since it is clear that the \oo-category $\On{n}$ and the \oo-category denoted there by $\Or(S)$ are canonically isomorphic. \end{proof} \begin{coro} Let $E$ be a poset and $s \colon \Deltan{n} \to E$ a non-degenerate $n$-simplex of $N(E)$, with $n>0$. Consider the $1$-cell $S$ of $\On{E}$ defined by \[ S = \sum_{i=0}^{n-1} \atom{s_i, s_{i+1}}\,. \] Then for any $1$-cell \[ f = \sum_{i=0}^{m-1} \atom{j_i, j_{i+1}} \] with $j_0 = 0$ and $j_m = n$ we have that the \oo-functor $\iota_s = \On{s} \colon \On{n} \to \On{E}$ induces an isomorphism \[ \operatorname{\mathsf{Hom}}i_{\On{n}} (f, \atom{0, 1} + \dots + \atom{n-1, n}) \to \operatorname{\mathsf{Hom}}i_{\On{E}}(\iota_s(f), S) \] of \oo-categories. \end{coro} \begin{proof} This is an equivalent formulation of Proposition 1.5 of~\cite{AraMaltsiCondE} and follows immediately from the preceding two propositions. \end{proof} \begin{coro}\label{coro:suboriental} Let $j \colon E \hookto F$ be an inclusion of posets. Then for any two parallel $1$-cells $f$ and $g$ of $E$, the \oo-functor $\iota_j = \On{j} \colon \On{E} \to \On{F}$ induces an isomorphism \[ \operatorname{\mathsf{Hom}}i_{\On{E}}(f, g) \to \operatorname{\mathsf{Hom}}i_{\On{F}}(\iota_j(f), \iota_j(g)) \] of \oo-categories. \end{coro} \begin{proof} This follows immediately from the previous proposition by considering the simplex $\bar{g} \colon \Deltan{n} \to E$ of $N(E)$, where \[ g = \sum_{i=0}^{n-1} \atom{\bar{g}_i, \bar{g}_{i+1}}\,. \qedhere \] \end{proof} \printindex \end{document}
\begin{document}
\title[Quantum linear Galois orders]{Quantum linear Galois orders}
\author{Vyacheslav Futorny}
\author{Jo\~ao Schwarz}
\address{Instituto de Matem\'atica e Estat\'istica, Universidade de S\~ao Paulo, S\~ao Paulo SP, Brasil}
\email{[email protected]}
\email{[email protected]}
\begin{abstract}
We define a class of quantum linear Galois algebras which include the quantum universal enveloping algebra $U_q(gl_n)$, the quantum Heisenberg Lie algebra and other quantum orthogonal Gelfand-Zetlin algebras of type $A$, the subalgebras of $G$-invariants of the quantum affine space and of the quantum torus for $G=G(m, p, n)$, and of the quantum Weyl algebra for $G=S_n$. We show that all quantum linear Galois algebras satisfy the quantum Gelfand-Kirillov conjecture. Moreover, it is shown that the subalgebras of invariants of the quantum affine space and of the quantum torus for reflection groups, and of the quantum Weyl algebra for symmetric groups, are in fact Galois orders over adequate commutative subalgebras and free as right (left) modules over these subalgebras. In the rank $1$ cases the results hold for an arbitrary finite group of automorphisms when the field is $\mathbb{C}$.
\end{abstract}
\maketitle
\section{Introduction}
The purpose of this paper is to quantize the results of \cite{FS2}, where subalgebras of invariants of Weyl algebras were studied for irreducible reflection groups. It was shown that in many cases these subalgebras have the structure of Galois orders over certain commutative domains. This feature indicates a hidden skew group algebra structure of all these algebras. The theory of Galois rings and orders was developed in \cite{FO1}, \cite{FO2}. Classical examples include finite $W$-algebras of type $A$ \cite{FMO}, in particular the universal enveloping algebra of $gl_n$, and generalized Weyl algebras of rank $1$ over integral domains with infinite-order automorphisms \cite{Bavula}. The importance of the Galois order structure comes from representation theory, where one can effectively study the Gelfand-Tsetlin categories of modules with torsion for certain maximal commutative subalgebras \cite{Ovsienko}, \cite{FO2}.

Our main objects of interest are the following quantum algebras: the quantum affine space $O_q(\mathsf{k}^{2n})$, the quantum torus $O_q((\mathsf{k}^*)^{2n})$ and the quantum Weyl algebra $A_n^q(\mathsf{k})$. Our first result shows that the subring of invariants $O_q(\mathsf{k}^{2n})^G$ of the quantum affine space is a Galois order over a certain polynomial subalgebra when $G=G_m^{\otimes n}$ is a product of cyclic groups (Proposition \ref{prop-affine-cyclic-order}) or $G=G(m,p,n)$ is one of the non-exceptional reflection groups (Theorem \ref{thm-affine-refl-order}):
\begin{theorem}\label{thm-main1}
If $G$ is a product of $n$ copies of a cyclic group of fixed finite order or one of the irreducible non-exceptional reflection groups $G(m,p,n)$, then the invariant subring $O_q(\mathsf{k}^{2n})^G$ of the quantum affine space is a Galois order over a polynomial subalgebra $\Gamma$ of $O_q(\mathsf{k}^{2n})^G$. Moreover, $O_q(\mathsf{k}^{2n})^G$ is free as a left (right) $\Gamma$-module.
\end{theorem}

Theorem \ref{thm-main1} can be easily generalized to the case of the quantum torus (Theorem \ref{thm-torus-refl-order}):
\begin{theorem}\label{thm-main2}
For every $G=G(m,p,n)$ the invariant subring $O_q((\mathsf{k}^*)^{2n})^G$ of the quantum torus is a Galois order over $\Gamma=\mathsf{k}[x^{\pm 1}_1, \ldots, x^{\pm 1}_n]^G$ in $(\mathsf{k}(x_1,\ldots, x_n)*\mathbb{Z}^n)^G$. Moreover, $O_q((\mathsf{k}^*)^{2n})^G$ is free as a left (right) $\Gamma$-module.
\end{theorem}
We have the following generalization of Theorem \ref{thm-main1} for quantum planes (Theorem \ref{thm-plane-order}) and the first quantum Weyl algebra (Proposition \ref{prop-A1-order}) when $\mathsf{k}=\mathbb{C}$:
\begin{theorem}\label{thm-main3}
Let $A\in \{O_q(\mathbb{C}^{2}), A_1^q(\mathbb{C})\}$. For every finite group $G$ of automorphisms of $A$, the subring of invariants $A^G$ is a Galois order over a certain polynomial subalgebra $\Gamma$ in one variable. Moreover, $A^G$ is free as a left (right) $\Gamma$-module.
\end{theorem}
It was shown in \cite{FS1} that $A_n(\mathsf{k})^{S_n}$ is a Galois order over some polynomial algebra. We prove the quantum analog of this result for $A_n^{q}(\mathsf{k})^{S_n}$ (Theorem \ref{thm-quantum-order}).

In Section \ref{QGKC} we address the quantum Gelfand-Kirillov conjecture for various algebras. We introduce a class of quantum linear Galois algebras and show that the quantum Gelfand-Kirillov conjecture is valid in this class (Theorem \ref{thm-qlga}). Quantum linear Galois algebras include the quantum orthogonal Gelfand-Zetlin algebras of type $A$ (in particular, the quantum universal enveloping algebra $U_q(gl_n)$ and the quantum Heisenberg Lie algebra), $O_q(\mathsf{k}^{2n})^G$ and $O_q((\mathsf{k}^*)^{2n})^G$ for $G=G(m,p,n)$, and $A_n^{q}(\mathsf{k})^{S_n}$. When $n=1$ the group $G$ in all these cases can be arbitrary. We also compute the skew fields of fractions of the quantum $2$-sphere and of the quantum group $O_{q^2}(so(3,\mathbb{C}))$. Finally, we show that the subalgebra of $G_m$-invariants of $U(sl_2)$ for the cyclic group $G_m$ of order $m$ is birationally equivalent to $U(sl_2)$, in spite of the rigidity of the latter.

\noindent{\bf Acknowledgements.} V.F. is supported in part by CNPq grant (200783/2018-1) and by Fapesp grant (2014/09310-5). J.S. is supported in part by Fapesp grants (2014/25612-1) and (2016/14648-0).

\section{Preliminaries}
All rings and fields in the paper are assumed to be $\mathsf{k}$-algebras over an algebraically closed field $\mathsf{k}$ of characteristic $0$. For $q\in \mathsf{k}$ we denote by $\mathsf{k}_q[x,y]$ the \emph{quantum plane} over $\mathsf{k}$, defined as $\mathsf{k}\langle x,y\mid yx=qxy\rangle$. In this paper we will always assume that $q$ is not a root of unity. Let $\overline{q}=(q_1, \ldots, q_n) \in \mathsf{k}^n$ be an $n$-tuple whose components are nonzero and not roots of unity. The tensor product of quantum planes $\mathsf{k}_{q_1}[x_1,y_1] \otimes \ldots \otimes \mathsf{k}_{q_n}[x_n,y_n]$ will be called the \emph{quantum affine space} and will be denoted by $O_{\overline{q}}(\mathsf{k}^{2n})$.
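Unwinding the tensor product (we record the presentation explicitly, since these relations are used repeatedly below), $O_{\overline{q}}(\mathsf{k}^{2n})$ is the $\mathsf{k}$-algebra with generators $x_1, \ldots, x_n, y_1, \ldots, y_n$ subject to
$$y_i x_i = q_i\, x_i y_i, \qquad i=1,\ldots, n,$$
while any two generators with distinct indices commute.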
If $q_1= \ldots = q_n=q$, we will use the notation $O_q(\mathsf{k}^{2n})$. Denote by $A_1^q(\mathsf{k})$ the first quantum Weyl algebra, defined as $\mathsf{k}\langle x,y\mid yx-qxy=1\rangle$, and set
$$A_n^{\overline{q}}(\mathsf{k})=A_1^{q_1}(\mathsf{k})\otimes_{\mathsf{k}} \cdots \otimes_{\mathsf{k}} A_1^{q_n}(\mathsf{k})$$
for any positive integer $n$. Again, if $q_1= \ldots = q_n=q$ then we simply denote it by $A_n^{q}(\mathsf{k})$. The quantum affine space $O_{\overline{q}}(\mathsf{k}^{2n})$ and the quantum Weyl algebra $A_n^{\overline{q}}(\mathsf{k})$ are birationally equivalent, that is, they have isomorphic skew fields of fractions \cite{BG}.

\subsection{Galois orders}
We recall the concepts of Galois rings and Galois orders from \cite{FO1}. Let $\Gamma$ be a commutative domain and $K$ the field of fractions of $\Gamma$. Let $L$ be a finite Galois extension of $K$ with Galois group $G=Gal(L,K)$, and let $\mathfrak{M}\subset Aut_{\mathsf{k}} \, L$ be a monoid satisfying the following condition: if $m,m' \in \mathfrak{M}$ and their restrictions to $K$ coincide, then $m=m'$. Consider the action of $G$ on $\mathfrak{M}$ by conjugation. A finitely generated $\Gamma$-ring $U$ in $(L*\mathfrak{M})^G$ is called a \emph{Galois ring over $\Gamma$} if $KU=UK=(L*\mathfrak{M})^G$. A Galois ring over $\Gamma$ is called a \emph{right (left) Galois order over $\Gamma$} if for every right (left) finite dimensional $K$-vector subspace $W \subset KU$, the intersection $W \cap U$ is a finitely generated right (left) $\Gamma$-module. If $U$ is both a left and a right Galois order over $\Gamma$, then we say that $U$ is a \emph{Galois order over $\Gamma$}.

If $x = \sum_{m \in \mathfrak{M}} x_m m\in L*\mathfrak{M}$ then set
$$supp \, x =\{m\in \mathfrak{M} \mid x_m\neq 0 \}.$$
We have
\begin{proposition} \cite{FO1} \label{prop-supp}
Let $\Gamma\subset U$ be a commutative domain and $U\subset (L*\mathfrak{M})^G$.
\begin{itemize}
\item[(i)] If $U$ is generated by $u_1,\ldots, u_k$ as a $\Gamma$-ring and $\bigcup_{i=1}^k supp \, u_i$ generates $\mathfrak{M}$ as a monoid, then $U$ is a Galois ring over $\Gamma$.
\item[(ii)] Let $U$ be a Galois ring over $\Gamma$ and $S=\Gamma\setminus \{0\}$. Then $S$ is a left and right Ore set, and the localization of $U$ by $S$ both on the left and on the right is isomorphic to $(L*\mathfrak{M})^G$.
\end{itemize}
\end{proposition}
We also recall the following characterization of Galois orders.
\begin{proposition} \cite{FO1} \label{prop-proj}
Let $\Gamma$ be a commutative Noetherian domain with the field of fractions $K$. If $U$ is a Galois ring over $\Gamma$ and $U$ is a left (right) projective $\Gamma$-module, then $U$ is a left (right) Galois order over $\Gamma$.
\end{proposition}
\begin{remark}\label{rem-Ore}
Let $D$ be a commutative domain, finitely generated as a $\mathsf{k}$-algebra, $\sigma \in Aut_{\mathsf{k}} \, D$ and $A=D[x; \sigma]$ the skew polynomial Ore extension, where $x d=\sigma(d)x$ for all $d\in D$. Then $D[x; \sigma]\simeq D* \mathcal{M}$, where
$$\mathcal{M} =\{\sigma^n\mid n=0, 1, \ldots\} \simeq \mathbb{N}.$$
The isomorphism is the identity on $D$ and sends $x$ to the generator $\overline{1}$ of the monoid $\mathbb{N}$, which acts on $D$ as $\sigma$. Then, for $L=K$ the field of fractions of $D$ and $G=\{e\}$, the algebra $A$ is a Galois ring (order) over $D$ in $K*\mathcal{M}$. The localization of $A$ by $x$ is isomorphic to $D*\mathbb{Z}$.
\end{remark}

\subsection{Invariant subalgebras}
We will use the following two results on subalgebras of invariants in the noncommutative setting. The first is a result of Montgomery and Small \cite{Montgomery} which generalizes the Hilbert-Noether theorem.
\begin{theorem}\label{Montgomery}
Let $A$ be a commutative Noetherian ring, and $R \supset A$ an overring such that $A$ is central and $R$ is a finitely generated $A$-algebra. Let $G$ be a finite group of $A$-algebra automorphisms of $R$ such that $|G|^{-1} \in R$. If $R$ is left and right Noetherian then $R^G$ is a finitely generated $A$-algebra.
\end{theorem}
The following lemma connects the projectivity of the subalgebra of invariants with the projectivity of the algebra itself as a module over the respective commutative subalgebras.
\begin{lemma} \label{lemma2}\cite{FS2}
Let $U$ be an associative algebra and $\Gamma\subset U$ a Noetherian commutative subalgebra. Let $H$ be a finite group of automorphisms of $U$ such that $H(\Gamma)\subset \Gamma$. If $U$ is a projective right (left) $\Gamma$-module and $\Gamma$ is projective over $\Gamma^H$, then $U^H$ is a projective right (left) $\Gamma^H$-module.
\end{lemma}

\subsection{Generalized Weyl algebras}
We will often use a realization of a given algebra as a \emph{generalized Weyl algebra} \cite{Bavula}. Let $D$ be a ring, $\sigma=(\sigma_1,\ldots, \sigma_n)$ an $n$-tuple of commuting automorphisms of $D$, and $a=(a_1,\ldots, a_n)$ nonzero elements of the center of $D$ such that $\sigma_i(a_j)=a_j$ for $j \neq i$. The generalized Weyl algebra $D(a, \sigma)$ is generated over $D$ by $X_i, Y_i$, $i=1,\ldots, n$, subject to the relations:
\[ X_i d = \sigma_i (d) X_i, \quad Y_i d= \sigma_i^{-1}(d) Y_i, \quad d \in D, \ i=1, \ldots , n , \]
\[ Y_i X_i = a_i, \quad X_i Y_i = \sigma_i(a_i), \quad i=1 ,\ldots , n \, ,\]
\[ [Y_i, X_j]=[Y_i, Y_j]=[X_i, X_j]=0 \, , \quad i \neq j.\]
We will assume that $D$ is a Noetherian domain which is a finitely generated $\mathsf{k}$-algebra. Fix a basis $e_1, \ldots, e_n$ of the free abelian group $\mathbb{Z}^n$. There is a natural embedding of $D(a, \sigma)$ into the skew group ring $D*\mathbb{Z}^n$, where the action on $D$ is defined as follows: $re_i$ acts as $\sigma_i^r$, for all $i$ and $r \in \mathbb{Z}$. Moreover, this embedding is an isomorphism if each $a_i$ is a unit in $D$, $i=1, \ldots, n$ (cf. \cite{FS2}, Proposition 4).
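For orientation we recall the most basic example; it is standard, not taken from the results above, and is included only as an illustration. For $n=1$, $D=\mathsf{k}[h]$, $\sigma(h)=h-1$ and $a=h$, the generalized Weyl algebra $D(a, \sigma)$ is isomorphic to the first Weyl algebra $A_1(\mathsf{k})$ via $X\mapsto x$, $Y\mapsto \partial$ and $h\mapsto \partial x$; indeed
$$YX \mapsto \partial x = h, \qquad XY \mapsto x\partial = \partial x - 1 = \sigma(h).$$
Here $a=h$ is not a unit in $D$, so the embedding $D(a, \sigma)\hookrightarrow D*\mathbb{Z}$ is not surjective.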
Both algebras, $D(a, \sigma)$ and $D*\mathbb{Z}^n$, admit skew fields of fractions. Hence, following the discussion above, we have
\begin{proposition}\label{prop-birational}
The algebras $D(a, \sigma)$ and $D*\mathbb{Z}^n$ have isomorphic skew fields of fractions.
\end{proposition}
Note that, if $\sigma_1,\ldots, \sigma_n$ are linearly independent over $\mathbb{Z}$, then $D(a, \sigma)$ is a Galois order over $D$ in the skew group ring $({\rm{Frac}} \, D)*\mathbb{Z}^n$ (cf. \cite{FS2}, Theorem 5).

\section{Invariants of quantum affine spaces}
In this section we consider the invariants of the quantum affine space $O_q(\mathsf{k}^{2n})$. Fix an integer $m>1$ and let $G_m\subset \mathsf{k}^*$ be a cyclic group of order $m$. Our first group $G=G_m^{\otimes n}$ is the product of $n$ copies of $G_m$. Consider the following natural action of $G_m^{\otimes n}$ on $O_q(\mathsf{k}^{2n})$: if $g=(g_1, \ldots, g_n)\in G$ then $g(x_i)=g_ix_i$, $g(y_i)=y_i$, $i=1, \ldots, n$. This action was defined in \cite{Hartwig}; however, we use the defining relations as in \cite{Dumas}. We have
\begin{proposition}\label{prop-affine-cyclic}
The invariant subalgebra $O_q(\mathsf{k}^{2n})^{G_m^{\otimes n}}$ is isomorphic to $O_{q^m}(\mathsf{k}^{2n})$.
\end{proposition}
\begin{proof}
The isomorphism $O_{q^m}(\mathsf{k}^{2n})\to O_q(\mathsf{k}^{2n})^{G_m^{\otimes n}}$ sends $x_i$ to $x_i^m$ and $y_i$ to $y_i$, $i=1, \ldots, n$.
\end{proof}
Consider the free monoid $\mathbb{N}^n$ with generators $\varepsilon_1, \ldots, \varepsilon_n$ and the skew monoid ring $\mathsf{k}[x_1,\ldots,x_n]*\mathbb{N}^n$, where $\mathbb{N}^n$ acts as follows: $\varepsilon_i(x_i) = qx_i$, $\varepsilon_i(x_j) = x_j$, $j \neq i$, $i, j=1, \ldots, n$.
\begin{proposition}\label{prop-affine-cyclic-order}
The quantum affine space $O_q(\mathsf{k}^{2n})$ is isomorphic to $\mathsf{k}[x_1,\ldots,x_n]*\mathbb{N}^n$. In particular, $O_q(\mathsf{k}^{2n})$ is a Galois ring over $\Gamma=\mathsf{k}[x_1,\ldots,x_n]$ in $\mathsf{k}(x_1,\ldots,x_n)*\mathbb{N}^n$.
\end{proposition}
\begin{proof}
The isomorphism is given by $x_i \mapsto x_i$, $y_i \mapsto \varepsilon_i$, $i=1,\ldots, n$. The rest is clear.
\end{proof}
For $m \geq 1$, $n \geq 1$ and $p>0$ with $p\mid m$, denote by $A(m,p,n)$ the subgroup of $G_m^{\otimes n}$ consisting of elements $(h_1,\ldots, h_n)$ such that $(h_1h_2 \ldots h_n)^{m/p} = id$. The groups $G(m,p,n) = A(m,p,n) \rtimes S_n$ were introduced by Shephard and Todd and describe all irreducible non-exceptional complex reflection groups. Here $S_n$ acts on $A(m, p, n)$ by permutations. Let $G=G(m,p,n)$, and consider the following action of $G$ on $O_q(\mathsf{k}^{2n})$: for $h=(g,\pi) \in G$ with $g=(g_1, \ldots, g_n)\in G_m^{\otimes n}$ and $\pi \in S_n$, set $h(x_i)= g_i x_{\pi(i)}$, $h(y_i)=y_{\pi(i)}$, $i=1,\ldots, n$.
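Although it is not spelled out in the sources cited above, one checks directly that these formulas define algebra automorphisms of $O_q(\mathsf{k}^{2n})$: since each $g_i$ is a scalar and generators with distinct indices commute, the only relation to verify is $y_ix_i=qx_iy_i$, and indeed
$$h(y_i)h(x_i) = y_{\pi(i)}\, g_i x_{\pi(i)} = g_i\, y_{\pi(i)} x_{\pi(i)} = g_i\, q\, x_{\pi(i)} y_{\pi(i)} = q\, h(x_i)h(y_i).$$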
The group $G$ also acts on $\mathsf{k}[x_1,\ldots,x_n]* \mathbb{N}^n$: the action on the $x_i$ is the same as above, and $h(\varepsilon_i)= \varepsilon_{\pi(i)}$. Clearly, $G$ acts on $\mathbb{N}^n$ by conjugation, and the isomorphism in Proposition \ref{prop-affine-cyclic-order} is $G$-equivariant. Hence, $O_q(\mathsf{k}^{2n})^{G}$ and $(\mathsf{k}[x_1,\ldots,x_n]*\mathbb{N}^n)^{G}$ are canonically isomorphic, and therefore $O_q(\mathsf{k}^{2n})^G$ is a Galois order over $\Gamma=\mathsf{k}[x_1, \ldots, x_n]^{G}$. Taking into account that $\Gamma$ is a polynomial algebra and applying Proposition \ref{prop-proj}, Lemma \ref{lemma2} and \cite{Bass}, Corollary 4.5, we have
\begin{theorem}\label{thm-affine-refl-order}
For every $G=G(m,p,n)$ the invariant subring $O_q(\mathsf{k}^{2n})^G$ of the quantum affine space is a Galois order over $\Gamma=\mathsf{k}[x_1, \ldots, x_n]^{G}$. Moreover, $O_q(\mathsf{k}^{2n})^G$ is free as a left (right) $\Gamma$-module.
\end{theorem}

\subsection{Invariants of the quantum torus}
One can extend Theorem \ref{thm-affine-refl-order} to the \emph{quantum torus} $O_q((\mathsf{k}^*)^{2n})$, which is the localization of $O_q(\mathsf{k}^{2n})\simeq \mathsf{k}[x_1,\ldots, x_n]*\mathbb{N}^n$ at $x_1,\ldots, x_n$, $y_1,\ldots, y_n$. Hence,
$$O_q((\mathsf{k}^*)^{2n})\simeq \mathsf{k}[x^{\pm 1}_1,\ldots, x^{\pm 1}_n]*\mathbb{Z}^n.$$
We also have, by Proposition \ref{prop-affine-cyclic},
$$O_q((\mathsf{k}^*)^{2n})^{G_m^{\otimes n}}\simeq O_{q^m}((\mathsf{k}^*)^{2n}).$$
Using the arguments before Theorem \ref{thm-affine-refl-order} we immediately obtain
\begin{theorem}\label{thm-torus-refl-order}
For every $G=G(m,p,n)$ the invariant subring $O_q((\mathsf{k}^*)^{2n})^G$ of the quantum torus is a Galois order over $\Gamma=\mathsf{k}[x^{\pm 1}_1, \ldots, x^{\pm 1}_n]^{G}$ in $(\mathsf{k}(x_1,\ldots, x_n)*\mathbb{Z}^n)^{G}$. Moreover, $O_q((\mathsf{k}^*)^{2n})^G$ is free as a left (right) $\Gamma$-module.
\end{theorem}

\subsection{Quantum complex plane}
In this section we assume that $\mathsf{k} = \mathbb{C}$.
\begin{proposition}\label{prop-Dumas}
Consider any finite group $G$ of automorphisms of the quantum plane $\mathbb{C}_q[x,y]$. Then the ring of invariants $\mathbb{C}_q[x,y]^G$ is embedded into the Ore extension $\mathbb{C}_q[x,y]_x^G \cong \mathbb{C}(x^m)[v; \sigma]$, where $\sigma(x^m)=q^n x^m$ for some $n, m >0$ and $v = x^k y^l$, $k, l>0$.
\end{proposition}
\begin{proof}
The action of $G$ on the quantum plane $\mathbb{C}_q[x,y]$ extends naturally to its action on the localization of $\mathbb{C}_q[x,y]$ by $x$. It was shown in \cite{Alev4} that every finite group $G$ of automorphisms of the quantum plane is a subgroup of the torus $(\mathbb{C}^*)^2$, and thus has the form $G_m \times G_{m'}$ for cyclic groups of orders $m$ and $m'$ respectively. Let $g'$ be a generator of $G_m$ and $g''$ a generator of $G_{m'}$.
Then $(g'^k,g''^l)(x)=\alpha^k x$ and $(g'^k,g''^l)(y)=\beta^l y$, where $\alpha$ is a primitive $m$-th root of unity and $\beta$ is a primitive $m'$-th root of unity. The subring of $G$-invariants of the localized ring $\mathbb{C}_q[x,y]_x$ is the Ore extension $\mathbb{C}(x^m)[v; \sigma]$, where $\sigma(x^m)=q^n x^m$ for some $n$ and $m$, by \cite{Dumas}, 3.3.3. Multiplying $v$ by $x^m$ sufficiently many times, we can assume it to be of the claimed form.
\end{proof}
We have the following general result about the invariants of the quantum plane.
\begin{theorem}\label{thm-plane-order}
For every finite group $G$ of automorphisms of the quantum plane $\mathbb{C}_q[x,y]$ the subring of invariants $\mathbb{C}_q[x,y]^G$ is a Galois order over a certain polynomial subalgebra $\Gamma$. Moreover, $\mathbb{C}_q[x,y]^G$ is free as a left (right) $\Gamma$-module.
\end{theorem}
\begin{proof}
The subring of invariants $\mathbb{C}_q[x,y]^G$ is embedded into $\mathbb{C}(x^m)[v;\sigma] \cong \mathbb{C}(x^m)*\mathbb{N}$ by Proposition \ref{prop-Dumas}, where the generator $\overline{1}$ of $\mathbb{N}$ acts as follows: $\overline{1}(x^m)=q^n x^m$. Also, $v = x^k y^l$ is $G$-invariant and it is mapped to $\overline{1}$ under the isomorphism above. We conclude that $\mathbb{C}_q[x,y]^G$ is a Galois order over $\mathbb{C}[x^m]$ (cf. Remark \ref{rem-Ore}). The rest follows from Proposition \ref{prop-proj}, Lemma \ref{lemma2} and \cite{Bass}, Corollary 4.5.
\end{proof}

\section{Invariants of quantum Weyl algebras}
Consider now the first quantum Weyl algebra $A_1^q(\mathsf{k})$, generated over $\mathsf{k}$ by $x$ and $y$ subject to the relation $yx-qxy=1$. It can be realized as a generalized Weyl algebra $D(a, \sigma)$ with $D=\mathsf{k}[h]$, $a=h$, $\sigma(h)=q^{-1} (h-1)$ and generators $X, Y$. The isomorphism is given as follows: $yx \mapsto h$, $x \mapsto X$, $y \mapsto Y$. Then $A_1^q(\mathsf{k})$ is a Galois order over $D$ by \cite{FO1}, as $q$ is not a root of unity and $\sigma$ has infinite order. Moreover, the quantum Weyl algebra $A_n^{q}(\mathsf{k})\simeq A_1^q(\mathsf{k})^{\otimes n}$ is a Galois order over $\Gamma=\mathsf{k} [h_1,\ldots, h_n]$ in $\mathsf{k}(h_1,\ldots, h_n)*\mathbb{Z}^n$, where a basis $\varepsilon_1, \ldots, \varepsilon_n$ of $\mathbb{Z}^n$ acts on $\Gamma$ as expected: $\varepsilon_i(h_i)=q^{-1}(h_i-1)$ and $\varepsilon_i(h_j)=h_j$ for $j\neq i$, $i,j=1,\ldots, n$. The embedding is given by
\[ y_i x_i \mapsto h_i, \quad x_i \mapsto \varepsilon_i, \quad y_i \mapsto h_i \varepsilon_i^{-1},\]
$i=1,\ldots, n$. Consider the subring of invariants $A_n^{q}(\mathsf{k})^{S_n}$, where $S_n$ acts by simultaneous permutations of the variables $y_i$ and $x_i$, $i=1,\ldots, n$.
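Let us record a quick consistency check for the embedding of $A_n^{q}(\mathsf{k})$ displayed above; this verification is not in the original text and uses the convention, as in the isomorphism of Proposition \ref{prop-affine-cyclic-order}, that in a skew group ring one has $\varepsilon\, d=\varepsilon(d)\,\varepsilon$ for $d$ in the coefficient field. We have
$$y_ix_i \mapsto (h_i\varepsilon_i^{-1})\varepsilon_i = h_i, \qquad x_iy_i \mapsto \varepsilon_i h_i\varepsilon_i^{-1} = \varepsilon_i(h_i)=q^{-1}(h_i-1),$$
so $y_ix_i-qx_iy_i \mapsto h_i-(h_i-1)=1$, as required.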
Using the structure of the quantum Weyl algebra $A_n^{q}(\mathsf{k})$ as a Galois order over $\Gamma=\mathsf{k} [h_1,\ldots, h_n]$ in $\mathsf{k}(h_1,\ldots, h_n)*\mathbb{Z}^n$ we obtain an embedding of $A_n^{q}(\mathsf{k})^{S_n}$ into the ring $(\mathsf{k} (h_1,\ldots, h_n)*\mathbb{Z}^n)^{S_n}$, where $S_n$ permutes $h_1,\ldots,h_n$ and acts on $\mathbb{Z}^n$ by conjugation: if $\pi \in S_n$ then $\pi(\varepsilon_i)=\varepsilon_{\pi(i)}$.
\begin{theorem}\label{thm-quantum-order}
$A_n^{q}(\mathsf{k})^{S_n}$ is a Galois order over $\Gamma=\mathsf{k}[h_1,\ldots, h_n]^{S_n}$. Moreover, $A_n^{q}(\mathsf{k})^{S_n}$ is free as a left (right) $\Gamma$-module.
\end{theorem}
\begin{proof}
The algebra $A_n^{q}(\mathsf{k})^{S_n}$ is finitely generated by Theorem \ref{Montgomery}. Choose generators $u_1, \ldots, u_k$ and add to this list the elements $x_1 + \ldots + x_n$ and $y_1+ \ldots + y_n$. The images of the latter two elements in $\mathsf{k}(h_1,\ldots, h_n)*\mathbb{Z}^n$ are $\varepsilon_1 + \ldots + \varepsilon_n$ and $h_1 \varepsilon_1^{-1} + \ldots + h_n \varepsilon_n^{-1}$ respectively. Hence the supports of their images generate $\mathbb{Z}^n$ as a group, and the first statement follows from Proposition \ref{prop-supp}. The second statement follows from Proposition \ref{prop-proj}, Lemma \ref{lemma2} and \cite{Bass}, Corollary 4.5.
\end{proof}
We have the following analog of Theorem \ref{thm-plane-order} for the first quantum Weyl algebra when $\mathsf{k} = \mathbb{C}$:
\begin{proposition}\label{prop-A1-order}
Let $G$ be any finite group of automorphisms of $A_1^q(\mathbb{C})$. Then the invariant subring $A_1^q(\mathbb{C})^G$ is a Galois order over $\Gamma=\mathbb{C}[x^m]$ in $\mathbb{C}(x^m)*\mathbb{N}$. Moreover, $A_1^q(\mathbb{C})^G$ is free as a left (right) $\Gamma$-module.
\end{proposition}
\begin{proof}
Again, by Alev and Dumas (\cite{Alev2}), every finite group $G$ of automorphisms of $A_1^q(\mathbb{C})$ is of the form $G_m$, where the generator of $G_m$ acts by $x \mapsto \alpha x$, $y \mapsto \alpha^{-1} y$ for some primitive $m$-th root of unity $\alpha$. The localization of $A_1^q(\mathbb{C})$ by $x$ is isomorphic to $\mathbb{C}(x)[z; \sigma]$, with $z=(q-1)xy +1$ and $\sigma(x)=qx$. On the other hand, $\mathbb{C}(x)[z; \sigma]$ is just the localization of $\mathbb{C}_q [x, z]$ by $x$. By Theorem \ref{thm-plane-order} we obtain an embedding of $A_1^q(\mathbb{C})^G$ into $\mathbb{C}(x^m)[v; \sigma] \cong \mathbb{C}(x^m)*\mathbb{N}$, where $\sigma (x^m)=q^n x^m$.
\end{proof}

\section{Quantum Gelfand-Kirillov conjecture}\label{QGKC}
The \emph{quantum Gelfand-Kirillov conjecture} (cf.
\cite{BG}, \cite{FH}) compares the skew field of fractions of a given algebra with \emph{quantum Weyl fields}, that is, skew fields of fractions of tensor products of quantum Weyl algebras $A_1^{q_1}(\mathsf{k})\otimes_{\mathsf{k}} \cdots \otimes_{\mathsf{k}} A_1^{q_n}(\mathsf{k})$ (or, equivalently, of quantum affine spaces). An algebra $A$ is said to satisfy the quantum Gelfand-Kirillov conjecture if ${\rm{Frac}} (A)$ is isomorphic to a quantum Weyl field over a purely transcendental extension of $\mathsf{k}$. We will say that two domains $D_{1}$ and $D_{2}$ are \emph{birationally equivalent} if ${\rm{Frac}} (D_{1})\simeq {\rm{Frac}} (D_{2})$.

The quantum Gelfand-Kirillov conjecture is strongly connected with the \emph{$q$-difference Noether problem} for reflection groups introduced in \cite{Hartwig}. This problem asks whether the invariant quantum Weyl subfield $({\rm{Frac}}\, A_n^{{q}}(\mathsf{k}))^W$ is isomorphic to some quantum Weyl field, where $W$ is a reflection group. The positive solution of the $q$-difference Noether problem was obtained in \cite{Hartwig} for classical reflection groups. Using this fact, the validity of the quantum Gelfand-Kirillov conjecture was shown for the quantum universal enveloping algebra $U_q(gl_n)$ (\cite{FH}) and for the quantum orthogonal Gelfand-Zetlin algebras of type $A$ (\cite{Hartwig}). The latter class includes the simply connected quantized form $\check{U}(gl_n)$ of $gl_n$ and the quantized Heisenberg Lie algebra, among others.

\subsection{Functions on the quantum $2$-sphere}
Denote by $A(S^2_\lambda)$ the algebra of functions on the quantum $2$-sphere. The algebra $A(S^2_\lambda)$ is the quotient of $\mathbb{C} \langle X,Y,H \rangle$ by the relations
$$XH = \lambda HX, \quad YH=\lambda^{-1} HY, $$
$$ \lambda^{1/2}YX=-(c-H)(d+H), \quad \lambda^{-1/2}XY = -(c- \lambda H)(d+ \lambda H). $$
It can be realized as a generalized Weyl algebra $\mathbb{C}[H](a, \sigma)$, where
$$a = -\lambda^{-1/2}(c-H)(d+H)$$
and $\sigma(H) = \lambda H$. By Proposition \ref{prop-birational}, $\mathbb{C}[H](a, \sigma)$ is birationally equivalent to $\mathbb{C}[H] * \mathbb{Z}$, where $\mathbb{Z}$ is generated by $\overline{1}$ and $\overline{1}(H)= \lambda H$. Applying Proposition \ref{prop-affine-cyclic-order} we obtain that $A(S^2_\lambda)$ is birationally equivalent to the quantum plane with parameter $\lambda$. Hence, $A(S^2_\lambda)$ satisfies the quantum Gelfand-Kirillov conjecture, that is
\begin{corollary}\label{cor-sphere}
${\rm{Frac}} \, A(S^2_\lambda) \cong {\rm{Frac}} \, \mathbb{C}_\lambda[x,y]$.
\end{corollary}

\subsection{The quantum group $O_{q^2}(so(3,\mathbb{C}))$}
Let $A=O_{q^2}(so(3,\mathbb{C}))$. The algebra $A$ can be realized as a generalized Weyl algebra $\mathbb{C}[H,C](a, \sigma)$, where $a= C+H^2/(q(1+q^2))$ and $\sigma(C)=C$, $\sigma(H)=q^2 H$. By Proposition \ref{prop-birational}, $A$ is birationally equivalent to $\mathbb{C}[C,H]*\mathbb{Z}$, where $\mathbb{Z}$ is generated by $\overline{1}$ acting as $\sigma$ on $\mathbb{C}[C,H]$. Since $C$ is invariant under $\sigma$, this ring is clearly birationally equivalent to $\mathbb{C}[C] \otimes (\mathbb{C}[H] * \mathbb{Z})$.
Applying Proposition \ref{prop-affine-cyclic-order} we obtain that $A$ satisfies the quantum Gelfand-Kirillov conjecture, that is
\begin{corollary}\label{cor-so}
${\rm{Frac}} \, O_{q^2}(so(3,\mathbb{C})) \cong {\rm{Frac}} \, (\mathbb{C}(C) \otimes \mathbb{C}_{q^2}[x,y])$.
\end{corollary}

\subsection{Quantum Linear Galois Algebras}
In this section we obtain a quantum version of the theory of linear Galois algebras developed in \cite{Eshmatov}. The field $\mathsf{k}$ is assumed to be the field of complex numbers. Recall that $U$ is a \emph{Galois algebra} over $\Gamma$ if $U$ is a Galois ring over $\Gamma$ and a $\mathsf{k}$-algebra.

Let $V$ be a finite dimensional complex vector space, $S=S(V^*)=\mathbb{C}[x_1,\ldots, x_n]$, and $L = {\rm{Frac}} \, S$. Let $G$ be a unitary reflection group which is a product of groups of type $G(m,p,n)$. Consider the tensor product of polynomial algebras $S \otimes \mathbb{C}[w_1,\ldots,w_m]$, with the trivial action of $G$ on the second component. A \emph{quantum linear Galois algebra} $U$ is a Galois algebra over an appropriate $\Gamma$ in $(\mathbb{C}(x_1,\ldots,x_n;w_1,\ldots,w_m)*\mathbb{Z}^n)^G$ or $(\mathbb{C}(x_1,\ldots,x_n;w_1,\ldots,w_m)*\mathbb{N}^n)^G$, where a basis $\varepsilon_1, \ldots, \varepsilon_n$ of either $\mathbb{Z}^n$ or $\mathbb{N}^n$ acts as follows: $\varepsilon_i(x_i) = qx_i$, $\varepsilon_i(x_j) = x_j$, $j \neq i$, $i,j=1,\ldots, n$.

Note that the quantum universal enveloping algebra $U_q(gl_n)$ and the quantum orthogonal Gelfand-Zetlin algebras of type $A$ are examples of quantum linear Galois algebras \cite{FH}, \cite{Hartwig}. The results of the previous sections show that the following algebras are also quantum linear Galois algebras:
\begin{itemize}
\item $O_q(\mathsf{k}^{2n})^G$ for $G=G(m,p,n)$;
\item $A_n^{{q}}(\mathsf{k})^{S_n}$;
\item $O_q((\mathsf{k}^*)^{2n})^G$ for $G=G(m,p,n)$.
\end{itemize}
The following theorem shows that the quantum Gelfand-Kirillov conjecture holds for quantum linear Galois algebras; it is the quantum analogue of \cite{Eshmatov}, Theorem 6.
\begin{theorem}\label{thm-qlga}
Let $U$ be a quantum linear Galois algebra in
$$(\mathbb{C}(x_1,\ldots,x_n;w_1,\ldots,w_m)*X^n)^G,$$
where $X$ is either $\mathbb{Z}$ or $\mathbb{N}$, with the $G$-action as above. Then the quantum Gelfand-Kirillov conjecture holds for $U$ and there exists $l=(l_1,\ldots, l_n) \in \mathbb{Z}^n$ such that
$${\rm{Frac}} \, U \cong {\rm{Frac}} \, (O_{\overline{q}}(\mathbb{C}^{2n}) \otimes \mathbb{C}[w_1,\ldots, w_m]),$$
where $\overline{q}=(q^{l_1},\ldots, q^{l_n})$.
\end{theorem}
\begin{proof}
The proof follows from Proposition \ref{prop-supp}, (ii) and the positive solution of the $q$-difference Noether problem for $G$ \cite{Hartwig}.
\end{proof}

\subsection{Skew field of fractions of $U(sl_2)$}
Consider the standard basis $e,f,h$ of $sl_2$, where $[h,e]=e$, $[h,f]=-f$, $[e,f]=2h$.
The universal enveloping algebra $U(sl_2)$ can be realized as a generalized Weyl algebra $\mathsf{k}[H,C](a, \sigma)$, where $a=C-H(H+1)$, $\sigma(H)=H-1$ and $\sigma(C)=C$, with the isomorphism given by $e \mapsto X$, $f \mapsto Y$, $h \mapsto H$, $h(h+1)+fe \mapsto C$. Define an action of the cyclic group $G_m$ of order $m$, $m>1$, on $U(sl_2)$ as follows. Denote by $g$ a generator of $G_m$. Then $g$ fixes $h$ and sends $e \mapsto \xi e$, $f \mapsto \xi^{-1} f$, where $\xi$ is a fixed primitive $m$-th root of unity. We have that $\mathsf{k}[H,C](a, \sigma)$ (and hence $U(sl_2)$) is birationally equivalent to $\mathsf{k}[H,C]*\mathbb{Z}$, where again $\mathbb{Z}$ acts by $\sigma$. The action of $G_m$ naturally extends to $\mathsf{k}[H,C]*\mathbb{Z}$, where the generator $g$ acts on the group component by $\overline{y} \mapsto \xi^{y}\, \overline{y}$, $y \in \mathbb{Z}$. Therefore $U(sl_2)^{G_m}$ embeds into $(\mathsf{k}[H,C]*\mathbb{Z})^{G_m}$. Since $C$ is fixed by $\sigma$ and also by the action of $G_m$, we have
$${\rm{Frac}} (\mathsf{k}[H,C]*\mathbb{Z})^{G_m} \cong {\rm{Frac}} (\mathsf{k}[C] \otimes (\mathsf{k}[H]*\mathbb{Z})^{G_m}).$$
On the other hand, $\mathsf{k}[H]*\mathbb{Z}$ is isomorphic to the localization $A_1(\mathsf{k})_x = A_1(\mathsf{k})_{x^m}$ (\cite{FO1}, Section 7) of the first Weyl algebra. Hence,
$${\rm{Frac}} (\mathsf{k}[H]*\mathbb{Z})^{G_m} \cong {\rm{Frac}} (A_1(\mathsf{k})_{x^m})^{G_m} \cong {\rm{Frac}} (A_1(\mathsf{k})^{G_m}_{x^m}) \cong {\rm{Frac}} (A_1(\mathsf{k})^{G_m}),$$
where the action of the generator $g$ on $A_1(\mathsf{k})$ is as follows: $x \mapsto \xi^{-1} x$, $\partial \mapsto \xi \partial$. We conclude that $U(sl_2)^{G_m}$ is birationally equivalent to $\mathsf{k}[C] \otimes A_1(\mathsf{k})^{G_m}$. Taking into account the result of \cite{Alev1}, which implies that ${\rm{Frac}}(A_1(\mathsf{k})^{G_m})\simeq {\rm{Frac}}(A_1(\mathsf{k}))$, we finally have
\begin{corollary}\label{cor-sl2}
For any $m>1$ and the action of $G_m$ described above, we have
$${\rm{Frac}} (U(sl_2)^{G_m}) \cong {\rm{Frac}} (\mathsf{k}[C] \otimes A_1(\mathsf{k})) \cong {\rm{Frac}} (U(sl_2)).$$
\end{corollary}
The last isomorphism is just the classical Gelfand-Kirillov conjecture for $sl_2$ \cite{Gelfand}. Recall that $U(sl_2)$ is rigid by \cite{Alev3}, that is, there is no non-trivial finite group $G \subset Aut_{\mathsf{k}} \, U(sl_2)$ such that $U(sl_2)^G \cong U(sl_2)$. By Corollary \ref{cor-sl2}, in spite of the rigidity of $U(sl_2)$ we have ${\rm{Frac}}(U(sl_2)^{G_m})\simeq {\rm{Frac}}(U(sl_2))$, which provides an example related to the question posed in \cite{Kirkman}.

\begin{thebibliography}{9}
\bibitem{Alev1} Alev, J.; Dumas, F.; Op\'erateurs diff\'erentiels invariants et probl\`eme de Noether, Studies in Lie Theory (eds. J. Bernstein, V. Hinich and A. Melnikov), Birkh\"auser, Boston, 2006.
\bibitem{Alev2} Alev, J.; Dumas, F.; Rigidit\'e des plongements des quotients primitifs minimaux de $U_q(sl(2))$ dans l'alg\`ebre quantique de Weyl-Hayashi, Nagoya Math. J.
143 (1996), 119--146.
\bibitem{Alev3} Alev, J.; Polo, P.; A rigidity theorem for finite group actions on enveloping algebras of semisimple Lie algebras, Advances in Mathematics, 111 (1995), 208--226.
\bibitem{Alev4} Alev, J.; Chamarie, M.; Automorphismes et d\'erivations de quelques alg\`ebres quantiques, Commun. Algebra, 20 (1992), 1787--1802.
\bibitem{Bass} Bass, H.; Big projective modules are free, Illinois J. Math., 7 (1963), 24--31.
\bibitem{Bavula} Bavula, V.; Generalized Weyl algebras and their representations, Algebra i Analiz 4 (1992), 75--97. English translation: St. Petersburg Math. J. 4 (1993), 71--92.
\bibitem{BG} Brown, K. A.; Goodearl, K. R.; Lectures on algebraic quantum groups, Advanced Course in Math. CRM Barcelona, vol. 2, Birkh\"auser Verlag, Basel, 2002.
\bibitem{Dumas} Dumas, F.; An introduction to noncommutative polynomial invariants, Lecture Notes, Homological methods and representations of non commutative algebras, Mar del Plata, Argentina, March 6--16, 2006.
\bibitem{Eshmatov} Eshmatov, F.; Futorny, V.; Ovsienko, S.; Schwarz, J.; Noncommutative Noether's problem for unitary reflection groups, Proceedings of the American Mathematical Society, 145 (2017), 5043--5052.
\bibitem{FH} Futorny, V.; Hartwig, J. T.; Solution of a $q$-difference Noether problem and the quantum Gelfand-Kirillov conjecture for $gl_n$, Mathematische Zeitschrift, 276 (2014), 1--37.
\bibitem{FO1} Futorny, V.; Ovsienko, S.; Galois orders in skew monoid rings, J. of Algebra, 324 (2010), 598--630.
\bibitem{FO2} Futorny, V.; Ovsienko, S.; Fibers of characters in Gelfand-Tsetlin categories, Transactions of the American Mathematical Society, 366 (2014), 4173--4208.
\bibitem{FS1} Futorny, V.; Schwarz, J.; Galois orders of symmetric differential operators, Algebra and Discrete Mathematics, 23 (2017), 35--46.
\bibitem{FS2} Futorny, V.; Schwarz, J.; Algebras of invariant differential operators, 2018, arXiv:1804.05029.
\bibitem{FMO} Futorny, V.; Molev, A.; Ovsienko, S.; The Gelfand-Kirillov conjecture and Gelfand-Tsetlin modules for finite W-algebras, Advances in Mathematics 223 (2010), 773--796.
\bibitem{Gelfand} Gelfand, I. M.; Kirillov, A. A.; Sur les corps li\'es aux alg\`ebres enveloppantes des alg\`ebres de Lie, Inst. Hautes \'Etudes Sci. Publ. Math., 31 (1966), 5--19.
\bibitem{Hartwig} Hartwig, J. T.; The $q$-difference Noether problem for complex reflection groups and quantum OGZ algebras, Communications in Algebra, 45 (2017), 1166--1176.
\bibitem{Kirkman} Kirkman, E.; Kuzmanovich, J.; Zhang, J.; Rigidity of graded regular algebras, Transactions of the American Mathematical Society, 360 (2008), 6331--6369.
\bibitem{Montgomery} Montgomery, S.; Small, L. W.; Fixed rings of Noetherian rings, Bull. London Math. Soc., 13 (1981), 33--38.
\bibitem{Ovsienko} Ovsienko, S.; Finiteness statements for Gelfand-Tsetlin modules, Proceedings of the Third International Algebraic Conference in Ukraine (Ukrainian), Natsional. Akad. Nauk Ukrainy, Inst. Mat., Kiev, (2002), 323--338.
\end{thebibliography}
\end{document}
\begin{document} \normalem \title[Uniform convergence]{Uniform convergence of operator semigroups without time regularity} \author{Alexander Dobrick} \address{Alexander Dobrick, Arbeitsbereich Analysis, Christian-Albrechts-Universit\"at zu Kiel, Ludewig-Meyn-Str.\ 4, 24098 Kiel, Germany} \email{[email protected]} \author{Jochen Gl\"uck} \address{Jochen Gl\"uck, Fakultät für Informatik und Mathematik, Universität Passau, Innstraße 33, D-94032 Passau, Germany} \email{[email protected]} \subjclass[2010]{47D03; 47D06; 35K40; 35B40} \keywords{Coupled heat equations; Schroedinger semigroups; matrix potential; long-term behaviour; semigroup representation; Jacobs--de Leeuw--Glicksberg theory; semigroup at infinity} \date{\today} \begin{abstract} When we are interested in the long-term behaviour of solutions to linear evolution equations, a large variety of techniques from the theory of $C_0$-semigroups is at our disposal. However, if we consider for instance parabolic equations with unbounded coefficients on $\mathbb{R}^d$, the solution semigroup will not be strongly continuous, in general. For such semigroups many tools that can be used to investigate the asymptotic behaviour of $C_0$-semigroups are not available anymore and, hence, much less is known about their long-time behaviour. Motivated by this observation, we prove new characterisations of the operator norm convergence of general semigroup representations -- without any time regularity assumptions -- by adapting the concept of the ``semigroup at infinity'', recently introduced by M.~Haase and the second named author. Besides its independence of time regularity, our approach also allows us to treat the discrete-time case (i.e., powers of a single operator) and even more abstract semigroup representations within the same unified setting. As an application of our results, we prove a convergence theorem for solutions to systems of parabolic equations with the aforementioned properties. \end{abstract} \maketitle \section{Introduction} The purpose of this article is to study uniform convergence to equilibrium for linear operator semigroups as time tends to infinity. For powers $T^n$ of a single operator $T$, it is well-known that this kind of long-time behaviour can be characterised by spectral properties of $T$. For $C_0$-semigroups, the situation is more subtle, but has been extensively studied throughout the literature. We refer, for instance, to the classical references \cite{Neerven1996}, \cite[Chapter~V]{Engel2000}, \cite{Emelyanov2007}, \cite{Eisner2010} and \cite[Chapter 14]{Batkai2017} for more information. Still, the current state of the art leaves the following issues open: \begin{research_questions} \begin{enumerate}[(1)] \item For semigroups indexed over the continuous time interval $[0,\infty)$, much of the known theory deals with the case of $C_0$-semigroups. This special case is very useful for many applications, for instance in the analysis of partial differential equations. On the other hand, there are important PDEs whose solution semigroup is not strongly continuous; this situation occurs, for instance, for parabolic equations with unbounded coefficients on $\mathbb{R}^d$ (see Section~\ref{section:application-coupled-parabolic-equations}). Therefore, the treatment of such examples requires a theory which is capable of efficiently handling semigroups that are not strongly continuous.
\item The quest for a most cohesive and clear theory suggests that we should also seek for methods which help us to treat the discrete-time case (i.e., powers of a single operator) and the continuous-time case (i.e., semigroups indexed over the time interval $[0,\infty)$) within a unified theoretical framework. This was also a major guideline in \cite{GerlachConvPOS} and \cite{Glueck2019}. \end{enumerate} \end{research_questions} \subsection*{Contributions} Our answer to the issues mentioned above is as follows: inspired by the classical Jacobs--de Leeuw--Glicksberg (JdLG) theory and its success in the study of the long-time behaviour of strongly or weakly compact operator semigroups, we show how a similar idea can be used to study convergence of semigroups with respect to the operator norm. This is not a straightforward task due to the following obstacle: if $(T_s)_{s \in [0,\infty)}$ is an operator semigroup on a Banach space, even the local orbits \begin{align*} \{T_s\colon s \in [0,s_0]\} \end{align*} will typically not be (relatively) compact with respect to the operator norm. Therefore, in order to obtain compactness -- and to thus employ typical JdLG arguments --, we restrict our attention to the behaviour of the semigroup ``at infinitely large times''. This idea leads us to what we call the \emph{semigroup at infinity}. This concept was recently developed in \cite{Glueck2019} in order to study strong convergence of semigroups; here, we adapt it to the operator norm topology -- and as it turns out, this completely resolves the issues that the local orbits of semigroups are not operator norm compact, in general. Here is an outline of our general strategy: \begin{itemize} \item In Section~\ref{section:semigroup-representations-and-the-semigroup-at-infinity} we study general representations $(T_s)_{s \in S}$ of commutative semigroups $S$ on Banach spaces. To each such representation we assign a \emph{semigroup at infinity}. Under appropriate assumptions, this yields a splitting of the semigroup into a ``stable part'' that converges to $0$, and a ``reversible part'' that extends to a compact group. \item In Section~\ref{section:triviality-of-compact-operator-groups} we give sufficient criteria for compact operator groups to be trivial. \item By combining the aforementioned results, we finally obtain various criteria for operator norm convergence of semigroups in Section~\ref{section:operator-norm-convergence-of-semigroups}. \end{itemize} The following result demonstrates what can be shown by our methods for semigroups indexed over the time interval $[0,\infty)$ without any continuity assumption. For undefined terminology about operator semigroups we refer to the beginning of Section~\ref{section:semigroup-representations-and-the-semigroup-at-infinity}. AM-spaces are a class of Banach lattices that are, for instance, described in \cite[Section~II.7]{Schaefer1974}; here we only mention that the space $C_b(X;\mathbb{R})$ of all bounded continuous real-valued functions on a topological space $X$ is an example of an AM-space. \begin{theorem} \label{thm:introduction} Let $E$ be an AM-space (over the scalar field $\mathbb{R}$), or a real-valued $L^p$-space over an arbitrary measure space for $p \in [1,\infty] \setminus \{2\}$. For each contractive operator semigroup $(T_s)_{s \in [0,\infty)}$ on $E$ the following assertions are equivalent: \begin{enumerate}[\upshape (i)] \item $T_s$ converges with respect to the operator norm to a finite rank projection as $s \to \infty$. 
\item There exists a time $s_0 \in [0,\infty)$ such that $T_{s_0}$ is quasi-compact. \end{enumerate} \end{theorem} Here, \emph{contractive} means that $\norm{T_s} \le 1$ for all $s \in [0,\infty)$. We prove this theorem at the end of Subsection~\ref{subsection:convergence-under-divisibility-conditions}. In Section~\ref{section:application-coupled-parabolic-equations} we show how the theorem can be applied to study the long-time behaviour of certain parabolic systems on $\mathbb{R}^d$. In the appendix we recall a few facts on poles of operator resolvents and on the behaviour of nets in metric spaces. \begin{remark} We mentioned two research questions at the beginning of the introduction. Theorem~\ref{thm:introduction} demonstrates that our methods yield non-trivial answers to question~(1). But the theorem also allows for an interesting insight concerning question~(2): The powers of a two-dimensional permutation matrix show that the conclusion of the theorem fails for semigroups indexed over $\mathbb{N}_0$ rather than $[0,\infty)$. The fact that we treat general semigroup representations enables us to understand this phenomenon on a very conceptual level: it is caused by the different algebraic properties of the semigroups $([0,\infty),+)$ and $(\mathbb{N}_0,+)$ -- the first one is \emph{essentially divisible}, while the second one is not. We refer to Subsection~\ref{subsection:convergence-under-divisibility-conditions} and in particular to Remark~\ref{rem:continuous-vs-discrete-time} for details. \end{remark} \subsection*{Related literature} Despite the prevalence of $C_0$-semigroups, semigroups with weaker continuity assumptions occur on many occasions in the literature. The time regularity properties one encounters vary from strong continuity on $(0,\infty)$ (see e.g.\ \cite{Arendt2016, Arendt2018} for two applications) to such concepts as bi-continuity \cite{Kuehnemund2003} and continuity on norming dual pairs \cite{Kunze2009}. Strong convergence for semigroups that are not $C_0$ has recently been studied in the papers \cite{GerlachLB}, \cite{GerlachConvPOS} and \cite{Glueck2019}; the latter of them is closely related to the approach that we present here. One of the few classical results about operator norm convergence of semigroups that are not $C_0$ is a theorem of Lotz about quasi-compact positive semigroups on Banach lattices \cite[Theorem~4]{Lotz1986}; we generalise this result in Corollary~\ref{cor:quasi-compact-convergence-banach-lattice} below. In \cite{Dobrick2020} the semigroup at infinity is used to investigate the long-term behaviour of semigroups associated to transport processes on infinite networks with $L^\infty$-state spaces. \subsection*{Notation and Terminology.} We denote the complex unit circle by $\mathbb{T}$. All Banach spaces in this paper can be either real or complex, unless otherwise specified. To clarify whether the elements of certain function spaces are assumed to be real- or complex-valued we use notation such as $L^p(\Omega,\mu; \mathbb{R})$ and $L^p(\Omega,\mu;\mathbb{C})$, etc. Let $E,F$ be Banach spaces (over the same scalar field). We endow the space $\mathcal{L}(E;F)$ of bounded linear operators from $E$ to $F$ with the operator norm topology throughout; moreover, we use the abbreviation $\mathcal{L}(E) \coloneqq \mathcal{L}(E;E)$. 
For a set $\mathcal{M} \subseteq \mathcal{L}(E)$ and a closed subspace $U \subseteq E$ that is invariant under all operators in $\mathcal{M}$, we use the notation \begin{align*} \mathcal{M}|_U := \{T|_U: \, T \in \mathcal{M}\} \subseteq \mathcal{L}(U). \end{align*} The dual Banach space of $E$ will be denoted by $E'$. If the underlying scalar field is complex, the spectrum of a linear operator $A\colon E \supseteq D(A) \to E$ will be denoted by $\sigma(A)$; for $\lambda \in \mathbb{C} \setminus \sigma(A)$, the resolvent of $A$ at $\lambda$ is denoted by $\mathcal{R}(\lambda,A) \coloneqq (\lambda - A)^{-1}$. Further, the point spectrum of $A$ will be denoted by $\sigma_{\operatorname{pnt}}(A)$. If the underlying scalar field of $E$ is real, the spectrum and the point spectrum of an operator $A$ are defined as the spectrum and the point spectrum of the canonical extension of $A$ to any complexification of $E$. Basic terminology for semigroup representations is introduced at the beginning of the next section. \section{Semigroup representations and the semigroup at infinity} \label{section:semigroup-representations-and-the-semigroup-at-infinity} In this section we develop a general framework to analyse whether an operator semigroup converges with respect to the operator norm as time tends to infinity. The most important situation that occurs in applications is that the semigroup contains a quasi-compact operator, and this situation will also be one of our main interests (though not our only interest). In the case of $C_0$-semigroups, a rather complete description of the long-term behaviour under quasi-compactness assumptions can be found in \cite[Section~V.3]{Engel2000} (and for more general aspects of the long-term behaviour of $C_0$-semigroups we refer for instance to \cite[Chapter~V]{Engel2000} and \cite{Eisner2010}). However, as has become apparent in the preceding section, the case of $C_0$-semigroups is not always sufficient and, as explained in the introduction, we do not wish to develop an individual convergence theory for each different type of time regularity that might occur in applications. Thus, we stick to the other extreme and develop a single theory that does not assume any time regularity at all. This goal being set, it is only natural to leave the restricted setting of semigroups of the type $(T_s)_{s \in [0,\infty)}$, and to consider operator representations of general commutative semigroups $(S,+)$ instead. This allows us to also treat the time-discrete case $(T^n)_{n \in \mathbb{N}_0}$ and, for instance, the case of multi-parameter semigroups within a single theory. Moreover, it allows for some interesting theoretical observations in the spirit of \cite{GerlachConvPOS} and \cite{Glueck2019}. Our approach is based on the celebrated Jacobs--de Leeuw--Glicksberg (JdLG) theory which applies abstract results on (semi-)topological semigroups to the more concrete situation of operator semigroups, and we combine this with the construction of a \emph{semigroup at infinity} which is inspired by \cite{Glueck2019}. In this context, we also find it worthwhile to mention that there exist other quite abstract approaches to general operator semigroups, too, that do not rely on JdLG theory (see for instance \cite{Gao2014}); however, we will stick to JdLG theory in this paper. 
\subsection{Setting} \label{subsection:general-semigroup-setting} Throughout the rest of this paper, let $(S,+)$ be a commutative semigroup with neutral element $0$ (i.e., in a more algebraic language, $(S,+)$ is a commutative monoid). We define a reflexive and transitive relation (i.e., a \emph{pre-order}) $\le$ on $S$ by setting \begin{align*} s \le t \quad \text{if and only if} \quad \text{there exists } r \in S \text{ such that } t = s+r \end{align*} for $s,t \in S$. Note that $S$ is directed with respect to the pre-order $\le$ since we have $s,t \le s+t$ for all $s,t \in S$. A \emph{representation} of $S$ on a Banach space $E$ is any mapping $T\colon S \to \mathcal{L}(E)$ that satisfies \begin{align*} T(0) = \id_E \quad \text{and} \quad T(s + t) = T(s) T(t) \qquad \text{for all } t, s \in S. \end{align*} In the following, we will often use the index notation $T_s$ instead of $T(s)$ and call $(T_s)_{s \in S}$ an \emph{operator semigroup} on $E$. Let $(T_s)_{s \in S}$ be an operator semigroup on $E$, and assume that the underlying scalar field of $E$ is $\mathbb{C}$. A function $\lambda \colon S \to \mathbb{C}$ is called an \emph{eigenvalue} of $(T_s)_{s \in S}$ if there exists a non-zero vector $x \in E$ such that \begin{align*} T_s x = \lambda_s x \qquad \text{for all } s \in S; \end{align*} in this case, the vector $x$ is called a corresponding \emph{eigenvector}. Note that an eigenvalue $\lambda = (\lambda_s)_{s \in S}$ is always a representation of $(S,+)$ on the space $\mathbb{C}$. Moreover, we call an eigenvalue $\lambda = (\lambda_s)_{s \in S}$ \emph{unimodular} if $\modulus{\lambda_s} = 1$ for all $s \in S$. An operator semigroup $(T_s)_{s \in S}$ on a Banach space $E$ is called \emph{bounded} if it satisfies $\sup_{s \in S} \norm{T_s} < \infty$. Note that, as $S$ is a directed set, every operator semigroup $(T_s)_{s \in S}$ is a net, and hence it makes sense to talk about convergence of $(T_s)_{s \in S}$. At this point we recall that, throughout the article, we always endow the operator space $\mathcal{L}(E)$ with the operator norm, i.e., for us, convergence always means convergence with respect to the operator norm. In the case of a bounded operator semigroup one has the following simple characterization of convergence to the zero operator. \begin{proposition} \label{prop:bounded-convergence} Let $(T_s)_{s \in S}$ be a bounded semigroup of $(S,+)$ on a Banach space $E$. The following assertions are equivalent: \begin{enumerate}[\upshape (i)] \item $\lim_{s \in S} T_s = 0$. \item There exists $s_0 \in S$ such that $\norm{T_{s_0}} < 1$. \item $0$ is contained in the closure of the set $\{T_s\colon s \in S\}$. \end{enumerate} \end{proposition} \begin{proof} (i) $\Rightarrow$ (ii): Obvious. (ii) $\Rightarrow$ (iii): Let $s_0 \in S$ such that $\norm{T_{s_0}} < 1$. Let $\varepsilon > 0$. Then there exists $n \in \mathbb{N}$ such that $\norm{T_{s_0}}^n < \varepsilon$. Hence, \begin{align*} \norm{T_{n s_0}} \leq \norm{T_{s_0}}^n < \varepsilon. \end{align*} Therefore, $0 \in \overline{\{T_s\colon s \in S\}}$. (iii) $\Rightarrow$ (i): Let $\varepsilon > 0$. Then there exists $s_0 \in S$ such that $\norm{T_{s_0}} \leq \varepsilon$. Thus, \begin{align*} \norm{T_t} \leq \varepsilon M \qquad \text{for all } t \in s_0 + S, \end{align*} where $M \coloneqq \sup_{s \in S} \norm{T_s}$. So it follows that $\lim_{s \in S} T_s = 0$. 
\end{proof} \subsection{The semigroup at infinity} In \cite[Section 2]{Glueck2019} the concept of the \emph{semigroup at infinity} with respect to the strong operator topology was used to study strong convergence of operator semigroups. In reminiscence of this concept we define the semigroup at infinity now with respect to the operator norm topology. \begin{definition} Let $(T_s)_{s \in S}$ be a semigroup of $(S,+)$ on a Banach space $E$. We call the set \begin{align*} \sgInftyON{\mathcal{T}} \coloneqq \bigcap_{r \in S} \overline{\{T_s\colon s \ge r\}} \end{align*} the \emph{semigroup at infinity} associated with $(T_s)_{s \in S}$ with respect to the operator norm. Since we restrict ourselves to the operator norm topology throughout the paper and since we only consider a single operator semigroup, we will often just call $\sgInftyON{\mathcal{T}}$ the \emph{semigroup at infinity}. \end{definition} Note that the semigroup at infinity consists of all cluster points (with respect to the operator norm) of the net $(T_s)_{s \in S}$. If the semigroup at infinity, $\sgInftyON{\mathcal{T}}$, is non-empty and compact, then one can apply the \emph{Jacobs--de Leeuw--Glicksberg} theory to the topological semigroup $\sgInftyON{\mathcal{T}}$. This yields a smallest non-empty closed ideal $\mathcal{I}$ in $\sgInftyON{\mathcal{T}}$ (where \emph{ideal} means that $T \mathcal{I} \subseteq \mathcal{I}$ for all $T \in \sgInftyON{\mathcal{T}}$), and the ideal $\mathcal{I}$ -- the so-called \emph{Sushkevich kernel} of $\sgInftyON{\mathcal{T}}$ -- is a compact topological group with respect to operator multiplication. For details we refer for instance to \cite[Section~16.1]{Eisner2015} or to \cite[Theorem~V.2.3]{Engel2000}. Denote the neutral element in $\mathcal{I}$ by $P_\infty$ -- it is a projection in $\mathcal{L}(E)$ which we call the \emph{projection at infinity}; the range of $P_\infty$ is denoted by $E_\infty$. Note that the ``semigroup at infinity'' approach differs from classical applications of JdLG theory to semigroup asymptotics in the following way: classically, one would rather try to apply the JdLG-decomposition to the semigroup \begin{align*} \mathcal{T} \coloneqq \overline{\{T_s\colon s \in S\}}. \end{align*} To make this approach work though, we would need a global compactness requirement of the semigroup $(T_s)_{s \in S}$, in the sense that $\mathcal{T}$ is compact with respect to the operator norm topology. Generally, this is a far too strong assumption if one is interested in characterising the convergence of $(T_s)_{s \in S}$; this can already be seen by considering the following simple example. \begin{example} Consider the nilpotent right shift $(T_s)_{s \in [0, \infty)}$ on $L^\infty(0, 1)$, i.e., \begin{align*} (T_s f)(t) = \begin{cases} f(t - s), &\quad \text{if } s < t, \\ 0, &\quad \text{else}, \end{cases} \qquad (f \in L^\infty(0, 1)). \end{align*} Then $T_s$ converges to the zero operator with respect to the operator norm as $s \to \infty$, but $\{T_s\colon s \geq 0\}$ is not even relatively compact in the strong operator topology. If we replace $L^\infty(0,1)$ with $L^p(0,1)$ for $p \in [1,\infty)$, the set $\{T_s\colon s \geq 0\}$ becomes relatively compact with respect to the strong operator topology, but it is still not relatively compact with respect to the operator norm topology. \end{example} To overcome this obstacle, in the following theorem we will apply the JdLG-decomposition to the semigroup at infinity. 
This result is very close in spirit to a similar theorem for the strong operator topology that can be found in \cite[Theorem~2.2]{Glueck2019}. \begin{theorem} \label{thm:JdLG-semigroup-infinity} Let $(T_s)_{s \in S}$ be a bounded semigroup of $(S,+)$ on a Banach space $E$ and assume that the semigroup at infinity, $\sgInftyON{\mathcal{T}}$, is non-empty and compact. Set $\mathcal{T} \coloneqq \overline{\{T_s\colon s \in S\}} \subseteq \mathcal{L}(E)$. Then the following assertions hold: \begin{enumerate}[\upshape (a)] \item The projection at infinity, $P_\infty$, commutes with all operators in $\mathcal{T}$, and $\mathcal{T} P_\infty = \sgInftyON{\mathcal{T}} P_\infty$. \item The semigroup at infinity, $\sgInftyON{\mathcal{T}}$, is a group with respect to operator multiplication with neutral element $P_\infty$. Moreover, we have \begin{align*} \mathcal{T}|_{E_\infty} = \sgInftyON{\mathcal{T}}|_{E_\infty} = \overline{\{T_s \colon s \in S\}|_{E_\infty}}^{\mathcal{L}(E_\infty)}, \end{align*} and this set is a compact subgroup of the bijective operators in $\mathcal{L}(E_\infty)$. Finally, $\sgInftyON{\mathcal{T}}$ and $\sgInftyON{\mathcal{T}}|_{E_\infty}$ are isomorphic (in the category of topological groups) via the mapping $R \mapsto R|_{E_\infty}$. \item We have $\lim_{s \in S} T_s|_{\ker P_\infty} = 0$ with respect to the operator norm on $\mathcal{L}(\ker P_\infty)$. \item For every vector $x \in E$ the following assertions are equivalent: \begin{enumerate}[\upshape (i)] \item $P_\infty x = 0$. \item $0$ is contained in the weak closure of the orbit $\{T_s x \mid \, s \in S\}$. \item The net $(T_s x)_{s \in S}$ norm converges to $0$ in $E$. \item We have $Rx = 0$ for each $R \in \sgInftyON{\mathcal{T}}$. \item We have $Rx = 0$ for at least one $R \in \sgInftyON{\mathcal{T}}$. \end{enumerate} \item If the underlying scalar field of $E$ is complex, then the semigroup $(T_s)_{s \in S}$ has discrete spectrum, i.e, \begin{align*} E_\infty = \overline{\lin} \{x \in E\colon \forall \, s \in S \ \exists\, \lambda_s \in \mathbb{T} \text{ with } T_s x = \lambda_s x\}. \end{align*} \end{enumerate} \end{theorem} Note that the first part of assertion~(a) implies that every operator in $\mathcal{T}$ -- and thus in particular every operator $T_s$ -- leaves $E_\infty$ and $\ker P_\infty$ invariant. \begin{proof}[Proof of Theorem~\ref{thm:JdLG-semigroup-infinity}] (a) The first assertion is clear since $\mathcal{T}$ is commutative. Moreover, we have $\mathcal{T} \sgInftyON{\mathcal{T}} \subseteq \sgInftyON{\mathcal{T}} \subseteq \mathcal{T}$, where the second inclusion is obvious and the first inclusion follows easily from the definitions of $\mathcal{T}$ and $\mathcal{T}_\infty$. Therefore, \begin{align*} \mathcal{T} P_\infty = \mathcal{T} P_\infty P_\infty \subseteq \sgInftyON{\mathcal{T}} P_\infty \subseteq \mathcal{T} P_\infty. \end{align*} (c) Since $P_\infty$ is trivial on $\ker P_\infty$, we have $0 \in \overline{\{T_s|_{\ker P_\infty} \colon s \in S\}}$; this is equivalent to $\lim_{s \in S} T_s|_{\ker P_\infty} = 0$ by Proposition~\ref{prop:bounded-convergence}. (b) Let $\mathcal{I} \subseteq \sgInftyON{\mathcal{T}}$ denote the Sushkevich kernel of $\sgInftyON{\mathcal{T}}$, i.e., the smallest non-empty closed ideal in the semigroup $\sgInftyON{\mathcal{T}}$ (see the discussion before the theorem). We show that $\sgInftyON{\mathcal{T}} = \mathcal{I}$. To this end, let $R \in \sgInftyON{\mathcal{T}}$. 
Then $R$ is a cluster point of the net $(T_s)_{s \in S}$, so there exists a subnet $(T_{s_j})_j$ that converges to $R$. It follows from assertion~(c), which we have already proved, that $T_{s_j}(\id_E - P_\infty) \to 0$, so $R(\id_E - P_\infty) = 0$ and hence, $R = RP_\infty$. Since $P_\infty \in \mathcal{I}$ and since $\mathcal{I}$ is an ideal in $\sgInftyON{\mathcal{T}}$ we conclude that $R \in \mathcal{I}$. We have thus proved that $\sgInftyON{\mathcal{T}}$ is a group with respect to operator multiplication and that its neutral element is $P_\infty$. Next we show the equalities in the displayed formula. One has $\mathcal{T}|_{E_\infty} = \sgInftyON{\mathcal{T}}|_{E_\infty}$ by (a). As the restriction map from $\mathcal{L}(E)$ to $\mathcal{L}(E_\infty; E)$ is continuous, we have $\mathcal{T}|_{E_\infty} \subseteq \overline{\{T_s \colon s \in S\}|_{E_\infty}}$. The converse inclusion follows from $\overline{\{T_s \colon s \in S\}|_{E_\infty}} P_\infty \subseteq \mathcal{T}$. Since $\sgInftyON{\mathcal{T}}$ is a group with neutral element $P_\infty$, it readily follows that $\sgInftyON{\mathcal{T}}|_{E_\infty}$ is a subgroup of the bijective operators in $\mathcal{L}(E_\infty)$. The mapping \begin{align*} \sgInftyON{\mathcal{T}} \ni R \mapsto R|_{E_\infty} \in \sgInftyON{\mathcal{T}}|_{E_\infty} \end{align*} is clearly a surjective and continuous group homomorphism and consequently, $\sgInftyON{\mathcal{T}}|_{E_\infty}$ is compact. If $R|_{E_\infty} = \id_{E_\infty}$ for some $R\in \sgInftyON{\mathcal{T}}$, then $P_\infty = RP_\infty = R$, so our group homomorphism is also injective. Finally, it is also a homeomorphism by the compactness of its domain and range. (d) Fix $x \in E$. (iv) $\Rightarrow$ (i) $\Rightarrow$ (v) $\Rightarrow$ (iii) $\Rightarrow$ (iv): Clearly, since $P_\infty \in \sgInftyON{\mathcal{T}}$, (iv) implies (i) and (i) implies (v). Furthermore, (v) implies $0 \in \overline{ \{T_s x \mid s \in S\} }$ which is equivalent to $\lim_{s \in S} T_s x = 0$, i.e., (iii), due to the boundedness of the semigroup. Moreover, if (iii) holds and $\varepsilon > 0$ is fixed, then there exists $s \in S$ such that $\{T_t x \mid t \geq s\} \subseteq \varepsilon \Ball$, where $\Ball$ denotes the closed unit ball in $E$. Thus, $\sgInftyON{\mathcal{T}} x \subseteq \varepsilon\Ball$. Since $\varepsilon > 0$ was arbitrary, it follows that $\sgInftyON{\mathcal{T}} x = \{0\}$, i.e., (iv) holds. (ii) $\Leftrightarrow$ (iii): Obviously, (iii) implies (ii). Conversely, suppose that (ii) holds. Then it follows that $0$ is contained in the weak closure of the set $\{T_s P_\infty x \mid s \in S\}$. Moreover, it follows from~(a) that the set $\{T_s P_\infty \mid s \in S\}$ is a subset of $\sgInftyON{\mathcal{T}} P_\infty$ and thus relatively compact in $\mathcal{L}(E)$. Hence, $\{T_s P_\infty x \mid s \in S\}$ is relatively strongly compact and thus its closure coincides with its weak closure. Hence, $0$ is contained in the strong closure of $\{T_s P_\infty x \mid s \in S\}$, so $T_s P_\infty x \to 0$ due to the boundedness of the semigroup. If we apply the implication from~(iii) to~(i), which we have already shown, to the vector $P_\infty x$, this yields $P_\infty x = P_\infty (P_\infty x) = 0$. (e) Recall that, by (b), $\mathcal{G} \coloneqq \overline{\{T_s \colon s \in S\}|_{E_\infty}} \subseteq \mathcal{L}(E_\infty)$ is a compact group with respect to the operator norm on $\mathcal{L}(E_\infty)$. Let $\mathcal{G}^*$ denote the dual group of $\mathcal{G}$. 
According to \cite[Corollary~15.18]{Eisner2015} we have \begin{align*} E_\infty = \, & \overline{\lin} \{x \in E_\infty\colon \exists \, \xi \in \mathcal{G}^* \; \forall \, R \in \mathcal{G}\colon Rx = \xi(R)x\} \\ \subseteq \, & \overline{\lin} \{x \in E_\infty\colon \forall \, s \in S \ \exists\, \lambda_s \in \mathbb{T}\colon T_s x = \lambda_s x\} \subseteq E_\infty. \end{align*} Now let $x \in E$ be an eigenvector associated to the unimodular eigenvalue $\lambda = (\lambda_s)_{s \in S}$. Consider $y \coloneqq (I - P_\infty) x \in \ker P_\infty$. Then $T_s y \to 0$ and $T_s y = \lambda_s y$ for each $s \in S$. Since $\modulus{\lambda_s} = 1$ for all $s \in S$, this implies $y = 0$, i.e., $x \in E_\infty$. \end{proof} \begin{remark} \label{rem:sg-at-infty-is-also-a-group-in-the-strong-case} \begin{enumerate}[\upshape (a)] \item For the strong operator topology, the analogue result to Theorem~\ref{thm:JdLG-semigroup-infinity} is \cite[Theorem~2.2]{Glueck2019}. The assertion that the semigroup at infinity is automatically a group in case that it is non-empty and compact is not included in this reference, but it is also true in the situation there; this can be shown by exactly the same argument as in our proof of Theorem~\ref{thm:JdLG-semigroup-infinity}(b). This shows that the semigroup at infinity is minimal in the sense that there is no smaller topological group that contains all the information about the asymptotic behaviour of the semigroup. \item For the strong operator topology, the statement in Theorem~\ref{thm:JdLG-semigroup-infinity}(e) holds, too, although that was not observed in \cite[Theorem~2.2]{Glueck2019}. \end{enumerate} \end{remark} As a consequence of the above theorem, operator norm convergence of a semigroup can be characterised in terms of its semigroup at infinity. Let us state this explicitly in the following corollary. \begin{corollary} \label{cor:characterization-of-sg-convergence} For every bounded semigroup $(T_s)_{s \in S}$ on a Banach space $E$ the following assertions are equivalent: \begin{enumerate}[\upshape (i)] \item $(T_s)_{s \in S}$ converges (with respect to the operator norm). \item $\sgInftyON{\mathcal{T}}$ is a singleton. \item $\sgInftyON{\mathcal{T}}$ is non-empty and compact, and acts as the identity on $E_\infty$. \item $\sgInftyON{\mathcal{T}}$ is non-empty and compact, and $(T_s)_{s \in S}$ acts as the identity on $E_\infty$. \end{enumerate} If the equivalent conditions~\textup{(i)--(iv)} are satisfied, then $\lim_{s \in S}T_s$ equals $P_\infty$, the projection at infinity. If the underlying scalar field of $E$ is complex, the above assertions~ \textup{(i)--(iv)} are also equivalent to: \begin{enumerate}[\upshape (v)] \item $\sgInftyON{\mathcal{T}}$ is non-empty and compact, and $\one \coloneqq (1)_{s \in S}$ is the only unimodular eigenvalue of $(T_s)_{s \in S}$. \end{enumerate} \end{corollary} \begin{proof} (i) $\Rightarrow$ (ii): If the net $(T_s)_{s \in S}$ converges, then its limit is the only cluster point of $(T_s)_{s \in S}$. Hence, $\sgInftyON{\mathcal{T}}$ is a singleton. (ii) $\Rightarrow$ (iii): Assertion~(ii) implies $\sgInftyON{\mathcal{T}} = \{P_\infty\}$, and $P_\infty$ acts trivially on $E_\infty$. (iii) $\Rightarrow$ (iv): By Theorem~\ref{thm:JdLG-semigroup-infinity}(a) we have $\mathcal{T}|_{E_\infty} = \sgInftyON{\mathcal{T}}|_{E_\infty}$, so~(iii) implies~(iv). (iv) $\Rightarrow$ (i): By Theorem~\ref{thm:JdLG-semigroup-infinity}(c), assertion~(iv) implies that $\lim_{s \in S} T_s = P_\infty$. 
(iv) $\Leftrightarrow$ (v): By Theorem~\ref{thm:JdLG-semigroup-infinity}(e), $(T_s)_{s \in S}$ acts as the identity on $E_\infty$ if and only if $\one \coloneqq (1)_{s \in S}$ is the only unimodular eigenvalue of $(T_s)_{s \in S}$. \end{proof} \begin{remark} \label{rem:embedded-semigroup-and-strong-op-topology} We note once again that our results in this subsection, as well as their proofs, are quite close to similar results for the strong operator topology from \cite[Subsection~2.2]{Glueck2019}. The relation between the semigroups at infinity with respect to the operator norm topology and with respect to the strong operator topology can also be formalised in the following sense. If $(T_s)_{s \in S}$ is a bounded operator semigroup on a Banach space $E$ one can, for each $s \in S$, define an operator $R_s$ on the Banach space $\mathcal{L}(E)$ by \begin{align*} R_s\colon \mathcal{L}(E) \to \mathcal{L}(E), \quad A \mapsto T_s A. \end{align*} Then $(R_s)_{s \in S}$ is a bounded semigroup on the Banach space $\mathcal{L}(E)$, and topological properties of $(R_s)_{s \in S}$ with respect to the strong operator topology translate into topological properties of $(T_s)_{s \in S}$ with respect to the operator norm. This observation can be used as a basis to derive the theory of the semigroup at infinity with respect to the operator norm from the corresponding theory with respect to the strong topology presented in \cite{Glueck2019}. However, in the present section we prefer to give more direct proofs in order to make our work more self-contained and to improve its accessibility for readers not familiar with \cite{Glueck2019}. \end{remark} In order to apply Theorem~\ref{thm:JdLG-semigroup-infinity} and Corollary~\ref{cor:characterization-of-sg-convergence} one needs criteria to ensure that the semigroup at infinity is non-empty and compact; in a general setting, such criteria can be found in the following proposition. \begin{proposition} \label{prop:characterisation-of-compact-and-non-empty} For every bounded semigroup $(T_s)_{s \in S}$ on a Banach space $E$, the following assertions are equivalent: \begin{enumerate}[\upshape (i)] \item The semigroup at infinity is non-empty and compact. \item Every subnet of $(T_s)_{s\in S}$ has a convergent subnet. \item Every universal subnet of $(T_s)_{s \in S}$ converges. \end{enumerate} In case that $S$ contains a cofinal sequence, the above assertions~\textup{(i)--(iii)} are also equivalent to: \begin{enumerate}[\upshape (iv)] \item For every cofinal sequence $(s_n)_{n \in \mathbb{N}}$ in $S$, the sequence $(T_{s_n})_{n \in \mathbb{N}}$ has a convergent subsequence. \end{enumerate} \end{proposition} \begin{proof} (i) $\Leftarrow$ (ii) $\Leftrightarrow$ (iii): These implications follow from general topological properties; see Lemma~\ref{lemma:set-of-cluster-points}. (i) $\Rightarrow$ (ii): Note that one has $\lim_{s \in S} (T_s(I - P_\infty)) = 0$ by Theorem~\ref{thm:JdLG-semigroup-infinity}(c). Moreover, the net $(T_s P_\infty)_{s \in S}$ is contained in the compact set $\sgInftyON{\mathcal{T}}P_\infty$ by Theorem~\ref{thm:JdLG-semigroup-infinity}(a). Thus each of its subnets has a convergent subnet. Since \begin{align*} T_s = T_s P_\infty + T_s (I - P_\infty) \qquad \text{for all } s \in S, \end{align*} this shows that every subnet of $(T_s)_{s \in S}$ has a convergent subnet. Now assume that $S$ contains a cofinal sequence. (iii) $\Rightarrow$ (iv) $\Rightarrow$ (i): This, again, follows from the general Lemma~\ref{lemma:set-of-cluster-points}. 
\end{proof} If $(x_\alpha)_\alpha$ is a net in an arbitrary metric (or topological) space whose set of cluster points is non-empty and compact, then the set of cluster points of a fixed subnet of $(x_\alpha)_\alpha$ might well be empty. The implication (i) $\Rightarrow$ (ii) in Proposition~\ref{prop:characterisation-of-compact-and-non-empty} shows that the situation is different for our semigroup setting. A nice consequence of this observation is the subsequent Corollary~\ref{cor:subsemigroup-non-empty-and-compact}. For a proper understanding of that corollary, the following algebraic observation is important. \begin{remark} \label{remark:order-on-subsemigroups} Let $R$ be a subsemigroup of $S$ that contains $0$. Denote the pre-order on $R$ inherited from $S$ by $\le_S$ and denote the pre-order on $R$ induced by its semigroup operation by $\le_R$. For all $r_1,r_2 \in R$ one then has the implication \begin{align*} r_1 \le_R r_2 \quad \Longrightarrow \quad r_1 \le_S r_2. \end{align*} Note that $\le_R$ and $\le_S$ do not coincide in general, which can be seen, for instance, by considering the subsemigroup $\{0\} \cup [1,\infty)$ of $([0,\infty),+)$. Now, let $X$ be a set and for each $r \in R$, let $x_r \in X$. Let us use, within this remark, the notations $(x_r)_{r \in (R,\le_R)}$ and $(x_r)_{r \in (R,\le_S)}$ to distinguish the nets that we obtain by considering the different pre-orders $\le_R$ and $\le_S$ on $R$. Then it follows from the implication above that the net $(x_r)_{r \in (R,\le_R)}$ is a subnet of $(x_r)_{r \in (R,\le_S)}$. In particular, if $R$ is cofinal in $S$ and $(x_s)_{s \in S}$ is a net in $X$, then $(x_r)_{r \in (R,\le_R)}$ is a subnet of $(x_s)_{s \in S}$. \end{remark} \begin{corollary} \label{cor:subsemigroup-non-empty-and-compact} Let $E$ be a Banach space. Let $R$ be a subsemigroup of $S$ that contains $0$ and is cofinal in $S$ and let $(T_s)_{s \in S}$ be a bounded semigroup on $E$ whose associated semigroup at infinity is non-empty and compact. Then the semigroup at infinity associated with $(T_s)_{s \in R}$ is also non-empty and compact, and the projections at infinity of $(T_s)_{s \in S}$ and $(T_s)_{s \in R}$ coincide. \end{corollary} Note that in the corollary the semigroup $R$ is endowed with the order induced by its semigroup operation (denoted by $\le_R$ in Remark~\ref{remark:order-on-subsemigroups}). For any other order on $R$ (for instance the order inherited from $S$) we did not even define the notion \emph{semigroup at infinity}. \begin{proof}[Proof of Corollary~\ref{cor:subsemigroup-non-empty-and-compact}] It follows from Remark~\ref{remark:order-on-subsemigroups} that $(T_s)_{s \in R}$ is a subnet of $(T_s)_{s \in S}$. In particular, every universal subnet of $(T_s)_{s \in R}$ is also a universal subnet of $(T_s)_{s \in S}$ and thus convergent by Proposition~\ref{prop:characterisation-of-compact-and-non-empty}. Hence, by the same proposition the semigroup at infinity associated with $(T_s)_{s \in R}$ is non-empty and compact. Let $P_\infty$ and $Q_\infty$ denote the projections at infinity of $(T_s)_{s \in S}$ and $(T_s)_{s \in R}$, respectively. Those two projections commute. It follows from Theorem~\ref{thm:JdLG-semigroup-infinity}(c) that $\lim_{s \in S} T_s|_{\ker P_\infty} = 0$ and thus, in particular, $\lim_{s \in R} T_s|_{\ker P_\infty} = 0$; Theorem~\ref{thm:JdLG-semigroup-infinity}(d), applied to the semigroup $(T_s)_{s \in R}$, thus implies that $Q_\infty x = 0$ for every $x \in \ker P_\infty$, i.e., $\ker P_\infty \subseteq \ker Q_\infty$. 
Conversely, it also follows from Theorem~\ref{thm:JdLG-semigroup-infinity}(c) that $\lim_{s \in R} T_s|_{\ker Q_\infty} = 0$, so Proposition~\ref{prop:bounded-convergence} implies that even $\lim_{s \in S} T_s|_{\ker Q_\infty} = 0$. Theorem~\ref{thm:JdLG-semigroup-infinity}(d), applied to the semigroup $(T_s)_{s \in S}$, thus implies that $P_\infty x = 0$ for every $x \in \ker Q_\infty$, i.e., $\ker Q_\infty \subseteq \ker P_\infty$. Therefore, we have proved that the commuting projections $P_\infty$ and $Q_\infty$ have the same kernel. The general observation that two commuting projections coincide if their kernels coincide thus yields $P_\infty = Q_\infty$. \end{proof} In order to determine the projection $P_\infty$ in concrete situations the following proposition is quite useful; it shows that $P_\infty$ is uniquely determined by some of its properties listed in Theorem~\ref{thm:JdLG-semigroup-infinity}. \begin{proposition} \label{prop:uniqueness-of-proposition} Let $(T_s)_{s \in S}$ be a bounded semigroup on a Banach space $E$ and let $P \in \mathcal{L}(E)$ be a projection that commutes with all operators $T_s$. Consider the following assertions: \begin{enumerate}[\upshape (a)] \item $\lim_s T_s|_{\ker P} = 0$ (with respect to the operator norm on $\mathcal{L}(\ker P)$). \item The set $\{T_s|_{PE} \colon s \in S\}$ is relatively compact in $\mathcal{L}(PE)$. \item The net $(T_s x)_{s \in S}$ does not converge to $0$ for any $x \in PE \setminus \{0\}$. \end{enumerate} If assertions~(a) and~(b) are satisfied, then the semigroup at infinity, $\sgInftyON{\mathcal{T}}$, is non-empty and compact, and the projection at infinity satisfies \begin{align*} P_\infty E \subseteq PE \qquad \text{and} \qquad \ker P_\infty \supseteq \ker P. \end{align*} If all assertions~\textup{(a)--(c)} are satisfied, then in addition $P_\infty = P$. \end{proposition} \begin{proof} First note that the semigroup leaves both the kernel and the range of $P$ invariant since $P$ commutes with each operator $T_s$. Now assume that~(a) and~(b) are satisfied and let $(T_{s_j})$ be a universal subnet of $(T_s)_{s \in S}$. By~(a), $(T_{s_j}|_{\ker P})$ converges to $0$ and by~(b), $(T_{s_j}|_{PE})$ is convergent. Thus, the net $(T_{s_j})$ is convergent, which proves that $\sgInftyON{\mathcal{T}}$ is non-empty and compact by Proposition~\ref{prop:characterisation-of-compact-and-non-empty}. It follows from assumption~(a) and Theorem~\ref{thm:JdLG-semigroup-infinity}(d) that $\ker P_\infty \supseteq \ker P$. To show that $P_\infty E \subseteq PE$, let $x \in P_\infty E$. We have $(\id_E-P)x \in \ker P \subseteq \ker P_\infty$, and since $P_\infty$ and $P$ commute, this implies that $0 = (\id_E-P)P_\infty x = (\id_E-P)x$, so $x = Px \in PE$. Now assume in addition that assumption~(c) is satisfied. We show that, in this case, the inclusion $\ker P_\infty \subseteq \ker P$ also holds. Let $x \in \ker P_\infty$. Since $P$ and $P_\infty$ commute, the projection $P$ leaves $\ker P_\infty$ invariant, i.e., we also have $Px \in \ker P_\infty$. Hence, $T_s P x \to 0$ by Theorem~\ref{thm:JdLG-semigroup-infinity}(d), so it follows from assumption~(c) that $Px = 0$. We have thus proved that the kernels of $P_\infty$ and $P$ coincide, so $P_\infty = P$. \end{proof} \subsection{Powers of a single operator} In this subsection we consider time-discrete semigroups, i.e., semigroups of the form $(T^n)_{n \in \mathbb{N}_0}$ for a single operator $T$. 
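Before turning to the general results, let us illustrate the notions introduced so far in the simplest possible finite-dimensional situation; the following elementary computation is included merely for purposes of illustration. Consider on $E = \mathbb{C}^2$ the diagonal operator $T = \operatorname{diag}\big(\tfrac{1}{2}, -1\big)$. Then
\begin{align*}
	T^n = \operatorname{diag}\big(2^{-n}, (-1)^n\big) \qquad \text{for all } n \in \mathbb{N}_0,
\end{align*}
so the cluster points of the sequence $(T^n)_{n \in \mathbb{N}_0}$ with respect to the operator norm are precisely $\operatorname{diag}(0,1)$ and $\operatorname{diag}(0,-1)$. Hence the semigroup at infinity $\sgInftyON{\mathcal{T}} = \{\operatorname{diag}(0,1), \operatorname{diag}(0,-1)\}$ is a two-element group, the projection at infinity is $P_\infty = \operatorname{diag}(0,1)$, and $E_\infty = \{0\} \times \mathbb{C}$ is the eigenspace of $T$ for the unimodular eigenvalue $-1$. This is consistent with Proposition~\ref{prop:compactness-for-single-operators} below, which identifies $P_\infty$ with the spectral projection of $T$ associated with $\sigma(T) \cap \mathbb{T} = \{-1\}$.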
Let us first note in the following lemma that, in this case, the semigroup at infinity is non-empty and compact if and only if the entire set $\{T^n\colon n \in \mathbb{N}_0\}$ is relatively compact in $\mathcal{L}(E)$. \begin{lemma} \label{lemma:compactness-discrete-case} Let $T \in \mathcal{L}(E)$ be a power-bounded operator on a Banach space $E$. Then the semigroup at infinity, $\sgInftyON{\mathcal{T}}$, associated to the semigroup $(T^n)_{n \in \mathbb{N}_0}$ is non-empty and compact if and only if the set $\mathcal{T} = \{T^n\colon n \in \mathbb{N}_0\}$ is relatively compact in $\mathcal{L}(E)$. \end{lemma} \begin{proof} ``$\Rightarrow$'' Let $(T^{n_k})_{k \in \mathbb{N}}$ be an arbitrary sequence in $\mathcal{T}$; we have to distinguish two cases since this sequence might not be a subsequence of $(T^n)_{n \in \mathbb{N}_0}$. In the first case, the index sequence $(n_k)_{k \in \mathbb{N}}$ is bounded; then, by the pigeon hole principle, it has a constant subsequence, so $(T^{n_k})_{k \in \mathbb{N}}$ has a constant, thus convergent, subsequence. In the second case the index sequence $(n_k)_{k \in \mathbb{N}}$ is unbounded. Then it has a subsequence $(n_{k_j})_{j \in \mathbb{N}}$ that is cofinal in $\mathbb{N}_0$. Hence, Proposition~\ref{prop:characterisation-of-compact-and-non-empty} yields that $(T^{n_{k_j}})_{j \in \mathbb{N}}$ has a convergent subsequence, and the latter is also a subsequence of $(T^{n_k})_{k \in \mathbb{N}}$. ``$\Leftarrow$'' The implication follows directly from Proposition~\ref{prop:characterisation-of-compact-and-non-empty}. \end{proof} Now we derive a spectral characterization of the compactness and non-emptiness of the semigroup at infinity associated to a single operator. \begin{proposition} \label{prop:compactness-for-single-operators} Let $T \in \mathcal{L}(E)$ be a power-bounded operator on a complex Banach space $E$ and consider the semigroup $(T^n)_{n \in \mathbb{N}_0}$ on $E$. Then the following two assertions are equivalent: \begin{enumerate}[\upshape (i)] \item The semigroup at infinity, $\sgInftyON{\mathcal{T}}$, is non-empty and compact. \item All spectral values of $T$ on the unit circle are poles of the resolvent of $T$. \end{enumerate} In this case, $P_\infty$ coincides with the spectral projection of $T$ associated with $\sigma(T) \cap \mathbb{T}$. \end{proposition} \begin{proof} (i) $\Rightarrow$ (ii): Let $\lambda \in \mathbb{T}$ be a spectral value of $T$. Let $\mathcal{K}$ denote the closed convex hull of the relatively compact set \begin{align*} \mathbb{T} \cdot \{T^n\colon n \in \mathbb{N}_0\}; \end{align*} then $\mathcal{K}$ is compact, too. Moreover, the operator $(r\lambda - \lambda) \mathcal{R}(r\lambda,T)$ is contained in $\mathcal{K}$ for each $r > 1$; this is a consequence of the Neumann series representation of the resolvent. Consequently, the net $\big((r\lambda - \lambda) \mathcal{R}(r\lambda,T)\big)_{r \in (1,\infty)}$ (where $(1,\infty)$ is directed conversely to the order inherited from $\mathbb{R}$) has a convergent subnet. This shows, according to Proposition~\ref{prop:pole-of-resolvent-by-resolvent-convergence} in the appendix, that $\lambda$ is a pole of $\mathcal{R}(\mathord{\,\cdot\,},T)$. (ii) $\Rightarrow$ (i): Note that, as a consequence of~(ii), $\sigma(T) \cap \mathbb{T}$ is isolated from the rest of the spectrum of $T$; let $P$ denote the spectral projection associated with $\sigma(T) \cap \mathbb{T}$. We show that $P$ satisfies the assumptions (a)--(c) in Proposition~\ref{prop:uniqueness-of-proposition}. 
The spectral radius of $T|_{\ker P}$ is strictly less than $1$, so $T|_{\ker P}^n \to 0$ as $n \to \infty$; this proves assumption~(a). In order to show assumptions~(b) and~(c), note that the set $\sigma(T) \cap \mathbb{T}$ is finite as a consequence of~(ii), and enumerate its elements (if any exist) as $\lambda_1,\dots,\lambda_m$. By assumption, each $\lambda_k$ is a pole of the resolvent of $T$, and its pole order equals $1$ since $T$ is power-bounded. Hence, $T$ acts as $\lambda_k$ times the identity on the range of the associated spectral projection $P_k$. It follows that $T$ acts on $PE = P_1E \oplus \dots \oplus P_mE$ as the multiplication with the tuple $(\lambda_1, \dots,\lambda_m)$, which readily implies that $\{(T|_{PE})^n \colon n \in \mathbb{N}_0\}$ is relatively compact with respect to the operator norm and that $T^nx$ does not converge to $0$ as $n \to \infty$ for any $x \in PE \setminus \{0\}$. Thus, all assumptions~(a)--(c) of Proposition~\ref{prop:uniqueness-of-proposition} are satisfied, which shows that $\sgInftyON{\mathcal{T}}$ is non-empty and compact and $P = P_\infty$. \end{proof} \subsection{Semigroups which contain a quasi-compact operator} Recall that a bounded operator $T$ on a Banach space $E$ is called \emph{quasi-compact} if there exists a compact operator $K$ on $E$ and $n \in \mathbb{N}$ such that $\norm{T^n - K} < 1$. It is well known that, if the underlying scalar field is complex, a quasi-compact operator $T$ has at most finitely many spectral values on the complex unit circle, and that all those spectral values are poles of the resolvent of $T$ with finite-rank residuum. Hence, the spectral projection associated to the part of the spectrum on the unit circle has finite rank. Quasi-compact operators -- and in particular, of course, compact operators -- appear quite often in concrete applications. This is why the following proposition, in conjunction with Theorem~\ref{thm:JdLG-semigroup-infinity} and Corollary~\ref{cor:characterization-of-sg-convergence}, is very useful. \begin{proposition} \label{prop:quasi-compact} Let $(T_s)_{s \in S}$ be a bounded semigroup on a Banach space $E$ such that, for some $s_0 \in S$, the operator $T_{s_0}$ is quasi-compact. Then the semigroup at infinity associated to $(T_s)_{s \in S}$ is non-empty and compact, and the projection at infinity has finite rank. \end{proposition} \begin{proof} We may assume that the underlying scalar field of $E$ is complex, since otherwise we can consider a complexification of $E$. According to Proposition~\ref{prop:compactness-for-single-operators} the semigroup at infinity associated to $(T_{s_0}^n)_{n \in \mathbb{N}_0}$ is non-empty and compact; let $P$ denote the projection at infinity associated to this semigroup at infinity. Then $P$ commutes with each operator $T_s$, so both $\ker P$ and $PE$ are invariant under the action of the semigroup $(T_s)_{s \in S}$. Moreover, $(T_{s_0}|_{\ker P})^n \to 0$ as $n \to \infty$, so it follows from Proposition~\ref{prop:bounded-convergence} that actually $\lim_{s \in S}T_s|_{\ker P} = 0$. Additionally, it follows from Proposition~\ref{prop:compactness-for-single-operators} and the quasi-compactness of $T_{s_0}$ that $PE$ is finite-dimensional. 
Since our semigroup is bounded, the set $\{T_s|_{PE}\colon s \in S\}$ is thus relatively compact in $\mathcal{L}(PE)$, so it follows from Proposition~\ref{prop:uniqueness-of-proposition} that the semigroup at infinity associated with $(T_s)_{s \in S}$ is non-empty and compact, and that the projection at infinity, $P_\infty$, satisfies $P_\infty E \subseteq PE$. Hence, $P_\infty$ has finite rank. \end{proof} In the situation of Proposition~\ref{prop:quasi-compact}, the projections at infinity associated with $(T_s)_{s \in S}$ and with $(T_{s_0}^n)_{n \in \mathbb{N}_0}$ coincide if the subsemigroup $\{ns_0\colon n \in \mathbb{N}_0\}$ is cofinal in $S$ (see Corollary~\ref{cor:subsemigroup-non-empty-and-compact}). Without this additional assumption, the projections at infinity do not need to coincide, as the following examples show. \begin{examples} \begin{enumerate}[(a)] \item Consider the semigroup $S = [0,\infty)$ where the semigroup operation is given by the maximum operator $\lor$; then the order in $S$ coincides with the usual order on $[0,\infty)$. Now, let $E = \mathbb{C}^2$, let $Q \in \mathcal{L}(\mathbb{C}^2)$ be the projection onto the first component and define \begin{align*} T_s = \begin{cases} \id_{\mathbb{C}^2} \quad & \text{if } s \in [0,1], \\ Q \quad & \text{if } s \in (1,\infty). \end{cases} \end{align*} Then $(T_s)_{s \in S}$ is a bounded semigroup of $([0,\infty),\lor)$, its semigroup at infinity is non-empty and compact and its projection at infinity equals $Q$. The operator $T_1$ is compact, but the projection at infinity associated to $(T_1^n)_{n \in \mathbb{N}_0}$ is $\id_{\mathbb{C}^2}$. \item Here is also an example where the underlying semigroup is cancellative: Let $S = [0,\infty)^2$, together with the componentwise addition $+$. Let $E = \mathbb{C}^2$, let $Q \in \mathcal{L}(\mathbb{C}^2)$ denote the projection onto the first component and $P \in \mathcal{L}(\mathbb{C}^2)$ the projection onto the second component. We define a representation $(T_{(s,t)})_{(s,t) \in [0,\infty)^2}$ by \begin{align*} T_{(s,t)} = \begin{cases} \id_{\mathbb{C}^2} \quad & \text{if } s = 0 \text{ and } t = 0, \\ Q \quad & \text{if } s > 0 \text{ and } t = 0, \\ P \quad & \text{if } s = 0 \text{ and } t > 0, \\ 0 \quad & \text{if } s > 0 \text{ and } t > 0. \end{cases} \end{align*} Then $(T_{(s,t)})_{(s,t) \in [0,\infty)^2}$ is a bounded semigroup with non-empty and compact semigroup at infinity; its projection at infinity equals $0$. The operator $T_{(0,1)}$ is compact, but the projection at infinity associated with $(T_{(0,1)}^n)_{n \in \mathbb{N}_0}$ equals $P$. \end{enumerate} \end{examples} \subsection{Beyond the quasi-compact case} While the situation of Proposition~\ref{prop:quasi-compact} is most important for applications, it is not completely satisfying from a theoretical point of view. Indeed, for every Banach space $E$ and every commutative monoid $(S,+)$ the semigroup at infinity associated to the trivial semigroup $(\id_E)_{s \in S}$ is non-empty and compact, but $\id_E$ is not quasi-compact unless $E$ is finite-dimensional. In the case of a time-discrete semigroup $(T^n)_{n \in \mathbb{N}_0}$ the non-quasi-compact case is still covered by Proposition~\ref{prop:compactness-for-single-operators} -- where non-quasi-compactness of $T$ means precisely that at least one spectral value on the unit circle has infinite-dimensional eigenspace. It would be satisfying to have a similar result for more general semigroups $(S,+)$ at hand, at least for the semigroup $([0,\infty),+)$. 
However, the following example shows that things are not that simple. \begin{example} \label{ex:easy-spectrum-not-sufficient-for-semigroups-with-real-times} There exists an $L^2$-space and a bounded positive semigroup $\mathcal{T} = (T_t)_{t \in [0,\infty)}$ on it with the following properties: \begin{enumerate}[\upshape(a)] \item The spectrum of every operator $T_t$ is finite and consists of poles of the resolvent. \item The semigroup at infinity, $\sgInftyON{\mathcal{T}}$, is not compact. \end{enumerate} Indeed, let $U \subseteq \mathbb{T}$ denote the group of all roots of unity and consider the space $\ell^2(U)$. Note that there exists a group homomorphism $\varphi\colon \mathbb{R} \to \mathbb{Q}$ which acts as the identity on $\mathbb{Q}$ (the existence of $\varphi$ follows from the fact that $\mathbb{R}$, seen as a vector space over $\mathbb{Q}$, possesses a basis that contains the number $1$). We define $\mathcal{T}$ by \begin{align*} T_tf(z) = f(\mathrm{e}^{2\pi \mathrm{i} \varphi(t)}z) \end{align*} for $t \in [0,\infty)$, $f \in \ell^2(U)$ and $z \in U$. Obviously, the semigroup obtained this way is bounded and positive. For every time $t$ there exists an integer $n \in \mathbb{N}$ such that $T_t^n = I$ (indeed, one simply has to choose $n$ such that $n\varphi(t)$ is an integer). Hence, every operator $T_t$ is algebraic (i.e., mapped to $0$ by a polynomial), so it follows that property~(a) is satisfied. On the other hand, choose a sequence $(q_n)_{n \in \mathbb{N}}$ of positive rational numbers which converges to $\infty$ and such that $\mathrm{e}^{2\pi \mathrm{i} q_n} \not= \mathrm{e}^{2\pi \mathrm{i} q_m}$ whenever $n \not= m$. By applying the sequence $(T_{q_n})_{n \in \mathbb{N}}$ to any canonical unit vector in $\ell^2(U)$ we can see that no subsequence of this sequence converges (not even strongly) as $n \to \infty$. Hence, it follows from Proposition~\ref{prop:characterisation-of-compact-and-non-empty} that the semigroup at infinity is either empty or not compact. Since $\sgInftyON{\mathcal{T}}$ clearly contains the identity operator, we thus conclude that $\sgInftyON{\mathcal{T}}$ is not compact. \end{example} \begin{remark} \label{rem:from-subsemigroups-to-semigroup-is-not-always-possible} \begin{enumerate}[(a)] \item In the situation of Example~\ref{ex:easy-spectrum-not-sufficient-for-semigroups-with-real-times} the semigroup at infinity associated with the time discrete semigroup $(T_{nt})_{n \in \mathbb{N}}$ is, for any time $t \in (0,\infty)$, non-empty and compact; this follows from Proposition~\ref{prop:compactness-for-single-operators}. On the other hand, the semigroup at infinity associated with the entire semigroup $\mathcal{T}$ is not compact. This shows that the implication in Corollary~\ref{cor:subsemigroup-non-empty-and-compact} does not have a simple converse. \item It is easy to modify Example~\ref{ex:easy-spectrum-not-sufficient-for-semigroups-with-real-times} in such a way that all orbits of the semigroup become relatively compact: just replace $\ell^2(U)$ with $L^2(\mathbb{T})$ in the example and construct the semigroup in the same way. Then, for each $f \in L^2(\mathbb{T})$, the orbit $\{T_tf\colon t \in [0,\infty)\}$ is a subset of the compact set $\{f(\mathrm{e}^{2\pi \theta \mathrm{i}}\mathord{\,\cdot\,})\colon \theta \in [0,1]\}$ and thus, the orbit is relatively compact. 
However, we can see by considering a sequence $(q_n)_{n \in \mathbb{N}}$ as in Example~\ref{ex:easy-spectrum-not-sufficient-for-semigroups-with-real-times} that the semigroup at infinity is not compact. \end{enumerate} \end{remark} Example~\ref{ex:easy-spectrum-not-sufficient-for-semigroups-with-real-times} shows that, if the semigroup at infinity associated to $(T_{s_0}^n)_{n \in \mathbb{N}_0}$ is non-empty and compact for each $s_0 \in [0,\infty)$, we cannot automatically conclude that the semigroup at infinity associated to $(T_s)_{s \in [0,\infty)}$ is non-empty and compact. If we want this implication to be true we need an additional assumption, and this is the only time in the theoretical part of this paper where we are forced to impose a time regularity condition on our semigroup. In fact, if the semigroup is strongly continuous at a strictly positive time, we obtain the following characterisation. \begin{theorem} \label{thm:from-subsemigroups-to-the-semigroup-via-strong-continuity} Let $E$ be a Banach space and let $(T_s)_{s \in [0, \infty)}$ be a bounded semigroup on $E$ which is strongly continuous at at least one time $s_0 \in (0,\infty)$. The following assertions are equivalent: \begin{enumerate} \item[\upshape (i)] For each $s \in (0,\infty)$ the semigroup at infinity associated with $(T_s^n)_{n \in \mathbb{N}_0}$ is non-empty and compact. \item[\upshape (ii)] The semigroup at infinity associated with $(T_s)_{s \in [0,\infty)}$ is non-empty and compact. \end{enumerate} If the underlying scalar field of $E$ is complex, the above assertions~\textup{(i)} and \textup{(ii)} are also equivalent to: \begin{enumerate} \item[\upshape (iii)] For each $s \in (0,\infty)$ all spectral values of $T_s$ on the complex unit circle are poles of the resolvent of $T_s$. \end{enumerate} \end{theorem} The proof of Theorem~\ref{thm:from-subsemigroups-to-the-semigroup-via-strong-continuity} requires a bit of preparation. Let $\varphi\colon K \to K$ be a continuous map on some compact Hausdorff space $K$. In this case, the pair $(K; \varphi)$ is called a \emph{topological dynamical system}. Further, a point $x \in K$ is called \emph{recurrent} for the system $(K; \varphi)$ if for each neighbourhood $U \subseteq K$ of $x$ there is $n \in \mathbb{N}$ such that $\varphi^n(x) \in U$. It is not hard to see that $x \in K$ is recurrent if and only if $x \in K$ is \emph{infinitely recurrent}, that is for each neighbourhood $U \subseteq K$ of $x$ and each $n_0 \in \mathbb{N}$ there is $n \in \mathbb{N}$ with $n \ge n_0$ such that $\varphi^n(x) \in U$. More facts on recurrence in topological dynamical systems can for instance be found in \cite[Chapter~3.2]{Eisner2015}. We now use these notions to prove the following lemma. \begin{lemma} \label{lem:unit-circle-pointwise} There exists a cofinal net $(n_j)_j$ in $\mathbb{N}$ such that the net $(\lambda^{n_j})_j$ converges to $1$ for each $\lambda \in \mathbb{T}$. \end{lemma} \begin{proof} Endow $G \coloneqq \mathbb{T}^\mathbb{T}$ with the topology of pointwise convergence and with the pointwise multiplication. Then $G$ is a compact topological group. Set $\one \coloneqq (1)_{\lambda \in \mathbb{T}}$ and let $\varphi\colon G \to G$ be given by $\varphi(\mu) = (\lambda \mu_\lambda)_{\lambda \in \mathbb{T}}$ for each $\mu = (\mu_\lambda)_{\lambda \in \mathbb{T}}$. Then $\varphi$ is continuous and the topological dynamical system $(G; \varphi)$ is a so-called \emph{group rotation}. 
Hence, by \cite[Proposition~3.12(d)]{Eisner2015} every point in $G$ is recurrent with respect to $(G,\varphi)$ and thus, so is the point $\one$. Now, let $\mathcal{U}$ denote the neighbourhood filter of $\one$ in $G$, ordered by converse set inclusion, and endow $\mathcal{U} \times \mathbb{N}$ with the product order, which renders it a directed set. For each pair $(U,k) \in \mathcal{U} \times \mathbb{N}$ we can find a number $n_{(U,k)} \in \mathbb{N}$ such that $n_{(U,k)} \ge k$ and $\varphi^{n_{(U,k)}}(\one) \in U$. Hence, the net $\big( \varphi^{n_{(U,k)}}(\one)\big)_{(U,k) \in \mathcal{U} \times \mathbb{N}}$ converges to $\one$ in $G$, which means that $\big( \lambda^{n_{(U,k)}} \big)_{(U,k) \in \mathcal{U} \times \mathbb{N}}$ converges to $1$ for each $\lambda \in \mathbb{T}$. Moreover, the net $\big(n_{(U,k)}\big)_{(U,k) \in \mathcal{U} \times \mathbb{N}}$ is clearly cofinal in $\mathbb{N}$ by construction. \end{proof} Now we can show that, if the semigroup at infinity of a time-discrete operator semigroup $(T^n)_{n \in \mathbb{N}_0}$ is non-empty and compact, then there exists a subnet $(T^{n_j})_j$ which converges to $P_\infty$, where $(n_j)_j$ can be chosen independently of the operator $T$ (and also independently of the underlying Banach space). \begin{proposition} \label{prop:single-net-for-all-operators} Let $(n_j)_j$ be a cofinal net in $\mathbb{N}$ such that $(\lambda^{n_j})_j$ converges to $1$ for each $\lambda \in \mathbb{T}$ (such a net exists according to Lemma~\ref{lem:unit-circle-pointwise}). If $(T^n)_{n \in \mathbb{N}_0}$ is a bounded semigroup on a Banach space $E$ whose semigroup at infinity is non-empty and compact, then $(T^{n_j})_j$ converges to $P_\infty$. \end{proposition} \begin{proof} We may assume throughout the proof that the scalar field is complex, since otherwise we may replace $E$ with a complexification. We know from Proposition~\ref{prop:compactness-for-single-operators} that $P_\infty$ is the spectral projection of $T$ associated with $\sigma(T) \cap \mathbb{T}$. Since the net $(n_j)_j$ is cofinal in $\mathbb{N}$, Theorem~\ref{thm:JdLG-semigroup-infinity}(c) yields $(T|_{\ker P_\infty})^{n_j} \to 0$. Moreover, $E_\infty$ can be decomposed as \begin{align*} E_\infty = P_1 E \oplus \dots \oplus P_m E, \end{align*} where $m \in \mathbb{N}_0$, $\sigma(T) \cap \mathbb{T} = \{\lambda_1,\dots,\lambda_m\}$ and $P_1,\dots,P_m$ are the spectral projections associated with the single spectral values $\lambda_1,\dots, \lambda_m$. The operator $T$ acts on the space $E_\infty$ as the multiplication with the tuple $(\lambda_1,\dots,\lambda_m)$, so it follows readily that $(T|_{E_\infty})^{n_j} \to \id_{E_\infty}$. \end{proof} \begin{proof}[Proof of Theorem~\ref{thm:from-subsemigroups-to-the-semigroup-via-strong-continuity}] We may assume throughout the proof that $E$ is a complex Banach space since we can otherwise replace $E$ with a complexification. (i) $\Leftrightarrow$ (iii): This equivalence follows from Proposition~\ref{prop:compactness-for-single-operators}. (ii) $\Rightarrow$ (i): This implication follows from Corollary~\ref{cor:subsemigroup-non-empty-and-compact}. (i) $\Rightarrow$ (ii): For each $s \in (0, \infty)$ denote by $P_{\infty,s}$ the projection at infinity that belongs to the semigroup $(T_{ns})_{n \in \mathbb{N}_0}$; then $P_{\infty,s}$ is also the spectral projection of $T_s$ that belongs to the intersection of the spectrum with the unit circle. 
Let $(n_j)_j$ be a cofinal net in $\mathbb{N}$ with the property asserted in Lemma~\ref{lem:unit-circle-pointwise}. According to Proposition~\ref{prop:single-net-for-all-operators} we have $T_{n_js} = T_s^{n_j} \to P_{\infty, s}$ for each $s \in (0,\infty)$, which implies that the operator family $(P_{\infty, s})_{s \in (0,\infty)}$ satisfies the semigroup law. This in turn implies that all the projections $P_{\infty,s}$ coincide (see \cite[Lemma~2.2]{GerlachLB}); from now on, we set $P \coloneqq P_{\infty,s}$ for all $s \in (0,\infty)$. Since all operators $T_s$ commute with $P$, the semigroup $(T_s)_{s \in [0,\infty)}$ leaves both $\ker P$ and $PE$ invariant. It remains to prove that $P$ satisfies the conditions~(a) and~(b) of Proposition~\ref{prop:uniqueness-of-proposition}: (a) It is an immediate consequence of Proposition~\ref{prop:bounded-convergence} that $(T_s|_{\ker P})_{s \in [0,\infty)}$ converges to $0$ since, for instance, the powers of $T_1|_{\ker P} = T_1|_{\ker P_{\infty,1}}$ converge to $0$. (b) It follows from Theorem~\ref{thm:JdLG-semigroup-infinity}(b) that, for each $s \in (0,\infty)$, the operator $T_s|_{PE}$ is invertible on $PE$. Hence, the semigroup $(T_s|_{PE})_{s \in [0, \infty)}$ extends to a group on $PE$. Since the semigroup is strongly continuous at at least one time, it thus follows that it is strongly continuous at all times $s \in [0,\infty)$. Let $A$ denote the generator of the $C_0$-semigroup $(T_s|_{PE})_{s \in [0, \infty)}$. Let us show that the operator $A$ has at most finitely many eigenvalues on the imaginary axis. So assume to the contrary that the set $\mathrm{i} B \coloneqq \sigma_{\operatorname{pnt}}(A) \cap \mathrm{i} \mathbb{R}$ is infinite. Choose two times $s,t \in (0,\infty)$ such that $s/t$ is irrational. Since $\mathrm{e}^{\mathrm{i} tB}$ consists of unimodular eigenvalues of $T_t|_{PE}$, it follows that this set is finite. Hence, there exists an infinite subset $\mathrm{i} C$ of $\mathrm{i} B$ whose values are all mapped to the same number by the mapping $\exp(\mathord{\,\cdot\,} t)$. Thus, $t(c_1-c_2) \in 2\pi \mathbb{Z}$ for all $c_1,c_2 \in C$. Consequently, $s(c_1-c_2) = \frac{s}{t}t(c_1-c_2) \not\in 2\pi \mathbb{Z}$ for any two distinct $c_1,c_2 \in C$, which in turn implies that all the values $\mathrm{e}^{\mathrm{i} sc}$ are distinct for $c \in C$. However, each such number is an eigenvalue of $T_s|_{PE}$; this is a contradiction since $T_s|_{PE}$ has only finitely many eigenvalues. Let $\mathrm{i} \beta_1, \dots, \mathrm{i} \beta_n$ denote the eigenvalues of $A$ on the imaginary axis (at least one such eigenvalue exists unless $PE = \{0\}$) and denote their corresponding eigenspaces by $E_1,\dots,E_n$. We note that $PE = E_1 \oplus \dots \oplus E_n$. To see this, choose a sufficiently small number $s_0 \in (0,\infty)$ such that all the numbers $\mathrm{e}^{\mathrm{i} s_0\beta_1},\dots, \mathrm{e}^{\mathrm{i} s_0\beta_n}$ are distinct. Then, for each $k\in \{1,\dots,n\}$, the space $E_k$ is the eigenspace of $T_{s_0}$ for the eigenvalue $\mathrm{e}^{\mathrm{i} s_0\beta_k}$ \cite[Corollary~IV.3.8(ii)]{Engel2000}. Consequently, $E_k$ is even the spectral space of $T_{s_0}$ for the spectral value $\mathrm{e}^{\mathrm{i} s_0\beta_k}$ since the latter number is a first order pole of the resolvent of $T_{s_0}$ (as $T_{s_0}$ is power-bounded).
Moreover, $P$ is the spectral projection of $T_{s_0}$ corresponding to the part $\sigma(T_{s_0}) \cap \mathbb{T} = \{\mathrm{e}^{\mathrm{i} s_0\beta_1},\dots,\mathrm{e}^{\mathrm{i} s_0\beta_n}\}$ of the spectrum, so indeed \begin{align*} PE = \ker(\mathrm{e}^{\mathrm{i} s_0\beta_1} - T_{s_0}) \oplus \dots \oplus \ker(\mathrm{e}^{\mathrm{i} s_0\beta_n} - T_{s_0}) = E_1 \oplus \dots \oplus E_n. \end{align*} As the semigroup $(T_s|_{PE})_{s \in [0,\infty)}$ acts on $E_k$ as multiplication by $(\mathrm{e}^{\mathrm{i} s \beta_k})_{s \in [0,\infty)}$, it follows that $\{T_s|_{PE} \colon s \in [0, \infty) \}$ is relatively compact in $\mathcal{L}(PE)$. \end{proof} \section{Triviality of compact operator groups} \label{section:triviality-of-compact-operator-groups} Loosely speaking, the major theoretical consequence of Corollary~\ref{cor:characterization-of-sg-convergence} is that, if one would like to find sufficient criteria for an operator semigroup to converge with respect to the operator norm, then one should look for criteria which ensure that a compact operator group is trivial. This is the purpose of the present section. \subsection{Divisible groups and a spectral condition} \label{subsection:divisible-groups-and-a-spectral-condition} Recall that a compact topological group $G$ is called \textit{divisible} if for each $g \in G$ and each $n \in \mathbb{N}$, there exists $h \in G$ such that $h^n = g$. We start with a theorem on the triviality of divisible compact groups of linear operators. The corollaries of this theorem that are listed at the end of this subsection will be powerful tools in Section~\ref{section:operator-norm-convergence-of-semigroups} when we finally prove various concrete convergence theorems for operator semigroups. It is worthwhile to note that a compact topological group $G$ is divisible if and only if $G$ is connected (see \cite[Corollary~2]{Mycielski1958} or, for the special case where $G$ is commutative, \cite[assertions~(a) and~(b) on p.\,55]{Kaplansky1954}). \begin{theorem} \label{thm:trivial-group-by-rational-spectrum} Let $E$ be a complex Banach space and let $\mathcal{G} \subseteq \mathcal{L}(E)$ be a divisible and compact subgroup of the invertible linear operators on $E$. If, for each $T \in \mathcal{G}$, all spectral values of $T$ are roots of unity, then $\mathcal{G} = \{\id_E\}$. \end{theorem} For the proof of Theorem~\ref{thm:trivial-group-by-rational-spectrum} we need a bit of Banach algebra theory, specifically the following lemma. For the convenience of the reader, we include its simple proof. \begin{lemma} \label{lemma:maximal-commutative-subalgebra} Let $\mathcal{A}$ be a complex Banach algebra with multiplicatively neutral element $1$ and let $\mathcal{B} \subseteq \mathcal{A}$ be a commutative subalgebra which is maximal among all commutative subalgebras of $\mathcal{A}$. Then the following assertions hold: \begin{enumerate}[\upshape (i)] \item $\mathcal{B}$ is closed and contains $1$. \item For each $b \in \mathcal{B}$ its spectrum in $\mathcal{A}$ coincides with its spectrum in $\mathcal{B}$. \end{enumerate} \end{lemma} \begin{proof} (i) This follows immediately from the maximality of $\mathcal{B}$. (ii) Fix $b \in \mathcal{B}$. Clearly, the spectrum of $b$ in $\mathcal{A}$ is contained in the spectrum of $b$ in $\mathcal{B}$. To show the converse inclusion, let $\lambda$ be in the resolvent set of $b$ with respect to $\mathcal{A}$. Observe that the inverse $(\lambda - b)^{-1}$ commutes with all elements in $\mathcal{B}$.
Therefore, the linear span of the set \begin{align*} \{(\lambda - b)^{-n} \colon n \in \mathbb{N}_0 \} \cdot \mathcal{B} \end{align*} is a commutative subalgebra of $\mathcal{A}$ that contains $\mathcal{B}$ and thus coincides with $\mathcal{B}$. Hence, $(\lambda - b)^{-1} \in \mathcal{B}$, i.e., $\lambda$ is contained in the resolvent set of $b$ in $\mathcal{B}$. \end{proof} Note that if $\mathcal{G} \subseteq \mathcal{L}(E)$ is a compact subgroup of the invertible linear operators on a complex Banach space $E$, then $\sup_{n \in \mathbb{Z}} \norm{T^n} < \infty$ for all $T \in \mathcal{G}$, i.e., each operator in $\mathcal{G}$ is \emph{doubly power-bounded}. After these preparations, Theorem~\ref{thm:trivial-group-by-rational-spectrum} can be proved. \begin{proof}[Proof of Theorem~\ref{thm:trivial-group-by-rational-spectrum}] According to \cite[Corollary~1 and Corollary~2]{Mycielski1958} every element of a divisible compact group is contained in a divisible commutative (and closed) subgroup, so it suffices to prove the assertion for commutative $\mathcal{G}$. Let $\mathcal{B}$ be a subalgebra of $\mathcal{L}(E)$ which is maximal among all commutative subalgebras of $\mathcal{L}(E)$ that contain $\mathcal{G}$ (such a $\mathcal{B}$ exists by Zorn's lemma). Then $\mathcal{B}$ is also maximal among all commutative subalgebras of $\mathcal{L}(E)$, so according to Lemma~\ref{lemma:maximal-commutative-subalgebra}, $\mathcal{B}$ is closed and contains $\id_E$; moreover, for each $T \in \mathcal{B}$ the spectrum $\sigma(T)$ of $T$ in $\mathcal{L}(E)$ and its spectrum in $\mathcal{B}$ coincide. Hence, if $\Omega(\mathcal{B})$ denotes the character space of the Banach algebra $\mathcal{B}$, then we have \begin{align*} \sigma(T) = \{\varphi(T)\colon \varphi \in \Omega(\mathcal{B})\}; \end{align*} see e.g.~\cite[Theorem~1.3.4(1)]{Murphy1990}. Since each $\varphi \in \Omega(\mathcal{B})$ is a continuous group homomorphism, it follows that $\varphi(\mathcal{G})$ is a divisible and compact subgroup of $\mathbb{T}$ for each character $\varphi$. On the other hand, it follows from our spectral assumption that $\varphi(\mathcal{G})$ consists of roots of unity only; consequently, $\varphi(\mathcal{G}) = \{1\}$. We conclude that $\sigma(T) = \{1\}$ for each $T \in \mathcal{G}$, so each such $T$ equals $\id_E$ by Gelfand's $T = \id$ theorem since $T$ is doubly power-bounded (see e.g.\ \cite[Theorem~B.17]{Engel2000}). \end{proof} The Banach algebra argument used in the previous proof is a common technique in the spectral analysis of operator semigroups; related arguments can, for instance, be found in \cite[Chapter~XVI]{Hille1957} and \cite[Section~4.7]{Blake1999}. The condition that all spectral values of any $T \in \mathcal{G}$ are roots of unity is automatically satisfied in two important situations. The first one is that the underlying space is a Banach lattice and all operators in $\mathcal{G}$ are positive; this is the content of the following corollary. \begin{corollary} \label{cor:trivial-group-positive} Let $E$ be a Banach lattice and let $\mathcal{G} \subseteq \mathcal{L}(E)$ be a divisible and compact subgroup of the invertible bounded linear operators on $E$ such that each operator in $\mathcal{G}$ is positive. Then $\mathcal{G} = \{\id_E\}$. \end{corollary} \begin{proof} One may assume that the scalar field is complex. 
According to Theorem~\ref{thm:trivial-group-by-rational-spectrum} it suffices to show that the spectrum of each $T \in \mathcal{G}$ consists of roots of unity only, so fix $T \in \mathcal{G}$. Clearly, $\sigma(T) \subseteq \mathbb{T}$, so it follows from Lemma~\ref{lemma:compactness-discrete-case} and Proposition~\ref{prop:compactness-for-single-operators} that $\sigma(T)$ is finite and consists of poles of the resolvent. It follows from infinite-dimensional Perron--Frobenius theory that the spectrum of $T$ is \emph{cyclic}, meaning that $\lambda^n \in \sigma(T)$ for all $n \in \mathbb{Z}$ whenever $\lambda \in \sigma(T)$ (see \cite[Theorem~4.7]{Lotz1968} or \cite[Theorem~V.4.9]{Schaefer1974}). By the finiteness of the spectrum, this implies that $\sigma(T)$ consists of roots of unity only. \end{proof} Before we continue with a second situation where Theorem~\ref{thm:trivial-group-by-rational-spectrum} can be applied, let us briefly discuss another possibility to derive the corollary above. The following theorem also contains Corollary~\ref{cor:trivial-group-positive} as a special case since every finite divisible group consists of one element only. The theorem and its proof were communicated to us by Rainer Nagel, according to whom the result goes originally back to Heinrich P.\ Lotz. We could not find a concrete reference for it in the literature, though. \begin{theorem} Let $E$ be a (real or complex) Banach lattice and let $\mathcal{G} \subseteq \mathcal{L}(E)$ be a compact subgroup of the invertible bounded linear operators on $E$ such that each operator in $\mathcal{G}$ is positive. Then $\mathcal{G}$ is finite. \end{theorem} \begin{proof} We may assume that the underlying scalar field is complex. Since $\mathcal{G}$ is compact, it suffices to show that every element in $\mathcal{G}$ is isolated, and to this end it suffices to prove that $I$ is isolated. Now, fix $T \in \mathcal{G} \setminus \{I\}$. Since $T$ is doubly power bounded, the spectrum of $T$ is a subset of the complex unit circle. Moreover, the spectrum cannot consist of the number $1$ only, since this would imply $T = \id$ by Gelfand's $T = \id$ theorem \cite[Theorem~B.17]{Engel2000}. Since the peripheral spectrum (which is the spectrum) of $T$ is cyclic (see \cite[Theorem~4.7]{Lotz1968} or \cite[Theorem~V.4.9]{Schaefer1974}), there exists a spectral value $\lambda$ of $T$ with negative real part. In particular, the spectral value $\lambda - 1$ of the operator $T-\id$ has modulus at least $\sqrt{2}$, so \begin{align*} \norm{T - \id} \ge r(T-\id) \ge \modulus{\lambda - 1} \ge \sqrt{2}. \end{align*} This shows that every operator in $\mathcal{G} \setminus \{\id\}$ has distance at least $\sqrt{2}$ from $\id$, so $\id$ is indeed isolated in $\mathcal{G}$. \end{proof} After this brief intermezzo, let us continue to discuss consequences of Theorem~\ref{thm:trivial-group-by-rational-spectrum}. Our next corollary deals with the case of contractive operators on so-called \emph{projectively non-Hilbert spaces}. This notion is taken from \cite[Definition~3.1]{Glueck2016b}; a real Banach space $E$ is called \emph{projectively non-Hilbert}, if for no rank-$2$ projection $P \in \mathcal{L}(E)$, the range $PE$ is isometrically a Hilbert space. Every real-valued $L^p$-space over an arbitrary measure space is projectively non-Hilbert if $p \in [1,\infty] \setminus \{2\}$, see \cite[Example~3.2]{Glueck2016b} and the discussion after \cite[Example~3.5]{Glueck2016b}. 
Moreover, every real Banach lattice that is a so-called \emph{AM}-space is projectively non-Hilbert \cite[Example~1.2.7]{GlueckDISS}; this includes the space of real-valued bounded and continuous functions on any topological space. \begin{corollary} \label{cor:trivial-group-contractive} Let $E$ be a real Banach space that is projectively non-Hilbert and let $\mathcal{G} \subseteq \mathcal{L}(E)$ be a divisible and compact subgroup of the invertible bounded linear operators on $E$ such that each operator in $\mathcal{G}$ is contractive. Then $\mathcal{G} = \{\id_E\}$. \end{corollary} \begin{proof} Let $E_\mathbb{C}$ denote a Banach space complexification of $E$; for each $T \in \mathcal{G}$ we denote the canonical extension of $T$ to $E_\mathbb{C}$ by $T_\mathbb{C}$. Then $\mathcal{G}_\mathbb{C} \coloneqq \{T_\mathbb{C}\colon T \in \mathcal{G}\}$ is a divisible and compact subgroup of the invertible bounded linear operators on $E_\mathbb{C}$. Now fix $T \in \mathcal{G}$; it suffices to prove that the spectrum of $T_\mathbb{C}$ consists of roots of unity only. By Proposition~\ref{prop:characterisation-of-compact-and-non-empty} the semigroup at infinity associated to $(T_\mathbb{C}^n)_{n \in \mathbb{N}_0}$ is non-empty and compact, so it follows from Proposition~\ref{prop:compactness-for-single-operators} that $\sigma(T_\mathbb{C})$ is a finite subset of the complex unit circle and consists of eigenvalues of $T_\mathbb{C}$. Moreover, the set $\{T_\mathbb{C}^n\colon n \in \mathbb{N}_0\}$ is relatively compact with respect to the weak operator topology, i.e., $T_\mathbb{C}$ is \emph{weakly almost periodic}. Since $E$ is projectively non-Hilbert, we can now apply \cite[Theorem~3.11]{Glueck2016b} to conclude that the spectrum of $T_\mathbb{C}$ consists of roots of unity only. \end{proof} \subsection{Strong positivity of groups} \label{subsection:strong-positivity-of-groups} Another way to ensure that a group of linear operators is trivial is to impose a certain condition of \emph{strong positivity}; this works in the very general setting of ordered Banach spaces. By an \emph{ordered Banach space} we mean a tuple $(E,E_+)$ where $E$ is a real Banach space and $E_+$ is a closed subset of $E$ such that $\alpha E_+ + \beta E_+ \subseteq E_+$ for all $\alpha,\beta \in [0,\infty)$ and such that $E_+ \cap (-E_+) = \{0\}$; the set $E_+$ is called the \emph{positive cone} in $E$. Let $(E,E_+)$ be an ordered Banach space. An operator $T \in \mathcal{L}(E)$ is called \emph{positive} if $TE_+ \subseteq E_+$; a semigroup on $E$ is said to be \emph{positive} if every operator in it is positive. A functional $\varphi \in E'$ is called \emph{positive} if $\langle \varphi, f \rangle \ge 0$ for all $f \in E_+$. A vector $f \in E_+$ is said to be an \emph{almost interior point} of $E_+$ if $\langle \varphi, f \rangle > 0$ for each non-zero positive functional $\varphi \in E'$. If, for instance, $E$ is an $L^p$-space over a $\sigma$-finite measure space $(\Omega,\mu)$ and $p \in [1,\infty)$, then a function $f \in E_+$ is an almost interior point if and only if $f(\omega) > 0$ for almost all $\omega \in \Omega$. For more information about almost interior points we refer to \cite[Section~2]{GlueckAlmostInterior}. The following result is inspired by the proof of \cite[Theorem~4.1]{GlueckAlmostInterior}.
\begin{theorem} \label{thm:trivial-group-strictly-positive} Let $(E,E_+)$ be an ordered Banach space with $E_+ \not= \{0\}$ and let $\mathcal{G} \subseteq \mathcal{L}(E)$ be a norm-bounded subgroup of the invertible operators on $E$. Assume that every operator in $\mathcal{G}$ is positive and that, for each $f \in E_+ \setminus \{0\}$, there exists $T \in \mathcal{G}$ such that $Tf$ is an almost interior point of $E_+$. Then $E$ is one-dimensional and $\mathcal{G} = \{\id_E\}$. \end{theorem} \begin{proof} We first show that every point in $E_+ \setminus \{0\}$ is an almost interior point of $E_+$. So let $f \in E_+ \setminus \{0\}$. Choose $T \in \mathcal{G}$ such that $Tf$ is an almost interior point of $E_+$. Since $T^{-1}$ is an element of $\mathcal{G}$, it is a positive operator on $E$, and since $T^{-1}$ is surjective it thus follows from \cite[Corollary~2.22(a)]{GlueckAlmostInterior} that $T^{-1}$ maps almost interior points to almost interior points. Hence, $f = T^{-1}Tf$ is an almost interior point. Since all vectors in $E_+ \setminus \{0\}$ are almost interior points, it follows from \cite[Theorem~2.10]{GlueckAlmostInterior} that $E$ is one-dimensional. Thus, $\mathcal{G}$ can be identified with a bounded subgroup of the multiplicative group $(0,\infty)$, so $\mathcal{G}$ does indeed consist of one element only. \end{proof} \section{Operator norm convergence of semigroups} \label{section:operator-norm-convergence-of-semigroups} In this section we finally derive convergence theorems for various classes of operator semigroups. In Subsection~\ref{subsection:convergence-under-divisibility-conditions} representations whose underlying semigroup $(S,+)$ satisfies a certain kind of divisibility condition are considered. In Subsection~\ref{subsection:convergence-under-a-strict-positivity-condition} we then deal with positive semigroups on ordered Banach spaces under an appropriate strong positivity assumption. \subsection{Convergence under divisibility conditions} \label{subsection:convergence-under-divisibility-conditions} We call the semigroup $(S,+)$ \emph{essentially divisible} if, for each $s \in S$ and each integer $n \in \mathbb{N}$, there exist elements $t_1,t_2 \in S$ such that $nt_1 = s + nt_2$. This definition is taken from \cite{Glueck2019}, where it was used as a generalisation of semigroups that generate divisible groups (which played an important role in \cite{GerlachConvPOS}). Let us illustrate the notion of essential divisibility with a list of simple examples. \begin{examples} \begin{enumerate}[\upshape (a)] \item The semigroup $([0, \infty),+)$ is essentially divisible, and so is $(\mathbb{Q} \cap [0,\infty), +)$. \item More generally, for each $a \ge 0$, both the semigroup $(\{0\} \cup [a,\infty),+)$ and the semigroup $\big(\{0\} \cup (\mathbb{Q} \cap [a,\infty)), +\big)$ are essentially divisible. \item The semigroup $([0,\infty)^n,+)$ is essentially divisible for each $n \in \mathbb{N}$. \item The semigroup $([0,\infty), \max)$ is essentially divisible; here, $\max$ denotes the binary operator which assigns the maximum to any two given elements of $[0,\infty)$. \item More generally, if $L$ is a lattice with a smallest element $i$, then $(L,\lor)$ is an essentially divisible semigroup (with neutral element $i$). \item The semigroup $(\mathbb{N}_0,+)$ is not essentially divisible. \item The semigroup $(D,+)$, where $D = \{k/2^n\colon k,n \in \mathbb{N}_0\}$ is the set of dyadic numbers in $[0,\infty)$, is not essentially divisible. 
\end{enumerate} \end{examples} To illustrate the negative examples, note for instance that $(\mathbb{N}_0,+)$ is not essentially divisible since, for $n = 2$ and $s = 1$, the equation $2t_1 = 1 + 2t_2$ has no solution $t_1,t_2 \in \mathbb{N}_0$ for parity reasons. Now we use the notion of essential divisibility to prove a convergence theorem for positive semigroups on Banach lattices and a convergence theorem for contractive semigroups on projectively non-Hilbert spaces. Let us begin with the positive case. \begin{theorem} \label{thm:bounded-convergence-banach-lattice} Let $E$ be a Banach lattice and let $(T_s)_{s \in S}$ be a positive and bounded semigroup on $E$. If the semigroup at infinity, $\sgInftyON{\mathcal{T}}$, is non-empty and compact and if $(S,+)$ is essentially divisible, then $(T_s)_{s \in S}$ converges with respect to the operator norm to the projection at infinity. \end{theorem} \begin{proof} Note that the range $E_\infty$ of the projection at infinity, $P_\infty$, is again a Banach lattice since $P_\infty$ is positive \cite[Proposition~II.11.5]{Schaefer1974}. According to Theorem~\ref{thm:JdLG-semigroup-infinity}, \begin{align*} \sgInftyON{\mathcal{T}}|_{E_\infty} = \overline{\{T_s \colon s \in S\}|_{E_\infty}}^{\mathcal{L}(E_\infty)} \end{align*} is a compact subgroup of the invertible operators on $E_\infty$. As $(S, +)$ is essentially divisible, a simple compactness argument thus shows that $\sgInftyON{\mathcal{T}}|_{E_\infty}$ is divisible. Since this group consists of positive operators, it is therefore trivial by Corollary~\ref{cor:trivial-group-positive}. Since the groups $\sgInftyON{\mathcal{T}}|_{E_\infty}$ and $\sgInftyON{\mathcal{T}}$ are isomorphic by Theorem~\ref{thm:JdLG-semigroup-infinity}, the semigroup at infinity, $\sgInftyON{\mathcal{T}}$, is also trivial. Thus, Corollary~\ref{cor:characterization-of-sg-convergence} yields the claim. \end{proof} The following corollary is due to Lotz in the special case where $S = [0,\infty)$. \begin{corollary} \label{cor:quasi-compact-convergence-banach-lattice} Let $E$ be a Banach lattice and let $(T_s)_{s \in S}$ be a positive and bounded semigroup on $E$. If $T_{s_0}$ is quasi-compact for at least one $s_0 \in S$ and if $(S,+)$ is essentially divisible, then $(T_s)_{s \in S}$ converges with respect to the operator norm to a finite rank projection. \end{corollary} \begin{proof} This is an immediate consequence of Proposition~\ref{prop:quasi-compact} and Theorem~\ref{thm:bounded-convergence-banach-lattice}. \end{proof} Our second corollary -- which only deals with the semigroup $([0,\infty),+)$ -- has the nice theoretical feature that it covers, in contrast to Corollary~\ref{cor:quasi-compact-convergence-banach-lattice}, also the trivial operator semigroup that consists merely of the operator $\id_E$ -- which is arguably the simplest convergent operator semigroup. \begin{corollary} \label{cor:spectral-condition-banach-lattice} Let $E$ be a complex Banach lattice and let $(T_s)_{s \in [0,\infty)}$ be a positive and bounded semigroup on $E$ which is strongly continuous at at least one time $s_0 \in (0,\infty)$. If, for each $s \in (0,\infty)$, all spectral values of $T_s$ on the unit circle are poles of the resolvent, then $T_s$ converges with respect to the operator norm as $s \to \infty$. \end{corollary} \begin{proof} This is an immediate consequence of Theorems~\ref{thm:from-subsemigroups-to-the-semigroup-via-strong-continuity} and~\ref{thm:bounded-convergence-banach-lattice}. \end{proof} Now we deal with real Banach spaces which are projectively non-Hilbert; see the discussion before Corollary~\ref{cor:trivial-group-contractive} for a definition of this property.
\begin{theorem} \label{thm:convergence-projectively-non-Hilbert-case} Let $E$ be a real Banach space that is projectively non-Hilbert and let $(T_s)_{s \in S}$ be a contractive semigroup on $E$. If the semigroup at infinity, $\sgInftyON{\mathcal{T}}$, is non-empty and compact and if $(S,+)$ is essentially divisible, then $(T_s)_{s \in S}$ converges with respect to the operator norm to the projection at infinity. \end{theorem} \begin{proof} Note that the projection at infinity, $P_\infty$, is contractive, and hence its range is itself a projectively non-Hilbert space. It follows as in Theorem~\ref{thm:bounded-convergence-banach-lattice} that the compact group $\sgInftyON{\mathcal{T}}|_{E_\infty}$ is divisible; since it consists of contractive operators only, Corollary~\ref{cor:trivial-group-contractive} shows that this group is actually trivial. Thus, the semigroup at infinity -- which is isomorphic to $\sgInftyON{\mathcal{T}}|_{E_\infty}$ -- is trivial, too. So the conclusion follows from Corollary~\ref{cor:characterization-of-sg-convergence}. \end{proof} Again, we state the same result separately for the quasi-compact case. \begin{corollary} \label{cor:quasi-compact-convergence-projectively-non-Hilbert} Let $E$ be a real Banach space that is projectively non-Hilbert and let $(T_s)_{s \in S}$ be a contractive semigroup on $E$. If $T_{s_0}$ is quasi-compact for at least one $s_0 \in S$ and if $(S,+)$ is essentially divisible, then $(T_s)_{s \in S}$ converges with respect to the operator norm to a finite rank projection. \end{corollary} \begin{proof} This is an immediate consequence of Proposition~\ref{prop:quasi-compact} and Theorem~\ref{thm:convergence-projectively-non-Hilbert-case}. \end{proof} A result similar to Corollary~\ref{cor:spectral-condition-banach-lattice} is, of course, also true for contractive semigroups on projectively non-Hilbert spaces; we refrain from stating this explicitly as a corollary. Finally, Theorem~\ref{thm:introduction} from the introduction follows readily from Corollary~\ref{cor:quasi-compact-convergence-projectively-non-Hilbert}: \begin{proof}[Proof of Theorem~\ref{thm:introduction}] (i) $\Rightarrow$ (ii): This implication is obvious. (ii) $\Rightarrow$ (i): For both possible choices of $E$, this space is projectively non-Hilbert. Since the semigroup $([0,\infty),+)$ is essentially divisible, the assertion follows from Corollary~\ref{cor:quasi-compact-convergence-projectively-non-Hilbert}. \end{proof} \begin{remark} \label{rem:continuous-vs-discrete-time} All results in this subsection fail if we drop the assumption that the semigroup $(S,+)$ is essentially divisible. For instance, the semigroup $(\mathbb{N}_0,+)$ is not essentially divisible, and indeed the $n$-th powers of the matrix \begin{align*} \begin{pmatrix} 0 & 1 \\ 1 & 0 \end{pmatrix} \end{align*} do not converge as $n \to \infty$ -- despite the fact that the matrix is positive and contractive with respect to the $p$-norm for each $p$. A closely related phenomenon is discussed in \cite[Example~3.7]{GerlachConvPOS}. \end{remark} \subsection{Convergence under a strong positivity condition} \label{subsection:convergence-under-a-strict-positivity-condition} The following theorem is a generalisation of \cite[Theorem~5.3]{GlueckAlmostInterior}, where only the cases $S = \mathbb{N}_0$ and $S = [0,\infty)$ were considered.
\begin{theorem} \label{thm:convergence-of-a-strictly-positive-semigroup} Let $(E,E_+)$ be an ordered Banach space with $E_+ \not= \{0\}$ and let $(T_s)_{s \in S}$ be a bounded and positive semigroup on $E$. Moreover, assume that $T_{s_0}$ is quasi-compact for at least one $s_0 \in S$ and that the following strong positivity condition holds: for each $f \in E_+ \setminus \{0\}$ there exists $s \in S$ such that $T_sf$ is an almost interior point of $E_+$. Then $(T_s)_{s \in S}$ converges with respect to the operator norm to a projection in $\mathcal{L}(E)$ of rank at most $1$. \end{theorem} \begin{proof} According to Proposition~\ref{prop:quasi-compact} the semigroup at infinity, $\sgInftyON{\mathcal{T}}$, is non-empty and compact since $(T_s)_{s \in S}$ is bounded and since $T_{s_0}$ is quasi-compact. Let $P_\infty$ denote the corresponding projection at infinity. Then $P_\infty$ is a positive operator and hence, its range $E_\infty$ is also an ordered Banach space with positive cone $P_\infty E_+ = E_+ \cap E_\infty$. If $P_\infty = 0$, Theorem~\ref{thm:JdLG-semigroup-infinity}(c) implies that the semigroup converges to $0$; so assume now that $P_\infty \not= 0$. It follows from the assumptions that there exists at least one almost interior point in $E_+$, which implies that the set $E_+ - E_+$ is dense in $E$ (see e.g.\ \cite[Proposition~2.9]{GlueckAlmostInterior}). In particular, the positive cone $P_\infty E_+$ of the space $E_\infty$ is non-zero since $P_\infty \not= 0$. By Theorem~\ref{thm:JdLG-semigroup-infinity}(b), $\sgInftyON{\mathcal{T}}|_{E_\infty}$ is a compact subgroup of the invertible operators on $E_\infty$, and for each $s \in S$ the restriction $T_s|_{E_\infty}$ is contained in $\sgInftyON{\mathcal{T}}|_{E_\infty}$. Moreover, $\sgInftyON{\mathcal{T}}|_{E_\infty}$ clearly consists of positive operators. We now show that this group satisfies the assumptions of Theorem~\ref{thm:trivial-group-strictly-positive}. To this end, let $0 \not= f \in P_\infty E_+$. By assumption there exists an $s \in S$ such that $T_sf$ is an almost interior point of $E_+$. Since $T_sf \in P_\infty E_+$, it follows from \cite[Corollary~2.22(b)]{GlueckAlmostInterior} that the vector $T_sf$ is also an almost interior point of the positive cone $P_\infty E_+$ of $E_\infty$. Hence, the operator $T_s|_{P_\infty E} \in \sgInftyON{\mathcal{T}}|_{E_\infty}$ maps $f$ to an almost interior point of the positive cone of $E_\infty$, so we can employ Theorem~\ref{thm:trivial-group-strictly-positive} to conclude that $E_\infty$ is one-dimensional and that $\sgInftyON{\mathcal{T}}|_{E_\infty} = \{\id_{E_\infty}\}$. Corollary~\ref{cor:characterization-of-sg-convergence} thus shows that $(T_s)_{s \in S}$ converges to the rank-$1$ projection $P_\infty$. \end{proof} \section{Application: coupled parabolic equations on $\mathbb{R}^d$} \label{section:application-coupled-parabolic-equations} In this section we use Theorem~\ref{thm:introduction} to analyse the asymptotic behaviour of coupled parabolic equations with possibly unbounded coefficients on the space $\mathbb{R}^d$. Of course, the unboundedness of the coefficients forces us to impose other conditions on the equation in order to obtain well-posedness. Throughout the section we mainly rely on the results of \cite{Delmonte2011}, and as in this paper, we work on the space of bounded continuous functions over $\mathbb{R}^d$. \subsection{Setting} Here is our precise setting. 
Fix an integer $N \geq 1$ (which will denote the number of coupled equations) as well as functions $A \colon \mathbb{R}^d \to \mathbb{R}^{d \times d}$, $b\colon \mathbb{R}^d \to \mathbb{R}^d$ and $V\colon \mathbb{R}^d \to \mathbb{R}^{N \times N}$ and assume that the following conditions are satisfied: \begin{enumerate}[\upshape (a)] \item For all $x \in \mathbb{R}^d$ the matrix $A(x)$ is symmetric and there exists a continuous function $\nu\colon \mathbb{R}^d \to (0,\infty)$ such that the ellipticity condition \begin{align*} \xi^T A(x) \xi \ge \nu(x) \norm{\xi}_2^2 \end{align*} holds for all $x \in \mathbb{R}^d$ and all $\xi \in \mathbb{R}^d$. \item There exists $\alpha \in (0,1)$ such that the functions $A$, $b$ and $V$ are locally $\alpha$-H\"older continuous on $\mathbb{R}^d$. \item The function $V$ is bounded. \item There exists a twice continuously differentiable function $\varphi\colon \mathbb{R}^d \to (0,\infty)$ such that $\varphi(x) \to \infty$ as $\norm{x}_2 \to \infty$ and a number $\lambda_0 > 0$ such that the estimate \begin{align*} \lambda_0 \varphi - \sum_{i,j=1}^d A_{ij} \partial_{ij} \varphi - \sum_{j=1}^d b_j \partial_j \varphi \ge 0 \end{align*} holds on $\mathbb{R}^d$. \end{enumerate} These are essentially the assumptions from~\cite[Hypotheses~2.1]{Delmonte2011}, with two exceptions: \begin{itemize} \item Instead of boundedness of $V$ a weaker condition is used there (see \cite[Hypotheses~2.1(iii) and~Remark~2.2]{Delmonte2011}). The reason why we assume boundedness of $V$ is explained after Corollary~\ref{cor:semigroups-for-degenerate-parabolic-pde-contractive}. \item At first glance, the inequality in~\cite[Hypotheses~2.2(iv)]{Delmonte2011} looks slightly different from the inequality that is assumed in condition~(d). However, since $V$ is assumed to be bounded, both inequalities are actually equivalent in our setting (if one changes $\lambda_0$ appropriately). \end{itemize} We point out that both $A$ and $b$ are allowed to be unbounded and that $A(x)$ need not be bounded away from $0$ as $\norm{x}_2 \to \infty$. In the following, the (possibly degenerate) parabolic equation \begin{align} \label{eq:degenerate-parabolic-pde} \dot u = (\mathcal{B} + V)u \end{align} is considered on the space $C_b(\mathbb{R}^d; \mathbb{R}^N)$ of bounded continuous functions on $\mathbb{R}^d$ with values in $\mathbb{R}^N$, where the operator $\mathcal{B}$ is given by \begin{align} \label{eq:degenerate-elliptic-operator-without-potential} \mathcal{B} u \coloneqq \begin{pmatrix} \big(\sum_{i,j=1}^d A_{ij} \partial_{ij} + \sum_{j=1}^d b_j \partial_j\big) u_1 \\ \vdots \\ \big(\sum_{i,j=1}^d A_{ij} \partial_{ij} + \sum_{j=1}^d b_j \partial_j\big) u_N \end{pmatrix} \end{align} for all $u$ in the domain \begin{align*} D(\mathcal{B}) \coloneqq \{u \in C_b(\mathbb{R}^d; \mathbb{R}^N) & \cap \bigcap_{1 \le p < \infty} W^{2,p}_{\operatorname{loc}}(\mathbb{R}^d; \mathbb{R}^N)\colon \\ & \text{the expression in~\eqref{eq:degenerate-elliptic-operator-without-potential} is in } C_b(\mathbb{R}^d; \mathbb{R}^N)\}. \end{align*} The above setting will allow us to employ the results from \cite{Delmonte2011} about well-posedness of the equation~\eqref{eq:degenerate-parabolic-pde}. In order to apply our Theorem~\ref{thm:introduction} to study the long-term behaviour of the solutions, though, we have to ensure that the space $C_b(\mathbb{R}^d; \mathbb{R}^N)$ is isometrically isomorphic to a real-valued $C_b$-space.
To this end, we endow it with the norm \begin{align*} \norm{u}_\infty = \max\{\norm{u_k}_\infty\colon k \in \{1,\dots,N\}\} \end{align*} for all $u$ in this space. This already suggests that, in order to apply Theorem~\ref{thm:introduction}, we further need the matrix $V(x)$ to be \emph{$\infty$-dissipative} for each $x \in \mathbb{R}^d$ so as to ensure that the solution semigroup of~\eqref{eq:degenerate-parabolic-pde} is contractive. In \cite{Delmonte2011}, the space $C_b(\mathbb{R}^d; \mathbb{R}^N)$ is equipped with the norm $\norm{u} = \sum_{k=1}^N \norm{u_k}_\infty$, which is equivalent to the norm introduced above but which does not render $C_b(\mathbb{R}^d; \mathbb{R}^N)$ an AM-space. \begin{proposition} \label{prop:semigroups-for-degenerate-parabolic-pde} The operators $\mathcal{B}$ and $\mathcal{B} + V$ \textup{(}with $D(\mathcal{B} + V) \coloneqq D(\mathcal{B})$\textup{)} on $C_b(\mathbb{R}^d;\mathbb{R}^N)$ are closed, and all sufficiently large real numbers belong to the resolvent sets of both $\mathcal{B}$ and $\mathcal{B}+V$. Moreover, there exist operator semigroups $(S_t)_{t \in [0,\infty)}$ and $(T_t)_{t \in [0,\infty)}$ on $C_b(\mathbb{R}^d; \mathbb{R}^N)$ with the following properties: \begin{enumerate}[\upshape (a)] \item For each $f \in C_b(\mathbb{R}^d;\mathbb{R}^N)$, each $x \in \mathbb{R}^d$ and all sufficiently large real numbers $\lambda$ the functions \begin{align*} (0,\infty) \ni t \mapsto \mathrm{e}^{-\lambda t}S_tf(x) \in \mathbb{R}^N \quad \text{and} \quad (0,\infty) \ni t \mapsto \mathrm{e}^{-\lambda t}T_tf(x) \in \mathbb{R}^N \end{align*} are continuous and in $L^1((0,\infty);\mathbb{R}^N)$, and their integrals equal $\mathcal{R}(\lambda,\mathcal{B})f(x)$ and $\mathcal{R}(\lambda,\mathcal{B}+V)f(x)$, respectively. \item The semigroup $(S_t)_{t \in [0,\infty)}$ is contractive. \end{enumerate} \end{proposition} \begin{proof} The assertions about $\mathcal{B}$ and $\mathcal{B} + V$, as well as the existence of both semigroups and property~(a) follow from \cite[Section~3]{Delmonte2011}; to see that we can really use the domain $D(\mathcal{B})$ as the domain of the operator $\mathcal{B}+V$ we need the assumption that $V$ is bounded. Since $\mathcal{B}$ acts separately in every component, so does the semigroup $(S_t)_{t \in [0,\infty)}$; hence, contractivity of $(S_t)_{t \in [0,\infty)}$ follows from contractivity in the scalar case, which can for instance be found in~\cite[Proposition~2.3(i)]{Delmonte2011}. \end{proof} The semigroup $(T_t)_{t \in [0,\infty)}$ describes the solutions to our parabolic equation~\eqref{eq:degenerate-parabolic-pde}; see \cite[Section~3]{Delmonte2011}. We note that, in our setting where the matrix potential $V$ is bounded, one could -- alternatively to the approach from \cite{Delmonte2011} -- employ the theory of bi-continuous semigroups to study the perturbed operator $\mathcal{B} + V$; see \cite[beginning of Section~5]{Metafune2002} and \cite[Theorem~3.5]{Farkas2004}. (There are also results about unbounded perturbations of bi-continuous semigroups such as in \cite[Corollary~4.2]{Albanese2004}, but we do not know whether such results can be applied under the assumptions of \cite[Hypotheses~2.1]{Delmonte2011}.) We point out that while the semigroup $(S_t)_{t \in [0,\infty)}$ is positive, the semigroup $(T_t)_{t \in [0,\infty)}$ is not positive, in general. Moreover, in general we cannot expect those semigroups to be strongly continuous (see for instance the discussion at the beginning of \cite[Subsection~3.1]{Delmonte2011}).
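Since $\infty$-dissipativity of the matrices $V(x)$ will be the decisive assumption in what follows, let us briefly make this notion explicit; the following observation is elementary and is only recorded here, with the computation omitted, for later reference (the notation $M = (m_{k\ell})$ for a generic matrix is introduced solely for this purpose). A matrix $M = (m_{k\ell})_{k,\ell=1}^N \in \mathbb{R}^{N \times N}$ is dissipative with respect to the $\infty$-norm on $\mathbb{R}^N$ -- equivalently, all matrix exponentials $\mathrm{e}^{tM}$ for $t \ge 0$ are contractive with respect to this norm -- if and only if the row-wise estimate
\begin{align*}
	m_{kk} + \sum_{\ell \neq k} \modulus{m_{k\ell}} \le 0 \qquad \text{for all } k \in \{1,\dots,N\}
\end{align*}
holds; indeed, the left-hand side, maximised over $k$, is precisely the logarithmic norm of $M$ induced by $\norm{\,\cdot\,}_\infty$.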
If we assume $\infty$-dissipativity of the matrices $V(x)$, then the semigroup $(T_t)_{t \in [0,\infty)}$ is also contractive: \begin{corollary} \label{cor:semigroups-for-degenerate-parabolic-pde-contractive} Assume that, for each $x \in \mathbb{R}^d$, the matrix $V(x)$ is dissipative with respect to the $\infty$-norm on $\mathbb{R}^N$. Then the semigroup $(T_t)_{t \in [0,\infty)}$ is contractive, too. \end{corollary} \begin{proof} For each $f \in C_b(\mathbb{R}^d;\mathbb{R}^N)$, each $x \in \mathbb{R}^d$ and each $\lambda > 0$ the mapping $(0,\infty) \ni t \mapsto \mathrm{e}^{-\lambda t}S_tf(x) \in \mathbb{R}^N$ is continuous and in $L^1((0,\infty);\mathbb{R}^N)$, and its integral equals $\mathcal{R}(\lambda,\mathcal{B})f(x)$; this follows from Proposition~\ref{prop:semigroups-for-degenerate-parabolic-pde} and from the identity theorem for analytic functions. As $(S_t)_{t \in [0,\infty)}$ is contractive, so is the operator $\lambda \mathcal{R}(\lambda,\mathcal{B})$ for each $\lambda > 0$, and thus it follows that $\mathcal{B}$ is dissipative. The matrix-valued multiplication operator $V$ is dissipative by assumption, and since it is a bounded operator, it is thus even strictly dissipative. Consequently, the operator $\mathcal{B} + V$ is dissipative, too. It now follows from Post's inversion formula for the Laplace transform (for $\mathbb{R}^N$-valued functions) and, again, from Proposition~\ref{prop:semigroups-for-degenerate-parabolic-pde} that $(T_t)_{t \in [0,\infty)}$ is contractive. \end{proof} The proof of Corollary~\ref{cor:semigroups-for-degenerate-parabolic-pde-contractive} is the reason why we assumed $V$ to be bounded; we needed the boundedness on two occasions in the proof: (i) in order to derive strict dissipativity of $V$ from mere dissipativity, and (ii) in order for $\mathcal{B} + V$ to have the same domain as $\mathcal{B}$. The authors do not know whether Corollary~\ref{cor:semigroups-for-degenerate-parabolic-pde-contractive} remains true for unbounded $V$ which satisfies, besides dissipativity, only the assumptions of \cite[Hypotheses~2.1]{Delmonte2011}. \subsection{A convergence result} After the preparations of the preceding subsection, we now arrive at the following convergence result for the solutions to~\eqref{eq:degenerate-parabolic-pde}. Let us remark that, if the matrices $V(x)$ in the potential have non-negative off-diagonal entries, the long-term behaviour of the solutions to equations of the type~\eqref{eq:degenerate-parabolic-pde} was studied in~\cite[Section~4]{Addona2019}; this is possible since the mentioned assumption on $V(x)$ allows for the use of Perron--Frobenius theory. Here, we make no such positivity assumption. Instead, we are going to assume that the matrices $V(x)$ are $\infty$-dissipative. If the operator semigroup $(T_t)_{t \in [0,\infty)}$ is immediately compact, this implies that the solutions to~\eqref{eq:degenerate-parabolic-pde} converge uniformly (for initial values in the unit ball) as time tends to infinity. \begin{theorem} \label{thm:convergence-on-unbounded-domains} Assume that, for each $x \in \mathbb{R}^d$, the matrix $V(x)$ is dissipative with respect to the $\infty$-norm on $\mathbb{R}^N$. If the operators $T_t$ are compact for $t > 0$, then $T_t$ converges with respect to the operator norm to a finite-rank projection as $t \to \infty$.
\end{theorem} \begin{proof} This is a consequence of Corollary~\ref{cor:semigroups-for-degenerate-parabolic-pde-contractive} and Theorem~\ref{thm:introduction} since $C_b(\mathbb{R}^d;\mathbb{R}^N)$ is isometrically isomorphic to the space $C_b(L;\mathbb{R})$, where $L$ is composed of $N$ disjoint copies of $\mathbb{R}^d$. \end{proof} Of course, one does not really need to assume that all operators $T_t$ (for $t > 0$) are compact in order to apply Theorem~\ref{thm:introduction}; it would suffice to assume that at least one operator $T_{t_0}$ is quasi-compact. However, the property that all $T_t$ are compact is quite a reasonable assumption in this setting since there are several sufficient criteria for this property available; we refer to \cite[Subsection~3.2]{Delmonte2011} for such conditions and refrain from stating them here explicitly. However, let us illustrate the above result by the following simple concrete example, where the differential operator is a special case of the one considered in \cite[Section~4]{Delmonte2011}. \begin{example} \label{ex:concrete-example-on-unbounded-domain} Consider the $\mathbb{R}^2$-valued evolution equation \begin{align} \label{eq:concrete-example-on-unbounded-domain} \begin{pmatrix} \dot u_1 \\ \dot u_2 \end{pmatrix} = \begin{pmatrix} \Delta u_1 \\ \Delta u_2 \end{pmatrix} - \begin{pmatrix} (1+\norm{x}_2^2)^\beta \; x^T \nabla u_1 \\ (1+\norm{x}_2^2)^\beta \; x^T \nabla u_2 \end{pmatrix} + V(x) \begin{pmatrix} u_1 \\ u_2 \end{pmatrix} \end{align} on $\mathbb{R}^d$, where $\beta > 0$ is a fixed real number and where $V(x)$ is given by \begin{align*} V(x) = v(x) \begin{pmatrix} -1 & -1 \\ -2 & -2 \end{pmatrix} + w(x) \begin{pmatrix} -1 & -1 \\ -1 & -1 \end{pmatrix} \end{align*} for two functions $v,w\colon \mathbb{R}^d \to (0,\infty)$ that are bounded and locally $\alpha$-Hölder continuous with $\alpha \in (0,1)$. Examples of this type (in fact, of a more general type) are considered in~\cite[Section~4]{Delmonte2011}, where it is shown that this equation fits into the setting of the present section and that the solution semigroup of~\eqref{eq:concrete-example-on-unbounded-domain} is immediately compact on $C_b(\mathbb{R}^d; \mathbb{R}^2)$ \cite[Theorem~4.2]{Delmonte2011} (but note that the parameter $\alpha$ is used with a different meaning there). It is not difficult to see that the matrix $V(x)$ is dissipative with respect to the $\ell^\infty$-norm on $\mathbb{R}^2$ for each $x \in \mathbb{R}^d$ (a short computation to this effect is given at the end of this subsection). Therefore, it follows from Theorem~\ref{thm:convergence-on-unbounded-domains} that the solution semigroup of~\eqref{eq:concrete-example-on-unbounded-domain} converges with respect to the operator norm on $C_b(\mathbb{R}^d; \mathbb{R}^2)$ as $t \to \infty$. The function $(\one, -\one)^T$ is an equilibrium, so the limit is non-zero for some initial values. \end{example} A few words about the choice of the potential $V$ in the preceding example are in order. The point about the sum of the two matrices in the definition of $V(x)$ is that it prevents the matrices $V(x)$ from being simultaneously diagonalisable (except for very simple choices of $v$ and $w$). In the case of simultaneous diagonalisability of the $V(x)$, we could transform the equation~\eqref{eq:concrete-example-on-unbounded-domain} into a form where both components decouple -- which means that we would essentially deal with two unrelated scalar equations.
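To substantiate the dissipativity claim made in Example~\ref{ex:concrete-example-on-unbounded-domain}, let us briefly carry out the computation announced there; it merely consists of applying, to the concrete potential, the row criterion recalled before Corollary~\ref{cor:semigroups-for-degenerate-parabolic-pde-contractive}. For each $x \in \mathbb{R}^d$ we have
\begin{align*}
	V(x) = \begin{pmatrix} -v(x)-w(x) & -v(x)-w(x) \\ -2v(x)-w(x) & -2v(x)-w(x) \end{pmatrix},
\end{align*}
and since $v(x), w(x) > 0$, in each row the diagonal entry plus the modulus of the off-diagonal entry equals $0$; hence $V(x)$ is indeed dissipative with respect to the $\ell^\infty$-norm on $\mathbb{R}^2$. The same formula shows that $V(x)$ annihilates the vector $(1,-1)^T$ for every $x \in \mathbb{R}^d$, which -- together with the fact that the differential operator annihilates constant functions -- explains why $(\one,-\one)^T$ is an equilibrium of~\eqref{eq:concrete-example-on-unbounded-domain}.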
\section{On poles of operator resolvents} \label{section:poles-of-op-resolvents} In the following proposition we briefly recall a result about poles of the resolvent of a linear operator. This result is needed in the proof of Proposition~\ref{prop:compactness-for-single-operators}. \begin{proposition} \label{prop:pole-of-resolvent-by-resolvent-convergence} Let $T$ be a bounded linear operator on a complex Banach space $E$ and let $(\mu_j)_j$ be a net in the resolvent set of $T$ which converges to a number $\lambda \in \mathbb{C}$. Then the following assertions hold: \begin{enumerate}[\upshape (a)] \item $\lambda \in \mathbb{C} \setminus \sigma(T)$ if and only if the net $\big((\mu_j - \lambda)\mathcal{R}(\mu_j,T)\big)_j$ converges to the zero operator. \item $\lambda$ is a spectral value of $T$ and a first order pole of the resolvent function $\mathcal{R}(\mathord{\,\cdot\,}, T)$ if and only if the net $\big((\mu_j - \lambda)\mathcal{R}(\mu_j,T)\big)_j$ converges to a non-zero operator $P \in \mathcal{L}(E)$. \\ In this case, $P$ is the spectral projection associated with the pole $\lambda$. \end{enumerate} \end{proposition} \begin{proof} (a) The implication ``$\Rightarrow$'' is obvious, and the converse implication ``$\Leftarrow$'' follows from the well-known fact that, for every $\mu$ in the resolvent set of $T$, the norm of $\mathcal{R}(\mu,T)$ is no less than $1/\dist(\mu, \sigma(T))$ (where $\dist$ denotes the distance in the complex plane). (b) If $\lambda$ is a spectral value of $T$ and a first order pole of the resolvent, then the net $\big((\mu_j - \lambda)\mathcal{R}(\mu_j,T)\big)_j$ obviously converges to the spectral projection associated with $\lambda$, and this spectral projection is non-zero. Now assume conversely that the net $\big((\mu_j - \lambda)\mathcal{R}(\mu_j,T)\big)_j$ converges to an operator $P \not= 0$. It then follows from~(a) that $\lambda$ is a spectral value of $T$; in particular, the elements of the net $(\mu_j)_j$ are eventually distinct from $\lambda$. Hence, it follows from the resolvent identity that \begin{align} \mathcal{R}(\mu,T)P = \frac{P}{\mu - \lambda} \label{eq:resolvent-and-abel-projection} \end{align} for each $\mu$ in the resolvent set of $T$. From this we immediately obtain $P^2 = P$, i.e., $P$ is a projection; moreover, $P$ clearly commutes with $T$, so $T$ splits over the decomposition $E = \ker P \oplus PE$. It follows from~(a) that $\lambda$ is in the resolvent set of $T|_{\ker P}$. Moreover, we conclude from~\eqref{eq:resolvent-and-abel-projection} that $\lambda$ is a first order pole of the resolvent of $T|_{PE}$. Consequently, $\lambda$ is also a first order pole of the resolvent of $T$. \end{proof} \section{A few facts about nets} \label{section:universal-nets} In this appendix we recall a few facts about nets and universal nets that are needed in the main text, in particular in Proposition~\ref{prop:characterisation-of-compact-and-non-empty}. Recall that a net $(x_j)$ in a set $X$ is called a \emph{universal net} if, for each $A \subseteq X$, the net is either eventually contained in $A$ or eventually contained in $X \setminus A$. If a subnet $(x_{j_i})$ of a net $(x_j)$ is a universal net, then we call $(x_{j_i})$ a \emph{universal subnet} of $(x_j)$. It follows from Zorn's lemma that every net has a universal subnet. If $X$ is a topological Hausdorff space, then a subset $A \subseteq X$ is compact if and only if every universal net in $A$ converges to an element of $A$.
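Let us add one elementary observation which is not needed in the sequel but which may illustrate why one works with subnets rather than subsequences here: a sequence $(x_n)_{n \in \mathbb{N}}$ in a set $X$, considered as a net, is universal if and only if it is eventually constant. Indeed, if infinitely many pairwise distinct values occur in the sequence, one can split these values into two infinite sets and thus obtain a set $A \subseteq X$ such that the sequence is neither eventually contained in $A$ nor eventually contained in $X \setminus A$; hence, if the sequence is universal, some value $x$ occurs infinitely often, and testing universality against the singleton $\{x\}$ shows that the sequence is eventually equal to $x$.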
In the following lemma we collect a few facts about metric spaces. For a proof we refer for instance to \cite[Theorem~B.3]{Glueck2019}, where these facts are given in a slightly more general topological setting. \begin{lemma} \label{lemma:set-of-cluster-points} Let $(x_\alpha)_{\alpha \in I}$ be a net in a metric space $X$ and let \begin{align*} C \coloneqq \bigcap_{\beta \in I} \overline{\{x_\alpha \colon \alpha \geq \beta\}} \end{align*} be its set of cluster points. Consider the following assertions. \begin{enumerate}[\upshape (i)] \item The set $C$ is non-empty and compact. \item Each subnet of $(x_\alpha)_{\alpha \in I}$ has a convergent subnet. \item Each universal subnet of $(x_\alpha)_{\alpha \in I}$ converges. \item For each cofinal subsequence $(\alpha_n)_{n \in \mathbb{N}}$ in $I$ the sequence $(x_{\alpha_n})_{n \in \mathbb{N}}$ has a cluster point. \end{enumerate} Then $\text{\upshape (i)} \Leftarrow \text{\upshape (ii)}\Leftrightarrow \text{\upshape (iii)} \Rightarrow \text{\upshape (iv)}$. If, in addition, $I$ contains a cofinal sequence, then $\text{\upshape (iv)} \Rightarrow \text{\upshape (i)}$ as well. \end{lemma} \end{document}
\begin{document} \date{} \title{Gossip over Holonomic Graphs} \begin{abstract} A gossip process is an iterative process in a multi-agent system where only two neighboring agents communicate at each iteration and update their states. The neighboring condition is by convention described by an undirected graph. In this paper, we consider a general update rule whereby each agent takes an arbitrary weighted average of its and its neighbor's current states. In general, the limit of the gossip process (if it converges) depends on the order of iterations of the gossiping pairs. The main contribution of the paper is to provide a necessary and sufficient condition for convergence of the gossip process that is independent of the order of iterations. This result relies on the introduction of the novel notion of holonomy of local stochastic matrices for the communication graph. We also provide complete characterizations of the limit and the space of holonomic stochastic matrices over the graph. \noindent {\bf Keywords:} Consensus; Gossiping; Markov Processes; Holonomy; Convergence of Matrix Products \end{abstract} \section{Introduction} Consensus problems have a long history~\cite{degroot} and are closely related to Markov chains~\cite{seneta}. Over the past decades, there has been considerable interest in developing algorithms intended to cause a group of $n>1$ agents to reach a consensus in a distributed manner, see~\cite{Ts3,vicsekmodel,reza1,luc,ReBe05,reachingp1,cutcdc,touri2014,decide,tacrate,xudong} to cite just a few. A simple idea to solve the consensus problem exploits a form of iterative message passing, in which each agent exchanges information with at most {\em one} other agent per iteration. One such exchange is called a {\em gossip}. Whenever two agents gossip, they set their state variables equal to the average of their values before gossiping~\cite{boyd052}. This process, which we term the {\em standard gossip process}, is known to make all agents' values converge to the average of their initial states, provided that the neighbor graph is connected. In the standard gossip process, the update matrix associated with each iteration is {\em doubly} stochastic. Recall that all doubly stochastic matrices share the same left- (and right-) eigenvector, namely the vector ${\bf 1}$ of all $1$'s, corresponding to eigenvalue $1$. Therefore, if a product of doubly stochastic matrices converges to a rank-one matrix, it can only converge to $\frac{1}{n}{\bf 1} {\bf 1}^\top$, where $n$ is the number of agents in the system. In this paper, we enable convergence of a gossip process to an arbitrary {\em weighted} ensemble average. To this end, the update rule of the standard gossip process is generalized to allow neighboring agents to update their states according to a {\em weighted} average of their current values. We emphasize that when a pair of agents, say $i$ and $j$, communicate, they are not required to take the same weighted average. For example, agent $i$ can weigh its and $j$'s values by $1/3$ and $2/3$, whereas agent $j$ weighs its and $i$'s values by $2/5$ and $3/5$. We call this generalized version of a gossip process a {\em weighted gossip} process. The extension of the update rule to (asymmetric) weighted averages at each iteration gives rise to several important questions: (1) Can we still guarantee (exponential) convergence of the weighted gossip process?
(2) Since different gossiping pairs can take different weighted averages and the corresponding stochastic matrices are {\em not} necessarily doubly stochastic, can we characterize the limit of a weighted gossip process (provided that the product of those stochastic matrices converges to a rank-one matrix)? (3) Furthermore, when can the limit of the product be {\em independent} of the order of appearance of the stochastic matrices in the product? (4) Finally, in settings for which questions (1)-(3) have positive answers, can we design an update rule (or, equivalently, the set of stochastic matrices) to ensure convergence of the gossip process to {\em any} desired weighted ensemble average? We address the above four questions in this paper and provide answers to them. To do so, we first introduce a novel notion of holonomy of stochastic matrices assigned to the edges of a given undirected graph that describes the neighbor topology (also referred to as communication topology) of the multi-agent system. We borrow this terminology from Riemannian geometry, where it encodes the transformation of vectors parallel-transported along closed curves. In the present context, we take products of stochastic matrices along edges in arbitrary walks in the graph, and the term holonomy is used to indicate a change of a certain eigenvector corresponding to eigenvalue $1$ of the product along any closed walk in the graph. Holonomy in this context is related to the so-called {\em graph balance} for signed (or, more generally, voltage) graphs~\cite{chen2017cluster}, as both concepts require the net effect of a contextually-defined transformation over a closed walk to be trivial. Based on the notion of holonomy, we establish a necessary and sufficient condition for a weighted gossip process to (exponentially) converge to a {\em unique} rank-one matrix, and characterize this limit explicitly. Note that any such limit can always be written as ${\bf 1} p^\top$ where $p$ is a probability vector. The above facts imply the existence of a map $\pi$ that assigns to a set of holonomic stochastic matrices the corresponding probability vector $p$. Moreover, we show that for an arbitrary probability vector $p$ in the interior of the standard simplex, there exists a set of holonomic stochastic matrices so that a corresponding weighted gossip process converges to ${\bf 1} p^\top$, thus providing an affirmative answer to question (4) above. In other words, we show that the map $\pi$ is onto the interior of the standard simplex. Another major contribution of the paper is to provide a complete characterization of the preimage $\pi^{-1}(p)$, i.e., all sets of holonomic stochastic matrices that are mapped to $p$ by $\pi$. This paper shares the same spirit as the recent work~\cite{BC2020triangulated}, in the sense that both consider convergence to arbitrary rank-one stochastic matrices, and both provide conditions for the limits of products of certain stochastic matrices to be independent of their orders of appearance in the products. However, the specific settings and analyses differ significantly. In the present work, the stochastic matrices considered have a nontrivial $2\times 2$ principal submatrix, with the remaining part being an identity matrix, reflecting the fact that communication at each iteration is pairwise. Stochastic matrices appearing in~\cite{BC2020triangulated} have in contrast a non-trivial $3\times 3$ principal submatrix, reflecting communications among three agents simultaneously.
This seemingly minor extension in fact increases the complexity of the products drastically. As a trade-off for the reduced complexity of the products in the current paper, we do not impose any restriction on the structure of the graph (as long as it is connected); whereas in~\cite{BC2020triangulated}, only a special class of rigid graphs, termed triangulated Laman graphs, allowed us to draw conclusions similar to those of the current paper. Our work answers questions about weighted gossip processes that have not been investigated in the extant literature. For a comparison with existing works, we describe a few recent results about the {\em standard} gossip process. For a deterministic standard gossip process, if each pair of neighboring agents gossips infinitely often, then all agents' states asymptotically converge to the average of their initial values~\cite{cutcdc}; if there exists a period $T$ such that each pair gossips at least once within each successive subsequence of length $T$, then convergence is reached exponentially fast~\cite{pieee}. Moreover, if the underlying graph is a tree and each neighboring pair is restricted to gossip only once per period, it is known~\cite{tacgossip} that the convergence rate is fixed and invariant over all possible periodic gossip sequences the graph allows. For a randomized standard gossip process, in which a pair of neighboring agents is randomly selected to gossip at each iteration, all agents' states converge to the average of their initial values almost surely and in mean square~\cite{boyd052}. Finally, we emphasize that the terms ``gossip'' and ``weighted gossip'' have, over the years, had evolving meanings. We have defined a gossip process here as an iterative process in which interactions are between pairs of agents only. For some, a ``gossip'' process is moreover required to have agents converge to the average of their initial states~\cite{boyd052}; also, ``weighted gossip'' is used in~\cite{wgossip} to describe a variant of the push-sum algorithm~\cite{pushsum}, whose purpose is to reach an average consensus over directed graphs. These works thus differ from ours. The remainder of the paper is organized as follows: We gather a few key notations and conventions at the end of this section. The notion of holonomy and the main results of the paper are presented in Section~\ref{sec:mainresult}. Analyses and proofs of the main results are provided in Section~\ref{sec:proofs}. The paper ends with a conclusion in Section~\ref{end}. \noindent {\bf Notations and conventions.} We denote by $G= (V, E)$ an undirected graph, without multiple edges but, possibly, with self-loops. We call $G$ {\em simple} if it has no self-loops. The graphs we consider here are connected. The node set is by convention denoted by $V = \{v_1,\ldots, v_n\}$ and the edge set by $E$. We refer to the edge linking nodes $v_i$ and $v_j$ as $(v_i,v_j)$. A self-loop is then of the form $(v_i,v_i)$. Given a sequence of edges $\gamma=e_1\cdots e_k$ in $E$, we say that a node $v\in V$ is {\em covered} by $\gamma$ if it is incident to an edge in $\gamma$. For $\gamma = e_1e_2\cdots$ an infinite sequence, we say that $v$ is covered infinitely often by $\gamma$ if there exists an infinite number of sub-indices $i_1<i_2<\cdots$ such that $e_{i_j}$ is incident to $v$. Given a sequence $\gamma=e_1e_2\cdots$, we say that $\gamma'$ is a string of $\gamma$ if it is a contiguous subsequence, i.e., $\gamma' = e_ie_{i+1}\cdots e_\ell$ for some $i \ge 1$ and $\ell \ge i$.
Let $\gamma = e_1\cdots e_k$ be a finite sequence and $e_{k+1}$ be an edge of $G$. Denote by $\gamma \vee e_{k+1}$ the sequence $e_1\cdots e_k e_{k+1}$ obtained by adding $e_{k+1}$ to the end of $\gamma$. For a given undirected graph $G$ as above, we denote by $\vec G = (V, \vec E)$ a {\em directed graph} on the same node set and with a ``bidirectionalized'' edge set; precisely, $\vec E$ is defined as follows: we assign to every edge $(v_i,v_j)$ of $G$, $i\neq j$, two directed edges $v_iv_j$ and $v_jv_i$; to a self-loop $(v_i,v_i)$ of $G$ corresponds a self-loop $v_iv_i$ of $\vec G$. Let $w = v_{i_1}\ldots v_{i_k}$ be a walk in the directed graph $\vec G$, i.e., every $v_{i_\ell}v_{i_{\ell + 1}}$, for $\ell =1,\ldots, k-1$, is an edge of $\vec G$. We call $v_{i_1}$ the starting-node and $v_{i_k}$ the ending-node of~$w$. We define by $w^{-1}:= v_{i_k} v_{i_{k-1}} \ldots v_{i_1}$ the {\em inverse} of $w$. Let $w' = v_{i_k}v_{i_{k+1}}\cdots v_{i_m}$ be another walk in $G$, where the starting-node of $w'$ is the same as the ending-node of $w$. We denote by $ww' = v_{i_1}\ldots v_{i_k}\ldots v_{i_m}$ the {\em concatenation} of the two walks. A square nonnegative matrix is called a {\em stochastic matrix} if all its row-sums equal one. A matrix is {\em irreducible} if it is not similar via a permutation to a block upper triangular matrix (with strictly more than one block of positive size). The graph of an $n\times n$ matrix is a directed graph on $n$ nodes: there is a directed edge from node $v_j$ to node $v_i$ whenever the $ij$th entry of the matrix is nonzero. It is known that a matrix is irreducible if and only if its graph is strongly connected~\cite[Theorem 6.2.24]{horn1}. On the space of $n \times m$ real matrices, we define the following semi-norm: for a given $A\in\mathbb{R}^{n\times m}$, $$\|A\|_S := \max_{1\le j\le m}\max_{1\le i_1,i_2\le n}|a_{i_1j}-a_{i_2j}|.$$ The zero-set of this semi-norm is the set of matrices with all rows equal. See~\cite{wolf63} for more details. The {\em support of a matrix} $A = [a_{ij}]$, denoted by $\operatorname{supp}(A)$, is the set of indices $ij$ such that $a_{ij}\neq 0$. We denote by $\min A$ the smallest non-zero entry of $A$: $$\min A = \min_{ij \in \operatorname{supp}(A)} a_{ij}.$$ In this paper, we will only consider $\min A$ for $A$ being a nonnegative matrix. We say that $p \in \mathbb{R}^n$ is a {\em probability vector} if $p_i \geq 0$ and $\sum_{i=1}^n p_i=1$. The set of probability vectors in $\mathbb{R}^n$ is the $(n-1)$-simplex, which is denoted by $\Delta^{n-1}$. Its interior with respect to the standard Euclidean topology in $\mathbb{R}^n$ is denoted by $\operatorname{int} \Delta^{n-1}$. If $p\in \operatorname{int} \Delta^{n-1}$, then all entries of $p$ are positive. We let ${\bf 1}$ be a vector of all ones, whose dimension will be clear from the context. Given a real number $x$, we denote by $\lfloor x \rfloor$ the largest integer that is smaller than or equal to $x$, i.e., $\lfloor x \rfloor := \max_{z \in \mathbb{Z}} \lbrace z \mid z \leq x \rbrace$. The Cartesian product of $k$ linearly independent open bounded line segments in an Euclidean space is called a { \em $k$-dimensional open box}. An open box is not necessarily parallel to the coordinate axes. We denote by $\mathbb{R}_+$ the set of positive real numbers. \section{Main Results}\label{sec:mainresult} We now present the main results proved in this paper, and the main concepts introduced. 
\subsection{Local stochastic matrices and holonomy for digraphs}\label{ssec:lsmholo} Let $G = (V, E)$ be a simple graph on $n$ nodes. Each node represents an agent, and each agent is assigned a variable $x_i(t) \in \mathbb{R}$ at the time step~$t$. To each edge $(v_i,v_j)$ of $G$, with $i < j$, corresponds a potential interaction of agents $i$ and $j$, whereby they update their current states $x_i(t)$ and $x_j(t)$ (if this gossip pair is activated) according to the rule: \begin{equation*}\label{eq:defupdate} \begin{bmatrix} x_i(t+1) \\ x_j(t+1) \end{bmatrix}=\bar A_{ij}\begin{bmatrix} x_i(t) \\ x_j(t) \end{bmatrix}, \end{equation*} where $\bar A_{ij}$ is the $2$-by-$2$ row stochastic matrix given by \begin{equation}\label{eq:defbarAij} \bar A_{ij}:= \begin{bmatrix} 1-a_{ij} & a_{ij} \\ a_{ji} & 1-a_{ji} \end{bmatrix}, \end{equation} with $a_{ij}$ and $a_{ji}$ real numbers in the open interval $(0,1)$. During this update, all the other agents $k$, for $k\neq i, j$, keep their states unchanged: $x_k(t+1)=x_k(t)$. Thus, if we let $E_{ij}$ be the $n$-by-$n$ square matrix with $1$ at the $ij$th entry and $0$ elsewhere, then the update of the entire network can be described by $x(t+1) = A_{ij} x(t)$, where $A_{ij}$ is the $n$-by-$n$ row stochastic matrix defined as follows: \begin{align}\label{eq:deflsm} A_{ij} := \; & (1-a_{ij}) E_{ii} + a_{ij} E_{ij} + a_{ji} E_{ji} + (1-a_{ji}) E_{jj} \nonumber\\ & + \sum_{k\neq i, j} E_{kk}. \end{align} In words, the matrix $A_{ij}$ is such that the principal submatrix associated with columns/rows $i$ and $j$ is the $2\times 2$ stochastic matrix $\bar A_{ij}$, and the complementary principle submatrix is the identity matrix $I_{n-2}$. Note that $A_{ij} = A_{ji}$ from~\eqref{eq:deflsm}. We call these $A_{ij}$'s, for $(v_i,v_j)\in E$, {\bf local stochastic matrices} of $G$. The graph of each local stochastic matrix is a bi-directional graph with exactly two directed edges $v_iv_j$ and $v_jv_i$, and self-arcs at all $n$ nodes. A local stochastic matrix $A_{ij}$, for $(v_i,v_j)$ an edge of $G$, has two degrees of freedom, namely $a_{ij}$ and $a_{ji}$ as defined in~\eqref{eq:deflsm}. We denote by $\mathcal{S}_G$ the set of $|E|$-tuples of local stochastic matrices over a connected graph $G = (V, E)$, which is an open convex subset of an Euclidean space of dimension $2|E|$. Given an ordering of the edges in $G$, we will use $\mathcal{A}=(A_{ij})_{(v_i,v_j) \in E}$ to denote an element of $\mathcal{S}_G$. For a finite sequence $\gamma = e_{1} \ldots e_{k}$ of edges in $G$ and for a given pair of integers $0\le s \le t \le k$, we define a product of local stochastic matrices as follows: for $t\geq s+1$, $$ P_\gamma(t:s) := A_{e_t} A_{e_{t - 1}} \cdots A_{e_{s+1}}, $$ and $P_\gamma(t:s)=I$ for $t \leq s$. For the case where $s = 0$ and $t = k$, we will simply write $P_\gamma = P_\gamma(k:0)$. The notation can be used on infinite strings $\gamma$, with $k = \infty$, as well. We single out the following sequences: \begin{Definition}[Spanning sequence]\label{def:spanningsequence} Let $G = (V, E)$ be a simple, undirected graph. A finite sequence of edges of $G$ is {\bf spanning} if it covers a spanning tree of $G$. An infinite sequence of edges is {\em spanning} if it has infinitely many disjoint finite strings that are spanning. An infinite sequence is {\em $m$-spanning} if every string of length $m$ is spanning. \end{Definition} Let $\vec G = (V, \vec E)$ be the directed version of $G$. 
For each directed edge $v_iv_j$ in $\vec G$, we define the ratio $$ r_{ij} := \frac{a_{ij}}{a_{ji}}. $$ Note that $r_{ij}$ is well-defined, because $a_{ji}\in (0,1)$. Also, it follows from the definition that $r_{ji}=r_{ij}^{-1}$. Let $w = v_{i_1}\ldots v_{i_k}$ be a walk in $\vec G$. We define \begin{equation}\label{eq:defRw} R_w:= \prod^{k-1}_{\ell = 1} r_{i_\ell,i_{\ell+1}}. \end{equation} Let $w_1$ and $w_2$ be two walks in $\vec G$, with $w_1$ ending at the starting node of $w_2$. Then, $R_{w_1w_2}=R_{w_1}R_{w_2}$. In particular, setting $w := w_1=w_2^{-1}$, we get $R_{ww^{-1}}=1$. The following definition is instrumental to our results: \begin{Definition}[Holonomic local stochastic matrices]\label{def:holonomicAij} Let $C$ be a cycle in $\vec G$ of length greater than $2$. The local stochastic matrices $A_{ij}$ are {\bf holonomic} for $C$ if $R_C=1$, and are {\it holonomic} for $G$ if they are holonomic for every cycle of $\vec G$ of length greater than $2$. \end{Definition} As announced in the introduction, we borrow the word {\em holonomic} from differential geometry to characterize this set of matrices. The justification of the name is the following: in geometry, this notion, roughly speaking, describes the variation of some quantity (e.g., a parallel-transported vector) along loops in a given space. If there is no variation of the quantity while `traveling' around the loop, the process is said to be `holonomic'. Here, the space is the graph and the quantity is the product of the ratios $r_{ij}$ along cycles of the graph. The notion of holonomy will appear through a formula, established below, that involves the products of the $r_{ij}$'s along walks in $G$. Clearly, for the product of these $r_{ij}$ along walks to depend {\em only} on the starting- and ending-nodes, it is necessary that such products along cycles be equal to~$1$; indeed, these cycles can be inserted an arbitrary number of times into a walk without changing its starting or ending node. Because of this correspondence, we use the term `holonomy' for the property of the matrices $A_{ij}$ described in Definition~\ref{def:holonomicAij}. \begin{Remark}\normalfont Note that if $C$ is a $2$-cycle, then $R_C$ is $1$ by definition. Thus, if $G$ is a tree, then every set of local stochastic matrices is holonomic for $G$. \end{Remark} \subsection{Statement of the main results} We have three main results: the first two deal with convergence of infinite products of stochastic matrices and the existence of a unique limiting rank-one matrix, and the last one states that one can choose local stochastic matrices to obtain any desired limiting distribution of their products. \subsubsection{Convergence of products and invariance of limits} For a given $\mathcal{A}\in \mathcal{S}_G$, we define \begin{equation}\label{eq:underlinea} \underline {a}:= \min_{(v_i,v_j)\in E} (\min A_{ij}) \quad \mbox{and} \quad {\epsilon:= \underline{a}^{n - 1}.} \end{equation} The first main result is as follows: \begin{Theorem}\label{th:main1} Let $G = (V, E)$ be a simple, connected undirected graph on $n$ nodes. Then, for every set of local stochastic matrices $A_{ij}\in \mathbb{R}^{n\times n}$, $(v_i,v_j)\in E$, defined as in~\eqref{eq:deflsm}, the following two statements are equivalent: \begin{enumerate} \item[(i)] There is a unique $p\in \operatorname{int} \Delta^{n-1}$ such that for any infinite spanning sequence $\gamma$, $P_\gamma ={\bf 1} p^\top.$ \item[(ii)] The $A_{ij}$ are holonomic for $G$.
\end{enumerate} Furthermore, if the $A_{ij}$ are holonomic for $G$ and $\gamma$ is $m$-spanning, then \begin{equation}\label{eq:exponentialconvergence} \|P_{\gamma}(t:0) \|_S \le (1-\epsilon)^{\frac{t}{m \lfloor \frac{n}{2} \rfloor} - 1}. \end{equation} \end{Theorem} \begin{Remark}\normalfont Note that uniqueness of the probability vector $p$ is with respect to a given set of holonomic local stochastic matrices $A_{ij}$, and with respect to {\em all} infinite spanning sequences $\gamma$. The dependence of $p$ on the $A_{ij}$'s will be characterized shortly in Algorithm~1 below. Also, it is easy to verify that the standard gossiping process has local stochastic matrices such that $\bar A_{ij} = \frac{1}{2} {\bf 1}{\bf 1}^\top$ for all $(v_i,v_j)\in E$, so $r_{ij} = 1$. Thus, these local stochastic matrices $A_{ij}$ are holonomic for {\em any} connected graph. For this special case, the corresponding probability vector~$p$ is simply $\frac{1}{n}{\bf 1}$. \end{Remark} Note that an infinite spanning sequence is obtained with probability one by selecting, at each time step, an edge of $E$ uniformly at random. The following fact is then an immediate consequence of Theorem~\ref{th:main1}: \begin{Corollary}\label{cor:randomsequence} Let $\gamma$ be a random sequence obtained by selecting, at each time step, an edge of $E$ uniformly at random. If the $A_{ij}$ are holonomic for $G$, then there exists a unique probability vector $p$ such that $P_\gamma = {\bf 1} p^\top$ with probability one. \end{Corollary} We characterize below the probability vector $p$. We do so by first presenting a positive vector, denoted by $q = [q_1;\cdots; q_n]$, and then normalizing it. The entire procedure is summarized in the following algorithm: \noindent {\bf Algorithm 1.} Construction of $p$: \begin{description} \item[Step 1:] Pick an arbitrary node, say $v_1$, of $G$, and set $q_1 := 1$. \item[Step 2:] For all nodes $v_i$, $i \neq 1$, of $G$, let $w$ be an arbitrary walk {\em from} $v_1$ {\em to} $v_i$ in $\vec G$ (since $G$ is connected, such a walk always exists). Define $q_i:= R_w$. \item[Step 3:] Normalize the vector $q$ by \begin{equation}\label{eq:defp} p := \frac{q}{\sum^n_{i = 1} q_i}. \end{equation} \end{description} It should be clear that every entry of $q$, defined in Steps 1 and 2, is positive, so the vector $p$ is well defined. \begin{Theorem}\label{th:defp} The probability vector $p$ in Theorem~\ref{th:main1} is given by~\eqref{eq:defp}. \end{Theorem} \begin{Remark}\normalfont From its construction, $p$ appears to depend both on the base node chosen ($v_1$ above, Step~1) and on the walks from $v_1$ to the nodes $v_i$ chosen (Step~2). On the way to proving the main results, we will show that, under the assumption that the local stochastic matrices are holonomic for $G$, $p$ is in fact independent of these two choices. \end{Remark} \subsubsection{The space of holonomic local stochastic matrices} In this subsection, we study the set of $|E|$-tuples of holonomic local stochastic matrices for $G$ as a subset of $\mathcal{S}_G$: \begin{equation}\label{eq:defHG} \mathcal{H}_G := \{\mathcal{A} \in \mathcal{S}_G \mid R_C=1 \mbox{ for all cycles in } \vec G\}. \end{equation} Since holonomic constraints arise only if cycles are present, if $G$ is a tree, then $\mathcal{H}_G = \mathcal{S}_G$. By Theorems~\ref{th:main1} and~\ref{th:defp}, an element $\mathcal{A}\in \mathcal{H}_G$ gives rise to a unique probability vector $p$, defined in Algorithm~1.
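For readers who prefer a computational description, the following Python sketch is a purely illustrative example of ours (it is not part of the formal development; the triangle graph and the numerical weights are arbitrary choices). It builds the local stochastic matrices of Eq.~\eqref{eq:deflsm}, checks the holonomy condition of Definition~\ref{def:holonomicAij} on the unique cycle of length greater than $2$, and then runs Steps 1--3 of Algorithm~1, choosing the walks of Step~2 by breadth-first search.
\begin{verbatim}
import numpy as np
from collections import deque

# Gossip weights a[(i, j)] = a_ij in (0, 1) on the triangle graph with
# nodes 0, 1, 2 (0-based indices); these values are one holonomic example.
a = {(0, 1): 0.3, (1, 0): 0.6,
     (1, 2): 0.2, (2, 1): 0.1,
     (0, 2): 0.5, (2, 0): 0.5}
n = 3

def local_matrix(i, j):
    """A_ij as in Eq. (deflsm): the identity, except for the 2 x 2 principal
    block on rows/columns i and j."""
    A = np.eye(n)
    A[i, i], A[i, j] = 1.0 - a[(i, j)], a[(i, j)]
    A[j, i], A[j, j] = a[(j, i)], 1.0 - a[(j, i)]
    return A

def R(walk):
    """Product of the ratios r_ij = a_ij / a_ji along a walk (Eq. (defRw))."""
    out = 1.0
    for u, v in zip(walk, walk[1:]):
        out *= a[(u, v)] / a[(v, u)]
    return out

# Holonomy check on the only cycle of length > 2: here R_C = 0.5 * 2 * 1 = 1.
print("R_C =", R([0, 1, 2, 0]))

def algorithm_1(base=0):
    """Step 1: q_base = 1.  Step 2: q_i = R_w for a walk from base to node i,
    found here by breadth-first search (any walk gives the same value when the
    weights are holonomic).  Step 3: normalize q to obtain p."""
    q = {base: 1.0}
    queue = deque([base])
    while queue:
        u = queue.popleft()
        for (x, y) in a:
            if x == u and y not in q:
                q[y] = q[u] * a[(u, y)] / a[(y, u)]   # extend the walk by (u, y)
                queue.append(y)
    qvec = np.array([q[i] for i in range(n)])
    return qvec / qvec.sum()

p = algorithm_1()
print("p =", p)                                       # [0.4 0.2 0.4] here
# Sanity check of Proposition (prop:p) below: p is a common left eigenvector.
for (i, j) in [(0, 1), (1, 2), (0, 2)]:
    assert np.allclose(p @ local_matrix(i, j), p)
\end{verbatim}
Changing any single weight (for instance $a_{21}$) generically breaks the identity $R_C=1$; by Theorem~\ref{th:main1}, there is then no single probability vector $p$ such that $P_\gamma = {\bf 1}p^\top$ for every infinite spanning sequence $\gamma$.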
Formally, we define a map $\pi$ as follows: \begin{equation}\label{eq:defpi} \pi: \mathcal{A} \in \mathcal{H}_G \mapsto p \in \operatorname{int} \Delta^{n-1}. \end{equation} Following the steps of Algorithm~1, it is easy to see that $\pi$ is analytic. We now characterize the preimages $\pi^{-1}(p)$ precisely: \begin{Theorem}\label{th:mainsurjective} The map $\pi$ defined in~\eqref{eq:defpi} is surjective. For each $p\in \operatorname{int} \Delta^{n-1}$, the preimage $\pi^{-1}(p)$ is an $|E|$-dimensional open box. \end{Theorem} It is an immediate consequence of the theorem that the dimension of $\mathcal{H}_G$ is $(n+|E|-1)$; indeed, since the dimension of $\pi^{-1}(p)$ is independent of $p$ and since $\pi$ is onto $\operatorname{int} \Delta^{n-1}$, the dimension of $\mathcal{H}_G$ is the sum of the dimension of $\Delta^{n-1}$, which is $(n-1)$, and the dimension of some (and, hence, any) preimage $\pi^{-1}(p)$, which is $|E|$. Note that the segments defining the box are not necessarily aligned with the coordinate axes, and will generally be slanted. \section{Analysis and Proofs of Theorems}\label{sec:proofs} In this section, we establish relevant propositions and prove the main results. \subsection{Holonomy and Algorithm~1}\label{ssec:holoalgo} In this subsection, we show that the output of Algorithm~1 is indeed independent of the base node chosen in Step 1 and of the walks chosen in Step 2. These statements are proven in Proposition~\ref{prop:pindepbase}, and in Proposition~\ref{prop:1forclosedwalk} and Corollary~\ref{cor:corpropclosedwalk}, respectively. \begin{Proposition}\label{prop:1forclosedwalk} Let $A_{ij}$ be a set of local stochastic matrices that are holonomic for $G$, and $w$ be a closed walk in $\vec G$. Then, $R_w = 1$. \end{Proposition} \begin{proof} Any closed walk $w$ can be decomposed, edge-wise, into cycles, labeled as $C_1, \ldots, C_k$ (edges being counted with multiplicity). Then, $R_w = R_{C_1}\cdots R_{C_k}$. From Definition~\ref{def:holonomicAij}, $R_{C_i} = 1$ for every $i =1,\ldots, k$ and, hence, $R_w = 1$. \end{proof} The next result follows as a corollary to Proposition~\ref{prop:1forclosedwalk}: \begin{Corollary}\label{cor:corpropclosedwalk} Let $A_{ij}$ be a set of local stochastic matrices that are holonomic for $G$. Let $w$ and $w'$ be two distinct walks in $\vec G$ from the same node $v_i$ to the same node $v_j$. Then, $R_{w}=R_{w'}$. \end{Corollary} \begin{proof} By concatenating $w$ with $w'^{-1}$, we obtain a closed walk, which we denote by $w^*$. On one hand, by Proposition~\ref{prop:1forclosedwalk}, $R_{w^*} = 1$. On the other hand, $R_{w^*} = R_w R_{w'^{-1}} = R_{w}/R_{w'}$. It follows that $R_{w} = R_{w'}$. \end{proof} The above corollary shows that if the base node $v_i$, chosen in Step~1 of Algorithm 1, is fixed, then the values of the other entries $q_j$, for $j\neq i$, are independent of the choice of walks from $v_i$ to $v_j$. Though the value of the vector $q$ depends on the particular choice of base node, we show below that this dependence only changes the normalization constant. Consequently, the value of the vector $p$ in Step~3 is independent of said base node. We now let $q$ and $q'$ be the vectors obtained from Algorithm~1 by using $v$ and $v'$, respectively, as the base nodes. \begin{Proposition}\label{prop:pindepbase} Let $w$ be any walk from $v'$ to $v$ in $\vec G$. Then, $q' = R_w q$. \end{Proposition} \begin{proof} We establish the proposition by showing that for $1\leq i\leq n$, we have $q'_i = R_w q_i$.
Let $w_i$ be a walk from node $v$ to $v_i$ (if $v_i = v$, then $w_i$ can be the empty walk), and $w'_i$ be the walk from node $v'$ to $v_i$ obtained by concatenating the given $w$ and $w_i$. Then, $R_{w'_i} = R_w R_{w_i}$. From Algorithm 1, we know that $q_i = R_{w_i}$ and $q'_i = R_{w'_i}$, so $q'_i = R_w q_i$ as desired. \end{proof} \subsection{A common left-eigenvector}\label{ssec:proppv} This subsection is devoted to first showing that the vector $p$ of Theorem~\ref{th:defp} is in fact a left-eigenvector of all local stochastic matrices $A_{ij}$ for $G$, associated with the eigenvalue~$1$, provided that these matrices are holonomic for $G$. This result is the first step in the proof of Theorem~\ref{th:main1}. We also show here that if the holonomic constraint is not met, the vector $p$ is not well-defined. \begin{Proposition}\label{prop:p} There is a probability vector $p$ such that $p^\top A_{ij} = p^\top$ for all $(v_i,v_j)\in E$ if and only if the set of $A_{ij}$ is holonomic for $G$. Moreover, if such $p$ exists, then it is unique. \end{Proposition} To prove Proposition~\ref{prop:p}, we use the following lemmas: \begin{Lemma}\label{lem:uniquep} Let $\bar r_{ij}:=[1; r_{ij}] \in \mathbb{R}^2$. Then, $\bar r_{ij}$ is the unique left-eigenvector (up to scaling) of $\bar A_{ij}$ corresponding to eigenvalue~$1$. \end{Lemma} \begin{proof} Recall that $r_{ij} = a_{ij}/a_{ji}$. It follows that \begin{align*} \bar A_{ij}^\top \bar r_{ij} & = \begin{bmatrix} 1-a_{ij} & a_{ji} \\ a_{ij} & 1-a_{ji} \end{bmatrix} \begin{bmatrix} 1 \\ r_{ij} \end{bmatrix} = \begin{bmatrix} (1-a_{ij}) + a_{ji} r_{ij} \\ a_{ij} + (1-a_{ji}) r_{ij} \end{bmatrix} \\ & = \begin{bmatrix} (1-a_{ij}) + a_{ij} \\ a_{ji} r_{ij} + (1-a_{ji}) r_{ij} \end{bmatrix} = \begin{bmatrix} 1 \\ r_{ij} \end{bmatrix} = \bar r_{ij}, \end{align*} so $\bar r_{ij}$ is a left-eigenvector of $\bar A_{ij}$ corresponding to eigenvalue~$1$. The uniqueness follows from the fact that $\bar A_{ij}$ is an irreducible stochastic matrix (with all entries being positive). \end{proof} We next have the following fact: \begin{Lemma}\label{lem:newuniquep} If there exists a vector $p \neq 0$ such that $p^\top A_{ij} = p^\top$ for any $(v_i,v_j)\in E$, then for any walk $w$ in $\vec G$ from node $v_\ell$ to node $v_k$, it holds that $p_k = R_w p_\ell$. \end{Lemma} \begin{proof} Since $p^\top A_{ij} = p^\top$, the vector $[p_i;p_j]$, with $i < j$, is a left-eigenvector of $\bar A_{ij}$ corresponding to eigenvalue $1$. From Lemma~\ref{lem:uniquep}, $[p_i;p_j]$ is necessarily proportional to $\bar r_{ij}$ and thus $p_j = r_{ij} p_i$. Thus, we can apply this relation repeatedly along the sequence of the edges in $w$ and obtain that $p_k = R_w p_\ell$. \end{proof} With Lemmas~\ref{lem:uniquep} and \ref{lem:newuniquep} above, we prove Proposition~\ref{prop:p}: \begin{proof}[Proof of Proposition~\ref{prop:p}] We first assume that the set of $A_{ij}$ is holonomic for $G$. Let $q$ and $p$ be given as in Algorithm 1. Since $q$ and $p$ differ by a multiplicative factor, it suffices to show that $q^\top A_{ij} = q^\top$. By construction (see Eq.~\eqref{eq:deflsm}), the matrix $A_{ij}$ is equal to the identity matrix save for the $2\times 2$ principal submatrix at columns/rows $i$ and $j$, $\bar A_{ij}$. Without loss of generality, we assume that $i < j$, and let $\bar q_{ij}:= [q_i;q_j]$. It is enough to show that for any pair $i <j$, $\bar q_{ij}$ is a left-eigenvector of $\bar A_{ij}$ with eigenvalue~$1$. 
Since $(v_i,v_j)$ is an edge of $G$, $q_j = r_{ij} q_i$ by Algorithm~1. Thus, $\bar q_{ij}$ is proportional to $\bar r_{ij}$ introduced in Lemma~\ref{lem:uniquep} and, hence, $\bar q_{ij}^\top \bar A_{ij} = \bar q_{ij}^\top$. We now show that $p$ is the only probability vector that satisfies $p^\top A_{ij} = p^\top $ for all $(v_i,v_j)\in E$. Let $p'$ be another such probability vector. For every $A_{ij}$, the equality $p'^\top A_{ij} = p'^\top$ implies that the two entries $p'_i$ and $p'_j$ satisfy $p'_j = r_{ij} p'_i$ by Lemma~\ref{lem:uniquep}. By Lemma~\ref{lem:newuniquep}, if we fix a base node, say $v_1$, of $G$, and let $w$ be a walk from $v_1$ to $v_i$ (such a walk exists since $G$ is connected), then $p'_i = R_w p'_1$ for all $i = 2,\ldots,n$. From Step 2 in Algorithm~1, we see that $p'$ is proportional to $p$ and, hence, $p' = p$. It remains to show that if the set of $A_{ij}$'s is not holonomic for $G$, then no $p$ such that $p^\top A_{ij} = p^\top$, for all $(v_i,v_j)\in E$, exists. We proceed by contradiction and assume that there exists such a vector $p$. Then, for any walk $w$ starting at $v_i$ and ending at $v_j$, we have from Lemma~\ref{lem:newuniquep} that $p_j= R_w p_i$. Because $R_w$ is always positive and because $\vec G$ is strongly connected, every entry of $p$ is nonzero (otherwise, $p$ would have to be the zero vector). But, since the set of $A_{ij}$ is not holonomic, there exists a closed walk $w$ in $\vec G$ such that $R_{w}\neq 1$. Pick a node, say $v_i$, in $w$; then, $p_i = R_w p_i \neq p_i$, which is a contradiction. This completes the proof. \end{proof} Let $\mathcal{A} \in \mathcal{S}_G$. For any spanning tree $G' = (V, E')$ of $G$, the corresponding subset of local stochastic matrices $A_{ij}$, for $(v_i,v_j) \in E'$, is always holonomic for $G'$. Thus, the following result is an immediate consequence of Proposition~\ref{prop:p}: \begin{Corollary}\label{cor:treetoprobvec} To every spanning tree $G' = (V, E')$ of $G$ we can assign a unique probability vector $p'$ such that \begin{equation}\label{eq:propp} p'^\top A_{ij} = p'^\top \mbox{ for all } (v_i, v_j)\in E'. \end{equation} \end{Corollary} Note, in particular, that if the local stochastic matrices are holonomic for $G$, then for any two spanning trees $G'$ and $G''$ of $G$, their associated probability vectors $p'$ and $p''$ are equal. Conversely, we have the following: \begin{Proposition}\label{prop:distinctpnonholo} Suppose that the set of $A_{ij}$ is not holonomic for $G$; then, there exist two distinct spanning trees $G'$ and $G''$ of $G$ with distinct probability vectors~$p'$ and~$p''$ satisfying Eq.~\eqref{eq:propp}. \end{Proposition} \begin{proof} Because the set of $A_{ij}$ is not holonomic for $G$, there exists at least one cycle $C = v_1v_2\cdots v_k v_1$ of $\vec G$ such that $R_C \neq 1$. Let $G'$ and $G''$ be two spanning trees such that $G'$ contains the edges $(v_\ell, v_{\ell+1})$, for all $\ell = 1,\ldots,k-1$, and $G''$ contains the edge $(v_1,v_k)$. It should be clear that such $G'$ and $G''$ exist and are distinct, because $G'$ cannot contain the edge $(v_1,v_k)$. By Corollary~\ref{cor:treetoprobvec}, we can uniquely assign the probability vectors $p'$ and $p''$ to $G'$ and $G''$, respectively. We claim that $p'$ and $p''$ are distinct. To establish the claim, we let $w := v_1\cdots v_k$ be the unique path in $\vec G'$ from $v_1$ to $v_k$. Then, since $p'$ satisfies Eq.~\eqref{eq:propp}, by Lemma~\ref{lem:newuniquep}, we have that $p'_{k} = R_w p'_{1}$.
Similarly, for $\vec G''$, since $v_1v_k$ is a directed edge, we have that $p''_{k} = r_{1k} p''_{1}$. But, $R_C = R_w r_{k1} = R_w /r_{1k}\neq 1$, which implies that the two ratios $p'_{k}/p'_{1}$ and $p''_{k}/p''_{1}$ are different. This completes the proof. \end{proof} \subsection{Uniform lower bound for nonzero entries of $P_\gamma$} For a given vector $z\in \mathbb{R}^n_{\geq 0}$, recall that $\operatorname{supp}(z)$ is the support of $z$. Let $\gamma$ be a sequence of edges of $G$ and $z_\gamma:= P_\gamma z$. If $\gamma$ is the empty sequence, then $P_\gamma=I$ and $z_\gamma = z$. We also recall that $\min z_{\gamma}$ is the smallest non-zero entry of $z_\gamma$, i.e., $\min z_\gamma = \min \{ z_{\gamma,i} \mid i\in \operatorname{supp}(z_\gamma)\} $. Note that if $P$ is an arbitrary $n\times n$ nonnegative matrix with positive diagonal entries, then $\operatorname{supp}(z) \subseteq \operatorname{supp}(Pz)$; indeed, if $z_{i} > 0$, then $(Pz)_{i} \geq P_{ii}z_{i} > 0$. As a consequence, we have the following fact: \begin{equation} \label{eq:suppinclusion}\gamma_1 \mbox{ is a string of } \gamma_2 \;\Rightarrow\; \operatorname{supp}(z_{\gamma_1}) \subseteq \operatorname{supp}(z_{\gamma_2}). \end{equation} When we consider a nested family of edge strings $\gamma_1 \subseteq \gamma_2\subseteq \cdots $ for which the supports of the corresponding $z_{\gamma_i}$ are the same, the smallest non-zero entry over the support is non-decreasing, as shown below: \begin{Lemma}\label{lem:gammaveeedge} Let $\gamma$ be a string of edges and $e$ be an edge of $G$. Let $\gamma':= \gamma \vee e$. If $\operatorname{supp}(z_{\gamma'}) = \operatorname{supp}(z_\gamma)$, then $\min z_{\gamma'} \geq \min z_{\gamma}$. \end{Lemma} \begin{proof} For convenience, but without loss of generality, we assume that $e = (v_1, v_2)$, so $A_{12} =\diag(\bar A_{12}, I_{n-2})$, where $\bar A_{12}$ was defined in Eq.~\eqref{eq:defbarAij}. The matrix $P_{\gamma'}$ differs from $P_{\gamma}$ in its first two rows only; we denote by $z_{\gamma,1}$ and $z_{\gamma,2}$ the first two entries of $z_\gamma$. Since $\operatorname{supp}(z_{\gamma'}) = \operatorname{supp}(z_{\gamma})$ and since all entries of $\bar A_{12}$ are positive, we have that $z_{\gamma,1}$ is $0$ if and only if $z_{\gamma,2}$ is $0$; indeed, if, say, $z_{\gamma,1} = 0$ and $z_{\gamma,2} > 0$, then, after multiplication, the first entry of $z_{\gamma'}$ will be positive, contradicting the fact that $\operatorname{supp}(z_{\gamma'}) = \operatorname{supp}(z_{\gamma})$. Now, consider the following two cases: \noindent {\em Case 1.} If $z_{\gamma,1} = z_{\gamma,2} = 0$, then $z_\gamma = z_{\gamma'}$ and, hence, $\min z_\gamma = \min z_{\gamma'}$. \noindent {\em Case 2.} If $z_{\gamma,1} \neq 0$ (and, hence, $z_{\gamma,2}\neq 0$), then the first and second entries of $z_{\gamma'}$ are given by $$ \begin{bmatrix} z_{\gamma',1} \\ z_{\gamma',2} \end{bmatrix} = \bar A_{12} \begin{bmatrix} z_{\gamma,1} \\ z_{\gamma,2} \end{bmatrix}. $$ Because $\bar A_{12}$ is a stochastic matrix, both $z_{\gamma',1}$ and $z_{\gamma',2}$ are convex combinations of $z_{\gamma,1}$ and $z_{\gamma,2}$. Thus, $$ \min \begin{bmatrix} z_{\gamma',1} \\ z_{\gamma', 2} \end{bmatrix} \ge \min \begin{bmatrix} z_{\gamma,1} \\ z_{\gamma,2} \end{bmatrix} \geq \min z_{\gamma}. $$ In either case, we conclude that $\min z_{\gamma'} \ge \min z_{\gamma}$.
\end{proof} With Lemma~\ref{lem:gammaveeedge}, we can now establish a lower bound on $\min P_\gamma$: \begin{Proposition}\label{prop:mindelta} Let $\mathcal{A}\in \mathcal{S}_G$ and let $\underline a$ be defined as in~\eqref{eq:underlinea}. Then, for any sequence $\gamma$ of edges, $$\min P_\gamma \ge \underline{a}^{n-1} = \epsilon.$$ \end{Proposition} \begin{proof} Let $\{e_i\}_{i = 1}^n$ be the standard basis of $\mathbb{R}^n$. Then, the $i$th column of $P_\gamma$, denoted by $P_{\gamma,i}$, is given by $P_\gamma e_i$. It should be clear that $\min P_\gamma = \min_{1\le i\le n} (\min P_{\gamma,i})$. Thus, it suffices to show that $\min P_{\gamma,i} \geq \underline{a}^{n-1}$ for all $i = 1,\ldots, n$. To establish the fact, we fix an arbitrary $i\in \{1,\ldots, n\}$, and let $N_{\gamma, i}$ be the cardinality of $\operatorname{supp} P_{\gamma,i}$. We will show below that \begin{equation}\label{eq:inductiveproof} \min P_{\gamma, i} \geq \underline{a}^{N_{\gamma,i} -1}. \end{equation} Note that if~\eqref{eq:inductiveproof} holds, then the proof is done, because $N_{\gamma,i}$ is bounded above by~$n$ and, hence, $\min P_{\gamma, i} \geq \underline{a}^{n-1}$. The proof of~\eqref{eq:inductiveproof} is by induction on $N_{\gamma,i}$. For the base case $N_{\gamma,i} = 1$, the sequence $\gamma$ can only comprise edges $(v_j,v_k)$ that are not incident to node $v_i$. To see this, note that by the definition of the local stochastic matrices, if $\gamma$ does not contain any edge incident to node $v_i$, then $P_{\gamma,i} = e_i$ and, hence, $N_{\gamma,i} = 1$. Next, we assume that $\gamma$ contains an edge incident to $v_i$, and let $\gamma_t=(v_i,v_j)$ be the first such edge in $\gamma$. Then, by the above arguments, $P_{\gamma,i}(t-1:0) = e_i$. Moreover, using the same arguments as in the proof of Lemma~\ref{lem:gammaveeedge}, we have that both the $i$th and $j$th entries of $P_{\gamma,i}(t:0)$ are nonzero. Further, since the support of $P_{\gamma,i}$ is monotonic by~\eqref{eq:suppinclusion}, we have $N_{\gamma,i}\geq 2$. We have thus shown that if $N_{\gamma,i} = 1$, then $P_{\gamma,i} = e_i$ and $\min P_{\gamma,i} = 1$. For the inductive step, we assume that the statement holds for any $\gamma$ with $N_{\gamma,i} = k$ (for $1\leq k \leq n-1$), and prove that it holds for any $\gamma$ with $N_{\gamma,i} = k + 1$. For any given $\gamma$ with $N_{\gamma,i} = k + 1$, we let $t\ge 1$ be chosen such that the two strings $\gamma':=\gamma(t:0)$ and $\gamma'':= \gamma(t + 1:0)$ satisfy the condition that $N_{\gamma',i}= k$ and $N_{\gamma'',i} = k+1$. Such a $t$ exists because $N_{\gamma(t:0),i}$ is a monotonically non-decreasing function of~$t$ by Eq.~\eqref{eq:suppinclusion}, and because $N_{\gamma(0:0),i} = |\operatorname{supp}(e_i)| = 1$. Let $A_{ij}$ be the local stochastic matrix corresponding to the last edge in $\gamma''$, so that $P_{\gamma''} = A_{ij} P_{\gamma'}$. By the induction hypothesis, $\min P_{\gamma',i} \geq {\underline a}^{k-1}$, and hence $$ \min P_{\gamma'',i} \geq (\min A_{ij}) (\min P_{\gamma',i}) \geq \underline{a}^{k}. $$ Finally, note that the sequence $\gamma$ is obtained from $\gamma''$ by adding edges to the end of $\gamma''$, and that these additions do not change the support of the $i$th column, since $N_{\gamma,i} = N_{\gamma'',i} = k+1$ and the supports are nested by~\eqref{eq:suppinclusion}. One can thus iteratively apply Lemma~\ref{lem:gammaveeedge} to obtain that $\min P_{\gamma,i} \ge \min P_{\gamma'',i}$. This completes the proof.
\end{proof} \subsection{Proofs of Theorems~\ref{th:main1} and~\ref{th:defp}} For a stochastic matrix $A\in\mathbb{R}^{n\times n}$, its coefficient of ergodicity \cite{seneta} is defined as $$\mu(A) = \frac{1}{2}\max_{i,j}\sum_{k=1}^{n}|a_{ik}-a_{jk}|.$$ We always have that $\mu(A) \le 1$. It has been shown in \cite[Lemma 3]{hajnal} that for any two stochastic matrices $P$ and $Q$, \begin{equation}\label{eq:contraction} \|PQ\|_S \le \mu(P)\|Q\|_S. \end{equation} A stochastic matrix $A$ is called a {\em scrambling matrix} if no pair of rows of $A$ is orthogonal. The following result is well known (see, e.g., Eq.~(25) in~\cite{reachingp2}): \begin{Lemma}\label{lem:scrambling} For any scrambling matrix $A$, \begin{equation*}\label{eq:scrambling} \mu(A)\le 1- \min A. \end{equation*} \end{Lemma} Let $\gamma$ be a finite spanning sequence of edges of $G$. Then, by~\eqref{eq:suppinclusion}, the graph of $P_\gamma$ is strongly connected with self-arcs (more precisely, the graph contains a bi-directional spanning tree). It then follows that $P_\gamma$ is irreducible~\cite[Theorem 6.2.24]{horn1}. We also need the following lemma: \begin{Lemma}\label{lm:neighborshared} The product of any set of $\ell\ge \lfloor \frac{n}{2} \rfloor$ irreducible $n\times n$ stochastic matrices with positive diagonal entries is a scrambling matrix. \end{Lemma} \begin{proof} We will use a graphical approach. We call a digraph {\em neighbor-shared} if any two distinct nodes share a common in-neighbor. Let $G_p$ and $G_q$ be two directed graphs with the same node set $V$. The composition of $G_p $ with $G_q$, denoted by $G_q\circ G_p$, is a digraph with node set $V$ and edge set defined as follows: $v_i v_j$ is an edge of $G_q\circ G_p$ whenever there is a node $v_k$ such that $v_iv_k$ is an edge of $G_{p}$ and $v_kv_j$ is an edge of $G_{q}$. Since composition is an associative binary operation, it extends unambiguously to any finite sequence of digraphs with the same node set. Let $M_1$ and $M_2$ be two nonnegative $n\times n$ matrices, and $G_1$, $G_2$ be their respective graphs. Then, by construction, the graph of $M_2M_1$ is $G_2\circ G_1$. From~\cite[Prop.~9]{reachingp1}, we have that the composition of any set of $\ell\ge \lfloor \frac{n}{2} \rfloor$ strongly connected graphs with self-arcs with the same node set is neighbor-shared. It has been shown in~\cite{reachingp2} that a stochastic matrix is scrambling if and only if its graph is neighbor-shared. This concludes the proof. \end{proof} With the preliminaries above, we will now prove Theorems~\ref{th:main1} and~\ref{th:defp}. \begin{proof}[Proof of Theorems~\ref{th:main1} and~\ref{th:defp}] We first assume that the $A_{ij}$ are holonomic for $G$ and prove the two theorems. Let $\gamma$ be an infinite spanning sequence. Since $\|P_{\gamma(t:0)}\|_S$ is monotonically non-increasing by~\eqref{eq:contraction}, the limit exists as $t$ goes to $\infty$. We show below that the limit is $0$. Let $0 =: t_0 < t_1 < t_2 < \cdots $ be a monotonically increasing sequence such that every string $\gamma(t_{k+1}:t_{k})$, for $k \ge 0$, has $\lfloor \frac{n}{2} \rfloor$ disjoint spanning sub-strings. From Lemma~\ref{lm:neighborshared}, every product $P_{\gamma(t_{k+1}:t_{k})}$, for $k\ge 0$, is a scrambling matrix. By Proposition~\ref{prop:mindelta} and Lemma~\ref{lem:scrambling}, $\mu(P_{\gamma(t_{k+1}:t_{k})}) \le 1 - \epsilon$.
It follows from the inequality~\eqref{eq:contraction} and $\|I\|_S=1$ that \begin{align*} \lim_{k\to \infty} \|P_{\gamma(t_k:0)} \|_S &\le \lim_{k\to\infty} (1 - \epsilon) \|P_{\gamma(t_{k-1}:0)} \|_S \\ &\le \lim_{k\to\infty} (1 - \epsilon)^k = 0, \end{align*} which implies that $\lim_{t\to \infty} \|P_{\gamma(t:0)}\|_S = 0$. It is known~\cite{chatterjee1977towards} that the semi-norm $\|P_{\gamma(t:0)}\|_S$ converges to $0$ if and only if $P_{\gamma}(t:0)$ converges to a rank-one matrix as $t\to\infty$. This establishes asymptotic convergence. If $\gamma$ is, furthermore, $m$-spanning, then the sequence $\{t_k\}_{k\ge 0}$ can be chosen such that $t_{k+1} - t_k \le m \lfloor \frac{n}{2}\rfloor =: T$. Let $t$ be an arbitrary time index and choose $k$ with $t_{k} \le t < t_{k+1}$. Then, $t < t_{k+1} \le (k+1)T$, so $\lfloor \frac{t}{T} \rfloor \le k$ and \begin{align*} \|P_{\gamma(t:0)} \|_S &\le \|P_{\gamma(t_{k}:0)}\|_S \le (1 -\epsilon)^{k} \le (1 - \epsilon)^{\lfloor \frac{t}{T} \rfloor} \\ &\le (1 - \epsilon)^{\frac{t}{T} - 1}, \end{align*} which establishes exponential convergence and Eq.~\eqref{eq:exponentialconvergence} in Theorem~\ref{th:main1}. To show that the vector $p$ is the one given in Algorithm 1, we first note that from Proposition~\ref{prop:p}, $p^\top A_{ij}= p^\top$ for all $(v_i, v_j)\in E$. Thus, $p^\top P_{\gamma(t:0)} = p^\top$ for all $t \ge 1$. Because $P_{\gamma(t:0)}$ converges to a rank-one matrix as $t \to \infty$, it must converge to ${\bf 1} p^\top$. Finally, we assume that the $A_{ij}$ are not holonomic and show that there does not exist a probability vector $p$ such that $P_\gamma = {\bf 1} p^\top$ for any infinite spanning sequence $\gamma$. Under the assumption on $A_{ij}$, owing to Proposition~\ref{prop:distinctpnonholo}, there exist at least two distinct spanning trees $G'$ and $G''$ of $G$ for which the associated probability vectors $p'$ and $p''$ are distinct. Let $\gamma'$ and $\gamma''$ be two infinite spanning sequences for $G$, with the property that the edges in $\gamma'$ (resp. $\gamma''$) belong to $G'$ (resp. $G''$). Because $G'$ and $G''$ are trees, the associated $(A_{ij})_{(v_i,v_j)\in E'}$ and $(A_{ij})_{(v_i,v_j)\in E''}$ are holonomic for $G'$ and $G''$, respectively. Thus, by the above arguments, $P_{\gamma'} = {\bf 1} p'^\top$ and $P_{\gamma''} = {\bf 1} p''^\top$. Since $p'\neq p''$, $P_{\gamma'} \neq P_{\gamma''}$. This completes the proof. \end{proof} \subsection{Proof of Theorem~\ref{th:mainsurjective}} Recall that a local stochastic matrix $A_{ij}$ assigned to an undirected edge $(v_i,v_j)\in E$ gives rise to two ratios $r_{ij}=\frac{a_{ij}}{a_{ji}}$ and $r_{ji} = \frac{a_{ji}}{a_{ij}}$, which are inverses of each other, as defined in Section~\ref{ssec:lsmholo}. The set of all such ratios is thus the $|E|$-dimensional {\em subset} of $\mathbb{R}^{2|E|}$ defined as follows: $$ \mathcal{Y} := \left \{(y_{ij})_{v_iv_j\in\vec E} \in \mathbb{R}_+^{2|E|} \mid y_{ij} y_{ji} = 1 \quad\forall v_iv_j \in \vec E \right \}. $$ It is easy to see that $\mathcal{Y}$ is diffeomorphic to $\mathbb{R}^{|E|}_+$. Also, recall that $\mathcal{S}_G$ is the set of all $|E|$-tuples of local stochastic matrices for $G$. We now introduce the map $\phi: \mathcal{S}_G \to \mathcal{Y}$ defined as follows: \begin{equation}\label{eq:defphi} \phi:\mathcal{S}_G \to \mathcal{Y}: (A_{ij})_{(v_i,v_j)\in E} \mapsto \left(\frac{a_{ij}}{a_{ji}}\right)_{v_iv_j\in \vec E}.
\end{equation} Moreover, we have the following result: \begin{Proposition}\label{prop:maphisujective} The map $\phi$ defined in~\eqref{eq:defphi} is surjective and for any $y\in \mathcal{Y}$, the pre-image $\phi^{-1}(y)$ is an $|E|$-dimensional open box embedded in $\mathbb{R}^{2|E|}$. \end{Proposition} \begin{proof} The map $\phi$ can be realized as a {\em Cartesian product} of maps $\phi_{ij}: (0,1)\times(0,1) \to \mathbb{R}^2_+$, for $(v_i,v_j)\in E$ with $i < j$, where each $\phi_{ij}$ is defined by sending the matrix $A_{ij}$ to a pair of reciprocal ratios $(a_{ij}/a_{ji}, a_{ji}/a_{ij})$, i.e., we have that \begin{align*} \phi\left ((A_{ij})_{(v_i,v_j)\in E} \right ) & = \prod_{(v_i,v_j)\in E} \phi_{ij}(A_{ij}) \\ &=\Big((a_{ij}/a_{ji}, a_{ji}/a_{ij})\Big)_{(v_i,v_j) \in E}. \end{align*} Thus, taking inverses, we obtain that $$\phi^{-1}\left ((a_{ij}/a_{ji})_{v_iv_j\in \vec E} \right ) = \prod_{(v_i,v_j)\in E} \phi_{ij}^{-1}(a_{ij}/a_{ji}, a_{ji}/a_{ij}).$$ Now, let $(r_{ij})_{v_iv_j \in \vec E}$, with $r_{ij} > 0$ and $r_{ij}r_{ji} = 1$, be an arbitrary point in the codomain of $\phi$. We claim that $\phi_{ij}^{-1}(r_{ij},r^{-1}_{ij})$ is nonempty and, moreover, it is an open bounded segment in $\mathbb{R}^2$. If the claim holds, then the proof is complete: Indeed, if $\phi_{ij}^{-1}(r_{ij},r^{-1}_{ij})$ is nonempty, then $\phi_{ij}$ is surjective. Owing to the Cartesian product structure exhibited above, $\phi$ is also surjective. By the same arguments, if $\phi_{ij}^{-1}(r_{ij},r^{-1}_{ij})$ is an open bounded segment, then $\phi^{-1}((r_{ij})_{v_iv_j\in \vec E})$ is an open box. We will now establish the claim stated above. For ease of presentation, we will represent the matrix $A_{ij}$ by the pair of entries $(a_{ij}, a_{ji})$ (recall that all the other entries of $A_{ij}$ are uniquely determined by this pair). This representation can be viewed as a bijective linear map. With this representation, it follows from computation that $$ \phi^{-1}_{ij}(r_{ij},r^{-1}_{ij}) = \begin{cases} \{ (r_{ij} x, x ) \mid 0 < x < 1\} & \mbox{if $r_{ij}\le 1$}, \\ \{ (x, r^{-1}_{ij} x ) \mid 0 < x < 1\} & \mbox{if $r_{ij} > 1$}. \end{cases} $$ Thus, the preimage is an open segment parameterized by $x\in (0,1)$ as is claimed. \end{proof} The map $\phi$ relates the local stochastic matrices to the ratios $r_{ij}$, for $v_iv_j \in \vec E$. We next construct a map that relates these ratios to the probability vector $p$. To this end, let $\theta: \operatorname{int} \Delta^{n-1} \to \mathbb{R}^{2|E|}_+$ defined as follows: \begin{equation}\label{eq:deftheta} \theta: p = [p_1 \;\cdots\; p_n]^\top \mapsto (p_j/p_i)_{v_iv_j \in \vec E}~. \end{equation} We will show that the map $\theta$ is one-to-one, and thus admits a well-defined inverse. To this end, we describe the image of $\theta$ explicitly, as an algebraic subset of $\mathbb{R}^{2|E|}$. For a given positive vector $y=(y_{ij})_{v_iv_j\in \vec E} \in \mathcal{Y}$ and for a given walk $w = v_1\cdots v_k$ in $\vec G$, we let $Y_w := \prod^{k-1}_{\ell = 1} y_{\ell,\ell + 1}$. Define a subset of $\mathcal{Y}$ as follows: \begin{equation}\label{eq:defY} \mathcal{Y}_{\mathcal{H}} := \left \{y\in \mathcal{Y} \mid Y_w = 1 \mbox{ for every closed walk $w$ of $\vec G$} \right \}. \end{equation} Note that if $\mathcal{A}\in \mathcal{S}_G$ is holonomic for $G$, then the corresponding vector of ratios $r = (r_{ij})_{v_iv_j\in \vec E}$ belongs to the set $\mathcal{Y}_{\mathcal{H}}$ by construction. 
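Before stating the next result, we illustrate numerically how the maps introduced above can be traversed backwards (the sketch below, the graph, and the target vector are our own choices and are meant only as an illustration): starting from a target $p\in\operatorname{int}\Delta^{n-1}$, one forms the ratios $p_j/p_i$ as in~\eqref{eq:deftheta}, chooses a preimage under $\phi$ as in the proof of Proposition~\ref{prop:maphisujective}, and then observes that the products along a random gossip sequence approach ${\bf 1}p^\top$, as predicted by Theorem~\ref{th:main1}.
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(0)
n = 4
edges = [(0, 1), (1, 2), (2, 3), (3, 0), (0, 2)]   # a connected graph with cycles
p = np.array([0.1, 0.2, 0.3, 0.4])                 # target vector in int(Delta^{n-1})

# theta(p): ratios y_ij = p_j / p_i.  phi^{-1}: pick (a_ij, a_ji) with
# a_ij / a_ji = y_ij, using the parameterization from the proof of
# Proposition (maphisujective) with x = 1/2 (any x in (0, 1) would do).
a = {}
for i, j in edges:
    y = p[j] / p[i]
    x = 0.5
    a[(i, j)], a[(j, i)] = ((y * x, x) if y <= 1 else (x, x / y))

def A(i, j):
    M = np.eye(n)
    M[i, i], M[i, j] = 1 - a[(i, j)], a[(i, j)]
    M[j, i], M[j, j] = a[(j, i)], 1 - a[(j, i)]
    return M

# Simulate a random gossip sequence (spanning with probability one) and
# accumulate the product P_gamma(t:0) = A_{e_t} ... A_{e_1}.
P = np.eye(n)
for _ in range(2000):
    i, j = edges[rng.integers(len(edges))]
    P = A(i, j) @ P
print(np.round(P, 6))   # every row should be numerically close to p
\end{verbatim}
The ratios constructed this way are holonomic by design, since the products $p_j/p_i$ telescope to $1$ along any closed walk; the sketch thus also illustrates the surjectivity part of Theorem~\ref{th:mainsurjective}.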
We have the following result: \begin{Proposition}\label{prop:onetooneontoY} The map $\theta$ is one-to-one and onto $\mathcal{Y}_{\mathcal{H}}$. \end{Proposition} \begin{proof} First, if $y = \theta(p)$ for some $p\in \operatorname{int} \Delta^{n-1}$, it follows from~\eqref{eq:deftheta} that $Y_w = 1$ for any closed walk $w = v_1\cdots v_k v_1$, so the image of $\theta$ is contained in $\mathcal{Y}_{\mathcal{H}}$. Next, we show that the map $\theta$ is one-to-one. Let $p$ and $p'$ be two distinct vectors in $\operatorname{int} \Delta^{n-1}$. Then, there exists at least a pair of distinct indices $(i,j)$ such that $p_j/p_i \neq p'_j / p'_i$. Indeed, if no such pair exists, then $p'$ is proportional to $p$ which, since both $p$ and $p'$ belong to $\Delta^{n-1}$, contradicts the fact that they are distinct. This shows that $\theta$ is one-to-one. Finally, we show that for any $y\in \mathcal{Y}_{\mathcal{H}}$, there exists a $p\in \operatorname{int} \Delta^{n-1}$ such that $\theta(p) = y$. One can obtain such a vector $p$ by using Algorithm 1, but with $r_{ij}$ and $R_w$ replaced by $y_{ij}$ and $Y_w$, respectively. The choice of the base node and the choices of walks from the base node to the other nodes do not matter since $Y_w = 1$ for all closed walks $w$---the same arguments used in Propositions~\ref{prop:1forclosedwalk} and~\ref{prop:pindepbase}, and Corollary~\ref{cor:corpropclosedwalk} can be applied to establish the fact. Then, by construction, the vector $p$ indeed satisfies $\theta(p) = y$. To see this, we let $v_iv_j$ be an arbitrary edge in $\vec G$ and show that $p_j/p_i = y_{ij}$. Let $v_i$ be a base node chosen in Step 1 of Algorithm 1. Since $v_iv_j$ is an edge, by Step 2 of Algorithm 1, we have that $p_j = y_{ij} p_i$, i.e., $p_j/p_i = y_{ij}$. \end{proof} With the propositions above, we prove Theorem~\ref{th:mainsurjective}: \begin{proof}[Proof of Theorem~\ref{th:mainsurjective}] By Proposition~\ref{prop:onetooneontoY}, the map $\theta$ is a bijection. Moreover, by Definition~\ref{def:holonomicAij} of holonomic local stochastic matrices, $\mathcal{H}_G = \phi^{-1}(\mathcal{Y}_{\mathcal{H}})$. We can thus write the map $\pi: \mathcal{H}_G \to \operatorname{int} \Delta^{n-1}$ as $\pi(\cdot) = \theta^{-1}(\phi(\cdot))$ by restricting the domain of $\phi$ to the subset $\mathcal{H}_G$. Thus, for a given $p\in \operatorname{int} \Delta^{n-1}$, since $\pi^{-1}(p) = \phi^{-1}(\theta(p))$ and since $\theta(p)\in \mathcal{Y}_\mathcal{H} \subset \mathcal{Y}$, we conclude from Proposition~\ref{prop:maphisujective} that $\pi^{-1}(p)$ is an $|E|$-dimensional open box. \end{proof} \section{Conclusions}\label{end} In this paper, we have investigated convergence of {\em weighted} gossip processes and characterized their limits. Mathematically, a weighted gossip process can be expressed as an infinite product of local stochastic matrices, which are not required to be doubly stochastic. Using the notion of holonomy, we have provided a necessary and sufficient condition for the product to converge to a unique rank-one matrix, independent of the order of the appearance of the stochastic matrices in the product. We characterized explicitly both the limit and the sets of holonomic stochastic matrices that can give rise to a desired limit. Amongst the future directions in which the present work can be extended, we mention generalization of the results to local stochastic matrices with zeros in the $2\times 2$ principal submatrices. 
This case, though seemingly close to the one studied here, in fact exhibits a very different asymptotic behavior. We will also aim to generalize the results to vector-valued gossip processes, and to establish a unified framework that accommodates the results of this paper and those of the previous work~\cite{BC2020triangulated}. \end{document}
\begin{document} \title{Embedding coproducts of partition lattices} \author[F.~Wehrung]{Friedrich Wehrung} \address{LMNO, CNRS UMR 6139\\ D\'epartement de Math\'ematiques, BP 5186\\ Universit\'e de Caen, Campus 2\\ 14032 Caen cedex\\ France} \email{[email protected]} \urladdr{http://www.math.unicaen.fr/\~{}wehrung} \keywords{Lattice; equivalence relation; embedding; coproduct; ideal; filter; upper continuous} \subjclass[2000]{Primary 06B15; Secondary 06B10, 06B25} \dedicatory{Dedicated to B\'ela Cs\'ak\'any for his 75th birthday} \date{\today} \begin{abstract} We prove that the lattice $\Eq\Omega$ of all equivalence relations on an infinite set~$\Omega$ contains, as a $0,1$-sublattice, the $0$-coproduct of two copies of itself, thus answering a question by G.\,M. Bergman. Hence, by using methods initiated by de Bruijn and further developed by Bergman, we obtain that $\Eq\Omega$ also contains, as a sublattice, the coproduct of $2^{\card\Omega}$ copies of itself. \end{abstract} \maketitle \section{Introduction}\label{S:Intro} Whitman's Theorem \cite{Whit} states that every lattice~$L$ can be embedded into the lattice $\Eq\Omega$ of all equivalence relations on some set~$\Omega$. The cardinality of~$\Omega$ may be taken equal to $\card L+\aleph_0$. There is not much room for improvement of the cardinality bound, as for example, $\Eq\Omega$ cannot be embedded into its dual lattice. (We believe the first printed occurrence of this result to be Proposition~6.2 in G.\,M. Bergman's recent preprint~\cite{Berg}, although it may have already been known for some time.) Hence the question of embeddability into~$\Eq\Omega$ of lattices of large cardinality (typically, $\card(\Eq\Omega)=2^{\card\Omega}$) is nontrivial. In \cite{Berg}, Bergman also extends results of N.\,G. de Bruijn \cite{Brui1, Brui2} by proving various embedding results of large powers or copowers of structures such as symmetric groups, endomorphism rings, and monoids of self-maps of an infinite set~$\Omega$, into those same structures. The nature of the underlying general argument is categorical. The problem whether the lattice~$\Eq\Omega$ contains a coproduct (sometimes called ``free product'' by universal algebraists) of two, or more, copies of itself, was stated as an open question in a preprint version of that paper. In the present note, we solve this problem in the affirmative. The idea of our proof is the following. The lattice~$\Eq\Omega$ of all equivalence relations on~$\Omega$ is naturally isomorphic to the ideal lattice~$\Id K$ of the lattice~$K$ of all \emph{finitely generated} equivalence relations, that is, those equivalence relations containing only finitely many non-diagonal pairs. Denote by~$K\amalg^0K$ the coproduct (amalgamation) of two copies of~$K$ above the common ideal~$0$. As $K\amalg^0K$ has the same cardinality as~$\Omega$, it follows from J\'onsson's proof of Whitman's Embedding Theorem that the lattice $\Id(K\amalg^0K)$ embeds into~$\Eq\Omega$. Finally, we prove that the ideal lattice functor preserves the coproduct~$\amalg^0$ and one-one-ness (Theorem~\ref{T:epsembedding}), in such a way that $(\Id K)\amalg^0(\Id K)$ embeds into~$\Id(K\amalg^0K)$. Then it is easy to extend this result to the usual coproduct $(\Id K)\amalg(\Id K)$. We also present an example (Example~\ref{Ex:BamalgAC}) that shows that the result of Theorem~\ref{T:epsembedding} does not extend to amalgamation above a common (infinite) ideal. 
That is, for a common ideal~$A$ of lattices~$B$ and~$C$, the canonical homomorphism from $(\Id B)\amalg_{\Id A}(\Id C)$ to $\Id(B\amalg_AC)$ may not be one-to-one. \section{Basic concepts}\label{S:Basic} We refer to \cite{GLT2} for unexplained lattice-theoretical notions. For any subsets~$Q$ and~$X$ in a poset (i.e., partially ordered set)~$P$, we put \[ Q\mathbin{\downarrow} X=\setm{p\in Q}{(\exists x\in X)(p\leq x)}\quad\text{and}\quad Q\mathbin{\uparrow} X=\setm{p\in Q}{(\exists x\in X)(p\geq x)}. \] We also write $Q\mathbin{\downarrow} x$, resp. $Q\mathbin{\uparrow} x$ in case $X=\set{x}$. A subset~$Q$ of~$P$ is a \emph{lower subset} of~$P$ if $Q=P\mathbin{\downarrow} Q$. A map~$f\colon K\to L$ between lattices is \emph{meet-complete} if for each~$a\in K$ and each~$X\subseteq K$, $a=\bigwedge X$ in~$K$ implies that $f(a)=\bigwedge f[X]$ in~$L$. (Observe that we do not require either~$K$ or~$L$ to be a complete lattice.) When this is required only for nonempty~$X$, we say that~$f$ is \emph{nonempty-meet-complete}. \emph{Join-completeness} and \emph{nonempty-join-completeness} of maps are defined dually. We say that~$f$ is \emph{complete} (resp., \emph{nonempty-complete}) if it is both meet-complete and join-complete (resp., both nonempty-meet-complete and nonempty-join-complete). We say that~$f$ is \emph{lower bounded} if $\setm{x\in K}{y\leq f(x)}$ is either empty or has a least element for each~$y\in L$. \emph{Upper bounded} homomorphisms are defined dually. Lower bounded homomorphisms are nonempty-meet-complete and upper bounded homomorphisms are nonempty-join-complete. An \emph{ideal} of a lattice~$L$ is a nonempty lower subset of~$L$ closed under finite joins. We denote by~$\Id L$ the lattice of all ideals of~$L$. For a lattice homomorphism $f\colon K\to\nobreak L$, the map $\Id f\colon\Id K\to\Id L$ defined by \[ (\Id f)(X)=L\mathbin{\downarrow} f[X]\,,\quad\text{for each }X\in\Id K\,, \] is a nonempty-join-complete lattice homomorphism; it is an embedding whenever~$f$ is an embedding. If $L$ is a \emph{$0$-lattice} (i.e., a lattice with least element), the canonical map $L\to\Id L$, $x\mapsto L\mathbin{\downarrow} x$ is a $0$-lattice embedding. The assignment that to every lattice associates its dual lattice~$L^{\mathrm{op}}$ (i.e., the lattice with the same underlying set as~$L$ but reverse ordering) is a category equivalence---and even a category isomorphism---from the category of all lattices to itself, that sends $0$-lattices to $1$-lattices. For every lattice~$L$, we denote by $\zero{L}$ the lattice obtained by adding a new zero element to~$L$. A lattice $L$ is \emph{upper continuous} if for each $a\in L$ and each upward directed subset $\setm{x_i}{i\in I}$ of~$L$ admitting a join, the equality $a\wedge\bigvee_{i\in I}x_i=\bigvee_{i\in I}(a\wedge x_i)$ holds. We shall often use upper continuity in the following form: if~$I$ is an upward directed poset and both~$\mathfrak{a}mm{x_i}{i\in I}$ and $\mathfrak{a}mm{y_i}{i\in I}$ are isotone families with respective joins~$x$ and~$y$, then the family $\mathfrak{a}mm{x_i\wedge y_i}{i\in I}$ has join~$x\wedge y$. Every algebraic lattice is upper continuous, so, for example, $\Id L\cup\set{\varnothing}$ is upper continuous for any lattice~$L$; hence~$\Id L$ is also upper continuous. The lattice~$\Eq\Omega$ of all equivalence relations on a set~$\Omega$, partially ordered by inclusion, is an algebraic lattice, thus it is upper continuous. Other examples of upper continuous lattices that are not necessarily complete are given in~\cite{AGS}.
For example, it follows from \cite[Corollary~2.2]{AGS} that \emph{every finitely presented lattice is upper continuous}. We denote by $\mathfrak{P}(\Omega)$ the powerset of a set~$\Omega$, and by~$\omega$ the set of all natural numbers. \section{The free lattice on a partial lattice}\label{S:FPLat} We recall Dean's description of the free lattice on a partial lattice, see \cite{Dean} or \cite[Section~XI.9]{FJN}. A \emph{partial lattice} is a poset~$(P,\leq)$ endowed with partial functions~$\bigvee$ and~$\bigwedge$ from the nonempty finite subsets of~$P$ to~$P$ such that if $p=\bigvee X$ (resp., $p=\bigwedge X$), then~$p$ is the least upper bound (resp., greatest lower bound) of~$X$ in~$P$. An \emph{o-ideal} of~$P$ is a lower subset~$A$ of~$P$ such that $p=\bigvee X$ and $X\subseteq A$ implies that $p\in A$ for each $p\in P$ and each nonempty finite subset~$X$ of~$P$. The set~$\operatorname{\ol{\Id}} P$ of all o-ideals of~$P$, partially ordered by inclusion, is an algebraic lattice. Observe that $\operatorname{\ol{\Id}} P=(\Id P)\cup\set{\varnothing}$ in case~$P$ is a lattice. O-filters are defined dually; again, the lattice~$\operatorname{\ol{\Fil}} P$ of all o-filters of~$P$, partially ordered by inclusion, is algebraic. We denote by~$\mathcal{I}(A)$ (resp., $\mathcal{F}(A)$) the least o-ideal (resp., o-filter) of~$P$ containing a subset~$A$ of~$P$. The \emph{free lattice}~$\mathrm{F}_{\mathbf{L}}(P)$ on~$P$ is generated, as a lattice, by an isomorphic copy of~$P$, that we shall identify with~$P$. (The subscript~$\mathbf{L}$ in~$\mathrm{F}_{\mathbf{L}}(P)$ stands for the variety of all lattices, as the ``free lattice on~$P$'' construction can be carried out in any variety of lattices.) For each~$x\in\mathrm{F}_{\mathbf{L}}(P)$, the following subsets of~$P$, \[ \mathcal{I}(x)=P\mathbin{\downarrow} x=\setm{p\in P}{p\leq x}\quad\text{and} \quad\mathcal{F}(x)=P\mathbin{\uparrow} x=\setm{p\in P}{x\leq p} \] are, respectively, an o-ideal and an o-filter of~$P$, which can also be evaluated by the following rules: \begin{gather} \mathcal{I}(x\vee y)=\mathcal{I}(x)\vee\mathcal{I}(y)\text{ in }\operatorname{\ol{\Id}} P\,,\quad \mathcal{F}(x\vee y)=\mathcal{F}(x)\cap\mathcal{F}(y);\label{Eq:cIcFxveey}\\ \mathcal{I}(x\wedge y)=\mathcal{I}(x)\cap\mathcal{I}(y),\quad\mathcal{F}(x\wedge y)= \mathcal{F}(x)\vee\mathcal{F}(y)\text{ in }\operatorname{\ol{\Fil}} P\,,\label{Eq:cIcFxwedgey} \end{gather} for all~$x,y\in\mathrm{F}_{\mathbf{L}}(P)$. The natural partial ordering on~$\mathrm{F}_{\mathbf{L}}(P)$ satisfies the following ``Whitman-type'' condition: \begin{multline}\label{Eq:Whit} x_0\wedge x_1\leq y_0\vee y_1\Longleftrightarrow\text{either } (\exists p\in P)(x_0\wedge x_1\leq p\leq y_0\vee y_1)\\ \text{ or there is }i<2\text{ such that either }x_i\leq y_0\vee y_1\text{ or }x_0\wedge x_1\leq y_i\,, \end{multline} which is also the basis of the inductive definition of that ordering. \section{The $0$-coproduct of a family of lattices with zero}\label{S:0Coprod} Our development of the~$0$-coproduct of a family of lattices with zero below bears some similarities with the development of coproducts (called there \emph{free products}) given in \cite[Chapter~VI]{GLT2}. Nevertheless, as we use the known results about the free lattice on a partial lattice (outlined in Section~\ref{S:FPLat}), our presentation becomes significantly shorter. Let $\mathfrak{a}mm{L_i}{i\in I}$ be a family of lattices with zero.
Modulo the harmless set-theoretical assumption that $L_i\cap L_j=\set{0}$ for all distinct indices~$i,j\in I$, the \emph{coproduct} (often called \emph{free product} by universal algebraists) of~$\mathfrak{a}mm{L_i}{i\in I}$ can be easily described as $\mathrm{F}_{\mathbf{L}}(P)$, where~$P$ is the partial lattice whose underlying set is the union~$\bigcup_{i\in I}L_i$, whose underlying partial ordering is the one generated by the partial orders on all the $L_i$s, and whose partial lattice structure consists of all existing joins and meets of nonempty finite subsets in each ``component''~$L_i$. We denote this lattice by $L=\coprod^0_{i\in I}L_i$, the superscript~$0$ meaning that the coproduct of the~$L_i$s is evaluated in the category of all $0$-lattices and $0$-preserving homomorphisms, which we shall often emphasize by saying ``$0$-coproduct'' instead of just coproduct. We shall also identify each~$L_i$ with its canonical copy in~$L$. Of course, the coproduct of any family of lattices $\mathfrak{a}mm{L_i}{i\in I}$ in the variety of all lattices is the sublattice of $\coprod^0_{i\in I}\zero{(L_i)}$ generated by the union of the images of the~$L_i$s. Now we shall analyze further the structure of the $0$-coproduct~$L$, in a fashion similar to the development in \cite[Chapter~VI]{GLT2}. We add a new largest element, denoted by~$\infty$, to~$L$, and we set $\ol{L}_i=L_i\cup\set{\infty}$ for each~$i\in I$. The following lemma is an analogue, for $0$-coproducts instead of coproducts, of \cite[Theorem~VI.1.10]{GLT2}. \begin{lemma}\label{L:xii} For each~$x\in L$ and each~$i\in I$, there are a largest element of~$L_i$ below~$x$ and a least element of~$\ol{L}_i$ above~$x$ with respect to the ordering of~$L\cup\set{\infty}$. Furthermore, if we denote these elements by~$x_{(i)}$ and~$x^{(i)}$, respectively, then the following formulas hold: \begin{equation}\label{Eq:Defxii} \begin{aligned} &p_{(i)}=p^{(i)}=p,\text{ if }p\in L_i;\\ &p_{(i)}=0\text{ and }p^{(i)}=\infty,\text{ if }p\in P\setminus L_i;\\ &(x\vee y)_{(i)}=x_{(i)}\vee y_{(i)} \text{ and }(x\wedge y)_{(i)}=x_{(i)}\wedge y_{(i)};\\ &(x\vee y)^{(i)}=x^{(i)}\vee y^{(i)};\\ &(x\wedge y)^{(i)}=\begin{cases} 0\,,&\text{if }x^{(j)}\wedge y^{(j)}=0\text{ for some }j\in I,\\ x^{(i)}\wedge y^{(i)}\,,&\text{otherwise}, \end{cases} \end{aligned} \end{equation} for each $x,y\in L$ and each $i\in I$. \end{lemma} \begin{proof} For an element~$x$ of~$L$, abbreviate by ``$x_{(i)}$ exists'' (resp., ``$x^{(i)}$ exists'') the statement that $L_i\mathbin{\downarrow} x$ is a principal ideal in~$L_i$ (resp., $\ol{L}_i\mathbin{\uparrow} x$ is a principal filter in~$\ol{L}_i$), and then denote by~$x_{(i)}$ (resp., $x^{(i)}$) the largest element of $L_i\mathbin{\downarrow} x$ (resp., the least element of~$\ol{L}_i\mathbin{\uparrow} x$). Denote by~$K$ the set of all $x\in L$ such that both $x_{(i)}$ and $x^{(i)}$ exist for each~$i\in I$. It is clear that~$K$ contains~$P$ and that both~$p_{(i)}$ and~$p^{(i)}$ are given by the first two formulas of~\eqref{Eq:Defxii}, for any $p\in P$. Furthermore, it follows immediately from the definition of~$K$ that \begin{align} \mathcal{I}(z)&=\bigcup_{i\in I}(L_i\mathbin{\downarrow} z_{(i)}),\label{Eq:cI(x)fla}\\ \mathcal{F}(z)&=\bigcup_{i\in I}(L_i\mathbin{\uparrow} z^{(i)}),\label{Eq:cF(x)fla} \end{align} for each $z\in K$. We shall establish that~$K$ is a sublattice of~$L$. So let $x,y\in K$, put $u=x\wedge y$ and $v=x\vee y$. 
It is straightforward that for each~$i\in I$, both~$u_{(i)}$ and $v^{(i)}$ exist, and \begin{equation}\label{Eq:u_iv^i} u_{(i)}=x_{(i)}\wedge y_{(i)},\quad v^{(i)}=x^{(i)}\vee y^{(i)}. \end{equation} Now we shall prove that $v_{(i)}$ exists and is equal to $x_{(i)}\vee y_{(i)}$. By the induction hypothesis, \eqref{Eq:cI(x)fla} holds at both~$x$ and~$y$. So, as $\mathcal{I}(v)=\mathcal{I}(x)\vee\mathcal{I}(y)$, in order to get the asserted existence and description of the elements~$v_{(i)}$, it suffices to prove that \begin{equation}\label{Eq:Lixiyivi} \bigcup_{i\in I}(L_i\mathbin{\downarrow} x_{(i)})\vee\bigcup_{i\in I}(L_i\mathbin{\downarrow} y_{(i)})= \bigcup_{i\in I}\bigl(L_i\mathbin{\downarrow}(x_{(i)}\vee y_{(i)})\bigr). \end{equation} The containment from left to right is obvious, and each $x_{(i)}\vee y_{(i)}$ is contained in any o-ideal of~$P$ containing $\set{x_{(i)},y_{(i)}}$, so it suffices to prove that the right hand side of~\eqref{Eq:Lixiyivi} is an o-ideal of~$P$. As the join operation in~$P$ is internal to each~$L_i$, this set is closed under joins. As each~$L_i$ is a lower subset of~$P$, this set is also a lower subset of~$P$. This establishes the desired result for the~$v_{(i)}$s. It remains to prove that $u^{(i)}$ exists and is equal to $z_i$, where $z_i=x^{(i)}\wedge y^{(i)}$ if $x^{(j)}\wedge y^{(j)}\neq0$ for all~$j$, and $z_i=0$ otherwise. By the induction hypothesis, \eqref{Eq:cF(x)fla} holds at both~$x$ and~$y$. So, as $\mathcal{F}(u)=\mathcal{F}(x)\vee\mathcal{F}(y)$, in order to get the asserted existence and description of the elements~$u^{(i)}$, it suffices to prove that \begin{equation}\label{Eq:Lixiyiui} \bigcup_{i\in I}(L_i\mathbin{\uparrow} x^{(i)})\vee\bigcup_{i\in I}(L_i\mathbin{\uparrow} y^{(i)})= \bigcup_{i\in I}(L_i\mathbin{\uparrow} z_i). \end{equation} The containment from left to right is obvious. If an o-filter~$U$ of~$P$ contains $\set{x^{(i)},y^{(i)}}$ for all~$i\in I$, then it also contains all elements $x^{(i)}\wedge y^{(i)}$; in particular, it is equal to~$P$ in case $x^{(i)}\wedge y^{(i)}=0$ for some~$i$. In any case, $z_i\in U$ for all~$i\in I$. So it suffices to prove that the right hand side of~\eqref{Eq:Lixiyiui} is an o-filter of~$P$. This is trivial in case~$z_i=0$ for some~$i$, so suppose that $z_i\neq0$ for all~$i$. As the meet operation in~$P$ is internal to each~$L_i$, the right hand side of~\eqref{Eq:Lixiyiui} is closed under meets. As each~$L_i\setminus\set{0}$ is an upper subset of~$P$, this set is also an upper subset of~$P$. This establishes the desired result for the~$u^{(i)}$s. \end{proof} \begin{lemma}\label{L:coprdKiLi} Let~$K_i$ be a $0$-sublattice of a lattice~$L_i$, for each~$i\in I$. Then the canonical $0$-lattice homomorphism~$f\colon\coprod^0_{i\in I}K_i\to\coprod^0_{i\in I}L_i$ is an embedding. \end{lemma} \begin{proof} By the amalgamation property for lattices \cite[Section~V.4]{GLT2}, the $i$-th coprojection from~$K_i$ to~$K$ is an embedding, for each~$i\in I$. Put $L'_i=K\amalg_{K_i}L_i$ for each~$i\in I$. Comparing the universal properties, it is immediate that the $0$-coproduct~$L$ of $\mathfrak{a}mm{L_i}{i\in I}$ is also the coproduct of $\mathfrak{a}mm{L'_i}{i\in I}$ over~$K$. Again by using the amalgamation property for lattices, all canonical maps from the~$L'_i$s to~$L$ are embeddings. So, in particular, the canonical map from their common sublattice~$K$ to~$L$ is an embedding. 
\end{proof} We shall call the adjoint maps $\alpha_i\colon x\mapsto x_{(i)}$ and $\beta_i\colon x\mapsto x^{(i)}$ the canonical lower, resp. upper adjoint of~$L$ onto~$L_i$, resp.~$\ol{L}_i$. Observe that these maps may not be defined in the case of amalgamation of two lattices over a common sublattice, as Example~\ref{Ex:BamalgAC} will show. (In that example, there is no largest element of~$B$ below $b_0\vee c_0$.) The following result is an immediate consequence of well-known general properties of adjoint maps. \begin{corollary}\label{C:Basicxii2} The canonical embedding from $L_i$ into~$L$ is both lower bounded and upper bounded, for each~$i\in I$. In particular, it is a nonempty-complete lattice homomorphism. Furthermore, the lower adjoint $\alpha_i$ is meet-complete while the upper adjoint~$\beta_i$ is nonempty-join-complete. \end{corollary} In the following lemma, we shall represent the elements of $L=\coprod^0_{i\in I}L_i$ in the form~$\mathbf{p}(\vec a)$, where~$\mathbf{p}$ is a lattice term with variables from~$I\times\omega$ and the ``vector'' $\vec a=\mathfrak{a}mm{a_{i,n}}{(i,n)\in I\times\omega}$ is an element of the cartesian product~$\Pi=\prod_{(i,n)\in I\times\omega}L_i$. Define a \emph{support} of~$\mathbf{p}$ as a subset~$J$ of~$I$ such that~$\mathbf{p}$ involves only variables from~$J\times\omega$. Obviously, $\mathbf{p}$ has a finite support. It is straightforward from~\eqref{Eq:Defxii} that $\mathbf{p}(\vec a)_{(i)}=0$ and either~$\mathbf{p}(\vec a)=0$ or $\mathbf{p}(\vec a)^{(i)}=\infty$, for each~$i$ outside a support of~$\mathbf{p}$. \begin{lemma}\label{L:PartUC} Let $\Lambda$ be an upward directed poset, let $\mathfrak{a}mm{{\vec a}^\lambda}{\lambda\in\Lambda}$ be an isotone family of elements of~$\Pi$ with supremum~$\vec a$ in~$\Pi$, and let~$\mathbf{p}$ be a lattice term. If all the lattices~$L_i$ are upper continuous, then $\mathbf{p}(\vec a)=\bigvee_{\lambda\in\Lambda}\mathbf{p}({\vec a}^\lambda)$ in~$L$. \end{lemma} Again, Example~\ref{Ex:BamalgAC} will show that Lemma~\ref{L:PartUC} fails to extend to the amalgam of two lattices over a common ideal. \begin{proof} As~$\mathbf{p}(\vec a)$ is clearly an upper bound for all elements~$\mathbf{p}({\vec a}^\lambda)$, it suffices to prove that for each lattice term~$\mathbf{q}$ on~$I\times\omega$ and each~$\vec b\in\Pi$ such that $\mathbf{p}({\vec a}^\lambda)\leq\mathbf{q}(\vec b)$ for all~$\lambda\in\Lambda$, the inequality~$\mathbf{p}(\vec a)\leq\mathbf{q}(\vec b)$ holds. We argue by induction on the sums of the lengths of~$\mathbf{p}$ and~$\mathbf{q}$. The case where~$\mathbf{p}$ is a projection follows immediately from the second sentence of Corollary~\ref{C:Basicxii2}. The case where either~$\mathbf{p}$ is a join or~$\mathbf{q}$ is a meet is straightforward. Now suppose that~$\mathbf{p}=\mathbf{p}_0\wedge\mathbf{p}_1$ and~$\mathbf{q}=\mathbf{q}_0\vee\mathbf{q}_1$. We shall make repeated uses of the following easily established principle, which uses only the assumption that~$\Lambda$ is upward directed: \begin{quote}\em For every positive integer~$n$ and every $X_0,\dots,X_{n-1}\subseteq\Lambda$, if $\bigcup_{i<n}X_i$ is cofinal in~$\Lambda$, then one of the~$X_i$s is cofinal in~$\Lambda$. \end{quote} Now we use~\eqref{Eq:Whit}. 
If there exists a cofinal subset~$\Lambda'$ of~$\Lambda$ such that \[ (\forall\lambda\in\Lambda')(\exists i<2) \bigr(\text{either }\mathbf{p}_i({\vec a}^\lambda)\leq\mathbf{q}(\vec b)\text{ or } \mathbf{p}({\vec a}^\lambda)\leq\mathbf{q}_i(\vec b)\bigr), \] then there are~$i<2$ and a smaller cofinal subset~$\Lambda''$ of~$\Lambda'$ such that \begin{align*} \text{either}\quad&(\forall\lambda\in\Lambda'')\bigl(\mathbf{p}_i({\vec a}^\lambda)\leq\mathbf{q}(\vec b)\bigr)\\ \text{or}\quad&(\forall\lambda\in\Lambda'') \bigl(\mathbf{p}({\vec a}^\lambda)\leq\mathbf{q}_i(\vec b)\bigr). \end{align*} In the first case, it follows from the induction hypothesis that~$\mathbf{p}_i(\vec a)\leq\mathbf{q}(\vec b)$. In the second case, it follows from the induction hypothesis that~$\mathbf{p}(\vec a)\leq\mathbf{q}_i(\vec b)$. In both cases, $\mathbf{p}(\vec a)\leq\mathbf{q}(\vec b)$. It remains to consider the case where there exists a cofinal subset~$\Lambda'$ of~$\Lambda$ such that \[ (\forall\lambda\in\Lambda')(\exists c_\lambda\in P) \bigl(\mathbf{p}({\vec a}^\lambda)\leq c_\lambda\leq\mathbf{q}(\vec b)\bigr). \] It follows from the induction hypothesis that \begin{equation}\label{Eq:p01joinOK} \mathbf{p}_\ell(\vec a)=\bigvee_{\lambda\in\Lambda'}\mathbf{p}_\ell({\vec a}^\lambda)\,, \quad\text{for all }\ell<2\,. \end{equation} Fix a common finite support~$J$ of~$\mathbf{p}_0$, $\mathbf{p}_1$, $\mathbf{q}_0$, $\mathbf{q}_1$. Each~$c_\lambda$ belongs to~$L_i$, for some~$i$ in the given support~$J$. By using the finiteness of~$J$ and by extracting a further cofinal subset of~$\Lambda'$, we may assume that all those~$i$ are equal to the same index $j\in J$. Hence we have reduced the problem to the case where \begin{equation}\label{Eq:Interpolxpq} (\forall\lambda\in\Lambda') \bigl(\mathbf{p}({\vec a}^\lambda)\leq c_\lambda\leq\mathbf{q}(\vec b)\bigr)\,, \quad\text{where }c_\lambda=\mathbf{p}({\vec a}^\lambda)^{(j)}\in L_j. \end{equation} If $\mathbf{p}_0(\vec a)^{(i)}\wedge\mathbf{p}_1(\vec a)^{(i)}=0$ for some~$i\in I$, then~$\mathbf{p}(\vec a)=0\leq\mathbf{q}(\vec b)$ and we are done. Now suppose that $\mathbf{p}_0(\vec a)^{(i)}\wedge\mathbf{p}_1(\vec a)^{(i)}\neq0$ for all~$i\in I$. By using~\eqref{Eq:p01joinOK}, the finiteness of~$J$, and the upper continuity of~$L_i$, we obtain that there exists a cofinal subset~$\Lambda''$ of~$\Lambda'$ such that \[ (\forall\lambda\in\Lambda'')(\forall i\in J) \bigl(\mathbf{p}_0({\vec a}^\lambda)^{(i)}\wedge\mathbf{p}_1({\vec a}^\lambda)^{(i)} \neq0\bigr). \] In particular, both $\mathbf{p}_0({\vec a}^\lambda)$ and $\mathbf{p}_1({\vec a}^\lambda)$ are nonzero for each $\lambda\in\Lambda''$. As~$J$ is a common support of~$\mathbf{p}_0$ and~$\mathbf{p}_1$, the equality $\mathbf{p}_0({\vec a}^\lambda)^{(i)}\wedge\mathbf{p}_1({\vec a}^\lambda)^{(i)}=\infty$ holds for all $\lambda\in\Lambda''$ and all $i\in I\setminus J$, hence \[ (\forall\lambda\in\Lambda'')(\forall i\in I) \bigl(\mathbf{p}_0({\vec a}^\lambda)^{(i)}\wedge\mathbf{p}_1({\vec a}^\lambda)^{(i)} \neq0\bigr). \] Thus it follows from~\eqref{Eq:Defxii} that $c_\lambda=\mathbf{p}({\vec a}^\lambda)^{(j)}=\mathbf{p}_0({\vec a}^\lambda)^{(j)}\wedge\mathbf{p}_1({\vec a}^\lambda)^{(j)}$ for each $\lambda\in\Lambda''$. 
Hence, by the upper continuity of~$L_j$ (and thus of~$\ol{L}_j$), \eqref{Eq:p01joinOK}, and the previously observed fact that the upper adjoint~$\beta_j$ is nonempty-join-complete, $\setm{c_\lambda}{\lambda\in\Lambda''}$ has a join in~$L_j$, which is equal to~$\mathbf{p}_0(\vec a)^{(j)}\wedge\mathbf{p}_1(\vec a)^{(j)}=\mathbf{p}(\vec a)^{(j)}$. Therefore, it follows from~\eqref{Eq:Interpolxpq} that $\mathbf{p}(\vec a)\leq\mathbf{p}(\vec a)^{(j)}\leq\mathbf{q}(\vec b)$. \end{proof} \section{Ideal lattices and $0$-coproducts}\label{S:IdCoprod} In this section we fix again a family~$\mathfrak{a}mm{L_i}{i\in I}$ of lattices with zero, pairwise intersecting in~$\set{0}$, and we form~$L=\coprod^0_{i\in I}L_i$. We denote by $\varepsilon_i\colon\Id L_i\hookrightarrow\Id L$ the $0$-lattice homomorphism induced by the canonical embedding~$L_i\hookrightarrow L$, for each~$i\in I$. By the universal property of the coproduct, there exists a unique $0$-lattice homomorphism~$\varepsilon\colon\coprod^0_{i\in I}\Id L_i\to\Id L$ such that $\varepsilon_i=\varepsilon\mathbin{\restriction}_{\Id L_i}$ for each~$i\in I$. Observe that in case~$I$ is finite, the lattice~$\coprod^0_{i\in I}\Id L_i$ has $\bigvee_{i\in I}L_i$ as a largest element, and this element is sent by~$\varepsilon$ to~$L$ (because every element of~$L$ lies below some join of elements of the~$L_i$s). Hence, \emph{if the index set~$I$ is finite, then the map~$\varepsilon$ preserves the unit as well}. \begin{lemma}\label{L:Evalp(vecX)} Let $\mathbf{p}$ be a lattice term on~$I\times\omega$ and let $\vec X=\mathfrak{a}mm{X_{i,n}}{(i,n)\in I\times\omega}$ be an element of~$\prod_{(i,n)\in I\times\omega}\Id L_i$. We put $\vec\varepsilon\vec X=\mathfrak{a}mm{\varepsilon_i(X_{i,n})}{(i,n)\in I\times\omega}\in(\Id L)^{I\times\omega}$. Then the following equality holds. \[ \mathbf{p}(\vec\varepsilon\vec X)=L\mathbin{\downarrow}\setm{\mathbf{p}(\vec x)}{\vec x\mathbin{\vec{\in}}\vec X}\,, \] where ``\,$\vec x\mathbin{\vec{\in}}\vec X$\,'' stands for $(\forall(i,n)\in I\times\omega)(x_{i,n}\in X_{i,n})$. \end{lemma} \begin{proof} We argue by induction on the length of the term~$\mathbf{p}$. If~$\mathbf{p}$ is a projection, then the result follows immediately from the definition of the maps~$\varepsilon_i$. If~$\mathbf{p}$ is either a join or a meet, then the result follows immediately from the expressions for the join and the meet in the ideal lattice of~$L$, in a fashion similar to the end of the proof of \cite[Lemma~I.4.8]{GLT2}. \end{proof} \begin{theorem}\label{T:epsembedding} The canonical map~$\varepsilon\colon\coprod^0_{i\in I}\Id L_i\to\Id\bigl(\coprod^0_{i\in I}L_i\bigr)$ is a $0$-lattice embedding. \end{theorem} \begin{proof} We put again~$L=\coprod^0_{i\in I}L_i$. Let~$\mathbf{p}$, $\mathbf{q}$ be lattice terms in~$I\times\omega$ and let $\vec X\in\prod_{(i,n)\in I\times\omega}\Id L_i$ such that $\mathbf{p}(\vec\varepsilon\vec X)\leq\mathbf{q}(\vec\varepsilon\vec X)$ in~$\Id L$. We must prove that $\mathbf{p}(\vec X)\leq\mathbf{q}(\vec X)$ in $\coprod^0_{i\in I}\Id L_i$. For each $\vec x\mathbin{\vec{\in}}\vec X$, the inequalities $L\mathbin{\downarrow}\mathbf{p}(\vec x)\leq\mathbf{p}(\vec\varepsilon\vec X)\leq\mathbf{q}(\vec\varepsilon\vec X)$ hold in~$\Id L$, thus, by Lemma~\ref{L:Evalp(vecX)}, there exists $\vec y\mathbin{\vec{\in}}\vec X$ such that $L\mathbin{\downarrow}\mathbf{p}(\vec x)\leq L\mathbin{\downarrow}\mathbf{q}(\vec y)$ in~$\Id L$, that is, $\mathbf{p}(\vec x)\leq\mathbf{q}(\vec y)$ in~$L$. 
Therefore, by applying the canonical map from~$L=\coprod^0_{i\in I}L_i$ to~$\coprod^0_{i\in I}\Id L_i$ and putting $\vec L\mathbin{\downarrow}\vec x=\mathfrak{a}mm{L_i\mathbin{\downarrow} x_{i,n}}{(i,n)\in I\times\omega}$, we obtain \begin{equation}\label{Eq:psmallqbig} \mathbf{p}(\vec L\mathbin{\downarrow}\vec x)\leq\mathbf{q}(\vec L\mathbin{\downarrow}\vec y)\leq\mathbf{q}(\vec X) \quad\text{in }\coprod\nolimits^0_{i\in I}\Id L_i\,. \end{equation} As $\vec X$ is equal to the directed join $\bigvee_{\vec x\mathbin{\vec{\in}}\vec X}(\vec L\mathbin{\downarrow}\vec x)$ in~$\prod_{(i,n)\in I\times\omega}\Id L_i$ and each $\Id L_i$ is upper continuous, it follows from Lemma~\ref{L:PartUC} that \[ \mathbf{p}(\vec X)=\bigvee\Famm{\mathbf{p}(\vec L\mathbin{\downarrow}\vec x)}{\vec x\mathbin{\vec{\in}}\vec X}\quad \text{in }\coprod\nolimits^0_{i\in I}\Id L_i\,. \] Therefore, it follows from~\eqref{Eq:psmallqbig} that \begin{equation*} \mathbf{p}(\vec X)\leq\mathbf{q}(\vec X)\quad\text{in }\coprod\nolimits^0_{i\in I}\Id L_i\,. \tag*{\qed} \end{equation*} \renewcommand{\qed}{} \end{proof} The following example shows that Theorem~\ref{T:epsembedding} does not extend to the amalgam $B\amalg_AC$ of two lattices~$B$ and~$C$ above a common ideal~$A$. The underlying idea can be traced back to Gr\"atzer and Schmidt in~\cite[Section~5]{GrSc95}. \begin{example}\label{Ex:BamalgAC} Lattices $B$ and $C$ with a common ideal~$A$ such that the canonical lattice homomorphism $f\colon(\Id B)\amalg_{\Id A}(\Id C)\to\Id\bigl(B\amalg_AC\bigr)$ is not one-to-one. \end{example} \begin{proof} Denote by~$K$ the poset represented in Figure~\ref{Fig:LattK}. We claim that the subsets~$A$, $B$, and~$C$ of~$K$ defined by \begin{align*} A&=\setm{a_n}{n<\omega}\cup\setm{p_n}{n<\omega}\cup\setm{q_n}{n<\omega}\,,\\ B&=A\cup\setm{b_n}{n<\omega}\,,\\ C&=A\cup\setm{c_n}{n<\omega}\,. \end{align*} are as required. Observe that~$B$ and~$C$ are isomorphic lattices and that~$A$ is an ideal of both~$B$ and~$C$. \begin{figure} \caption{The poset $K$.} \label{Fig:LattK} \end{figure} The map~$f$ is the unique lattice homomorphism that makes the diagram of Figure~\ref{Fig:CanMap} commute. Unlabeled arrows are the corresponding canonical maps. \begin{figure} \caption{The commutative diagram defining the homomorphism~$f$.} \label{Fig:CanMap} \end{figure} Put $D=B\amalg_AC$ and identify~$B$ and~$C$ with their images in~$D$. Further, we endow $\Id B\cup\Id C$ with its natural structure of partial lattice, that is, the ordering is the union of the orderings of~$\Id B$ and~$\Id C$ (remember that~$A=B\cap C$ is an ideal of both~$B$ and~$C$) and the joins and meets are those taking place in either~$\Id B$ or~$\Id C$. Observe that $\Id A=\Id B\cap\Id C$ and $(\Id B)\amalg_{\Id A}(\Id C)$ is the free lattice on the partial lattice~$(\Id B)\cup(\Id C)$. As the latter is identified with its canonical image in $(\Id B)\amalg_{\Id A}(\Id C)$, the elements~$A$, $B\mathbin{\downarrow} b_0$, and~$C\mathbin{\downarrow} c_0$ belong to $(\Id B)\amalg_{\Id A}(\Id C)$. We prove by induction that $a_n\leq b_0\vee c_0$ in~$D$ for all~$n<\omega$. This is trivial for $n=0$. Suppose that $a_n\leq b_0\vee c_0$. Then $a_n\vee b_0\leq b_0\vee c_0$, but~$B$ is a sublattice of~$D$ containing the subset~$\set{a_n,b_0}$ with join~$b_n$, thus $b_n\leq b_0\vee c_0$, and thus $p_n\leq b_0\vee c_0$. 
Similarly, $q_n\leq b_0\vee c_0$, but~$A$ is a sublattice of~$D$ containing the subset~$\set{p_n,q_n}$ with join~$a_{n+1}$, and thus $a_{n+1}\leq b_0\vee c_0$, which completes the induction step. So we have established the inequality \begin{equation}\label{Eq:Aleqb+c} f(A)\leq f(B\mathbin{\downarrow} b_0)\vee f(C\mathbin{\downarrow} c_0)\quad\text{in } \Id\bigl(B\amalg_AC\bigr)=\Id D\,. \end{equation} Now observe that $\setm{B\mathbin{\downarrow} x}{x\in B}\cup\setm{C\mathbin{\downarrow} y}{y\in C}$ is an o-ideal of the partial lattice $(\Id B)\cup(\Id C)$, containing~$\set{B\mathbin{\downarrow} b_0,C\mathbin{\downarrow} c_0}$ and to which~$A$ does not belong. Hence, $A\notin\mathcal{I}(\set{B\mathbin{\downarrow} b_0,C\mathbin{\downarrow} c_0})$, which means that $A\nleq(B\mathbin{\downarrow} b_0)\vee(C\mathbin{\downarrow} c_0)$ in $(\Id B)\amalg_{\Id A}(\Id C)$. Therefore, by~\eqref{Eq:Aleqb+c}, $f$ is not an embedding. \end{proof} As observed before, this example shows that Lemma~\ref{L:PartUC} fails to extend to the amalgam of two lattices over a common ideal. Indeed, while $A=\bigvee_n(A\mathbin{\downarrow} a_n)$ in~$\Id B$, the same equality fails in $(\Id B)\amalg_{\Id A}(\Id C)$. The reason for this is that $A\mathbin{\downarrow} a_n\leq(B\mathbin{\downarrow} b_0)\vee(C\mathbin{\downarrow} c_0)$ for each~$n$, while $A\nleq(B\mathbin{\downarrow} b_0)\vee(C\mathbin{\downarrow} c_0)$. \section{Embedding coproducts of infinite partition lattices}\label{S:EmbPart} Whitman's Embedding Theorem states that every lattice embeds into~$\Eq\Omega$, for some set~$\Omega$. We shall use a proof of Whitman's Theorem due to B. J\'onsson \cite{Jons}, see also \cite[Section~IV.4]{GLT2}. The following result is proved there. \begin{lemma}\label{L:Jonss} For every lattice~$L$ with zero, there are an infinite set~$\Omega$ and a map $\delta\colon\Omega\times\Omega\to L$ satisfying the following properties: \begin{enumerate} \item $\delta(x,y)=0$ if{f} $x=y$, for all $x,y\in\Omega$. \item $\delta(x,y)=\delta(y,x)$, for all $x,y\in\Omega$. \item $\delta(x,z)\leq\delta(x,y)\vee\delta(y,z)$, for all $x,y,z\in L$. \item For all $x,y\in\Omega$ and all $a,b\in L$ such that $\delta(x,y)\leq a\vee b$, there are $z_1,z_2,z_3\in\nobreak\Omega$ such that $\delta(x,z_1)=a$, $\delta(z_1,z_2)=b$, $\delta(z_2,z_3)=a$, and $\delta(z_3,y)=b$. \end{enumerate} \end{lemma} Observe, in particular, that the map~$\delta$ is \emph{surjective}. Furthermore, a straightforward L\"owenheim-Skolem type argument (``keeping only the necessary elements in~$\Omega$'') shows that one may take $\card\Omega=\card L+\aleph_0$. The following is the basis for J\'onsson's proof of Whitman's Embedding Theorem. \begin{corollary}\label{C:Jonss} For every lattice~$L$ with zero and every set~$\Omega$ such that $\card \Omega=\card L+\aleph_0$, there exists a complete lattice embedding from~$\Id L$ into~$\Eq\Omega$. \end{corollary} \begin{proof} Any map~$\delta$ as in Lemma~\ref{L:Jonss} gives rise to a map $\varphi\colon\Id L\to\Eq\Omega$ defined by the rule \begin{equation}\label{Eq:Defvarphi} \varphi(A)=\setm{(x,y)\in\Omega\times\Omega}{\delta(x,y)\in A}\,, \quad\text{for each }A\in\Id L\,, \end{equation} and conditions (1)--(4) above imply that~$\varphi$ is a complete lattice embedding. \end{proof} \begin{theorem}\label{T:Copr0Part} Let~$\Omega$ be an infinite set. Then there exists a $0,1$-lattice embedding from $(\Eq\Omega)\amalg^0(\Eq\Omega)$ into~$\Eq\Omega$. 
\end{theorem} \begin{proof} Denote by~$K$ the sublattice of~$\Eq\Omega$ consisting of all \emph{compact} equivalence relations of~$\Omega$. Thus the elements of~$K$ are exactly the equivalence relations containing only finitely many non-diagonal pairs. In particular, $\Eq\Omega$ is canonically isomorphic to~$\Id K$. Now we apply Corollary~\ref{C:Jonss} to $L=K\amalg^0K$. As $\card L=\card\Omega$, we obtain a complete lattice embedding $\varphi\colon\Id L\hookrightarrow\Eq\Omega$. However, $\Id L=\Id(K\amalg^0K)$ contains, by Theorem~\ref{T:epsembedding} and the last sentence of the first paragraph of Section~\ref{S:IdCoprod}, a $0,1$-sublattice isomorphic to~$(\Id K)\amalg^0(\Id K)$, thus to~$(\Eq\Omega)\amalg^0(\Eq\Omega)$. \end{proof} For any nonempty set~$\Omega$, form $\ol{\Omega}=\Omega\cup\set{\infty}$ for an outside point~$\infty$. As there exists a retraction~$\rho\colon\ol{\Omega}\twoheadrightarrow\Omega$ (pick $p\in\Omega$ and send~$\infty$ to~$p$), we can form a meet-complete, nonempty-join-complete lattice embedding $\eta\colon\Eq\Omega\hookrightarrow\Eq\ol{\Omega}$ by setting \[ \eta(\theta)=\setm{(x,y)\in\ol{\Omega}\times\ol{\Omega}} {(\rho(x),\rho(y))\in\theta}\,, \quad\text{for each }\theta\in\Eq\Omega\,, \] and $\eta$ sends the zero element of~$\Eq\Omega$ to a nonzero element of~$\Eq\ol{\Omega}$. Hence, in case~$\Omega$ is infinite, $\zero{(\Eq\Omega)}$ completely embeds into~$\Eq\Omega$. As $(\Eq\Omega)\amalg(\Eq\Omega)$ is the sublattice of $\zero{(\Eq\Omega)}\amalg^0\zero{(\Eq\Omega)}$ generated by the union of the images of~$\Eq\Omega$ under the two canonical coprojections, it follows from Theorem~\ref{T:Copr0Part} and Lemma~\ref{L:coprdKiLi} that $(\Eq\Omega)\amalg(\Eq\Omega)$ has a $1$-lattice embedding into~$\Eq\Omega$. If we denote by~$\theta$ the image of zero under this embedding, then $(\Eq\Omega)\amalg(\Eq\Omega)$ has a $0,1$-lattice embedding into~$\Eq(\Omega/{\theta})$, and thus, as $\card(\Omega/{\theta})\leq\card\Omega$, into~$\Eq\Omega$. Hence we obtain \begin{theorem}\label{T:CoprPart} Let~$\Omega$ be an infinite set. Then there exists a $0,1$-lattice embedding from $(\Eq\Omega)\amalg(\Eq\Omega)$ into~$\Eq\Omega$. \end{theorem} By applying the category equivalence~$L\mapsto L^{\mathrm{op}}$ to Theorems~\ref{T:Copr0Part} and~\ref{T:CoprPart} and denoting by~$\amalg^1$ the coproduct of $1$-lattices, we obtain the following result. \begin{theorem}\label{T:CoprPartop} Let~$\Omega$ be an infinite set. Then there are $0,1$-lattice embeddings from $(\Eq\Omega)^{\mathrm{op}}\amalg^1(\Eq\Omega)^{\mathrm{op}}$ into~$(\Eq\Omega)^{\mathrm{op}}$ and from $(\Eq\Omega)^{\mathrm{op}}\amalg(\Eq\Omega)^{\mathrm{op}}$ into~$(\Eq\Omega)^{\mathrm{op}}$. \end{theorem} By using the results of \cite{Berg}, we can now fit the copower of the optimal number of copies of~$L=\Eq\Omega$ into itself. The variety~$\mathbf{V}$ to which we apply those results is, of course, the variety of all lattices with zero. The functor to be considered sends every set~$I$ to~$F(I)=\coprod^0_IL$, the $0$-coproduct of~$I$ copies of~$L$. If we denote by $e_i^I\colon L\hookrightarrow F(I)$ the $i$-th coprojection, then, for any map $f\colon I\to J$, $F(f)$ is the unique $0$-lattice homomorphism from~$F(I)$ to~$F(J)$ such that $F(f)\circ e_i^I=e_{f(i)}^J$ for all~$i\in I$. Observe that even in case both~$I$ and~$J$ are finite, $F(f)$ does not preserve the unit unless~$f$ is surjective. 
The condition labeled~(9) in \cite[Section~3]{Berg}, stating that every element of~$F(I)$ belongs to the range of~$F(a)$ for some $a\colon n\to I$, for some positive integer~$n$, is obviously satisfied. Hence, by \cite[Theorem~3.1]{Berg}, $F(\mathfrak{P}(\Omega))$ has a $0$-lattice embedding into $F(\omega)^\Omega$. Furthermore, it follows from \cite[Lemma~3.3]{Berg} that~$F(\omega)$ has a $0$-lattice embedding into $\prod_{1\leq n<\omega}F(n)$. By Lemma~\ref{L:coprdKiLi} and Theorem~\ref{T:Copr0Part}, each~$F(n)$ has a $0,1$-lattice embedding into~$L$. As, by the final paragraph of \cite[Section~2]{Berg}, $L^\Omega$ has a $0$-lattice embedding into~$L$, we obtain the following theorem. \begin{theorem}\label{T:OptEmb} Let $\Omega$ be an infinite set. Then the following statements hold: \begin{enumerate} \item $\coprod^0_{\mathfrak{P}(\Omega)}\Eq\Omega$ has a $0$-lattice embedding into~$\Eq\Omega$. \item $\coprod_{\mathfrak{P}(\Omega)}\Eq\Omega$ has a lattice embedding into~$\Eq\Omega$. \item $\coprod^1_{\mathfrak{P}(\Omega)}(\Eq\Omega)^{\mathrm{op}}$ has a $1$-lattice embedding into~$(\Eq\Omega)^{\mathrm{op}}$. \item $\coprod_{\mathfrak{P}(\Omega)}(\Eq\Omega)^{\mathrm{op}}$ has a lattice embedding into~$(\Eq\Omega)^{\mathrm{op}}$. \end{enumerate} \end{theorem} This raises the question whether $(\Eq\Omega)\amalg^1(\Eq\Omega)$ embeds into~$\Eq\Omega$, which the methods of the present paper do not seem to settle in any obvious way. More generally, we do not know whether, for a sublattice~$A$ of~$\Eq\Omega$, the amalgam $(\Eq\Omega)\amalg_A(\Eq\Omega)$ of two copies of~$\Eq\Omega$ over~$A$ embeds into~$\Eq\Omega$. \section*{Acknowledgment} I thank George Bergman for many comments and corrections about the successive versions of this note, which resulted in many improvements in both its form and substance. \end{document}
\begin{document} \title[A Logical Calculus To Intuitively And Logically Denote Number Systems] {A Logical Calculus To Intuitively And Logically Denote Number Systems} \author[Pith Xie]{Pith Xie} \address{Department of Information Science and Communication \\ Nanjing University of Information Science and Technology \\ Nanjing, 210044, China} \curraddr{P.O.Box 383 \\ Gulou Post Office \\ Gulou District \\ Nanjing, 210008, China} \email{[email protected]} \subjclass[2000]{Primary 40A05; Secondary 03B10, 03B80, 03D05.} \begin{abstract} Simple continued fractions, base-b expansions, Dedekind cuts and Cauchy sequences are common notations for number systems. In this note, first, it is proven that both simple continued fractions and base-b expansions fail to denote real numbers and thus lack logic; second, it is shown that Dedekind cuts and Cauchy sequences fail to join in algebraical operations and thus lack intuition; third, we construct a logical calculus and deduce numbers to intuitively and logically denote number systems. \end{abstract} \maketitle \setcounter{tocdepth}{5} \setcounter{page}{1} \section{Introduction} A number system is a set together with one or more operations. Any notation for a number system has to denote both the set and the operations. The common notations for number systems are simple continued fractions, base-b expansions, Dedekind cuts and Cauchy sequences. In \cite{Ref1} and \cite{Ref2}, simple continued fractions and base-b expansions denote each number in a number system as a set of symbols. So both of them denote number systems intuitively and join well in algebraical operations. In \cite{Ref3} and \cite{Ref4}, Dedekind cuts and Cauchy sequences introduce infinitely many rational numbers to denote an irrational number. So both of them denote number systems logically and join well in logical deduction. In this note, first, it is proven that both simple continued fractions and base-b expansions fail to denote real numbers and thus lack logic; second, it is shown that Dedekind cuts and Cauchy sequences fail to intuitively join in algebraical operations and thus lack intuition. However, mathematical logic has sufficiency of both intuition and logic. In \cite{Ref9}, formal language theory introduces the producer ``$ \rightarrow $" to formalize intuitive language. In \cite{Ref10}, propositional logic introduces connectives such as ``$ \neg $", ``$ \wedge $", ``$ \vee $", ``$ \Rightarrow / \rightarrow $" and ``$ \Leftrightarrow / \leftrightarrow $" to formalize logical deduction. Therefore, it is feasible to combine the producer and the connectives to deduce intuitive and logical notations for number systems. The paper is organized as follows. In Section 2, we study the most common notation for number systems, decimals, and prove that they fail to denote real numbers. In Section 3, by comparing those common notations for number systems, we show that the intuitive simple continued fractions and base-b expansions lack logic while the logical Dedekind cuts and Cauchy sequences lack intuition. In Section 4, we construct a logical calculus and deduce numbers to intuitively and logically denote number systems. \section{Decimals And Real Number System}\label{Sec_DEC} In this section, we show a conceptual error in the proof of \cite[THEOREM 134]{Ref1}, and then correct \cite[THEOREM 134]{Ref1}.
\begin{definition}\label{Den_LIM} A sequence $ \{ x_n \} $ in a metric space $ (X,d) $ is a convergent sequence if there exists a point $ x \in X $ such that, for every $ \epsilon > 0 $, there exists an integer $ N $ such that $ d(x,x_n) < \epsilon $ for every integer $ n \geq N $. The point $ x $ is called the limit of the sequence $ \{ x_n \} $ and we write \begin{eqnarray} x_n \rightarrow x \end{eqnarray} or \begin{eqnarray} \lim\limits_{n \to \infty} x_n = x. \end{eqnarray} \end{definition} \begin{theorem}[{\cite[THEOREM 134]{Ref1}}]\label{Them_SUM} Any positive number $ \xi $ may be expressed as a decimal \begin{eqnarray} A_{1} A_{2} \cdots A_{s+1}.\ a_1 a_2 a_3 \cdots, \end{eqnarray} where $ 0 \leq A_{1} < 10, 0 \leq A_{2} < 10, \cdots , 0 \leq a_{n} < 10 $, not all A and a are 0, and an infinity of the $ a_n $ are less than 9. If $ \xi \geq 1 $, then $ A_{1} > 0 $. There is a (1,1) correspondence between the numbers and the decimals, and \begin{eqnarray}\label{Eqn_XI} \xi = A_{1} \cdot 10^s + \cdots + A_{s+1} + \frac{a_1}{10} + \frac{a_2}{10^2} + \cdots . \end{eqnarray} \end{theorem} \begin{proof} Let $ [\xi] $ be the integral part of $ \xi $. Then we write \begin{eqnarray}\label{9.1.1} \xi = [\xi] + x = X + x, \end{eqnarray} where $ X $ is an integer and $ 0 \leq x < 1 $, and consider $ X $ and $ x $ separately. If $ X > 0 $ and $ 10^{s} \leq X < 10^{s+1} $, and $ A_1 $ and $ X_1 $ are the quotient and remainder when $ X $ is divided by $ 10^{s} $, then $ X = A_1 \cdot 10^{s} + X_1 $, where $ 0 < A_1 = [10^{-s}X] < 10 $, $ 0 \leq X_1 < 10^s $. Similarly \begin{eqnarray*} X_1 = & A_2 \cdot 10^{s-1} + X_2 & (0 \leq A_2 < 10, 0 \leq X_2 < 10^{s-1}), \\ X_2 = & A_3 \cdot 10^{s-2} + X_3 & (0 \leq A_3 < 10, 0 \leq X_3 < 10^{s-2}), \\ \cdots & \cdots & \cdots \\ X_{s-1} = & A_s \cdot 10 + X_s & (0 \leq A_s < 10, 0 \leq X_s < 10), \\ X_s = & A_{s+1} & (0 \leq A_{s+1} < 10). \end{eqnarray*} Thus $ X $ may be expressed uniquely in the form \begin{eqnarray} X = A_1 \cdot 10^s + A_2 \cdot 10^{s-1} + \cdots + A_s \cdot 10 + A_{s+1}, \end{eqnarray} where every $ A $ is one of 0, 1, 2, $ \cdots $, 9, and $ A_1 $ is not 0. We abbreviate this expression to \begin{eqnarray}\label{9.1.3} X = A_1 A_2 \cdots A_s A_{s+1}, \end{eqnarray} the ordinary representation of $ X $ in decimal notation. Passing to $ x $, we write \begin{eqnarray*} & x = f_1 & (0 \leq f_1 < 1). \end{eqnarray*} We suppose that $ a_1 = [10 f_1] $, so that \begin{eqnarray*} \frac{a_1}{10} \leq f_1 < \frac{a_1 + 1}{10}; \end{eqnarray*} $ a_1 $ is one of 0, 1, 2, $ \cdots $, 9, and \begin{eqnarray*} a_1 = [10 f_1], & 10 f_1 = a_1 + f_2 & (0 \leq f_2 < 1). \end{eqnarray*} Similarly, we define $ a_2, a_3, \cdots $ by \begin{eqnarray*} a_2 = [10 f_2], & 10 f_2 = a_2 + f_3 & (0 \leq f_3 < 1), \\ a_3 = [10 f_3], & 10 f_3 = a_3 + f_4 & (0 \leq f_4 < 1), \\ \cdots & \cdots & \cdots \end{eqnarray*} Every $ a_n $ is one of 0, 1, 2, $ \cdots $, 9. Thus \begin{eqnarray}\label{9.1.4} x = x_n + g_{n+1}, \end{eqnarray} where \begin{eqnarray} x_n = \frac{a_1}{10} + \frac{a_2}{10^2} + \cdots + \frac{a_n}{10^n}, \label{9.1.5} \\ 0 \leq g_{n+1} = \frac{f_{n+1}}{10^n} < \frac{1}{10^n}. \end{eqnarray} We thus define a decimal $ . a_1 a_2 a_3 \cdots a_n \cdots $ associated with $ x $. We call $ a_1, a_2, \cdots $ the first, second, $ \cdots $ \emph{digits} of the decimal.
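For instance (a brief illustration of the recursion), take $ x = \frac{1}{7} $, so that $ f_1 = \frac{1}{7} $; then \begin{eqnarray*} a_1 = [10 f_1] = 1, & 10 f_1 = 1 + \frac{3}{7} & (f_2 = \frac{3}{7}), \\ a_2 = [10 f_2] = 4, & 10 f_2 = 4 + \frac{2}{7} & (f_3 = \frac{2}{7}), \\ a_3 = [10 f_3] = 2, & 10 f_3 = 2 + \frac{6}{7} & (f_4 = \frac{6}{7}), \end{eqnarray*} and so on, producing the digits $ 1, 4, 2, 8, 5, 7, 1, \cdots $, with $ x_3 = \frac{142}{1000} $ and $ 0 \leq g_4 = \frac{f_4}{10^3} < \frac{1}{10^3} $.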
Since $ a_n < 10 $, the series \begin{eqnarray}\label{9.1.7} \sum\limits_{1}^{\infty} \frac{a_n}{10^n} \end{eqnarray} is convergent; and since $ g_{n+1} \rightarrow 0 $, its sum is $ x $. We may therefore write \begin{eqnarray}\label{9.1.8} x = .\ a_1 a_2 a_3 \cdots, \end{eqnarray} the right-hand side being an abbreviation for the series (\ref{9.1.7}). If $ f_{n+1} = 0 $ for some $ n $, \emph{i.e.} if $ 10^n x $ is an integer, then \begin{eqnarray*} a_{n+1} = a_{n+2} = \cdots = 0. \end{eqnarray*} In this case we say that the decimal \emph{terminates}. Thus \begin{eqnarray*} \frac{17}{400} = .0425000 \cdots, \end{eqnarray*} and we write simply $ \frac{17}{400} = .0425 $. It is plain that the decimal for $ x $ will terminate if and only if $ x $ is a rational fraction whose denominator is of the form $ 2^\alpha 5^\beta $. Since $ \frac{a_{n+1}}{10^{n+1}} + \frac{a_{n+2}}{10^{n+2}} + \cdots = g_{n+1} < \frac{1}{10^n} $ and $ \frac{9}{10^{n+1}} + \frac{9}{10^{n+2}} + \cdots = \frac{9}{10^{n+1}(1-\frac{1}{10})} = \frac{1}{10^n} $, it is impossible that every $ a_n $ from a certain point on should be 9. With this reservation, every possible sequence $ (a_n) $ will arise from some $ x $. We define $ x $ as the sum of the series (\ref{9.1.7}), and $ x_n $ and $ g_{n+1} $ as in (\ref{9.1.4}) and (\ref{9.1.5}). Then $ g_{n+1} < 10^{-n} $ for every $ n $, and $ x $ yields the sequence required. Finally, if \begin{eqnarray}\label{9.1.9} \sum\limits_{1}^{\infty} \frac{a_n}{10^n} = \sum\limits_{1}^{\infty} \frac{b_n}{10^n}, \end{eqnarray} and the $ b_n $ satisfy the conditions already imposed on the $ a_n $, then $ a_n = b_n $ for every $ n $. For if not, let $ a_N $ and $ b_N $ be the first pair which differ, so that $ |a_N - b_N| \geq 1 $. Then \begin{eqnarray*} \left| \sum\limits_{1}^{\infty} \frac{a_n}{10^n} - \sum\limits_{1}^{\infty} \frac{b_n}{10^n} \right| \geq \frac{1}{10^N} - \sum\limits_{N+1}^{\infty} \frac{|a_n - b_n|}{10^n} \geq \frac{1}{10^N} - \sum\limits_{N+1}^{\infty} \frac{9}{10^n} = 0. \end{eqnarray*} This contradicts (\ref{9.1.9}) unless there is equality. If there is equality, then all of $ a_{N+1} - b_{N+1}, a_{N+2} - b_{N+2}, \cdots $ must have the same sign and the absolute value 9. But then either $ a_n = 9 $ and $ b_n = 0 $ for $ n > N $, or else $ a_n = 0 $ and $ b_n = 9 $, and we have seen that each of these alternatives is impossible. Hence $ a_n = b_n $ for all $ n $. In other words, different decimals correspond to different numbers. We now combine (\ref{9.1.1}), (\ref{9.1.3}), and (\ref{9.1.8}) in the form \begin{eqnarray} \xi = X + x = A_1 A_2 \cdots A_s A_{s+1}.\ a_1 a_2 a_3 \cdots; \end{eqnarray} and the claim follows. \end{proof} According to Definition \ref{Den_LIM}, the series (\ref{9.1.7}) converges to the limit $ x $. For an infinite sequence, however, its limit may not equal its $ \omega-th $ number for any infinite number $ \omega $. 1. $ \omega $ is a \emph{transfinite cardinal number}\cite{Ref5}. Since the equalities and order on the fractions including transfinite cardinal numbers have not been defined, the equation $ g_{\omega+1} = \frac{f_{\omega+1}}{10^\omega} = 0 $ cannot be derived from given premises for any $ \omega $. 2. $ \omega $ is an \emph{infinite superreal number}\cite{Ref6} or an \emph{infinite surreal number}\cite{Ref7}. 
Since the infinitesimal $ g_{\omega+1} = \frac{f_{\omega+1}}{10^\omega} > 0 $ holds for every $ \omega $, the equation $ g_{\omega+1} = \frac{f_{\omega+1}}{10^\omega} = 0 $ cannot be derived from given premises for any $ \omega $. In summary, the equation $ x = x_\omega + g_{\omega+1} $ does not yield $ x = x_\omega $ for any infinite number $ \omega $. Thus, (\ref{9.1.8}) cannot be derived from given premises. In fact, the proof of \cite[THEOREM 134]{Ref1} confuses the limit and the $ \omega $-th number of the same infinite sequence for some infinite number $ \omega $. According to the arguments above, we correct \cite[THEOREM 134]{Ref1} as follows. \begin{theorem} Any positive number $ \xi $ may be expressed as a limit of an infinite decimal sequence \begin{eqnarray} \lim\limits_{n \to \infty} A_{1} A_{2} \cdots A_{s+1}.\ a_1 a_2 a_3 \cdots a_n, \end{eqnarray} where $ 0 \leq A_{1} < 10, 0 \leq A_{2} < 10, \cdots , 0 \leq a_{n} < 10 $, not all A and a are 0, and an infinity of the $ a_n $ are less than 9. If $ \xi \geq 1 $, then $ A_{1} > 0 $. There is a (1,1) correspondence between the numbers and the limits of infinite decimal sequences, and \begin{eqnarray} \xi = A_{1} \cdot 10^s + \cdots + A_{s+1} + \lim\limits_{n \to \infty} \sum\limits_{k=1}^{n} \frac{a_k}{10^k}. \end{eqnarray} \end{theorem} \section{Common Notations For Number Systems} \subsection{Intuitive Notations} Simple continued fractions and base-b expansions construct intuitive symbols to denote number systems. They join well in algebraical operations and thus have sufficiency of intuition. \begin{definition} A finite continued fraction is a function \begin{eqnarray}\label{10.1.1} a_0 + \cfrac{1}{a_1 + \cfrac{1}{a_2 + \cfrac{1}{\begin{matrix} a_3 + & \cdots \\ & + \cfrac{1}{a_N} \end{matrix}}}} \end{eqnarray} of $ N+1 $ variables \begin{eqnarray} a_0, a_1, \cdots, a_n, \cdots, a_N, \end{eqnarray} which is called a finite simple continued fraction when $ a_0, a_1, \cdots, a_N $ are integers such that $ a_n > 0 $ for all $ n \geq 1 $. \end{definition} Finite simple continued fractions can be written in a compact abbreviated notation as \begin{eqnarray} [a_0, a_1, a_2, \cdots, a_N]. \end{eqnarray} \begin{definition} If $ a_0, a_1, a_2, \cdots, a_n, \cdots $ is a sequence of integers such that $ a_n > 0 $ for all $ n \geq 1 $, then the notation \begin{eqnarray} [a_0, a_1, a_2, \cdots] \end{eqnarray} denotes an infinite simple continued fraction. \end{definition} \begin{theorem}[{\cite[THEOREM 149]{Ref1}}]\label{The_CF1} If $ p_{n} $ and $ q_{n} $ are defined by \begin{eqnarray} p_{0}=a_{0}, & \ p_{1}=a_{1}a_{0}+1, & \ p_{n}=a_{n}p_{n-1}+p_{n-2} \ (2 \leq n \leq N), \\ q_{0}=1, & \ q_{1}=a_{1}, & \ q_{n}=a_{n}q_{n-1}+q_{n-2} \ (2 \leq n \leq N), \end{eqnarray} then \begin{eqnarray} [ a_0, a_1, \ldots , a_n ] = \frac{p_{n}}{q_{n}}. \end{eqnarray} \end{theorem} Theorem \ref{The_CF1} can be specialized for finite simple continued fractions as follows: \begin{theorem}\label{The_CF2} Let $ \{ a_0, a_1, \cdots, a_N \} $ be an integer sequence. If $ p_{n} $ and $ q_{n} $ are defined by \begin{eqnarray} p_{0}=a_{0}, & \ p_{1}=a_{1}a_{0}+1, & \ p_{n}=a_{n}p_{n-1}+p_{n-2} \ (2 \leq n \leq N), \\ q_{0}=1, & \ q_{1}=a_{1}, & \ q_{n}=a_{n}q_{n-1}+q_{n-2} \ (2 \leq n \leq N), \end{eqnarray} then \begin{eqnarray} [ a_0, a_1, \ldots , a_n ] = \frac{p_{n}}{q_{n}}.
\end{eqnarray} \end{theorem} Theorem \ref{The_CF2} directly yields the following corollary: \begin{corollary}\label{Coy_CF2} Any finite simple continued fraction can be represented by a rational number. \end{corollary} \begin{theorem}[{\cite[THEOREM 161]{Ref1}}] Any rational number can be represented by a finite simple continued fraction. \end{theorem} According to Corollary \ref{Coy_CF2} and \cite[THEOREM 161]{Ref1}, finite simple continued fractions are equivalent to rational numbers. \begin{theorem}[{\cite[THEOREM 170]{Ref1}}] Every irrational number can be expressed in just one way as an infinite simple continued fraction. \end{theorem} \begin{proof} We call \begin{eqnarray} a'_n = [a_n, a_{n+1}, \cdots] \end{eqnarray} the $ n $-th complete quotient of the continued fraction $ x = [a_0, a_1, \cdots] $. Clearly \begin{eqnarray*} a'_n & = & \lim\limits_{N \to \infty} [a_n, a_{n+1}, \cdots, a_N] \\ & = & a_n + \lim\limits_{N \to \infty} \frac{1}{[a_{n+1}, \cdots, a_N]} \\ & = & a_n + \frac{1}{a'_{n+1}}, \end{eqnarray*} and in particular $ x = a'_0 = a_0 + \frac{1}{a'_1} $. Also $ a'_n > a_n, a'_{n+1} > a_{n+1} > 0, 0 < \frac{1}{a'_{n+1}} < 1 $; and so $ a_n = [a'_n] $, the integral part of $ a'_n $. Let $ x $ be any real number, and let $ a_0 = [x] $. Then \begin{eqnarray*} x = a_0 + \xi_0, \ \ \ 0 \leq \xi_0 < 1. \end{eqnarray*} If $ \xi_0 \neq 0 $, we can write \begin{eqnarray*} \frac{1}{\xi_0} = a'_1, \ \ \ [a'_1] = a_1, \ \ \ a'_1 = a_1 + \xi_1, \ \ \ 0 \leq \xi_1 < 1. \end{eqnarray*} If $ \xi_1 \neq 0 $, we can write \begin{eqnarray*} \frac{1}{\xi_1} = a'_2 = a_2 + \xi_2, \ \ \ 0 \leq \xi_2 < 1, \end{eqnarray*} and so on. Also $ a'_n = 1/\xi_{n-1} > 1 $, and so $ a_n \geq 1 $, for $ n \geq 1 $. Thus, \begin{eqnarray} x = [a_0, a'_1] = \left[ a_0, a_1 + \frac{1}{a'_2} \right] = [a_0, a_1, a'_2] = [a_0, a_1, a_2, a'_3] = \cdots, \end{eqnarray} where $ a_0, a_1, \cdots $ are integers and \begin{eqnarray} a_1 > 0, \ \ \ a_2 > 0, \cdots. \end{eqnarray} The system of equations \begin{eqnarray*} x = & a_0 + \xi_0 & (0 \leq \xi_0 < 1), \\ \frac{1}{\xi_0} = & a'_1 = a_1 + \xi_1 & (0 \leq \xi_1 < 1), \\ \frac{1}{\xi_1} = & a'_2 = a_2 + \xi_2 & (0 \leq \xi_2 < 1), \\ \cdots & \cdots & \cdots \end{eqnarray*} is known as the \emph{continued fraction algorithm}. The algorithm continues so long as $ \xi_n \neq 0 $. If we eventually reach a value of $ n $, say $ N $, for which $ \xi_n = 0 $, the algorithm terminates and \begin{eqnarray} x = [a_0, a_1, a_2, \cdots, a_N]. \end{eqnarray} In this case $ x $ is represented by a simple continued fraction, and is rational. If $ x $ is an integer, then $ \xi_0 = 0 $ and $ x = a_0 $. If $ x $ is not integral, then \begin{eqnarray*} x = \frac{h}{k}, \end{eqnarray*} where $ h $ and $ k $ are integers and $ k > 1 $. Since \begin{eqnarray*} \frac{h}{k} = a_0 + \xi_0, \ \ \ h = a_0 k + \xi_0 k, \end{eqnarray*} $ a_0 $ is the quotient, and $ k_1 = \xi_0 k $ the remainder, when $ h $ is divided by $ k $. If $ \xi_0 \neq 0 $, then \begin{eqnarray} a'_1 = \frac{1}{\xi_0} = \frac{k}{k_1} \end{eqnarray} and \begin{eqnarray*} \frac{k}{k_1} = a_1 + \xi_1, \ \ \ k = a_1 k_1 + \xi_1 k_1; \end{eqnarray*} thus $ a_1 $ is the quotient, and $ k_2 = \xi_1 k_1 $ the remainder, when $ k $ is divided by $ k_1 $.
We thus obtain a series of equations \begin{eqnarray*} h = a_0 k + k_1, \ \ \ k = a_1 k_1 + k_2, \ \ \ k_1 = a_2 k_2 + k_3, \ \ \ \cdots \end{eqnarray*} continuing so long as $ \xi_n \neq 0 $, or, what is the same thing, so long as $ k_{n+1} \neq 0 $. The non-negative integers $ k, k_1, k_2, \cdots $ form a strictly decreasing sequence, and so $ k_{N+1} = 0 $ for some $ N $. It follows that $ \xi_N = 0 $ for some $ N $, and that the continued fraction algorithm terminates. This proves \cite[THEOREM 161]{Ref1}. The system of equations \begin{eqnarray*} h = & a_0 k + k_1 & (0 < k_1 < k), \\ k = & a_1 k_1 + k_2 & (0 < k_2 < k_1), \\ \cdots & \cdots & \cdots \\ k_{N-2} = & a_{N-1} k_{N-1} + k_{N} & (0 < k_{N} < k_{N-1}), \\ k_{N-1} = & a_{N} k_{N} & \end{eqnarray*} is known as \emph{Euclid's algorithm}. If $ x $ is irrational, the continued fraction algorithm cannot terminate. Hence it defines an infinite sequence of integers \begin{eqnarray} a_0, a_1, a_2, \cdots, \end{eqnarray} and as before \begin{eqnarray} x = [a_0, a'_1] = [a_0, a_1, a'_2] = \cdots = [a_0, a_1, a_2, \cdots, a_n, a'_{n+1}], \end{eqnarray} where $ a'_{n+1} = a_{n+1} + \frac{1}{a'_{n+2}} > a_{n+1} $. Hence \begin{eqnarray} x = a'_0 = \frac{a'_1 a_0 +1}{a'_1} = \cdots = \frac{a'_{n+1}p_n + p_{n-1}}{a'_{n+1}q_n + q_{n-1}}, \end{eqnarray} and so \begin{eqnarray} x - \frac{p_n}{q_n} = \frac{p_{n-1}q_n - p_n q_{n-1}}{q_n(a'_{n+1}q_n + q_{n-1})} = \frac{(-1)^{n}}{q_n(a'_{n+1}q_n + q_{n-1})}, \\ |x - \frac{p_n}{q_n}| < \frac{1}{q_n(a_{n+1}q_n + q_{n-1})} = \frac{1}{q_n q_{n+1}} \leq \frac{1}{n(n+1)} \rightarrow 0, \label{For_CFI} \end{eqnarray} when $ n \rightarrow \infty $. Thus \begin{eqnarray}\label{For_CFE} x = \lim\limits_{n \to \infty} \frac{p_n}{q_n} = [a_0, a_1, \cdots, a_n, \cdots], \end{eqnarray} and the algorithm leads to the continued fraction whose value is $ x $. \end{proof} In Section \ref{Sec_DEC}, we have proven that the limit of an infinite sequence may not equal the $ \omega $-th number of the same infinite sequence for any infinite number $ \omega $. 1. $ \omega $ is a transfinite cardinal number. Since the equalities and order on the fractions including transfinite cardinal numbers have not been defined, the inequality $ |x - \frac{p_\omega}{q_\omega}| < \frac{1}{\omega(\omega+1)} $ does not hold for any $ \omega $. 2. $ \omega $ is an infinite superreal number or an infinite surreal number. Since the infinitesimal $ \frac{1}{\omega(\omega+1)} > 0 $ holds for every $ \omega $, the equation $ |x - \frac{p_\omega}{q_\omega}| = 0 $ or $ x = \frac{p_\omega}{q_\omega} $ cannot be derived from given premises for any $ \omega $. In summary, the inequality (\ref{For_CFI}) does not yield (\ref{For_CFE}). In fact, (\ref{For_CFI}) only yields $ x = \lim\limits_{n \to \infty} \frac{p_n}{q_n} = \lim\limits_{n \to \infty} [a_0, a_1, \cdots, a_n] $. According to the arguments above, we correct \cite[THEOREM 170]{Ref1} as follows. \begin{theorem} Every irrational number can be expressed in just one way as a limit of an infinite simple continued fraction sequence. \end{theorem} According to \cite[\S BF.2]{Ref2}, we can define base-b expansions as follows: \begin{definition}\label{Den_BBE} A base-b expansion is an expression of a number as follows. \begin{eqnarray} c_{n}b^{n} + c_{n-1}b^{n-1} + \cdots + c_{2}b^{2} + c_{1}b^{1} + c_{0}b^{0} + d_{1}b^{-1} + d_{2}b^{-2} + \cdots + d_{n}b^{-n}, \end{eqnarray} where $ b $ represents the base, and $ c_{i} $ and $ d_{i} $ are place-value coefficients.
The expansion would ordinarily be written without the plus signs and the powers of the base as follows: \begin{eqnarray} c_{n}c_{n-1} \cdots c_{2}c_{1}c_{0}\ .\ d_{1}d_{2} \cdots d_{n}, \end{eqnarray} where $ b^{i} $ is implied by the place-value property of the system. \end{definition} According to Definition \ref{Den_BBE}, finite decimals are just base-10 expansions. \begin{definition}\label{Den_BVE} A base-variable expansion is a base-b expansion in which the base b is allowed to be any finite integer greater than 1. \end{definition} \begin{theorem}\label{Them_BVE} Every base-variable expansion is equal to a rational number. \end{theorem} \begin{proof} According to Definition \ref{Den_BVE}, every base-variable expansion $ x $ must also be a base-b expansion. Then \begin{eqnarray} x = \pm a_{n}\cdots a_{2}a_{1}a_{0}\ .\ a_{-1}a_{-2} \cdots a_{-n} . \end{eqnarray} According to Definition \ref{Den_BBE}, it follows that \begin{eqnarray}\label{For_BVE} &\pm a_{n} \cdots a_{2}a_{1}a_{0}\ .\ a_{-1}a_{-2} \cdots a_{-n} = \pm \frac{{\sum\limits_{i=-n}^{n} a_{i}b^{i+n}}}{b^{n}}.& \end{eqnarray} Since every digit $ a_{i} $ with $ 0 \leq a_{i} < b $ and the base $ b $ are integers, $ \pm \frac{{\sum\limits_{i=-n}^{n} a_{i}b^{i+n}}}{b^{n}} $ must be a rational number. So the claim follows. \end{proof} \begin{theorem}[The Fundamental Theorem of Arithmetic]\label{them_arc} Every natural number greater than 1 is either prime or can be factored as a product of primes in a unique way. \end{theorem} \begin{theorem}\label{Them_BBE} For every constant base b, there exists a rational number that is not equal to any base-b expansion. \end{theorem} \begin{proof} According to the equation (\ref{For_BVE}), every base-b expansion for a constant b may be expressed as follows: \begin{eqnarray} x = \pm \frac{{\sum\limits_{i=-n}^{n} a_{i}b^{i+n}}}{b^{n}}. \end{eqnarray} Since there exist infinitely many primes, there must exist a prime $ q $ such that $ (q,b)=1 $. Since $ q>1 $ and $ b \neq 0 $, it follows from Theorem \ref{them_arc} that for every $ 0 \leq a_i < b $ \begin{eqnarray} q \cdot \sum\limits_{i=-n}^{n} a_{i}b^{i+n} \neq b^{n}. \end{eqnarray} Hence \begin{eqnarray} \pm \frac{\sum\limits_{i=-n}^{n} a_{i}b^{i+n}}{b^{n}} & \neq & \frac{1}{q}, \end{eqnarray} which holds for every $ 0 \leq a_i < b $. So the claim follows. \end{proof} From Theorem \ref{Them_BVE} and Theorem \ref{Them_BBE}, we can conclude the following corollary: \begin{corollary} Base-variable expansions are included in the rational numbers. \end{corollary} According to the arguments above, no algorithm can determine the equalities between infinite simple continued fractions or infinite base-variable expansions and real numbers. As to the limits of infinite simple continued fraction sequences and those of infinite base-variable expansion sequences, they belong to the logical notations and will be discussed in the next section. In summary, both simple continued fractions and base-variable expansions lack logic and fail to denote real numbers. \subsection{Logical Notations} According to Definition \ref{Den_LIM}, a limit is based on an infinite sequence. So the limits of infinite simple continued fraction sequences and those of infinite base-variable expansion sequences are also defined by means of infinite sequences. In 1872, Dedekind and Cantor invented Dedekind cuts and Cauchy sequences respectively to denote number systems. However, both Dedekind cuts and Cauchy sequences are based on the rational number system. In 1889, Peano published a study giving an axiomatic approach to the natural numbers\cite{Ref8}.
The Peano Axioms can also be extended to define the rational numbers. Then both Dedekind cuts and Cauchy sequences join well in logical deduction and thus have sufficiency of logic. In essence, Dedekind cuts and Cauchy sequences introduce infinitely many rational numbers to denote an irrational number. In Dedekind cuts, an irrational cut $ (A, B) $ is defined by two infinite sets $ A $ and $ B $ of rational numbers. In Cauchy sequences, an irrational number is defined as an equivalence class of infinite rational sequences. Although it is feasible to logically define algebraical operations on infinite sets or infinite sequences, it is impossible to intuitively execute these infinite algebraical operations in a finite period of time. So the limits of infinite simple continued fraction sequences and those of infinite base-variable expansion sequences lack intuition and fail to join in algebraical operations. For the same reason, both Dedekind cuts and Cauchy sequences lack intuition and fail to join in algebraical operations. \section{Logical Calculus} Simple continued fractions and base-variable expansions fail to denote real numbers, while the limits of infinite simple continued fraction sequences, the limits of infinite base-variable expansion sequences, the logical Dedekind cuts and Cauchy sequences fail to join in algebraical operations. In mathematical logic, a logical calculus is a formal system that abstracts and analyzes induction and deduction apart from any specific meaning. In this section, however, we construct a logical calculus by virtue of a formal language and deduce numbers to intuitively and logically denote number systems. The logical calculus not only denotes real numbers, but also allows them to join in algebraical operations. The introduction of a formal language aims to let a computer execute real number operations quickly. For clarity, we will explain the logical calculus with natural language. In \cite{Ref9}, the producer ``$ \rightarrow $" substitutes the right permutations for the left permutations to produce new permutations. In \cite{Ref10}, the connectives ``$ \neg $", ``$ \wedge $", ``$ \vee $", ``$ \Rightarrow $" and ``$ \Leftrightarrow $" stand for ``not", ``and", ``or", ``implies" and ``if and only if" respectively. Here, the producer ``$ \rightarrow $" is treated as a predicate symbol and embedded into the logical calculus.
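To make the intended mechanical reading of the producer concrete, the following is a minimal sketch in ordinary programming notation; the function name \texttt{apply\_producer}, the one-occurrence-at-a-time substitution strategy, and the sample rule are merely illustrative assumptions of ours and are not part of the calculus defined below.
\begin{verbatim}
# Illustrative sketch only: apply a production rule  l -> r  to a word w
# by rewriting one occurrence of l at a time, collecting every result.
def apply_producer(w, l, r):
    results = []
    start = 0
    while True:
        i = w.find(l, start)
        if i == -1:
            break
        results.append(w[:i] + r + w[i + len(l):])
        start = i + 1
    return results

# Example: the purely illustrative rule 1 -> 1+1, applied to the word "1+1",
# rewrites either occurrence of "1" and yields the new word "1+1+1".
print(apply_producer("1+1", "1", "1+1"))   # ['1+1+1', '1+1+1']
\end{verbatim}
Each output word is one new permutation obtained from the input word by a single substitution, in the sense described above.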
\begin{definition} $ \{ \Phi, \Psi \} $ is a logical calculus such that: \begin{eqnarray} \label{4.1} && \Phi \{ \\ \label{4.2} && V \{ \emptyset, a, b \cdots \}, \\ \label{4.3} && C \{ \emptyset, 1, + \cdots \}, \\ \label{4.4} && P \{ \emptyset, \in, \subseteq, \rightarrow, |, =, < \cdots \}, \\ \label{4.5} && V \circ C \{ \emptyset, a, b \cdots, 1, + \cdots, aa, ab \cdots, a1, a+ \cdots, ba, bb \cdots, b1, b+ \cdots, \\ \notag && aaa, aab \cdots, aa1, aa+ \cdots, baa, bab \cdots, ba1, ba+ \cdots \}, \\ \label{4.6} && C \circ C \{ \emptyset, 1, + \cdots, 11, 1+ \cdots, 111, 11+ \cdots \}, \\ \label{4.7} && V \circ C \circ P \{ \emptyset, a, b \cdots, 1, + \cdots, \in, \subseteq \cdots, aa, ab \cdots, a1, a+ \cdots, a\in, a\subseteq \cdots, \\ \notag && ba, bb \cdots, b1, b+ \cdots, b\in, b\subseteq \cdots, aaa, aab \cdots, aa1, aa+ \cdots, aa\in, aa\subseteq \cdots, \\ \notag && baa, bab \cdots, ba1, ba+ \cdots, ba\in, ba\subseteq \cdots \}, \\ \label{4.8} && (\hat{a} \in V) \Leftrightarrow ((\hat{a} \equiv \emptyset) \vee (\hat{a} \equiv a) \vee (\hat{a} \equiv b) \cdots), \\ \label{4.9} && (\hat{a} \in C) \Leftrightarrow ((\hat{a} \equiv \emptyset) \vee (\hat{a} \equiv 1) \vee (\hat{a} \equiv +) \cdots), \\ \label{4.10} && (\hat{a} \in (V \circ C)) \Leftrightarrow ((\hat{a} \equiv \emptyset) \vee (\hat{a} \equiv a) \vee (\hat{a} \equiv b) \cdots \vee (\hat{a} \equiv 1) \cdots \vee (\hat{a} \equiv aa) \\ \notag && \vee (\hat{a} \equiv ab) \cdots \vee (\hat{a} \equiv a1) \cdots \vee (\hat{a} \equiv aaa) \vee (\hat{a} \equiv aab) \cdots \vee (\hat{a} \equiv aa1) \cdots), \\ \label{4.11} && (\hat{a} \in (C \circ C)) \Leftrightarrow ((\hat{a} \equiv \emptyset) \vee (\hat{a} \equiv 1) \vee (\hat{a} \equiv +) \cdots \vee (\hat{a} \equiv 11) \vee (\hat{a} \equiv 1+) \cdots \\ \notag && \vee (\hat{a} \equiv 111) \vee (\hat{a} \equiv 11+) \cdots), \\ \label{4.12} && (\hat{a} \in (V \circ C \circ P)) \Leftrightarrow ((\hat{a} \equiv \emptyset) \vee (\hat{a} \equiv a) \vee (\hat{a} \equiv b) \cdots \vee (\hat{a} \equiv \in) \cdots \vee (\hat{a} \equiv aa) \\ \notag && \vee (\hat{a} \equiv ab) \cdots \vee (\hat{a} \equiv a\in) \cdots \vee (\hat{a} \equiv aaa) \vee (\hat{a} \equiv aab) \cdots \vee (\hat{a} \equiv aa\in) \cdots), \\ \label{4.13} && (\bar{a} \in (V \circ C)) \wedge (\bar{b} \in (V \circ C)) \wedge (\bar{c} \in (V \circ C)) \wedge (\bar{d} \in (V \circ C)) \wedge (\bar{e} \in (V \circ C)) \\ \notag && \wedge (\bar{f} \in (V \circ C)) \wedge (\bar{g} \in (V \circ C)) \wedge (\bar{h} \in (V \circ C)) \wedge (\bar{i} \in (V \circ C)) \wedge (\bar{j} \in (V \circ C)) \\ \notag && \cdots \wedge (\bar{\bar{a}} \in (V \circ C \circ P)) \wedge (\bar{\bar{b}} \in (V \circ C \circ P)) \wedge (\bar{\bar{c}} \in (V \circ C \circ P)) \cdots, \\ \label{4.14} && ((\bar{a} \subseteq \{\bar{b},\bar{c}\}) \Leftrightarrow ((\bar{a} \subseteq \bar{b}) \vee (\bar{a} \subseteq \bar{c}))) \wedge \\ \notag && ((\bar{a} \subseteq \{\bar{b},\bar{c},\bar{d}\}) \Leftrightarrow ((\bar{a} \subseteq \bar{b}) \vee (\bar{a} \subseteq \bar{c}) \vee (\bar{a} \subseteq \bar{d}))) \wedge \\ \notag && ((\bar{a} \subseteq \{\bar{b},\bar{c},\bar{d},\bar{e}\}) \Leftrightarrow ((\bar{a} \subseteq \bar{b}) \vee (\bar{a} \subseteq \bar{c}) \vee (\bar{a} \subseteq \bar{d}) \vee (\bar{a} \subseteq \bar{e}))) \wedge \\ \notag && ((\bar{a} \subseteq \{\bar{b},\bar{c},\bar{d},\bar{e},\bar{f}\}) \Leftrightarrow ((\bar{a} \subseteq \bar{b}) \vee (\bar{a} \subseteq \bar{c}) \vee (\bar{a} \subseteq \bar{d}) \vee (\bar{a} \subseteq 
\bar{e}) \vee (\bar{a} \subseteq \bar{f}))) \wedge \\ \notag && ((\bar{a} \subseteq \{\bar{b},\bar{c},\bar{d},\bar{e},\bar{f},\bar{g}\}) \Leftrightarrow ((\bar{a} \subseteq \bar{b}) \vee (\bar{a} \subseteq \bar{c}) \vee (\bar{a} \subseteq \bar{d}) \vee (\bar{a} \subseteq \bar{e}) \vee (\bar{a} \subseteq \bar{f}) \\ \notag && \vee (\bar{a} \subseteq \bar{g}))) \wedge \\ \notag && ((\bar{a} \subseteq \{\bar{b},\bar{c},\bar{d},\bar{e},\bar{f},\bar{g},\bar{h}\}) \Leftrightarrow ((\bar{a} \subseteq \bar{b}) \vee (\bar{a} \subseteq \bar{c}) \vee (\bar{a} \subseteq \bar{d}) \vee (\bar{a} \subseteq \bar{e}) \vee (\bar{a} \subseteq \bar{f}) \\ \notag && \vee (\bar{a} \subseteq \bar{g}) \vee (\bar{a} \subseteq \bar{h}))) \wedge \\ \notag && ((\bar{a} \subseteq \{\bar{b},\bar{c},\bar{d},\bar{e},\bar{f},\bar{g},\bar{h},\bar{i}\}) \Leftrightarrow ((\bar{a} \subseteq \bar{b}) \vee (\bar{a} \subseteq \bar{c}) \vee (\bar{a} \subseteq \bar{d}) \vee (\bar{a} \subseteq \bar{e}) \vee (\bar{a} \subseteq \bar{f}) \\ \notag && \vee (\bar{a} \subseteq \bar{g}) \vee (\bar{a} \subseteq \bar{h}) \vee (\bar{a} \subseteq \bar{i}))) \wedge \\ \notag && ((\bar{a} \subseteq \{\bar{b},\bar{c},\bar{d},\bar{e},\bar{f},\bar{g},\bar{h},\bar{i},\bar{j}\}) \Leftrightarrow ((\bar{a} \subseteq \bar{b}) \vee (\bar{a} \subseteq \bar{c}) \vee (\bar{a} \subseteq \bar{d}) \vee (\bar{a} \subseteq \bar{e}) \vee (\bar{a} \subseteq \bar{f}) \\ \notag && \vee (\bar{a} \subseteq \bar{g}) \vee (\bar{a} \subseteq \bar{h}) \vee (\bar{a} \subseteq \bar{i}) \vee (\bar{a} \subseteq \bar{j}))) \cdots \\ \notag && \}, \\ \label{4.15} && \Psi \{ \\ \label{4.16} && (\bar{a} \subseteq \bar{b}) \Leftrightarrow (\bar{b} = \bar{c}\bar{a}\bar{d}), \\ \label{4.17} && (\bar{a} \rightarrow \bar{b}\bar{c}\bar{d}) \wedge (\bar{c} \rightarrow \bar{e}) {\mathbb R}ightarrow (\bar{a} \rightarrow \bar{b}\bar{e}\bar{d}), \\ \label{4.18} && (\bar{\bar{a}} \rightarrow \bar{\bar{b}} | \bar{\bar{c}}) {\mathbb R}ightarrow ((\bar{\bar{a}} \rightarrow \bar{\bar{b}}) \wedge (\bar{\bar{a}} \rightarrow \bar{\bar{c}})), \\ \label{4.19} && (\bar{\bar{a}} | \bar{\bar{b}} \rightarrow \bar{\bar{c}}) {\mathbb R}ightarrow ((\bar{\bar{a}} \rightarrow \bar{\bar{c}}) \wedge (\bar{\bar{b}} \rightarrow \bar{\bar{c}})), \\ \label{4.20} && (\bar{a} < \bar{b}) {\mathbb R}ightarrow \neg(\bar{b} < \bar{a}), \\ \label{4.21} && (\bar{a} < \bar{b}) {\mathbb R}ightarrow \neg(\bar{a} = \bar{b}), \\ \label{4.22} && (\bar{a} < \bar{b}) \wedge (\bar{b} < \bar{c}) {\mathbb R}ightarrow (\bar{a} < \bar{c}), \\ \label{4.23} && (\bar{a} < \bar{b}) \wedge (\bar{a} \in (C \circ C)) \wedge (\bar{b} \in (C \circ C)) {\mathbb R}ightarrow (\bar{a} \wedge \bar{b}), \\ \label{4.24} && (\bar{a} < \bar{b}\bar{c}\bar{d}) \wedge (\bar{c} = \bar{e}) {\mathbb R}ightarrow (\bar{a} < \bar{b}\bar{e}\bar{d}), \\ \label{4.25} && (\bar{a}\bar{b}\bar{c} < \bar{d}) \wedge (\bar{b} = \bar{e}) {\mathbb R}ightarrow (\bar{a}\bar{e}\bar{c} < \bar{d}), \\ \label{4.26} && (\bar{a} < \bar{b}\bar{c}\bar{d}) \wedge (\bar{c} \rightarrow \bar{e}) \wedge \neg(\bar{c} \subseteq \{\bar{a},\bar{b},\bar{d}\}) {\mathbb R}ightarrow (\bar{a} < \bar{b}\bar{e}\bar{d}), \\ \label{4.27} && (\bar{a} < \bar{b}\bar{c}\bar{d}\bar{c}\bar{e}) \wedge (\bar{c} \rightarrow \bar{f}) \wedge \neg(\bar{c} \subseteq \{\bar{a},\bar{b},\bar{d},\bar{e}\}) {\mathbb R}ightarrow (\bar{a} < \bar{b}\bar{f}\bar{d}\bar{f}\bar{e}), \\ \label{4.28} && (\bar{a}\bar{b}\bar{c} < \bar{d}) \wedge (\bar{b} \rightarrow \bar{e}) \wedge \neg(\bar{b} \subseteq 
\{\bar{a},\bar{c},\bar{d}\}) {\mathbb R}ightarrow (\bar{a}\bar{e}\bar{c} < \bar{d}), \\ \label{4.29} && (\bar{a}\bar{b}\bar{c} < \bar{d}\bar{b}\bar{e}) \wedge (\bar{b} \rightarrow \bar{f}) \wedge \neg(\bar{b} \subseteq \{\bar{a},\bar{c},\bar{d},\bar{e}\}) {\mathbb R}ightarrow (\bar{a}\bar{f}\bar{c} < \bar{d}\bar{f}\bar{e}), \\ \label{4.30} && (\bar{a}\bar{b}\bar{c} < \bar{d}\bar{b}\bar{e}\bar{b}\bar{f}) \wedge (\bar{b} \rightarrow \bar{g}) \wedge \neg(\bar{b} \subseteq \{\bar{a},\bar{c},\bar{d},\bar{e},\bar{f}\}) {\mathbb R}ightarrow (\bar{a}\bar{g}\bar{c} < \bar{d}\bar{g}\bar{e}\bar{g}\bar{f}), \\ \label{4.31} && (\bar{a}\bar{b}\bar{c}\bar{b}\bar{d} < \bar{e}) \wedge (\bar{b} \rightarrow \bar{f}) \wedge \neg(\bar{b} \subseteq \{\bar{a},\bar{c},\bar{d},\bar{e}\}) {\mathbb R}ightarrow (\bar{a}\bar{f}\bar{c}\bar{f}\bar{d} < \bar{e}), \\ \label{4.32} && (\bar{a}\bar{b}\bar{c}\bar{b}\bar{d} < \bar{e}\bar{b}\bar{f}) \wedge (\bar{b} \rightarrow \bar{g}) \wedge \neg(\bar{b} \subseteq \{\bar{a},\bar{c},\bar{d},\bar{e},\bar{f}\}) {\mathbb R}ightarrow (\bar{a}\bar{g}\bar{c}\bar{g}\bar{d} < \bar{e}\bar{g}\bar{f}), \\ \label{4.33} && (\bar{a}\bar{b}\bar{c}\bar{b}\bar{d} < \bar{e}\bar{b}\bar{f}\bar{b}\bar{g}) \wedge (\bar{b} \rightarrow \bar{h}) \wedge \neg(\bar{b} \subseteq \{\bar{a},\bar{c},\bar{d},\bar{e},\bar{f},\bar{g}\}) {\mathbb R}ightarrow (\bar{a}\bar{h}\bar{c}\bar{h}\bar{d} < \bar{e}\bar{h}\bar{f}\bar{h}\bar{g}), \\ \label{4.34} && \bar{a} = \bar{a}, \\ \label{4.35} && (\bar{a} = \bar{b}) {\mathbb R}ightarrow (\bar{b} = \bar{a}), \\ \label{4.36} && (\bar{a} = \bar{b}) {\mathbb R}ightarrow \neg (\bar{a} < \bar{b}), \\ \label{4.37} && (\bar{a} = \bar{b}\bar{c}\bar{d}) \wedge (\bar{c} = \bar{e}) {\mathbb R}ightarrow (\bar{a} = \bar{b}\bar{e}\bar{d}), \\ \label{4.38} && (\bar{a}\bar{b}\bar{c}) \wedge (\bar{b} = \bar{d}) {\mathbb R}ightarrow (\bar{a}\bar{b}\bar{c} = \bar{a}\bar{d}\bar{c}), \\ \label{4.39} && (\bar{a} = \bar{b}\bar{c}\bar{d}) \wedge (\bar{c} \rightarrow \bar{e}) \wedge \neg(\bar{c} \subseteq \{\bar{a},\bar{b},\bar{d}\}) {\mathbb R}ightarrow (\bar{a} = \bar{b}\bar{e}\bar{d}), \\ \label{4.40} && (\bar{a} = \bar{b}\bar{c}\bar{d}\bar{c}\bar{e}) \wedge (\bar{c} \rightarrow \bar{f}) \wedge \neg(\bar{c} \subseteq \{\bar{a},\bar{b},\bar{d},\bar{e}\}) {\mathbb R}ightarrow (\bar{a} = \bar{b}\bar{f}\bar{d}\bar{f}\bar{e}), \\ \label{4.41} && (\bar{a}\bar{b}\bar{c} = \bar{d}\bar{b}\bar{e}) \wedge (\bar{b} \rightarrow \bar{f}) \wedge \neg(\bar{b} \subseteq \{\bar{a},\bar{c},\bar{d},\bar{e}\}) {\mathbb R}ightarrow (\bar{a}\bar{f}\bar{c} = \bar{d}\bar{f}\bar{e}), \\ \label{4.42} && (\bar{a}\bar{b}\bar{c} = \bar{d}\bar{b}\bar{e}\bar{b}\bar{f}) \wedge (\bar{b} \rightarrow \bar{g}) \wedge \neg(\bar{b} \subseteq \{\bar{a},\bar{c},\bar{d},\bar{e},\bar{f}\}) {\mathbb R}ightarrow (\bar{a}\bar{g}\bar{c} = \bar{d}\bar{g}\bar{e}\bar{g}\bar{f}), \\ \label{4.43} && (\bar{a}\bar{b}\bar{c}\bar{b}\bar{d} = \bar{e}\bar{b}\bar{f}\bar{b}\bar{g}) \wedge (\bar{b} \rightarrow \bar{h}) \wedge \neg(\bar{b} \subseteq \{\bar{a},\bar{c},\bar{d},\bar{e},\bar{f},\bar{g}\}) {\mathbb R}ightarrow (\bar{a}\bar{h}\bar{c}\bar{h}\bar{d} = \bar{e}\bar{h}\bar{f}\bar{h}\bar{g}) \\ \notag && \}. \end{eqnarray} \end{definition} First, we will explain the primitive symbols of the logical calculus $ \{ \Phi, \Psi \} $ with natural language. The symbols ``$ \{ $", ``$ \} $", ``$ , $", ``$ ( $", ``$ ) $" are punctuation. The symbol ``$ \emptyset $" indicates emptiness. The symbol ``$ \cdots $" indicates an omission. 
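Before walking through the axioms one by one, it may help to read them as statements about finite strings. The sketch below is purely our own illustration (the function names are hypothetical and not part of the calculus): it renders the predicate $ \subseteq $ of (\ref{4.16}) as the substring relation and a production $ \bar{c} \rightarrow \bar{e} $ as the replacement of one occurrence of $ \bar{c} $ inside a word, which is the reading used in substitution axioms such as (\ref{4.17}) and (\ref{4.26}).
\begin{verbatim}
# Our own illustrative reading (not part of the calculus): words are plain
# strings over the variables and constants; subseteq(a, b) mirrors the axiom
# (a subseteq b) <=> (b = c a d) by testing whether a occurs inside b, and
# substitute replaces exactly one occurrence of a subword, as in the
# substitution axioms for ->, < and =.
def subseteq(a: str, b: str) -> bool:
    """a is 'contained in' b iff b = c + a + d for some (possibly empty) c, d."""
    return a in b

def substitute(word: str, old: str, new: str) -> list[str]:
    """All words obtained by replacing exactly one occurrence of old by new."""
    out, i = [], word.find(old)
    while i != -1:
        out.append(word[:i] + new + word[i + len(old):])
        i = word.find(old, i + 1)
    return out

print(subseteq("1+", "a1+b"))            # True: "1+" occurs inside "a1+b"
print(substitute("b1+a", "a", "1+a"))    # ['b1+1+a']
\end{verbatim}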
(\ref{4.1}) denotes $ \Phi $ as a set of notations and particular axioms between $ \{ $ and $ \} $. Different logical calculus correspond to different notations and particular axioms. (\ref{4.2}) denotes $ V $ as a set of variables between $ \{ $ and $ \} $. (\ref{4.3}) denotes $ C $ as a set of constants between $ \{ $ and $ \} $. (\ref{4.4}) denotes $ P $ as a set of predicate symbols between $ \{ $ and $ \} $. (\ref{4.5}) denotes $ V \circ C $ as a set of concatenations between $ V $ and $ C $. (\ref{4.6}) denotes $ C \circ C $ as a set of concatenations between $ C $ and $ C $. (\ref{4.7}) denotes $ V \circ C \circ P $ as a set of concatenations among $ V $, $ C $ and $ P $. (\ref{4.8}) $ \sim $ (\ref{4.12}) define a set of axioms on the binary predicate symbol $ \in $. (\ref{4.13}) defines an axiom on new variables ranging over $ V \circ C $. (\ref{4.14}) defines an axiom on the binary predicate symbol $ \subseteq $. (\ref{4.15}) denotes $ \Psi $ as a set of general axioms between $ \{ $ and $ \} $. Different logical calculus correspond to the same general axioms. (\ref{4.16}) defines an axiom on the binary predicate symbol $ \subseteq $. (\ref{4.17}) defines an axiom on the binary predicate symbol $ \rightarrow $. (\ref{4.18}) $ \sim $ (\ref{4.19}) define a set of axioms on the binary predicate symbol $ | $. (\ref{4.20}) $ \sim $ (\ref{4.33}) define a set of axioms on the binary predicate symbol $ < $. (\ref{4.34}) $ \sim $ (\ref{4.43}) define a set of axioms on the binary predicate symbol $ = $. Then, we will prove that the logical calculus $ \{ \Phi, \Psi \} $ can deduce common number systems. \begin{definition} In a logical calculus $ \{ \Phi, \Psi \} $, if $ \bar{a} \equiv true $, then $ \bar{a} $ is a number. \end{definition} \begin{theorem} \begin{eqnarray} \notag && \textit{If} \ \Phi \{ \\ \label{4.44} && V \{ \emptyset, a, b \}, \\ \label{4.45} && C \{ \emptyset, 1, + \}, \\ \label{4.46} && P \{ \emptyset, \in, \subseteq, \rightarrow, |, =, < \}, \\ \label{4.47} && V \circ C \{ \emptyset, a, b \cdots, 1, + \cdots, aa, ab \cdots, a1, a+ \cdots, ba, bb \cdots, b1, b+ \cdots, \\ \notag && aaa, aab \cdots, aa1, aa+ \cdots, baa, bab \cdots, ba1, ba+ \cdots \}, \\ \label{4.48} && C \circ C \{ \emptyset, 1, + \cdots, 11, 1+ \cdots, 111, 11+ \cdots \}, \\ \label{4.49} && V \circ C \circ P \{ \emptyset, a, b \cdots, 1, + \cdots, \in, \subseteq \cdots, aa, ab \cdots, a1, a+ \cdots, a\in, a\subseteq \cdots, \\ \notag && ba, bb \cdots, b1, b+ \cdots, b\in, b\subseteq \cdots, aaa, aab \cdots, aa1, aa+ \cdots, aa\in, aa\subseteq \cdots, \\ \notag && baa, bab \cdots, ba1, ba+ \cdots, ba\in, ba\subseteq \cdots \}, \\ \label{4.50} && (\hat{a} \in V) \Leftrightarrow ((\hat{a} \equiv \emptyset) \vee (\hat{a} \equiv a) \vee (\hat{a} \equiv b) \cdots), \\ \label{4.51} && (\hat{a} \in C) \Leftrightarrow ((\hat{a} \equiv \emptyset) \vee (\hat{a} \equiv 1) \vee (\hat{a} \equiv +) \cdots), \\ \label{4.52} && (\hat{a} \in (V \circ C)) \Leftrightarrow ((\hat{a} \equiv \emptyset) \vee (\hat{a} \equiv a) \vee (\hat{a} \equiv b) \cdots \vee (\hat{a} \equiv 1) \cdots \vee (\hat{a} \equiv aa) \\ \notag && \vee (\hat{a} \equiv ab) \cdots \vee (\hat{a} \equiv a1) \cdots \vee (\hat{a} \equiv aaa) \vee (\hat{a} \equiv aab) \cdots \vee (\hat{a} \equiv aa1) \cdots), \\ \label{4.53} && (\hat{a} \in (C \circ C)) \Leftrightarrow ((\hat{a} \equiv \emptyset) \vee (\hat{a} \equiv 1) \vee (\hat{a} \equiv +) \cdots \vee (\hat{a} \equiv 11) \vee (\hat{a} \equiv 1+) \cdots \\ \notag && \vee (\hat{a} \equiv 111) \vee 
(\hat{a} \equiv 11+) \cdots), \\ \label{4.54} && (\hat{a} \in (V \circ C \circ P)) \Leftrightarrow ((\hat{a} \equiv \emptyset) \vee (\hat{a} \equiv a) \vee (\hat{a} \equiv b) \cdots \vee (\hat{a} \equiv \in) \cdots \vee (\hat{a} \equiv aa) \\ \notag && \vee (\hat{a} \equiv ab) \cdots \vee (\hat{a} \equiv a\in) \cdots \vee (\hat{a} \equiv aaa) \vee (\hat{a} \equiv aab) \cdots \vee (\hat{a} \equiv aa\in) \cdots), \\ \label{4.55} && (\bar{a} \in (V \circ C)) \wedge (\bar{b} \in (V \circ C)) \wedge (\bar{c} \in (V \circ C)) \wedge (\bar{d} \in (V \circ C)) \wedge (\bar{e} \in (V \circ C)) \\ \notag && \wedge (\bar{f} \in (V \circ C)) \wedge (\bar{g} \in (V \circ C)) \wedge (\bar{h} \in (V \circ C)) \wedge (\bar{i} \in (V \circ C)) \wedge (\bar{j} \in (V \circ C)) \\ \notag && \wedge (\bar{\bar{a}} \in (V \circ C \circ P)) \wedge (\bar{\bar{b}} \in (V \circ C \circ P)) \wedge (\bar{\bar{c}} \in (V \circ C \circ P)), \\ \label{4.56} && ((\bar{a} \subseteq \{\bar{b},\bar{c}\}) \Leftrightarrow ((\bar{a} \subseteq \bar{b}) \vee (\bar{a} \subseteq \bar{c}))) \wedge \\ \notag && ((\bar{a} \subseteq \{\bar{b},\bar{c},\bar{d}\}) \Leftrightarrow ((\bar{a} \subseteq \bar{b}) \vee (\bar{a} \subseteq \bar{c}) \vee (\bar{a} \subseteq \bar{d}))) \wedge \\ \notag && ((\bar{a} \subseteq \{\bar{b},\bar{c},\bar{d},\bar{e}\}) \Leftrightarrow ((\bar{a} \subseteq \bar{b}) \vee (\bar{a} \subseteq \bar{c}) \vee (\bar{a} \subseteq \bar{d}) \vee (\bar{a} \subseteq \bar{e}))) \wedge \\ \notag && ((\bar{a} \subseteq \{\bar{b},\bar{c},\bar{d},\bar{e},\bar{f}\}) \Leftrightarrow ((\bar{a} \subseteq \bar{b}) \vee (\bar{a} \subseteq \bar{c}) \vee (\bar{a} \subseteq \bar{d}) \vee (\bar{a} \subseteq \bar{e}) \vee (\bar{a} \subseteq \bar{f}))) \wedge \\ \notag && ((\bar{a} \subseteq \{\bar{b},\bar{c},\bar{d},\bar{e},\bar{f},\bar{g}\}) \Leftrightarrow ((\bar{a} \subseteq \bar{b}) \vee (\bar{a} \subseteq \bar{c}) \vee (\bar{a} \subseteq \bar{d}) \vee (\bar{a} \subseteq \bar{e}) \vee (\bar{a} \subseteq \bar{f}) \\ \notag && \vee (\bar{a} \subseteq \bar{g}))) \wedge \\ \notag && ((\bar{a} \subseteq \{\bar{b},\bar{c},\bar{d},\bar{e},\bar{f},\bar{g},\bar{h}\}) \Leftrightarrow ((\bar{a} \subseteq \bar{b}) \vee (\bar{a} \subseteq \bar{c}) \vee (\bar{a} \subseteq \bar{d}) \vee (\bar{a} \subseteq \bar{e}) \vee (\bar{a} \subseteq \bar{f}) \\ \notag && \vee (\bar{a} \subseteq \bar{g}) \vee (\bar{a} \subseteq \bar{h}))) \wedge \\ \notag && ((\bar{a} \subseteq \{\bar{b},\bar{c},\bar{d},\bar{e},\bar{f},\bar{g},\bar{h},\bar{i}\}) \Leftrightarrow ((\bar{a} \subseteq \bar{b}) \vee (\bar{a} \subseteq \bar{c}) \vee (\bar{a} \subseteq \bar{d}) \vee (\bar{a} \subseteq \bar{e}) \vee (\bar{a} \subseteq \bar{f}) \\ \notag && \vee (\bar{a} \subseteq \bar{g}) \vee (\bar{a} \subseteq \bar{h}) \vee (\bar{a} \subseteq \bar{i}))) \wedge \\ \notag && ((\bar{a} \subseteq \{\bar{b},\bar{c},\bar{d},\bar{e},\bar{f},\bar{g},\bar{h},\bar{i},\bar{j}\}) \Leftrightarrow ((\bar{a} \subseteq \bar{b}) \vee (\bar{a} \subseteq \bar{c}) \vee (\bar{a} \subseteq \bar{d}) \vee (\bar{a} \subseteq \bar{e}) \vee (\bar{a} \subseteq \bar{f}) \\ \notag && \vee (\bar{a} \subseteq \bar{g}) \vee (\bar{a} \subseteq \bar{h}) \vee (\bar{a} \subseteq \bar{i}) \vee (\bar{a} \subseteq \bar{j}))), \\ \label{4.57} && a \rightarrow 1|1+a, \\ \label{4.58} && a < 1+a, \\ \label{4.59} && \bar{a} \wedge \bar{b} {\mathbb R}ightarrow (\bar{a}+\bar{b} = \bar{b}+\bar{a}) \\ \notag && \}, \end{eqnarray} then $ N \{ \Phi, \Psi \} $ denotes natural number system. 
\end{theorem} \begin{proof} \begin{eqnarray*} (A1) & (a \rightarrow 1|1+a) {\mathbb R}ightarrow (a \rightarrow 1) & by (\ref{4.57}),(\ref{4.18}) \\ (A2) & {\mathbb R}ightarrow (a \rightarrow 1+a) & by (\ref{4.19}) \\ (A3) & (a < 1+a) {\mathbb R}ightarrow (1 < 1+1) & by (\ref{4.58}),(A1),(\ref{4.29}) \\ (A4) & {\mathbb R}ightarrow 1 & by (\ref{4.23}) \\ (A5) & {\mathbb R}ightarrow (1+1) & by (A3),(\ref{4.23}) \\ (A6) & {\mathbb R}ightarrow (1+a < 1+1+a) & by (\ref{4.58}),(A2),(\ref{4.29}) \\ (A7) & {\mathbb R}ightarrow (1+1 < 1+1+1) & by (A1),(\ref{4.29}) \\ (A8) & {\mathbb R}ightarrow (1+1) & by (\ref{4.23}) \\ (A9) & {\mathbb R}ightarrow (1+1+1) & by (A7),(\ref{4.23}) \\ \vdots & \vdots & \vdots \end{eqnarray*} Then we deduce the numbers from $ N \{ \Phi, \Psi \} $: \begin{eqnarray*} & 1, 1+1, 1+1+1, 1+1+1+1 \cdots & \end{eqnarray*} \begin{eqnarray*} (B1) & 1+1 = 1+1 & by (A4),(\ref{4.59}) \\ (B2) & 1+1+1 = 1+1+1 & by (A4),(A5),(\ref{4.59}) \\ (B3) & 1+1+1+1 = 1+1+1+1 & by (A5),(\ref{4.59}) \\ \vdots & \vdots & \vdots \end{eqnarray*} Then we deduce the equalities on deducible numbers from $ N \{ \Phi, \Psi \} $: \begin{eqnarray*} & 1+1 = 1+1, 1+1+1 = 1+1+1, 1+1+1+1 = 1+1+1+1 \cdots & \end{eqnarray*} The deducible numbers correspond to the natural numbers as follows: \begin{eqnarray*} 1 & \equiv & 1, \\ 1+1 & \equiv & 2, \\ 1+1+1 & \equiv & 3, \\ \vdots & \vdots & \vdots . \end{eqnarray*} The equalities on deducible numbers correspond to the addition in natural number system. So the claim follows. \end{proof} \begin{theorem} \begin{eqnarray} \notag && \textit{If} \ \Phi \{ \\ \label{4.60} && V \{ \emptyset, a, b, c \}, \\ \label{4.61} && C \{ \emptyset, 1, +, [, ], - \}, \\ \label{4.62} && P \{ \emptyset, \in, \subseteq, \rightarrow, |, =, < \}, \\ \label{4.63} && V \circ C \{ \emptyset, a, b \cdots, 1, + \cdots, aa, ab \cdots, a1, a+ \cdots, ba, bb \cdots, b1, b+ \cdots, \\ \notag && aaa, aab \cdots, aa1, aa+ \cdots, baa, bab \cdots, ba1, ba+ \cdots \}, \\ \label{4.64} && C \circ C \{ \emptyset, 1, + \cdots, 11, 1+ \cdots, 111, 11+ \cdots \}, \\ \label{4.65} && V \circ C \circ P \{ \emptyset, a, b \cdots, 1, + \cdots, \in, \subseteq \cdots, aa, ab \cdots, a1, a+ \cdots, a\in, a\subseteq \cdots, \\ \notag && ba, bb \cdots, b1, b+ \cdots, b\in, b\subseteq \cdots, aaa, aab \cdots, aa1, aa+ \cdots, aa\in, aa\subseteq \cdots, \\ \notag && baa, bab \cdots, ba1, ba+ \cdots, ba\in, ba\subseteq \cdots \}, \\ \label{4.66} && (\hat{a} \in V) \Leftrightarrow ((\hat{a} \equiv \emptyset) \vee (\hat{a} \equiv a) \vee (\hat{a} \equiv b) \cdots), \\ \label{4.67} && (\hat{a} \in C) \Leftrightarrow ((\hat{a} \equiv \emptyset) \vee (\hat{a} \equiv 1) \vee (\hat{a} \equiv +) \cdots), \\ \label{4.68} && (\hat{a} \in (V \circ C)) \Leftrightarrow ((\hat{a} \equiv \emptyset) \vee (\hat{a} \equiv a) \vee (\hat{a} \equiv b) \cdots \vee (\hat{a} \equiv 1) \cdots \vee (\hat{a} \equiv aa) \\ \notag && \vee (\hat{a} \equiv ab) \cdots \vee (\hat{a} \equiv a1) \cdots \vee (\hat{a} \equiv aaa) \vee (\hat{a} \equiv aab) \cdots \vee (\hat{a} \equiv aa1) \cdots), \\ \label{4.69} && (\hat{a} \in (C \circ C)) \Leftrightarrow ((\hat{a} \equiv \emptyset) \vee (\hat{a} \equiv 1) \vee (\hat{a} \equiv +) \cdots \vee (\hat{a} \equiv 11) \vee (\hat{a} \equiv 1+) \cdots \\ \notag && \vee (\hat{a} \equiv 111) \vee (\hat{a} \equiv 11+) \cdots), \\ \label{4.70} && (\hat{a} \in (V \circ C \circ P)) \Leftrightarrow ((\hat{a} \equiv \emptyset) \vee (\hat{a} \equiv a) \vee (\hat{a} \equiv b) \cdots \vee (\hat{a} \equiv \in) \cdots \vee 
(\hat{a} \equiv aa) \\ \notag && \vee (\hat{a} \equiv ab) \cdots \vee (\hat{a} \equiv a\in) \cdots \vee (\hat{a} \equiv aaa) \vee (\hat{a} \equiv aab) \cdots \vee (\hat{a} \equiv aa\in) \cdots), \\ \label{4.71} && (\bar{a} \in (V \circ C)) \wedge (\bar{b} \in (V \circ C)) \wedge (\bar{c} \in (V \circ C)) \wedge (\bar{d} \in (V \circ C)) \wedge (\bar{e} \in (V \circ C)) \\ \notag && \wedge (\bar{f} \in (V \circ C)) \wedge (\bar{g} \in (V \circ C)) \wedge (\bar{h} \in (V \circ C)) \wedge (\bar{i} \in (V \circ C)) \wedge (\bar{j} \in (V \circ C)) \\ \notag && \wedge (\bar{\bar{a}} \in (V \circ C \circ P)) \wedge (\bar{\bar{b}} \in (V \circ C \circ P)) \wedge (\bar{\bar{c}} \in (V \circ C \circ P)), \\ \label{4.72} && ((\bar{a} \subseteq \{\bar{b},\bar{c}\}) \Leftrightarrow ((\bar{a} \subseteq \bar{b}) \vee (\bar{a} \subseteq \bar{c}))) \wedge \\ \notag && ((\bar{a} \subseteq \{\bar{b},\bar{c},\bar{d}\}) \Leftrightarrow ((\bar{a} \subseteq \bar{b}) \vee (\bar{a} \subseteq \bar{c}) \vee (\bar{a} \subseteq \bar{d}))) \wedge \\ \notag && ((\bar{a} \subseteq \{\bar{b},\bar{c},\bar{d},\bar{e}\}) \Leftrightarrow ((\bar{a} \subseteq \bar{b}) \vee (\bar{a} \subseteq \bar{c}) \vee (\bar{a} \subseteq \bar{d}) \vee (\bar{a} \subseteq \bar{e}))) \wedge \\ \notag && ((\bar{a} \subseteq \{\bar{b},\bar{c},\bar{d},\bar{e},\bar{f}\}) \Leftrightarrow ((\bar{a} \subseteq \bar{b}) \vee (\bar{a} \subseteq \bar{c}) \vee (\bar{a} \subseteq \bar{d}) \vee (\bar{a} \subseteq \bar{e}) \vee (\bar{a} \subseteq \bar{f}))) \wedge \\ \notag && ((\bar{a} \subseteq \{\bar{b},\bar{c},\bar{d},\bar{e},\bar{f},\bar{g}\}) \Leftrightarrow ((\bar{a} \subseteq \bar{b}) \vee (\bar{a} \subseteq \bar{c}) \vee (\bar{a} \subseteq \bar{d}) \vee (\bar{a} \subseteq \bar{e}) \vee (\bar{a} \subseteq \bar{f}) \\ \notag && \vee (\bar{a} \subseteq \bar{g}))) \wedge \\ \notag && ((\bar{a} \subseteq \{\bar{b},\bar{c},\bar{d},\bar{e},\bar{f},\bar{g},\bar{h}\}) \Leftrightarrow ((\bar{a} \subseteq \bar{b}) \vee (\bar{a} \subseteq \bar{c}) \vee (\bar{a} \subseteq \bar{d}) \vee (\bar{a} \subseteq \bar{e}) \vee (\bar{a} \subseteq \bar{f}) \\ \notag && \vee (\bar{a} \subseteq \bar{g}) \vee (\bar{a} \subseteq \bar{h}))) \wedge \\ \notag && ((\bar{a} \subseteq \{\bar{b},\bar{c},\bar{d},\bar{e},\bar{f},\bar{g},\bar{h},\bar{i}\}) \Leftrightarrow ((\bar{a} \subseteq \bar{b}) \vee (\bar{a} \subseteq \bar{c}) \vee (\bar{a} \subseteq \bar{d}) \vee (\bar{a} \subseteq \bar{e}) \vee (\bar{a} \subseteq \bar{f}) \\ \notag && \vee (\bar{a} \subseteq \bar{g}) \vee (\bar{a} \subseteq \bar{h}) \vee (\bar{a} \subseteq \bar{i}))) \wedge \\ \notag && ((\bar{a} \subseteq \{\bar{b},\bar{c},\bar{d},\bar{e},\bar{f},\bar{g},\bar{h},\bar{i},\bar{j}\}) \Leftrightarrow ((\bar{a} \subseteq \bar{b}) \vee (\bar{a} \subseteq \bar{c}) \vee (\bar{a} \subseteq \bar{d}) \vee (\bar{a} \subseteq \bar{e}) \vee (\bar{a} \subseteq \bar{f}) \\ \notag && \vee (\bar{a} \subseteq \bar{g}) \vee (\bar{a} \subseteq \bar{h}) \vee (\bar{a} \subseteq \bar{i}) \vee (\bar{a} \subseteq \bar{j}))), \\ \label{4.73} && a \rightarrow 1|[aba], \\ \label{4.74} && b|c \rightarrow +|-, \\ \label{4.75} && a < [1+a], \\ \label{4.76} && \bar{a} \wedge \bar{b} \wedge \bar{c} {\mathbb R}ightarrow ([\bar{a}b\bar{b}c\bar{c}] = [[\bar{a}b\bar{b}]c\bar{c}]), \\ \label{4.77} && \bar{a} {\mathbb R}ightarrow ([\bar{a}-\bar{a}] = [1-1]), \\ \label{4.78} && \bar{a} \wedge \bar{b} {\mathbb R}ightarrow ([\bar{a}+\bar{b}] = [\bar{b}+\bar{a}]), \\ \label{4.79} && \bar{a} \wedge \bar{b} {\mathbb R}ightarrow ([[\bar{a}-\bar{b}]+\bar{b}] 
= \bar{a}), \\ \label{4.80} && \bar{a} \wedge \bar{b} \wedge \bar{c} {\mathbb R}ightarrow ([\bar{a}-\bar{b}+\bar{c}] = [\bar{a}+\bar{c}-\bar{b}]), \\ \label{4.81} && \bar{a} \wedge \bar{b} \wedge \bar{c} {\mathbb R}ightarrow ([\bar{a}+[\bar{b}+\bar{c}]] = [[\bar{a}+\bar{b}]+\bar{c}]), \\ \label{4.82} && \bar{a} \wedge \bar{b} \wedge \bar{c} {\mathbb R}ightarrow ([\bar{a}+[\bar{b}-\bar{c}]] = [[\bar{a}+\bar{b}]-\bar{c}]), \\ \label{4.83} && \bar{a} \wedge \bar{b} \wedge \bar{c} {\mathbb R}ightarrow ([\bar{a}-[\bar{b}+\bar{c}]] = [[\bar{a}-\bar{b}]-\bar{c}]), \\ \label{4.84} && \bar{a} \wedge \bar{b} \wedge \bar{c} {\mathbb R}ightarrow ([\bar{a}-[\bar{b}-\bar{c}]] = [[\bar{a}-\bar{b}]+\bar{c}] \\ \notag && \}, \end{eqnarray} then $ Z \{ \Phi, \Psi \} $ denotes integral number system. \end{theorem} \begin{proof} \begin{eqnarray*} (A1) & (a \rightarrow 1|[aba]) {\mathbb R}ightarrow (a \rightarrow 1) & by (\ref{4.73}),(\ref{4.18}) \\ (A2) & {\mathbb R}ightarrow (a \rightarrow [aba]) & by (\ref{4.73}),(\ref{4.18}) \\ (A3) & {\mathbb R}ightarrow (a \rightarrow [1b1]) & by (A2),(A1),(\ref{4.17}) \\ (A4) & (b|c \rightarrow +|-) {\mathbb R}ightarrow (b|c \rightarrow -) & by (\ref{4.74}),(\ref{4.18}) \\ (A5) & {\mathbb R}ightarrow (b \rightarrow -) & by (\ref{4.19}) \\ (A6) & {\mathbb R}ightarrow (a \rightarrow [1-1]) & by (A3),(A5),(\ref{4.17}) \\ (A7) & (a < [1+a]) {\mathbb R}ightarrow (1 < [1+1]) & by (\ref{4.75}),(A1),(\ref{4.29}) \\ (A8) & {\mathbb R}ightarrow 1 & by (\ref{4.23}) \\ (A9) & {\mathbb R}ightarrow [1+1] & \ by (A7),(\ref{4.23}) \\ (A10) & (a < [1+a]) {\mathbb R}ightarrow ([1-1] < [1+[1-1]]) & by (\ref{4.75}),(A6),(\ref{4.29}) \\ (A11) & {\mathbb R}ightarrow ([1-1] < [[1-1]+1]) & by (\ref{4.78}),(\ref{4.24}) \\ (A12) & {\mathbb R}ightarrow ([1-1] < 1) & by (\ref{4.79}),(\ref{4.24}) \\ (A13) & {\mathbb R}ightarrow [1-1] & by (\ref{4.23}) \\ \vdots & \vdots & \vdots \end{eqnarray*} Then we deduce the numbers from $ Z \{ \Phi, \Psi \} $: \begin{eqnarray*} & [1-1], 1, [1-1-1], [1+1], [1+1+1] \cdots & \end{eqnarray*} \begin{eqnarray*} (B1) & [1+1] = [1+1] & by (A8),(\ref{4.78}) \\ (B2) & [1+[1+1]] = [[1+1]+1] & by (A8),(A9),(\ref{4.78}) \\ (B3) & [1+[1-1]] = [[1-1]+1] & by (A8),(A13),(\ref{4.78}) \\ (B4) & [[1+1]+[1-1]] = [[1-1]+[1+1]] & by (A9),(A13),(\ref{4.78}) \\ \vdots & \vdots & \vdots \end{eqnarray*} Then we deduce the equalities on deducible numbers from $ Z \{ \Phi, \Psi \} $: \begin{eqnarray*} & [1+[1+1]] = [[1+1]+1], [1+[1-1]] = [[1-1]+1] \cdots & \end{eqnarray*} The deducible numbers correspond to the integral numbers as follows: \begin{eqnarray*} \vdots & \vdots & \vdots, \\ \ [1-1-[1+1]] & \equiv & -2, \\ \ [1-1-1] & \equiv & -1, \\ \ [1-1] & \equiv & 0, \\ \ 1 & \equiv & 1, \\ \ [1+1] & \equiv & 2, \\ \ [1+1+1] & \equiv & 3, \\ \ [1+1+1+1] & \equiv & 4, \\ \vdots & \vdots & \vdots . \end{eqnarray*} The equalities on deducible numbers correspond to the addition and subtraction in integral number system. So the claim follows. 
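The derivation above can also be read purely mechanically. The following sketch is our own illustration, not part of the formal proof: the breadth-first enumeration strategy and the function names are ours. It expands the start symbol $ a $ with the productions $ a \rightarrow 1|[aba] $ and $ b \rightarrow +|- $ of (\ref{4.73}) and (\ref{4.74}) (the variable $ c $ of (\ref{4.74}) behaves in the same way) and collects the words containing no variables; these are fully bracketed terms such as $ 1 $, $ [1+1] $, $ [1-1] $, $ [[1+1]-1] $, which the proof derives step by step, and axiom (\ref{4.76}) then identifies, e.g., $ [[1+1]+1] $ with $ [1+1+1] $.
\begin{verbatim}
# Illustrative enumeration (our own sketch, not part of the proof): expand the
# start symbol "a" with a -> 1 | [aba] and b -> + | -, and yield the words
# that no longer contain any variable.
from itertools import islice

PRODUCTIONS = {"a": ["1", "[aba]"], "b": ["+", "-"]}

def expand(word: str):
    """Yield variable-free words reachable from `word` (breadth-first)."""
    queue, seen = [word], set()
    while queue:
        w = queue.pop(0)
        var = next((v for v in PRODUCTIONS if v in w), None)
        if var is None:                     # no variable left: a derived term
            if w not in seen:
                seen.add(w)
                yield w
            continue
        i = w.index(var)                    # rewrite the leftmost occurrence
        for rhs in PRODUCTIONS[var]:
            queue.append(w[:i] + rhs + w[i + 1:])

print(list(islice(expand("a"), 8)))
# ['1', '[1+1]', '[1-1]', ...]  (exact order depends on the search)
\end{verbatim}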
\end{proof} \begin{theorem} \begin{eqnarray} \notag && \textit{If} \ \Phi \{ \\ \label{4.85} && V \{ \emptyset, a, b, c, d \}, \\ \label{4.86} && C \{ \emptyset, 1, +, [, ], - \}, \\ \label{4.87} && P \{ \emptyset, \in, \subseteq, \rightarrow, |, =, < \}, \\ \label{4.88} && V \circ C \{ \emptyset, a, b \cdots, 1, + \cdots, aa, ab \cdots, a1, a+ \cdots, ba, bb \cdots, b1, b+ \cdots, \\ \notag && aaa, aab \cdots, aa1, aa+ \cdots, baa, bab \cdots, ba1, ba+ \cdots \}, \\ \label{4.89} && C \circ C \{ \emptyset, 1, + \cdots, 11, 1+ \cdots, 111, 11+ \cdots \}, \\ \label{4.90} && V \circ C \circ P \{ \emptyset, a, b \cdots, 1, + \cdots, \in, \subseteq \cdots, aa, ab \cdots, a1, a+ \cdots, a\in, a\subseteq \cdots, \\ \notag && ba, bb \cdots, b1, b+ \cdots, b\in, b\subseteq \cdots, aaa, aab \cdots, aa1, aa+ \cdots, aa\in, aa\subseteq \cdots, \\ \notag && baa, bab \cdots, ba1, ba+ \cdots, ba\in, ba\subseteq \cdots \}, \\ \label{4.91} && (\hat{a} \in V) \Leftrightarrow ((\hat{a} \equiv \emptyset) \vee (\hat{a} \equiv a) \vee (\hat{a} \equiv b) \cdots), \\ \label{4.92} && (\hat{a} \in C) \Leftrightarrow ((\hat{a} \equiv \emptyset) \vee (\hat{a} \equiv 1) \vee (\hat{a} \equiv +) \cdots), \\ \label{4.93} && (\hat{a} \in (V \circ C)) \Leftrightarrow ((\hat{a} \equiv \emptyset) \vee (\hat{a} \equiv a) \vee (\hat{a} \equiv b) \cdots \vee (\hat{a} \equiv 1) \cdots \vee (\hat{a} \equiv aa) \\ \notag && \vee (\hat{a} \equiv ab) \cdots \vee (\hat{a} \equiv a1) \cdots \vee (\hat{a} \equiv aaa) \vee (\hat{a} \equiv aab) \cdots \vee (\hat{a} \equiv aa1) \cdots), \\ \label{4.94} && (\hat{a} \in (C \circ C)) \Leftrightarrow ((\hat{a} \equiv \emptyset) \vee (\hat{a} \equiv 1) \vee (\hat{a} \equiv +) \cdots \vee (\hat{a} \equiv 11) \vee (\hat{a} \equiv 1+) \cdots \\ \notag && \vee (\hat{a} \equiv 111) \vee (\hat{a} \equiv 11+) \cdots), \\ \label{4.95} && (\hat{a} \in (V \circ C \circ P)) \Leftrightarrow ((\hat{a} \equiv \emptyset) \vee (\hat{a} \equiv a) \vee (\hat{a} \equiv b) \cdots \vee (\hat{a} \equiv \in) \cdots \vee (\hat{a} \equiv aa) \\ \notag && \vee (\hat{a} \equiv ab) \cdots \vee (\hat{a} \equiv a\in) \cdots \vee (\hat{a} \equiv aaa) \vee (\hat{a} \equiv aab) \cdots \vee (\hat{a} \equiv aa\in) \cdots), \\ \label{4.96} && (\bar{a} \in (V \circ C)) \wedge (\bar{b} \in (V \circ C)) \wedge (\bar{c} \in (V \circ C)) \wedge (\bar{d} \in (V \circ C)) \wedge (\bar{e} \in (V \circ C)) \\ \notag && \wedge (\bar{f} \in (V \circ C)) \wedge (\bar{g} \in (V \circ C)) \wedge (\bar{h} \in (V \circ C)) \wedge (\bar{i} \in (V \circ C)) \wedge \\ \notag && (\bar{j} \in (V \circ C)) \wedge (\bar{\bar{a}} \in (V \circ C \circ P)) \wedge (\bar{\bar{b}} \in (V \circ C \circ P)) \wedge (\bar{\bar{c}} \in (V \circ C \circ P)), \\ \label{4.97} && ((\bar{a} \subseteq \{\bar{b},\bar{c}\}) \Leftrightarrow ((\bar{a} \subseteq \bar{b}) \vee (\bar{a} \subseteq \bar{c}))) \wedge \\ \notag && ((\bar{a} \subseteq \{\bar{b},\bar{c},\bar{d}\}) \Leftrightarrow ((\bar{a} \subseteq \bar{b}) \vee (\bar{a} \subseteq \bar{c}) \vee (\bar{a} \subseteq \bar{d}))) \wedge \\ \notag && ((\bar{a} \subseteq \{\bar{b},\bar{c},\bar{d},\bar{e}\}) \Leftrightarrow ((\bar{a} \subseteq \bar{b}) \vee (\bar{a} \subseteq \bar{c}) \vee (\bar{a} \subseteq \bar{d}) \vee (\bar{a} \subseteq \bar{e}))) \wedge \\ \notag && ((\bar{a} \subseteq \{\bar{b},\bar{c},\bar{d},\bar{e},\bar{f}\}) \Leftrightarrow ((\bar{a} \subseteq \bar{b}) \vee (\bar{a} \subseteq \bar{c}) \vee (\bar{a} \subseteq \bar{d}) \vee (\bar{a} \subseteq \bar{e}) \vee (\bar{a} \subseteq \bar{f}))) \wedge \\ 
\notag && ((\bar{a} \subseteq \{\bar{b},\bar{c},\bar{d},\bar{e},\bar{f},\bar{g}\}) \Leftrightarrow ((\bar{a} \subseteq \bar{b}) \vee (\bar{a} \subseteq \bar{c}) \vee (\bar{a} \subseteq \bar{d}) \vee (\bar{a} \subseteq \bar{e}) \vee (\bar{a} \subseteq \bar{f}) \\ \notag && \vee (\bar{a} \subseteq \bar{g}))) \wedge \\ \notag && ((\bar{a} \subseteq \{\bar{b},\bar{c},\bar{d},\bar{e},\bar{f},\bar{g},\bar{h}\}) \Leftrightarrow ((\bar{a} \subseteq \bar{b}) \vee (\bar{a} \subseteq \bar{c}) \vee (\bar{a} \subseteq \bar{d}) \vee (\bar{a} \subseteq \bar{e}) \vee (\bar{a} \subseteq \bar{f}) \\ \notag && \vee (\bar{a} \subseteq \bar{g}) \vee (\bar{a} \subseteq \bar{h}))) \wedge \\ \notag && ((\bar{a} \subseteq \{\bar{b},\bar{c},\bar{d},\bar{e},\bar{f},\bar{g},\bar{h},\bar{i}\}) \Leftrightarrow ((\bar{a} \subseteq \bar{b}) \vee (\bar{a} \subseteq \bar{c}) \vee (\bar{a} \subseteq \bar{d}) \vee (\bar{a} \subseteq \bar{e}) \vee (\bar{a} \subseteq \bar{f}) \\ \notag && \vee (\bar{a} \subseteq \bar{g}) \vee (\bar{a} \subseteq \bar{h}) \vee (\bar{a} \subseteq \bar{i}))) \wedge \\ \notag && ((\bar{a} \subseteq \{\bar{b},\bar{c},\bar{d},\bar{e},\bar{f},\bar{g},\bar{h},\bar{i},\bar{j}\}) \Leftrightarrow ((\bar{a} \subseteq \bar{b}) \vee (\bar{a} \subseteq \bar{c}) \vee (\bar{a} \subseteq \bar{d}) \vee (\bar{a} \subseteq \bar{e}) \vee \\ \notag && (\bar{a} \subseteq \bar{f}) \vee (\bar{a} \subseteq \bar{g}) \vee (\bar{a} \subseteq \bar{h}) \vee (\bar{a} \subseteq \bar{i}) \vee (\bar{a} \subseteq \bar{j}))), \\ \label{4.98} && a \rightarrow 1|[aba], \\ \label{4.99} && b \rightarrow +|-, \\ \label{4.100} && c|d \rightarrow b|++|--, \\ \label{4.101} && a < [1+a], \\ \label{4.102} && (\bar{a} < \bar{b}) \wedge \bar{c} {\mathbb R}ightarrow ([\bar{a}+\bar{c}] < [\bar{b}+\bar{c}]), \\ \label{4.103} && (\bar{a} < \bar{b}) \wedge \bar{c} {\mathbb R}ightarrow ([\bar{c}-\bar{b}] < [\bar{c}-\bar{a}]), \\ \label{4.104} && ([1-1] < \bar{a}) \wedge (\bar{a} < \bar{b}) \wedge ([1-1] < \bar{c}) {\mathbb R}ightarrow ([\bar{a}--\bar{c}] < [\bar{b}--\bar{c}]), \\ \label{4.105} && \bar{a} \wedge \bar{b} \wedge \bar{c} {\mathbb R}ightarrow ([\bar{a}c\bar{b}d\bar{c}] = [[\bar{a}c\bar{b}]d\bar{c}]), \\ \label{4.106} && \bar{a} {\mathbb R}ightarrow ([\bar{a}-\bar{a}] = [1-1]), \\ \label{4.107} && \bar{a} \wedge \bar{b} {\mathbb R}ightarrow ([\bar{a}+\bar{b}] = [\bar{b}+\bar{a}]), \\ \label{4.108} && \bar{a} \wedge \bar{b} {\mathbb R}ightarrow ([[\bar{a}-\bar{b}]+\bar{b}] = \bar{a}), \\ \label{4.109} && \bar{a} \wedge \bar{b} \wedge \bar{c} {\mathbb R}ightarrow ([\bar{a}-\bar{b}+\bar{c}] = [\bar{a}+\bar{c}-\bar{b}]), \\ \label{4.110} && \bar{a} \wedge \bar{b} \wedge \bar{c} {\mathbb R}ightarrow ([\bar{a}+[\bar{b}+\bar{c}]] = [[\bar{a}+\bar{b}]+\bar{c}]), \\ \label{4.111} && \bar{a} \wedge \bar{b} \wedge \bar{c} {\mathbb R}ightarrow ([\bar{a}+[\bar{b}-\bar{c}]] = [[\bar{a}+\bar{b}]-\bar{c}]), \\ \label{4.112} && \bar{a} \wedge \bar{b} \wedge \bar{c} {\mathbb R}ightarrow ([\bar{a}-[\bar{b}+\bar{c}]] = [[\bar{a}-\bar{b}]-\bar{c}]), \\ \label{4.113} && \bar{a} \wedge \bar{b} \wedge \bar{c} {\mathbb R}ightarrow ([\bar{a}-[\bar{b}-\bar{c}]] = [[\bar{a}-\bar{b}]+\bar{c}]), \\ \label{4.114} && \bar{a} {\mathbb R}ightarrow ([\bar{a}++1] = \bar{a}), \\ \label{4.115} && \neg (\bar{a} = [1-1]) {\mathbb R}ightarrow ([\bar{a}--\bar{a}] = 1), \\ \label{4.116} && \bar{a} \wedge \bar{b} {\mathbb R}ightarrow ([\bar{a}++\bar{b}] = [\bar{b}++\bar{a}]), \\ \label{4.117} && \bar{a} \wedge \bar{b} \wedge \bar{c} {\mathbb R}ightarrow 
([\bar{a}++[\bar{b}+\bar{c}]] = [[\bar{a}++\bar{b}]+[\bar{a}++\bar{c}]]), \\ \label{4.118} && \bar{a} \wedge \bar{b} \wedge \bar{c} {\mathbb R}ightarrow ([\bar{a}++[\bar{b}-\bar{c}]] = [[\bar{a}++\bar{b}]-[\bar{a}++\bar{c}]]), \\ \label{4.119} && \bar{a} \wedge \bar{b} \wedge \bar{c} {\mathbb R}ightarrow ([\bar{a}++[\bar{b}++\bar{c}]] = [[\bar{a}++\bar{b}]++\bar{c}]), \\ \label{4.120} && \bar{a} \wedge \neg (\bar{b} = [1-1]) {\mathbb R}ightarrow ([\bar{a}--\bar{b}++\bar{b}] = \bar{a}), \\ \label{4.121} && \bar{a} \wedge \bar{b} \wedge \neg (\bar{c} = [1-1]) {\mathbb R}ightarrow (([\bar{a}--\bar{c}++\bar{b}] = [\bar{a}++\bar{b}--\bar{c}]) \wedge ([[\bar{a}+\bar{b}]--\bar{c}] = \\ \notag && [[\bar{a}--\bar{c}]+[\bar{b}--\bar{c}]]) \wedge ([[\bar{a}-\bar{b}]--\bar{c}] = [[\bar{a}--\bar{c}]-[\bar{b}--\bar{c}]]) \wedge \\ \notag && ([\bar{a}++[\bar{b}--\bar{c}]] = [[\bar{a}++\bar{b}]--\bar{c}])), \\ \label{4.122} && \bar{a} \wedge \neg ([\bar{b} = [1-1]) \wedge \neg (\bar{c} = [1-1]]) {\mathbb R}ightarrow (([\bar{a}--[\bar{b}++\bar{c}]] = [[\bar{a}--\bar{b}]--\bar{c}]) \wedge \\ \notag && ([\bar{a}--[\bar{b}--\bar{c}]] = [[\bar{a}--\bar{b}]++\bar{c}])) \\ \notag && \}, \end{eqnarray} then $ Q \{ \Phi, \Psi \} $ denotes rational number system. \end{theorem} \begin{proof} \begin{eqnarray*} (A1) & (a \rightarrow 1|[aba]) {\mathbb R}ightarrow (a \rightarrow 1) & by (\ref{4.98}),(\ref{4.18}) \\ (A2) & {\mathbb R}ightarrow (a \rightarrow [aba]) & by (\ref{4.98}),(\ref{4.18}) \\ (A3) & {\mathbb R}ightarrow (a \rightarrow [[aba]ba]) & by (A2),(\ref{4.17}) \\ (A4) & {\mathbb R}ightarrow (a \rightarrow [[1-1]-1]) & by (A1),(\ref{4.99}) \\ (A5) & (a < [1+a]) {\mathbb R}ightarrow (1 < [1+1]) & by (\ref{4.101}),(A1),(\ref{4.29}) \\ (A6) & {\mathbb R}ightarrow 1 & by (\ref{4.23}) \\ (A7) & {\mathbb R}ightarrow [1+1] & \ by (A5),(\ref{4.23}) \\ (A8) & {\mathbb R}ightarrow ([aba] < [1+[aba]]) & by (A5),(A2),(\ref{4.29}) \\ (A9) & {\mathbb R}ightarrow ([1+1] < [1+[1+1]]) & by (A1),(\ref{4.99}),(\ref{4.29}) \\ (A10) & {\mathbb R}ightarrow ([1+1] < [[1+1]+1]) & by (\ref{4.107}),(\ref{4.24}) \\ (A11) & {\mathbb R}ightarrow ([1+1] < [1+1+1]) & by (\ref{4.105}) \\ (A12) & {\mathbb R}ightarrow ([1-1] < [1+[1-1]]) & by (A8),(A1),(\ref{4.99}),(\ref{4.29}) \\ (A13) & {\mathbb R}ightarrow ([1-1] < [[1-1]+1]) & by (\ref{4.107}),(\ref{4.24}) \\ (A14) & {\mathbb R}ightarrow ([1-1] < 1) & by (\ref{4.108}) \\ (A15) & {\mathbb R}ightarrow ([1-1] < [1+1+1]) & by (A14),(A5),(A11),(\ref{4.22}) \\ (A16) & {\mathbb R}ightarrow ([1--[1+1+1]] < [[1+1]--[1+1+1]]) & by (A14),(A5),(A15),(\ref{4.104}) \\ (A17) & {\mathbb R}ightarrow [1--[1+1+1]] & by (\ref{4.23}) \\ (A18) & {\mathbb R}ightarrow [[1+1]--[1+1+1]] & by (A16),(\ref{4.23}) \\ \vdots & \vdots & \vdots \end{eqnarray*} Then we deduce the numbers from $ Q \{ \Phi, \Psi \} $: \begin{eqnarray*} & [1-1], [[1-1]-[1--[1+1]]], [1--[1+1]], 1 \cdots & \end{eqnarray*} \begin{eqnarray*} (B1) & [1+[1--[1+1+1]]] = [[1--[1+1+1]]+1] & by (A6),(A17),(\ref{4.107}) \\ (B2) & [1++[1--[1+1+1]]] = [[1--[1+1+1]]++1] & by (A6),(A17),(\ref{4.116}) \\ \vdots & \vdots & \vdots \end{eqnarray*} Then we deduce the equalities on deducible numbers from $ Q \{ \Phi, \Psi \} $: \begin{eqnarray*} & [1+1] = [1+1], [1++[1--[1+1+1]]] = [[1--[1+1+1]]++1] \cdots & \end{eqnarray*} The deducible numbers correspond to the rational numbers as follows: \begin{eqnarray*} \vdots & \vdots & \vdots, \\ \ [1-1-1] & \equiv & -1, \\ \vdots & \vdots & \vdots, \\ \ [1-1-[1--[1+1]]] & \equiv & -\frac{1}{2}, \\ \vdots & \vdots & 
\vdots, \\ \ [1-1] & \equiv & 0, \\ \vdots & \vdots & \vdots, \\ \ [1-[1--[1+1]]] & \equiv & \frac{1}{2}, \\ \vdots & \vdots & \vdots, \\ \ 1 & \equiv & 1, \\ \vdots & \vdots & \vdots, \\ \ [1+[1--[1+1]]] & \equiv & \frac{3}{2}, \\ \vdots & \vdots & \vdots, \\ \ [1+1] & \equiv & 2, \\ \vdots & \vdots & \vdots, \\ \ [1+1+[1--[1+1]]] & \equiv & \frac{5}{2}, \\ \vdots & \vdots & \vdots, \\ \ [1+1+1] & \equiv & 3, \\ \vdots & \vdots & \vdots . \end{eqnarray*} The equalities on deducible numbers correspond to the addition, subtraction, multiplication, division in rational number system. So the claim follows. \end{proof} \begin{definition} Real number system is a logical calculus $ R \{ \Phi, \Psi \} $ such that: \begin{eqnarray} \notag && \Phi \{ \\ \label{4.123} && V \{ \emptyset, a, b, c, d, e, f, g, h, i, j, k, l \}, \\ \label{4.124} && C \{ \emptyset, 1, +, [, ], -, /, \top, \bot, \underline{\ \ } \}, \\ \label{4.125} && P \{ \emptyset, \in, \subseteq, \rightarrow, |, =, <, \| \}, \\ \label{4.126} && V \circ C \{ \emptyset, a, b \cdots, 1, + \cdots, aa, ab \cdots, a1, a+ \cdots, ba, bb \cdots, b1, b+ \cdots, \\ \notag && aaa, aab \cdots, aa1, aa+ \cdots, baa, bab \cdots, ba1, ba+ \cdots \}, \\ \label{4.127} && C \circ C \{ \emptyset, 1, + \cdots, 11, 1+ \cdots, 111, 11+ \cdots \}, \\ \label{4.128} && V \circ C \circ P \{ \emptyset, a, b \cdots, 1, + \cdots, \in, \subseteq \cdots, aa, ab \cdots, a1, a+ \cdots, a\in, a\subseteq \cdots, \\ \notag && ba, bb \cdots, b1, b+ \cdots, b\in, b\subseteq \cdots, aaa, aab \cdots, aa1, aa+ \cdots, aa\in, aa\subseteq \cdots, \\ \notag && baa, bab \cdots, ba1, ba+ \cdots, ba\in, ba\subseteq \cdots \}, \\ \label{4.129} && (\hat{a} \in V) \Leftrightarrow ((\hat{a} \equiv \emptyset) \vee (\hat{a} \equiv a) \vee (\hat{a} \equiv b) \cdots), \\ \label{4.130} && (\hat{a} \in C) \Leftrightarrow ((\hat{a} \equiv \emptyset) \vee (\hat{a} \equiv 1) \vee (\hat{a} \equiv +) \cdots), \\ \label{4.131} && (\hat{a} \in (V \circ C)) \Leftrightarrow ((\hat{a} \equiv \emptyset) \vee (\hat{a} \equiv a) \vee (\hat{a} \equiv b) \cdots \vee (\hat{a} \equiv 1) \cdots \vee (\hat{a} \equiv aa) \\ \notag && \vee (\hat{a} \equiv ab) \cdots \vee (\hat{a} \equiv a1) \cdots \vee (\hat{a} \equiv aaa) \vee (\hat{a} \equiv aab) \cdots \vee (\hat{a} \equiv aa1) \cdots), \\ \label{4.132} && (\hat{a} \in (C \circ C)) \Leftrightarrow ((\hat{a} \equiv \emptyset) \vee (\hat{a} \equiv 1) \vee (\hat{a} \equiv +) \cdots \vee (\hat{a} \equiv 11) \vee (\hat{a} \equiv 1+) \cdots \\ \notag && \vee (\hat{a} \equiv 111) \vee (\hat{a} \equiv 11+) \cdots), \\ \label{4.133} && (\hat{a} \in (V \circ C \circ P)) \Leftrightarrow ((\hat{a} \equiv \emptyset) \vee (\hat{a} \equiv a) \vee (\hat{a} \equiv b) \cdots \vee (\hat{a} \equiv \in) \cdots \vee (\hat{a} \equiv aa) \\ \notag && \vee (\hat{a} \equiv ab) \cdots \vee (\hat{a} \equiv a\in) \cdots \vee (\hat{a} \equiv aaa) \vee (\hat{a} \equiv aab) \cdots \vee (\hat{a} \equiv aa\in) \cdots), \\ \label{4.134} && (\bar{a} \in (V \circ C)) \wedge (\bar{b} \in (V \circ C)) \wedge (\bar{c} \in (V \circ C)) \wedge (\bar{d} \in (V \circ C)) \wedge (\bar{e} \in (V \circ C)) \\ \notag && \wedge (\bar{f} \in (V \circ C)) \wedge (\bar{g} \in (V \circ C)) \wedge (\bar{h} \in (V \circ C)) \wedge (\bar{i} \in (V \circ C)) \wedge \\ \notag && (\bar{j} \in (V \circ C)) \wedge (\bar{k} \in (V \circ C)) \wedge (\bar{l} \in (V \circ C)) \wedge (\bar{\bar{a}} \in (V \circ C \circ P)) \wedge \\ \notag && (\bar{\bar{b}} \in (V \circ C \circ P)) \wedge (\bar{\bar{c}} \in (V \circ C 
\circ P)), \\ \label{4.135} && ((\bar{a} \subseteq \{\bar{b},\bar{c}\}) \Leftrightarrow ((\bar{a} \subseteq \bar{b}) \vee (\bar{a} \subseteq \bar{c}))) \wedge \\ \notag && ((\bar{a} \subseteq \{\bar{b},\bar{c},\bar{d}\}) \Leftrightarrow ((\bar{a} \subseteq \bar{b}) \vee (\bar{a} \subseteq \bar{c}) \vee (\bar{a} \subseteq \bar{d}))) \wedge \\ \notag && ((\bar{a} \subseteq \{\bar{b},\bar{c},\bar{d},\bar{e}\}) \Leftrightarrow ((\bar{a} \subseteq \bar{b}) \vee (\bar{a} \subseteq \bar{c}) \vee (\bar{a} \subseteq \bar{d}) \vee (\bar{a} \subseteq \bar{e}))) \wedge \\ \notag && ((\bar{a} \subseteq \{\bar{b},\bar{c},\bar{d},\bar{e},\bar{f}\}) \Leftrightarrow ((\bar{a} \subseteq \bar{b}) \vee (\bar{a} \subseteq \bar{c}) \vee (\bar{a} \subseteq \bar{d}) \vee (\bar{a} \subseteq \bar{e}) \vee (\bar{a} \subseteq \bar{f}))) \wedge \\ \notag && ((\bar{a} \subseteq \{\bar{b},\bar{c},\bar{d},\bar{e},\bar{f},\bar{g}\}) \Leftrightarrow ((\bar{a} \subseteq \bar{b}) \vee (\bar{a} \subseteq \bar{c}) \vee (\bar{a} \subseteq \bar{d}) \vee (\bar{a} \subseteq \bar{e}) \vee (\bar{a} \subseteq \bar{f}) \\ \notag && \vee (\bar{a} \subseteq \bar{g}))) \wedge \\ \notag && ((\bar{a} \subseteq \{\bar{b},\bar{c},\bar{d},\bar{e},\bar{f},\bar{g},\bar{h}\}) \Leftrightarrow ((\bar{a} \subseteq \bar{b}) \vee (\bar{a} \subseteq \bar{c}) \vee (\bar{a} \subseteq \bar{d}) \vee (\bar{a} \subseteq \bar{e}) \vee (\bar{a} \subseteq \bar{f}) \\ \notag && \vee (\bar{a} \subseteq \bar{g}) \vee (\bar{a} \subseteq \bar{h}))) \wedge \\ \notag && ((\bar{a} \subseteq \{\bar{b},\bar{c},\bar{d},\bar{e},\bar{f},\bar{g},\bar{h},\bar{i}\}) \Leftrightarrow ((\bar{a} \subseteq \bar{b}) \vee (\bar{a} \subseteq \bar{c}) \vee (\bar{a} \subseteq \bar{d}) \vee (\bar{a} \subseteq \bar{e}) \vee (\bar{a} \subseteq \bar{f}) \\ \notag && \vee (\bar{a} \subseteq \bar{g}) \vee (\bar{a} \subseteq \bar{h}) \vee (\bar{a} \subseteq \bar{i}))) \wedge \\ \notag && ((\bar{a} \subseteq \{\bar{b},\bar{c},\bar{d},\bar{e},\bar{f},\bar{g},\bar{h},\bar{i},\bar{j}\}) \Leftrightarrow ((\bar{a} \subseteq \bar{b}) \vee (\bar{a} \subseteq \bar{c}) \vee (\bar{a} \subseteq \bar{d}) \vee (\bar{a} \subseteq \bar{e}) \vee \\ \notag && (\bar{a} \subseteq \bar{f}) \vee (\bar{a} \subseteq \bar{g}) \vee (\bar{a} \subseteq \bar{h}) \vee (\bar{a} \subseteq \bar{i}) \vee (\bar{a} \subseteq \bar{j}))), \\ \notag && ((\bar{a} \subseteq \{\bar{b},\bar{c},\bar{d},\bar{e},\bar{f},\bar{g},\bar{h},\bar{i},\bar{j},\bar{k}\}) \Leftrightarrow ((\bar{a} \subseteq \bar{b}) \vee (\bar{a} \subseteq \bar{c}) \vee (\bar{a} \subseteq \bar{d}) \vee (\bar{a} \subseteq \bar{e}) \vee \\ \notag && (\bar{a} \subseteq \bar{f}) \vee (\bar{a} \subseteq \bar{g}) \vee (\bar{a} \subseteq \bar{h}) \vee (\bar{a} \subseteq \bar{i}) \vee (\bar{a} \subseteq \bar{j}) \vee (\bar{a} \subseteq \bar{k}))), \\ \notag && ((\bar{a} \subseteq \{\bar{b},\bar{c},\bar{d},\bar{e},\bar{f},\bar{g},\bar{h},\bar{i},\bar{j},\bar{k},\bar{l}\}) \Leftrightarrow ((\bar{a} \subseteq \bar{b}) \vee (\bar{a} \subseteq \bar{c}) \vee (\bar{a} \subseteq \bar{d}) \vee (\bar{a} \subseteq \bar{e}) \vee \\ \notag && (\bar{a} \subseteq \bar{f}) \vee (\bar{a} \subseteq \bar{g}) \vee (\bar{a} \subseteq \bar{h}) \vee (\bar{a} \subseteq \bar{i}) \vee (\bar{a} \subseteq \bar{j}) \vee (\bar{a} \subseteq \bar{k}) \vee (\bar{a} \subseteq \bar{l}))), \\ \label{4.136} && (\bar{a}\bar{b}\bar{c} = \bar{d}\bar{b}\bar{e}\bar{f}\bar{g}) \wedge \neg(\bar{b} \subseteq \{\bar{a},\bar{c},\bar{d},\bar{e},\bar{f},\bar{g}\}) \wedge \neg(\bar{f} \subseteq 
\{\bar{a},\bar{b},\bar{c},\bar{d},\bar{e},\bar{g}\}) \wedge \\ \notag && ((\bar{b} \rightarrow \bar{h}) \| (\bar{f} \rightarrow \bar{i})) {\mathbb R}ightarrow (\bar{a}\bar{h}\bar{c} = \bar{d}\bar{h}\bar{e}\bar{i}\bar{g}), \\ \label{4.137} && (\bar{a}\bar{b}\bar{c}\bar{d}\bar{e} = \bar{f}) \wedge \neg(\bar{b} \subseteq \{\bar{a},\bar{c},\bar{d},\bar{e},\bar{f}\}) \wedge \neg(\bar{d} \subseteq \{\bar{a},\bar{b},\bar{c},\bar{e},\bar{f}\}) \wedge ((\bar{b} \rightarrow \bar{g}) \| (\bar{d} \rightarrow \bar{h})) \\ \notag && {\mathbb R}ightarrow (\bar{a}\bar{g}\bar{c}\bar{h}\bar{e} = \bar{f}), \\ \label{4.138} && a \rightarrow 1|[aba], \\ \label{4.139} && b \rightarrow +|-, \\ \label{4.140} && c|d \rightarrow e|f|g, \\ \label{4.141} && e \rightarrow +|+e, \\ \label{4.142} && f \rightarrow -|-f, \\ \label{4.143} && g \rightarrow /|/g, \\ \label{4.144} && (h \rightarrow +) \| (i \rightarrow -), \\ \label{4.145} && (h \rightarrow +h) \| (i \rightarrow -i), \\ \label{4.146} && (i \rightarrow -) \| (h \rightarrow +), \\ \label{4.147} && (i \rightarrow -i) \| (h \rightarrow +h), \\ \label{4.148} && (h \rightarrow +) \| (j \rightarrow /), \\ \label{4.149} && (h \rightarrow +h) \| (j \rightarrow /j), \\ \label{4.150} && k \rightarrow [1+1]|[1+k], \\ \label{4.151} && l \rightarrow 1|[1+l], \\ \label{4.152} && a < [1+a], \\ \label{4.153} && (\bar{a} < \bar{b}) \wedge \bar{c} {\mathbb R}ightarrow (([\bar{a}+\bar{c}] < [\bar{b}+\bar{c}]) \wedge ([\bar{c}-\bar{b}] < [\bar{c}-\bar{a}])), \\ \label{4.154} && ([1-1] < \bar{a}) \wedge (\bar{a} < \bar{b}) \wedge ([1-1] < \bar{c}) {\mathbb R}ightarrow (([\bar{a}--\bar{c}] < [\bar{b}--\bar{c}]) \wedge \\ \notag && ([\bar{c}--\bar{b}] < [\bar{c}--\bar{a}])), \\ \label{4.155} && (1 < \bar{a}) \wedge (1 < \bar{b}) {\mathbb R}ightarrow (1 < [\bar{a}--f\bar{b}]), \\ \label{4.156} && (1 < \bar{a}) \wedge (\bar{a} < \bar{b}) {\mathbb R}ightarrow (1 < [\bar{b}/g\bar{a}]), \\ \label{4.157} && (1 < \bar{a}) \wedge (\bar{a} < \bar{b}) \wedge (1 < \bar{c}) {\mathbb R}ightarrow ([\bar{a}e\bar{c}] < [\bar{b}e\bar{c}]), \\ \label{4.158} && (1 < \bar{a}) \wedge (1 < \bar{b}) \wedge (1 < \bar{c}) \wedge ([\bar{a}e\bar{c}] < [\bar{b}e\bar{c}]) {\mathbb R}ightarrow (\bar{a} < \bar{b}), \\ \label{4.159} && (1 < \bar{a}) \wedge (1 < \bar{b}) \wedge (\bar{b} < \bar{c}) {\mathbb R}ightarrow ([\bar{a}e\bar{b}] < [\bar{a}e\bar{c}]), \\ \label{4.160} && (1 < \bar{a}) \wedge (1 < \bar{b}) \wedge (1 < \bar{c}) \wedge ([\bar{a}e\bar{b}] < [\bar{a}e\bar{c}]) {\mathbb R}ightarrow (\bar{b} < \bar{c}), \\ \label{4.161} && \bar{a} \wedge \bar{b} \wedge \bar{c} {\mathbb R}ightarrow ([\bar{a}c\bar{b}d\bar{c}] = [[\bar{a}c\bar{b}]d\bar{c}]), \\ \label{4.162} && \bar{a} \wedge \bar{b} {\mathbb R}ightarrow ([\bar{a}-\bar{b}] = [\bar{a}/\bar{b}]), \\ \label{4.163} && \bar{a} \wedge \neg (\bar{b} = [1-1]) {\mathbb R}ightarrow ([\bar{a}--\bar{b}] = [\bar{a}//\bar{b}]), \\ \label{4.164} && \bar{a} {\mathbb R}ightarrow ([\bar{a}-\bar{a}] = [1-1]), \\ \label{4.165} && \bar{a} \wedge \bar{b} {\mathbb R}ightarrow ([\bar{a}+\bar{b}] = [\bar{b}+\bar{a}]), \\ \label{4.166} && \bar{a} \wedge \bar{b} {\mathbb R}ightarrow ([[\bar{a}-\bar{b}]+\bar{b}] = \bar{a}), \\ \label{4.167} && \bar{a} \wedge \bar{b} \wedge \bar{c} {\mathbb R}ightarrow ([\bar{a}-\bar{b}+\bar{c}] = [\bar{a}+\bar{c}-\bar{b}]), \\ \label{4.168} && \bar{a} \wedge \bar{b} \wedge \bar{c} {\mathbb R}ightarrow ([\bar{a}+[\bar{b}+\bar{c}]] = [[\bar{a}+\bar{b}]+\bar{c}]), \\ \label{4.169} && \bar{a} \wedge \bar{b} \wedge \bar{c} {\mathbb R}ightarrow 
([\bar{a}+[\bar{b}-\bar{c}]] = [[\bar{a}+\bar{b}]-\bar{c}]), \\ \label{4.170} && \bar{a} \wedge \bar{b} \wedge \bar{c} {\mathbb R}ightarrow ([\bar{a}-[\bar{b}+\bar{c}]] = [[\bar{a}-\bar{b}]-\bar{c}]), \\ \label{4.171} && \bar{a} \wedge \bar{b} \wedge \bar{c} {\mathbb R}ightarrow ([\bar{a}-[\bar{b}-\bar{c}]] = [[\bar{a}-\bar{b}]+\bar{c}]), \\ \label{4.172} && \bar{a} {\mathbb R}ightarrow ([\bar{a}++1] = \bar{a}), \\ \label{4.173} && \neg (\bar{a} = [1-1]) {\mathbb R}ightarrow ([\bar{a}--\bar{a}] = 1), \\ \label{4.174} && \bar{a} \wedge \bar{b} {\mathbb R}ightarrow ([\bar{a}++\bar{b}] = [\bar{b}++\bar{a}]), \\ \label{4.175} && \bar{a} \wedge \bar{b} \wedge \bar{c} {\mathbb R}ightarrow ([\bar{a}++[\bar{b}++\bar{c}]] = [[\bar{a}++\bar{b}]++\bar{c}]), \\ \label{4.176} && \bar{a} \wedge \bar{b} \wedge \bar{c} {\mathbb R}ightarrow ([\bar{a}++[\bar{b}+\bar{c}]] = [[\bar{a}++\bar{b}]+[\bar{a}++\bar{c}]]), \\ \label{4.177} && \bar{a} \wedge \bar{b} \wedge \bar{c} {\mathbb R}ightarrow ([\bar{a}++[\bar{b}-\bar{c}]] = [[\bar{a}++\bar{b}]-[\bar{a}++\bar{c}]]), \\ \label{4.178} && \bar{a} \wedge \neg (\bar{b} = [1-1]) {\mathbb R}ightarrow ([[\bar{a}--\bar{b}]++\bar{b}] = \bar{a}), \\ \label{4.179} && \bar{a} \wedge \neg (\bar{b} = [1-1]) \wedge \bar{c} {\mathbb R}ightarrow (([\bar{a}--\bar{b}++\bar{c}] = [\bar{a}++\bar{c}--\bar{b}]) \wedge \\ \notag && ([[\bar{a}+\bar{c}]--\bar{b}] = [[\bar{a}--\bar{b}]+[\bar{c}--\bar{b}]]) \wedge ([[\bar{a}-\bar{c}]--\bar{b}] = \\ \notag && [[\bar{a}--\bar{b}]-[\bar{c}--\bar{b}]])), \\ \label{4.180} && \bar{a} \wedge \neg (\bar{b} = [1-1]) \wedge \neg (\bar{c} = [1-1]) {\mathbb R}ightarrow (([\bar{a}++[\bar{b}--\bar{c}]] = [[\bar{a}++\bar{b}]--\bar{c}]) \wedge \\ \notag && ([\bar{a}--[\bar{b}++\bar{c}]] = [[\bar{a}--\bar{b}]--\bar{c}]) \wedge ([\bar{a}--[\bar{b}--\bar{c}]] = [[\bar{a}--\bar{b}]++\bar{c}])), \\ \label{4.181} && \bar{a} {\mathbb R}ightarrow ([\bar{a}+++1] = \bar{a}), \\ \label{4.182} && \bar{a} {\mathbb R}ightarrow ([1+++\bar{a}] = 1), \\ \label{4.183} && \neg (\bar{a} = [1-1]) {\mathbb R}ightarrow ([\bar{a}+++[1-1]] = 1), \\ \label{4.184} && ([1-1] < \bar{a}) {\mathbb R}ightarrow ([[1-1]+++\bar{a}] = [1-1]), \\ \label{4.185} && ([1-1] < \bar{a}) \wedge \neg (\bar{b} = [1-1]) \wedge \bar{c} {\mathbb R}ightarrow (([\bar{a}---\bar{b}+++\bar{b}] = \bar{a}) \wedge \\ \notag && ([\bar{a}---\bar{b}+++\bar{c}] = [\bar{a}+++\bar{c}---\bar{b}]) \wedge ([\bar{a}+++[\bar{c}--\bar{b}]] = \\ \notag && [[\bar{a}+++\bar{c}]---\bar{b}])), \\ \label{4.186} && ([1-1] < \bar{a}) \wedge ([1-1] < \bar{b}) \wedge \bar{c} {\mathbb R}ightarrow (([\bar{a}+++[\bar{b}///\bar{a}]] = \bar{b}) \wedge \\ \notag && ([[\bar{a}+++\bar{c}]///\bar{b}] = [\bar{c}++[\bar{a}///\bar{b}]]) \wedge ([[\bar{a}--\bar{b}]+++\bar{c}] = \\ \notag && [[\bar{a}+++\bar{c}]--[\bar{b}+++\bar{c}]])), \\ \label{4.187} && ([1-1] < \bar{a}) \wedge ([1-1] < \bar{b}) \wedge ([1-1] < \bar{c}) {\mathbb R}ightarrow (([[\bar{a}///\bar{c}]--[\bar{b}///\bar{c}]] = \\ \notag && [\bar{a}///\bar{b}]) \wedge ([[\bar{a}++\bar{b}]///\bar{c}] = [[\bar{a}///\bar{c}]+[\bar{b}///\bar{c}]]) \wedge ([[\bar{a}--\bar{b}]///\bar{c}] = \\ \notag && [[\bar{a}///\bar{c}]-[\bar{b}///\bar{c}]])), \\ \label{4.188} && \neg (\bar{a} = [1-1]) \wedge \neg (\bar{b} = [1-1]) \wedge \neg (\bar{c} = [1-1]) {\mathbb R}ightarrow (([[\bar{a}++\bar{b}]+++\bar{c}] = \\ \notag && [[\bar{a}+++\bar{c}]++[\bar{b}+++\bar{c}]]) \wedge ([\bar{a}+++[\bar{b}++\bar{c}]] = [[\bar{a}+++\bar{b}]+++\bar{c}]) \\ \notag && \wedge ([\bar{a}+++[\bar{b}+\bar{c}]] = 
[[\bar{a}+++\bar{b}]++[\bar{a}+++\bar{c}]]) \wedge ([\bar{a}+++[\bar{b}-\bar{c}]] = \\ \notag && [[\bar{a}+++\bar{b}]--[\bar{a}+++\bar{c}]])), \\ \label{4.189} && ([1-1] < \bar{a}) \wedge \neg (\bar{b} = [1-1]) \wedge \neg (\bar{c} = [1-1]) {\mathbb R}ightarrow (([\bar{a}---[\bar{b}++\bar{c}]] = \\ \notag && [[\bar{a}---\bar{b}]---\bar{c}]) \wedge ([\bar{a}---[\bar{b}--\bar{c}]] = [[\bar{a}---\bar{b}]+++\bar{c}])), \\ \label{4.190} && ([1-1] < \bar{a}) \wedge ([1-1] < \bar{b}) \wedge \neg (\bar{c} = [1-1]) {\mathbb R}ightarrow (([[\bar{a}++\bar{b}]---\bar{c}] = \\ \notag && [[\bar{a}---\bar{c}]++[\bar{b}---\bar{c}]]) \wedge ([[\bar{a}--\bar{b}]---\bar{c}] = \\ \notag && [[\bar{a}---\bar{c}]--[\bar{b}---\bar{c}]])), \\ \label{4.191} && (1 < \bar{a}) {\mathbb R}ightarrow ([\bar{a}+e1] = \bar{a}), \\ \label{4.192} && (1 < \bar{a}) {\mathbb R}ightarrow ([\bar{a}-f1] = \bar{a}), \\ \label{4.193} && (1 < \bar{a}) {\mathbb R}ightarrow ([1++e\bar{a}] = 1), \\ \label{4.194} && (1 < \bar{a}) {\mathbb R}ightarrow ([1--f\bar{a}] = 1), \\ \label{4.195} && (1 < \bar{a}) {\mathbb R}ightarrow ([1///\bar{a}] = [1-1]), \\ \label{4.196} && (1 < \bar{a}) {\mathbb R}ightarrow ([\bar{a}/g\bar{a}] = 1), \\ \label{4.197} && (1 < \bar{a}) \wedge (1 < \bar{b}) {\mathbb R}ightarrow ([[\bar{a}i\bar{b}]h\bar{b}] = \bar{a}), \\ \label{4.198} && (1 < \bar{a}) \wedge (1 < \bar{b}) {\mathbb R}ightarrow ([[\bar{a}h\bar{b}]i\bar{b}] = \bar{a}), \\ \label{4.199} && (1 < \bar{a}) \wedge (1 < \bar{b}) {\mathbb R}ightarrow ([\bar{b}h[\bar{a}j\bar{b}]] = \bar{a}), \\ \label{4.200} && (1 < \bar{a}) \wedge (1 < \bar{b}) {\mathbb R}ightarrow ([[\bar{b}h\bar{a}]j\bar{b}] = \bar{a}), \\ \label{4.201} && (1 < \bar{a}) \wedge (1 < \bar{b}) {\mathbb R}ightarrow ([\bar{a}+e\bar{b}] = [\bar{a}e[\bar{a}+e[\bar{b}-1]]]), \\ \label{4.202} && \top\underline{\ \ }1\underline{\ \ }1\underline{\ \ } = [1-1], \\ \label{4.203} && \top\underline{\ \ }1\underline{\ \ }[1+1]\underline{\ \ } = 1, \\ \label{4.204} && \bot\underline{\ \ }1\underline{\ \ }1\underline{\ \ } = 1, \\ \label{4.205} && \bot\underline{\ \ }1\underline{\ \ }[1+1]\underline{\ \ } = 1, \\ \label{4.206} && \top\underline{\ \ }k\underline{\ \ }[[[1+1]++l]-1]\underline{\ \ } = \top\underline{\ \ }[k-1]\underline{\ \ }l\underline{\ \ }, \\ \label{4.207} && \bot\underline{\ \ }k\underline{\ \ }[[[1+1]++l]-1]\underline{\ \ } = \bot\underline{\ \ }[k-1]\underline{\ \ }l\underline{\ \ }, \\ \label{4.208} && \top\underline{\ \ }k\underline{\ \ }[[1+1]++l]\underline{\ \ } = [\top\underline{\ \ }[k-1]\underline{\ \ }l\underline{\ \ }+ \top\underline{\ \ }[k-1]\underline{\ \ }[l+1]\underline{\ \ }], \\ \label{4.209} && \bot\underline{\ \ }k\underline{\ \ }[[1+1]++l]\underline{\ \ } = [\bot\underline{\ \ }[k-1]\underline{\ \ }l\underline{\ \ }+ \bot\underline{\ \ }[k-1]\underline{\ \ }[l+1]\underline{\ \ }], \\ \label{4.210} && (1 < \bar{a}) \wedge (\top\underline{\ \ }\bar{b}\underline{\ \ }\bar{c}\underline{\ \ }) \wedge (\bot\underline{\ \ }\bar{b}\underline{\ \ }\bar{c}\underline{\ \ }) {\mathbb R}ightarrow ([\bar{a}+h[\top\underline{\ \ }\bar{b}\underline{\ \ }\bar{c}\underline{\ \ }-- \bot\underline{\ \ }\bar{b}\underline{\ \ }\bar{c}\underline{\ \ }]] = \\ \notag && [[\bar{a}+h\top\underline{\ \ }\bar{b}\underline{\ \ }\bar{c}\underline{\ \ }]-i \bot\underline{\ \ }\bar{b}\underline{\ \ }\bar{c}\underline{\ \ }]) \\ \notag && \}. 
\end{eqnarray} \end{definition} It should be noted that (\ref{4.202}) $ \sim $ (\ref{4.210}) restrict $ [\top\underline{\ \ }\bar{b}\underline{\ \ }\bar{c}\underline{\ \ }-- \bot\underline{\ \ }\bar{b}\underline{\ \ }\bar{c}\underline{\ \ }] $ to be a Farey fraction. In the following, we will deduce some numbers and equalities as examples. \begin{eqnarray*} (A1) & (a \rightarrow 1|[aba]) {\mathbb R}ightarrow (a \rightarrow 1) & by (\ref{4.138}),(\ref{4.18}) \\ (A2) & {\mathbb R}ightarrow (a \rightarrow [aba]) & by (\ref{4.138}),(\ref{4.18}) \\ (A3) & {\mathbb R}ightarrow (a \rightarrow [[aba]ba]) & by (A2),(\ref{4.17}) \\ (A4) & {\mathbb R}ightarrow (a \rightarrow [[1-1]-1]) & by (A1),(\ref{4.139}) \\ (A5) & (a < [1+a]) {\mathbb R}ightarrow (1 < [1+1]) & by (\ref{4.152}),(A1),(\ref{4.29}) \\ (A6) & {\mathbb R}ightarrow 1 & by (\ref{4.23}) \\ (A7) & {\mathbb R}ightarrow [1+1] & \ by (A5),(\ref{4.23}) \\ (A8) & {\mathbb R}ightarrow ([aba] < [1+[aba]]) & by (A5),(A2),(\ref{4.29}) \\ (A9) & {\mathbb R}ightarrow ([1+1] < [1+[1+1]]) & by (A1),(\ref{4.139}),(\ref{4.29}) \\ (A10) & {\mathbb R}ightarrow ([1+1] < [[1+1]+1]) & by (\ref{4.165}),(\ref{4.24}) \\ (A11) & {\mathbb R}ightarrow ([1+1] < [1+1+1]) & by (\ref{4.161}) \\ (A12) & {\mathbb R}ightarrow ([1-1] < [1+[1-1]]) & by (A8),(A1),(\ref{4.139}),(\ref{4.29}) \\ (A13) & {\mathbb R}ightarrow ([1-1] < [[1-1]+1]) & by (\ref{4.165}),(\ref{4.24}) \\ (A14) & {\mathbb R}ightarrow ([1-1] < [1-1+1]) & by (\ref{4.161}) \\ (A15) & {\mathbb R}ightarrow ([1-1] < 1) & by (\ref{4.166}) \\ (A16) & {\mathbb R}ightarrow ([1-1] < [1+1+1]) & by (A15),(A5),(A11),(\ref{4.22}) \\ (A17) & {\mathbb R}ightarrow [1-1] & by (A15),(\ref{4.23}) \\ (A18) & {\mathbb R}ightarrow [1+1+1] & by (A16),(\ref{4.23}) \\ (A19) & {\mathbb R}ightarrow ([1-1] < [1+1]) & by (A15),(A5),(\ref{4.22}) \\ (A20) & ([[1+1]--[1+1]] < [[1+1+1]--[1+1]]) & by (A5),(A11),(\ref{4.154}) \\ (A21) & {\mathbb R}ightarrow (1 < [[1+1+1]--[1+1]]) & by (A20),(\ref{4.173}) \\ (A22) & {\mathbb R}ightarrow [[1+1+1]--[1+1]] & by (A21),(\ref{4.23}) \\ \vdots & \vdots & \vdots \end{eqnarray*} Then we deduce the numbers from $ R \{ \Phi, \Psi \} $: \begin{eqnarray*} & [1-1], [[1-1]-[[1+1]----[1+1]]], [[1+1+1]----[1+1]] \cdots & \end{eqnarray*} \begin{eqnarray*} (B1) & (\top\underline{\ \ }k\underline{\ \ }[[1+1]++l]\underline{\ \ } = [\top\underline{\ \ }[k-1]\underline{\ \ }l\underline{\ \ }+ \top\underline{\ \ }[k-1]\underline{\ \ }[l+1]\underline{\ \ }]) & by (\ref{4.208}) \\ (B2) & {\mathbb R}ightarrow (\top\underline{\ \ }[1+1]\underline{\ \ }[[1+1]++l]\underline{\ \ } = [\top\underline{\ \ }[[1+1]-1]\underline{\ \ }l\underline{\ \ }+ & \\ & \top\underline{\ \ }[[1+1]-1]\underline{\ \ }[l+1]\underline{\ \ }]) & by (\ref{4.150}), (\ref{4.18}), \\ && (\ref{4.42}) \\ (B3) & {\mathbb R}ightarrow (\top\underline{\ \ }[1+1]\underline{\ \ }[[1+1]++1]\underline{\ \ } = [\top\underline{\ \ }[[1+1]-1]\underline{\ \ }1\underline{\ \ }+ & \\ & \top\underline{\ \ }[[1+1]-1]\underline{\ \ }[1+1]\underline{\ \ }]) & by (\ref{4.151}), (\ref{4.18}), \\ && (\ref{4.42}) \\ (B4) & {\mathbb R}ightarrow (\top\underline{\ \ }[1+1]\underline{\ \ }[[1+1]++1]\underline{\ \ } = [\top\underline{\ \ }[1+1-1]\underline{\ \ }1\underline{\ \ }+ & \\ & \top\underline{\ \ }[1+1-1]\underline{\ \ }[1+1]\underline{\ \ }]) & by (\ref{4.161}) \\ (B5) & {\mathbb R}ightarrow (\top\underline{\ \ }[1+1]\underline{\ \ }[[1+1]++1]\underline{\ \ } = [\top\underline{\ \ }[1-1+1]\underline{\ \ }1\underline{\ \ }+ & \\ & \top\underline{\ \ 
}[1-1+1]\underline{\ \ }[1+1]\underline{\ \ }]) & by (\ref{4.167}) \\ (B6) & {\mathbb R}ightarrow (\top\underline{\ \ }[1+1]\underline{\ \ }[[1+1]++1]\underline{\ \ } = [\top\underline{\ \ }[[1-1]+1]\underline{\ \ }1\underline{\ \ }+ & \\ & \top\underline{\ \ }[[1-1]+1]\underline{\ \ }[1+1]\underline{\ \ }]) & by (\ref{4.161}) \\ (B7) & {\mathbb R}ightarrow (\top\underline{\ \ }[1+1]\underline{\ \ }[[1+1]++1]\underline{\ \ } = [\top\underline{\ \ }1\underline{\ \ }1\underline{\ \ }+ \top\underline{\ \ }1\underline{\ \ }[1+1]\underline{\ \ }]) & by (\ref{4.166}) \\ (B8) & {\mathbb R}ightarrow (\top\underline{\ \ }[1+1]\underline{\ \ }[1+1]\underline{\ \ } = [\top\underline{\ \ }1\underline{\ \ }1\underline{\ \ }+ \top\underline{\ \ }1\underline{\ \ }[1+1]\underline{\ \ }]) & by (\ref{4.172}) \\ (B9) & {\mathbb R}ightarrow (\top\underline{\ \ }[1+1]\underline{\ \ }[1+1]\underline{\ \ } = [[1-1]+1]) & by (\ref{4.202}),(\ref{4.203}) \\ (B10) & {\mathbb R}ightarrow (\top\underline{\ \ }[1+1]\underline{\ \ }[1+1]\underline{\ \ } = 1) & by (\ref{4.166}) \\ (B11) & {\mathbb R}ightarrow ([1-1] < \top\underline{\ \ }[1+1]\underline{\ \ }[1+1]\underline{\ \ }) & by (A15),(\ref{4.24}) \\ (B12) & {\mathbb R}ightarrow (\top\underline{\ \ }[1+1]\underline{\ \ }[1+1]\underline{\ \ }) & by (\ref{4.23}) \\ (B13) & (\bot\underline{\ \ }k\underline{\ \ }[[1+1]++l]\underline{\ \ } = [\bot\underline{\ \ }[k-1]\underline{\ \ }l\underline{\ \ }+ \bot\underline{\ \ }[k-1]\underline{\ \ }[l+1]\underline{\ \ }]) & by (\ref{4.209}) \\ (B14) & {\mathbb R}ightarrow (\bot\underline{\ \ }[1+1]\underline{\ \ }[[1+1]++l]\underline{\ \ } = [\bot\underline{\ \ }[[1+1]-1]\underline{\ \ }l\underline{\ \ }+ & \\ & \bot\underline{\ \ }[[1+1]-1]\underline{\ \ }[l+1]\underline{\ \ }]) & by (\ref{4.150}), (\ref{4.18}), \\ && (\ref{4.42}) \\ (B15) & {\mathbb R}ightarrow (\bot\underline{\ \ }[1+1]\underline{\ \ }[[1+1]++1]\underline{\ \ } = [\bot\underline{\ \ }[[1+1]-1]\underline{\ \ }1\underline{\ \ }+ & \\ & \bot\underline{\ \ }[[1+1]-1]\underline{\ \ }[1+1]\underline{\ \ }]) & by (\ref{4.151}), (\ref{4.18}), \\ && (\ref{4.42}) \\ (B16) & {\mathbb R}ightarrow (\bot\underline{\ \ }[1+1]\underline{\ \ }[[1+1]++1]\underline{\ \ } = [\bot\underline{\ \ }[1+1-1]\underline{\ \ }1\underline{\ \ }+ & \\ & \bot\underline{\ \ }[1+1-1]\underline{\ \ }[1+1]\underline{\ \ }]) & by (\ref{4.161}) \\ (B17) & {\mathbb R}ightarrow (\bot\underline{\ \ }[1+1]\underline{\ \ }[[1+1]++1]\underline{\ \ } = [\bot\underline{\ \ }[1-1+1]\underline{\ \ }1\underline{\ \ }+ & \\ & \bot\underline{\ \ }[1-1+1]\underline{\ \ }[1+1]\underline{\ \ }]) & by (\ref{4.167}) \\ (B18) & {\mathbb R}ightarrow (\bot\underline{\ \ }[1+1]\underline{\ \ }[[1+1]++1]\underline{\ \ } = [\bot\underline{\ \ }[[1-1]+1]\underline{\ \ }1\underline{\ \ }+ & \\ & \bot\underline{\ \ }[[1-1]+1]\underline{\ \ }[1+1]\underline{\ \ }]) & by (\ref{4.161}) \\ (B19) & {\mathbb R}ightarrow (\bot\underline{\ \ }[1+1]\underline{\ \ }[[1+1]++1]\underline{\ \ } = [\bot\underline{\ \ }1\underline{\ \ }1\underline{\ \ }+ \bot\underline{\ \ }1\underline{\ \ }[1+1]\underline{\ \ }]) & by (\ref{4.166}) \\ (B20) & {\mathbb R}ightarrow (\bot\underline{\ \ }[1+1]\underline{\ \ }[1+1]\underline{\ \ } = [\bot\underline{\ \ }1\underline{\ \ }1\underline{\ \ }+ \bot\underline{\ \ }1\underline{\ \ }[1+1]\underline{\ \ }]) & by (\ref{4.172}) \\ (B21) & {\mathbb R}ightarrow (\bot\underline{\ \ }[1+1]\underline{\ \ }[1+1]\underline{\ \ } = [1+1]) & by (\ref{4.204}),(\ref{4.205}) \\ (B22) & {\mathbb R}ightarrow 
([1-1] < \bot\underline{\ \ }[1+1]\underline{\ \ }[1+1]\underline{\ \ }) & by (A19),(\ref{4.24}) \\ (B23) & {\mathbb R}ightarrow (\bot\underline{\ \ }[1+1]\underline{\ \ }[1+1]\underline{\ \ }) & by (\ref{4.23}) \\ (B24) & ([[1+1]+e[[1+1+1]--[1+1]]] = & \\ & [[1+1]e[[1+1]+e[[[1+1+1]--[1+1]]-1]]]) & by (A5),(A21), \\ && (\ref{4.201}) \\ (B25) & {\mathbb R}ightarrow ([[1+1]++e[[1+1+1]--[1+1]]] = & \\ & [[1+1]+e[[1+1]++e[[[1+1+1]--[1+1]]-1]]]) & by (\ref{4.141}) \\ (B26) & {\mathbb R}ightarrow ([[1+1]+++e[[1+1+1]--[1+1]]] = & \\ & [[1+1]++e[[1+1]+++e[[[1+1+1]--[1+1]]-1]]]) & by (\ref{4.141}) \\ (B27) & {\mathbb R}ightarrow ([[1+1]++++[[1+1+1]--[1+1]]] = & \\ & [[1+1]+++[[1+1]++++[[[1+1+1]--[1+1]]-1]]]) & by (\ref{4.141}) \\ (B28) & {\mathbb R}ightarrow ([[1+1]++++[[1+1+1]--[1+1]]] = & \\ & [[1+1]+++[[1+1]++++[[[[1+1]+1]--[1+1]]-1]]]) & by (\ref{4.161}) \\ (B29) & {\mathbb R}ightarrow ([[1+1]++++[[1+1+1]--[1+1]]] = & \\ & [[1+1]+++[[1+1]++++ & \\ & [[[[1+1]--[1+1]]+[1--[1+1]]]-1]]]) & by (\ref{4.179}) \\ (B30) & {\mathbb R}ightarrow ([[1+1]++++[[1+1+1]--[1+1]]] = & \\ & [[1+1]+++[[1+1]++++[[1+[1--[1+1]]]-1]]]) & by (A19),(\ref{4.173}) \\ (B31) & {\mathbb R}ightarrow ([[1+1]++++[[1+1+1]--[1+1]]] = & \\ & [[1+1]+++[[1+1]++++[[[1--[1+1]]+1]-1]]]) & by (\ref{4.165}) \\ (B32) & {\mathbb R}ightarrow ([[1+1]++++[[1+1+1]--[1+1]]] = & \\ & [[1+1]+++[[1+1]++++[[1--[1+1]]+1-1]]]) & by (\ref{4.161}) \\ (B33) & {\mathbb R}ightarrow ([[1+1]++++[[1+1+1]--[1+1]]] = & \\ & [[1+1]+++[[1+1]++++[[1--[1+1]]-1+1]]]) & by (\ref{4.167}) \\ (B34) & {\mathbb R}ightarrow ([[1+1]++++[[1+1+1]--[1+1]]] = & \\ & [[1+1]+++[[1+1]++++[[[1--[1+1]]-1]+1]]]) & by (\ref{4.161}) \\ (B35) & {\mathbb R}ightarrow ([[1+1]++++[[1+1+1]--[1+1]]] = & \\ & [[1+1]+++[[1+1]++++[1--[1+1]]]]) & by (\ref{4.166}) \\ (B36) & ([[1+1]+h[\top\underline{\ \ }[1+1]\underline{\ \ }[1+1]\underline{\ \ }-- \bot\underline{\ \ }[1+1]\underline{\ \ }[1+1]\underline{\ \ }]] = & \\ & [[[1+1]+h\top\underline{\ \ }[1+1]\underline{\ \ }[1+1]\underline{\ \ }]-i \bot\underline{\ \ }[1+1]\underline{\ \ }[1+1]\underline{\ \ }]) & by (A5),(B12), \\ && (B23),(\ref{4.210}) \\ (B37) & {\mathbb R}ightarrow ([[1+1]+h[1--[1+1]]] = [[[1+1]+h1]-i[1+1]]) & by (B10),(B21), \\ && (\ref{4.41}) \\ (B38) & {\mathbb R}ightarrow ([[1+1]++h[1--[1+1]]] = & \\ & [[[1+1]++h1]--i[1+1]]) & by (\ref{4.136}),(\ref{4.145}) \\ (B39) & {\mathbb R}ightarrow ([[1+1]+++h[1--[1+1]]] = & \\ & [[[1+1]+++h1]---i[1+1]]) & by (\ref{4.136}),(\ref{4.145}) \\ (B40) & {\mathbb R}ightarrow ([[1+1]++++[1--[1+1]]] = & \\ & [[[1+1]++++1]----[1+1]]) & by (\ref{4.136}),(\ref{4.144}) \\ (B41) & ([[1+1]+e1] = [1+1]) & by (A5),(\ref{4.191}) \\ (B42) & {\mathbb R}ightarrow ([[1+1]++e1] = [1+1]) & by (\ref{4.141}) \\ (B43) & {\mathbb R}ightarrow ([[1+1]+++e1] = [1+1]) & by (\ref{4.141}) \\ (B44) & {\mathbb R}ightarrow ([[1+1]++++1] = [1+1]) & by (\ref{4.141}) \\ (B45) & {\mathbb R}ightarrow ([[1+1]++++[1--[1+1]]] = [[1+1]----[1+1]]) & by (B40),(B44), \\ && (\ref{4.37}) \\ (B46) & {\mathbb R}ightarrow ([[1+1]++++[[1+1+1]--[1+1]]] = & \\ & [[1+1]+++[[1+1]----[1+1]]]) & by (B35),(B45), \\ && (\ref{4.37}) \\ \vdots & \vdots & \vdots \end{eqnarray*} Then we deduce the equalities on deducible numbers from $ R \{ \Phi, \Psi \} $: \begin{eqnarray*} & [[1+1]++[[1+1]---[1+1]]] = [[[1+1]---[1+1]]++[1+1]], & \\ & [[1+1]++++[[1+1+1]--[1+1]]] = [[1+1]+++[[1+1]----[1+1]]] & \\ & \vdots & \end{eqnarray*} The deducible numbers correspond to the real numbers as follows: \begin{eqnarray*} \vdots & \vdots & \vdots, \\ \ 
[[1-1]-[[1+1+1]----[1+1]]] & \equiv & \\ \vdots & \vdots & \vdots, \\ \ [[1-1]-[[1+1]---[1+1]]] & \equiv & -\sqrt[2]{2}, \\ \vdots & \vdots & \vdots, \\ \ [[1-1]-[[1+1]---[1+1+1]]] & \equiv & -\sqrt[3]{2}, \\ \vdots & \vdots & \vdots, \\ \ [[1-1]-1] & \equiv & -1, \\ \vdots & \vdots & \vdots, \\ \ [[1-1]-[1--[1+1]]] & \equiv & -\frac{1}{2}, \\ \vdots & \vdots & \vdots, \\ \ [1-1] & \equiv & 0, \\ \vdots & \vdots & \vdots, \\ \ [1-[1--[1+1]]] & \equiv & \frac{1}{2}, \\ \vdots & \vdots & \vdots, \\ \ [[1+1]///[1+1+1]] & \equiv & \log_{3} 2, \\ \vdots & \vdots & \vdots, \\ \ 1 & \equiv & 1, \\ \vdots & \vdots & \vdots, \\ \ [[1+1]---[1+1+1]] & \equiv & \sqrt[3]{2}, \\ \vdots & \vdots & \vdots, \\ \ [[1+1]---[1+1]] & \equiv & \sqrt[2]{2}, \\ \vdots & \vdots & \vdots, \\ \ [1+[1--[1+1]]] & \equiv & \frac{3}{2}, \\ \vdots & \vdots & \vdots, \\ \ [[1+1+1]///[1+1]] & \equiv & \log_{2} 3, \\ \vdots & \vdots & \vdots, \\ \ [[1+1+1]----[1+1]] & \equiv & \\ \vdots & \vdots & \vdots, \\ \ [1+1] & \equiv & 2, \\ \vdots & \vdots & \vdots, \\ \ [1+1+[1--[1+1]]] & \equiv & \frac{5}{2}, \\ \vdots & \vdots & \vdots, \\ \ [1+1+1] & \equiv & 3, \\ \vdots & \vdots & \vdots, \\ \ [[[1+1+1]----[1+1]]++[1+1]] & \equiv & \\ \vdots & \vdots & \vdots . \end{eqnarray*}
The equalities on deducible numbers correspond to addition, subtraction, multiplication, division, the power operation and further operations in the real number system. Note that in the correspondence above some irrational numbers, such as $ [[1+1+1]----[1+1]] $, $ [[[1+1+1]----[1+1]]++[1+1]] $ and $ [[1-1]-[[1+1+1]----[1+1]]] $, do not correspond to any irrational number based on the traditional operations; they can nevertheless be constructed by the logical calculus $ R \{ \Phi, \Psi \} $. Thus the logical calculus $ R \{ \Phi, \Psi \} $ not only derives the irrational numbers but also equips its deducible numbers with algebraic operations. In this sense the logical calculus $ R \{ \Phi, \Psi \} $ intuitively and logically represents the real number system. \end{document}
\begin{document} \author{Jian-Qi Shen \footnote{E-mail address: [email protected]}$^{1,2}$, Pan Chen$^{1,2}$ and Hong Mao$^{2}$} \address{1. Center for Optical and Electromagnetic Research, State Key Laboratory of \\Modern Optical Instrumentation, College of Information Science and Engineering \\ 2. Zhejiang Institute of Modern Physics and Department of Physics,\\ Zhejiang University, Hangzhou 310027, People's Republic of China} \date{\today} \title{Exact time-dependent decoherence factor \\and its adiabatic classical limit} \maketitle \begin{abstract} The present paper finds the complete set of exact solutions of the general time-dependent dynamical models for quantum decoherence, by making use of the Lewis-Riesenfeld invariant theory and the invariant-related unitary transformation formulation. Based on this, the general explicit expression for the decoherence factor is then obtained and the adiabatic classical limit of an illustrative example is discussed. The result (i.e., the adiabatic classical limit) obtained in this paper is consistent with that obtained by other authors, and furthermore we obtain more general results concerning time-dependent non-adiabatic quantum decoherence. It is shown that the invariant theory is appropriate for treating both the time-dependent quantum decoherence and the geometric phase factor. PACS: 03.65.Ge, 03.65.Bz Keywords: decoherence factor; invariant theory; time-dependent decoherence \end{abstract} \pacs{PACS: 03.65.Ge 03.65.Bz } \section{INTRODUCTION} Solvable models in quantum mechanics enable one to investigate quantum measurement problems very conveniently\cite{Nakazato}. A good number of investigators have studied such useful models, for example the Hepp-Coleman model \cite{Namiki} and the Cini model \cite{Cini}. The exact solvability of these models often provides physicists with a clear understanding of the physical phenomena involved and yields rich physical insights\cite{Namiki2,Nakazato2}. In these works, the first important step is to obtain the exact solutions of the Schr\"{o}dinger equation and the time-evolution operator, which can then be applied to the calculation of the decoherence factor and to the study of wavefunction collapse. Although the exact solutions and the decoherence of these models have been extensively studied by many authors, the coefficients in the Hamiltonians of all these models are merely time-independent (or partially time-dependent), to the best of our knowledge. In the present paper, we obtain the explicit time-evolution operator and the decoherence factor of general dynamical models whose Hamiltonians are totally time-dependent. A time-dependent system is governed by the time-dependent Schr\"{o}dinger equation. The invariant theory\cite{Lewis} suggested by Lewis and Riesenfeld in 1969 can solve the time-dependent Schr\"{o}dinger equation. In 1991, Gao {\it et al.} proposed a generalized invariant theory\cite{Gao1} by introducing basic invariants, which enable one to find the complete set of commuting invariants for some time-dependent multi-dimensional systems\cite{Gao4,Shen,Kim}. We will analyze the general dynamical model in what follows and then calculate the time-dependent decoherence factor by making use of these invariant theories. \section{EXACT DECOHERENCE FACTOR IN TIME-DEPENDENT DECOHERENCE} The original Cini model for the correlation between the states of the measured system and the measuring instrument-detector is built for a two-level system interacting with the detector.
Liu and Sun generalized this Cini model to an $M$-level system\cite{Liu}. In this paper we further construct a general dynamical model for quantum decoherence between the measuring instrument-detector and the measured system, in which $A_{+},A_{-}$ and $A$ denote operators of the measuring instrument-detector, $\omega _{i}(t)$ represents the energy parameter of a certain state $\left| i\right\rangle $ of the multi-level measured system, and $\theta _{i}(t)$ and $\phi _{i}(t)$ are coupling coefficients of the interaction of the measuring instrument-detector with the measured system. The Hamiltonian which describes the interaction between the state $\left| i\right\rangle $ of the multi-level measured system and the measuring instrument-detector is then given as follows: \begin{eqnarray} H_{i}(t) &=&\omega _{i}(t)\{\frac{1}{2}\sin \theta _{i}(t)\exp [-i\phi _{i}(t)]A_{+} \nonumber \\ &&+\frac{1}{2}\sin \theta _{i}(t)\exp [i\phi _{i}(t)]A_{-}+\cos \theta _{i}(t)A\} \label{eq18} \end{eqnarray} with $A_{+},A_{-}$ and $A$ satisfying the general commuting relations of a Lie algebra \begin{equation} \lbrack A_{+},A_{-}]=nA,\quad \lbrack A,A_{+}]=mA_{+},\quad \lbrack A,A_{-}]=-mA_{-}, \label{eq19} \end{equation} where $m,-m$ and $n$ are structure constants of this Lie algebra. The time evolution of this dynamical model is governed by the Schr\"{o}dinger equation (in units with $\hbar=1$) \begin{equation} i\frac{\partial \left| \Psi _{i}(t)\right\rangle _{s}}{\partial t} =H_{i}(t)\left| \Psi _{i}(t)\right\rangle _{s}. \label{eq17} \end{equation} According to the Lewis-Riesenfeld invariant theory, an operator $I_{i}(t)$ that satisfies the following invariant equation\cite{Lewis} \begin{equation} \frac{\partial I_{i}(t)}{\partial t}+\frac{1}{i}[I_{i}(t),H_{i}(t)]=0 \label{eq20} \end{equation} is called an invariant whose eigenvalue is time-independent, i.e., \begin{equation} I_{i}(t)\left| \lambda ,i,t\right\rangle =\lambda \left| \lambda ,i,t\right\rangle ,\quad \frac{\partial \lambda }{\partial t}=0. \label{eq21} \end{equation} It is seen from Eq. (\ref{eq20}) that $I_{i}(t)$ is a linear combination of $A_{+},A_{-}$ and $A$ and may generally be written as \begin{equation} I_{i}(t)=y\{\frac{1}{2}\sin a_{i}(t)\exp [-ib_{i}(t)]A_{+}+\frac{1}{2}\sin a_{i}(t)\exp [ib_{i}(t)]A_{-}\}+\cos a_{i}(t)A, \label{eq22} \end{equation} where the constant $y$ will be determined below. Substitution of (\ref{eq22}) into Eq. (\ref{eq20}) yields \begin{eqnarray} y\exp (-ib_{i})(\dot{a}_{i}\cos a_{i}-i\dot{b}_{i}\sin a_{i})-im\omega _{i}[\exp (-i\phi _{i})\cos a_{i}\sin \theta _{i}-y\exp (-ib_{i})\sin a_{i}\cos \theta _{i}] &=&0, \\ \dot{a}_{i}+\frac{ny}{2}\omega _{i}\sin \theta _{i}\sin (b_{i}-\phi _{i}) &=&0, \label{eq23} \end{eqnarray} where the dot denotes the time derivative. The time-dependent parameters $a_{i}$ and $b_{i}$ are determined by these two auxiliary equations. It is easy to verify that the particular solution $\left| \Psi _{i}(t)\right\rangle _{s}$ of the Schr\"{o}dinger equation can be expressed in terms of the eigenstate $\left| \lambda ,i,t\right\rangle $ of the invariant $I_{i}(t)$, namely, \begin{equation} \left| \Psi _{i}(t)\right\rangle _{s}=\exp [\frac{1}{i}\varphi _{i}(t)]\left| \lambda ,i,t\right\rangle \label{eq24} \end{equation} with \begin{equation} \varphi _{i}(t)=\int_{0}^{t}\left\langle \lambda ,i,t^{^{\prime }}\right| [H_{i}(t^{^{\prime }})-i\frac{\partial }{\partial t^{^{\prime }}}]\left| \lambda ,i,t^{^{\prime }}\right\rangle {\rm d}t^{^{\prime }}.
\end{equation} The physical meanings of $\int_{0}^{t}\left\langle \lambda ,i,t^{^{\prime }}\right| H_{i}(t^{^{\prime }})\left| \lambda ,i,t^{^{\prime }}\right\rangle {\rm d}t^{^{\prime }}$ and $\int_{0}^{t}\left\langle \lambda ,i,t^{^{\prime }}\right| -i\frac{\partial }{\partial t^{^{\prime }}}\left| \lambda ,i,t^{^{\prime }}\right\rangle {\rm d}t^{^{\prime }}$ are dynamical and geometric phase, respectively. Since the expression (\ref{eq24}) is merely a formal solution of the Schr\"{o}dinger equation, in order to get the explicit solutions we make use of the invariant-related unitary transformation formulation\cite{Gao1} which enables one to obtain the complete set of exact solutions of the time-dependent Schr\"{o}dinger equation (\ref{eq17}). In accordance with the invariant-related unitary transformation method, the time-dependent unitary transformation operator is often of the form \begin{equation} V_{i}(t)=\exp [\beta _{i}(t)A_{+}-\beta _{i}^{\ast }(t)A_{-}] \label{eq25} \end{equation} with $\beta _{i}(t)=-\frac{a_{i}(t)}{2}x\exp [-ib_{i}(t)],\quad \beta _{i}^{\ast }(t)=-\frac{a_{i}(t)}{2}x\exp [ib_{i}(t)].$ By making use of the Glauber formula, lengthy calculation yields \begin{eqnarray} I_{iV} &=&V_{i}^{\dagger }(t)I_{i}(t)V_{i}(t)=\{\frac{y}{2}\exp (-ib_{i})\sin a_{i}\cos [(\frac{mn}{2})^{\frac{1}{2}}a_{i}x] \nonumber \\ &&-\frac{(\frac{mn}{2})^{\frac{1}{2}}}{n}\exp (-ib_{i})\cos a_{i}\sin [( \frac{mn}{2})^{\frac{1}{2}}a_{i}x]\}A_{+} \nonumber \\ &&+\{\frac{y}{2}\exp (ib_{i})\sin a_{i}\cos [(\frac{mn}{2})^{\frac{1}{2} }a_{i}x] \nonumber \\ &&-\frac{(\frac{mn}{2})^{\frac{1}{2}}}{n}\exp (ib_{i})\cos a_{i}\sin [(\frac{ mn}{2})^{\frac{1}{2}}a_{i}x]\}A_{-} \nonumber \\ &&+\{\cos a_{i}\cos [(\frac{mn}{2})^{\frac{1}{2}}a_{i}x]+\frac{(\frac{mn}{2} )^{\frac{1}{2}}}{m}y\sin a_{i}\sin [(\frac{mn}{2})^{\frac{1}{2}}a_{i}x]\}A. \end{eqnarray} It can be easily seen that when \begin{equation} y=\frac{m}{(\frac{mn}{2})^{\frac{1}{2}}},\quad x=\frac{1}{(\frac{mn}{2})^{ \frac{1}{2}}}, \end{equation} one may derive that $I_{iV}=A$ which is time-independent. Thus the eigenvalue equation of the time-independent invariant $I_{iV}$ may be written in the form \begin{equation} I_{iV}\left| \lambda \right\rangle =\lambda \left| \lambda \right\rangle ,\quad \left| \lambda \right\rangle =V_{i}^{\dagger }(t)\left| \lambda ,i,t\right\rangle . 
\label{eq27} \end{equation} In the meanwhile, by the aid of Baker-Campbell-Hausdorff formula\cite{Wei}, one can arrive at \begin{eqnarray} V_{i}^{\dagger }(t)H_{i}(t)V_{i}(t) &=&\{\omega _{i}[\frac{1}{2}\sin \theta _{i}\exp (-i\phi _{i})-\frac{\sqrt{\frac{mn}{2}}}{n}\exp (-ib_{i})\cos \theta _{i}\sin (\sqrt{\frac{mn}{2}}a_{i}x)] \nonumber \\ &&+\frac{1}{2}\omega _{i}\exp (-ib_{i})\sin \theta _{i}\cos (b_{i}-\phi _{i})[\cos (\sqrt{\frac{mn}{2}}a_{i}x)-1]\}A_{+} \nonumber \\ &&+\{\omega _{i}[\frac{1}{2}\sin \theta _{i}\exp (i\phi _{i})-\frac{\sqrt{ \frac{mn}{2}}}{n}\exp (ib_{i})\cos \theta _{i}\sin (\sqrt{\frac{mn}{2}} a_{i}x)] \nonumber \\ &&+\frac{1}{2}\omega _{i}\exp (ib_{i})\sin \theta _{i}\cos (b_{i}-\phi _{i})[\cos (\sqrt{\frac{mn}{2}}a_{i}x)-1]\}A_{-} \nonumber \\ &&+\omega _{i}[\cos \theta _{i}\cos (\sqrt{\frac{mn}{2}}a_{i}x)+\frac{\sqrt{ \frac{mn}{2}}}{m}\sin \theta _{i}\cos (b_{i}-\phi _{i})\sin (\sqrt{\frac{mn}{ 2}}a_{i}x)]A, \label{eq027} \end{eqnarray} which is related to the dynamical phase; and \begin{eqnarray} V_{i}^{\dagger }(t)i\frac{\partial V_{i}(t)}{\partial t} &=&[-\frac{i}{2} \dot{a}_{i}x\exp (-ib_{i})-\frac{1}{2}\frac{1}{\sqrt{\frac{mn}{2}}}\dot{b} _{i}\exp (-ib_{i})\sin (\sqrt{\frac{mn}{2}}a_{i}x)]A_{+} \nonumber \\ &&+[\frac{i}{2}\dot{a}_{i}x\exp (ib_{i})-\frac{1}{2}\frac{1}{\sqrt{\frac{mn}{ 2}}}\dot{b}_{i}\exp (ib_{i})\sin (\sqrt{\frac{mn}{2}}a_{i}x)]A_{-} \nonumber \\ &&-\frac{\dot{b}_{i}}{m}[1-\cos (\sqrt{\frac{mn}{2}}a_{i}x)]A, \label{eq028} \end{eqnarray} which is related to the geometric phase. It follows from Eq. (\ref{eq027}) and Eq. (\ref{eq028}) that, under the transformation $V(t),$ the Hamiltonian $H(t)$ can be changed into \begin{eqnarray} H_{iV}(t) &=&V_{i}^{\dagger }(t)H_{i}(t)V_{i}(t)-V_{i}^{\dagger }(t)i\frac{ \partial V_{i}(t)}{\partial t} \nonumber \\ &=&\{\omega _{i}[\cos a_{i}\cos \theta _{i}+\frac{(\frac{mn}{2})^{\frac{1}{2} }}{m}\sin a_{i}\sin \theta _{i}\cos (b_{i}-\phi _{i})] \nonumber \\ &&+\frac{\dot{b}_{i}}{m}(1-\cos a_{i})\}A \label{eq28} \end{eqnarray} Hence, with the help of Eq. (\ref{eq24}) and Eq. (\ref{eq27}), the particular solution of the Schr\"{o}dinger equation is obtained \begin{equation} \left| \Psi _{i}(t)\right\rangle _{s}=\exp [\frac{1}{i}\varphi _{i}(t)]V_{i}(t)\left| \lambda ,i\right\rangle \label{eq29} \end{equation} with the phase \begin{eqnarray} \varphi _{i}(t) &=&\int_{0}^{t}\left\langle \lambda \right| [V_{i}^{\dagger }(t^{^{\prime }})H_{i}(t^{^{\prime }})V_{i}(t^{^{\prime }})-V_{i}^{\dagger }(t^{^{\prime }})i\frac{\partial }{\partial t^{^{\prime }}}V_{i}(t^{^{\prime }})]\left| \lambda \right\rangle {\rm d}t^{^{\prime }} \nonumber \\ &=&\varphi _{id}(t)+\varphi _{ig}(t) \nonumber \\ &=&\lambda \int_{0}^{t}\{\omega _{i}[\cos a_{i}\cos \theta _{i}+\frac{(\frac{ mn}{2})^{\frac{1}{2}}}{m}\sin a_{i}\sin \theta _{i}\cos (b_{i}-\phi _{i})] \nonumber \\ &&+\frac{\dot{b}_{i}}{m}(1-\cos a_{i})\}{\rm d}t^{^{\prime }}, \label{eq30} \end{eqnarray} where the dynamical phase is $\varphi _{id}(t)=\lambda \int_{0}^{t}\omega _{i}[\cos a_{i}\cos \theta _{i}+\frac{(\frac{mn}{2})^{\frac{1}{2}}}{m}\sin a_{i}\sin \theta _{i}\cos (b_{i}-\phi _{i})]{\rm d}t^{^{\prime }}$ and the geometric phase is $\varphi _{ig}(t)=\lambda \int_{0}^{t}\frac{\dot{b}_{i}}{m }(1-\cos a_{i}){\rm d}t^{^{\prime }}.$ It is seen that the former phase is related to the dynamical parameters of the Hamiltonian such as $\omega _{i},\cos \theta _{i},\sin \theta _{i},$ etc., whereas the latter is not immediately related to these parameters. 
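It may be worth noting, as a quick consistency check of the formulas above, that for an su(2)-type algebra (the case used in the illustrative example of the next section), where $n=2$ and $m=1$, one has $(\frac{mn}{2})^{\frac{1}{2}}=1$ and hence $x=y=1$; the transformation parameters then reduce to $\beta _{i}(t)=-\frac{a_{i}(t)}{2}\exp [-ib_{i}(t)]$, and the total phase (\ref{eq30}) becomes $\varphi _{i}(t)=\lambda \int_{0}^{t}\{\omega _{i}[\cos a_{i}\cos \theta _{i}+\sin a_{i}\sin \theta _{i}\cos (b_{i}-\phi _{i})]+\dot{b}_{i}(1-\cos a_{i})\}{\rm d}t^{^{\prime }}$.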
If the parameter $a_{i}$ is taken to be time-independent, then we arrive at \begin{equation} \varphi _{ig}(T)=\lambda \int_{0}^{T}\frac{\dot{b}_{i}}{m}(1-\cos a_{i}){\rm d}t^{^{\prime }}=\frac{\lambda }{m}[2\pi (1-\cos a_{i})], \end{equation} where $2\pi (1-\cos a_{i})$ is an expression for the solid angle over the parameter space of the invariant. This fact shows the global or topological meaning of the geometric phase $\varphi _{ig}(t)$\cite{Berry}. The expression (\ref{eq29}) is a particular exact solution corresponding to $\lambda $, and the general solutions of the time-dependent Schr\"{o}dinger equation are easily obtained as linear combinations of all these particular solutions. Since we have the exact solutions of the general time-dependent model, we can obtain the exact expression for the time-dependent decoherence factor, which is given by \begin{equation} F_{i,j}(t)=\left\langle \lambda \right| V_{i}^{\dagger }(t)V_{j}(t)\left| \lambda \right\rangle . \label{eq200} \end{equation} Further calculation yields \begin{equation} F_{i,j}(t)=\exp [\frac{n\lambda }{2}(\beta _{i}\beta _{j}^{\ast }-\beta _{i}^{\ast }\beta _{j})]\left\langle \lambda \right| \exp [(\beta _{j}-\beta _{i})A_{+}-(\beta _{j}^{\ast }-\beta _{i}^{\ast })A_{-}]\left| \lambda \right\rangle, \end{equation} which is the general expression for the decoherence factor of the time-dependent dynamical model (\ref{eq18}). Although this expression is somewhat complicated, it is an explicit expression that does not contain the chronological product. \section{AN ILLUSTRATIVE EXAMPLE} To show that the above expression for the decoherence factor reduces to the result familiar to us from the time-independent (or partially time-dependent) dynamical models of decoherence, we consider the adiabatic classical limit of a special dynamical model. Liu and Sun generalized the original Cini model to an $M$-level system\cite{Liu}. The Hamiltonian of this generalized model is written as \begin{equation} H=H_{S}+H_{D}+H_{I}, \label{eq1} \end{equation} where $H_{S}$ is the model Hamiltonian of the measured system S with $M$ levels and $H_{D}$ is the free Hamiltonian of the two-boson-state detector D. They are generally of the forms \begin{equation} H_{S}=\sum_{k=1}^{M}E_{k}\left| \Phi _{k}\right\rangle \left\langle \Phi _{k}\right| ,\quad H_{D}=\omega _{1}a_{1}^{\dagger }a_{1}+\omega _{2}a_{2}^{\dagger }a_{2} \label{eq2} \end{equation} with the creation and annihilation operators $a_{i}^{\dagger }$, $a_{i}$ satisfying the following commuting relations \begin{equation} \left[ a_{i},a_{j}^{\dagger }\right] =\delta _{ij},\quad \left[ a_{i},a_{j} \right] =\left[ a_{i}^{\dagger },a_{j}^{\dagger }\right] =0.
\label{eq3} \end{equation} The interaction Hamiltonian $H_{I}$ is given by \begin{eqnarray} H_{I} &=&\sum_{n}\left| \Phi _{n}\right\rangle \left\langle \Phi _{n}\right| (g_{n}a_{1}^{\dagger }a_{2}+g_{n}^{\ast }a_{2}^{\dagger }a_{1}) \nonumber \\ &=&\sum_{n}\left| \Phi _{n}\right\rangle \left\langle \Phi _{n}\right| (g_{n}J_{+}+g_{n}^{\ast }J_{-}) \end{eqnarray} with $J_{+}=a_{1}^{\dagger }a_{2},J_{-}=a_{2}^{\dagger }a_{1},J_{3}=\frac{1}{ 2}(a_{1}^{\dagger }a_{1}-a_{2}^{\dagger }a_{2})$ satisfying the commuting relations $[J_{+},J_{-}]=2J_{3},[J_{3},J_{\pm }]=\pm J_{\pm }.$ It can be seen from the form of the Hamiltonian that both $\left| \Phi _{k}\right\rangle \left\langle \Phi _{k}\right| $ and $N=\frac{ a_{1}^{\dagger }a_{1}+a_{2}^{\dagger }a_{2}}{2}$ commute with $H,$namely, $ \left[ \left| \Phi _{k}\right\rangle \left\langle \Phi _{k}\right| ,H\right] =\left[ N,H\right] =0.$ Hence, a generalized quasialgebra which enables one to obtain the complete set of exact solutions of the Schr\"{o}dinger equation can be found by working in a sub-Hilbert-space corresponding to the particular eigenvalues of both $\left| \Phi _{k}\right\rangle \left\langle \Phi _{k}\right| $ and $N$, and then the Hamiltonian can be rewritten in this sub-Hilbert-space \begin{equation} H_{n,k}(t)=E_{k}+g_{k}J_{+}+g_{k}^{\ast }J_{-}+(\omega _{1}-\omega _{2})J_{3}+n(\omega _{1}+\omega _{2}) \label{eq6} \end{equation} with $n$ being the eigenvalue of $N$ and satisfying \begin{equation} N\left| n_{1},n_{2}\right\rangle =n\left| n_{1},n_{2}\right\rangle ,\quad n= \frac{1}{2}(n_{1}+n_{2}). \end{equation} Thus in the sub-Hilbert-space we write the Schr\"{o}dinger equation in the form \begin{equation} H_{n,k}(t)\left| \Psi _{n,k}(t)\right\rangle _{s}=i\frac{\partial }{\partial t}\left| \Psi _{n,k}(t)\right\rangle _{s}, \label{eq7} \end{equation} and $\left| \Psi (t)\right\rangle _{s}$ can be obtained from \begin{equation} \left| \Psi (t)\right\rangle _{s}=\sum_{n}\prod_{k}c_{n,k}\left| \Psi _{n,k}(t)\right\rangle _{s}\left| \Phi _{k}\right\rangle \label{eq8} \end{equation} where $c_{n,k}$ is time-independent and determined by the initial conditions. For the case of this time-dependent Cini model, we set $ A_{+}=J_{+},A_{-}=J_{-},C=J_{z}$ with $J_{\pm }=J_{1}\pm iJ_{2}$. In the adiabatic limit, it follows from the auxiliary equations (\ref{eq23}) that \begin{equation} a_{i}=\theta _{i},\quad b_{i}=\varphi _{i};\quad a_{j}=\theta _{j},\quad b_{j}=\varphi _{j}. 
\end{equation} Since \begin{equation} \beta _{i}=-\frac{a_{i}}{2}\exp [-ib_{i}],\quad \beta _{j}=-\frac{a_{j}}{2} \exp [-ib_{j}], \end{equation} we let both $b_{i}$ and $b_{j}$ vanish for convenience, which leads to $ \dot{\theta}_{i}=0$ and $\dot{\theta}_{j}=0$ in terms of the auxiliary equations (\ref{eq23}), and then $\beta _{i}=-\frac{a_{i}}{2},\beta _{j}=-\frac{ a_{j}}{2}.$ It is therefore easily obtained that \begin{eqnarray} F_{i,j}(t) &=&\exp [\frac{\lambda }{2}(\beta _{i}\beta _{j}^{\ast }-\beta _{i}^{\ast }\beta _{j})]\left\langle j,m\right| \exp [(\beta _{j}-\beta _{i})J_{+}-(\beta _{j}^{\ast }-\beta _{i}^{\ast })J_{-}]\left| j,m\right\rangle \nonumber \\ &=&\left\langle j,m\right| \exp \{[(\beta _{j}-\beta _{i})-(\beta _{j}^{\ast }-\beta _{i}^{\ast })]J_{1} \nonumber \\ &&+i[(\beta _{j}-\beta _{i})+(\beta _{j}^{\ast }-\beta _{i}^{\ast })]J_{2}\}\left| j,m\right\rangle \nonumber \\ &=&\left\langle j,m\right| \exp [i(a_{i}-a_{j})J_{2}]\left| j,m\right\rangle , \end{eqnarray} where $j$ and $m$ satisfy \begin{equation} J_{3}\left| j,m\right\rangle =m\left| j,m\right\rangle ,\quad J^{2}\left| j,m\right\rangle =j(j+1)\left| j,m\right\rangle . \end{equation} If at $t=0$ the state of the measuring instrument-detector is $\left| j,j\right\rangle $, then the decoherence factor is \begin{eqnarray} F_{i,j}(t) &=&\left\langle j,j\right| \exp [i(a_{i}-a_{j})J_{2}]\left| j,j\right\rangle \nonumber \\ &=&[\cos (\frac{a_{i}-a_{j}}{2})]^{2j}, \end{eqnarray} which is consistent with that obtained by Sun {\it et al.}\cite{Zeng}. In the classical limit, where $j\rightarrow \infty $ and $\frac{a_{i}-a_{j}}{2}\neq n\pi \quad (n=0,\pm 1,\pm 2,\cdots )$, we have $F_{i,j}(t)\rightarrow 0$, which means that wavefunction collapse occurs in the classical limit. Since we have exactly solved the Schr\"{o}dinger equation governing the time-dependent quantum decoherence, and the result (i.e., the adiabatic classical limit) obtained here is consistent with that obtained in previous references, we hold that the more general results presented in this paper are useful for treating the time-dependent non-adiabatic quantum decoherence. \section{CONCLUDING REMARKS} The present paper obtains the exact solutions and the decoherence factor of the general time-dependent dynamical model for quantum decoherence by working in the sub-Hilbert space corresponding to the eigenvalues of two invariants and by making use of the invariant-related unitary transformation method. The invariant-related unitary transformation formulation is an effective method for treating time-dependent problems\cite{Fu,Shen2,Shen3}. This formulation replaces the eigenstates of the time-dependent invariants with those of the time-independent invariants through the unitary transformation. It uses the invariant-related unitary transformation and obtains the explicit expression for the time-evolution operator, instead of the formal solution that is related to the chronological product. In view of what has been discussed above, it can be seen that the invariant theory is appropriate for treating the time-dependent quantum decoherence. Evidently, the results presented in this paper are easy to generalize to the time-dependent Hepp-Coleman model. Since the geometric phase factor appears in time-dependent systems, it is interesting to investigate the geometric phase in the time-dependent quantum decoherence by using the formulation of this paper.
Acknowledgment: This project was supported by the National Natural Science Foundation of China under Project No. $30000034$. \begin{references} \bibitem{Nakazato} H. Nakazato, S. Pascazio, Phys. Rev. Lett. 70 (1993) 1. \bibitem{Namiki} M. Namiki, S. Pascazio, Phys. Rev. A 44 (1991) 39. \bibitem{Cini} M. Cini, Nuovo Cimento B 73 (1983) 27. \bibitem{Namiki2} M. Namiki, S. Pascazio, Found. Phys. Lett. 4 (1991) 203. \bibitem{Nakazato2} H. Nakazato, S. Pascazio, Phys. Rev. A 57 (1998) 753. \bibitem{Lewis} H. R. Lewis, W. B. Riesenfeld, J. Math. Phys. 10 (1969) 1458. \bibitem{Gao1} X. C. Gao, J. B. Xu, T. Z. Qian, Phys. Rev. A 45 (1992) 4355. \bibitem{Gao4} X. C. Gao, J. Fu, J. Q. Shen, Eur. Phys. J. C 13 (2000) 527. \bibitem{Shen} J. Q. Shen, H. Y. Zhu, J. Li, Acta Phys. Sin. 50 (2001) 1884. \bibitem{Kim} S. P. Kim, A. E. Santana, F. C. Khanna, Phys. Lett. A 272 (2000) 46. \bibitem{Berry} M. V. Berry, Proc. R. Soc. Lond. A 392 (1984) 45. \bibitem{Liu} X. J. Liu, C. P. Sun, Phys. Lett. A 198 (1995) 371. \bibitem{Wei} J. Wei, E. Norman, J. Math. Phys. (N.Y.) 4 (1963) 575. \bibitem{Zeng} J. Y. Zeng, S. Y. Pei, New Advance in Quantum Mechanics, Publishing House of Peking University, Beijing, 2000 (Chapter 3). \bibitem{Fu} J. Fu, X. C. Gao, J. B. Xu, X. B. Zou, Can. J. Phys. 77 (1999) 1. \bibitem{Shen2} J. Q. Shen, H. Y. Zhu, S. L. Shi, J. Li, Phys. Scr. 65 (2002) 465. \bibitem{Shen3} J. Q. Shen, H. Y. Zhu, S. L. Shi, J. Li, J. Phys. Soc. Jpn. 71 (2001) 1440. \end{references} \end{document}
\begin{document} \title{Exceptional Charlier and Hermite orthogonal polynomials\footnote{Partially supported by MTM2012-36732-C03-03 (Ministerio de Economía y Competitividad), FQM-262, FQM-4643, FQM-7276 (Junta de Andalucía) and Feder Funds (European Union).}} \begin{abstract} Using Casorati determinants of Charlier polynomials $(c_n^a)_n$, we construct for each finite set $F$ of positive integers a sequence of polynomials $c_n^{F}$, $n\in \sigma _F$, which are eigenfunctions of a second order difference operator, where $\sigma _F$ is a certain infinite set of nonnegative integers, $\sigma _F \varsubsetneq \NN$. For suitable finite sets $F$ (we call them admissible sets), we prove that the polynomials $c_n^{F}$, $n\in \sigma _F$, are actually exceptional Charlier polynomials; that is, in addition, they are orthogonal and complete with respect to a positive measure. By passing to the limit, we transform the Casorati determinant of Charlier polynomials into a Wronskian determinant of Hermite polynomials. For admissible sets, these Wronskian determinants turn out to be exceptional Hermite polynomials. \end{abstract} \section{Introduction} Exceptional orthogonal polynomials $p_n$, $n\in X\varsubsetneq \NN$, are complete orthogonal polynomial systems with respect to a positive measure which in addition are eigenfunctions of a second order differential operator. They extend the classical families of Hermite, Laguerre and Jacobi. The last few years have seen a great deal of activity in the area of exceptional orthogonal polynomials (see, for instance, \cite{DEK}, \cite{GUKM1}, \cite{GUKM2} (where the adjective \textrm{exceptional} for this topic was introduced), \cite{GUKM3}, \cite{GUKM4}, \cite{GUGM}, \cite{MR}, \cite{OS0}, \cite{OS}, \cite{Qu}, \cite{STZ}, \cite{Ta} and the references therein). The most apparent difference between classical orthogonal polynomials and exceptional orthogonal polynomials is that the exceptional families have gaps in their degrees, in the sense that not all degrees are present in the sequence of polynomials (as happens with the classical families), although they form a complete orthonormal set of the underlying $L^2$ space defined by the orthogonalizing positive measure. This means in particular that they are not covered by the hypotheses of Bochner's classification theorem \cite{B}. Exceptional orthogonal polynomials have been applied to shape-invariant potentials \cite{Qu}, supersymmetric transformations \cite{GUKM3}, discrete quantum mechanics \cite{OS}, mass-dependent potentials \cite{MR}, and quasi-exact solvability \cite{Ta}. In the same way, exceptional discrete orthogonal polynomials are complete orthogonal polynomial systems with respect to a positive measure which in addition are eigenfunctions of a second order difference operator, extending the discrete classical families of Charlier, Meixner, Krawtchouk and Hahn. As far as the author knows, the only known example of what can be called exceptional Charlier polynomials appeared in \cite{YZ} (1999). If orthogonal discrete polynomials on nonuniform lattices and orthogonal $q$-polynomials are considered, then one should add \cite{OS,OS2,OS4,OS5}, where exceptional Wilson, Racah, Askey-Wilson and $q$-Racah polynomials are considered. The purpose of this paper (and the forthcoming ones) is to introduce a systematic way of constructing exceptional discrete orthogonal polynomials using the concept of dual families of polynomials (see \cite{Leo}).
One can then also construct examples of exceptional orthogonal polynomials by taking limits in some of the parameters in the same way as one goes from classical discrete polynomials to classical polynomials in the Askey tableau. \begin{definition}\label{dfp} Given two sets of nonnegative integers $U,V\subset \NN$, we say that the two sequences of polynomials $(p_u)_{u\in U}$, $(q_v)_{v\in V}$ are dual if there exist a couple of sequences of numbers $(\xi_u)_{u\in U}, (\zeta_v)_{v\in V} $ such that \begin{equation}\label{defdp} \xi_up_u(v)=\zeta_vq_v(u), \quad u\in U, v\in V. \end{equation} \end{definition} Duality has shown to be a fruitful concept regarding discrete orthogonal polynomials, and its utility will be again manifest in the exceptional discrete polynomials world. Indeed, it turns out that duality interchanges exceptional discrete orthogonal polynomials with the so-called Krall discrete orthogonal polynomials. A Krall discrete orthogonal family is a sequence of polynomials $(p_n)_{n\in \NN}$, $p_n$ of degree $n$, orthogonal with respect to a positive measure which, in addition, are also eigenfunctions of a higher order difference operator. A huge amount of families of Krall discrete orthogonal polynomials have been recently introduced by the author by mean of certain Christoffel transform of the classical discrete measures of Charlier, Meixner, Krawtchouk and Hahn (see \cite{du0}, \cite{du1}, \cite{DdI}). A Christoffel transform is a transformation which consists in multiplying a measure $\mu$ by a polynomial $r$. It has a long tradition in the context of orthogonal polynomials: it goes back a century and a half ago when E.B. Christoffel (see \cite{Chr} and also \cite{Sz}) studied it for the particular case $r(x)=x$. In this paper we will concentrate on exceptional Charlier and Hermite polynomials (Meixner, Krawtchouk, Hahn, Laguerre and Jacobi families will be considered in forthcoming papers). The content of this paper is as follows. In Section 2, we include some preliminary results about symmetric operators, Christoffel transforms and finite sets of positive integers. In Section 3, using Casorati determinants of Charlier polynomials we associate to each finite set $F$ of positive integers a sequence of polynomials which are eigenfunctions of a second order difference operator. Indeed, write the finite set $F$ of positive integers as $F=\{f_1,\cdots ,f_k\}$, $f_i<f_{i+1}$ ($k$ is then the number of elements of $F$ and $f_k$ the maximum element of $F$). We define the nonnegative integer $u_F$ by $u_F=\sum_{f\in F}f-\binom{k+1}{2}$ and the infinite set of nonnegative integers $\sigma _F$ by $$ \sigma _F=\{u_F,u_F+1,u_F+2,\cdots \}\setminus \{u_F+f,f\in F\}. $$ Given $a\in \RR \setminus \{0\}$, we then associate to $F$ the sequence of polynomials $c_n^{a;F}$, $n\in \sigma _F$, defined by \begin{equation}\label{defchexi} c_n^{a;F}(x)=\begin{vmatrix}c_{n-u_F}^a(x)&c_{n-u_F}^a(x+1)&\cdots &c_{n-u_F}^a(x+k)\\ c_{f_1}^a(x)&c_{f_1}^a(x+1)&\cdots &c_{f_1}^a(x+k)\\ \vdots&\vdots&\ddots &\vdots\\ c_{f_k}^a(x)&c_{f_k}^a(x+1)&\cdots &c_{f_k}^a(x+k) \end{vmatrix} , \end{equation} where $(c_n^a)_n$ are the Charlier polynomials (see (\ref{Chpol})) orthogonal with respect to the discrete measure $$ \rho_a =\sum_{x=0}^\infty \frac{a^x}{x!}\delta _x. $$ Consider now the measure \begin{equation}\label{ctmc} \rho _{a}^{F}=(x-f_1)\cdots (x-f_k)\rho _a. 
\end{equation} Orthogonal polynomials with respect to $\rho _a^F$ are eigenfunctions of higher order difference operators (see \cite{du0} and \cite{DdI}). It turns out that the sequence of polynomials $c_n^{a;F}$, $n\in \sigma _F$, and the sequence of orthogonal polynomials $(q_n^F)_n$ with respect to the measure $\rho _{a}^{F}$ are dual sequences (see Lemma \ref{lem3.2}). As a consequence we get that the polynomials $c_n^{a;F}$, $n\in \sigma _F$, are always eigenfunctions of a second order difference operator $D_F$ (whose coefficients are rational functions); see Theorem \ref{th3.3}. The Charlier-type orthogonal polynomials considered in \cite{YZ} correspond to the case $F=\{1,2\}$ (duality is also used in \cite{YZ}). In Section 4, we study the most interesting case: it appears when the measure $\rho _{a}^{F}$ (\ref{ctmc}) is positive. This gives rise to the concept of admissible sets of positive integers. Split the set $F$, $F=\bigcup _{i=1}^KY_i$, in such a way that $Y_i\cap Y_j=\emptyset $, $i\not =j$, the elements of each $Y_i$ are consecutive integers and $1+\max Y_i<\min Y_{i+1}$, $i=1,\cdots, K-1$; we then say that $F$ is admissible if each $Y_i$, $i=1,\cdots, K$, has an even number of elements. It is straightforward to see that $F$ is admissible if and only if $\prod_{f\in F}(x-f)\ge 0$, $x\in \NN$, or, in other words, (if $a>0$) the measure $\rho_a^F$ (\ref{ctmc}) is positive. This concept of admissibility has appeared several times in the literature. Relevant to this paper, because of the relationship with exceptional polynomials, are \cite{Kr} and \cite{Ad}, where the concept appears in connection with the zeros of certain Wronskian determinants associated with eigenfunctions of second order differential operators of the form $-d^2/dx ^2 +U$. Admissibility was also considered in \cite{KS} and \cite{YZ}. We prove (Theorems \ref{th4.4} and \ref{th4.5}) that if $F$ is an admissible set and $a>0$, then the polynomials $c_n^{a;F}$, $n\in \sigma _F$, are orthogonal and complete with respect to the positive measure $$ \omega_{a;F} =\sum_{x=0}^\infty \frac{a^x}{x!\Omega ^a_F(x)\Omega ^a_F(x+1)}\delta _x, $$ where $\Omega _F^a$ is the polynomial defined by \begin{equation}\label{defchexii} \Omega _F ^a(x)=\begin{vmatrix}c_{f_1}^a(x)&c_{f_1}^a(x+1)&\cdots &c_{f_1}^a(x+k-1)\\ \vdots&\vdots&\ddots &\vdots\\ c_{f_k}^a(x)&c_{f_k}^a(x+1)&\cdots &c_{f_k}^a(x+k-1) \end{vmatrix} . \end{equation} In particular we characterize admissible sets $F$ as those for which the Casorati determinant $\Omega ^a_F(x)$ has constant sign for $x\in \NN$ (Lemma \ref{l3.1}). Casorati determinants like (\ref{defchexii}) for Charlier and other discrete orthogonal polynomials were considered by Karlin and Szeg\H o in \cite{KS} (see also \cite{KMc}). In particular, Karlin and Szeg\H o proved that when $F$ is admissible then $\Omega _F^a(x)$ $(a>0)$ has constant sign for $x\in \NN$. Although it is beyond the scope of this paper, we point out here that the duality transforms the higher order difference operator with respect to which the polynomials $(q_n^F)_n$ are eigenfunctions into a higher order recurrence relation for the polynomials $c_n^{a;F}$. This higher order recurrence relation has the form $$ h(x)c_n^{a;F}(x)=\sum_{j=-u_F-k-1}^{u_F+k+1}u^{a;F}_{n,j}c_{n+j}^{a;F}(x) $$ where $h$ is a polynomial in $x$ of degree $u_F+k+1$ satisfying $h(x)-h(x-1)=\Omega _F^a(x)$, and $u^{a;F}_{n,j}$, $j=-u_F-k-1,\ldots ,u_F+k+1$, are rational functions in $n$ depending on $a$ and $F$ but not on $x$.
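To fix ideas about admissibility, consider $F=\{1,2\}$, the case corresponding to \cite{YZ}: here $Y_1=F$ has an even number of elements, so $F$ is admissible, and indeed $(x-1)(x-2)\ge 0$ for every $x\in \NN$; moreover $k=2$, $u_F=1+2-\binom{3}{2}=0$ and $\sigma _F=\{0,3,4,5,\cdots \}$. On the other hand, $F=\{1,2,3\}$ is not admissible, since $Y_1=F$ has an odd number of elements and $(x-1)(x-2)(x-3)=-6<0$ for $x=0$.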
In Section 5 and 6, we construct exceptional Hermite polynomials by taking limit (in a suitable way) in the exceptional Charlier polynomials when $a\to +\infty $. We then get (see Theorem \ref{th5.1}) that for each finite set $F$ of positive integers, the polynomials \begin{equation}\label{defhexi} H_n^F(x)=\begin{vmatrix}H_{n-u_F}(x)&H_{n-u_F}'(x)&\cdots &H_{n-u_F}^{(k)}(x)\\ H_{f_1}(x)&H_{f_1}'(x)&\cdots &H_{f_1}^{(k)}(x)\\ \vdots&\vdots&\ddots &\vdots\\ H_{f_k}(x)&H_{f_k}'(x)&\cdots &H_{f_k}^{(k)}(x) \end{vmatrix} , \end{equation} $n\in \sigma _F$, are eigenfunctions of a second order differential operator. When $F$ is admissible, the Wronskian determinant $\Omega _F$ defined by \begin{equation}\label{defhexii} \Omega _F(x)=\begin{vmatrix} H_{f_1}(x)&H_{f_1}'(x)&\cdots &H_{f_1}^{(k-1)}(x)\\ \vdots&\vdots&\ddots &\vdots\\ H_{f_k}(x)&H_{f_k}'(x)&\cdots &H_{f_k}^{(k-1)}(x) \end{vmatrix} \end{equation} does not vanish in $\RR$. For admissible sets $F$, we then prove that the polynomials $H_n^F$, $n\in \sigma _F$, are orthogonal with respect to the positive weight $$ \omega_{F}(x) =\frac{e^{-x^2}}{\Omega ^2_F(x)},\quad x\in \RR . $$ Moreover, they form a complete orthogonal system in $L^2(\omega _{F})$ (see Theorem \ref{th6.3}). The exceptional Hermite family introduced in \cite{DR} corresponds with $F=\{1,2,\cdots , 2k\}$ (for the case $k=1$ see also \cite{DEK} and \cite{CPRS}). Simultaneously with this paper, exceptional Hermite polynomials as Wronskian determinant of Hermite polynomials have been introduced and studied (using a different approach) in \cite{GUGM}. We guess that the non vanishing property of the Wronskian determinant (\ref{defhexii}) in $\RR$ is actually true for orthogonal polynomials with respect to a positive measure. Moreover, we conjecture that this property characterizes admissible sets: \noindent \textsl{Conjecture.} Let $F=\{f_1,\cdots, f_k\}$ and $\mu$ be a finite set of positive integers and a positive measure with finite moments and infinitely many points in its support, respectively. Consider the monic sequence $(p_n)_n$ of orthogonal polynomials with respect to $\mu$ and write $\Omega _F^{\mu}$ for the Wronskian determinant defined by $\Omega_{F}^{\mu} (x)=|p_{f_i}^{(j-1)}(x)|_{i,j=1}^k$. Then the following conditions are equivalent. \begin{enumerate} \item $F$ is admissible. \item For all positive measure $\mu$ as above the Wronskian determinant $\Omega _{F}^{\mu}(x)$ does not vanish in $\RR$. \end{enumerate} Wronskian determinants like (\ref{defhexii}) for orthogonal polynomials were considered by Karlin and Szeg\H o in \cite{KS} for the particular case of finite sets $F$ formed by consecutive positive integers. In particular, Karlin and Szeg\H o proved the implication (1) $\Rightarrow $ (2) when $F$ is formed by an even number of consecutive positive integers. When $F$ is an admissible set and $a>0$, exceptional Charlier and Hermite polynomials $c_n^{a;F}$ and $H_n^F$, $n\in \sigma _F$, can be constructed in an alternative way. Indeed, consider the involution $I$ in the set of all finite sets of positive integers defined by $$ I(F)=\{1,2,\cdots, f_k\}\setminus \{f_k-f,f\in F\}. $$ The set $I(F)$ will be denoted by $G$: $G=I(F)$. We also write $G=\{g_1,\cdots , g_m\}$ with $g_i<g_{i+1}$ so that $m$ is the number of elements of $G$ and $g_m$ the maximum element of $G$. We also need the nonnegative integer $v_F$ defined by $$ v_F=\sum_{f\in F}f+f_k-\frac{(k-1)(k+2)}{2}. 
$$ For $n\ge v_F$, we then have \begin{equation}\label{quschi2i} c_n^{a;F}(x)=\beta_n\begin{vmatrix} c^a_{n-v_F}(x) & \frac{x}{a}c^a_{n-v_F}(x-1) & \cdots & \frac{(x-m+1)_m}{a^m}c^a_{n-v_F}(x-m+1) \\ c^{-a}_{g_1}(-x-1) & c^{-a}_{g_1}(-x) & \cdots & c^{-a}_{g_1}(-x+m-1) \\ \vdots & \vdots & \ddots & \vdots \\ c^{-a}_{g_m}(-x-1) & \displaystyle c^{-a}_{g_m}(-x) & \cdots &c^{-a}_{g_m}(-x+m-1) \end{vmatrix}, \end{equation} \begin{equation}\label{defhexai} H_n^F(x)=\gamma_n\begin{vmatrix}H_{n-v_F}(x)&-H_{n-v_F-1}(x)&\cdots &(-1)^mH_{n-v_F-m}(x)\\ H_{g_1}(-ix)&H_{g_1}'(-ix)&\cdots &H_{g_1}^{(m)}(-ix)\\ \vdots&\vdots&\ddots &\vdots\\ H_{g_m}(-ix)&H_{g_m}'(-ix)&\cdots &H_{g_m}^{(m)}(-ix)\end{vmatrix} , \end{equation} where $\beta_n$ and $\gamma _n$, $n\ge v_F$, are certain normalization constants (see (\ref{nc1}) and (\ref{nc2})). We have however computational evidence that shows that both identities (\ref{quschi2i}) and (\ref{defhexai}) are true for every finite set $F$ of positive integers. Both determinantal definitions (\ref{defchexi}) and (\ref{quschi2i}) of the polynomials $c_n^{a;F}$, $n\in \sigma _F$, automatically imply a couple of factorizations of its associated second order difference operator $D_F$ in two first order difference operators. Using these factorizations, we prove that the sequence $c_n^{a;F}$, $n\in \sigma _F$, and the operator $D_F$ can be constructed in two different ways using Darboux transforms (see Definition \ref{dxt}). If we consider (\ref{defchexi}) the Darboux transform uses the sequence $c_n^{a,F_{\{ k\}}}$, $n\in \sigma _{F_{\{ k\}}}$, where $F_{\{ k\}}=\{f_1,\cdots , f_{k-1}\}$. On the other hand, if we consider (\ref{quschi2i}) the Darboux transform uses the sequence $c_n^{a;F_{\Downarrow}}$, $n\in \sigma _{F_{\Downarrow}}$, where $$ F_{\Downarrow}=\begin{cases} \emptyset,& \mbox{if $F=\{1,2,\cdots , k\}$,}\\ \{f_{s_F}-s_F,\cdots , f_k-s_F\},& \mbox{if $F\not =\{1,2,\cdots , k\}$}, \end{cases} $$ and for $F\not =\{1,2,\cdots , k\}$, we write $s_F=\min \{s\ge 1: s<f_s\}$. The second factorization seems to be more interesting because the operator $F\to F_{\Downarrow}$ preserves the admissibility of the set $F$. The same happens with the determinantal definitions of the exceptional Hermite polynomials $H_n^F$ (\ref{defhexi}) and (\ref{defhexai}). This fact agrees with the G\'omez-Ullate-Kamran-Milson conjecture and its corresponding discrete version (see \cite{GUKM5}): exceptional and exceptional discrete orthogonal polynomials can be obtained by applying a sequence of Darboux transforms to a classical or classical discrete orthogonal family, respectively. We finish this Introduction by pointing out that there is a very nice invariant property of the polynomial $\Omega _F ^a$ (\ref{defchexii}) underlying the fact that the polynomials $c_n^{a;F}$, $n\in \sigma _F$, admit both determinantal definitions (\ref{defchexi}) and (\ref{quschi2i}) (see \cite{du2}, \cite{du3} and \cite{CD}): except for a sign, $\Omega_F^a$ remains invariant if we change $F$ to $G=I(F)$, $x$ to $-x$ and $a$ to $-a$; that is $$ \Omega _F^a(x)=(-1)^{k+u_F}\Omega ^{-a}_G(-x). $$ This invariant property gives rise to the corresponding one for the Wronskian determinant (\ref{defhexii}) (see (\ref{izah})). \section{Preliminaries} Let $\mu $ be a Borel measure (positive or not) on the real line. The $n$-th moment of $\mu $ is defined by $\int _\RR t^nd\mu (t)$. 
When $\mu$ has finite moments for any $n\in \NN$, we can associate with it a bilinear form, defined on the linear space of polynomials by \begin{equation}\label{bf} \langle p, q\rangle =\int pqd\mu. \end{equation} Given an infinite set $X$ of nonnegative integers, we say that the polynomials $p_n$, $n\in X$, $p_n$ of degree $n$, are orthogonal with respect to $\mu$ if they are orthogonal with respect to the bilinear form defined by $\mu$; that is, if they satisfy $$ \int p_np_md\mu =0, \quad n\not = m, \quad n,m \in X. $$ When $X=\NN$ and the degree of $p_n$ is $n$, $n\ge 0$, we get the usual definition of orthogonal polynomials with respect to a measure. When $X=\NN$, orthogonal polynomials with respect to a measure are unique up to multiplication by non-null constants. Let us remark that this property is not true when $X\not =\NN$. Positive measures $\mu $ with finite moments of any order and infinitely many points in their support always have a sequence of orthogonal polynomials $(p_n)_{n\in\NN }$, $p_n$ of degree $n$ (it is enough to apply the Gram-Schmidt orthogonalization process to $1, x, x^2, \ldots$); in this case the orthogonal polynomials have positive norm: $\langle p_n,p_n\rangle>0$. Moreover, given a sequence of orthogonal polynomials $(p_n)_{n\in \NN}$ with respect to a measure $\mu$ (positive or not), the bilinear form (\ref{bf}) can be represented by a positive measure if and only if $\langle p_n,p_n \rangle > 0$, $n\ge 0$. When $X=\NN$, Favard's Theorem establishes that a sequence $(p_n)_{n\in \NN}$ of polynomials, $p_n$ of degree $n$, is orthogonal (with non-null norm) with respect to a measure if and only if it satisfies a three term recurrence relation of the form ($p_{-1}=0$) $$ xp_n(x)=a_np_{n+1}(x)+b_np_n(x)+c_np_{n-1}(x), \quad n\ge 0, $$ where $(a_n)_{n\in \NN}$, $(b_n)_{n\in \NN}$ and $(c_n)_{n\in \NN}$ are sequences of real numbers with $a_{n-1}c_n\not =0$, $n\ge 1$. If, in addition, $a_{n-1}c_n>0$, $n\ge 1$, then the polynomials $(p_n)_{n\in \NN}$ are orthogonal with respect to a positive measure with infinitely many points in its support, and conversely. Again, Favard's Theorem is not true for a sequence of orthogonal polynomials $(p_n)_{n\in X}$ when $X\not =\NN$. We will also need the following three lemmas. The first one is Sylvester's determinant identity (for the proof and a more general formulation of Sylvester's identity see \cite{Gant}, p. 32). \begin{lemma}\label{lemS} For a square matrix $M=(m_{i,j})_{i,j=1}^k$, and for each $1\le i, j\le k$, denote by $M_i^j$ the square matrix that results from $M$ by deleting the $i$-th row and the $j$-th column. Similarly, for $1\le i, j, p,q\le k$ denote by $M_{i,j}^{p,q}$ the square matrix that results from $M$ by deleting the $i$-th and $j$-th rows and the $p$-th and $q$-th columns. Sylvester's determinant identity establishes that for $i_0,i_1, j_0,j_1$ with $1\le i_0<i_1\le k$ and $1\le j_0<j_1\le k$, $$ \det(M) \det(M_{i_0,i_1}^{j_0,j_1}) = \det(M_{i_0}^{j_0})\det(M_{i_1}^{j_1}) - \det(M_{i_0}^{j_1}) \det(M_{i_1}^{j_0}). $$ \end{lemma} The second and third lemmas establish some (more or less straightforward) technical properties of second order difference operators. \begin{lemma}\label{lemdes} Write $A$, $B$ and $D$ for the following two first order and one second order difference operators $$ A=a_0\Sh_0+a_1\Sh_1,\quad B=b_{-1}\Sh_{-1}+b_0\Sh_0,\quad D=f_{-1}\Sh_{-1}+f_0\Sh_0+f_1\Sh_1, $$ where $\Sh_l$ denotes the shift operator $\Sh_l(p)(x)=p(x+l)$.
Then $D=BA$ if and only if $$ b_{-1}(x)=\frac{f_{-1}(x)}{a_0(x-1)},\quad b_0(x)=\frac{f_{1}(x)}{a_1(x)},\quad f_0(x)=\frac{f_{-1}(x)a_1(x-1)}{a_0(x-1)}+\frac{f_{1}(x)a_0(x)}{a_1(x)}. $$ On the other hand, $D=AB$ if and only if $$ a_{0}(x)=\frac{f_{-1}(x)}{b_{-1}(x)},\quad a_1(x)=\frac{f_{1}(x)}{b_0(x+1)},\quad f_0(x)=\frac{f_{-1}(x)b_0(x)}{b_{-1}(x)}+\frac{f_{1}(x)b_{-1}(x+1)}{b_0(x+1)}. $$ \end{lemma} \begin{lemma}\label{lemigop} Let $D$ and $\tilde D$ be two second order difference operators with rational coefficients. Assume that there exist polynomials $p_1$, $p_2$ and $p_3$ with degrees $d_1$, $d_2$ and $d_3$, respectively, such that $D(p_i)=\tilde D(p_i)$, $i=1,2,3$. If $d_i>0$ and $d_i\not =d_j$, $i\not =j$, then $D=\tilde D$. \end{lemma} \begin{proof} Since $d_i\not =d_j$, $i\not =j$, we deduce from Lemma 3.4 of \cite{DdI} that the polynomial $$ P(x)=\begin{vmatrix}p_1(x+1)&p_1(x)&p_1(x-1)\\ p_2(x+1)&p_2(x)&p_2(x-1)\\ p_3(x+1)&p_3(x)&p_3(x-1) \end{vmatrix} $$ has degree $d=d_1+d_2+d_3-3>0$. Hence $P(x)\not =0$ for $x\not \in X_P$, where $X_P$ is formed by at most $d$ complex numbers. Given any three rational functions $g_1,g_2,g_3$, write $Y$ for the set formed by their poles. Then, for each $x\not \in X_P\cup Y$, the linear system of equations $D(p_i)(x)=g_i(x)$ uniquely determines the value at $x$ of the coefficients of the second order difference operator $D$. Since $D(p_i)=\tilde D(p_i)$, $i=1,2,3$, we conclude that the coefficients of $D$ and $\tilde D$ are equal, and hence $D=\tilde D$. \end{proof} Given a finite set of numbers $F=\{f_1,\cdots, f_k\}$ we denote by $V_F$ the Vandermonde determinant defined by \begin{align}\label{defvdm} V_F=\prod_{1\le i<j\le k}(f_j-f_i). \end{align} \subsection{Symmetric operators} Consider a measure $\mu$ with finite moments of any order (so that we can integrate polynomials with respect to $\mu $). Let $\Aa $ be a linear subspace of the linear space of polynomials $\PP$. We say that a linear operator $T:\Aa\to \PP$ is symmetric with respect to the pair $(\mu , \Aa)$ if $\langle T(p),q\rangle _\mu =\langle p,T(q)\rangle _ \mu$ for all polynomials $p, q \in \Aa$, where the bilinear form $\langle \cdot, \cdot \rangle _\mu $ is defined by (\ref{bf}). The following Lemma is then straightforward. \begin{lemma}\label{lsyo} Let $T$ be a symmetric operator with respect to the pair $(\mu ,\Aa)$. Assume we have polynomials $r_n\in \Aa$, $n\in X\subset \NN$, which are eigenfunctions of the operator $T$ with different eigenvalues, that is, $T(r_n)=\lambda_n r_n$, $n\in X$, and $\lambda _n\not =\lambda_m$, $n\not =m$. Then the polynomials $r_n$, $n\in X$, are orthogonal with respect to $\mu$. \end{lemma} When $\mu$ is a discrete measure, the symmetry of a finite order difference operator $D=\sum_{l=-r}^rh_l\Sh _l$ with respect to a pair $(\mu, \Aa)$ can be guaranteed by a finite set of difference equations together with certain boundary conditions. The proof follows as that of Theorem 3.2 in \cite{du0} and is omitted. \begin{lemma}\label{tcsd} Let $\mu$ be a discrete measure supported on a countable set $X$, $X\subset \RR$. Consider a finite order difference operator $T:\Aa \to \PP$ of the form $T=\sum _{l=-r}^rh_l\Sh_l $, where $\Aa $ is a linear subspace of the linear space of polynomials $\PP$.
Assume that the measure $\mu $ and the coefficients $h_l$, $l=-r,\cdots , r$, of $T$ satisfy the difference equations \begin{equation}\label{desm} h_l(x-l)\mu (x-l)=h_{-l}(x)\mu (x),\quad \mbox{for $x\in (l+X)\cap X$ and $l=1, \cdots, r$,} \end{equation} and the boundary conditions \begin{align}\label{bc1} h_l(x-l)&=0, \quad \mbox{for $x\in (l+X)\setminus X$ and $l=1, \cdots, r$,}\\ \label{bc2} h_{-l}(x)&=0, \quad \mbox{for $x\in X\setminus (l+X)$ and $l=1, \cdots, r$.} \end{align} Then $T$ is symmetric with respect to the pair $(\mu, \Aa)$. (Let us remind that for a set of numbers $A$ and a number $b$, we denote by $b+A$ the set $b+A=\{ b+a: a\in A\}$). \end{lemma} On the other hand, when $\mu$ has a smooth density with respect to the Lebesgue measure (i. e. $d\mu =f(x)dx$), the symmetry of a second order differential operator with respect to a pair $(\mu, \Aa)$ can be guaranteed by the usual Pearson equation. The proof follows by performing an integration by parts and it is omitted. \begin{lemma}\label{tcsd2} Let $\mu$ be a measure having a positive $\mathcal{C}^2(I)$ density $f$ with respect to the Lebesgue measure in an interval $I\subset \RR $. Consider a second order differential operator $T:\Aa \to \PP$ of the form $T=a_2(x)\partial ^2 +a_1(x)\partial +a_0(x)$, where $\Aa $ is a linear subspace of the linear space of polynomials $\PP$, $\partial =d/dx$ and $a_2$ and $a_1$ are $\mathcal{C}^1(I)$ functions. Assume that $f $ and the coefficients $a_2$, $a_1$ of $T$ satisfy the Pearson equation $$ (a_2(x)f(x))'=a_1(x)f(x), \quad x\in I, $$ and the boundary conditions that the limit of the functions $x^na_2(x)f(x)$ and $x^n(a_2(x)f(x))'$ vanish at the endpoints of $I$ for $n\ge 0$. Then $T$ is symmetric with respect to the pair $(\mu, \Aa)$. \end{lemma} \subsection{Christoffel transform}\label{secChr} Let $\mu$ be a measure (positive or not) and assume that $\mu$ has a sequence of orthogonal polynomials $(p_n)_{n\in \NN}$, $p_n$ with degree $n$ and $\langle p_n,p_n\rangle \not =0$ (as we mentioned above, that always happens if $\mu$ is positive, with finite moments and infinitely many points in its support). Favard's theorem implies that the sequence of polynomials $(p_n)_n$ satisfies the three term recurrence relation ($p_{-1}=0$) \begin{equation}\label{rrpn} xp_n(x)=a_n^Pp_{n+1}(x)+b_n^Pp_n(x)+c_n^Pp_{n-1}(x). \end{equation} Given a finite set $F$ of real numbers, $F=\{f_1,\cdots , f_k\}$, $f_i<f_{i+1}$, we write $\Phi_n$, $n\ge 0$, for the $k\times k$ determinant \begin{equation}\label{defph} \Phi_n=\vert p_{n+j-1}(f_i)\vert _{i,j=1,\cdots , k}. \end{equation} Notice that $\Phi_n$, $n\ge 0$, depends on both, the finite set $F$ and the measure $\mu$. In order to stress this dependence, we sometimes write in this Section $\Phi_n^{\mu, F}$ for $\Phi_n$. Along this Section we assume that the set $\X_\mu^F=\{ n\in \NN :\Phi_n^{\mu,F}=0\}$ is finite. We denote $\x_\mu ^F=\max \X_\mu ^F$. If $\X_\mu ^F=\emptyset$ we take $\x_\mu ^F=-1$. The Christoffel transform of $\mu$ associated to the annihilator polynomial $\pp$ of $F$, $$ \pp (x)=(x-f_1)\cdots (x-f_k), $$ is the measure defined by $ \mu_F =\pp \mu$. Orthogonal polynomials with respect to $\mu_F$ can be constructed by means of the formula \begin{equation}\label{mata00} q_n(x)=\frac{1}{\pp (x)}\det \begin{pmatrix}p_n(x)&p_{n+1}(x)&\cdots &p_{n+k}(x)\\ p_n(f_1)&p_{n+1}(f_1)&\cdots &p_{n+k}(f_1)\\ \vdots&\vdots&\ddots &\vdots\\ p_n(f_k)&p_{n+1}(f_k)&\cdots &p_{n+k}(f_k) \end{pmatrix}. 
\end{equation} Notice that the degree of $q_n$ is equal to $n$ if and only if $n\not\in \X_\mu ^F$. In that case the leading coefficient $\lambda^Q_n$ of $q_n$ is equal to $(-1)^k\lambda^P_{n+k}\Phi_n$, where $\lambda ^P_n$ denotes the leading coefficient of $p_n$. The next Lemma follows easily using \cite{Sz}, Th. 2.5. \begin{lemma}\label{sze} The measure $\mu_F$ has a sequence $(q_n)_{n=0}^\infty $, $q_n$ of degree $n$, of orthogonal polynomials if and only if $\X_\mu ^F=\emptyset$. In that case, an orthogonal polynomial of degree $n$ with respect to $\mu _F$ is given by (\ref{mata00}) and also $\langle q_n,q_n\rangle _{\mu _F}\not =0$, $n\ge 0$. If $\X_\mu \not =\emptyset$, the polynomial $q_n$ (\ref{mata00}) has still degree $n$ for $n\not \in \X_\mu^F$, and satisfies $\langle q_n,r\rangle_{\mu _F}=0$ for all polynomial $r$ with degree less than $n$ and $\langle q_n,q_n\rangle _{\mu _F}\not =0$. \end{lemma} The three term recurrence relation for the polynomials $(q_n)_n$ can be derived from the corresponding recurrence relation for the polynomials $(p_n)_n$ (\ref{rrpn}). In addition to the determinant $\Phi_n$ (\ref{defph}), $n\ge 0$, we also consider the $k\times k$ determinant \begin{equation}\label{defps} \Psi_n=\begin{vmatrix} p_n(f_1)&p_{n+1}(f_1)&\cdots &p_{n+k-2}(f_1)&p_{n+k}(f_1)\\ \vdots&\vdots&\ddots &\vdots&\vdots\\ p_n(f_k)&p_{n+1}(f_k)&\cdots &p_{n+k-2}(f_k)&p_{n+k}(f_k) \end{vmatrix}. \end{equation} \begin{lemma}\label{lemmc} For $n> \x_\mu ^F+1$, the polynomials $q_n$ (\ref{mata00}) satisfy the three term recurrence relation \begin{equation}\label{rrvqn} xq_n(x)=a_n^Qq_{n+1}(x)+b_n^Qq_n(x)+c_n^Qq_{n-1}(x), \end{equation} where \begin{align*} a^Q_n&=a_n^P\frac{\lambda^P_{n+1}\lambda^P_{n+k}}{\lambda^P_{n}\lambda^P_{n+k+1}}\frac{\Phi _n}{\Phi_{n+1}},\\ b^Q_n&= b_{n+k}^P+\frac{\lambda^P_{n+k}}{\lambda^P_{n+k+1}}\frac{\Psi _{n+1}}{\Phi_{n+1}}-\frac{\lambda^P_{n+k-1}}{\lambda^P_{n+k}}\frac{\Psi _n}{\Phi_{n}},\\ c^Q_n&=c_n^P\frac{\Phi _{n+1}}{\Phi_{n}}. \end{align*} Moreover, \begin{equation}\label{n2q} \langle q_n,q_n\rangle _{\mu_F}=(-1)^k\frac{\lambda^P_{n+k}}{\lambda^P_{n}}\Phi_n\Phi_{n+1}\langle p_n,p_n\rangle _{\mu},\quad n> \x_\mu^F+1. \end{equation} If $\X_\mu ^F=\emptyset$, then (\ref{rrvqn}) and (\ref{n2q}) hold for $n\ge 0$, with initial condition $q_{-1}=0$. \end{lemma} \begin{proof} We can assume that $p_n$ are monic (that is, $\lambda^P_n=1$). Write $\hat q_n(x)=q_n(x)/\lambda_n^Q$, $n> \x_\mu^F +1$. It is then enough to prove that $$ x\hat q_n(x)=a_n\hat q_{n+1}(x)+b_n\hat q_n(x)+c_n\hat q_{n-1}(x) $$ with \begin{align}\label{rrma} a_n&=1,\\\label{rrmb} b_n&=b_{n+k}^P+\frac{\Psi _{n+1}}{\Phi_{n+1}}-\frac{\Psi _n}{\Phi_{n}},\\\label{rrmc} c_n&=c_n^P\frac{\Phi _{n-1}\Phi_{n+1}}{\Phi^2_{n}},\\\label{rrmq} \langle q_n,q_n\rangle _{\mu_F} &=\frac{\langle p_{n},p_{n}\rangle _{\mu}\Phi_{n+1}}{\Phi_{n}}. \end{align} We write $u_n(x)=x^n$ for $n\le \x_\mu^F+1$, and $u_n(x)=\hat q_n (x)$ for $n>\x_\mu^F+1$. Then the polynomials $u_n$, $n\ge 0$, form a basis of $\PP$. From the previous lemma, we also have for $n>\x ^F_\mu$ that $\langle \hat q_n,u_j\rangle _{\mu_F}=0$, $j=0,\cdots , n-1$. Taking this into account, it is easy to deduce that the polynomials $\hat q_n$, $n> \x_\mu^F+1$, satisfy a three term recurrence relation $$ x\hat q_n(x)=a_n\hat q_{n+1}(x)+b_n\hat q_n(x)+c_n\hat q_{n-1}(x). $$ Since they are monic, we straightforwardly have $a_n=1$, that is, (\ref{rrma}). 
We compute $c_n$ as $$ c_n=\frac{\langle x\hat q_n,\hat q_{n-1}\rangle _{\mu_F}}{\langle \hat q_{n-1},\hat q_{n-1}\rangle _{\mu_F}}= \frac{\langle x\hat q_n, p_{n-1}\rangle _{\mu_F}}{\langle \hat q_{n-1},\hat q_{n-1}\rangle _{\mu_F}}. $$ Using (\ref{mata00}), we get \begin{align*} \langle x\hat q_n,p_{n-1}\rangle _{\mu_F}&=\frac{(-1)^k}{\Phi_n}\begin{vmatrix}\langle xp_n,p_{n-1}\rangle _{\mu}&\langle xp_{n+1},p_{n-1}\rangle _{\mu} &\cdots &\langle xp_{n+k},p_{n-1}\rangle _{\mu}\\ p_n(f_1)&p_{n+1}(f_1)&\cdots &p_{n+k}(f_1)\\ \vdots&\vdots&\ddots &\vdots\\ p_n(f_k)&p_{n+1}(f_k)&\cdots &p_{n+k}(f_k) \end{vmatrix}\\ &=\frac{(-1)^k}{\Phi_n}\begin{vmatrix}c^P_n\langle p_{n-1},p_{n-1}\rangle _{\mu}&0 &\cdots &0\\ p_n(f_1)&p_{n+1}(f_1)&\cdots &p_{n+k}(f_1)\\ \vdots&\vdots&\ddots &\vdots\\ p_n(f_k)&p_{n+1}(f_k)&\cdots &p_{n+k}(f_k) \end{vmatrix}\\ &=(-1)^k\frac{c^P_n\langle p_{n-1},p_{n-1}\rangle _{\mu}\Phi_{n+1}}{\Phi_n}. \end{align*} In a similar way, one finds that $$ \langle \hat q_{n-1},\hat q_{n-1}\rangle _{\mu_F}=(-1)^k\frac{\langle p_{n-1},p_{n-1}\rangle _{\mu}\Phi_{n}}{\Phi_{n-1}}. $$ (\ref{rrmc}) and (\ref{rrmq}) can now be easily deduced. (\ref{rrmb}) can be proved analogously. \end{proof} For $\X_\mu ^F=\emptyset$, the previous lemma has already appeared in the literature (see for instance \cite{YZ}). \subsection{Finite set of positive integers}\label{sfspi} From now on, $F$ will denote a finite set of positive integers. We will write $F=\{ f_1,\cdots , f_k\}$, with $f_i<f_{i+1}$. Hence $k$ is the number of elements of $F$ and $f_k$ is the maximum element of $F$. We associate to $F$ the nonnegative integers $u_F$ and $v_F$ and the infinite set of nonnegative integers $\sigma_F$ defined by \begin{align}\label{defuf} u_F&=\sum_{f\in F}f-\binom{k+1}{2},\\\label{defvf} v_F&=\sum_{f\in F}f+f_k-\frac{(k-1)(k+2)}{2},\\\label{defsf} \sigma _F&=\{u_F,u_F+1,u_F+2,\cdots \}\setminus \{u_F+f,f\in F\}. \end{align} The infinite set $\sigma_F$ will be the set of indices for the exceptional Charlier or Hermite polynomials associated to $F$. Notice that $v_F=u_F+f_k+1$; hence $\{v_F,v_F+1,v_F+2,\cdots \}\subset \sigma_F$. Notice also that $u_F$ is an increasing function with respect to the inclusion order, that is, if $F\subset \tilde F$ then $u_F\le u_{\tilde F}$. Consider the set $\Upsilon$ formed by all finite sets of positive integers: \begin{align*} \Upsilon=\{F:\mbox{$F$ is a finite set of positive integers}\} . \end{align*} We consider the involution $I$ in $\Upsilon$ defined by \begin{align}\label{dinv} I(F)=\{1,2,\cdots, f_k\}\setminus \{f_k-f,f\in F\}. \end{align} The definition of $I$ implies that $I^2=Id$. For the involution $I$, the bigger the holes in $F$ (with respect to the set $\{1,2,\cdots , f_k\}$), the bigger the involuted set $I(F)$. Here it is a couple of examples $$ I(\{ 1,2,3,\cdots ,k\})=\{ k\},\quad \quad I(\{1, k\})=\{ 1,2,\cdots, k-2, k\}. $$ The set $I(F)$ will be denoted by $G$: $G=I(F)$. We also write $G=\{g_1,\cdots , g_m\}$ with $g_i<g_{i+1}$ so that $m$ is the number of elements of $G$ and $g_m$ the maximum element of $G$. Notice that $$ f_k=g_m,\quad m=f_k-k+1. $$ We also define the number $s_F$ by \begin{equation}\label{defs0} s_F=\begin{cases} 1,& \mbox{if $F=\emptyset$},\\ k+1,&\mbox{if $F=\{1,2,\cdots , k\}$},\\ \min \{s\ge 1:s<f_s\}, & \mbox{if $F\not =\{1,2,\cdots k\}$}. 
\end{cases} \end{equation} For $1\le i\le k$, we denote by $F_{\{ i\}}$ and $F_{\Downarrow}$ the finite sets of positive integers defined by \begin{align}\label{deff0} F_{\{ i\} }&=F\setminus \{f_i\},\\\label{deff1} F_{\Downarrow}&=\begin{cases} \emptyset,& \mbox{if $F=\{1,2,\cdots , k\}$,}\\ \{f_{s_F}-s_F,\cdots , f_k-s_F\},& \mbox{if $F\not =\{1,2,\cdots , k\}$}. \end{cases} \end{align} The following relation is straightforward from (\ref{dinv}), (\ref{deff0}) and (\ref{deff1}): \begin{equation}\label{ref0f1} F_{\Downarrow}=I\left(G _{\{ m\} }\right) \end{equation} (where as indicated above $G=I(F)$ and $m$ is the number of elements of $G$). \subsection{Charlier and Hermite polynomials} We include here basic definitions and facts about Charlier and Hermite polynomials, which we will need in the following sections. For $a\neq0$, we write $(c_n^a)_n$ for the sequence of Charlier polynomials (the next formulas can be found in \cite{Ch}, pp. 170-1; see also \cite{KLS}, pp., 247-9 or \cite{NSU}, ch. 2) defined by \begin{equation}\label{Chpol} c_n^a(x)=\frac{1}{n!}\sum_{j=0}^n(-a)^{n-j}\binom{n}{j}\binom{x}{j}j!. \end{equation} The Charlier polynomials are orthogonal with respect to the measure \begin{equation}\label{Chw} \rho_a=\sum_{x=0}^{\infty}\frac{a^x}{x!}\delta_x,\quad a\neq0, \end{equation} which is positive only when $a>0$ and then \begin{equation}\label{norCh} \langle c_n^a,c_n^a\rangle=\frac{a^n}{n!}e^a. \end{equation} The three-term recurrence formula for $(c_n^a)_n$ is ($c_{-1}^a=0$) \begin{equation}\label{Chttrr} xc_n^a=(n+1)c_{n+1}^a+(n+a)c_n^a+ac_{n-1}^a,\quad n\geq0. \end{equation} They are eigenfunctions of the following second-order difference operator \begin{equation}\label{Chdeq} D_a=-x\Sh_{-1}+(x+a)\Sh_0-a\Sh_1,\quad D_a(c_n^a)=nc_n^a,\quad n\geq0, \end{equation} where $\Sh_j(f)=f(x+j)$. They also satisfy \begin{equation}\label{Chlad} \Delta(c_n^a)=c_{n-1}^a, \quad \frac{d}{da}(c_n^a)=-c_{n-1}^a, \end{equation} and the duality \begin{equation}\label{Chdua} (-1)^ma^mn!c_n^a(m)=(-1)^na^nm!c_{m}^a(n), \quad n,m\ge 0. \end{equation} We write $(H_n)_n$ for the sequence of Hermite polynomials (the next formulas can be found in \cite{Ch}, Ch. V; see also \cite{KLS}, pp, 250-3) defined by \begin{equation}\label{Hpol} H_n(x)=n!\sum_{j=0}^{[n/2]}\frac{(-1)^j(2x)^{n-2j}}{j!(n-2j)!}. \end{equation} The Hermite polynomials are orthogonal with respect to the weight function $e^{-x^2}$, $x\in \RR$. They are eigenfunctions of the following second-order differential operator \begin{equation}\label{Hdeq} D=\partial ^2-2x\partial,\quad D(H_n)=-2nH_n,\quad n\geq0, \end{equation} where $\partial =d/dx$. They also satisfy $H_n'(x)=2nH_{n-1}(x)$. One can obtain Hermite polynomials from Charlier polynomials using the limit \begin{equation}\label{blchh} \lim_{a\to \infty}\left(\frac{2}{a}\right)^{n/2}c_n^a(\sqrt {2a}x+a)=\frac{1}{n!}H_n(x) \end{equation} see \cite{KLS}, p. 249 (take into account that if we write $(C_n^a)_n$ for the polynomials defined by (9.14.1) in \cite{KLS}, p. 247, then $c_n^a=(-a)^nC_n^a/n!$). The previous limit is uniform in compact sets of $\CC$. \section{Constructing polynomials which are eigenfunctions of second order difference operators} As in Section \ref{sfspi}, $F$ will denote a finite set of positive integers. We will write $F=\{ f_1,\cdots , f_k\}$, with $f_i<f_{i+1}$. Hence $k$ is the number of elements of $F$ and $f_k$ is the maximum element of $F$. 
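Before proceeding, the following short Python sketch (a minimal illustration; all function names are ours) may help to fix the combinatorial notation of Section \ref{sfspi}: it computes $u_F$, $\sigma_F$, the involution $I$ and the set $F_{\Downarrow}$, and checks on a few examples that $I$ is an involution and that the identity (\ref{ref0f1}) holds.
\begin{verbatim}
from math import comb

def u(F):                      # u_F = sum(F) - binom(k+1, 2), cf. (defuf)
    return sum(F) - comb(len(F) + 1, 2)

def sigma(F, upto):            # sigma_F truncated below upto, cf. (defsf)
    return [n for n in range(u(F), upto) if n - u(F) not in F]

def involution(F):             # I(F) = {1,...,f_k} \ {f_k - f : f in F}, cf. (dinv)
    fk = max(F)
    return sorted(set(range(1, fk + 1)) - {fk - f for f in F})

def s(F):                      # s_F, cf. (defs0); F assumed nonempty here
    srt = sorted(F)
    if srt == list(range(1, len(F) + 1)):
        return len(F) + 1
    return min(i + 1 for i, f in enumerate(srt) if i + 1 < f)

def down(F):                   # F_Downarrow, cf. (deff1)
    srt = sorted(F)
    if srt == list(range(1, len(F) + 1)):
        return []
    return [f - s(F) for f in srt[s(F) - 1:]]

for F in [[1, 2], [2, 3], [1, 3, 4], [2, 3, 6, 7]]:
    G = involution(F)
    assert involution(G) == sorted(F)                 # I is an involution
    Gm = [g for g in G if g != max(G)]                # G_{m}, cf. (deff0)
    assert down(F) == (involution(Gm) if Gm else [])  # identity (ref0f1)
    print(F, "u_F =", u(F), "sigma_F starts", sigma(F, u(F) + 6),
          "I(F) =", G, "F_down =", down(F))
\end{verbatim}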
We associate to each finite set $F$ of positive integers the polynomials $c_n^{a;F}$, $n\in \sigma_F$, displayed in the following definition. It turns out that these polynomials are always eigenfunctions of a second order difference operator with rational coefficients. We call them exceptional Charlier polynomials when, in addition, they are orthogonal and complete with respect to a positive measure (this will happen as long as the finite set $F$ is admissible; see Definition \ref{defadm} in the next Section). \begin{definition} For a given real number $a\not =0$ and a finite set $F$ of positive integers, we define the polynomials $c_n^{a;F}$, $n\in \sigma _F$, as \begin{equation}\label{defchex} c_n^{a;F}(x)=\begin{vmatrix}c_{n-u_F}^a(x)&c_{n-u_F}^a(x+1)&\cdots &c_{n-u_F}^a(x+k)\\ c_{f_1}^a(x)&c_{f_1}^a(x+1)&\cdots &c_{f_1}^a(x+k)\\ \vdots&\vdots&\ddots &\vdots\\ c_{f_k}^a(x)&c_{f_k}^a(x+1)&\cdots &c_{f_k}^a(x+k) \end{vmatrix} , \end{equation} where the number $u_F$ and the infinite set of nonnegative integers $\sigma _F$ are defined by (\ref{defuf}) and (\ref{defsf}), respectively. \end{definition} To simplify the notation, we will sometimes write $c_n^F=c_n^{a;F}$. Using Lemma 3.4 of \cite{DdI}, we deduce that $c_n^F$, $n\in \sigma _F$, is a polynomial of degree $n$ with leading coefficient equal to \begin{equation}\label{lcrn} \frac{\prod_{i=1}^k(f_i-n+u_F)}{(n-u_F)!\prod_{f\in F}f!}V_F, \end{equation} where $V_F$ is the Vandermonde determinant (\ref{defvdm}). With the convention that $c_n^a=0$ for $n<0$, the determinant (\ref{defchex}) defines a polynomial for any $n\ge 0$, but for $n\not \in \sigma_F$ we have $c_n^F=0$. Combining columns in (\ref{defchex}) and taking into account the first formula in (\ref{Chlad}), we have the alternative definition \begin{equation}\label{defchexa} c_n^F(x)=\begin{vmatrix}c_{n-u_F}^a(x)&c_{n-u_F-1}^a(x)&\cdots &c_{n-u_F-k}^a(x)\\ c_{f_1}^a(x)&c_{f_1-1}^a(x)&\cdots &c_{f_1-k}^a(x)\\ \vdots&\vdots&\ddots &\vdots\\ c_{f_k}^a(x)&c_{f_k-1}^a(x)&\cdots &c_{f_k-k}^a(x) \end{vmatrix} . \end{equation} The polynomials $c_n^F$, $n\in \sigma_F$, are strongly related by duality with the polynomials $q_n^F$, $n\ge 0$, defined by \begin{equation}\label{defqnch} q_n^F(x)=\frac{\begin{vmatrix}c_n^a(x-u_F)&c_{n+1}^a(x-u_F)&\cdots &c_{n+k}^a(x-u_F)\\ c_n^a(f_1)&c_{n+1}^a(f_1)&\cdots &c_{n+k}^a(f_1)\\ \vdots&\vdots&\ddots &\vdots\\ c_n^a(f_k)&c_{n+1}^a(f_k)&\cdots &c_{n+k}^a(f_k) \end{vmatrix}}{\prod_{f\in F}(x-f-u_F)} . \end{equation} \begin{lemma}\label{lem3.2} If $u$ is a nonnegative integer and $v\in \sigma_F$, then \begin{equation}\label{duaqnrn} q_u^F(v)=\xi_u\zeta_vc_v^F(u), \end{equation} where $$ \xi_u=\frac{(-a)^{(k+1)u}}{\prod_{i=0}^k(u+i)!},\quad \zeta_v=\frac{(-a)^{-v}(v-u_F)!\prod_{f\in F}f!}{\prod_{f\in F}(v-f-u_F)}. $$ \end{lemma} \begin{proof} It is a straightforward consequence of the duality (\ref{Chdua}) for the Charlier polynomials. \end{proof} We now prove that the polynomials $c_n^F$, $n\in \sigma_F$, are eigenfunctions of a second order difference operator with rational coefficients. To establish the result in full, we need some more notations. We denote by $\Omega _F^a (x)$ and $\Lambda _F^a(x)$ the polynomials \begin{align}\label{defom} \Omega _F^a(x)&=|c_{f_i}^a(x+j-1)|_{i,j=1}^k,\\ \label{deflam} \Lambda _F^a(x)&=\begin{vmatrix} c_{f_1}^a(x)&c_{f_1}^a(x+1)&\cdots &c_{f_1}^a(x+k-2)&c_{f_1}^a(x+k)\\ \vdots&\vdots&\ddots &\vdots\\ c_{f_k}^a(x)&c_{f_k}^a(x+1)&\cdots &c_{f_k}^a(x+k-2)&c_{f_k}^a(x+k) \end{vmatrix} . 
\end{align} To simplify the notation, we will sometimes write $\Omega ^F=\Omega_F^{a}$ and $\Lambda ^F=\Lambda_F^{a}$. Using Lemma 3.4 of \cite{DdI} and the definition of $u_F$ (\ref{defuf}), we deduce that the degree of both $\Omega _F$ and $\Lambda_F$ is $u_F+k$. From (\ref{defchex}) and (\ref{defom}), we have \begin{equation}\label{rrom0} \Omega _F(x)=(-1)^{k-1}c_{f_k+u_{F_{\{ k\} }}}^{F_{\{ k\} }}(x), \end{equation} where the finite set of positive integers $F_{\{ k\}}$ is defined by (\ref{deff0}). As for $c_n^F$ (see (\ref{defchexa})), we have for $\Omega_F$ the following alternative definition \begin{equation}\label{defoma} \Omega _F(x)=|c_{f_i-j+1}^a(x)|_{i,j=1}^k. \end{equation} From here and (\ref{defchexa}), it is easy to deduce that \begin{equation}\label{rrom} c_{u_F}^F(x)=\Omega_{F_\Downarrow }(x), \end{equation} where the finite set of positive integers $F_\Downarrow$ is defined by (\ref{deff1}). A simple calculation using the third formula in (\ref{Chlad}) shows that \begin{equation}\label{relomla} \Lambda ^a_F(x)=k\Omega ^a_F(x)-\frac{d}{da}\Omega ^a_F(x). \end{equation} We also need the determinants $\Phi_n^F$ and $\Psi_n^F$, $n\ge 0$, defined by \begin{align}\label{defphch} \Phi^F_n&=|c_{n+j-1}^a(f_i)|_{i,j=1}^k,\\\label{defpsch} \Psi_n^F&=\begin{vmatrix} c_n^a(f_1)&c_{n+1}^a(f_1)&\cdots &c_{n+k-2}^a(f_1)&c_{n+k}^a(f_1)\\ \vdots&\vdots&\ddots &\vdots\\ c_n^a(f_k)&c_{n+1}^a(f_k)&\cdots &c_{n+k-2}^a(f_k)&c_{n+k}^a(f_k) \end{vmatrix}. \end{align} Using the duality (\ref{Chdua}), we have \begin{align}\label{duomph} \Omega _F(n)&=\frac{\prod_{i=0}^{k-1}(n+i)!}{(-a)^{k(n-1)-u_F}\prod_{f\in F}f!}\Phi_n^F,\\ \label{duomps} \Lambda _F(n)&=\frac{(n+k)!\prod_{i=0}^{k-2}(n+i)!}{(-a)^{k(n-1)-u_F+1}\prod_{f\in F}f!}\Psi_n^F. \end{align} According to Lemma \ref{sze}, as long as $\Phi_n^F\not =0$, $n\ge 0$, the polynomials $q_n^F$, $n\ge 0$, are orthogonal with respect to the measure \begin{equation}\label{mraf} \rho _{a}^{F}=\sum _{x=u_F}^\infty \prod_{f\in F}(x-f-u_F)\frac{a^{x-u_F}}{(x-u_F)!}\delta _x. \end{equation} Notice that the measure $\rho_{a}^{F}$ is supported in the infinite set of nonnegative integers $\sigma_F$ (\ref{defsf}). \begin{theorem}\label{th3.3} Let $F$ be a finite set of positive integers. Then the polynomials $c_n^F$, $n\in \sigma _F$, (\ref{defchex}) are common eigenfunctions of the second order difference operator \begin{equation}\label{sodochex} D_F=h_{-1}(x)\Sh_{-1}+h_0(x)\Sh_0+h_1(x)\Sh_{1}, \end{equation} where \begin{align}\label{jpm1} h_{-1}(x)&=-x\frac{\Omega_F(x+1)}{\Omega_F(x)},\\\label{jpm2} h_0(x)&=x+k+a+u_F-a\frac{\Lambda_F(x+1)}{\Omega_F(x+1)}+a\frac{\Lambda_F(x)}{\Omega_F(x)},\\\label{jpm3} h_1(x)&=-a\frac{\Omega_F(x)}{\Omega_F(x+1)}. \end{align} Moreover $D_F(c_n^F)=nc_n^F$, $n\in \sigma _F$. \end{theorem} \begin{proof} Consider the set $\X_a^F$ of nonnegative integers defined by $\X_a^F=\{n\in \NN: \Phi_n^F=0\}$. Using (\ref{duomph}), we get $\X_a^F=\{x\in \NN: \Omega_F(x)=0\}$. Since $\Omega_F$ is a polynomial in $x$, we conclude that $\X_a^F$ is finite. Define then $\x_a^F=\max \X_a^F$, with the convention that if $\X_a^F=\emptyset$ then $\x_a^F=-1$. Write $p_n(x)=c_n^a(x-u_F)$ and $q_n(x)=q_n^F(x)$ (see (\ref{defqnch})). With the notation of Section \ref{secChr}, we have $$ \lambda^P_n=\frac{1}{n!},\quad \lambda^Q_n=\frac{(-1)^k\Phi_n^F}{(n+k)!}. 
$$ Using the three term recurrence relations (\ref{Chttrr}) for the Charlier polynomials and (\ref{rrvqn}) for $q_n$, $n>\x_a^F+1$, we conclude after an easy calculation that for $u>\x_a^F+1$ and $v\in \RR $ \begin{equation}\label{yttr} vq_u^F(v)=a_u^Qq_{u+1}^F(v)+b_u^Qq_u^F(v)+c_u^Qq_{u-1}^F(v), \end{equation} where \begin{align}\label{anqch} a_n^Q&=(n+k+1)\frac{\Phi _n^F}{\Phi_{n+1}^F},\\\label{bnqch} b_n^Q&=(n+k+a+u_F)+(n+k+1)\frac{\Psi _{n+1}^F}{\Phi_{n+1}^F}-(n+k)\frac{\Psi _{n}^F}{\Phi_{n}^F},\\\label{cnqch} c_n^Q&=a\frac{\Phi _{n+1}^F}{\Phi_{n}^F}. \end{align} Assume now that $v\in \sigma_F$. Then, using the dualities (\ref{duaqnrn}), (\ref{duomph}) and (\ref{duomps}), we get from (\ref{yttr}) after straightforward calculations \begin{equation}\label{edho} uc_v^F(u)=h_1(u)c_{v}^F(u+1)+h_0(u)c_v^F(u)+h_{-1}(u)c_v^F(u-1), \end{equation} for all nonnegative integers $u>\x_a^F+1$, where $h_1$, $h_0$ and $h_{-1}$ are given by (\ref{jpm1}), (\ref{jpm2}) and (\ref{jpm3}), respectively. Since $c_v^F$, $v\in \sigma_F$, are polynomials and $h_1,h_0$ and $h_{-1}$ are rational functions, we have that (\ref{edho}) holds also for all complex number $u$. In other words, the polynomials $c_n^F$, $n\in \sigma_F$, are eigenfunctions of the second order difference operator $D_F$ (\ref{sodochex}). \end{proof} The determinant which defines $\Omega_F^a$ (\ref{defom}) enjoys a very nice invariant property with respect to the involution $I$ defined by (\ref{dinv}). Indeed, for a finite set $F=\{f_1,\cdots , f_k\}$ of positive integers, consider the involuted set $I(F)=G=\{ g_1,\cdots, g_m\}$ with $g_i<g_{i+1}$. We also need the associated functions $\tilde \Omega _F^a$ and $\tilde \Lambda _F^a$ defined by \begin{align}\label{defomt} \tilde \Omega _F^a(x)&=|c_{g_i}^{-a}(-x+j-1)|_{i,j=1}^m,\\\label{delamt} \tilde \Lambda _F^a(x)&=\begin{vmatrix} c_{g_1}^{-a}(-x)&c_{g_1}^{-a}(-x+1)&\cdots &c_{g_1}^{-a}(-x+m-2)&c_{g_1}^{-a}(-x+m)\\ \vdots&\vdots&\ddots &\vdots\\ c_{g_m}^{-a}(-x)&c_{g_m}^{-a}(-x+1)&\cdots &c_{g_m}^{-a}(-x+m-2)&c_{g_m}^{-a}(-x+m) \end{vmatrix} . \end{align} Using the definition of the involution $I$, we have that both $\tilde \Omega _F^a$ and $\tilde \Lambda _F^a$ are polynomials of degree $u_F+k$ (this last can be deduced using Lemma 3.4 of \cite{DdI}). The invariant property mentioned above for $\Omega_F^a$ (\ref{defom}) is the following: except for a sign, $\Omega_F^a$ remains invariant if we change $F$ to $G=I(F)$, $x$ to $-x$ and $a$ to $-a$. In other words, except for a sign, $\Omega_F^a$ and $\tilde \Omega_F^a$ are equal: \begin{equation}\label{iza} \Omega_F^a(x)=(-1)^{k+u_F}\tilde \Omega_F^a (x). \end{equation} For finite sets $F$ formed by consecutive positive integers this invariance was conjecture in \cite{du2} and proved in \cite{du3}. The proof for all finite set of positive integers will be included in \cite{CD}. According to this invariant property, we can rewrite as follows the second order difference operator $D_F$ for which the polynomials $c_n^F$, $n\in \sigma _F$, are common eigenfunctions. \begin{theorem}\label{th3.6} Let $F$ be a finite set of positive integers. Then the coefficients $h_{-1}$, $h_0$, $h_1$ of the operator $D_F$ (\ref{sodochex}) can be rewritten in the form \begin{align}\label{jm1} h_{-1}(x)&=-x\frac{\tilde \Omega_F(x+1)}{\tilde \Omega_F(x)},\\\label{jm2} h_0(x)&=x+m+a+u_G+a\frac{\tilde \Lambda_F(x+1)}{\tilde \Omega_F(x+1)}-a\frac{\tilde \Lambda_F(x)}{\tilde \Omega_F(x)},\\\label{jm3} h_1(x)&=-a\frac{\tilde \Omega_F(x)}{\tilde \Omega_F(x+1)}. 
\end{align} \end{theorem} \begin{proof} Using (\ref{iza}) and (\ref{relomla}), we straightforwardly get (\ref{jm1}), (\ref{jm2}) and (\ref{jm3}) from (\ref{jpm1}), (\ref{jpm2}) and (\ref{jpm3}). \end{proof} We next show that the polynomials $c_n^F$, $n\in \sigma_F$, (\ref{defchex}) and the corresponding difference operator $D_F$ (\ref{sodochex}) can be constructed by applying a sequence of at most $k$ Darboux transforms to the Charlier system (where $k$ is the number of elements of $F$). \begin{definition}\label{dxt} Given a system $(T,(\phi_n)_n)$ formed by a second order difference operator $T$ and a sequence $(\phi_n)_n$ of eigenfunctions for $T$, $T(\phi_n)=\pi_n\phi_n$, by a Darboux transform of the system $(T,(\phi_n)_n)$ we mean the following. For a real number $\lambda$, we factorize $T-\lambda Id$ as the product of two first order difference operators, $T=BA+\lambda Id$ ($Id$ denotes the identity operator). We then produce a new system consisting of the operator $\hat T$, obtained by reversing the order of the factors, $\hat T = AB+\lambda Id$, and the sequence of eigenfunctions $\hat \phi_n =A(\phi_n)$: $\hat T(\hat \phi_n)=\pi_n\hat\phi_n$. We say that the system $(\hat T,(\hat\phi_n)_n)$ has been obtained by applying a Darboux transformation with parameter $\lambda$ to the system $(T,(\phi_n)_n)$. \end{definition} \begin{lemma}\label{lfe} Let $F=\{f_1,\cdots ,f_k\}$ be a finite set of positive integers and write $F_{\{ k\} }=\{f_1,\cdots ,f_{k-1}\}$ (see (\ref{deff0})). We define the first order difference operators $A_F$ and $B_F$ as \begin{align} A_F&=\frac{\Omega _F(x+1)}{\Omega_{F_{\{ k\} }}(x+1)}\Sh_0-\frac{\Omega _F(x)}{\Omega_{F_{\{ k\} }}(x+1)}\Sh_1,\\ B_F&=-x\frac{\Omega _{F_{\{ k\} }}(x+1)}{\Omega_{F}(x)}\Sh_{-1}+a\frac{\Omega _{F_{\{ k\} }}(x)}{\Omega_{F}(x)}\Sh_0. \end{align} Then $c_n^F(x)=A_F(c_{n-f_k+k}^{F_{\{ k\} }})(x)$, $n\in \sigma_F$. Moreover \begin{align*} D_{F_{\{ k\} }}&=B_FA_F+(f_k+u_{F_{\{ k\} }})Id,\\ D_{F}&=A_FB_F+(f_k+u_F)Id. \end{align*} In other words, the system $(D_F,(c_n^F)_{n\in \sigma _F})$ can be obtained by applying a Darboux transform to the system $(D_{F_{\{ k\} }},(c_n^{F_{\{ k\} }})_{n\in \sigma _{F_{\{ k\} }}})$. \end{lemma} \begin{proof} First of all, we point out that $\sigma_F=f_k-k+\sigma_{F_{\{ k\} }}$ (this is an easy consequence of (\ref{defuf}) and (\ref{defsf})). In particular $u_F=u_{F_{\{ k\} }}+f_k-k$. If we apply Sylvester's identity with $i_0=j_0=1$, $i_1=j_1=k$ (see Lemma \ref{lemS}) to the determinant (\ref{defchex}), we get \begin{align*} c_n^F(x)&=\frac{\Omega _F(x+1)}{\Omega_{F_{\{ k\} }}(x+1)}c_{n-f_k+k}^{F_{\{ k\} }}(x)-\frac{\Omega _F(x)}{\Omega_{F_{\{ k\} }}(x+1)}c_{n-f_k+k}^{F_{\{ k\} }}(x+1)\\ &=A_F(c_{n-f_k+k}^{F_{\{ k\} }})(x). \end{align*} Write now $D_{F_{\{ k\} }}=h_{-1}^{F_{\{ k\} }}\Sh_{-1}+h_0^{F_{\{ k\} }}\Sh_0+h_1^{F_{\{ k\} }}\Sh_1$. Using Lemma \ref{lemdes}, the factorization $D_{F_{\{ k\} }}=B_FA_F+(f_k+u_{F_{\{ k\} }})Id$ will follow if we prove $$ h_0^{F_{\{ k\} }}(x)-(f_k+u_{F_{\{ k\} }})=-h_{-1}^{F_{\{ k\} }}(x)\frac{\Omega_F(x-1)}{\Omega_F(x)}-h_{1}^{F_{\{ k\} }}(x)\frac{\Omega_F(x+1)}{\Omega_F(x)}. $$ This can be rewritten as \begin{equation}\label{alqr} D_{F_{\{ k\} }}(\Omega_F)=(f_k+u_{F_{\{ k\} }})\Omega _F. \end{equation} But this is a consequence of the identity $\Omega _F(x)=(-1)^{k-1}c_{f_k+u_{F_{\{ k\} }}}^{F_{\{ k\} }}(x)$ (\ref{rrom0}). We finally prove the factorization $D_{F}=A_FB_F+(f_k+u_F)Id$.
Since $D_F(c_n^F)=nc_n^F$, $n\in \sigma_F$, using Lemma \ref{lemigop}, it will be enough to prove that $A_FB_F(c_n^F)=(n-f_k-u_F)c_n^F$, $n\in \sigma_F$: \begin{align*} A_FB_F(c_n^F)&=A_FB_FA_F(c_{n-f_k+k}^{F_{\{ k\} }})=A_F[D^{F_{\{ k\} }}-(f_k+u_{F_{\{ k\} }})Id](c_{n-f_k+k}^{F_{\{ k\} }})\\ &=A_F[(n-f_k-u_F)(c_{n-f_k+k}^{F_{\{ k\} }})]=(n-f_k-u_F)c_n^F. \end{align*} \end{proof} Analogous factorization can be obtained by using any of the sets $F_{\{i\} }$, $1\le i<k$ (see (\ref{deff0})) instead of $F_{\{k\} }$. When the determinants $\Omega _F (n)\not =0$ (\ref{defom}), $n\ge 0$ (or equivalently, $\Phi_n^F\not =0$ (\ref{defphch}), $n\ge 0$), the following alternative construction of the polynomial $q_n^F$ (\ref{defqnch}) has been given in \cite{DdI}. For a finite set $F=\{f_1,\cdots , f_k\}$ of positive integers, consider the involuted set $I(F)=G=\{ g_1,\cdots, g_m\}$ with $g_i<g_{i+1}$, where the involution $I$ is defined by (\ref{dinv}). Assuming that $\Omega _F (n)\not =0$, $n\ge 0$, using the invariance (\ref{iza}) and Theorem 1.1 of \cite{DdI}, we have \begin{equation}\label{quschi} q_n^F(x)=\alpha_n\begin{vmatrix} c^a_n(x-v_F) & -c^a_{n-1}(x-v_F) & \cdots & (-1)^mc^a_{n-m}(x-v_F) \\ c^{-a}_{g_1}(-n-1) & c^{-a}_{g_1}(-n) & \cdots & c^{-a}_{g_1}(-n+m-1) \\ \vdots & \vdots & \ddots & \vdots \\ c^{-a}_{g_m}(-n-1) & \displaystyle c^{-a}_{g_m}(-n) & \cdots &c^{-a}_{g_m}(-n+m-1) \end{vmatrix}, \end{equation} where $\alpha_n$, $n\ge 0$, is the normalization constant $$ \alpha_n=(-1)^{k(n+1)}\frac{a^{k(n-1)-u_F}\prod_{f\in F}f!}{\prod_{i=1}^k(n+i)!}. $$ The duality (\ref{duaqnrn}) then provides an alternative definition of the polynomial $c_n^F$, $n\ge v_F$. Indeed, after an easy calculation, we conclude that \begin{equation}\label{quschi2} c_n^F(x)=\beta_n\begin{vmatrix} c^a_{n-v_F}(x) & \frac{x}{a}c^a_{n-v_F}(x-1) & \cdots & \frac{(x-m+1)_m}{a^m}c^a_{n-v_F}(x-m+1) \\ c^{-a}_{g_1}(-x-1) & c^{-a}_{g_1}(-x) & \cdots & c^{-a}_{g_1}(-x+m-1) \\ \vdots & \vdots & \ddots & \vdots \\ c^{-a}_{g_m}(-x-1) & \displaystyle c^{-a}_{g_m}(-x) & \cdots &c^{-a}_{g_m}(-x+m-1) \end{vmatrix}, \end{equation} where $\beta_n$, $n\ge 0$, is the normalization constant \begin{equation}\label{nc1} \beta_n=(-1)^{m+k+u_F}\frac{a^m(n-v_F)!V_F\prod_{g\in G}g!\prod_{i=1}^k(f_i-n+u_F)}{(n-u_F)!V_G\prod_{f\in F}f!}. \end{equation} When the cardinality of the involuted set $G=I(F)$ is less than the cardinality of $F$, (\ref{quschi2}) will provide a more efficient way than (\ref{defchex}) for an explicit computation of the polynomials $c_n^F$, $n\ge v_F$. For instance, take $F=\{1,\cdots, k\}$. Since $I(F)=\{ k\}$, the determinant in (\ref{quschi2}) has order $2$ while the determinant in (\ref{defchex}) has order $k+1$. Applying Sylvester's identity to the determinant (\ref{quschi2}), we get an alternative way to construct the system $(D_F,c_n^F)$ by applying a sequence of at most $m$ Darboux transform to the Charlier system. 
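To illustrate the construction so far, the following sketch (a minimal illustration assuming SymPy is available; the set $F=\{1,2\}$ and all helper names are ours) builds $c_n^{a;F}$ from the determinant (\ref{defchex}), the polynomials $\Omega_F$ and $\Lambda_F$ from (\ref{defom}) and (\ref{deflam}), and the coefficients (\ref{jpm1})--(\ref{jpm3}), and then verifies symbolically that $D_F(c_n^F)=nc_n^F$ for the first few $n\in\sigma_F$, as asserted in Theorem \ref{th3.3}.
\begin{verbatim}
import sympy as sp

x, a = sp.symbols('x a')

def charlier(n, xx):           # c_n^a(x) from (Chpol); c_n^a = 0 for n < 0
    if n < 0:
        return sp.Integer(0)
    return sp.expand(sum((-a)**(n - j) * sp.binomial(n, j) * sp.ff(xx, j)
                         for j in range(n + 1)) / sp.factorial(n))

F = [1, 2]                     # an admissible set; k = 2, u_F = 0
k = len(F)
uF = sum(F) - k * (k + 1) // 2

def c_F(n):                    # the Casorati determinant (defchex)
    rows = [[charlier(n - uF, x + j) for j in range(k + 1)]]
    rows += [[charlier(f, x + j) for j in range(k + 1)] for f in F]
    return sp.expand(sp.Matrix(rows).det())

Omega = sp.Matrix([[charlier(f, x + j) for j in range(k)] for f in F]).det()
Lam = sp.Matrix([[charlier(f, x + j) for j in list(range(k - 1)) + [k]]
                 for f in F]).det()

# coefficients (jpm1)-(jpm3) of the operator D_F
hm1 = -x * Omega.subs(x, x + 1) / Omega
h0 = x + k + a + uF - a * Lam.subs(x, x + 1) / Omega.subs(x, x + 1) + a * Lam / Omega
h1 = -a * Omega / Omega.subs(x, x + 1)

sigmaF = [n for n in range(uF, uF + 6) if n - uF not in F]
for n in sigmaF:
    p = c_F(n)
    Dp = hm1 * p.subs(x, x - 1) + h0 * p + h1 * p.subs(x, x + 1)
    num, den = sp.fraction(sp.together(Dp - n * p))
    assert sp.expand(num) == 0
print("D_F(c_n^F) = n c_n^F checked for n in", sigmaF, "with F =", F)
\end{verbatim}
For larger sets $F$ the same sketch applies verbatim, at the cost of larger determinants; as pointed out above, when $I(F)$ has fewer elements than $F$ the alternative expression (\ref{quschi2}) is the more efficient route.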
\begin{lemma}\label{thjod} Given a real number $a\not =0$ and a finite set $F$ of positive integers for which $\Omega _F^a (n)\not =0$, $n\ge 0$, define the first order difference operators $C_F$ and $E_F$ as \begin{align}\label{defoc} C_F&=-\frac{x\tilde \Omega _F(x+1)}{a\tilde\Omega_{F_{\Downarrow }}(x)}\Sh_{-1}+\frac{\tilde \Omega _F(x)}{\tilde \Omega_{F_{\Downarrow }}(x)}\Sh_0,\\\label{defoe} E_F&=a\frac{\tilde \Omega _{F_{\Downarrow }}(x+1)}{\tilde \Omega_{F}(x+1)}\Sh_0-a\frac{\tilde \Omega_{F_{\Downarrow }}(x)}{\tilde \Omega _{F}(x+1)}\Sh_1, \end{align} where $F_{\Downarrow }$ is the finite set of positive integers defined by (\ref{deff1}). Then $D_{F_{\Downarrow }}=E_FC_F+(u_{F}-k-1)Id$ and $D_{F}=C_FE_F+u_FId$. Moreover \begin{equation}\label{spmdv} C_F(c_{n-k-1}^{F_{\Downarrow }})=(-1)^{u_F+u_{F_{\Downarrow }}+1}\frac{n-u_F}{a}c_n^F(x),\quad n\ge v_F, \end{equation} where $n_{F_{\Downarrow }}$ is the number of elements of $F_{\Downarrow }$. \end{lemma} \begin{proof} Write $D_{F_{\Downarrow }}=h_{-1}^{F_{\Downarrow }}\Sh_{-1}+h_0^{F_{\Downarrow }}\Sh_0+h_1^{F_{\Downarrow }}\Sh_1$. Using Lemma \ref{lemdes}, the factorization $D_{F_{\Downarrow }}=E_FC_F+(u_{F}-k-1)Id$ will follow if we prove \begin{equation}\label{alqr2} h_0^{F_{\Downarrow }}(x)-(u_{F}-k-1)=-\frac{a}{x}h_{-1}^{F_{\Downarrow }}(x)\frac{\tilde \Omega_F(x)}{\tilde \Omega_F(x+1)}-\frac{x+1}{a}h_{1}^{F_{\Downarrow }}(x)\frac{\tilde \Omega_F(x+2)}{\tilde \Omega_F(x+1)}. \end{equation} If we set $a\to -a$, $x\to -x-1$ and use the invariant property of $\Omega$ (\ref{iza}), this can be rewritten as $$ D_{G_{\{ m\}}}(\Omega_G)=(g_m+u_{G_{\{ m\}}})\Omega _G, $$ where $G_{\{ m\} }$ is the finite set of positive integers defined by (\ref{deff0}). (\ref{alqr2}) then follows by taking into account that $\Omega _G(x)=(-1)^{m-1}c_{g_m+u_{G_{\{ m\}}}}^{G_{\{ m\}}}(x)$ (\ref{rrom0}). For $n\ge v_F$, the identity (\ref{spmdv}) follows by applying Sylvester's identity to the determinant (\ref{quschi2}) and using (\ref{ref0f1}). The factorization $D_{F}=C_FE_F+u_FId$ can be proved as in Lemma \ref{lfe}. \end{proof} We have computational evidence showing that (\ref{spmdv}) also holds for $n\in \sigma_F$, $n<v_F$. Actually, in the next Section we will prove it for admissible sets $F$. The factorization in the previous Lemma will be the key to prove that for admissible sets $F$, the polynomials $c_n^F$, $n\in\sigma _F$, are complete in the associated $L^2$ space. \section{Exceptional Charlier polynomials} In the previous Section, we have associated to each finite set $F$ of positive integers the polynomials $c_n^F$, $n\in \sigma_F$, which are always eigenfunctions of a second order difference operator with rational coefficients. We are interested in the cases when, in addition, those polynomials are orthogonal and complete with respect to a positive measure. \begin{definition} The polynomials $c_n^{a;F}$, $n\in \sigma_F$, defined by (\ref{defchex}) are called exceptional Charlier polynomials if they are orthogonal and complete with respect to a positive measure. \end{definition} We next introduce the key concept for finite sets $F$ such that the polynomials $c_n^F$, $n\in \sigma _F$, are exceptional Charlier polynomials. \begin{definition}\label{defadm} Let $F$ be a finite set of positive integers. Split up the set $F$, $F=\bigcup _{i=1}^KY_i$, in such a way that $Y_i\cap Y_j=\emptyset $, $i\not =j$, the elements of each $Y_i$ are consecutive integers and $1+\max (Y_i)<\min Y_{i+1}$, $i=1,\cdots, K-1$.
We say that $F$ is admissible if each $Y_i$, $i=1,\cdots, K$, has an even number of elements. \end{definition} Admissible sets $F$ can be characterized in terms of the positivity of the measure $\rho_a^F$ (\ref{mraf}) and the sign of the Casorati polynomial $\Omega _F$ in $\NN$. \begin{lemma}\label{l3.1} Given a positive real number $a$ and a finite set $F$ of positive integers, the following conditions are equivalent. \begin{enumerate} \item The measure $\rho_a^F$ (\ref{mraf}) is positive. \item The finite set $F$ is admissible. \item $\Omega_F^a(n)\Omega_F^a(n+1)>0$ for all nonnegative integer $n$, where the polynomial $\Omega_F^a$ is defined by (\ref{defom}). \end{enumerate} \end{lemma} \begin{proof} It is clear that the definition of an admissible set $F$ is equivalent to $\prod_{f\in F}(x-f)\ge 0$, for all $x\in \NN$. The equivalence between (1) and (2) is then an easy consequence of the definition of the measure $\rho _a^F$. We now prove the equivalence between (1) and (3). (1) $\Rightarrow$ (3). Since the measure $\rho_a^F$ is positive, the polynomials $(q_n^F)_n$ (\ref{defqnch}) are orthogonal with respect to the measure $\rho_a^F$ and have positive $L^2$-norm. According to (\ref{n2q}) in Lemma \ref{lemmc}, we have \begin{equation}\label{nssu} \langle q_n^F,q_n^F\rangle =(-1)^k\frac{n!}{(n+k)!}\langle c_n^a,c_n^a\rangle \Phi_n^F\Phi_{n+1}^F. \end{equation} We deduce then that $(-1)^k\Phi_n^F\Phi_{n+1}^F>0$ for all $n$. Using the duality (\ref{duomph}), we conclude that $\Omega _F(n)\Omega_F(n+1)>0$ for all nonnegative integers $n$. (3) $\Rightarrow$ (1). Using Lemma \ref{sze}, the duality (\ref{duomph}) and proceeding as before, we conclude that the polynomials $(q_n^F)_n$ are orthogonal with respect to $\rho_a^F$ and have positive $L^2$-norm. This implies that there exists a positive measure $\mu$ with respect to which the polynomials $(q_n^F)_n$ are orthogonal. Taking into account that the Fourier transform of $\rho_a^F$ is an entire function, using moment problem standard techniques (see, for instance, \cite{Akh}), it is not difficult to prove that $\mu$ has to be equal to $\rho _a^F$. Hence the measure $\rho_a^F$ is positive. \end{proof} In the two following Theorems we prove that for admisible sets $F$ the polynomials $c_n^F$, $n\in \sigma _F$, are orthogonal and complete with respect to a positive measure. \begin{theorem}\label{th4.4} Given a real number $a\not =0$ and a finite set $F$ of positive integers, assume that $\Omega_F^a(n)\not=0$ for all nonnegative integer $n$. Then the polynomials $c_n^{a;F}$, $n\in \sigma _F$, are orthogonal with respect to the (possibly signed) discrete measure \begin{equation}\label{mochex} \omega_{a;F}=\sum_{x=0}^\infty \frac{a^x}{x!\Omega_F^a(x)\Omega_F^a(x+1)}\delta_x. \end{equation} Moreover, for $a<0$ the measure $\omega_{a;F}$ is never positive, and for $a>0$ the measure $\omega_{a;F}$ is positive if and only if $F$ is admissible. \end{theorem} \begin{proof} Write $\Aa$ for the linear space generated by the polynomials $c_n^F$, $n\in \sigma _F$. Using Lemma \ref{tcsd}, the definition of the measure $\omega_{a;F}$ and the expressions for the difference coefficients of the operator $D_F$ (see Theorem \ref{th3.3}), it is straightforward to check that $D_F$ is symmetric with respect to the pair $(\omega_{a;F},\Aa )$. Since the polynomials $c_n^F$, $n\in \sigma_F$, are eigenfunctions of $D_F$ with different eigenvalues, Lemma \ref{lsyo} implies that they are orthogonal with respect to $\omega_{a;F}$. 
If $a<0$ and the measure $\omega_{a;F}$ is positive, we conclude that $\Omega_F(2n+1)\Omega_F(2n+2)<0$ for all positive integer $n$. But this would imply that $\Omega_F$ has at least a zero in each interval $(2n+1,2n+2)$, which it is impossible since $\Omega_F$ is a polynomial. If $a>0$, according to Lemma \ref{l3.1}, $F$ is admissible if and only if $\Omega_F(x)\Omega_F(x+1)>0$ for all nonnegative integer $x$. \end{proof} \begin{theorem}\label{th4.5} Let $a$ and $F$ be a positive real number and an admissible finite set of positive integers, respectively. Then the linear combinations of the polynomials $c_n^{a;F}$, $n\in \sigma _F$, are dense in $L^2(\omega_{a;F})$, where $\omega_{a;F}$ is the positive measure (\ref{mochex}). Hence $c_n^{a;F}$, $n\in \sigma _F$, are exceptional Charlier polynomials. \end{theorem} \begin{proof} Using Lemma \ref{l3.1} and taking into account that $F$ is admissible, it follows that the measure $\rho _{a}^{F}$ (\ref{mraf}) is positive. We remark that this positive measure is also determinate (that is, there is not other measure with the same moments as those of $\rho _{a}^{F}$). As we pointed out above, this can be proved using moment problem standard techniques (taking into account, for instance, that the Fourier transform of $\rho_{a}^F$ is an entire function). Since for determinate measures the polynomials are dense in the associated $L^2$ space, we deduce that the sequence $(q_n^F/\Vert q_n^F\Vert _2)_n$ (where $q_n^F$ is the polynomial defined by (\ref{defqnch})) is an orthonormal basis in $L^2(\rho_{a}^F)$. For $s\in \sigma _F$, consider the function $h_s(x)=\begin{cases} 1/\rho _{a}^{F}(s),& x=s\\ 0,& x\not =s, \end{cases}$ where by $\rho _{a}^{F}(s)$ we denote the mass of the discrete measure $\rho_a^F$ at the point $s$. Since the support of $\rho _{a}^{F}$ is $\sigma_F$, we get that $h_s\in L^2(\rho_{a}^F)$. Its Fourier coefficients with respect to the orthonormal basis $(q_n^F/\Vert q_n^F\Vert _2)_n$ are $q_n^F(s)/\Vert q_n^F\Vert _2$, $n\ge 0$. Hence \begin{equation}\label{pf1} \sum _{n=0}^\infty \frac{q_n^F(s)q_n^F(r)}{\Vert q_n^F\Vert _2 ^2}=\langle h_s,h_r\rangle _{\rho_{a}^F}=\frac{1}{\rho_{a}^F(s)}\delta_{s,r}. \end{equation} This is the dual orthogonality associated to the orthogonality $$ \sum_{u\in \sigma _F}q_n^F(u)q_m^F(u)\rho _{a}^{F}(u)=\langle q_n^F,q_n^F\rangle \delta _{n,m} $$ of the polynomials $q_n^F$, $n\ge 0$, with respect to the positive measure $\rho _{a}^{F}$ (see, for instance, \cite{At}, Appendix III, or \cite{KLS}, Th. 3.8). Using (\ref{nssu}), (\ref{norCh}) and the duality (\ref{duomph}), we get \begin{equation}\label{neq1} \frac{1}{\Vert q_n^F\Vert _2 ^2}=\omega _{a;F}(n)x_n, \end{equation} where $x_n$ is the positive number given by \begin{equation}\label{defxn} x_n=\frac{a^k}{e^a}\left(\frac{\prod_{i=0}^k (n+i)!}{a^{(k+1)n-u_F}\prod_{f\in F}f!}\right)^2 \end{equation} Using now the duality (\ref{duaqnrn}), we can rewrite (\ref{pf1}) for $n=m$ as \begin{equation}\label{mochx} \langle c_n^{a;F},c_n^{a;F}\rangle_{\omega_{a}^{F}}=\frac{a^{n-u_F-k}e^a\prod_{f\in F}(n-f-u_F)}{(n-u_F)!}. \end{equation} Consider now a function $f$ in $L^2(\omega_{a;F})$ and write $g(n)=(-1)^nf(n)/x_n^{1/2}$, where $x_n$ is the positive number given by (\ref{defxn}). Using (\ref{neq1}), we get $$ \sum_{n=0}^\infty\frac{\vert g(n)\vert ^2}{\langle q_n^F,q_n^F\rangle _{\rho_{a}^F}}=\sum_{n=0}^\infty \omega_{a;F}(n)\vert f(n)\vert ^2=\Vert f\Vert _2^2<\infty. 
$$ Define now $$ v_r=\sum_{n=0}^\infty\frac{g(n)q_n^F (r)}{\langle q_n^F,q_n^F\rangle _{\rho_{a}^F}}. $$ Using Theorem III.2.1 of \cite{At}, we get \begin{equation}\label{pf6} \Vert f\Vert _2^2=\sum_{n=0}^\infty\frac{\vert g(n)\vert ^2}{\langle q_n^F,q_n^F\rangle _{\rho_{a}^F}}=\sum _{r\in \sigma _F}\vert v_r\vert ^2\rho_{a}^F (r). \end{equation} On the other hand, using the duality (\ref{duaqnrn}), (\ref{neq1}), (\ref{defxn}) and (\ref{mochx}), we have $$ v_r=\frac{(-1)^r}{(\rho_{a}^F(r))^{1/2}}\sum_{n=0}^\infty f(n)\frac{c_r^{a;F}(n)}{\Vert c_r^{a;F}\Vert _2}\omega_{a;F}(n). $$ This is saying that $(-1)^r(\rho_{a}^F(r))^{1/2}v_r$, $r\in \sigma _\F$, are the Fourier coefficients of $f$ with respect to the orthonormal system $(c_n^{a;F}/\Vert c_n^{a;F}\Vert_2)_n$. Hence, the identity (\ref{pf6}) is Parseval's identity for the function $f$. From where we deduce that the orthonormal system $(c_n^{a;F}/\Vert c_n^{a;F}\Vert_2)_n$ is complete in $L^2(\omega_{a;F})$. \end{proof} \section{Constructing polynomials which are eigenfunctions of second order differential operators} One can construct exceptional Hermite polynomials by taking limit in the exceptional Charlier polynomials. We use the basic limit (\ref{blchh}). Given a finite set of positive integers $F$, using the expression (\ref{defchexa}) for the polynomials $c_n^{a;F}$, $n\in\sigma_F$, setting $x\to \sqrt {2a}x+a$ and taking limit as $a\to +\infty$, we get (up to normalization constants) the polynomials, $n\in \sigma _F$, \begin{equation}\label{defhex} H_n^F(x)=\begin{vmatrix}H_{n-u_F}(x)&H_{n-u_F}'(x)&\cdots &H_{n-u_F}^{(k)}(x)\\ H_{f_1}(x)&H_{f_1}'(x)&\cdots &H_{f_1}^{(k)}(x)\\ \vdots&\vdots&\ddots &\vdots\\ H_{f_k}(x)&H_{f_k}'(x)&\cdots &H_{f_k}^{(k)}(x) \end{vmatrix} . \end{equation} More precisely \begin{equation}\label{lim1} \lim_{a\to +\infty}\left(\frac{2}{a}\right)^{n/2}c_n^{F}(\sqrt{2a}x+a)=\frac{1}{(n-u_F)!\nu_F}H_n^F(x) \end{equation} uniformly in compact sets, where \begin{equation}\label{defnuf} \nu_F=2^{\binom{k+1}{2}}\prod_{f\in F}f!. \end{equation} Notice that $H_n^F$ is a polynomial of degree $n$ with leading coefficient equal to $$ 2^{n+\binom{k+1}{2}}V_F\prod_{f\in F}(f-n+u_F), $$ where $V_F$ is the Vandermonde determinant defined by (\ref{defvdm}). Assume now that $F$ is admissible (\ref{defadm}). According to Lemma \ref{l3.1}, this gives for all $a>0$ that $\Omega ^a _F(x)\Omega ^a _F(x+1)>0$ for $x\in \NN $, where $\Omega _F^a$ is the polynomial (\ref{defom}) associated to the Charlier family. In particular $\Omega ^a _F(x)\not =0$, for all nonnegative integer $x$. Hence, if instead of (\ref{defchexa}) we use (\ref{quschi2}), we get the following alternative expression for the polynomials $H_n^F$, $n\ge v_F$, ($i$ denotes the imaginary unit $i=\sqrt{-1}$) \begin{equation}\label{defhexa} H_n^F(x)=\gamma_n\begin{vmatrix}H_{n-v_F}(x)&-iH_{n-v_F+1}(x)&\cdots &(-i)^mH_{n-v_F+m}(x)\\ H_{g_1}(-ix)&H_{g_1}'(-ix)&\cdots &H_{g_1}^{(m)}(-ix)\\ \vdots&\vdots&\ddots &\vdots\\ H_{g_m}(-ix)&H_{g_m}'(-ix)&\cdots &H_{g_m}^{(m)}(-ix)\end{vmatrix} , \end{equation} where $\gamma_n$ is the normalization constant \begin{equation}\label{nc2} \gamma_n=i^{u_G}2^{\binom{k+1}{2}-\binom{m}{2}}\frac{V_F}{V_G}\prod_{f\in F}(f-n+u_F), \end{equation} and as in the previous sections $G$ denotes the involuted set $G=I(F)$ (see (\ref{dinv})). 
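As a concrete check of the definition (\ref{defhex}), the following sketch (a minimal illustration assuming SymPy is available; the helper names are ours, and $V_F$ is taken to be the Vandermonde product $\prod_{i<j}(f_j-f_i)$) computes $H_n^F$ for $F=\{1,2\}$ and verifies the degree and leading coefficient stated above, as well as the vanishing of the determinant for $n\notin\sigma_F$.
\begin{verbatim}
import sympy as sp

x = sp.Symbol('x')
F = [1, 2]                                   # an admissible set; k = 2, u_F = 0
k = len(F)
uF = sum(F) - k * (k + 1) // 2
VF = 1
for i in range(k):
    for j in range(i + 1, k):
        VF *= F[j] - F[i]                    # Vandermonde prod_{i<j} (f_j - f_i)

def row(m):                                  # the row (H_m, H_m', ..., H_m^{(k)})
    return [sp.diff(sp.hermite(m, x), x, j) for j in range(k + 1)]

def H_F(n):                                  # the determinant (defhex)
    return sp.expand(sp.Matrix([row(n - uF)] + [row(f) for f in F]).det())

for n in [0, 3, 4, 5]:                       # n in sigma_F
    p = sp.Poly(H_F(n), x)
    lead = 2**(n + k * (k + 1) // 2) * VF
    for f in F:
        lead *= f - n + uF
    assert p.degree() == n and p.LC() == lead

for n in [1, 2]:                             # n not in sigma_F: two rows coincide
    assert H_F(n) == 0
print("degree and leading coefficient of H_n^F checked for F =", F)
\end{verbatim}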
We introduce the associated polynomials \begin{align}\label{defhom} \Omega _F(x)&=\begin{vmatrix} H_{f_1}(x)&H_{f_1}'(x)&\cdots &H_{f_1}^{(k-1)}(x)\\ \vdots&\vdots&\ddots &\vdots\\ H_{f_k}(x)&H_{f_k}'(x)&\cdots &H_{f_k}^{(k-1)}(x) \end{vmatrix},\\\label{defhomt} \tilde \Omega _F(x)&=i^{u_G+m}\begin{vmatrix} H_{g_1}(-ix)&H_{g_1}'(-ix)&\cdots &H_{g_1}^{(m-1)}(-ix)\\ \vdots&\vdots&\ddots &\vdots\\ H_{g_m}(-ix)&H_{g_m}'(-ix)&\cdots &H_{g_m}^{(m-1)}(-ix)\end{vmatrix} . \end{align} Since $u_G+m=u_F+k$, we have that both $\Omega_F$ and $\tilde \Omega_F$ are polynomials of degree $u_F+k$. The invariant property (\ref{iza}) gives \begin{equation}\label{izah} \Omega _F(x)=2^{\binom{k}{2}-\binom{m}{2}}\frac{V_F}{V_G}\tilde\Omega _F(x). \end{equation} We also straightforwardly have \begin{equation}\label{rromh} H_{u_F}^F(x)=\frac{2^{k-s_F+1}\nu_F}{\nu_{F_\Downarrow}}\Omega_{F_\Downarrow }(x), \end{equation} where the numbers $\nu_F$ and $s_F$ are defined by (\ref{defnuf}) and (\ref{defs0}), respectively, and the finite set of integers $F_\Downarrow$ is defined by (\ref{deff1}). Proceeding in a similar way, we can transform the second order difference operator (\ref{sodochex}) in a second order differential operator with respect to which the polynomials $H_n^F$, $n\in\sigma_F$, are eigenfunctions: \begin{theorem}\label{th5.1} Let $F$ be a finite set of positive integers. Then the polynomials $H_n^F$, $n\in \sigma _F$, are common eigenfunctions of the second order differential operator \begin{equation}\label{sodohex} D_F=-\partial ^2+h_1(x)\partial+h_0(x), \end{equation} where $\partial=d/dx$ and \begin{align}\label{jph1} h_1(x)&=2\left(x+\frac{\Omega_F'(x)}{\Omega_F(x)}\right),\\\label{jph2} h_0(x)&=2\left(k+u_F-x\frac{\Omega_F'(x)}{\Omega_F(x)}\right)-\frac{\Omega_F''(x)}{\Omega_F(x)}. \end{align} More precisely $D_F(H_n^F)=2nH_n^F(x)$. \end{theorem} \begin{proof} The proof is a matter of calculation using carefully the basic limit (\ref{blchh}), hence we only sketch it. We assume that $k$ is even (the case for $k$ odd being similar). Using that $c_n^a(x+k)=\sum_{j=0}^k\binom{k}{j}c_{n-j}^a(x)$, the basic limit (\ref{blchh}) and the alternative definition (\ref{defoma}) for $\Omega _F^a$, we can get the limits \begin{align}\label{lim2} \lim_{a\to \infty}\left(\frac{2}{a}\right)^{(u_F+k)/2}\Omega ^a_F(x_a)&=\frac{2^k\Omega _F(x)}{\nu _F},\\\label{lim3} \lim_{a\to \infty}\left(\frac{2}{a}\right)^{(u_F+k-1)/2}(\Omega ^a_F(x_a+1)-\Omega ^a_F(x_a))&=\frac{2^{k-1}\Omega _F'(x)}{\nu _F},\\\nonumber \lim_{a\to \infty}\left(\frac{2}{a}\right)^{(u_F+k-2)/2}(\Omega ^a_F(x_a+1)-2\Omega ^a_F(x_a)+\Omega ^a_F(x_a-1))&=\frac{2^{k-2}\Omega _F''(x)}{\nu_F}, \end{align} where $\nu_F$ is defined by (\ref{defnuf}) and $x_a=\sqrt{2a}x+a$. Taking into account that $c_n^{a;F}(x)=\Omega_{F_n}^a(x)$, where $F_n=\{f_1,\cdots, f_k,n-u_F\}$, we can get similar limits for the polynomials $c_n^{a;F}(x)$, $n\in \sigma _F$. We next write the spectral equation $D_F^a(c_n^{a;F})=nc_n^{a;F}$ (where we write $D_F^a$ for the second order difference operator (\ref{sodochex})) in the form \begin{align*} &h_{-1}^a(x)\left[c_n^F(x+1)-2c_n^F(x)+c_n^F(x-1)\right]+(h_1^a(x)-h^a_{-1}(x))\left[c_n^F(x+1)-c_n^F(x)\right]\\&\quad\quad +(h_0^a(x)+h_1^a(x)+h^a_{-1}(x))c_n^F(x)=nc_n^F(x), \end{align*} where $h_{-1}^a, h_0^a$ and $h_1^a$ are given by (\ref{jpm1}), (\ref{jpm2}) and (\ref{jpm3}), respectively. 
It is then enough to set $x\to x_a$ and take carefully limit as $a\to \infty$ using (\ref{jpm1}), (\ref{jpm2}), (\ref{jpm3}) and the previous limits. \end{proof} We can factorize the second order differential operator $D_F$ as product of two first order differential operators. As a consequence the system $(D_F, (H_n^F)_{n\in \sigma_F})$ can be constructed by applying a sequence of $k$ Darboux transforms to the Hermite system. \begin{lemma} Let $F=\{f_1,\cdots ,f_k\}$ be a finite set of positive integers and write $F_{\{ k\}}=\{f_1,\cdots ,f_{k-1}\}$. We define the first order differential operators $A_F$ and $B_F$ as \begin{align} A_F&=-\frac{\Omega _F(x)}{\Omega_{F_{\{ k\}}}(x)}\partial+\frac{\Omega _F'(x)}{\Omega_{F_{\{ k\}}}(x)},\\ B_F&=\frac{\Omega _{F_{\{ k\}}}(x)}{\Omega_{F}(x)}\partial-\frac{2x\Omega_{F_{k}}+\Omega '_{F_{\{ k\}}}(x)}{\Omega_{F}(x)}. \end{align} Then $H_n^F(x)=A_F(H_{n-f_k+k}^{F_{\{ k\}}})(x)$, $n\in \sigma_F$. Moreover \begin{align*} D_{F_{\{ k\}}}&=B_FA_F+2(f_k+u_{F_{\{ k\}}})Id,\\ D_{F}&=A_FB_F+2(f_k+u_F)Id. \end{align*} \end{lemma} \begin{proof} The Lemma can be proved applying limits in Lemma \ref{lfe}, or by applying Silvester Identity (for rows $(1,k)$ and columns $(k-1,k)$) in the definition (\ref{defhex}) of the polynomials $H_n^F$, $n\in \sigma _F$. \end{proof} When $F$ is admissible, using the alternative expression (\ref{defhexa}) for the polynomials $H_n^F$, $n\in \sigma _F$, we get other factorization for the differential operator $D_F$. \begin{lemma}\label{lfh} Let $F$ be an admissible finite set of positive integers and write $F_{\Downarrow}$ for the finite set of positive integers defined by (\ref{deff1}). We define the first order differential operators $C_F$ and $E_F$ as \begin{align}\label{opch} C_F&=\frac{\tilde \Omega _F(x)}{\tilde \Omega_{F_{\Downarrow}}(x)}\partial-\frac{\tilde \Omega _F'(x)+2x\tilde\Omega _F(x)}{\tilde \Omega_{F_{\Downarrow}}(x)},\\\label{opeh} E_F&=-\frac{\tilde \Omega _{F_{\Downarrow}}(x)}{\tilde \Omega_{F}(x)}\partial+\frac{\tilde \Omega '_{F_{\Downarrow}}(x)}{\tilde \Omega_{F}(x)}. \end{align} Then $D_{F_{\Downarrow}}=E_FC_F+2(u_{F}-k-1)Id$ and $D_{F}=C_FE_F+2u_FId$. Moreover \begin{equation}\label{spmdvh} C_F(H_{n-k-1}^{F_{\Downarrow}})=\frac{-2^{m+\binom{k-s_F+2}{2}-\binom{k+1}{2}-1}\prod_{j=1}^{m-1}(g_m-g_j)}{\prod_{j=1}^{s_F-1}(j-1)!(j-n+u_F)\prod_{f\in F;f>s_F}(f-j)}H_n^F(x),\quad n\ge v_F, \end{equation} where $G=I(F)=\{g_1,\ldots , g_m\}$ and $s_F$ is defined by (\ref{defs0}). \end{lemma} \section{Exceptional Hermite polynomials} In the previous Section, we have associated to each finite set $F$ of positive integers the polynomials $H_n^F$, $n\in \sigma_F$, which are always eigenfunctions of a second order differential operator with rational coefficients. We are interested in the cases when, in addition, those polynomials are orthogonal and complete with respect to a positive measure. \begin{definition} The polynomials $H_n^F$, $n\in \sigma_F$, defined by (\ref{defhex}) are called exceptional Hermite polynomials, if they are orthogonal and complete with respect to a positive measure. \end{definition} As it was mentioned in the Introduction, simultaneously with this paper, exceptional Hermite polynomials as Wronskian determinant of Hermite polynomials have been introduced and studied (using a different approach) in \cite{GUGM}. 
In that paper, exceptional Hermite polynomials are defined for a given non-decreasing finite sequence of non-negative integers $\lambda=(\lambda _1,\cdots , \lambda _l)$, and are denoted by $H^{(\lambda )} _j(x)$; the degree of $H^{(\lambda )} _j(x)$ is $2\sum_{j=1}^l\lambda_j -2l+j$. The relationship between the exceptional Hermite polynomials introduced in \cite{GUGM} and the ones in this paper is the following: given a non-decreasing finite sequence of positive integers $\lambda=(\lambda _1,\cdots , \lambda _l)$, we form a finite set of positive integers $F$ as follows: $F=\{f_1,f_2,\cdots ,f_{2l-1},f_{2l}\}$, where $f_{2j-1}=\lambda_{j}+2j-2$ and $f_{2j}=\lambda_{j}+2j-1$, $j=1,\cdots , l$; then $H^{(\lambda )} _j(x)=H^F_{2\sum_{j=1}^l\lambda_j -2l+j}(x)$. The following Lemma and Theorem show that again the admissibility of $F$ will be the key to constructing exceptional Hermite polynomials. \begin{lemma} Let $F$ be a finite set of positive integers. Then $F$ is admissible if and only if the Wronskian determinant $\Omega_F$ (\ref{defhom}) does not vanish in $\RR$. \end{lemma} \begin{proof} Consider a second order differential operator $T$ of the form $T=-d^2/dx^2+U$, and write $\phi_n$, $n\ge 0$, for a sequence of eigenfunctions for $T$. For a finite set of positive integers $F=\{f_1,\cdots, f_k\}$, consider the Wronskian determinant $\Omega_F^T(x)=|\phi_{f_l}^{(j-1)}(x)|_{l,j=1}^k$. For operators defined on a half-line, Krein proved \cite{Kr} that $F$ is admissible if and only if $\Omega _F^T$ does not vanish on the real line. A similar result was proved by Adler \cite{Ad} for operators defined on a bounded interval. Adler's result can easily be extended to the whole real line (in fact, he considered in \cite{Ad} the case of Wronskian determinants of Hermite polynomials). The Lemma is then an easy consequence of this result for the functions $H_n(x)e^{-x^2}$. Anyway, for the sake of completeness, we prove, by passing to the limit from Lemma \ref{l3.1}, the implication $\Rightarrow$ in the Lemma (which is what we need in the following Theorem). For $a>0$, consider the positive measure $\tau _a$ defined by $$ \tau _a=\frac{a^k}{e^a}\sum _{x=0}^\infty\frac{a^x(c_{u_F}^{a;F}(x))^2}{x!\Omega _F^a(x)\Omega_F^a(x+1)}\delta _{y_{a,x}}, $$ where \begin{equation}\label{defya} y_{a,x}=(x-a)/\sqrt{2a}. \end{equation} We also need the following limits \begin{align}\label{lm1} \lim _{a\to +\infty}\frac{\Omega _F^a(\sqrt {2a}x+a)}{a^{(k+u_F)/2}}&=\frac{2^{(k-u_F)/2}\Omega _F(x)}{\nu_F},\\\label{lm11} \lim _{a\to +\infty}\frac{\Omega _F^a(\sqrt {2a}x+a+1)}{a^{(k+u_F)/2}}&=\frac{2^{(k-u_F)/2}\Omega _F(x)}{\nu_F},\\\label{lm2} \lim _{a\to +\infty}\frac{c_{u_F}^{a;F}(\sqrt {2a}x+a)}{a^{u_F/2}}&=\frac{2^{k-u_F/2-s_F+1}\Omega _{F_{\Downarrow}}(x)}{\nu_{F_{\Downarrow}}},\\\label{lm3} \lim _{a\to +\infty}\frac{\sqrt{2a}a^{\sqrt {2a}x+a}}{e^a\Gamma (\sqrt {2a}x+a+1)}&=e^{-x^2}/\sqrt \pi, \end{align} uniformly in compact sets. The first limit is (\ref{lim2}). The second one is a consequence of (\ref{lim3}). The third one is a consequence of (\ref{lim1}) and (\ref{rromh}). The fourth one is a consequence of Stirling's formula. We proceed by complete induction on $s=\max F$. Since $F$ is admissible, the first case to be considered is $s=2$, which corresponds to $F=\{1,2\}$. Then $\Omega _F (x)=8x^2+4$, which clearly satisfies $\Omega _F(x)\not =0$, $x\in \RR $. Assume that $\Omega _F(x)\not =0$, $x\in \RR $, if $\max F\le s$ and take an admissible set $F$ with $\max F=s+1$.
The definition of $F_{\Downarrow }$ (\ref{deff1}) then says that $\max F_{\Downarrow }\le s$. The induction hypothesis then implies that $\Omega _{F_{\Downarrow }}(x)\not =0$, $x\in \RR $. We now proceed by \textsl{reductio ad absurdum}. Hence, we assume that the polynomial $\Omega _F$ vanishes in $\RR$. Write $x_0=\max \{x\in \RR: \Omega _F(x)=0\}$. Take real numbers $u,v$ with $x_0<u<v$ and write $I=[u,v]$. Since $\Omega_F(x)\not =0$, $x\in I$, applying Hurwitz's Theorem to the limits (\ref{lm1}) and (\ref{lm11}) we can choose a countable set $X=\{a_n: n\in \NN \}$ of positive numbers with $\lim_n a_n=+\infty$ such that $\Omega _F^a(\sqrt {2a}x+a)\Omega _F^a(\sqrt {2a}x+a+1)\not =0$, $x\in I$ and $a\in X$. Hence, we can combine the limits (\ref{lm1}), (\ref{lm11}), (\ref{lm2}) and (\ref{lm3}) to get \begin{equation}\label{lm4} \lim _{a\to +\infty;a\in X}h_a(x)=d_3h(x),\quad \mbox{uniformly in $I$}, \end{equation} where \begin{align*} h_a(x)&=\frac{a^k\sqrt{2a}a^{\sqrt {2a}x+a}(c_{u_F}^{a;F}(\sqrt {2a}x+a))^2}{e^a\Gamma (\sqrt {2a}x+a+1)\Omega_F^a (\sqrt {2a}x+a)\Omega_F^a (\sqrt {2a}x+a+1)},\\ h(x)&=\frac{e^{-x^2}\Omega ^2_{F_{\Downarrow }}(x)}{\Omega ^2_F(x)}, \end{align*} and $d_3=2^{k-2s_F+2}\nu_F^2/(\sqrt \pi \nu_{F_{\Downarrow }}^2)$. We now prove that \begin{equation}\label{lm5} \lim _{a\to +\infty ;a\in X}\tau _a(I)=d_3\int_{I}h(x)dx. \end{equation} To do that, write $I_a=\{ x\in \NN: a+u\sqrt{2a}\le x\le a+v\sqrt{2a}\}$. The numbers $y_{a,x}$, $x\in I_a$, form a partition of the interval $I$ with $y_{a,x+1}-y_{a,x}=1/\sqrt{2a}$ (see (\ref{defya})). Since the function $h$ is continuous on the interval $I$, we get that $$ \int_{I}h(x)dx=\lim_{a\to +\infty; a\in X}S_a, $$ where $S_a$ is the Cauchy sum $$ S_a=\sum_{x\in I_a}h(y_{a,x})(y_{a,x+1}-y_{a,x}). $$ On the other hand, since $x\in I_a$ if and only if $u\le y_{a,x}\le v$ (\ref{defya}), we get \begin{align*} \tau _a(I)&=\frac{a^k}{e^a}\sum _{x\in I_a}\frac{a^x(c_{u_F}^F(x))^2}{x!\Omega _F^a(x)\Omega_F^a(x+1)}=\frac{1}{\sqrt {2a}}\sum _{x\in I_a}h_a(y_{a,x})\\ &=\sum _{x\in I_a}h_a(y_{a,x})(y_{a,x+1}-y_{a,x}). \end{align*} The limit (\ref{lm5}) now follows from the uniform limit (\ref{lm4}). The identity (\ref{mochx}) for $n=u_F$ says that $\tau _a(\RR)=d_F$, where the positive constant $d_F=\prod_{f\in F}f$ does not depend on $a$. This gives $\tau _a(I)\le d_F$. And so from the limit (\ref{lm5}) we get $$ \int_{I}h(x)dx \le \frac{d_F}{d_3}. $$ That is, $$ \int _u^v\frac{e^{-x^2}\Omega ^2_{F_{\Downarrow }}(x)}{\Omega ^2_F(x)}dx\le \frac{d_F}{d_3}. $$ On the other hand, since $\Omega_F(x_0)=0$ and $\Omega _{F_{\Downarrow }}(x)\not =0$, $x\in \RR$, we get $$ \lim _{u\to x_0^+}\int _u^v\frac{e^{-x^2}\Omega ^2_{F_{\Downarrow }}(x)}{\Omega ^2_F(x)}dx=\infty. $$ This is a contradiction. \end{proof} \begin{corollary} Given an admissible finite set $F$ of positive integers, we have for $n\in \sigma _F$, \begin{equation}\label{mohx} \langle H_n^{F},H_n^{F}\rangle_{\omega _{F}}=\sqrt \pi 2^{n-u_F+k}(n-u_F)!\prod_{f\in F}(n-f-u_F). \end{equation} \end{corollary} \begin{proof} The proof is similar to that of the previous Theorem (using (\ref{mochx})) and is omitted. \end{proof} \begin{theorem}\label{th6.3} Let $F$ be an admissible finite set of positive integers. Then the polynomials $H_n^F$, $n\in \sigma _F$, are orthogonal with respect to the positive weight \begin{equation}\label{mohex} \omega_F(x)=\frac{e^{-x^2}}{\Omega ^2_F(x)},\quad x\in \RR, \end{equation} and their linear combinations are dense in $L^2(\omega_{F})$.
Hence $H_n^F$, $n\in \sigma _F$, are exceptional Hermite polynomials. \end{theorem} \begin{proof} Write $\Aa _F$ for the linear space generated by the polynomials $H_n^F$, $n\in \sigma _F$. Using Lemma \ref{tcsd2}, it is easy to check that the second order differential operator $D_F$ (\ref{sodohex}) is symmetric with respect to the pair $(\omega _F, \Aa _F)$ (\ref{mohex}). Since the polynomials $H_n^F$, $n\in \sigma _F$, are eigenfunctions of $D_F$ with different eigenvalues, Lemma \ref{lsyo} implies that they are orthogonal with respect to $\omega _F$. The completeness of $H_n^F$, $n\in \sigma _F$, in $L^2(\omega_{F})$ can be proved in a similar way to that of Proposition 5.8 in \cite{GUGM} and is omitted. \end{proof} \noindent \textit{Acknowledgement}. The author would like to thank the two anonymous referees for their comments and suggestions. \noindent \textit{Mathematics Subject Classification: 42C05, 33C45, 33E30} \noindent \textit{Key words and phrases}: Orthogonal polynomials. Exceptional orthogonal polynomials. Difference operators. Differential operators. Charlier polynomials. Hermite polynomials. \end{document}
\begin{document} \title{Lattices from graph associahedra and subalgebras of the Malvenuto-Reutenauer algebra} \author{Emily Barnard, Thomas McConville} \maketitle \begin{abstract} The Malvenuto-Reutenauer algebra is a well-studied combinatorial Hopf algebra with a basis indexed by permutations. This algebra contains a wide variety of interesting sub Hopf algebras, in particular the Hopf algebra of plane binary trees introduced by Loday and Ronco. We compare two general constructions of subalgebras of the Malvenuto-Reutenauer algebra, both of which include the Loday-Ronco algebra. The first is a construction by Reading defined in terms of lattice quotients of the weak order, and the second is a construction by Ronco in terms of graph associahedra. To make this comparison, we consider a natural partial ordering on the maximal tubings of a graph and characterize those graphs for which this poset is a lattice quotient of the weak order. \end{abstract} \setcounter{tocdepth}{2} \tableofcontents \section{Introduction} Given a graph $G$, Postnikov defined a graph associahedron $P_G$ as an example of a \emph{generalized permutohedron}, a polytope whose normal fan coarsens the braid arrangement \cite{postnikov:2009permutohedra}. Graph associahedra were also introduced independently in \cite{carr.devadoss:2006coxeter} and \cite{davis.janus.scott:2003fundamental}. Some significant examples of graph associahedra include the associahedron, the cyclohedron, and the permutohedron. Combinatorially, the faces of the graph associahedron correspond to certain collections of connected subgraphs of $G$, called \emph{tubings}. We recall these definitions in Section~\ref{sec:tubing}. We consider a poset $L_G$ on the maximal tubings of $G$ whose Hasse diagram is an orientation of the $1$-skeleton of the graph associahedron. In \cite{ronco:2012tamari}, Ronco defined a binary operation on a vector space generated by the tubings of an ``admissible'' family of graphs $\Gcal$, which gives this space the structure of an associative algebra. We call this algebra a \emph{tubing algebra}; see Section~\ref{subsec_hopf_algebra}. In particular, when $\Gcal$ is the set of complete graphs $K_n$ or path graphs $P_n$, the tubing algebra is isomorphic to either the Malvenuto-Reutenauer algebra on permutations \cite{malvenuto.reutenauer:1995duality} or the Loday-Ronco algebra on binary trees \cite{loday.ronco:1998hopf}, respectively. The interpretation of these algebras in terms of tubings was given previously in \cite{forcey.springfield:2010geometric}. In Section~\ref{subsec_tubing_coalgebra}, we introduce the notion of a ``restriction-compatible'' family of graphs. Such families come with a comultiplication on their maximal tubings. We call the resulting coalgebra a \emph{tubing coalgebra}. Reading introduced a general technique to construct subalgebras of the Malvenuto-Reutenauer algebra using lattice quotients of the weak order on permutations in \cite{reading:2005lattice}. Using the terminology of \cite{reading:2005lattice}, if a sequence of lattice congruences $\{\Theta_n\}_{n\geq 0}$ is translational (respectively, insertional), then the set of congruence classes of $\mathfrak{S}_n$ modulo $\Theta_n$ naturally index a basis of a subalgebra (respectively, sub-coalgebra) of the Malvenuto-Reutenauer algebra. The main goal of this work is to compare the above constructions of Reading and Ronco. 
For any graph $G$ with vertex set $[n]$, there is a canonical surjective map ${\Psi_G:\mathfrak{S}_n\ra L_G}$ obtained by coarsening the braid arrangement in $\Rbb^n$ to the normal fan of~$P_G$. Our first main result characterizes graphs for which the map $\Psi_G$ is a lattice map. We say a graph $G$ is \emph{filled} if for each edge $\{i,k\}$ in $G$, there are edges $\{i,j\}$ and $\{j,k\}$ in $G$ whenever $i<j<k$. \begin{theorem}\label{thm_main_lattice} The map $\Psi_G$ is a lattice quotient map if and only if $G$ is filled. \end{theorem} Restricting attention to filled graphs, we have the following comparison between the constructions of Reading and Ronco. \begin{theorem}\label{thm_main} Let $\Gcal=\{G_n\}_{n\geq 0}$ be a sequence of filled graphs, and let $\mathbf{\Theta}=\{\Theta_n\}_{n\geq 0}$ be the associated sequence of lattice congruences of the weak order. \begin{enumerate} \item\label{thm_main_1} The family $\Gcal$ is admissible if and only if $\mathbf{\Theta}$ is translational. \item\label{thm_main_2} The family $\Gcal$ is restriction-compatible if and only if $\mathbf{\Theta}$ is insertional. \end{enumerate} \end{theorem} In \cite{forcey:2012species}, Forcey posed the problem of determining whether $L_G$ is a lattice for any graph $G$. This turns out to be false in general; cf. Section~\ref{subsec:tubing_lattice}. We say a graph $G$ on $[n]$ is \emph{right-filled} if whenever $\{i,k\}$ is an edge, so is $\{j,k\}$ for $i<j<k$. Dually, we say $G$ is \emph{left-filled} if $\{i,j\}$ is an edge whenever there is an edge $\{i,k\}$ for $i<j<k$. We prove that $L_G$ is a lattice whenever $G$ is either left-filled or right-filled. More precisely, these are the cases when $L_G$ is a semilattice quotient of the weak order. For other graphs, the poset $L_G$ may still be a lattice, even if it is not a semilattice quotient of the weak order. Some additional examples and conjectures are discussed in Section~\ref{sec:other}. The rest of the paper is organized as follows. We introduce the poset of maximal tubings $L_G$ in Section~\ref{sec:tubing}. The main result in this section is Theorem~\ref{thm:NRC}, which states that $L_G$ has the \emph{non-revisiting chain property}, defined in Section~\ref{subsec:NRC}. In Section~\ref{sec:lattice}, we recall the congruence-uniform lattice structure of the weak order on permutations and elaborate on the canonical map from permutations to maximal tubings. Sections~\ref{sec_lattice} and~\ref{sec_hopf} are devoted to proving Theorems~\ref{thm_main_lattice} and~\ref{thm_main}, respectively. We end the paper with some open problems and conjectures in Section~\ref{sec:other}. \section{Poset of maximal tubings}\label{sec:tubing} \subsection{Tubings and $G$-trees} In this section, we recall the principal combinatorial objects in this paper, namely the maximal tubings of a graph and $G$-trees. Let $G=(V,E)$ be a simple graph with vertex set $V=[n]:=\{1,\ldots,n\}$. If $I\subseteq V$, we let $G|_I$ denote the induced subgraph of $G$ with vertex set $I$. A \emph{tube} is a nonempty subset $I$ of vertices such that the induced subgraph $G|_I$ is connected. Any tube not equal to $V$ is called a \emph{proper tube}. We let $\Ical(G)$ be the set of all tubes of $G$. 
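For concreteness, the following short Python sketch (a minimal illustration; the helper names and the example graphs are ours) enumerates the tubes of a small graph and tests the filled condition from the Introduction.
\begin{verbatim}
from itertools import combinations

def neighbors(v, edges):
    return {w for e in edges if v in e for w in e if w != v}

def is_connected(vertices, edges):
    vertices = set(vertices)
    seen, stack = set(), [min(vertices)]
    while stack:
        v = stack.pop()
        if v not in seen:
            seen.add(v)
            stack.extend(neighbors(v, edges) & vertices)
    return seen == vertices

def tubes(n, edges):           # all nonempty I with G|_I connected
    return [set(I) for r in range(1, n + 1)
            for I in combinations(range(1, n + 1), r)
            if is_connected(I, edges)]

def is_filled(edges):          # every edge {i,k} forces {i,j} and {j,k}, i<j<k
    return all(frozenset({i, j}) in edges and frozenset({j, k}) in edges
               for e in edges
               for i, k in [(min(e), max(e))]
               for j in range(i + 1, k))

path = {frozenset({1, 2}), frozenset({2, 3})}   # the path 1-2-3
g2 = {frozenset({1, 3}), frozenset({2, 3})}     # the path 1-3-2
print("tubes of the path:", tubes(3, path))     # {1,3} is not a tube
print("filled:", is_filled(path), is_filled(g2))
\end{verbatim}
For the path $1-2-3$ the set $\{1,3\}$ is not a tube, and every edge vacuously satisfies the filled condition; the graph with edges $\{1,3\}$ and $\{2,3\}$ is right-filled but not filled, since the edge $\{1,2\}$ is missing.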
We define the \emph{deletion} $G\setm I$ to be the graph $G|_{V\setm I}$ and the \emph{contraction} (or \emph{reconnected complement}) $G/I$ as the graph with vertex set $V\setm I$ and edges $\{i,j\},\ (i\neq j)$ if either $\{i,j\}\in E(G)$ or there exists a tube $J$ of $G|_I$ such that $\{i,k\}\in E(G)$ and $\{j,l\}\in E(G)$ for some $k,l\in J$. Note that we define deletion and contraction on sets of vertices of $G$ rather than on edges, as is done for graphic matroids. Furthermore, the contracted graph $G/I$ is always simple, i.e., it has no loops or parallel edges. Two tubes $I, J$ are said to be \emph{compatible} if either \begin{itemize} \item they are \emph{nested}: $I\subseteq J$ or $J\subseteq I$, or \item they are \emph{separated}: $I\cup J$ is not a tube. \end{itemize} A \emph{tubing} $\Xcal$ of $G$ is any collection of pairwise compatible tubes. The collection $\Xcal$ is said to be a \emph{maximal tubing} if it is maximal by inclusion. We let $\MTub(G)$ be the set of maximal tubings of the graph $G$. If $\Xcal$ is a tubing of $G$ and $X_1,\ldots,X_r\in\Xcal$ are pairwise disjoint, then the union $I=X_1\cup\cdots\cup X_r$ is called an \emph{ideal} of $\Xcal$. This terminology may be explained by the connection to $G$-trees given later in this section. \begin{lemma} If $\Xcal$ is a tubing of $G$ with an ideal $I$, then there is a unique collection $X_1,\ldots, X_r$ of pairwise disjoint tubes in $\Xcal$, namely the connected components of $G|_I$, such that $I=X_1\cup\cdots\cup X_r$. \end{lemma} Tubings of $G$ may be restricted to certain induced subgraphs or contracted graphs as follows. If $I$ is a subset of $[n]$, let $\Comp(I)$ be the set of maximal tubes of $G|_I$; i.e., $J\in\Comp(I)$ if $J\subseteq I$ and $G|_J$ is a connected component of $G|_I$. If $\Xcal$ is a tubing of $G$, we set $$\Xcal|_I:=\bigcup_{J\in\Xcal}\Comp(I\cap J).$$ \begin{lemma}\label{lem:tubing_restriction} Let $\Xcal$ be a tubing of $G$ and $I\subseteq [n]$. The collection $\Xcal|_I$ is a tubing of $G|_I$. If $\Xcal$ is maximal then so is $\Xcal|_I$. \end{lemma} Lemma~\ref{lem:tubing_restriction} can be deduced from a cellular map between different graph associahedra; see \cite[Definition 3.4]{forcey.springfield:2010geometric}. This map is a generalized form of the \emph{Tonks projection}, one of the standard maps from the faces of the permutahedron to the faces of the associahedron. When $I$ is an ideal of $\Xcal$, we set $$\Xcal/I:=\{J\setm I:\ J\in\Xcal,\ J\nsubseteq I\}.$$ \begin{lemma}\label{lem:tubing_del_con} Let $\Xcal$ be a tubing of $G$ with an ideal $I$. The collection $\Xcal/I$ is a tubing of~$G/I$. If $\Xcal$ is maximal then so is $\Xcal/I$. \end{lemma} Any maximal tubing $\Xcal$ contains exactly $n$ tubes. Indeed, we have the following bijection between $\Xcal$ and $[n]$. \begin{lemma} If $\Xcal$ is a maximal tubing, then each tube $I$ contains a unique element $\operatorname{top}T_{\Xcal}(I)\in [n]$ not contained in any proper tube of $\Xcal|_I$. Furthermore, the function $\operatorname{top}T_{\Xcal}$ is a bijection between the tubes in $\Xcal$ and the vertex set $[n]$. \end{lemma} \begin{proof} It is straightforward to check that $\operatorname{top}T_\Xcal(I)$ is well-defined for each tube $I\in \Xcal$. Let $k\in [n]$ and let $\Ical$ be the set of tubes in $\Xcal$ which contain $k$. Observe that $\Ical$ is not empty (because the connected component of $G$ containing $k$ is a tube in $\Xcal$).
Because the tubes in $\Ical$ all contain $k$ and are pairwise compatible, they are nested, so there is a smallest tube $I\in \Ical$ (under containment) which contains $k$. For this tube, we have $\operatorname{top}_\Xcal(I) =k$. It follows that if $\operatorname{top}_\Xcal(I)=\operatorname{top}_\Xcal(J)=k$ then $I=J$. Therefore $\operatorname{top}_\Xcal$ is indeed a bijection. \end{proof}
\begin{figure} \caption{A maximal tubing and the corresponding $G$-tree.}\label{fig:tubing_tree} \end{figure}
Let $T$ be a forest poset on $[n]$. That is, each connected component of $T$ is a rooted tree, and $i<_T k$ whenever $i$ and $k$ belong to the same connected component and the unique path from $i$ to the root of this component passes through $k$. Let $i_\downarrow$ denote the principal order ideal generated by $i$ in $T$. We say that $T$ is a \emph{$G$-forest}, or \emph{$G$-tree} when $T$ is connected, if it satisfies both of the following conditions (see also \cite[Definition~8.1]{postnikov.reiner.williams:2008faces}): \begin{itemize} \item For each $i\in [n]$, the set $i_\downarrow$ is a tube of $G$; \item If $i$ and $k$ are incomparable in $T$, then $i_\downarrow \cup k_\downarrow$ is not a tube of $G$. \end{itemize} Given a $G$-forest $T$, observe that the collection $\chi(T)=\{i_\downarrow: i\in [n]\}$ is a maximal tubing on $G$. Indeed, consider $I=i_\downarrow$ and $J=k_\downarrow$ for any pair $i$ and $k$ in $[n]$. If $i$ and $k$ are not comparable, then it is immediate that $I$ and $J$ are compatible (because $I\cup J$ is not a tube). On the other hand, if $i$ and $k$ are comparable, then either $I\subset J$ or $J\subset I$. The following theorem is essentially \cite[Proposition~8.2]{postnikov.reiner.williams:2008faces}, specialized to the case where the building set $\Bcal$ is the collection of tubes of~$G$. An example of this correspondence is shown in Figure~\ref{fig:tubing_tree}.
\begin{theorem}\label{G-trees} Let $G$ be a graph with vertex set $[n]$. Then the map $\chi$ which sends $T\mapsto \{i_\downarrow: i\in [n]\}$ is a bijection from the set of $G$-forests to the set of maximal tubings on $G$. The inverse to $\chi$, which we denote by $\tau$, maps the maximal tubing $\Xcal$ to a forest poset $T$ satisfying: $\operatorname{top}_\Xcal(I)<_T\operatorname{top}_\Xcal(J)$ if and only if $I\subset J$, where $I$ and $J$ are tubes in $\Xcal$. \end{theorem}
It follows that $G$ is connected if and only if each $G$-forest is actually a $G$-tree.
\subsection{Graph associahedra}\label{subsec:graph_assoc} Before defining graph associahedra, the main polytopes discussed in this paper, we recall the definition of the normal fan of a polytope. A \emph{(polyhedral) fan} $\Ncal$ is a set of cones in $\Rbb^n$ such that for any two elements $C,C^{\pr}\in\Ncal$, their intersection $C\cap C^{\pr}$ is in $\Ncal$ and it is a face of both $C$ and $C^{\pr}$. It is \emph{complete} if $\bigcup_{C\in\Ncal} C=\Rbb^n$ and \emph{pointed} if $\{0\}\in\Ncal$. A pointed fan $\Ncal$ is \emph{simplicial} if the number of extreme rays of each $C\in\Ncal$ is equal to its dimension. We consider a simplicial fan to be a type of ``realization'' of a simplicial complex; more accurately, it is a cone over a geometric realization. For a polytope $P\subseteq\Rbb^n$ and $f\in(\Rbb^n)^*$ in the dual space, we let $P^f$ be the subset of $P$ at which $f$ achieves its maximum value. We consider an equivalence relation on $(\Rbb^n)^*$ where $f\sim g$ if $P^f=P^g$. It is not hard to show that each equivalence class is a relatively open polyhedral cone.
The \emph{normal fan} of $P$ is the set of closures of these cones, which forms a complete polyhedral fan. A polytope is simple if and only if its normal fan is simplicial.
The set of tubings of a graph forms a flag simplicial complex $\Delta_G$, called the \emph{nested set complex}. A set $W$ consisting of the vertices of a connected component of $G$ is a tube that is compatible with every other tube, so it is a cone point in $\Delta_G$. The nested set complex is sometimes defined with these cone points removed since this subcomplex is a simplicial sphere. For our purposes, however, it will be convenient to consider the maximal tubes as part of every maximal tubing of $G$. The nested set complex may be realized as a simplicial fan, which is the normal fan $\Ncal_G$ of a polytope $P_G$ known as the graph associahedron \cite[Theorem 2.6]{carr.devadoss:2006coxeter}, \cite[Theorem 3.14]{feichtner.sturmfels:2005matroid}, \cite[Theorem 7.4]{postnikov:2009permutohedra}. We recall Postnikov's construction below.
For polytopes $P,Q\subseteq\Rbb^n$, their \emph{Minkowski sum} $P+Q$ is the polytope $$P+Q=\{\mathbf{x}+\mathbf{y}\ |\ \mathbf{x}\in P,\ \mathbf{y}\in Q\}.$$ The normal fan of $P$ is a coarsening of the normal fan of $P+Q$ \cite[Proposition~7.12]{ZieglerGu}. Let $\mathbf{e}_1,\ldots,\mathbf{e}_n$ be the standard basis vectors in $\Rbb^n$. Given $I\subseteq[n]$, let $\Delta_I$ be the simplex with vertices $\{\mathbf{e}_i\ |\ i\in I\}$. The \emph{graph associahedron} $P_G$ is the Minkowski sum of simplices $\Delta_I$ over all tubes $I$ of $G$; that is, $$P_G=\sum\Delta_I=\left\{\sum \mathbf{x}_I\ |\ (\mathbf{x}_I\in\Delta_I:\ I\ \mbox{is a tube})\right\}.$$
\begin{figure} \caption{The graph associahedron for the graph with edge set $E=\{\{1,3\},\ldots\}$.} \end{figure}
Proofs that the face lattice of $P_G$ coincides with the nested set complex are given in \cite{feichtner.sturmfels:2005matroid} and \cite{postnikov:2009permutohedra}. We recall the correspondence between maximal tubings and vertices, which will be most important for our purposes. See \cite[Proposition~7.9]{postnikov:2009permutohedra}. Recall that the notation $i_{\downarrow}$ refers to the principal order ideal generated by $i$ in a $G$-tree. For a maximal tubing $\Xcal$, we interpret $i_{\downarrow}$ as the smallest tube in $\Xcal$ that contains the element $i$.
\begin{lemma}\label{polytope_poset} If $\Xcal$ is any maximal tubing, the point $\mathbf{v}^\Xcal=(v_1,\ldots,v_n)$ is a vertex of $P_G$, where $v_i$ is the number of tubes $I$ such that $i\in I$ and $I\subseteq i_{\downarrow}$. Conversely, every vertex of $P_G$ comes from a maximal tubing in this way. \end{lemma}
Before we give the proof of Lemma~\ref{polytope_poset}, we need the following easy lemma.
\begin{lemma}\label{poset_polytope_helper} Let $\Xcal$ be a tubing of $G$ and let $w_1\ldots w_n$ be a permutation of $[n]$ such that $\{w_1,\ldots, w_j\}$ is an ideal of $\Xcal$ for each $j\in[n]$. Suppose that $i=w_j$ for some $j\in[n]$, and write the ideal $\{w_1,\ldots, w_j\}$ as a disjoint union of tubes $X_1\cup \cdots \cup X_r$. Then $i_\downarrow=X_l$ for some $l\in [r]$. \end{lemma}
\begin{proof} Since $i_\downarrow$ is the smallest tube in $\Xcal$ containing $i$, there is a unique $l\in [r]$ such that $i_\downarrow \subseteq X_l$. Assume that $i_\downarrow$ is a proper subset of $X_l$, and choose $k\in X_l\setminus i_\downarrow$ such that $i_\downarrow\cup \{k\}$ is a tube. (This is possible because $X_l$ is a tube; that is, $G|_{X_l}$ is connected.)
Since $k\in \{w_1,\ldots, w_j\}$ (and clearly $k\ne i$), there is some $p<j$ such that $w_p=k$. Now consider the tube $k_\downarrow\subseteq \{w_1,\ldots, w_p\}$. Observe that $i_\downarrow\not\subseteq k_\downarrow$ because $i\not \in k_\downarrow$. Also $k_\downarrow\not\subseteq i_\downarrow$ because $k\notin i_\downarrow$. But $k_\downarrow \cup i_\downarrow$ is a tube (since $\{k\}\cup i_\downarrow$ is a tube), and that is a contradiction. The statement follows. \end{proof} \begin{proof}[Proof of Lemma~\ref{polytope_poset}] By definition, a point $\mathbf{v}\in P_G$ is a vertex if there exists a linear functional $f:\Rbb^n\ra\Rbb$ such that $\mathbf{v}$ is the unique point in $P_G$ at which $f$ achieves its maximum value. We let $P_G^f$ denote this vertex. The key observation is that if $P_G=\sum\Delta_I$ is the decomposition of the graph associahedron $P_G$ as a Minkowski sum of simplices, then $P_G^f=\sum\Delta_I^f$. If $f$ is any linear functional such that $f(\mathbf{e}_i)\neq f(\mathbf{e}_j)$ for all $i\neq j$, then $f$ is maximized at a unique vertex of the simplex $\Delta_I$ for any nonempty $I\subseteq[n]$. Namely, if $w=w_1\cdots w_n$ is the permutation of $[n]$ such that $f(\mathbf{e}_{w_1})<\cdots <f(\mathbf{e}_{w_n})$, then $\Delta_I^f=\mathbf{e}_{w_k}$ where $k$ is the maximum index such that $w_k\in I$. Now let $\Xcal$ be a maximal tubing, and let $\mathbf{v}=\mathbf{v}^{\Xcal}$ be defined as above. Let $w=w_1\cdots w_n$ be a permutation such that $\{w_1,\ldots,w_j\}$ is an ideal of $\Xcal$ for all $j$. (Such a permutation exists. For example, take any linear extension of the $G$-tree corresponding to $\Xcal$.) Set $$f(x_1,\ldots,x_n)=x_{w_1}+2x_{w_2}+\cdots+nx_{w_n}.$$ We claim that $P_G^f=\mathbf{v}$. Let $I$ be a tube (not necessarily in $\Xcal$), and let $i\in I$. To verify the claim, we will show that $f|_{\Delta_I}$ is maximized at the vertex $\mathbf{e}_i$ if and only if $\Delta_I$ contributes $\mathbf{e}_i$ to $\mathbf{v}$. That is, $f|_{\Delta_I}$ is maximized at the vertex $\mathbf{e}_i$ if and only if $I \subseteq i_\downarrow$. Suppose that $i=w_j$ in the permutation~$w$. Observe that $f|_{\Delta_I}$ is maximized at $\mathbf{e}_i$ if and only if $I\subseteq \{w_1,w_2,\ldots, w_j\}$. Write the ideal $\{w_1,w_2,\ldots, w_j\}$ as a disjoint union $X_1\cup X_2\cup\cdots \cup X_r$ of tubes in $\Xcal$. By Lemma~\ref{poset_polytope_helper}, $i_\downarrow=X_l$ for some $l$. If $I\subseteq X_1\cup X_2\cup \cdots \cup X_r$ then $I \subseteq X_l$ because $i\in I$ and $I$ is a tube. Clearly, if $I\subseteq i_\downarrow =X_l$ then $I \subseteq X_1\cup\cdots \cup X_r$. We have proved the claim that $P_G^f=\mathbf{v}$. Next, we prove that every vertex of $P_G$ is of the form $\mathbf{v}^{\Xcal}$ for some $\Xcal$. Let $w$ be a permutation and $f$ any linear functional such that $f(\mathbf{e}_{w_1})<\cdots<f(\mathbf{e}_{w_n})$. If there exists some maximal tubing $\Xcal$ such that $\{w_1,\ldots,w_j\}$ is an ideal of $\Xcal$ for all $j$, then we know that $P_G^f=\mathbf{v}^{\Xcal}$. Indeed, one can define a tubing $\Xcal=\{X_1,\ldots,X_n\}$ where $X_j$ is the largest tube in the subset $\{w_1,\ldots,w_j\}$ containing $w_j$. (That is, $X_j$ is the set of vertices of the connected component of $G|_{ \{w_1,\ldots,w_j\}}$ containing $w_j$.) It is clear that $\Xcal$ has the desired property. \end{proof} If $I$ is any tube of $G$, then the subcomplex of tubings containing $I$ is isomorphic to the product of nested set complexes $\Delta_{G|_I}\times\Delta_{G/I}$. 
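The proof of Lemma~\ref{polytope_poset} is constructive: any permutation $w$ determines a maximal tubing whose ideals include all prefixes $\{w_1,\ldots,w_j\}$ (this is precisely the map $\Psi_G$ from the introduction, described again in Section~\ref{sec:lattice}), and any maximal tubing determines the vertex $\mathbf{v}^{\Xcal}$ of $P_G$. As an illustrative aside (not part of the formal development), the following Python sketch carries out both computations; it reuses \texttt{adj} and \texttt{tubes} from the sketch above, and all identifiers are our own.
\begin{verbatim}
def tubing_from_permutation(adj, w):
    """Maximal tubing of w: the j-th tube is the vertex set of the
    component of G restricted to {w_1,...,w_j} that contains w_j."""
    tubing = set()
    for j in range(1, len(w) + 1):
        prefix = set(w[:j])
        comp, stack = {w[j - 1]}, [w[j - 1]]
        while stack:
            v = stack.pop()
            for u in adj[v] & prefix:
                if u not in comp:
                    comp.add(u)
                    stack.append(u)
        tubing.add(frozenset(comp))
    return tubing

def vertex_of_tubing(adj, tubing):
    """The vertex v^X of P_G: v_i counts the tubes I of G with i in I
    and I contained in the smallest tube of the tubing containing i."""
    all_tubes = tubes(adj)          # helper from the earlier sketch
    coords = []
    for i in range(1, len(adj) + 1):
        i_down = min((X for X in tubing if i in X), key=len)
        coords.append(sum(1 for I in all_tubes if i in I and I <= i_down))
    return tuple(coords)

X = tubing_from_permutation(adj, (2, 1, 3))   # adj as in the earlier sketch
print([sorted(t) for t in X])                 # [[1], [2], [1, 2, 3]] in some order
print(vertex_of_tubing(adj, X))               # (1, 1, 4)
\end{verbatim}
For the graph with edge set $\{\{1,3\},\{2,3\}\}$ and the permutation $213$, this yields the maximal tubing $\{\{1\},\{2\},\{1,2,3\}\}$ and the vertex $(1,1,4)$; the coordinates sum to the total number of tubes, as they must, since each simplex $\Delta_I$ contributes exactly one standard basis vector to $\mathbf{v}^{\Xcal}$.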
Iterating the decomposition $\Delta_{G|_I}\times\Delta_{G/I}$, we may deduce that any face of $P_G$ is isomorphic to a product of graph associahedra. When $G$ is a complete graph, the polytope $P_G$ is the ``standard'' permutahedron, and its normal fan $\Ncal_G$ is the set of cones defined by the braid arrangement. For a general graph $G$, the polytope $P_G$ is a Minkowski summand of the standard permutahedron, so its normal fan is coarser than that defined by the braid arrangement.
Besides the usual ordering of tubings by inclusion, there is an alternate partial order introduced by Forcey \cite{forcey:2012species} and Ronco \cite{ronco:2012tamari}. We describe the restriction of their poset to $\MTub(G)$. Suppose that $\Xcal$ is a maximal tubing and $I$ is a non-maximal tube in $\Xcal$. Since $P_G$ is a simple polytope whose face lattice is dual to $\Delta_G$, there exists a unique tube $J$ distinct from $I$ such that $\Ycal=\Xcal\setm\{I\}\cup\{J\}$ is a maximal tubing of $G$. Define a \emph{flip} as the relation $\Xcal\ra \Ycal$ if $\operatorname{top}_\Xcal(I)<\operatorname{top}_{\Ycal}(J)$. We say $\Xcal\leq \Ycal$ holds if there exists a sequence of flips of maximal tubings of the form $\Xcal\ra\cdots\ra \Ycal$.
\begin{figure} \caption{The poset of maximal tubings $L_G$ for the graph with vertex set $[3]$ and edge set $E=\{\{1,3\},\{2,3\}\}$.}\label{fig:L_ex} \end{figure}
\begin{lemma}\label{lem_poset} The set $\MTub(G)$ is partially ordered by the relation $\leq$. \end{lemma}
\begin{proof} The edges of the graph associahedron $P_G$ take the following form. Let $\Xcal$ and $\Ycal$ be maximal tubings of $G$ such that $\Ycal=\Xcal\setm\{I\}\cup\{J\}$ for some distinct tubes $I,J$. Set $i=\operatorname{top}_\Xcal(I)$ and $j=\operatorname{top}_{\Ycal}(J)$. Then the vertices $\mathbf{v}^{\Xcal}$ and $\mathbf{v}^{\Ycal}$ agree on every coordinate except the $i^{th}$ and $j^{th}$ coordinates. Indeed, $\mathbf{v}^{\Ycal}-\mathbf{v}^{\Xcal}=c(\mathbf{e}_i-\mathbf{e}_j)$, where $c$ is equal to the number of tubes of $G$ contained in $I\cup J$ that contain both $i$ and $j$. Let $\lambda:\Rbb^n\ra\Rbb$ be the linear functional such that $\lambda(x_1,\ldots,x_n)=nx_1+(n-1)x_2+\cdots+x_n$. If $\Xcal$ and $\Ycal$ are as above and $i<j$, then $\lambda(\mathbf{v}^{\Ycal}-\mathbf{v}^{\Xcal})>0$. Hence, the relation $\Xcal\ra \Ycal$ on maximal tubings is induced by the linear functional $\lambda$. Consequently, the relation is acyclic, so its reflexive and transitive closure is a partial order. \end{proof}
We let $(L_G,\leq)$ denote the poset on $\MTub(G)$ defined above. An example of the poset $L_G$ for the graph $G$ with vertex set $V=[3]$ and edge set $E=\{\{1,3\},\{2,3\}\}$ is given in Figure~\ref{fig:L_ex}. The figure illustrates that $L_G$ is the transitive, reflexive closure of an orientation of the 1-skeleton of the graph associahedron $P_G$.
\begin{remark} The proof of Lemma~\ref{lem_poset} identifies the poset of maximal tubings with a poset on the 1-skeleton of the polytope $P_G$ oriented by a linear functional. This type of construction of a poset on the vertices of a polytope appears frequently in the literature, e.g.\ in the shellability of polytopes \cite{bruggesser.mani:1972shellable}, the complexity of the simplex method \cite{kalai:1997linear}, and the generalized Baues problem \cite{bjorner:1992essential}, among others. One may choose to orient the edges of $P_G$ by some other generic linear functional $\lambda^{\pr}$, giving some new partial order $L$ on the vertices of $P_G$.
Letting $w=w_1\cdots w_n$ be the permutation such that $\lambda^{\pr}(\mathbf{e}_{w_1})>\cdots >\lambda^{\pr}(\mathbf{e}_{w_n})$, it is easy to see that $L\cong L_{G^{\pr}}$ where $G^{\pr}$ is the graph obtained by relabeling vertex $w_i$ in $G$ by $i$ for all $i\in[n]$. Hence, by considering the class of posets $L_G$, we are considering all posets on the vertices of a graph associahedron induced by a generic linear functional. \end{remark}
\subsection{Properties of the poset of maximal tubings}\label{subsec_properties} In this section, we cover some basic properties of $L_G$ that hold for any graph $G$. If $H$ is a graph with $V(H)\subseteq\Nbb$, the \emph{standardization} $\std(H)$ is the graph with vertex set $V(\std(H))=[n]$, where $n=|V(H)|$, whose vertices have the same relative order as in $H$. That is, there is a graph isomorphism $\phi:H\ra\std(H)$ such that if $i,j\in V(H),\ i<j$ then $\phi(i)<\phi(j)$.
\begin{lemma}\label{lem_decomposition} Let $G$ be a graph and let $I\subseteq V(G)=[n]$ be such that $G$ does not have any edge $\{i,j\}$ with $i\in I$ and $j\in[n]\setm I$. Then $$L_G\cong L_{\std(G|_I)}\times L_{\std(G|_{[n]\setm I})}.$$ \end{lemma}
\begin{proof} Under the assumptions about $G$ and $I$, there do not exist any tubes $X$ such that $X\cap I\neq\emptyset$ and $X\cap([n]\setm I)\neq\emptyset$. Furthermore, any tube of $G|_I$ is compatible as a tube of $G$ with any tube of $G|_{[n]\setm I}$. Hence, the set $\MTub(G)$ naturally decomposes as a Cartesian product $$\MTub(G)\stackrel{\sim}{\longlra}\MTub(\std(G|_I))\times\MTub(\std(G|_{[n]\setm I})).$$ We claim that this bijection induces the desired isomorphism of posets $$L_G\cong L_{\std(G|_I)}\times L_{\std(G|_{[n]\setm I})}.$$ If $\Xcal,\Ycal\in\MTub(G)$ are such that $\Ycal=\Xcal\setm\{J\}\cup\{J^{\pr}\}$ for some tubes $J,J^{\pr}$, then $J$ and $J^{\pr}$ must be incompatible. Consequently, either both tubes $J,J^{\pr}$ are contained in $I$, or both tubes are contained in $[n]\setm I$. Without loss of generality, assume that $\operatorname{top}_{\Xcal}(J)<\operatorname{top}_{\Ycal}(J^{\pr})$ and that $J$ and $J^{\pr}$ are both subsets of $I$, which implies that $\Xcal\ra\Ycal$ holds in $L_G$. Let $\phi:G|_I\ra\std(G|_I)$ be the natural graph isomorphism onto the standardization. Then the inequality $\operatorname{top}_{\std(\Xcal|_I)}(\phi(J))<\operatorname{top}_{\std(\Ycal|_I)}(\phi(J^{\pr}))$ still holds, so we have the relation $\std(\Xcal|_I)\ra\std(\Ycal|_I)$ in $L_{\std(G|_I)}$. Conversely, if $\Xcal$ and $\Ycal$ are maximal tubings of $\std(G|_I)$ and $\Zcal$ is any maximal tubing of $G|_{[n]\setm I}$, then the relation $\Xcal\ra\Ycal$ in $L_{\std(G|_I)}$ implies a relation $(\phi^{-1}(\Xcal)\cup\Zcal)\ra(\phi^{-1}(\Ycal)\cup\Zcal)$ in $L_G$. \end{proof}
If $(L,\leq)$ is any poset, its \emph{dual} $(L^*,\leq^*)$ is the poset with the same underlying set such that $a\leq b$ if and only if $b\leq^* a$. If $G$ is any graph with $V(G)=[n]$, we let $G^*$ be the graph obtained by swapping vertices $i$ and $n+1-i$ for all $i$. This induces a natural bijection between maximal tubings of $G$ and maximal tubings of $G^*$.
\begin{lemma}\label{graph duality} The natural bijection $\MTub(G)\ra\MTub(G^*)$ induces an isomorphism of posets $L_G^*\cong L_{G^*}$. \end{lemma}
\begin{proof} Let $\Xcal,\Ycal\in\MTub(G)$ be distinct tubings such that $\Ycal=\Xcal\setm\{I\}\cup\{J\}$. Let $\Xcal^*,\Ycal^*\in\MTub(G^*)$ be the corresponding maximal tubings of $G^*$.
Then \begin{align*} \Xcal\ra\Ycal &\LRa \operatorname{top}_{\Xcal}(I)<\operatorname{top}_{\Ycal}(J)\\ &\LRa \operatorname{top}_{\Ycal^*}(J^*)<\operatorname{top}_{\Xcal^*}(I^*)\\ &\LRa \Ycal^*\ra\Xcal^*. \end{align*} Passing to the transitive closure of $\ra$, we deduce that $L_G$ and $L_{G^*}$ are dual posets. \end{proof} \subsection{The non-revisiting chain property}\label{subsec:NRC} In this section, we prove that graph associahedra have the non-revisiting chain property, defined below. This is equivalent to the statement that for any tubing $\Xcal$, the set of maximal tubings containing $\Xcal$ is an interval of~$L_G$. Given a polytope $P$, we will say a linear functional $\lambda:\Rbb^n\ra\Rbb$ is \emph{generic} if it is not constant on any edge of $P$. When $\lambda$ is generic, we let $L(P,\lambda)$ be the poset on the vertices of $P$ where $v\leq w$ if there exists a sequence of vertices $v=v_0,v_1,\ldots,v_l=w$ such that $\lambda(v_0)<\lambda(v_1)<\cdots<\lambda(v_l)$ and $[v_{i-1},v_i]$ is an edge for all $i\in\{1,\ldots,l\}$. The following properties of $L(P,\lambda)$ are immediate. \begin{proposition}\label{prop:omega_properties} Let $P$ be a polytope with a generic linear functional $\lambda$. \begin{enumerate} \item The dual poset $L(P,\lambda)^*$ is isomorphic to $L(P,-\lambda)$. \item If $F$ is a face of $P$, then the inclusion $L(F,\lambda)\hookra L(P,\lambda)$ is order-preserving. \item $L(P,\lambda)$ has a unique minimum $v_{\hat{0}}$ and a unique maximum $v_{\hat{1}}$. \end{enumerate} \end{proposition} The pair $(P,\lambda)$ is said to have the \emph{non-revisiting chain (NRC) property} if whenever $\mathbf{x}<\mathbf{y}<\mathbf{z}$ in $L(P,\lambda)$ such that $\mathbf{x}$ and $\mathbf{z}$ lie in a common face $F$, then $\mathbf{y}$ is also in $F$. The name comes from the fact that if $P$ has the NRC property, then any sequence of vertices following edges monotonically in the direction of $\lambda$ does not return to a face after leaving it. By definition, the NRC property means that faces are \emph{order-convex} subsets of $L(P,\lambda)$. (Recall that a subset $S$ of a poset is \emph{order-convex} provided that whenever elements $x,z\in S$ satisfy $x<z$ then the entire interval $[x,z]$ belongs to $S$.) In light of Proposition~\ref{prop:omega_properties}, this is equivalent to the condition that for any face $F$, the set of vertices of $F$ form an interval of $L(P,\lambda)$ isomorphic to $L(F,\lambda)$. \begin{remark} There is also an unoriented version of the NRC property due to Klee and Wolfe called the \emph{non-revisiting path property}, which is the condition that for any two vertices $\mathbf{v}, \mathbf{w}$ of $P$, there exists a path from $\mathbf{v}$ to $\mathbf{w}$ that does not revisit any facet of $P$. It was known that the Hirsch conjecture on the diameter of 1-skeleta of polytopes is equivalent to the conjecture that every polytope has the non-revisiting path property. These conjectures were formulated to determine the computational complexity of the simplex method from linear programming in the \emph{worst-case} scenario. The Hirsch conjecture was disproved by Santos \cite{santos:2012counterexample}, but many interesting questions remain. In particular, the polynomial Hirsch conjecture remains open. \end{remark} In contrast to the non-revisiting path property, many low-dimensional polytopes lack the non-revisiting chain property. 
For example, if $P$ is a simplex of dimension at least $2$, then $[\mathbf{v}_{\hat{0}},\mathbf{v}_{\hat{1}}]$ is an edge of $P$ that is not an interval of $L(P,\lambda)$. However, the property does behave nicely under Minkowski sum. \begin{proposition}\label{prop:MS_NRF} If $(P,\lambda)$ and $(Q,\lambda)$ have the non-revisiting chain property, then so does $(P+Q,\lambda)$. \end{proposition} The proof of Proposition~\ref{prop:MS_NRF} relies on Lemma~\ref{lem:sum_order_embed}. For polytopes $P$ and $Q$, the normal fan of $P+Q$ is the common refinement of $\Ncal(P)$ and $\Ncal(Q)$; that is, $$\Ncal(P+Q)=\{C\cap C^{\pr}\ |\ C\in\Ncal(P),\ C^{\pr}\in\Ncal(Q)\}.$$ Let $V(P)$ be the set of vertices of $P$, and let $C_v$ be the normal cone to the vertex $v$ in $P$. From the description of the normal fan of $P+Q$, there is a canonical injection $\iota:V(P+Q)\hookra V(P)\times V(Q)$ that assigns a vertex $\mathbf{v}\in P+Q$ to $(\mathbf{u},\mathbf{w})$ if the normal cones satisfy $C_{\mathbf{v}}=C_\mathbf{u}\cap C_\mathbf{w}$. \begin{lemma}\label{lem:sum_order_embed} The map $\iota:V(P+Q)\hookra V(P)\times V(Q)$ is an order-preserving function from $L(P+Q,\lambda)$ to $L(P,\lambda)\times L(Q,\lambda)$. \end{lemma} \begin{proof} Let $E=[\mathbf{v},\mathbf{w}]$ be an edge of $P+Q$, and suppose $\lambda(\mathbf{v})<\lambda(\mathbf{w})$. It suffices to show that $\iota(\mathbf{v})<\iota(\mathbf{w})$. Let $\iota(\mathbf{v})=(\mathbf{v}^{\pr},\mathbf{v}^{\pr\pr})$ and $\iota(\mathbf{w})=(\mathbf{w}^{\pr},\mathbf{w}^{\pr\pr})$. Then the normal cone $C_E$ is the intersection of $C_\mathbf{v}$ and $C_\mathbf{w}$, which themselves are the intersections of $C_{\mathbf{v}^{\pr}},\ C_{\mathbf{v}^{\pr\pr}}$ and $C_{\mathbf{w}^{\pr}},\ C_{\mathbf{w}^{\pr\pr}}$. Since $$C_E=(C_{\mathbf{v}^{\pr}}\cap C_{\mathbf{w}^{\pr}})\cap(C_{\mathbf{v}^{\pr\pr}}\cap C_{\mathbf{w}^{\pr\pr}})$$ is a cone of codimension 1, we may deduce that $C_{\mathbf{v}^{\pr}}\cap C_{\mathbf{w}^{\pr}}$ and $C_{\mathbf{v}^{\pr\pr}}\cap C_{\mathbf{w}^{\pr\pr}}$ are both of codimension $\leq 1$. Hence, the segments $E^{\pr}=[\mathbf{v}^{\pr},\mathbf{w}^{\pr}]$ and $E^{\pr\pr}=[\mathbf{v}^{\pr\pr},\mathbf{w}^{\pr\pr}]$ are either vertices or edges of $P$ and $Q$, respectively. Moreover, if both $E^{\pr}$ and $E^{\pr\pr}$ are edges, then they must be parallel and $E=E^{\pr}+E^{\pr\pr}$. In the event one of them is a vertex, say $E^{\pr\pr}$ (so that $\mathbf{v}''=\mathbf{w}''$), then $E^{\pr}$ must be an edge, and $$\lambda(\mathbf{v}^{\pr})=\lambda(\mathbf{v})-\lambda(\mathbf{v}^{\pr\pr})<\lambda(\mathbf{w})-\lambda(\mathbf{v}^{\pr\pr})=\lambda(\mathbf{w})-\lambda(\mathbf{w}^{\pr\pr})=\lambda(\mathbf{w}^{\pr}).$$ If both $E^\pr$ and $E^{\pr\pr}$ are edges, then since $\lambda$ achieves its minimum value on $E=E^{\pr}+E^{\pr\pr}$ at $\mathbf{v}=\mathbf{v}^{\pr}+\mathbf{v}^{\pr\pr}$, we have $\lambda(\mathbf{v}^{\pr})<\lambda(\mathbf{w}^{\pr})$ and $\lambda(\mathbf{v}^{\pr\pr})<\lambda(\mathbf{w}^{\pr\pr})$. In both cases, $\iota(\mathbf{v})<\iota(\mathbf{w})$ holds. \end{proof} \begin{proof}[Proof of Proposition~\ref{prop:MS_NRF}] Every face of $P+Q$ is of the form $F+F^{\pr}$ where $F$ is a face of $P$ and $F^{\pr}$ is a face of $Q$. Suppose $\mathbf{u},\mathbf{v},\mathbf{w}$ are vertices of $P+Q$ such that $\mathbf{u}<\mathbf{v}<\mathbf{w}$ in $L(P+Q,\lambda)$ and $\mathbf{u},\mathbf{w}\in F+F^{\pr}$. Set $\iota(\mathbf{u})=(\mathbf{u}_P,\mathbf{u}_Q)$, and analogously for $\iota(\mathbf{v})$ and $\iota(\mathbf{w})$. 
Then $\mathbf{u}_P\leq \mathbf{v}_P\leq \mathbf{w}_P$ in $L(P,\lambda)$ and $\mathbf{u}_Q\leq \mathbf{v}_Q\leq \mathbf{w}_Q$ in $L(Q,\lambda)$. Since $P$ and $Q$ have the non-revisiting chain property, $\mathbf{v}_P$ is in $F$ and $\mathbf{v}_Q$ is in $F^{\pr}$. Hence, $\mathbf{v}=\mathbf{v}_P+\mathbf{v}_Q$ is in $F+F^{\pr}$, as desired. \end{proof}
\begin{corollary}[Proposition 7.2 \cite{hersh:2018nonrevisiting}] Every zonotope has the non-revisiting chain property with respect to any generic linear functional. \end{corollary}
We now return to graph associahedra. Let $G$ be a graph on $[n]$, and let $\lambda$ be the linear functional in the proof of Lemma~\ref{lem_poset}, where $\lambda(\mathbf{x})=nx_1+(n-1)x_2+\cdots+x_n$, so that $L_G\cong L(P_G,\lambda)$. Using the decomposition $P_G=\sum\Delta_I$, Lemma~\ref{lem:sum_order_embed} implies that the map $\pi_J:L_G\ra L(\Delta_J,\lambda)$, obtained as the composition $$L_G\hookra\prod_I L(\Delta_I,\lambda)\thra L(\Delta_J,\lambda),$$ is order-preserving. We note that the poset $L(\Delta_J,\lambda)$ is a chain where $\mathbf{e}_i>\mathbf{e}_j$ whenever $i,j\in J$ with $i<j$.
\begin{lemma}\label{NRC helper} Suppose that $\Xcal$ is a maximal tubing of $G$ and $J$ is a tube of $G$, not necessarily in~$\Xcal$. Then there exists a unique $k\in J$ such that $J\subseteq k_{\downarrow}$, and for this $k$ we have $\pi_J(\Xcal)=\mathbf{e}_k.$ \end{lemma}
\begin{proof} Recall that $k_\downarrow$ is the smallest tube in $\Xcal$ that contains $k$. Hence, there is at most one such element $k\in J$ satisfying $J\subseteq k_\downarrow$. (Indeed, if $j\in J$ and $J\subseteq j_\downarrow$ then we have $j\in J\subseteq k_\downarrow$. Thus $j_\downarrow\subseteq k_\downarrow$. By symmetry, $k_\downarrow\subseteq j_\downarrow$. Therefore $j=k$.) Consider the vertex $\mathbf{v}^\Xcal$ in $P_G$. Lemma~\ref{polytope_poset} implies that $\Delta_J$ contributes $\mathbf{e}_k$ to $\mathbf{v}^\Xcal$ if and only if $k\in J$ and $J\subseteq k_\downarrow$. Therefore $\pi_J(\Xcal) = \mathbf{e}_k$, as desired. \end{proof}
\begin{theorem}\label{thm:NRC} The pair $(P_G,\lambda)$ has the non-revisiting chain property. \end{theorem}
\begin{proof} Every face of $P_G$ is the intersection of some facets, and the intersection of a family of order-convex sets is again order-convex. Hence, it suffices to prove that if $F$ is any facet of $P_G$ then $V(F)$ is an order-convex subset of $L(P_G,\lambda)$. We argue by way of contradiction that this set is order-convex by selecting an appropriate projection $\pi_J$. Under the dictionary between tubings of $G$ and faces of $P_G$, if $F$ is a facet, then there exists a tube $I$ such that $$V(F)=\{\mathbf{v}^{\Xcal}\ |\ \Xcal\in\MTub(G),\ I\in\Xcal\}.$$ Suppose that there are maximal tubings $\Xcal<\Ycal<\Zcal$ such that $I\in\Xcal$ and $I\in\Zcal$ but $I$ is not in $\Ycal$. Given that such a triple exists, we are free to assume that $\Xcal\ra\Ycal$ is a flip. Then the flip exchanges $I$ for some tube $I^{\pr}$. Let $a=\operatorname{top}_{\Xcal}(I)$ and $b=\operatorname{top}_{\Ycal}(I^{\pr})$. The union $I\cup I^{\pr}$ is a tube in both $\Xcal$ and $\Ycal$ such that $b=\operatorname{top}_{\Xcal}(I\cup I^{\pr})$ and $a=\operatorname{top}_{\Ycal}(I\cup I^{\pr})$. Hence, $I$ is a maximal tube in the ideal $(I\cup I^{\pr})\setminus \{b\}$ in $\Xcal$. That means $G|_I$ is one of the connected components of $G|_{I\cup I^{\pr}\setm\{b\}}$. Since $I\cup I^{\pr}$ is a tube, this implies $I\cup\{b\}$ is a tube as well. Set $J=I\cup\{b\}$.
We claim that if $\Wcal$ is any maximal tubing containing $I$, then the projection $\pi_J(\Wcal)=\mathbf{e}_b$. If $\pi_J(\Wcal)=\mathbf{e}_k\ne \mathbf{e}_b$ then Lemma~\ref{NRC helper} says that $k\in J$ and $J \subseteq k_\downarrow$. Since $k\ne b$, it follows that $k\in I$. Since $k_\downarrow$ is the smallest tube in $\Wcal$ that contains $k$, we have $k_\downarrow \subseteq I$. But then $I\subsetneq J \subseteq k_\downarrow \subseteq I$, and that is a contradiction. Therefore, $\pi_J(\Wcal) = \mathbf{e}_b$. So $\pi_J(\Xcal)=\mathbf{e}_b=\pi_J(\Zcal)$, but $\pi_J(\Ycal)=\mathbf{e}_a$, contradicting the assumption that $\Ycal<\Zcal$. \end{proof} \begin{corollary}\label{faces_are_intervals} For any tubing $\Ycal$ of $G$, the set of maximal tubings which contain $\Ycal$ is an interval in $L_G$. \end{corollary} \begin{remark} Another property that a polytope graph may have is the \emph{non-leaving face property}, which is satisfied if for any two vertices $u,v$ that lie in a common face $F$ of $P$, every geodesic between $u$ and $v$ is completely contained in $F$. This property holds for all zonotopes, but is quite special for general polytopes. Although ordinary associahedra are known to have the non-leaving face property, not all graph associahedra do. We note that the example geodesic in \cite[Figure 6]{manneville.pilaud:2015graph} that leaves a particular facet cannot be made into a monotone path, so it does not contradict our Theorem~\ref{thm:NRC}. \end{remark} Recall that the M\"obius function $\mu=\mu_L:\Int(L)\ra\Zbb$ is the unique function on the intervals of a finite poset $L$ such that for $x\leq y$: $$\sum_{x\leq z\leq y}\mu(x,z)=\begin{cases}1\ \mbox{if }x=y\\0\ \mbox{if }x\neq y\end{cases}.$$ When $L(P,\lambda)$ is a lattice with the non-revisiting chain property, the M\"obius function was determined in \cite{hersh:2018nonrevisiting}. One way to prove this is to show that $L(P,\lambda)$ is a crosscut-simplicial lattice; cf. \cite{mcconville:2017crosscut}. In the case of the poset of maximal tubings, we may express the M\"obius function as follows. For a tubing $\Xcal$, let $|\Xcal|$ be the number of tubes it contains. \begin{corollary}\label{cor:mobius} Let $G$ be a graph with vertex set $[n]$ such that $L_G$ is a lattice. Let $\Xcal$ be a tubing that contains every maximal tube. The set of maximal tubings containing $\Xcal$ is an interval $[\Ycal,\Zcal]$ of $L_G$ such that $\mu(\Ycal,\Zcal)=(-1)^{n-|\Xcal|}$. If $[\Ycal,\Zcal]$ is not an interval of this form, then $\mu(\Ycal,\Zcal)=0$. \end{corollary} Based on some small examples, we conjecture that Corollary~\ref{cor:mobius} is true even without the assumption that $L_G$ is a lattice. \subsection{Covering relations and $G$-forests}\label{subsec_Gtrees} As above, let $G$ be a graph with vertex set $[n]$. In the following sections, it will be useful to realize $L_G$ as a partial order on the set of $G$-forests. The advantage to working with $G$-forests, rather than maximal tubings, is that cover relations in $L_G$ are encoded by certain adjacent (covering) pairs in the forest poset. As in Theorem~\ref{G-trees}, let $T$ be a $G$-forest and let $\Xcal$ be the maximal tubing $\chi(T)$. Recall that we write $i<_T k$ if $k$ is in the unique path from $i$ to the root. A covering relation \emph{in} $T$ is a pair $i$ and $k$ such that $i<_Tk$ and also, $i$ and $k$ are adjacent in $T$. We say that $k$ \emph{covers} $i$ (or $i$ \emph{is covered by} $k$) and write $i{\,\,<\!\!\!\!\cdot\,\,\,}_T k$ (or $k{\,\,\,\cdot\!\!\!\! 
>\,\,}_T i$). We say that $k$ has a \emph{lower (resp. upper) cover} if there exists an element $i\in T$ such that $i {\,\,<\!\!\!\!\cdot\,\,\,}_Tk$ (resp. $i{\,\,\,\cdot\!\!\!\! >\,\,}_T k)$. The following easy lemma will be useful.
\begin{lemma}\label{parent} Let $T$ be a $G$-tree or $G$-forest. Each element in $T$ has at most one upper cover. In particular, if $i<_T j$ and $i<_T k$, then $j$ and $k$ are comparable. \end{lemma}
\begin{proof} Suppose that $i$ is less than $j$ and $k$ in $T$. Then the tubes $j_\downarrow$ and $k_\downarrow$ have nonempty intersection. Thus, either $j_\downarrow\subseteq k_\downarrow$ or $k_\downarrow \subseteq j_\downarrow$; that is, $j$ and $k$ are comparable. \end{proof}
We say that the pair $(i,k)$ is a \emph{descent of $T$} if $k{\,\,<\!\!\!\!\cdot\,\,\,}_T~i$ and $i<k$ as integers. Dually, the pair $(i,k)$ with $k{\,\,<\!\!\!\!\cdot\,\,\,}_T~i$ is an \emph{ascent of $T$} if $i>k$ as integers. The next proposition follows from Theorem~\ref{G-trees}.
\begin{proposition}\label{cor: covering relations} Suppose that $T$ is a $G$-forest and $\Xcal$ is the corresponding maximal tubing~$\chi(T)$. \begin{itemize} \item Each descent $(i,k)$ in $T$ corresponds bijectively to a covering relation $\Xcal {\,\,\,\cdot\!\!\!\! >\,\,} \Xcal'$ in~$L_G$. \item Each ascent $(i,k)$ in $T$ corresponds bijectively to a covering relation $\Xcal'' {\,\,\,\cdot\!\!\!\! >\,\,} \Xcal$ in $L_G$. \end{itemize} \end{proposition}
\begin{proposition}\label{cover relations} Let $T$ be a $G$-forest with descent $(i,k)$, and let $\Xcal=\chi(T)$ be its corresponding maximal tubing. Write the ideal $\{x: x<_{T} k\}$ as the disjoint union of tubes $Y_1\cup \cdots \cup Y_t$. Then swapping $i$ and $k$, we obtain a $G$-forest covered by $T$ in $L_G$, whose corresponding maximal tubing is $$\Xcal\setminus \{k_\downarrow\} \cup \left\{ i_\downarrow \setminus \left(\{k\} \cup \bigcup Y_{a_j}\right) \right\},$$ where the union $\bigcup Y_{a_j}$ is over all $Y_{a_j}\in \{Y_1,\ldots, Y_t\}$ such that $Y_{a_j}\cup \{i\}$ is not a tube. (Throughout, $x_\downarrow$ is interpreted as the principal order ideal in $T$.) \end{proposition}
\begin{proof} Write $S$ for $ i_\downarrow \setminus \left(\{k\} \cup \bigcup Y_{a_j}\right)$ and $\Ycal$ for $\Xcal\setminus \{k_\downarrow\} \cup \{S\}$. First we show that $\Ycal$ is a maximal tubing. Observe that $S$ is a tube. We check that each tube $I$ in $\Xcal\setminus \{k_\downarrow\}$ is compatible with $S$. Since both $I$ and $i_\downarrow$ are tubes in $\Xcal$, either $I\subset i_\downarrow$, $I\supset i_\downarrow$, or $I\cup i_\downarrow$ is not a tube. If $I\supset i_\downarrow$ or $I\cup i_\downarrow$ is not a tube, then the fact that $S\subset i_\downarrow$ implies that $I$ and $S$ are compatible. So, we assume that $I$ is a subset of $i_\downarrow$. Write $X_1\cup X_2\cup \cdots \cup X_r$ for the ideal $\{x: x<_{T} i\}$. Since $i{\,\,\,\cdot\!\!\!\! >\,\,}_{T} k$ we have $k_\downarrow= X_l$ for some $l$. Thus $I\subseteq X_j$ for some $j\ne l$ or $I\subseteq Y_s$ for some $s\in [t]$. If $I\subseteq X_j$ then it follows immediately that $I\subseteq S$. Similarly, if $I\subseteq Y_s$ and $Y_s\cup \{i\}$ is a tube, then $I\subseteq S$. Assume that $I\subseteq Y_s$ and $Y_{s}\cup \{i\}$ is not a tube. Then $I\not\subseteq S$ and $S\not\subseteq I$. We claim that $Y_s\cup S$ is not a tube. Observe that $X_j$ and $Y_s$ are compatible in $\Xcal$, and neither $X_j\subseteq Y_s$ nor $Y_s\subseteq X_j$, for each $j\in [r]$ with $j\ne l$. Thus, $X_j\cup Y_s$ is not a tube.
The same argument shows that $Y_s\cup Y_j$ is not a tube for each $j\in[t]$ with $j\ne s$. Thus $Y_s\cup S$ is not a tube, and hence $I\cup S$ is not a tube. We conclude that $I$ and $S$ are compatible. It follows that $\Ycal$ is a maximal tubing of $G$. Since $\Ycal$ differs from $\Xcal$ by a flip, it follows that $\Xcal$ covers $\Ycal$ in~$L_G$. \end{proof}
\section{Lattices}\label{sec:lattice}
\subsection{Lattices and lattice congruences} Recall that a poset $L$ is a lattice if each pair of elements $x$ and $y$ has a greatest lower bound, or \emph{meet}, $x\wedge y$, and a least upper bound, or \emph{join}, $x\vee y$. Throughout we assume that $L$ is finite. A set map $\phi: L\to L'$ is a \emph{lattice map} if it satisfies $\phi(x\wedge y) = \phi(x)\wedge \phi(y)$ and $\phi(x\vee y) = \phi(x)\vee \phi(y)$. We say that $\phi$ preserves both the meet and join operations. When $\phi$ is surjective, we say that it is a \emph{lattice quotient map} and $L'$ is a lattice quotient of $L$. We say that $\phi$ is a \emph{meet (join) semilattice map} if it preserves the meet (join) operation, and the image $\phi(L)$ is called a \emph{meet (join) semilattice quotient} of $L$.
To determine whether a given set map $\phi: L\to L'$ preserves either the meet or join operations, we consider the equivalence relation on $L$ induced by the fibers of $\phi$. That is, set $x\equiv y \mod \Theta_\phi$ if $\phi(x)=\phi(y)$.
\begin{definition}\label{def: cong} Let $L$ be a finite lattice, and let $\Theta$ be an equivalence relation on $L$. We say that $\Theta$ is a \emph{lattice congruence} if it satisfies both of the following conditions for each $x,y,$ and $z$ in $L$. \begin{equation}\label{meet-preserving} \text{ if $x\equiv y \mod \Theta$ then $x\wedge z\equiv y\wedge z\mod \Theta$}\tag{$\Mcal$} \end{equation} \begin{equation}\label{join-preserving} \text{ if $x\equiv y \mod \Theta$ then $x\vee z\equiv y\vee z\mod \Theta$}\tag{$\Jcal$} \end{equation} We say that $\Theta$ is a \emph{meet (join) semilattice congruence} if $\Theta$ satisfies~\ref{meet-preserving} (\ref{join-preserving}). \end{definition}
Observe that $\phi: L\to L'$ preserves the meet (join) if and only if the equivalence relation $\Theta_\phi$ induced by its fibers is a meet (join) semilattice congruence. The next proposition implies that each meet semilattice congruence on $L$ gives rise to a meet semilattice quotient.
\begin{proposition}\label{meet cong} Let $\Theta$ be an equivalence relation on $L$. Then $\Theta$ is a meet semilattice congruence if and only if the following two conditions hold: \begin{enumerate} \item Each $\Theta$-class has a unique minimal element; \item the map $\pi_\downarrow^\Theta:L\to L$ which sends $x$ to the unique minimal element in its $\Theta$-class is order preserving. \end{enumerate} In particular, the subposet of $L$ induced by $\pi_\downarrow^\Theta(L)$ is a meet semilattice quotient of $L$. \end{proposition}
\begin{proof} The proof of the first statement can be found in \cite[Proposition~9-5.2]{reading:2016lattice}. We assume that $\Theta$ is a meet semilattice congruence or, equivalently, that the two conditions above hold. We check that the subposet of $L$ induced by the image $\pi_\downarrow^{\Theta}(L)$ is a meet semilattice and that $\pi_\downarrow^{\Theta}$ is a meet semilattice map. Suppose that $x$ and $y$ belong to $\pi_\downarrow^\Theta(L)$. We write $x\wedge_\Theta y$ to distinguish the meet operation in $\pi_\downarrow^\Theta(L)$ from the meet operation in $L$.
(In general, these are different operations; that is, it may happen that $x\wedge_\Theta y \ne x\wedge y$.) It is enough to show that the meet $x\wedge_{\Theta} y$ is equal to $\pi_\downarrow^{\Theta}(x\wedge y)$. Because $\pi_\downarrow^\Theta$ is order preserving and fixes $x$ and $y$, we have $\pi_\downarrow^{\Theta}(x\wedge y)\le x, y$. If $z\in \pi_\downarrow^\Theta(L)$ and $z$ is a common lower bound for $x$ and $y$ then $z\le x\wedge y$. Applying the fact that $\pi_\downarrow^\Theta$ is order preserving again, we have $z=\pi_\downarrow^\Theta(z) \le \pi_\downarrow^\Theta(x\wedge y)$. \end{proof}
The set $\Con(L)$ of lattice congruences of $L$ forms a distributive lattice under the refinement order. That is, $\Theta\leq\Theta^{\pr}$ holds if $x\equiv y\mod\Theta$ implies $x\equiv y\mod\Theta^{\pr}$ for all $x,y\in L$. Hence, when $\Con(L)$ is finite, it is the lattice of order ideals of its subposet of join-irreducible elements. If $L$ is a lattice with a cover relation $x\lessdot y$, the \emph{contraction} $\con(x,y)$ is the most refined lattice congruence identifying $x$ and $y$. It is known that $\con(x,y)$ is join-irreducible, and if $L$ is finite, then every join-irreducible lattice congruence is of this form \cite[Proposition 9-5.14]{reading:2016lattice}.
\subsection{Lattice congruences of the weak order}\label{subsec_weak_cong} Recall that $x\le y$ in the weak order on $\mathfrak{S}_n$ if and only if $\operatorname{inv}(x) \subseteq \operatorname{inv}(y)$, where $\operatorname{inv}(x)$ is the set of inversions of $x$. (A pair $(i,k)$ is an \emph{inversion} of $x$ if $i<k$, and $k$ precedes $i$ in $x=x_1\ldots x_n$. That is, $i=x_s$ and $k=x_r$, where $r<s$.) It is well-known that the weak order on $\mathfrak{S}_n$ is a lattice. A \emph{descent} of $x$ is an inversion $(i,k)$ such that $i$ and $k$ are consecutive in $x_1\ldots x_n$. That is, $i=x_s$ and $k=x_{s-1}$, where $s\in \{2,\ldots, n\}$. The \emph{descent set} $\des(x)$ of $x$ is the set of all descents of $x$. An \emph{ascent} is a noninversion $(i,k)$ in which $i=x_{s-1}$ and $k=x_{s}$.
If $y_s=i$ and $y_{s-1}=k$ form a descent of $y$, then swapping the positions of $i$ and $k$, we obtain a permutation $x$ (with $x_l=y_l$ for each $l\in [n]\setminus \{s-1, s\}$, $x_{s-1}=i$, and $x_s=k$) that is covered by $y$ in the weak order. Each lower cover relation $y{\,\,\,\cdot\!\!\!\! >\,\,} x$ corresponds bijectively to a descent of $y$. Dually, each upper cover relation $y{\,\,<\!\!\!\!\cdot\,\,\,} y'$ corresponds bijectively to an ascent of $y$. The following lemma is immediate.
\begin{lemma}\label{descents and inversions} Suppose that $x'> x$ in the weak order on $\mathfrak{S}_n$. Then there exists a descent $(i,k)$ of $x'$ which is not an inversion of $x$. Swapping $i$ and $k$ in $x'$, we obtain a permutation $x''$ which satisfies $x'{\,\,\,\cdot\!\!\!\! >\,\,} x''\ge x.$ \end{lemma}
Recall that each pair $x{\,\,<\!\!\!\!\cdot\,\,\,} y$ maps to a join-irreducible congruence $\con(x,y)$ in $\Con(\mathfrak{S}_n)$. For $n>2$, this map is not injective. We can obtain a bijection by restricting to pairs $x{\,\,<\!\!\!\!\cdot\,\,\,} y$ where $y$ is join-irreducible. Below, we make this bijection explicit with the combinatorics of arc diagrams. An \emph{arc} is a triple $\alpha=(i,k, \epsilon)$ where $1\leq i<k\leq n$ and $\epsilon=(\epsilon_1,\ldots,\epsilon_{k-i-1})$ such that $\epsilon_h\in\{+,-\}$ for $h\in[k-i-1]$.
Listing the numbers $1,\ldots,n$ vertically, an arc is typically drawn as a path from $i$ to $k$ that goes to the left of $j$ if $\epsilon_{j-i}=+$ and to the right of $j$ if $\epsilon_{j-i}=-$.
\begin{figure} \caption{The arc $\alpha(32514,35214)=(2,5,(-,+))$.}\label{fig_arc} \end{figure}
If $x\lessdot y$ is a cover relation of permutations that swaps $i$ and $k$, then we define $\alpha(x,y)=(i,k,\epsilon)$ to be the arc such that for $i<j<k$: $$\epsilon_{j-i}=\begin{cases}+\ \mbox{if }x^{-1}(j) > x^{-1}(i)\\-\ \mbox{if }x^{-1}(j) < x^{-1}(k)\end{cases}.$$ For example, $\alpha(32514,35214)=(2,5,(-,+))$ is the arc in Figure~\ref{fig_arc}.
Given an arc $\alpha=(i,k,\epsilon)$, write $\{l_1<l_2<\ldots < l_p\}$ for the set $\{j:\epsilon_{j-i} = -\}$ and $\{r_1<r_2<\ldots<r_q\}$ for the set $\{j: \epsilon_{j-i} = +\}$. Informally, $l_1<\ldots<l_p$ are the nodes on the left side of the arc $\alpha$, and $r_1<\ldots<r_q$ are the nodes on the right side of the arc. The next result follows from \cite[Proposition~2.3]{reading:2015noncrossing}.
\begin{proposition}\label{arc to perm} Let $\alpha=(i,k,\epsilon)$ be an arc, and let $l_1<\ldots<l_p$ and $r_1<\ldots<r_q$ be defined as above. Then, among all permutations $w\in \mathfrak{S}_n$ such that $\alpha(u,w) =\alpha$ for some $u$ covered by $w$, the unique minimal element is \[j_\alpha= 12\ldots (i-1)\, l_1\ldots l_p \,k\, i\, r_1\ldots r_q\, (k+1) (k+2) \ldots n.\] In particular, $j_\alpha$ is join-irreducible. \end{proposition}
The map $\alpha$ induces a bijection between join-irreducible lattice congruences and join-irreducible permutations.
\begin{theorem}\label{thm_weak_arcs} Given two cover relations $x\lessdot y$ and $x^{\pr}\lessdot y^{\pr}$ in the weak order on $\mathfrak{S}_n$, we have $\con(x,y)=\con(x^{\pr},y^{\pr})$ if and only if $\alpha(x,y)=\alpha(x^{\pr},y^{\pr})$. \end{theorem}
In light of Theorem~\ref{thm_weak_arcs}, we will identify a join-irreducible lattice congruence $\Theta^{\alpha}$ of the weak order by its associated arc $\alpha$. For arcs $\alpha,\beta$, we say that $\alpha$ \emph{forces} $\beta$ if $\Theta^{\beta}\leq\Theta^{\alpha}$. An arc $\alpha=(i,k,\epsilon)$ is a \emph{subarc} of $\beta=(i^{\pr},k^{\pr},\epsilon^{\pr})$ if $i^{\pr}\leq i<k\leq k^{\pr}$ and for all $j\in[k-i-1]$, $\epsilon_j=\epsilon_{j+i-i^{\pr}}^{\pr}$. The following theorem is \cite[Theorem 4.4]{reading:2015noncrossing}, which is a translation of \cite{reading:2004lattice}.
\begin{theorem}\label{thm_forcing_arcs} Given arcs $\alpha$ and $\beta$, $\alpha$ forces $\beta$ if and only if $\alpha$ is a subarc of $\beta$. \end{theorem}
We say that a lattice congruence $\Theta$ \emph{contracts} an arc $\alpha$ if $\Theta^{\alpha} \le \Theta$. At times we say that $\Theta$ contracts a pair $x{\,\,<\!\!\!\!\cdot\,\,\,} y$, when we mean $\Theta^{\alpha(x,y)} \le \Theta$. Equivalently, $x\equiv_\Theta y$. Similarly, $\alpha$ is \emph{uncontracted} if $\Theta^{\alpha}\not\le \Theta$. In this case, for each pair $x{\,\,<\!\!\!\!\cdot\,\,\,} y$ with $\alpha(x,y)=\alpha$, the permutations $x$ and $y$ belong to distinct $\Theta$-classes.
\begin{figure} \caption{The weak order on $\mathfrak{S}_4$; permutations joined by blue zigzags lie in a common class of the congruence $\Theta^{(2,4,(+))}$.}\label{fig_cong} \end{figure}
\begin{example} The weak order on $\mathfrak{S}_4$ is shown in Figure~\ref{fig_cong}. Permutations connected by blue zigzags lie in the same equivalence class of the lattice congruence that contracts the arcs $(2,4,(+)),\ (1,4,(+,+))$ and $(1,4,(-,+))$. The first arc is a subarc of the latter two, so the congruence is the join-irreducible $\Theta^{(2,4,(+))}$. \end{example}
The following is \cite[Corollary~4.5]{reading:2015noncrossing}.
\begin{corollary}\label{uncontracted} A set $U$ of arcs is the set of arcs that are uncontracted by some lattice congruence $\Theta$ if and only if $U$ is closed under taking subarcs. \end{corollary}
\begin{example}\label{subgraphs} Let $V\subset [n]$, and consider the map $\rho: \mathfrak{S}_n\to \mathfrak{S}_{V}$ which sends the permutation $w=w_1\ldots w_n$ to the subword of $w$ in which we delete each $w_i\not\in V$. Let $\Theta$ denote the smallest (i.e., most refined) lattice congruence on $\mathfrak{S}_n$ such that $\rho(x)= \rho(y)$ implies $x\equiv _\Theta y$. We claim that $\rho$ is a lattice map if and only if $V$ is an interval \cite[Example~2.2]{reading:2017homomorphisms}. First assume that $\rho$ is a lattice map, or, equivalently, that the classes of $\Theta$ are precisely the fibers of $\rho$. Then $\alpha(x,y)$ is uncontracted by $\Theta$ whenever $\rho(x)\ne \rho(y)$. This happens whenever $x=(i,k)y$ with $i,k\in V$. Thus, the set of arcs uncontracted by $\Theta$ is $\{(i,k,\epsilon): i,k\in V\}$. Since this set must be closed under taking subarcs, it follows that $V$ is an interval. Conversely, if $V$ is an interval then the set $U=\{(i,k,\epsilon): i,k\in V\}$ is the set of uncontracted arcs for some lattice congruence $\Theta'$ (because this set is closed under taking subarcs). For each $x{\,\,<\!\!\!\!\cdot\,\,\,} y$, we have $\alpha(x,y)\notin U$ if and only if $\rho(x)=\rho(y)$. Thus $\Theta'= \Theta$, and the equivalence classes of $\Theta$ are precisely the fibers of $\rho$. \end{example}
\subsection{Map from permutations to $G$-forests} Recall that for any graph $G$ with vertex set $[n]$, and permutation $w=w_1\ldots w_n$, we have $\Psi_G(w) = \{X_1,\ldots, X_n\}$, where $X_j$ is the largest tube contained in the subset $\{w_1,\ldots,w_j\}$ that contains $w_j$. That is, $X_j$ is the set of vertices of the connected component of $G|_{ \{w_1,\ldots,w_j\}}$ containing $w_j$.
Next, we recursively describe the surjection $\Psi_G: \mathfrak{S}_n\to L_G$ as a map onto the set of $G$-trees. Given a connected graph $G$ with vertex set $[n]$ and a permutation $w=w_1\ldots w_n$, we recursively construct a $G$-tree $T=\Psi_G(w)$ as follows: Let $w_n$ be the root of $T$. Let $G_1,\ldots, G_r$ be the connected components of the subgraph induced by $\{w_1,\ldots, w_{n-1}\}$. Restricting $w_1\ldots w_{n-1}$ to each component $G_i$ gives a subword of $w$. We apply the construction to each subword to obtain subtrees $T_1,\ldots, T_r$. Finally, we attach each subtree to the root $w_n$. The next proposition follows from \cite[Corollary~3.9]{postnikov.reiner.williams:2008faces}.
\begin{proposition}\label{linear extensions} The fiber $\Psi_G^{-1}(T)\subseteq \mathfrak{S}_n$ is the set of linear extensions of~$T$. \end{proposition}
The authors of \cite{postnikov.reiner.williams:2008faces} define a special section of the map $\Psi_G$, whose image we describe below. See \cite[Definition~8.7]{postnikov.reiner.williams:2008faces} and Proposition~\ref{prw 8.9}.
\begin{definition}\label{b-permutations} \normalfont Let $G$ be a graph with vertex set $[n]$. A permutation $w$ in $\mathfrak{S}_n$ is a $G$-permutation provided that, for each $i\in[n]$, the elements $w_i$ and $\max\{w_1,\ldots, w_i\}$ lie in the same connected component of $G|_{\{w_1,\ldots, w_i\}}$. \end{definition}
The following lemma is \cite[Proposition~8.10]{postnikov.reiner.williams:2008faces}.
\begin{lemma}\label{lex smallest} Let $T$ be a $G$-forest and let $w\in \Psi_G^{-1}(T)$.
Then $w$ is a $G$-permutation if and only if it is the lexicographically minimal linear extension of $T$. \end{lemma} Let $V$ be a subset of the vertex set $[n]$, and let $G'$ be the subgraph of $G$ induced by $V$. We write $\rho_{G'}: L_G\to L_{G'}$ for the map which takes a maximal tubing $\Xcal$ to $\Xcal|_{V}$. Similarly, write $\rho_{V}: \mathfrak{S}_n\to \mathfrak{S}_{V}$ for the map which sends a permutation $w=w_1w_2\ldots w_n$ to the subword in which we delete each $w_i\notin V$ (without changing the order of the remaining entries). \begin{lemma}\label{lem: interval lemma} Let $G$ be a graph with vertex set $[n]$. If $\Psi_G:\mathfrak{S}_n \to L_G$ is a lattice map, then for each $V\subseteq[n]$ and induced subgraph $G'=G|_{V}$, the map $\Psi_{\std(G')}: \mathfrak{S}_{\std(V)} \to L_{\std(G')}$ is a lattice map. \end{lemma} \begin{proof} Let $V=a_1<a_2<\cdots<a_r$ and $[n]\setminus V = b_1<b_2<\cdots<b_s$, where $r+s=n$. We consider the interval $I$ in the weak order on $\mathfrak{S}_n$ whose elements consist of all of the permutations on $V$ followed by the fixed permutation $b_1b_2\ldots b_s$. Observe that for each $\Xcal\in \Psi_G(I)$, the set $V$ is an ideal. Indeed, let $w=w_1\ldots w_rb_1\ldots b_s$ be a permutation in $I$ and let $\Psi_G(w)=\Xcal=\{X_1\ldots, X_n\}$. For each connected component $H$ of $G'$ there is a largest integer $j$ in $[r]$ such that $w_j\in H$. Recall that $X_j$ is the set of vertices of the connected component of $G|_{\{w_1,\ldots w_j\}}$ that contains~$w_j$. Thus $X_j=H$. We claim that the following diagram commutes. \begin{center} \begin{tikzpicture}[scale=0.75] \node (A) at (0,0) {$I$}; \node(B) at (3,0) {$\Psi_G(I)$}; \node (C) at (0,-2) {$\mathfrak{S}_{V}$}; \node (D) at (3,-2) {$L_{G'}$}; \node (E) at (-0.25,-3.2) {$\mathfrak{S}_{\std(V)}$}; \node (F) at (3.35, -3.2) {$L_{\std(G')}$}; \node at (1.65,.25) {\scriptsize{$\Psi_G$}}; \node at (-.35,-1) {\scriptsize{$\rho_{V}$}}; \node at (3.35,-1) {\scriptsize{$\rho_{G'}$}}; \node at (1.65,-1.75) {\scriptsize{$\Psi_{G'}$}}; \node at (1.6,-2.85) {\scriptsize{$\Psi_{\std(G')}$}}; \draw[->>] (A.east)--(B.west); \draw[->] (A.south)--(C.north); \draw[->>] (C.east)--(D.west); \draw[->] (B.south)--(D.north); \draw[-] (3.,-3) -- (3, -2.5); \draw[-] (3.1,-3) -- (3.1, -2.5); \draw[-] (-.05,-3) -- (-.05, -2.5); \draw[-] (.05,-3) -- (.05, -2.5); \draw[->>] (E.east) -- (F.west); \end{tikzpicture} \end{center} Set $\Psi_G(w):=\Xcal$ and $\Psi_{G'}(\rho_V(w)):=\Ycal$, where $w= w_1\ldots w_rb_1\ldots b_s$ as above. Observe that $w_1<w_2<\cdots<w_r$ is a linear extension for both $\tau(\Xcal|_V)$ and $\tau(\Ycal)$. Therefore, $\rho_{G'}(\Psi_{G}(w))=\Xcal|_{V}=\Ycal=\Psi_{G'}(\rho_{V}(w))$, and the diagram commutes. Next, we check that $\rho_{G'}: \Psi_{G}(I)\to L_{G'}$ is a poset isomorphism. Because $\rho_{V}:I\to \mathfrak{S}_V$ is a poset isomorphism, it follows that $\rho_{G'}$ is surjective. Suppose that $\Xcal, \Ycal\in \Psi_G(I)$ with $\Xcal|_V =\Zcal= \Ycal|_V.$ We will argue that $\tau(\Xcal)=T_{\Xcal}$ and $\tau(\Ycal)= T_{\Ycal}$ are equal. The only possible difference between $T_\Xcal$ and $T_\Ycal$ must occur among the elements of $V$. (Each $i,k\notin V$ that are in the same connected component of $G|_{[n]\setminus V}$ are linearly ordered by $b_1<b_2<\cdots<b_s$.) We write $<_\Xcal$ for the order relation in $T_\Xcal$ and similarly $<_\Ycal$ for the relation in $T_\Ycal$. Assume that $i<_\Xcal k$ and $i\not<_\Ycal k$ for some $i,k\in V$. 
Observe that the pair must be incomparable in the $G'$-forest $\tau(\Zcal)$. Thus, $\{j\in [n]: j\le_\Xcal k\}\cap V$ is not a tube. But since $V$ is an ideal in $\Xcal$, we have $\{j\in [n]: j\le_\Xcal k\}\cap V = \{j\in [n]: j\le_\Xcal k\}$. The latter is clearly a tube. By this contradiction, we conclude that $\Xcal= \Ycal$, as desired. \end{proof}
\section{Lattices of maximal tubings}\label{sec_lattice}
\subsection{Right-filled graphs} We say that a graph $G$ with vertex set $[n]$ and edge set $E$ is \emph{right-filled} provided that the following implication holds: \begin{equation*}\label{right filled} \text{If $\{i,k\}\in E$ then $\{j,k\}$ also belongs to $E$ for each $1\le i<j<k\le n$.}\tag{RF} \end{equation*} Dually, we say that $G$ is \emph{left-filled} provided that: \begin{equation*}\label{left filled} \text{If $\{i,k\}\in E$ then $\{i,j\}$ also belongs to $E$ for each $1\le i<j<k\le n$.}\tag{LF} \end{equation*}
The goal of this section is twofold: first, we show that if $G$ is right-filled, then the subposet of the weak order induced by the set of $G$-permutations in~$\mathfrak{S}_n$ is a lattice. In fact, we show that this subposet is a meet semilattice quotient of the weak order. (See Corollary~\ref{g-perm lattice cong}.) Second, we prove that $L_G$ is isomorphic to the subposet of the weak order induced by the set of $G$-permutations in~$\mathfrak{S}_n$. Hence, $L_G$ is a lattice. (See Theorem~\ref{inversion order}.)
\begin{remark}\label{rmk: dualizing and left-filled} Recall that $G^*$ is the graph obtained from $G$ by swapping the labels $i$ and $n+1-i$ for all $i\in [n]$. Observe that $G$ is right-filled if and only if $G^*$ is left-filled. Lemma~\ref{graph duality} says that $L_{G^*} \cong {L_G}^*$; thus, we obtain dual versions of Corollary~\ref{g-perm lattice cong} and Theorem~\ref{inversion order} when $G$ is left-filled. Some care is required. In particular, we note that for left-filled graphs, $L_G$ is \emph{not} isomorphic to the subposet induced by the set of $G$-permutations. \end{remark}
\begin{proposition}\label{connected} Suppose that $G$ is a right-filled graph with vertex set $[n]$ and connected components $G_i=(V_i, E_i)$ where $i\in [s]$ and $s\ge 2$. If $\Psi_i: \mathfrak{S}_{V_i} \to L_{G_i}$ is a lattice map for each $i$, then $\Psi_G:\mathfrak{S}_n\to L_G$ is a lattice map. \end{proposition}
\begin{proof} We claim that each $V_i$ is an interval. Write $m_i$ for $\min(V_i)$ and $M_i$ for $\max(V_i)$. Observe that each geodesic $M_i=q_0, q_1, \ldots, q_k=m_i$ in the graph $G$ monotonically decreases. That is, $q_0>q_1>\ldots>q_k$. Indeed, if there exists $q_r>q_{r+1}<q_{r+2}$ then the~\ref{right filled} property implies that $q_r$ and $q_{r+2}$ are adjacent. Applying the~\ref{right filled} property again, each closed interval $[q_{r+1}, q_r]$ is contained in $V_i$. Thus, $V_i$ is an interval.
Observe that the following diagram commutes. By Lemma~\ref{lem_decomposition}, the vertical map from $L_G$ to $\prod_{i=1}^sL_{G_i}$ is an isomorphism. The vertical map from $\mathfrak{S}_n$ onto $\prod_{i=1}^s\mathfrak{S}_{V_i}$ is $\rho:=\prod_{i=1}^s\rho_{V_i}$, where $\rho_{V_i}$ is the restriction map from Example~\ref{subgraphs}. Since each $V_i$ is an interval, $\rho$ is a lattice map. The statement of the proposition now follows.
\begin{center} \begin{tikzpicture}[scale=0.75] \node (A) at (0,0) {$\mathfrak{S}_n$}; \node(B) at (5,0) {$L_G$}; \node (C) at (0,-2) {$\prod_{i=1}^s\mathfrak{S}_{V_i}$}; \node (D) at (5,-2) {$\prod_{i=1}^s L_{G_i}$}; \node at (2.75,.35) {\scriptsize{$\Psi_G$}}; \node at (2.5,-1.7) {\scriptsize{$\prod_{i=1}^s\Psi_{G_{i}}$}}; \draw[->>] (A.east)--(B.west); \draw[->>] (A.south)--(C.north); \draw[->>] (C.east)--(D.west); \draw[->] (B.south)--(D.north); \end{tikzpicture} \end{center} \end{proof} With Proposition~\ref{connected} in hand, we assume throughout that $G$ is connected. We realize $L_G$ as a poset on the set of $G$-trees, where $T\le T'$ if and only if $\chi(T)\le \chi(T')$, where $\chi$ is the bijection $T\mapsto \{x_\downarrow: x\in [n]\}$ from Theorem~\ref{G-trees}. \begin{lemma}\label{lem: child relations} Let $G$ be a left or right-filled graph, and let $T\in L_G$. If $x_1$ and $x_2$ are incomparable in $T$, then there does not exist any triple $i<j<k$ such that $i$ and $k$ belong to ${x_1}_\downarrow$ and $j\in {x_2}_\downarrow$. \end{lemma} \begin{proof} Consider the set of pairs $i<k$ in ${x_1}_\downarrow$ such that $i<k-1$. Because ${x_1}_\downarrow$ is a tube, there is a path $i=q_0,\ldots, q_m=k$ in $G$ such that each $q_l$ belongs to ${x_1}_\downarrow$. Choose such a path so that $m$ is minimal. We argue by induction on $m$ that there exists no vertex $j$ in ${x_2}_\downarrow$ satisfying $i<j<k$. Observe that if $j\in {x_2}_\downarrow$ then neither $\{i,j\}$ nor $\{j,k\}$ are edges in $G$. Thus the base case holds because $G$ is either right-filled or left-filled. Now assume that $m>1$, let $j\in \{i+1,\ldots, k-1\}$, and for the moment assume that $G$ is right-filled. Consider $q_{m-1}$. If $q_{m-1}<j$, then the \ref{right filled}-property implies that $j$ and $k$ are adjacent. Hence $j\not \in {x_2}_\downarrow$. If $j<q_{m-1}$ then we have $i<j<q_{m-1}$, and $i$ and $q_{m-1}$ are connected by a path of length~$m-1$. By induction $j\not \in {x_2}_\downarrow$, and the statement follows. If $G$ is left-filled the proof is similar, except that we compare $j$ with $q_2$ instead of $q_{m-1}$. \end{proof} \begin{proposition}\label{prop: child relations} Let $G$ be a left or right-filled graph, and let $T\in L_G$. Suppose that $x_1$ and $x_2$ are incomparable in $T$ and that $x_1<x_2$ as integers. Then each element in ${x_1}_\downarrow$ is smaller than each element in ${x_2}_\downarrow$ (as integers). \end{proposition} \begin{proof} Set $i:= \max \{a\in {x_2}_{\downarrow}: \text{there exists } b\in {x_1}_\downarrow\text{ with }a< b\}$. So, there is some $j\in {x_1}_\downarrow$ such that $i<j$. Assume that $i$ is the largest element in ${x_2}_\downarrow$. Thus, $x_1<i<j$ (where we have the first inequality because $x_1<x_2$ and $x_2<i$.) Since $x_1$ and $j$ both belong to ${x_1}_\downarrow$, we have a contradiction to Lemma~\ref{lem: child relations}. So, there exists some number $k$ in ${x_2}_\downarrow$ with $k>i$, and the maximality of $i$ implies that $k\not<j$ for any $j\in {x_1}_\downarrow$. Then the triple $i<j<k$ satisfies: $i,k$ both in ${x_2}_{\downarrow}$ and $j\in {x_1}_\downarrow$. That is a contradiction to Lemma~\ref{lem: child relations} again. (Note that the roles of $x_1$ and $x_2$ are symmetric in Lemma~\ref{lem: child relations}.) The proposition follows. \end{proof} Below we recursively construct a special linear extension $\sigma(T)$ for $T\in L_G$. First, if $T$ has a root $x$ then we remove it. 
Let $C_1,\ldots,C_r$ be the connected components of $T\setminus \{x\}$. We index the connected components so that each element of $C_i$ is less than each element of $C_j$ (as integers) whenever $i<j$. Next, we apply the construction to each component to obtain $v_{{C_i}_1}\ldots v_{{C_i}_s}=\sigma(C_i)$ for $i\in [r]$. Finally, we concatenate the words $\sigma(C_1)\ldots \sigma(C_r)$, ending with the root~$x$ (if there is one). Observe that $\sigma(T)$ is the lexicographically minimal linear extension of the $G$-tree $T$. The next proposition follows from Lemma~\ref{lex smallest} (see also \cite[Proposition~8.10]{postnikov.reiner.williams:2008faces}). \begin{proposition}\label{prw 8.9} The image $\sigma(L_G)$ is equal to the set of $G$-permutations in $\mathfrak{S}_n$. Moreover, the map $\Psi_G$ induces a bijection between $G$-permutations and $G$-trees, and $\sigma: L_G\to \mathfrak{S}_n$ is a section of the map $\Psi_G$. \end{proposition} A pair of numbers $(i,j)$ is an \emph{inversion} of a $G$-tree $T$ if $i<j$ and $j<_T i$. In particular, a descent of $T$ is an inversion $(i,j)$ such that $i$ covers $j$ in $T$. A pair $(i,j)$ is a non-inversion if $i<j$ and $i<_T j$. (Pairs $i$ and $j$ which are incomparable in $T$ are neither inversions nor non-inversions.) Write $\operatorname{inv}(T)$ for the set of all inversions of $T$ and $\operatorname{inv}^\wedge(T)$ for the set of non-inversions. The next lemma follows immediately from Proposition~\ref{prop: child relations} and the construction of $\sigma(T)$. The second item of the statement also follows from \cite[Proposition~9.5]{postnikov.reiner.williams:2008faces}. \begin{lemma}\label{lem: pidown} Let $G$ be a left- or right-filled graph with vertex set $[n]$, and let $T\in L_G$. Suppose that $x$ and $x'$ are incomparable in $T$ and that $x$ precedes $x'$ in the linear extension $\sigma(T)$. Then $x$ is less than $x'$ as integers. In particular: \begin{itemize} \item the inversion set of $T$ is equal to the inversion set of $\sigma(T)$; \item the descent set of $T$ is equal to the descent set of $\sigma(T)$. \end{itemize} \end{lemma} \begin{remark}\label{rmk: dual section} Dually we recursively construct a (lexicographically) largest linear extension $\sigma^*(T)$ as follows: As before $C_1,\ldots, C_r$ are the connected components of $T\setminus \{x\}$ (if $T$ has root $x$) or $T$ (if $T$ does not have a root), indexed so that each element in $C_i$ is less than each element in $C_j$ if $i<j$. Apply the construction $\sigma^*(C_i)$ to each connected component. Concatenate the words: $\sigma^*(C_r)\ldots \sigma^*(C_1)$, and finally end with the root $x$. Indeed, if $G$ is either left- or right-filled, then $\sigma^*(T)$ is the unique largest element of the fiber $\Psi_G^{-1}(T)$. \end{remark} \begin{proposition}\label{inversions} Suppose that $G$ is a right-filled graph with vertex set $[n]$. If $w\le w'$ in the weak order on $\mathfrak{S}_n$ then $\operatorname{inv}(\Psi_G(w))\subseteq \operatorname{inv}(\Psi_G(w'))$. \end{proposition} \begin{proof} Write $T$ for $\Psi_G(w)$ and $T'$ for $\Psi_G(w')$. Suppose that $(i,k)$ is an inversion in~$T$. Since $w$ is a linear extension of $T$, we have $(i,k)\in \operatorname{inv}(w)$. Hence $(i,k)\in \operatorname{inv}(w')$. If $i$ and $k$ are comparable in $T'$, then $(i,k)\in \operatorname{inv}(T')$, since $w'$ is a linear extension of $T'$. Because $(i,k)\in \operatorname{inv}(T)$, there is a path $i=q_0,\ldots, q_m=k$ (which we take to have minimal length) in $G$ connecting $i$ to $k$ such that $q_l<_T i$ for each $l\in[m]$.
We prove, by induction on $m$, that $i$ and $k$ are comparable in $T'$. In the base case $i$ and $k$ are adjacent in~$G$, and the claim is immediate. Assume $m>1$ (so, in particular, $i$ and $k$ are \textit{not} adjacent in $G$). We make two easy observations: First, because $G$ is right-filled, $q_{m-1}>i$. (Indeed, if $q_{m-1}<i<k$ then $G$ must have the edge $\{i,k\}$, contrary to our assumption that $i$ and $k$ are not adjacent.) Thus, $(i,q_{m-1})\in \operatorname{inv}(T)$. By induction, $i$ and $q_{m-1}$ are comparable in $T'$. Thus, $q_{m-1}<_{T'} i$. Second, because $q_{m-1}$ is adjacent to $k$, they are also comparable in~$T'$. If $k<_{T'} q_{m-1}$ then we are done by transitivity. On the other hand, if $q_{m-1} <_{T'} k$ then Lemma~\ref{parent} implies that $k$ and $i$ are comparable in $T'$. \end{proof} We obtain the following corollary. \begin{corollary}\label{g-perm lattice cong} Let $G$ be a right-filled graph with vertex set $[n]$ and let $T\in L_G$. Then the equivalence relation $\Theta_G$ induced by the fibers of $\Psi_G$ satisfies: \begin{enumerate} \item The $\Theta_G$-class $\Psi_G^{-1}(T)$ has a smallest element in the weak order, namely the $G$-permutation $v$ in $\Psi_G^{-1}(T)$; \item the map $\pi_\downarrow^G:\mathfrak{S}_n\to \mathfrak{S}_n$ which sends $w$ to the unique $G$-permutation in its $\Theta_G$-class is order preserving. \end{enumerate} Thus, the subposet of the weak order on $\mathfrak{S}_n$ induced by the set of $G$-permutations is a meet-semilattice quotient of $\mathfrak{S}_n$. In particular, the subposet induced by the set of $G$-permutations is a lattice. \end{corollary} \begin{proof} Suppose that $w\in \Psi_G^{-1}(T)$. Since $w$ is a linear extension of $T$, $\operatorname{inv}(T)\subseteq \operatorname{inv}(w)$. Since $\operatorname{inv}(T)=\operatorname{inv}(v)$, we conclude that $v\le w$. Thus, $v$ is the unique minimal element of the fiber $\Psi_G^{-1}(T)$. Suppose that $w\le w'$ in the weak order on $\mathfrak{S}_n$. Then Proposition~\ref{inversions} says that $\operatorname{inv}(\Psi_G(w))\subseteq \operatorname{inv}(\Psi_G(w'))$. For each $u\in \mathfrak{S}_n$, $\operatorname{inv}(\pi_\downarrow^G(u)) = \operatorname{inv}(\Psi_G(u))$, by Lemma~\ref{lem: pidown}. Thus, $\operatorname{inv}(\pi_\downarrow^G(w))\subseteq \operatorname{inv}(\pi_\downarrow^G(w'))$. The remaining statements of the corollary follow immediately from Proposition~\ref{meet cong}. \end{proof} We are now prepared to state the main theorem of this section. \begin{theorem}\label{inversion order} Suppose that $G$ is right-filled and $T$ and $T'$ belong to $L_G$. Then: \begin{enumerate} \item $T\le T'$ in $L_G$ if and only if $\operatorname{inv}(T)\subseteq \operatorname{inv}(T')$. \item The poset of maximal tubings $L_G$ is isomorphic to the subposet of the weak order induced by the set of $G$-permutations in $\mathfrak{S}_n$. In particular, $L_G$ is a lattice. \item $\Psi_G: \mathfrak{S}_n\to L_G$ is a meet-semilattice map. That is, for all $w, w'\in \mathfrak{S}_n$ we have \[\Psi_G(w\wedge w') = \Psi_G(w)\wedge \Psi_G(w').\] \end{enumerate} \end{theorem} \begin{lemma}\label{descent helper} Suppose that $G$ is a right-filled graph with vertex set $[n]$, let $a\in [n]$, and let $T'\in L_G$. Let the disjoint union $C_1\cup C_2\cup\cdots \cup C_k$ of tubes denote the ideal $a_\downarrow \setminus \{a\}$ in $T'$, indexed so that each element of $C_i$ is less than each element in $C_j$ (as integers) whenever $i<j$.
\begin{enumerate} \item If $(a,x)\in\operatorname{inv}(T')$, then $x$ belongs to~$C_k$. \item If $(a,x)$ is a descent of $T'$, then the $G$-tree $T$ obtained from $T'$ by swapping $a$ and $x$ satisfies: $$\operatorname{inv}(T)\subseteq \operatorname{inv}(T').$$ \end{enumerate} \end{lemma} \begin{proof} By Proposition~\ref{prop: child relations}, the tubes $C_1,\ldots, C_k$ can be indexed as described in the statement of the lemma. Suppose there exists $i<k$ and $x\in C_i$ such that $x>a$. Let $y$ be any element of $C_{i+1}$ that is adjacent to $a$ in $G$. Because $a<x<y$ and $G$ is right-filled, $x$ and $y$ are adjacent in $G$. That is a contradiction. Hence every element of the ideal $a_\downarrow\setminus\{a\}$ that is larger than $a$ lies in $C_k$, which proves the first statement. Suppose that $(p,q)\in \operatorname{inv}(T)$. Hence $p<q$ as integers and $q\in \{y\in [n]: y\le_{T} p\}$. We must show that $q\in \{y\in [n]: y\le_{T'} p\}$. If $p$ is not equal to $a$ or $x$ then the statement follows from the fact that $\{y\in [n]: y\le_{T'} p\}=\{y\in [n]: y\le_{T} p\}$. If $p=a$ then Proposition~\ref{cover relations} implies that $\{y\in [n]: y\le_{T} a\}\subseteq\{y\in [n]: y\le_{T'} a\}$. Thus $(p,q)\in \operatorname{inv}(T')$. Assume that $p=x$, so that we have $a<x<q$, ordered as integers. The first statement of the lemma implies that $q<_{T'} x$ (because $C_k = \{y\in [n]: y\le_{T'} x\}$). Hence $(p,q)\in \operatorname{inv}(T')$. \end{proof} The next lemma is the $G$-tree analog to Lemma~\ref{descents and inversions} (which characterizes covering relations in the weak order on $\mathfrak{S}_n$). \begin{lemma}\label{lem: inversion covers} Let $G$ be a right-filled graph with vertex set $[n]$. Suppose that $T$ and $T'$ are in $L_G$ such that $\operatorname{inv}(T)\subsetneq \operatorname{inv}(T')$. Then there exists $T''$ such that $T'{\,\,\,\cdot\!\!\!\! >\,\,} T''$ and $\operatorname{inv}(T)\subseteq \operatorname{inv}(T'')\subset \operatorname{inv}(T')$. \end{lemma} \begin{proof} We claim that there exists some descent $(i,k)$ of $T'$ that is not an inversion of~$T$. The claim follows from Lemma~\ref{lem: pidown}. Indeed, write $v$ for $\sigma(T)$ and $v'$ for $\sigma(T')$. Lemma~\ref{lem: pidown} implies that $v'>v$ in the weak order. By Lemma~\ref{descents and inversions}, there is some descent $(i,k)$ of $v'$ which is not an inversion of $v$. Since $\operatorname{inv}(v)=\operatorname{inv}(T)$, $\operatorname{inv}(v')=\operatorname{inv}(T')$, and $\des(v')=\des(T')$, the claim follows. Let $T''{\,\,<\!\!\!\!\cdot\,\,\,} T'$ via this $(i,k)$ descent. Next, we apply Proposition~\ref{cover relations} to the covering relation $T'{\,\,\,\cdot\!\!\!\! >\,\,} T''$. As in the notation of that proposition, we interpret $x_\downarrow$ as the principal order ideal in $T'$. We will continue to do so for the remainder of the proof. Write the ideal $\{x: x<_{T'} k\}$ as the disjoint union of tubes $Y_1\cup \cdots \cup Y_t$. Proposition~\ref{cover relations} says that $\chi(T'')$ is equal to $$\chi(T')\setminus \{k_\downarrow\} \cup \left\{ i_\downarrow \setminus \left(\{k\} \cup \bigcup Y_{a_j}\right) \right\},$$ where the union $\bigcup Y_{a_j}$ is over all $Y_{a_j}\in \{Y_1,\ldots, Y_t\}$ such that $\{i\}\cup Y_{a_j}$ is not a tube. We write $B$ for the set $\{b\in\bigcup Y_{a_j}: (i,b)\in \operatorname{inv}(T')\}.$ It follows that $$\operatorname{inv}(T')\setminus (\{(i,k)\}\cup \{(i,b): b\in B\})= \operatorname{inv}(T'').$$ Let $C$ be the set of $c\in \bigcup Y_{a_j}$ such that $(i,c)\in \operatorname{inv}(T)$. (As above, each $Y_{a_j}$ satisfies: $Y_{a_j}\cup\{i\}$ is not a tube; so in particular, no element $c\in C$ is adjacent to $i$.)
To complete the proof, we argue that $C$ is empty. Suppose not, and choose $c\in C$ so that there is a path $i=q_0, q_1,\ldots, q_m=c$ with $q_p \le_T i$ and $q_p\not \in C$ for each $p\in [m-1]$. Consider $q_{m-1}$. On the one hand, if $q_{m-1}<i$ (as integers) then the \ref{right filled}-property implies that $i$ and $c$ are adjacent. But no element in $C$ is adjacent to $i$. So we have a contradiction. On the other hand, if $q_{m-1}>i$ then $(i,q_{m-1})$ is an inversion of $T$. We will argue that $q_{m-1}$ must belong to $C$, and conclude a contradiction. Since $\operatorname{inv}(T)\subset \operatorname{inv}(T')$, we have $(i,q_{m-1})\in \operatorname{inv}(T')$. Thus $q_{m-1}$ is in the ideal $\{x: x<_{T'} i\}$, which we write as a disjoint union of tubes $X_1\cup X_2\cup \cdots \cup X_r$. Because $i{\,\,\,\cdot\!\!\!\! >\,\,}_{T'} k$ and $(i,k)$ is a descent in $T'$, we have $k_{\downarrow}=X_r$ by Lemma~\ref{descent helper}. Since $q_{m-1}$ is adjacent to $c$, both belong to the same tube in the disjoint union $X_1\cup\cdots \cup X_r$. Because $c\in Y_{a_j}\subseteq k_\downarrow$, it follows that $q_{m-1}$ is also in $k_\downarrow$. Similarly, because $c$ and $q_{m-1}$ are adjacent, they belong to the same tube $Y_{a_j}$ in the ideal $\{x: x<_{T'} k\} = Y_1\cup \cdots \cup Y_t$. We conclude that $q_{m-1}\in C$, contradicting our choice of~$c$. Therefore, $\operatorname{inv}(T)\subseteq \operatorname{inv}(T'') \subset \operatorname{inv}(T')$. \end{proof} \begin{proof}[Proof of Theorem~\ref{inversion order}] Lemma~\ref{descent helper} implies that if $T\le T'$ then $\operatorname{inv}(T)\subseteq \operatorname{inv}(T')$. Suppose that $\operatorname{inv}(T)\subseteq \operatorname{inv}(T')$. We argue that $T\le T'$ by induction on the size of $\operatorname{inv}(T')\setminus \operatorname{inv}(T)$. Lemma~\ref{lem: inversion covers} says there exists a $G$-tree $T''$ such that $$\text{$T'{\,\,\,\cdot\!\!\!\! >\,\,} T''$ and $\operatorname{inv}(T)\subseteq \operatorname{inv}(T'')\subset \operatorname{inv}(T')$}.$$ Lemma~\ref{lem: pidown} implies that $\operatorname{inv}(T)=\operatorname{inv}(T')$ if and only if $T=T'$. When $\operatorname{inv}(T)$ and $\operatorname{inv}(T')$ differ by one element, we must have that $T=T''$. When $\operatorname{inv}(T')\setminus \operatorname{inv}(T)$ has $m>1$ elements, the inductive hypothesis implies that $T\le T''{\,\,<\!\!\!\!\cdot\,\,\,} T'$, and we are done. \end{proof} \subsection{Left-filled graphs} In this section we prove the analog of Corollary~\ref{g-perm lattice cong} and Theorem~\ref{inversion order} for left-filled graphs. \begin{corollary}\label{left filled cor} Let $G$ be a left-filled graph with vertex set $[n]$. Then $L_G$ is a lattice, and $\Psi_G: \mathfrak{S}_n\to L_G$ is a join-semilattice map. That is, for all $w, w'\in \mathfrak{S}_n$, we have \[\Psi_G(w\vee w')= \Psi_G(w)\vee \Psi_G(w').\] \end{corollary} \begin{proof} Observe that $G^*$ is right-filled. (Recall that $G^*$ is the graph we obtain by swapping labels $i$ and $n+1-i$ for all $i$.) Lemma~\ref{graph duality} says that $L_{G^*} \cong L_G^*$ (where $L_G^*$ is the dual of $L_G$, as posets). Since $L_{G^*}$ is a lattice (by Theorem~\ref{inversion order}), we conclude that $L_G$ is a lattice. Indeed, Lemma~\ref{graph duality} implies that the following diagram commutes.
\begin{center} \begin{tikzpicture}[scale=0.75] \node (A) at (0,0) {$\mathfrak{S}_n$}; \node(B) at (3,0) {$\mathfrak{S}_n^*$}; \node (C) at (0,-2) {$L_G$}; \node (D) at (3,-2) {$L_{G^*}$}; \node at (-.5,-.85) {\scriptsize{$\Psi_G$}}; \node at (3.5,-.85) {\scriptsize{$\Psi_{G^*}$}}; \draw[<->] (A.east)--(B.west); \draw[->>] (A.south)--(C.north); \draw[<->] (C.east)--(D.west); \draw[->>] (B.south)--(D.north); \end{tikzpicture} \end{center} The maps in the top and bottom rows of the diagram are essentially the same: They both swap $i$ and $n+1-i$ for all $i\in [n]$. Both maps are lattice anti-isomorphisms. It follows from Theorem~\ref{inversion order} that $\Psi_G$ is a join-semilattice map. \end{proof} \subsection{Filled graphs and lattice congruences} We prove our main result (see Theorem~\ref{thm_main_lattice}). \begin{theorem}\label{main} Suppose that $G$ is a graph with vertex set $[n]$ and edge set $E$. Then $\Psi_G: \mathfrak{S}_n\to L_G$ is a lattice quotient map if and only if $G$ is filled. \end{theorem} \begin{proof} If $G$ is filled, then $\Psi_G: \mathfrak{S}_n \to L_G$ preserves the meet operation (by Theorem~\ref{inversion order}) and the join operation (by Corollary~\ref{left filled cor}). Thus $\Psi_G:\mathfrak{S}_n\to L_G$ is a lattice quotient map. Assume that $G$ is not filled. Thus there exists $i<j<k$ such that $\{i,k\}\in E$ but either $\{i,j\}$ or $\{j,k\}$ is not in $E$. Let $G'$ denote the induced subgraph $G|_{\{i,j,k\}}$. We check that in all possible cases $\Psi_{\std(G')}:\mathfrak{S}_{3}\to L_{\std(G')}$ is not a lattice map. By Lemma~\ref{lem: interval lemma}, $\Psi_G: \mathfrak{S}_n\to L_G$ is not a lattice map. In the first case, assume $\{i,k\}$ and $\{j,k\}$ are edges, but $\{i,j\}$ is not. Observe that $\Psi_{\std(G')}$ does not preserve the join operation. On the one hand, $213\vee 132 = 321$ in the weak order on $\mathfrak{S}_3$. Thus, \[\Psi_{\std(G')} (213\,\vee\, 132) = \Psi_{\std(G')}(321)= \substack{1\\2\\3},\] where we write $\Psi_{\std(G')}(321)$ as a $\std(G')$-tree (with the elements ordered vertically). On the other hand, \[ \Psi_{\std(G')}(213) \vee \Psi_{\std(G')}(132)= \substack{3\\[.25em]1\,2} \vee\, \substack{2\\3\\1} =\substack{3\\1\,2}.\] The reader can check the computation of the join in $L_{\std(G')}$ with Figure~\ref{fig:L_ex}. The case in which $\{i,k\}$ and $\{i,j\}$ are edges (but $\{j,k\}$ is not) is proved dually. Assume that $\{i,k\}$ is an edge and neither $\{j,k\}$ nor $\{i,j\}$ is an edge. Then, for example, $\Psi_{\std(G')}$ does not preserve the join operation. Indeed \[\Psi_{\std(G')}(123)=\Psi_{\std(G')}(213) = \Psi_{\std(G')}(132)\] is the smallest element in $L_{\std(G')}$. But $\Psi_{\std(G')}(213\,\vee \,132)$ is the biggest element in $L_{\std(G')}$. We conclude that if $G$ is not filled, then $\Psi_G:\mathfrak{S}_n\to L_G$ is not a lattice map. \end{proof} \subsection{Generators of the congruence $\Theta_G$} Let $\Theta_G$ be the equivalence relation on $\mathfrak{S}_n$ induced by the fibers of $\Psi_G$. In light of Theorem~\ref{main}, when $G$ is filled $\Theta_G$ is a lattice congruence on the weak order. Recall from Section~\ref{subsec_weak_cong} that $\Con(\mathfrak{S}_n)$ is a finite distributive lattice. We identify each congruence $\Theta$ with the corresponding order ideal of join-irreducible congruences. The \emph{generators} of a congruence are the maximal elements of this order ideal. Recall that the join-irreducible congruences of the weak order are given by arcs. (This is Theorem~\ref{thm_weak_arcs}.)
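The filled condition of Theorem~\ref{main} is a finite check, as is the enumeration of the pairs that will be called \emph{minimal non-edges} below. The following Python sketch is our own illustration only; the encoding of a graph on $[n]$ as a list of edge pairs is an assumption of the sketch and not notation used elsewhere in the paper.
\begin{verbatim}
# Hypothetical helpers (ours) for the filled condition and for minimal
# non-edges; a graph on [n] is passed as a list of edge pairs.
def is_filled(n, edges):
    """Check (RF) and (LF): an edge {i,k} forces {i,j} and {j,k} for i<j<k."""
    E = {frozenset(e) for e in edges}
    return all(frozenset({i, j}) in E and frozenset({j, k}) in E
               for i in range(1, n + 1) for k in range(i + 2, n + 1)
               if frozenset({i, k}) in E
               for j in range(i + 1, k))

def minimal_non_edges(n, edges):
    """Pairs x<y with {x,y} not an edge but {x,z},{z,y} edges for all x<z<y."""
    E = {frozenset(e) for e in edges}
    return [(x, y) for x in range(1, n + 1) for y in range(x + 1, n + 1)
            if frozenset({x, y}) not in E
            and all(frozenset({x, z}) in E and frozenset({z, y}) in E
                    for z in range(x + 1, y))]

path4 = [(1, 2), (2, 3), (3, 4)]          # the path graph on [4]
assert is_filled(4, path4)
assert minimal_non_edges(4, path4) == [(1, 3), (2, 4)]
\end{verbatim}
For the path graph on $[4]$, for instance, the sketch reports that the graph is filled and that its minimal non-edges are $\{1,3\}$ and $\{2,4\}$.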
Let $(x,y, +)$ denote the arc with $\epsilon_i = +$ for each $i\in [y-x-1]$. Occasionally we call such an arc a \emph{positive arc}. (Pictorially, this is an arc which does not pass to the right of any point between its endpoints.) A \emph{minimal non-edge} is a pair $x<y$ such that for each $z\in\{x+1, x+2,\ldots,y-1\}$, $\{x,z\}$ and $\{z,y\}$ are edges in $G$, but $\{x,y\}$ is not an edge. \begin{theorem}\label{generators} Suppose that $G$ is a filled graph, and consider the lattice congruence $\Theta_G$ induced by $\Psi_G$. Then $\Theta_G$ is generated by $$\{(x,y,+): \{x,y\}\text{ is a minimal non-edge of $G$}\}.$$ \end{theorem} Before we prove Theorem~\ref{generators} we gather some useful facts. Throughout this section, we write $j$ for a join-irreducible permutation and $j_*$ for the unique element that it covers in the weak order. Recall that we associate each $j$ with the join-irreducible congruence $\Theta^\alpha$ where $\alpha$ is the arc $\alpha(j_*,j)$. Conversely, given an arc $\alpha=(x,y,\epsilon)$, the corresponding join-irreducible permutation is \[j_{\alpha}= 12\ldots (x-1) l_1\ldots l_p \,y\, x\, r_1\ldots r_q (y+1) (y+2) \ldots n\] where $\{l_1<l_2<\ldots < l_p\}$ is the set $\{x':\epsilon_{x'-x} = -\}$ and $\{r_1<r_2<\ldots<r_q\}$ is the set $\{y': \epsilon_{y'-x} = +\}$. (See Proposition~\ref{arc to perm}.) We say a join-irreducible element $j$ is \emph{contracted by $\Theta_G$} if $j\equiv_G j_*$. The congruence $\Theta^\alpha$ is a generator for $\Theta_G$ if $j_\alpha$ is contracted by $\Theta_G$, and for each subarc $\beta$ of $\alpha$, the corresponding permutation $j_{\beta}$ is \textit{not} contracted. The next result follows immediately from Corollary~\ref{g-perm lattice cong}. \begin{proposition} Let $G$ be a filled graph with vertex set $[n]$ and let $j$ be a join-irreducible permutation in $\mathfrak{S}_n$. Then $j$ is not contracted by $\Theta_G$ if and only if $j$ is a $G$-permutation. \end{proposition} \begin{lemma}\label{not contracted} Let $G$ be a filled graph with vertex set $[n]$ and $1\le x<y\le n$. Suppose that $\{x,y\}$ is an edge in $G$. Then no arc $(x,y,\epsilon)$ is contracted by $\Theta_G$. \end{lemma} \begin{proof} Let $j$ be join-irreducible with unique descent $(x,y)$. Observe that $G|_{[x,y]}$ is a complete graph because $G$ is filled. Let $r=y-x$. Write $j$ in one-line notation as: $$j= j_1\ldots j_n=12\ldots (x-1) j_x\ldots j_{x+r} (y+1) (y+2)\ldots n.$$ We claim that $j_i$ and $\max\{j_1,\ldots j_i\}$ belong to the same connected component of $G|_{\{j_1,\ldots j_i\}}$. If $i\le x$ or $i \ge x+r+1$ then $j_i = \max\{j_1,\ldots j_i\}$. So the claim follows. Suppose that $x<i\le x+r$. Then $\max\{j_1,\ldots j_i\} = \max\{j_x,\ldots, j_i\}$. Since $\{j_x,\ldots, j_i\}$ is a subset of $[x,y]$, the claim follows. Therefore $j$ is a $G$-permutation. \end{proof} \begin{lemma}\label{+ is contracted} Suppose that $G$ is filled and that $\{x,y\}$ is not an edge in $G$. Then the arc $(x,y,+)$ is contracted by~$\Theta_G$. \end{lemma} \begin{proof} Let $j_{x,y}$ denote the join-irreducible corresponding to the arc $(x,y,+)$. We argue that $j_{x,y}$ is contracted by $\Theta_G$. Since $x$ and $y$ are not adjacent and $G$ is filled, $y$ is not adjacent to any vertex $x'\le x$. Write $j_{x,y}$ as $$1\,2\,\ldots (x-1) \,y x \, (x+1)\ldots (y-1)\,(y+1)\,\ldots n.$$ Observe that $j_{x,y}$ is not a $G$-permutation because $y=\max\{1,2,\ldots, x,y\}$ is isolated in the subgraph $G|_{\{1,2,\ldots, x,y\}}$. Thus $j_{x,y}$ is contracted by $\Theta_G$.
\end{proof} \begin{lemma}\label{minimal nonedges} Suppose that $\{x,y\}$ is a minimal non-edge of $G$. Let $\alpha$ be the arc $(x,y,\epsilon)$. If $\epsilon\ne +$ then $\alpha$ is not contracted by $\Theta_G$. \end{lemma} \begin{proof} Let $j$ be a join-irreducible corresponding to $\alpha$. Let $r=y-x$. Write $j$ in one-line notation as $j_1\ldots j_n$. Observe that $$j=12\ldots (x-1) j_x\ldots j_{x+r} (y+1) (y+2)\ldots n.$$ We claim that $j_i$ and $\max\{j_1,\ldots j_i\}$ belong to the same connected component of $G|_{\{j_1,\ldots j_i\}}$. If $i\le x$ or $i \ge x+r+1$ then $j_i = \max\{j_1,\ldots j_i\}$. So the claim follows. Suppose that $x<i\le x+r$. Then $\max\{j_1,\ldots j_i\} = \max\{j_x,\ldots, j_i\}$. Because $G$ is filled, our hypotheses imply that we have each edge in $\binom{[x,y]}{2}$ except $\{x,y\}$. If $\max\{j_x,\ldots, j_i\}$ is not equal to $y$ then $G|_{\{j_x,\ldots, j_i\}}$ is a complete graph. So the claim follows. Assume that $\max\{j_x,\ldots, j_i\}=y$. Because $\alpha\ne (x,y,+)$ we have $x<j_x<y$. Thus $\{x,j_x\}$ and $\{j_x,y\}$ are both edges in $G$. Therefore $G|_{\{j_x,\ldots, j_i\}}$ is connected. So the claim follows, and $j$ is a $G$-permutation. \end{proof} \begin{figure} \caption{}\label{fig:arc2} \end{figure} \begin{proof}[Proof of Theorem~\ref{generators}] Let $\Gcal$ denote the set $\{(x,y,+): \{x,y\}\text{ is a minimal non-edge}\}$. Lemma~\ref{+ is contracted} implies that each arc in $\Gcal$ is contracted by $\Theta_G$. Moreover, since $G$ is filled, the endpoints of every subarc of an arc in $\Gcal$ form an edge of $G$, so Lemma~\ref{not contracted} implies that no subarc of an arc in $\Gcal$ is contracted. Hence the arcs in $\Gcal$ are among the generators of~$\Theta_G$. To prove the theorem we argue that they are the only generators. By way of contradiction, assume that $(x,y,\epsilon)$ is a generator of $\Theta_G$ and $(x,y,\epsilon) \notin \Gcal$. Write $j$ for the corresponding join-irreducible. By Lemma~\ref{not contracted}, $\{x,y\}$ is not an edge (because $j$ is contracted by $\Theta_G$). If $\{x,y\}$ is a minimal non-edge then Lemma~\ref{minimal nonedges} says that $\epsilon=+$, and hence $(x,y,\epsilon)\in \Gcal$. Thus we may assume that $\{x,y\}$ is not a minimal non-edge, so that $(x,y,\epsilon)$ has a subarc $\alpha'$ with endpoints $x'<y'$ such that $\{x',y'\}$ is a minimal non-edge. Since no subarc of $\alpha$ is contracted, in particular $\alpha'$ is not contracted. It follows that $\alpha'$ is not a positive arc. Therefore, $\epsilon \ne +$. To obtain a contradiction we argue that $j$ is a $G$-permutation. Write $j$ as $$12\ldots (x-1) l_1\ldots l_p \,y\,x\, r_1 \ldots r_q (y+1) \ldots n$$ where $\{l_1<\ldots< l_p, r_1< \ldots< r_q\}= \{x+1, \ldots y-1\}$. Therefore, $j_i = \max\{j_1,\dots, j_i\}$ whenever $j_i \in \{1,\ldots, x-1\}\cup\{l_1,\ldots, l_p\}\cup\{y, y+1, \ldots, n\}$. We claim that $r_i$ is in the same connected component as $y$ in the subgraph induced by $\{1,2,\ldots,x,y,r_1,\ldots r_i\}$. Let $j'$ be the join-irreducible whose corresponding arc is the subarc of $(x,y,\epsilon)$ with endpoints $x'=r_i<y$. As above, write $$j'=1\,2\ldots (x'-1) \,l'_1\ldots l'_{p'} \,y\,x'\, r'_1 \ldots r'_{q'}\, (y+1) \ldots n.$$ Observe that $\{1,2,\ldots, (x'-1), l'_1,\ldots, l'_{p'} ,y,x'\} = \{1,2,\ldots,x,y,r_1,\ldots r_i\}$. (Each entry $l_k$ remains to the left of the descent $(x',y)$ in $j'$ because $l_k<y$. Each $r_k$ with $k<i$ is left of the descent $(x',y)$ because $r_k<r_i$.) Because $j'$ is not contracted, it is a $G$-permutation. Therefore $y$ and $r_i=x'$ belong to the same connected component in the subgraph $G|_{\{1,2,\ldots,x,y,r_1,\ldots r_i\}}$. Finally we consider $x$ and $y= \max\{1,2,\ldots, (x-1), l_1, \ldots, l_p, x,y\}$ in the subgraph $G|_{\{1,2,\ldots, (x-1), l_1, \ldots, l_p, x,y\}}$.
We will be done if we can show that $x$ and $y$ belong to the same connected component. We do so by showing, first, that $x$ and $l_p$ belong to the same connected component in this subgraph, and second, that $l_p$ and $y$ belong to the same connected component. (Indeed we will see that $l_p$ and $y$ are adjacent in $G$.) Observe that $l_p$ exists because $\epsilon \ne +$. Consider the permutation $$j''=1\,2\ldots (x-1) l_1\ldots l_p \,x\, r_1 \ldots r_q \, y\, (y+1) \ldots n.$$ Observe that this permutation has a unique descent $(x,l_p)$, so it is join-irreducible. Moreover, the arc corresponding to $j''$ is the subarc of $(x,y,\epsilon)$ with endpoints $x<l_p$. Since no subarc of $(x,y,\epsilon)$ is contracted, $j''$ is not contracted, and hence $j''$ is a $G$-permutation. Thus $x$ and $l_p$ belong to the same connected component in the subgraph $G|_{\{1,2,\ldots, (x-1), l_1, \ldots, l_p, x\}}$. So, $x$ and $l_p$ belong to the same connected component in~$G|_{\{1,2,\ldots, (x-1), l_1, \ldots, l_p, x, y\}}$. Next consider the subarc of $(x,y,\epsilon)$ with endpoints $l_p <y$. As none of the $l_i$ lie strictly between $l_p$ and $y$, this is a positive arc; see Figure~\ref{fig:arc2}. Since it is not contracted, the pair $\{l_p,y\}$ must be an edge of $G$ (by Lemma~\ref{+ is contracted}). We conclude that $x$ and $y$ belong to the same connected component in the subgraph induced by $\{1,2,\ldots, (x-1), l_1, \ldots, l_p, x,y\}$. Thus $j$ is a $G$-permutation. By this contradiction, we obtain the desired result. \end{proof} \section{Algebras and coalgebras of tubings}\label{sec_hopf} \subsection{The Malvenuto-Reutenauer algebra}\label{subsec_MR} Fix a field $\Kbb$. For a set $X$, we let $\Kbb[X]$ denote the vector space over $\Kbb$ for which the set $X$ indexes a basis. For $X=\mathfrak{S}_n$, we let $\Kbb[\mathfrak{S}_n]$ have a distinguished basis $\{\Fbb_w:\ w\in\mathfrak{S}_n\}$. The \emph{Malvenuto-Reutenauer} algebra is a Hopf algebra on the graded vector space \[ \Kbb[\mathfrak{S}_{\infty}]=\bigoplus_{n=0}^{\infty}\Kbb[\mathfrak{S}_n]. \] If $v=v_1\cdots v_n$ is a permutation of $[n]$ and $m\geq 0$, we define the \emph{shift by $m$} to be the word $v[m]=(v_1+m)(v_2+m)\cdots(v_n+m)$. For basis elements $\Fbb_u\in\Kbb[\mathfrak{S}_m],\ \Fbb_v\in\Kbb[\mathfrak{S}_n]$, the product $\Fbb_u\cdot\Fbb_v$ is the sum of the elements $\Fbb_w$ for which $w$ is a shuffle of $u$ and $v[m]$. For example, $$\Fbb_{21}\cdot\Fbb_{12}=\Fbb_{2134}+\Fbb_{2314}+\Fbb_{2341}+\Fbb_{3214}+\Fbb_{3241}+\Fbb_{3421}.$$ The coproduct $\Delta(\Fbb_u)\in\Kbb[\mathfrak{S}_{\infty}]\otimes\Kbb[\mathfrak{S}_{\infty}]$ for $u\in\mathfrak{S}_n$ is defined to be $$\Delta(\Fbb_u)=\sum_{i=0}^n\Fbb_{\std(u_1\cdots u_i)}\otimes\Fbb_{\std(u_{i+1}\cdots u_n)},$$ where $\std(a_1\cdots a_i)$ for a sequence of distinct integers $a_1,\ldots,a_i$ is the element of $\mathfrak{S}_i$ with the same relative order as $a_1\cdots a_i$. For example, $$\Delta(\Fbb_{3241})=\iota\otimes \Fbb_{3241} + \Fbb_1\otimes \Fbb_{231} + \Fbb_{21}\otimes \Fbb_{21} + \Fbb_{213}\otimes \Fbb_1 + \Fbb_{3241}\otimes\iota.$$ Here, the element $\iota\in\Kbb[\mathfrak{S}_0]$ is the multiplicative identity. The counit $\epsilon:\Kbb[\mathfrak{S}_{\infty}]\ra\Kbb$ is the linear map with $\epsilon(\iota)=1$ and $\epsilon(\Fbb_v)=0$ for $v\in\mathfrak{S}_n,\ n\geq 1$. These operations are compatible in a way that makes $\Kbb[\mathfrak{S}_{\infty}]$ a (connected, graded) bialgebra. This automatically gives the Malvenuto-Reutenauer algebra the structure of a Hopf algebra; that is, it comes with a (unique) antipode $S$.
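To make the two displayed examples easy to reproduce, the following Python sketch (our own illustration; permutations are encoded as tuples, an assumption of the sketch rather than notation from the text) implements the shifted-shuffle product and the deconcatenation--standardization coproduct on the $\Fbb$-basis.
\begin{verbatim}
# Sketch (ours) of the Malvenuto-Reutenauer product and coproduct on
# one-line permutations encoded as tuples.
from itertools import combinations

def shifted_shuffles(u, v):
    """All shuffles of u with v shifted by len(u); the support of F_u * F_v."""
    m, n = len(u), len(v)
    vs = tuple(x + m for x in v)
    words = []
    for pos in combinations(range(m + n), m):  # slots receiving letters of u
        w, iu, iv = [], 0, 0
        for p in range(m + n):
            if p in pos:
                w.append(u[iu]); iu += 1
            else:
                w.append(vs[iv]); iv += 1
        words.append(tuple(w))
    return words

def std(word):
    """Standardization: replace the i-th smallest letter by i."""
    order = {a: i + 1 for i, a in enumerate(sorted(word))}
    return tuple(order[a] for a in word)

def coproduct(u):
    """Terms std(u_1..u_i) (x) std(u_{i+1}..u_n) of Delta(F_u)."""
    return [(std(u[:i]), std(u[i:])) for i in range(len(u) + 1)]

# Reproduce the examples in the text:
assert sorted(shifted_shuffles((2, 1), (1, 2))) == [
    (2, 1, 3, 4), (2, 3, 1, 4), (2, 3, 4, 1),
    (3, 2, 1, 4), (3, 2, 4, 1), (3, 4, 2, 1)]
assert coproduct((3, 2, 4, 1)) == [
    ((), (3, 2, 4, 1)), ((1,), (2, 3, 1)), ((2, 1), (2, 1)),
    ((2, 1, 3), (1,)), ((3, 2, 4, 1), ())]
\end{verbatim}
Running the sketch confirms the six shuffles in the expansion of $\Fbb_{21}\cdot\Fbb_{12}$ and the five terms of $\Delta(\Fbb_{3241})$ displayed above.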
We refer to \cite{grinberg.reiner:2014hopf} for further background on Hopf algebras from a combinatorial perspective. The Malvenuto-Reutenauer algebra contains the algebra of noncommutative symmetric functions $\NCSym$ as a sub-Hopf algebra. Loday and Ronco \cite{loday.ronco:1998hopf} discovered a Hopf algebra $\Kbb[Y_{\infty}]=\bigoplus\Kbb[Y_n]$ on the vector space spanned by planar binary trees and a sequence of Hopf algebra embeddings $$\NCSym\hookra\Kbb[Y_{\infty}]\hookra\Kbb[\mathfrak{S}_{\infty}].$$ More generally, we may consider a family of nonempty sets $\{Z_0,Z_1,Z_2,\ldots\}$ with surjections $f_n:\mathfrak{S}_n\thra Z_n$ for each $n\geq 0$. Letting $\{\Pbb_x:\ x\in Z_{\infty}\}$ be a basis for $\Kbb[Z_{\infty}]$, there is a vector space embedding $c:\Kbb[Z_{\infty}]\hookra\Kbb[\mathfrak{S}_{\infty}]$ where $$c(\Pbb_x)=\sum_{w\in f_n^{-1}(x)} \Fbb_w\ \hspace{3mm}\ \mbox{for }x\in Z_n.$$ We are especially interested in the case where $Z_n$ is the set of vertices of a generalized permutahedron of rank $n$ and $f_n:\mathfrak{S}_n\thra Z_n$ is the canonical map. The main problem is to determine whether $c$ makes $\Kbb[Z_{\infty}]$ into an algebra or a coalgebra, i.e. whether $c(\Pbb_x)\cdot c(\Pbb_y)$ and $\Delta(c(\Pbb_x))$ lie in the image of $c$ for any $x,y\in Z_{\infty}$. \subsection{Translational families of lattice congruences}\label{subsec_MR_alg} As usual, we consider the symmetric group $\mathfrak{S}_n$ as a poset under the weak order. When the map $f_n:\mathfrak{S}_n\thra Z_n$ has the structure of a lattice quotient map, there is a generalized permutahedron known as a \emph{quotientope} with vertex set $Z_n$ associated to the map $f_n$ \cite{pilaud.santos:2017quotientopes}. In \cite{reading:2005lattice}, Reading proved that the embedding $c$ associated to a sequence of lattice quotient maps $\{f_n\}_{n\geq 0}$ is an algebra map (resp., coalgebra map) if the family $\{f_n\}$ is translational (resp., insertional). We recall the definition of a translational family in this section and of an insertional family in Section~\ref{subsec_insertional}. Let $\Theta$ be a lattice congruence of the weak order on $\mathfrak{S}_n$ for some $n$. Recall that $\Theta$ contracts a join-irreducible $j$ if $j\equiv j_*\mod \Theta$, where $j{\,\,\,\cdot\!\!\!\! >\,\,} j_*$. Equivalently, for the corresponding arc $\alpha=\alpha(j_*,j)$, we have $\Theta^{\alpha}\leq\Theta$ in the lattice $\Con(L)$. We abuse notation, and say that $\Theta$ \emph{contracts} the arc $\alpha$ if $\Theta$ contracts $j_{\alpha}$. (Indeed, $\Theta$ contracts an arc $\alpha$ if and only if there exists a covering relation $u\lessdot w$ such that $\alpha(u,w)=\alpha$ and $u\equiv w\mod \Theta$.) In particular, the set of arcs contracted by $\Theta$ correspond to the set of join-irreducible elements of $\Con(L)$ less than or equal to $\Theta$ in $\Con(L)$. By Theorem~\ref{thm_forcing_arcs}, if $\alpha$ is contracted by $\Theta$ and $\alpha$ is a subarc of $\beta$, then $\beta$ is contracted by $\Theta$ as well. Fix a sequence $\mathbf{\Theta}=\{\Theta_n\}_{n\geq 0}$ where $\Theta_n$ is a lattice congruence of the weak order on $\mathfrak{S}_n$ for each $n\geq 0$. We let $Z_n=\mathfrak{S}_n/\Theta_n$ be the set of equivalence classes modulo $\Theta_n$, and set $Z_{\infty}^{\mathbf{\Theta}}=\{Z_n\}_{n\geq 0}$. As we consider lattice congruences of the weak order for varying $n$, we may say that $\alpha$ is an \emph{arc on $[n]$} to mean that it is an arc for $\mathfrak{S}_n$. 
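For computations with contracted arcs it is convenient to pass explicitly from an arc to its join-irreducible permutation, as recalled before Theorem~\ref{generators}. The following Python sketch is our own illustration; encoding the sign vector as a string of \texttt{+} and \texttt{-} characters is an assumption of the sketch.
\begin{verbatim}
# Sketch (ours) of the arc -> join-irreducible permutation map: for an arc
# (x, y, eps), with eps[l-1] the sign attached to the point x + l,
# j_alpha = 1 .. (x-1)  l_1 .. l_p  y  x  r_1 .. r_q  (y+1) .. n.
def arc_to_join_irreducible(n, x, y, eps):
    assert len(eps) == y - x - 1
    left  = [x + l for l in range(1, y - x) if eps[l - 1] == '-']   # the l_i's
    right = [x + l for l in range(1, y - x) if eps[l - 1] == '+']   # the r_i's
    return tuple(list(range(1, x)) + left + [y, x] + right
                 + list(range(y + 1, n + 1)))

assert arc_to_join_irreducible(6, 2, 5, '++') == (1, 5, 2, 3, 4, 6)
assert arc_to_join_irreducible(4, 1, 4, '-+') == (2, 4, 1, 3)
\end{verbatim}
Combined with a test for $G$-permutations, such a sketch gives a direct way to list the contracted arcs of $\Theta_G$ for small filled graphs.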
An arc $\alpha=(i,j,\epsilon)$ on $[n]$ is a \emph{translate} of an arc $\beta=(k,l,\epsilon^{\pr})$ on $[m]$ if $j-i=l-k$ and $\epsilon=\epsilon^{\pr}$. The family $\{\Theta_n\}_{n\geq 0}$ is called \emph{translational} if whenever $\Theta_n$ contracts an arc $\alpha$ and when $\beta$ is an arc on $[m]$ that is a translate of $\alpha$, the congruence $\Theta_m$ contracts $\beta$. The following is equivalent to \cite[Theorem 1.2, Proposition 7.1]{reading:2005lattice}. \begin{theorem}\label{thm_translational_subalg} If $\mathbf{\Theta}=\{\Theta_n\}_{n\geq 0}$ is a translational family, then the map $$c:\Kbb[Z_{\infty}^{\mathbf{\Theta}}]\ra\Kbb[\mathfrak{S}_{\infty}]$$ embeds $\Kbb[Z_{\infty}^{\mathbf{\Theta}}]$ as a subalgebra of $\Kbb[\mathfrak{S}_{\infty}]$. \end{theorem} We proved (Theorem~\ref{main}) that the map $\Psi_G:\mathfrak{S}_n\ra L_G$ is a lattice map if and only if $G$ is a filled graph. We determine when a sequence of filled graphs determines a translational family of lattice congruences of the weak order. As before, we will write $(i,j,+)$ to represent the arc $(i,j,(+,\ldots,+))$. An arc of the form $(i,j,+)$ is called a \emph{positive arc}. For nonnegative integers $k,n$, let $H_{k,n}$ be the graph with vertex set $[n]$ such that $\{i,j\}$ is an edge whenever $1\leq i<j\leq n$ and $j-i\leq k$. Clearly, if $k\geq n-1$, then $H_{k,n}$ is the complete graph on $[n]$. \begin{proposition}\label{prop_translational_char} A sequence of filled graphs $\{G_n\}_{n\geq 0}$ determines a translational family $\{\Theta_n\}_{n\geq 0}$ if and only if there exists some $k\in\{0,1,2,\ldots\}\cup\{+\infty\}$ such that $G_n=H_{k,n}$ for all $n$. \end{proposition} \begin{proof} Let $\{G_n\}_{n\geq 0}$ be a sequence of filled graphs, and let $\Theta_n$ be the lattice congruence induced by $\mathfrak{S}_n\ra L_{G_n}$. Suppose the family $\{\Theta_n\}_{n\geq 0}$ is translational. If $\{i,j\}$ is not an edge of $G_n$, then $\Theta_n$ contracts the arc $(i,j,+)$. Being a translational family means that any arc of the form $(i^{\pr},j^{\pr},+)$ is contracted by $\Theta_m$ where $1\leq i^{\pr}<j^{\pr}\leq m$ and $j-i=j^{\pr}-i^{\pr}$. This in turn means that $\{i^{\pr},j^{\pr}\}$ is not an edge of $G_m$. Hence, there must exist some set $S\subseteq\Nbb$ such that for all $i,j,n$ such that $1\leq i<j\leq n$, we have $j-i\in S$ if and only if $\{i,j\}$ is an edge of $G_n$. As the graphs $G_n$ are filled, the set $S$ must either be of the form $S=[k]$ for some $k\in\{0,1,2,\ldots\}$ or $S=\Nbb$. Conversely, suppose there exists $k\in\{0,1,2,\ldots\}\cup\{\infty\}$ such that $G_n=H_{k,n}$ for all~$n$. Then $\Theta_n$ is the lattice congruence generated by $\{\Theta^{(i,j,+)}:\ j-i=k+1\}$. Since the generating set is closed under translation, the family $\{\Theta_n\}_{n\geq 0}$ must be translational. \end{proof} \begin{remark} The lattice congruence $\Theta_n$ in Proposition~\ref{prop_translational_char} resembles the \emph{metasylvester congruence} $\equiv_n^k$, which is the lattice congruence of the weak order on $\mathfrak{S}_n$ generated by relations of the form $$UacV_1b_1\cdots V_kb_kW\equiv_n^k UcaV_1b_1\cdots V_kb_kW,$$ where $a<b_i<c$ holds for all $i\in[k]$. In other words, two letters $a,c$ in a permutation can be swapped if there are $k$ letters on the right with values between $a$ and $c$. 
Via the dictionary between arcs and join-irreducible lattice congruences, the metasylvester congruence is the most refined congruence that contracts every arc of the form $(a,c,\epsilon)$ where the number of $+$ entries in $\epsilon$ is at least $k$. In contrast, the congruence $\Theta_n$ corresponding to the graph $H_{k,n}$ is generated by $\Theta^{(i,j,+)}$ where $j-i=k+1$, meaning that $\epsilon_1=\cdots=\epsilon_k=+$. It is straight-forward to check that the family $\{\equiv_n^k\}_{n\geq 1}$ is both translational and insertional; cf. Section~\ref{subsec_insertional}. Hence, the embedding $$c:\Kbb[\mathfrak{S}_{\infty}/\equiv^k]\hookra\Kbb[\mathfrak{S}_{\infty}]$$ realizes $\Kbb[\mathfrak{S}_{\infty}/\equiv^k]$ as both a subalgebra and a sub-coalgebra of the Malvenuto-Reutenauer Hopf algebra. It follows that $\Kbb[\mathfrak{S}_{\infty}/\equiv^k]$ inherits the structure of a bialgebra. It is known that an antipode is inherited as well, giving it the structure of a sub-Hopf algebra. Pilaud \cite{pilaud:2018brick} interpreted this Hopf algebra in terms of the vertices of a family of \emph{brick polytopes}, which are a different class of generalized permutahedra from those we consider in this paper. \end{remark} \subsection{Tubing algebras}\label{subsec_hopf_algebra} We begin this section by recalling the tubing algebra defined by Ronco \cite{ronco:2012tamari}. For $I\subseteq\Nbb,\ n\geq 0$, let $I+n:=\{i+n\ |\ i\in I\}$. In particular, $[m]+n=\{n+1,n+2,\ldots,n+m\}$. If $\Xcal$ is a tubing, we let $\Xcal+n:=\{I+n\ |\ I\in\Xcal\}$. Consider a family of graphs $\Gcal=\bigsqcup_{n\geq 0}\Gcal_n$ where $\Gcal_n$ is a finite collection of graphs with vertex set $[n]$. We allow $\Gcal_n$ to contain multiple copies of the same graph, and for the purposes of defining the tubing algebra, it will be important to be able to distinguish between multiple copies of the same graph. This could be done by defining $\Gcal$ as a sequence of graphs rather than as a set, but we prefer to describe $\Gcal$ as a set. Define an operation $\circ$ on $\Gcal$ to be \emph{admissible} if \begin{itemize} \item $(G\circ G^{\pr})\circ G^{\pr\pr}=G\circ(G^{\pr}\circ G^{\pr\pr})$, and \item for $G\in\Gcal_n,\ G^{\pr}\in\Gcal_m$: \begin{itemize} \item $G\circ G^{\pr}$ is in $\Gcal_{n+m}$, \item $G=(G\circ G^{\pr})|_{[n]}$, and \item $(G^{\pr}+n)=(G\circ G^{\pr})|_{[m]+n}$. \end{itemize} \end{itemize} If $\circ$ is admissible, we call the pair $(\Gcal,\circ)$ an \emph{admissible family}. We remark that our definition of admissibility is stronger than that of \cite[Definition 3.4]{ronco:2012tamari}, but this is the appropriate condition to define an associative algebra of maximal tubings; cf. \cite[Theorem 3.10]{ronco:2012tamari}. Let $\MTub(\Gcal)$ be the set of all maximal tubings of these graphs: $$\MTub(\Gcal)=\bigsqcup_{G\in\Gcal}\MTub(G)$$ We let $\Kbb[\Gcal]=\Kbb[\MTub(\Gcal)]$ be the $\Kbb$-vector space for which $\MTub(\Gcal)$ indexes a basis. We will consider the distinguished basis $\{\Pbb_{\Xcal}:\ \Xcal\in\MTub(\Gcal)\}$ for $\Kbb[\Gcal]$. The vector space $\Kbb[\Gcal]$ is graded so that an element $\Pbb_{\Xcal}$ is of degree $n$ if $\Xcal$ is a tubing of a graph $G$ with $n$ vertices. Since each $\Gcal_n$ is finite, each graded component of $\Kbb[\Gcal]$ is finite-dimensional. \begin{definition}\label{def:tubing_mult} Let $G\in\Gcal_n$ and $G^{\pr}\in\Gcal_m$ be given. 
For maximal tubings $\Xcal\in\MTub(G)$ and $\Ycal\in\MTub(G^{\pr})$, define $$\Pbb_{\Xcal}\cdot\Pbb_{\Ycal}=\sum\Pbb_{\Zcal}$$ where the sum is over all maximal tubings $\Zcal$ of $G\circ G^{\pr}$ such that $\Xcal=\Zcal|_{[n]}$ and $(\Ycal+n)=\Zcal|_{[m]+n}$. \end{definition} \begin{theorem}[Theorem 3.10 \cite{ronco:2012tamari}]\label{thm_admissible_associative} If $\Gcal$ is a family of graphs with an admissible operation $\circ$ as above, then the binary operation in Definition~\ref{def:tubing_mult} is associative. \end{theorem} To prove Theorem~\ref{thm_admissible_associative}, one may show directly that if $G\in\Gcal_n,\ G^{\pr}\in\Gcal_m,$ and $G^{\pr\pr}\in\Gcal_r$ are graphs with maximal tubings $\Xcal,\ \Ycal,$ and $\Zcal$, respectively, then $$(\Pbb_{\Xcal}\Pbb_{\Ycal})\Pbb_{\Zcal}=\sum\Pbb_{\Wcal}=\Pbb_{\Xcal}(\Pbb_{\Ycal}\Pbb_{\Zcal})$$ where the sum is taken over $\Wcal\in\MTub(G\circ G^{\pr}\circ G^{\pr\pr})$ such that \[ \Xcal=\Wcal|_{[n]},\ \Ycal+n=\Wcal|_{[m]+n},\ \Zcal+(n+m)=\Wcal|_{[r]+m+n}. \] \begin{example}\label{ex_MR_alg} Consider the family of complete graphs $\Gcal=\{K_n\}_{n\geq 0}$ where we define $K_n\circ K_m=K_{n+m}$. If $\Xcal$ is any maximal tubing of $K_n$, its corresponding $K_n$-tree $\tau(\Xcal)$ is a chain. Letting $\Xcal\in\MTub(K_n),\ \Ycal\in\MTub(K_m)$, the elements $\Pbb_{\Zcal}$ in the support of $\Pbb_{\Xcal}\cdot\Pbb_{\Ycal}$ are indexed by precisely those tubings of $K_{n+m}$ for which $\tau(\Zcal)$ is a linear extension of $\tau(\Xcal)\sqcup (\tau(\Ycal)+n)$. But this is the shuffle product of $\tau(\Xcal)$ and $\tau(\Ycal)$ when viewed as permutations. Hence, the natural map $\Kbb[\mathfrak{S}_{\infty}]\ra\Kbb[\Gcal]$ is an isomorphism of algebras from the Malvenuto-Reutenauer algebra to the tubing algebra on the family of complete graphs. A similar result about the coalgebra structure of $\Kbb[\mathfrak{S}_{\infty}]$ will be given in Example~\ref{ex_MR_coalg}. \end{example} \begin{remark}\label{rem_MR_decorated} In many instances, it is useful to consider a generalization of the Malvenuto-Reutenauer algebra, which is indexed by \emph{decorated permutations}; see \cite{novelli2010free} or \cite{pilaud2018hopf}. A decorated permutation is a pair $(w,G)$ consisting of a permutation $w$ and an element $G$ called the decoration. If $\Gcal=\sqcup_{n=0}^{\infty}\Gcal_n$ is a graded set with an admissible operation $\circ$, one may define an algebra with a basis \[ \bigsqcup_{n=0}^{\infty}\{\Fbb_{(w,G)}:\ w\in\mathfrak{S}_n,\ G\in\Gcal_n\} \] in much the same way as the tubing algebra, where $\Fbb_{(u,G)}\cdot\Fbb_{(v,G^{\pr})}$ is the sum of $\Fbb_{(w,G\circ G^{\pr})}$ for which $w$ is a shuffle of $u$ and a shift of $v$. Likewise, the coalgebra structure can be extended to the decorated setting. For this paper, we have chosen to focus on the undecorated setting, though we expect many of our results to hold for decorated permutations as well. \end{remark} \begin{figure} \caption{}\label{fig:G2prod} \end{figure} \begin{example}\label{ex_admissible} Let $G_n$ be the complete bipartite graph on $[n]$ where $i$ and $j$ are adjacent if $|i-j|$ is odd. It is straightforward to check that $\{G_n\}_{n\geq 0}$ is an admissible family with $G_n\circ G_m=G_{n+m}$. The product of the basis elements indexed by the two $G$-trees for $G=G_2$ is shown in Figure~\ref{fig:G2prod}. Similarly, the sequences of path graphs, complete graphs, and edge-free graphs are admissible, so their tubings form the basis of an associative algebra.
These algebras are the Loday-Ronco algebra, the Malvenuto-Reutenauer algebra, and the polynomial ring in one variable, respectively (c.f. \cite{forcey.springfield:2010geometric}). On the other hand, while the sequence of cycle graphs $C_n$ is not an admissible family, \cite{forcey.springfield:2010geometric} constructs a different binary operation to make the vector space $\Kbb[\Gcal]$ into an associative algebra. We leave the details of that construction to their paper. \end{example} For the remainder of this section, we make the assumption that $|\Gcal_n|=1$ for all $n\geq 0$. For clarity, we may refer to such a collection $\Gcal$ as a \emph{1-parameter family}. In this situation, there is at most one admissible operation $\circ$ defined by the fact that $G\circ G^{\pr}$ is in $\Gcal_{n+m}$ whenever $G\in\Gcal_n$ and $G^{\pr}\in\Gcal_m$. Hence, we simply say that the family $\Gcal$ is admissible if the operation $\circ$ is. Our first main result in this section is a characterization of admissible families. For $A\subseteq\Nbb:=\{1,2,3,\ldots\}$, let $\Gcal(A)=\{G_n^A\}_{n\geq 0}$ be the family of graphs such that $V(G_n^A)=[n]$ and there is an edge between $i$ and $j$ if and only if $|j-i|\in A$. \begin{proposition}\label{prop_admissible_characterization} A 1-parameter family $\Gcal$ is admissible if and only if there exists $A\subseteq\Nbb$ such that $\Gcal=\Gcal(A)$. \end{proposition} \begin{proof} For a given $A\subseteq\Nbb$, it is clear that $\Gcal(A)$ is an admissible family. Indeed, it is clear from the definition that the restriction of $G_{n+m}^A$ to $[n]$ is equal to $G_n^A$, and the restriction of $G_{n+m}^A$ to $[m]+n$ is $G_m^A+n$, as desired. Now suppose $\Gcal=\{G_n\}_{n\geq 0}$ is an admissible family, and let $A=\{k\in\Nbb|\ (1,k+1)\in E(G_{k+1})\}$. We claim that $\Gcal=\Gcal(A)$. To this end, let $n\geq 1$ and $k\in A$ be given where $k\leq n-1$. Select $1\leq i<j\leq n$ such that $j-i=k$. We may decompose $G_n$ as $G_n=G_j\circ G_{n-j}$, so the edge $(i,j)$ is in $G_n$ if and only if it is in $G_j$. Furthermore, $G_j=G_{i-1}\circ G_{j-i+1}$, so $G_j$ has the edge $(i,j)$ exactly when $G_{j-i+1}+(i-1)$ does. By definition of $A$, this occurs exactly when $k\in A$. It follows that $G_n=G_n^A$. \end{proof} As a corollary, we may deduce the first part of Theorem~\ref{thm_main}. \begin{proof}[Proof of Theorem~\ref{thm_main}(\ref{thm_main_1})] If $\Gcal$ is an admissible 1-parameter family of graphs, then $\Gcal=\Gcal(A)$ for some subset $A\subseteq\Nbb$. If each graph $G_n^A$ is filled, then for $i<j$, if $j\in A$ then $i\in A$. This is equivalent to the condition that there exists some $k\in\{0,1,2,\ldots\}\cup\{+\infty\}$ such that $A=\{i\in\Nbb:\ i\leq k\}$. But this means $G_n^A=H_{k,n}$ for all $n$. By Proposition~\ref{prop_translational_char}, this means that the sequence of lattice congruences $\mathbf{\Theta}=\{\Theta_n\}_{n\geq 0}$ corresponding to the filled graphs $\{G_n\}_{n\geq 0}$ form a translational family. \end{proof} Let $H,G$ be graphs on $[n]$ such that $E(H)\subseteq E(G)$. From the definition of the graph associahedron, the polytope $P_H$ is a Minkowski summand of $P_G$, so the normal fan of $P_H$ coarsens the normal fan of $P_G$. This in turn induces a surjective map $\Psi_H^G:\MTub(G)\ra\MTub(H)$. For the remainder of the section, we fix subsets $A\subseteq B\subseteq\Nbb$. The graph $G_n^A$ is a subgraph of $G_n^B$ for all $n\geq 0$, which determines a surjective map $\MTub(G_n^B)\thra\MTub(G_n^A)$. 
For notational convenience, we write $\Psi_n$ in place of the map $\Psi_{G_n^A}^{G_n^B}$ for all $n\geq 0$. \begin{lemma}\label{lem_psi_restriction} For $\Wcal\in\MTub(G_{n+m}^B)$: \begin{enumerate} \item\label{lem_psi_restriction_1} $\Psi_{n+m}(\Wcal)|_{[n]}=\Psi_n(\Wcal|_{[n]})$ \item\label{lem_psi_restriction_2} $\std(\Psi_{n+m}(\Wcal)|_{[m]+n})=\Psi_m(\std(\Wcal|_{[m]+n}))$ \end{enumerate} \end{lemma} \begin{proof} The standardization map in (\ref{lem_psi_restriction_2}) has the effect of shifting the vertex set from $[m]+n$ to $[m]$. Besides this point, the two parts are symmetric, so we only prove the first. Let $\Wcal$ be a maximal tubing of $G_{n+m}^B$ and set $\Zcal=\Psi_{n+m}(\Wcal)$. We wish to show that $\Zcal|_{[n]}$ is equal to $\Psi_n(\Wcal|_{[n]})$. Since they are both maximal tubings of $G_n^A$, it suffices to show that their trees share a common linear extension. Let $u=u_1\cdots u_{n+m}$ be a permutation of $[n+m]$ that is a linear extension of $\tau(\Wcal)$. Then $u$ is also a linear extension of $\tau(\Zcal)$, so $u|_{[n]}$ is a linear extension of $\tau(\Zcal)|_{[n]}$. On the other hand, $u|_{[n]}$ is a linear extension of $\tau(\Wcal|_{[n]})$, so it is also a linear extension of $\tau(\Psi_n(\Wcal|_{[n]}))$. \end{proof} Now we return to the embedding $c:\Kbb[Z_{\infty}]\hookra\Kbb[\mathfrak{S}_{\infty}]$ from Section~\ref{subsec_MR}. The maps $\{\Psi_n\}_{n\geq 0}$ give rise to an embedding of vector spaces $c:\Kbb[\Gcal(A)]\hookra\Kbb[\Gcal(B)]$ where $$c(\Pbb_{\Xcal})=\sum_{\Ycal\in\Psi_n^{-1}(\Xcal)}\Pbb_{\Ycal}$$ for $\Xcal\in\MTub(G_n^A)$. \begin{theorem}\label{thm_graph_MR_subalg} The embedding $$c:\Kbb[\Gcal(A)]\hookra\Kbb[\Gcal(B)]$$ is a map of algebras. \end{theorem} \begin{proof} Let $\Xcal\in\MTub(G_n^A)$ and $\Ycal\in\MTub(G_m^A)$ be given. Then $c(\Pbb_{\Xcal}\cdot\Pbb_{\Ycal})=\sum c(\Pbb_{\Zcal})$, where the sum is over $\Zcal\in\MTub(G_{n+m}^A)$ such that $\Xcal=\Zcal|_{[n]}$ and $\Ycal+n=\Zcal|_{[m]+n}$. We have $$\sum_{\Zcal} c(\Pbb_{\Zcal})=\sum_{\Zcal}\sum_{\Wcal\in\Psi_{n+m}^{-1}(\Zcal)}\Pbb_{\Wcal}.$$ On the other hand, \begin{align*} c(\Pbb_{\Xcal})\cdot c(\Pbb_{\Ycal}) &=(\sum_{\Wcal^{\pr}\in\Psi_n^{-1}(\Xcal)}\Pbb_{\Wcal^{\pr}})\cdot (\sum_{\Wcal^{\pr\pr}\in\Psi_m^{-1}(\Ycal)}\Pbb_{\Wcal^{\pr\pr}})\\ &=\sum_{\substack{\Wcal^{\pr}\in\Psi_n^{-1}(\Xcal)\\\Wcal^{\pr\pr}\in\Psi_m^{-1}(\Ycal)}}\Pbb_{\Wcal^{\pr}}\cdot\Pbb_{\Wcal^{\pr\pr}} \end{align*} We show that $c(\Pbb_{\Xcal}\cdot \Pbb_{\Ycal})=c(\Pbb_{\Xcal})\cdot c(\Pbb_{\Ycal})$. To this end, fix $\Pbb_{\Zcal}$ in the expansion of $\Pbb_{\Xcal}\cdot \Pbb_{\Ycal}$, and let $\Wcal\in\Psi_{n+m}^{-1}(\Zcal)$. Set $\Wcal^{\pr}=\Wcal|_{[n]}$ and $\Wcal^{\pr\pr}+n=\Wcal|_{[m]+n}$ so that $\Wcal^{\pr}\in\MTub(G_n^B)$ and $\Wcal^{\pr\pr}\in\MTub(G_m^B)$. Clearly, $\Pbb_{\Wcal}$ is in the expansion of $\Pbb_{\Wcal^{\pr}}\cdot \Pbb_{\Wcal^{\pr\pr}}$. But, \begin{align*} &\Psi_n(\Wcal^{\pr}) =\Psi_{n+m}(\Wcal)|_{[n]}=\Zcal|_{[n]}=\Xcal,\ \hspace{2mm}\mbox{and}\\ &\Psi_m(\Wcal^{\pr\pr})=\std(\Psi_{n+m}(\Wcal)|_{[m]+n})=\std(\Zcal|_{[m]+n})=\Ycal, \end{align*} so $\Pbb_{\Wcal}$ is in the expansion of $c(\Pbb_{\Xcal})\cdot c(\Pbb_{\Ycal})$. Conversely, suppose $\Wcal^{\pr}\in\Psi_n^{-1}(\Xcal)$ and $\Wcal^{\pr\pr}\in\Psi_m^{-1}(\Ycal)$ are given, and let $\Pbb_{\Wcal}$ be an element in the expansion of $\Pbb_{\Wcal^{\pr}}\cdot\Pbb_{\Wcal^{\pr\pr}}$. Set $\Zcal=\Psi_{n+m}(\Wcal)$.
Then \begin{align*} &\Zcal|_{[n]}=\Psi_{n+m}(\Wcal)|_{[n]}=\Psi_n(\Wcal^{\pr})=\Xcal,\ \hspace{2mm}\mbox{and}\\ &\std(\Zcal|_{[m]+n})=\std(\Psi_{n+m}(\Wcal)|_{[m]+n})=\Psi_m(\Wcal^{\pr\pr})=\Ycal, \end{align*} so $\Pbb_{\Zcal}$ is in the expansion of $\Pbb_{\Xcal}\cdot \Pbb_{\Ycal}$. Both $c(\Pbb_{\Xcal}\cdot \Pbb_{\Ycal})$ and $c(\Pbb_{\Xcal})\cdot c(\Pbb_{\Ycal})$ are multiplicity-free sums of basis elements with the same support, so they are equal. \end{proof} \begin{corollary} If $\Gcal$ is an admissible 1-parameter family, the tubing algebra $\Kbb[\Gcal]$ is a subalgebra of the Malvenuto-Reutenauer algebra. \end{corollary} \subsection{Tubing coalgebras}\label{subsec_tubing_coalgebra} We next define a comultiplication on $\Kbb[\Gcal]$. We will assume throughout that $\Gcal$ is a 1-parameter family, though it should be possible to extend it to more general families of graphs by defining a ``selection'' operation as in \cite{pilaud2018hopf}. Say that $\Gcal$ is \emph{restriction-compatible} if for any $G\in\Gcal$ and any subset of vertices $I\subseteq V(G)$, \begin{itemize} \item $\std(G|_I)$ is a subgraph of the graph $G^{\pr}\in\Gcal$ where $V(G^{\pr})=V(\std(G|_I))$, and \item $\std(G/I)$ is a subgraph of the graph $G^{\pr\pr}\in\Gcal$ where $V(G^{\pr\pr})=V(\std(G/I))$. \end{itemize} We note that the second property actually implies the first since $\std(G|_I)$ is a subgraph of $\std(G/(V\setm I))$. \begin{example}\label{ex_res_comp} Path graphs, complete graphs, and edge-free graphs are all restriction-compatible in addition to being admissible (Example~\ref{ex_admissible}). In these cases, the quotient graphs $G/I,\ I\subseteq V(G)$ are again path graphs, complete graphs, and edge-free graphs, respectively. Similarly, the sequence of cycle graphs $C_n$ whose vertices are labeled in cyclic order are also restriction-compatible since the quotient graphs are all cycles. On the other hand, the family of complete bipartite graphs in Example~\ref{ex_admissible} is not restriction-compatible. \end{example} We will not attempt to completely describe all restriction-compatible families of graphs, but we may describe those families that are both restriction-compatible and admissible. \begin{proposition} If $\Gcal$ is a 1-parameter family of graphs that is both restriction-compatible and admissible, then $\Gcal$ must be either the set of path graphs, complete graphs, or edge-free graphs. \end{proposition} \begin{proof} To be an admissible family, $\Gcal$ must be equal to $\Gcal(A)$ for some set $A\subseteq\Nbb$. We wish to show that restriction-compatibility forces either $A=\{1\},\ A=\Nbb$, or $A=\emptyset$. Restriction-compatibility of these cases was observed in Example~\ref{ex_res_comp}. To prove that these are the only examples, it is enough to show that if there exists $k\in A,\ k\geq 2$, then $A=\Nbb$. Suppose such $k$ exists, and let $j\in\Nbb$ with $j<k$. Let $H=(G_{k+1})|_{[j]\cup\{k+1\}}$. Since $\{1,k+1\}\in E(G_{k+1})$, the graph $\std(H)$ is a subgraph of $G_{j+1}$ containing the edge $\{1,j+1\}$. This implies $j\in A$. On the other hand, suppose $j\in\Nbb$ with $j>k$ and set $n=(j+1)k$. Let $I\subseteq[n]$ be such that \begin{enumerate} \item $|I|=j-1$, \item $I$ does not contain any multiples of $k$, and \item the smallest element of $I$ is greater than $k$. \end{enumerate} Such a collection exists since $k\geq 2$. Now let $J=[n]\setm (I\cup\{k,n\})$. Then the graph $\std(G_n/J)$ is a subgraph of $G_{j+1}$ containing the edge $\{1,j+1\}$. Hence, $j\in A$ holds.
\end{proof} If $H$ is a subgraph of $G$ with the same vertex set $[n]$ and $\Xcal$ is in $\MTub(H)$, we let $c_H^G(\Pbb_{\Xcal})=\sum\Pbb_{\Ycal}$ where the sum ranges over $\Ycal\in\MTub(G)$ such that $\Psi_H^G(\Ycal)=\Xcal$. Suppose $\Gcal$ is a restriction-compatible family. We define $$\Delta_{\Gcal}=\Delta:\Kbb[\Gcal]\ra\Kbb[\Gcal]\otimes\Kbb[\Gcal]$$ as follows. If $\Xcal\in\MTub(G)$, let $$\Delta(\Pbb_{\Xcal})=\sum c_{\std(G|_I)}^{G^{\pr}}(\Pbb_{\std(\Xcal|_I)})\otimes c_{\std(G/I)}^{G^{\pr\pr}}(\Pbb_{\std(\Xcal/I)}),$$ where the sum is over ideals $I$ of $\Xcal$, and $G^{\pr},G^{\pr\pr}\in\Gcal$ such that $|I|=|V(G^{\pr})|$ and $|V(G)\setm I|=|V(G^{\pr\pr})|$. \begin{figure} \caption{}\label{fig:coprod} \end{figure} \begin{example}\label{ex_MR_coalg} We again consider the case $\Gcal=\{K_n\}_{n\geq 0}$ from Example~\ref{ex_MR_alg}. Every induced subgraph of $K_n$ is a complete graph, as is the quotient $K_n/I$ for any $I\subseteq[n]$. Thus, for $\Xcal\in\MTub(K_n)$, the formula for $\Delta$ simplifies to $$\Delta(\Pbb_{\Xcal})=\sum\Pbb_{\std(\Xcal|_{I})}\otimes \Pbb_{\std(\Xcal/I)},$$ where the sum ranges over the ideals of $\Xcal$. Since $\tau(\Xcal)$ is a chain $u_1<\cdots<u_n$, its order ideals are of the form $\{u_1,\ldots,u_i\}$ for $i=0,1,\ldots,n$. Under the bijection between $\MTub(K_n)$ and $\mathfrak{S}_n$, this expression becomes $$\Delta(\Fbb_u)=\sum\Fbb_{\std(u_1\cdots u_i)}\otimes \Fbb_{\std(u_{i+1}\cdots u_n)}.$$ Thus, $\Kbb[\Gcal]$ has the same coalgebra structure as $\Kbb[\mathfrak{S}_{\infty}]$. \end{example} \begin{example}\label{ex_cycle_coalg} The set $\{C_n\}_{n\geq 0}$ of cyclically ordered cycle graphs is another restriction-compatible family. In Figure~\ref{fig:coprod} we show the comultiplication applied to a $C_4$-tree, or equivalently, a maximal tubing $\Xcal$ of $C_4$. The sum is split into six terms, one for each choice of ideal of $\Xcal$. We observe that for the two ideals $I$ such that $G|_I$ is not a cycle graph, the element $c_{\std(G|_I)}^{G^{\pr}}(\Pbb_{\std(\Xcal|_I)})$ has multiple summands. For example, the fourth term corresponds with the ideal $I=\{1,3\}$. Observe that $\std(G|_I)$ is the edge-free graph on $[2]$, that $\std(\Xcal|_I)=\{\{1\}, \{2\}\}$, and that the corresponding $\std(G|_I)$-forest $T$ has $1$ and $2$ incomparable. Since $C_2$ is also the complete graph on $[2]$, each element of the fiber of $\Psi_{\std(G|_I)}^{G^{\pr}}$ over $\std(\Xcal|_I)$ is just a linear extension of $T$. \end{example} \begin{theorem} If $\Gcal$ is a restriction-compatible family, then the map $$c:\Kbb[\Gcal]\hookra\Kbb[\mathfrak{S}_{\infty}]$$ commutes with $\Delta$. In particular, $\Delta_{\Gcal}$ is coassociative. \end{theorem} \begin{proof} Fix a maximal tubing $\Xcal\in\MTub(G_n)$. We show that $\Delta(c(\Pbb_{\Xcal}))=(c\otimes c)\circ(\Delta_{\Gcal}(\Pbb_{\Xcal}))$. The element $c(\Pbb_{\Xcal})$ is supported by the permutations of $[n]$ that are linear extensions of the tree poset $\tau(\Xcal)$. Let $\Lcal(P)$ be the set of linear extensions of a poset $P$. Then, \begin{align*} \Delta(c(\Pbb_{\Xcal})) &= \sum_{u\in\Lcal(\tau(\Xcal))}\Delta(\Fbb_u)\\ &= \sum_{i=0}^n\sum_{u\in\Lcal(\tau(\Xcal))}\Fbb_{\std(u_1\cdots u_i)}\otimes \Fbb_{\std(u_{i+1}\cdots u_n)} \end{align*} If $u=u_1\cdots u_n$ is a linear extension of $\tau(\Xcal)$, then the subset $\{u_1,\ldots,u_i\}$ is an ideal, and the complement $\{u_{i+1},\ldots,u_n\}$ is an order filter. If $I$ is an order ideal, then $\tau(\Xcal)|_I=\tau(\Xcal|_I)$ and $\tau(\Xcal)|_{[n]\setm I}=\tau(\Xcal/I)$.
Putting these together, we have \begin{align*} \Delta(c(\Pbb_{\Xcal})) &= \sum_I(\sum_{u\in\Lcal(\tau(\Xcal)|_I)} \Fbb_{\std(u)}) \otimes (\sum_{w\in\Lcal(\tau(\Xcal)|_{[n]\setm I})}\Fbb_{\std(w)})\\ &= \sum_I(\sum_{u\in\Lcal(\tau(\Xcal|_I))} \Fbb_{\std(u)}) \otimes (\sum_{w\in\Lcal(\tau(\Xcal/I))}\Fbb_{\std(w)})\\ &= \sum_I c(\Pbb_{\std(\Xcal|_I)})\otimes c(\Pbb_{\std(\Xcal/I)}), \end{align*} where the sum ranges over ideals $I$ of $\tau(\Xcal)$. If $K\subseteq H\subseteq G$ is a sequence of subgraphs with a common vertex set $[n]$, then the map $c_K^G$ factors as $c_K^G=c_H^G\circ c_K^H$. Since $\Gcal$ is a restriction-compatible family, $$\sum_I c(\Pbb_{\std(\Xcal|_I)}) \otimes c(\Pbb_{\std(\Xcal/I)}) = \sum_I c_{G^{\pr}}^{K_{|I|}}c_{\std(G|_I)}^{G^{\pr}}(\Pbb_{\std(\Xcal|_I)})\otimes c_{G^{\pr\pr}}^{K_{|V\setm I|}}c_{\std(G/I)}^{G^{\pr\pr}}(\Pbb_{\std(\Xcal/I)}),$$ where $G^{\pr},G^{\pr\pr}\in\Gcal$ such that $|V(G^{\pr})|=|I|$ and $|V(G^{\pr\pr})|=|V\setm I|$. The latter sum simplifies to $$(c\otimes c)\left(\sum_I c_{\std(G|_I)}^{G^{\pr}}(\Pbb_{\std(\Xcal|_I)})\otimes c_{\std(G/I)}^{G^{\pr\pr}}(\Pbb_{\std(\Xcal/I)})\right)=(c\otimes c)\circ(\Delta_{\Gcal}(\Pbb_{\Xcal})),$$ as desired. \end{proof} \subsection{Insertional families of lattice congruences}\label{subsec_insertional} If $\alpha=(i,j,\epsilon)$ is an arc on $[n]$, we define the \emph{deletion} $\alpha\setm k$ to be the arc on $[n-1]$ where $$\alpha\setm k=\begin{cases}(i-1,j-1,\epsilon)\ \mbox{if }k<i\\(i,j,\epsilon)\ \mbox{if }k>j\\(i,j-1,\epsilon^{\pr})\ \mbox{if }i\leq k\leq j\end{cases},$$ where $\epsilon^{\pr}_l=\epsilon_l$ when $l\leq k-i$ and $\epsilon^{\pr}_l=\epsilon_{l+1}$ when $l>k-i$. That is, $\epsilon^{\pr}$ is obtained from $\epsilon$ by deleting some $+$ or $-$ entry. Reversing this operation, we say the arc $\beta$ is obtained from $\alpha$ by \emph{inserting} $k$ if $\alpha=\beta\setm k$. A sequence of lattice congruences $\mathbf{\Theta}=\{\Theta_n\}_{n\geq 0}$ is an \emph{insertional family} if for any arc $\alpha$ contracted by $\Theta_n$, any arc $\beta$ obtained by inserting some $k\in[n+1]$ is contracted by $\Theta_{n+1}$. The analogue of Theorem~\ref{thm_translational_subalg} proved in \cite[Theorem 1.3, Proposition 8.1]{reading:2005lattice} is as follows. \begin{theorem} If $\mathbf{\Theta}=\{\Theta_n\}_{n\geq 0}$ is an insertional family, then the map $$c:\Kbb[Z_{\infty}^{\mathbf{\Theta}}]\ra\Kbb[\mathfrak{S}_{\infty}]$$ embeds $\Kbb[Z_{\infty}^{\mathbf{\Theta}}]$ as a sub-coalgebra of $\Kbb[\mathfrak{S}_{\infty}]$. \end{theorem} We now prove the second part of Theorem~\ref{thm_main}. \begin{proof}[Proof of Theorem~\ref{thm_main}(\ref{thm_main_2})] Let $\Gcal=\{G_n\}_{n\geq 0}$ be a 1-parameter family of filled graphs, and let $\mathbf{\Theta}=\{\Theta_n\}_{n\geq 0}$ be the corresponding sequence of lattice congruences. We must prove that $\Gcal$ is restriction-compatible if and only if $\mathbf{\Theta}$ is insertional. Suppose first that $\mathbf{\Theta}$ is an insertional family of lattice congruences. To prove that $\Gcal$ is restriction-compatible, it suffices to show that $\std(G_n/\{i_1,\ldots,i_l\})$ is a subgraph of $G_{n-l}$ for $1\leq i_1<\cdots<i_l\leq n$. Indeed, it is enough to prove this statement for $l=1$ since if $H$ and $G$ are graphs on $[n]$ such that $E(H)\subseteq E(G)$, the quotient $H/i$ is a subgraph of $G/i$ for any $i\in[n]$. 
Hence, the statement for $l=1$ gives a sequence of inclusions: $$E(\std(G_n/\{i_1,\ldots,i_l\}))\subseteq\cdots\subseteq E(\std(G_{n-l+1}/\{i_1\}))\subseteq E(G_{n-l}).$$ For $k\in[n]$, we show that $\std(G_n/k)$ is a subgraph of $G_{n-1}$. Suppose $\{i,j\}$ is not an edge of $G_{n-1}$. Then $\Theta_{n-1}$ contracts the arc $\alpha=(i,j,+)$. Let $\beta^+=(i^{\pr},j^{\pr},+)$ be the arc obtained from $\alpha$ by inserting $k$ such that its sign vector $\epsilon$ is $(+,\ldots,+)$. If $i<k<j+1$, then there is another arc $\beta^-=(i^{\pr},j^{\pr},\epsilon^{\pr})$ such that $\epsilon^{\pr}_{k-i^{\pr}}=-$. Since $\mathbf{\Theta}$ is insertional, both $\beta^+$ and $\beta^-$ are contracted by $\Theta_n$. We claim that $\{i,j\}$ is not an edge of $\std(G_n/k)$. If, to the contrary, it is an edge of $\std(G_n/k)$, then either $\{i^{\pr},j^{\pr}\}$ is an edge of $G_n$, or $\{i^{\pr},k\}$ and $\{k,j^{\pr}\}$ are both edges of $G_n$. In the former case, the arc $\beta^+$ is not contracted by $\Theta_n$, a contradiction. On the other hand, suppose $\{i^{\pr},j^{\pr}\}$ is not an edge, but $\{i^{\pr},k\}$ and $\{k,j^{\pr}\}$ both are. Then $i^{\pr}<k<j^{\pr}$ holds since $G_n$ is filled. But this means $i^{\pr}=i$ and $j^{\pr}=j+1$, so the arc $\beta^-$ is well-defined, and it is contracted by $\Theta_n$. Since $\Theta_n$ is generated by positive arcs, either $(i^{\pr},k,+)$ or $(k,j^{\pr},+)$ must be contracted by $\Theta_n$. But this contradicts the assumption that $\{i^{\pr},k\}$ and $\{k,j^{\pr}\}$ are edges of $G_n$.
Now assume that $\Gcal$ is a restriction-compatible family. Let $\alpha=(i,j,\epsilon)$ be an arc contracted by $\Theta_{n-1}$, and pick $k\in[n]$. We claim that any arc $\beta$ obtained by inserting $k$ into $\alpha$ is contracted by $\Theta_n$. This will prove that $\mathbf{\Theta}$ is an insertional family. Since $\Theta_{n-1}$ is generated by positive arcs, there exists a positive subarc $\alpha^{\pr}=(i^{\pr},j^{\pr},+)$ of $\alpha$ that is contracted by $\Theta_{n-1}$. As a result, the pair $\{i^{\pr},j^{\pr}\}$ is not an edge of $G_{n-1}$. Moreover, any arc $\beta$ of $[n]$ with $\beta\setm k=\alpha$ contains a subarc $\beta^{\pr}$ such that $\beta^{\pr}\setm k=\alpha^{\pr}$. Hence, to show that $\beta$ is contracted by $\Theta_n$, it is enough to show that $\beta^{\pr}$ is contracted by $\Theta_n$. If $\beta^{\pr}$ is a positive arc, then it follows that $\beta^{\pr}$ is contracted by $\Theta_n$ since $E(\std(G_n\setm k))\subseteq E(G_{n-1})$. If $\beta^{\pr}$ is not a positive arc, then $i^{\pr}<k\leq j^{\pr}$ and $\beta^{\pr}=(i^{\pr},j^{\pr}+1,\epsilon^{\pr})$ where $\epsilon^{\pr}_{k-i^{\pr}}$ is the only negative entry in $\epsilon^{\pr}$. In this case, since $E(\std(G_n/k))\subseteq E(G_{n-1})$, either $\{i^{\pr},k\}$ or $\{k,j^{\pr}+1\}$ is not an edge, which means that some subarc of $\beta^{\pr}$ is contracted by $\Theta_n$. It follows that $\beta^{\pr}$ is contracted by $\Theta_n$ as well. \end{proof}
\section{Open problems}\label{sec:other}
\subsection{Lattices of maximal tubings}\label{subsec:tubing_lattice} Not every poset of maximal tubings is a lattice. For example, the two indicated atoms of the poset of maximal tubings shown in Figure~\ref{fig_nl} have two minimal upper bounds, so it is not a lattice.
\begin{figure} \caption{A poset of maximal tubings that is not a lattice.\label{fig_nl}} \end{figure}
Corollary~\ref{g-perm lattice cong} characterizes graphs $G$ for which $L_G$ is a meet-semilattice quotient of the weak order.
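Whether a given finite poset such as $L_G$ is a lattice can be checked mechanically, and a check of this kind could be used to reproduce the exhaustive search over four-vertex graphs mentioned below. The following Python sketch is only an illustration (it is not part of the paper's toolchain); it assumes the poset is given by its ground set together with a comparison function \texttt{leq}, and it tests the criterion used above: every pair of elements must have a unique minimal upper bound and a unique maximal lower bound.
\begin{verbatim}
from itertools import combinations

def is_lattice(elements, leq):
    """Check whether the finite poset (elements, leq) is a lattice."""
    def minimal(s):
        return [x for x in s if not any(leq(y, x) and y != x for y in s)]
    def maximal(s):
        return [x for x in s if not any(leq(x, y) and y != x for y in s)]
    for x, y in combinations(elements, 2):
        uppers = [z for z in elements if leq(x, z) and leq(y, z)]
        lowers = [z for z in elements if leq(z, x) and leq(z, y)]
        # A pair with no bound, or with two incomparable minimal upper
        # bounds (resp. maximal lower bounds), violates the lattice axioms.
        if len(minimal(uppers)) != 1 or len(maximal(lowers)) != 1:
            return False
    return True

# Toy usage: the Boolean lattice of subsets of {1, 2}, ordered by inclusion.
subsets = [frozenset(s) for s in [(), (1,), (2,), (1, 2)]]
print(is_lattice(subsets, lambda a, b: a <= b))  # True
\end{verbatim}
In a finite poset a unique minimal upper bound is automatically a least upper bound, so the test above is equivalent to the existence of all joins and meets.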
A more fundamental problem is to characterize all graphs such that $L_G$ is a lattice. To this end, we make the simple observation that an interval $L^{\pr}$ of a lattice $L$ is a sublattice of $L$. In particular, if $G^{\pr}$ is any graph obtained by contracting or deleting vertices of $G$ such that $L_{\std(G^{\pr})}$ is not a lattice, then $L_G$ is not a lattice either. Continuing to borrow from matroid terminology, we say that $G^{\pr}$ is a \emph{minor} of $G$ if it is the standardization of a graph obtained from $G$ by a sequence of contractions and deletions.
\begin{problem} Give an explicit list of minors such that $L_G$ is a lattice whenever $G$ does not contain a minor from the list. \end{problem}
By exhaustive search, we found that when $G$ is a connected graph with four vertices, the poset $L_G$ is not a lattice if and only if $\{1,3\}$ and $\{2,4\}$ are edges but $\{2,3\}$ is not an edge of $G$. These are the seven graphs shown in Figure~\ref{fig_nlex}.
\begin{figure} \caption{The seven connected graphs on four vertices for which $L_G$ is not a lattice.\label{fig_nlex}} \end{figure}
\subsection{Cyclohedra}\label{subsec:cycles} Let $C_n$ be the $n$-cycle graph, with vertices labeled $1,2,\ldots,n$ in cyclic order. The graph associahedron $P_{C_n}$ is known as a \emph{cyclohedron}. The cyclohedron is combinatorially equivalent to the \emph{Type $B_{n-1}$ associahedron} \cite{simion:2003typeB}. Its facial structure is usually described in terms of Type $B_n$ Coxeter-Catalan combinatorial objects, e.g.\ centrally symmetric triangulations of polygons. The graph associahedron $P_{C_n}$ does not have the same normal fan as the Type $B_{n-1}$ associahedron, however. This geometric distinction is relevant in many of its applications. The graph associahedron $P_{C_n}$ is used to study the self-linking of knots \cite{bott.taubes:1994self} or to tile the moduli space $\ov{Z}^n$ in \cite{devadoss:2002space}, whereas the Type $B_n$ associahedron arises in the theory of cluster algebras \cite{fomin.zelevinsky:2003clusterII}. From the Coxeter-Catalan point of view, the vertices of the Type $B_n$ associahedron can be partially ordered in several ways, which are called Cambrian lattices \cite{reading:2006cambrian,thomas:2006tamari}. A \emph{Cambrian lattice} is a certain lattice quotient of the weak order of a finite Coxeter system. We remark that the poset of maximal tubings $L_{C_n}$ is not isomorphic to a Type $B_{n-1}$ Cambrian lattice for $n\geq 3$, despite the fact that they arise as orientations of the same undirected graph. Indeed, $L_{C_3}=L_{K_3}$ is the weak order of Type $A_2$, which is not isomorphic to any Cambrian lattice of Type $B_2$. Cambrian lattices have a remarkable structure: they are all semidistributive lattices \cite{reading:2006cambrian}. A lattice is \emph{semidistributive} if for any three elements $x,y,z$: \begin{itemize} \item if $x\wedge z=y\wedge z$, then $(x\vee y)\wedge z=x\wedge z$ and \item if $x\vee z=y\vee z$, then $(x\wedge y)\vee z=x\vee z$. \end{itemize} The weak order is known to be semidistributive, so when $G$ is filled, the poset $L_G$ inherits semidistributivity as a lattice quotient of the weak order. We do not know of a way to represent $L_{C_n}$ as a lattice quotient of the weak order for $n\geq 4$. In particular, the canonical map $\Psi_{C_n}:\mathfrak{S}_n\ra L_{C_n}$ is not a lattice map, as $C_n$ is not filled for $n\geq 4$. However, we have verified by computer calculation that $L_{C_n}$ is a semidistributive lattice for $n\leq 6$. This has led us to the following question.
\begin{question} Is $L_{C_n}$ a semidistributive lattice for each $n\geq 1$? \end{question}
We remark that the poset $L_G$ need not be semidistributive even when it is a lattice. For example, one may check that the star graph $G$ with $E(G)=\{\{1,2\},\{1,3\},\{1,4\}\}$ has a lattice of maximal tubings that is not semidistributive.
\subsection{Facial weak order}\label{subsec:facial_weak} For $n\geq 0$, let $\Pi_n$ be the set of \emph{ordered set partitions} $(B_1,\ldots,B_l)$ of $[n]$. In \cite{chapoton:2000algebres}, Chapoton defined a Hopf algebra $\Kbb[\Pi_{\infty}]=\bigoplus\Kbb[\Pi_n]$ on the set of ordered set partitions. Identifying maximally refined ordered set partitions $(B_1,\ldots,B_n)$ with permutations, the natural inclusion $\Kbb[\mathfrak{S}_{\infty}]\ra\Kbb[\Pi_{\infty}]$ is a Hopf algebra map. This led to the development of the \emph{facial weak order} by Palacios and Ronco \cite{palacios:2006weak}, which is a partial ordering on $\Pi_n$ distinct from the usual refinement order. With respect to this order, the product of two ordered set partitions is a sum of elements in an interval of the facial weak order. Dermenjian, Hohlweg, and Pilaud \cite{dermenjian:2018facial} proved that the facial weak order on $\Pi_n$ is a lattice for all $n\geq 1$. Furthermore, they showed that any lattice congruence of the weak order may be ``lifted'' to a lattice congruence of the facial weak order. This suggests the following question:
\begin{question} Does a translational (resp. insertional) family $\mathbf{\Theta}=\{\Theta_n\}_{n\geq 0}$ of lattice congruences of the weak order lift to a family $\hat{\mathbf{\Theta}}=\{\hat{\Theta}_n\}_{n\geq 0}$ of congruences of the facial weak order such that $\Kbb[\Pi_{\infty}/\hat{\mathbf{\Theta}}]$ is a subalgebra (resp. sub-coalgebra) of $\Kbb[\Pi_{\infty}]$? \end{question}
\end{document}
\begin{document} \begin{article} \begin{opening}
\title{High Order Phase Fitted Multistep Integrators for the Schr\"{o}dinger Equation with Improved Frequency\\ Tolerance}
\author{D.S. \surname{Vlachos}\thanks{e-mail: [email protected]}}
\author{Z.A. \surname{Anastassi}\thanks{e-mail: [email protected]}}
\author{T.E. \surname{Simos}\thanks{Highly Cited Researcher, Active Member of the European Academy of Sciences and Arts, Corresponding Member of the European Academy of Sciences, Corresponding Member of the European Academy of Arts, Sciences and Humanities. Please use the following address for all correspondence: Dr. T.E. Simos, 10 Konitsis Street, Amfithea - Paleon Faliron, GR-175 64 Athens, Greece, Tel: 0030 210 94 20 091, e-mail: [email protected], [email protected]}}
\institute{Laboratory of Computational Sciences, Department of Computer Science and Technology, Faculty of Sciences and Technology, University of Peloponnese, GR-221 00 Tripolis, Greece}
\runningtitle{High Order Phase Fitted Multistep Integrators for the Schr\"{o}dinger Equation}
\runningauthor{D.S. Vlachos, Z.A. Anastassi, T.E. Simos}
\begin{abstract} In this work we introduce a new family of 14-step linear multistep methods for the integration of the Schr\"odinger equation. The new methods are phase fitted, and they are additionally designed to improve the frequency tolerance. This is achieved by eliminating the first derivatives of the phase lag function at the fitted frequency, forcing the phase lag function to be `\textit{flat}' enough in the neighborhood of the fitted frequency. The efficiency of the new family of methods is demonstrated via error analysis and numerical applications. \end{abstract}
\keywords{Numerical solution, Schr\"odinger equation, multistep methods, hybrid methods, P-stability, phase-lag, phase-fitted}
\classification{PACS}{02.60, 02.70.Bf, 95.10.Ce, 95.10.Eg, 95.75.Pq}
\end{opening}
\section{Introduction} \label{intro} The numerical integration of systems of ordinary differential equations with oscillatory solutions has been a subject of research during the past decades. This type of ODE is often encountered in real problems arising in computational chemistry, like the Schr\"{o}dinger equation. For problems with highly oscillatory solutions, standard methods used without specialization can require a huge number of steps to track the oscillations. One way to obtain a more efficient integration process is to construct numerical methods with an increased algebraic order, although the implementation of high algebraic order meets several difficulties like resonances \cite{quinlan_arxiv_astro_ph_9901136}. On the other hand, there are some special techniques for optimizing numerical methods. Trigonometrical fitting and phase-fitting are some of them, producing methods with variable coefficients, which depend on $v = \omega h$, where $\omega$ is the dominant frequency of the problem and $h$ is the step length of integration. More precisely, the coefficients of a general linear method are found from the requirement that it integrates exactly powers up to degree $p+1$. For problems having oscillatory solutions, more efficient methods are obtained when they are exact for every linear combination of functions from the reference set \begin{equation} \{1, x, \ldots , x^K , e^{\pm \mu x},\ldots , x^P e^{\pm \mu x}\}\label{equ_exp_fit} \end{equation} This technique is known as exponential (or trigonometric if $\mu=i\omega$) fitting and has a long history \cite{gautschi_NM_3_381_61,lyche_NM_19_65_72}.
The set (\ref{equ_exp_fit}) is characterized by two integer parameters, $K$ and $P$. The set in which there is no classical component is identified by $K=-1$, while the set in which there is no exponential fitting component (the classical case) is identified by $P=-1$. Parameter $P$ will be called the level of tuning. An important property of exponentially fitted algorithms is that they tend to the corresponding classical ones when the involved frequencies tend to zero, a fact which allows one to say that exponential fitting represents a natural extension of the classical polynomial fitting. The examination of the convergence of exponentially fitted multistep methods is included in Lyche's theory \cite{lyche_NM_19_65_72}. A large number of significant methods of high practical importance have been presented in the literature. The general theory is presented in detail in \cite{ixaru_Book_EF_KAP_04}.
When considering the accuracy of a method for oscillatory problems, it is more appropriate to work with the phase-lag rather than with the principal local truncation error. We mention the pioneering paper of Brusa and Nigro \cite{brusa_IJNME_15_685_80}, in which the phase-lag property was introduced. This is actually another type of truncation error, namely the angle between the analytical solution and the numerical solution. On the other hand, exponential fitting is accurate only when a good estimate of the dominant frequency of the solution is known in advance. This means that in practice, if a small change in the dominant frequency is introduced, the efficiency of the method can be dramatically altered. It is well known that for equations similar to the harmonic oscillator the most efficient exponentially fitted methods are those with the highest tuning level. A lot of significant work has been done in this field during the last years, focusing mainly, for obvious reasons, on the solution of the Schr\"{o}dinger equation (see for example \cite{ix78}--\cite{jnaiam3_11}).
In this paper we present a new family of methods based on the 14-step linear multistep method of Quinlan and Tremaine \cite{quinlan_AJ_100_1694_90}. The new methods are constructed by requiring that the phase-lag function and its first derivatives vanish at a predefined frequency. Error analysis and numerical experiments show that the new methods exhibit improved characteristics concerning the solution of the time-independent Schr\"odinger equation. The paper is organized as follows: In Section 2, the general theory of the new methodology is presented. In Section 3, the new methods are described in detail. In Section 4 the stability properties of the new methods are investigated. Section 5 presents the results from the numerical experiments and, finally, conclusions are drawn in Section 6.
\section{Phase-lag analysis of symmetric multistep methods} Consider the differential equation \begin{equation} \frac{d^2y(t)}{dt^2}=f(t,y),\;y(t_0)=y_0,\;y'(t_0)=y'_0 \label{equ_ref_diff} \end{equation} and linear multistep methods of the form \begin{equation} \sum_{j=0}^{J}a_jy_{n+j}=h^2\sum_{j=0}^{J}b_jf_{n+j} \label{equ_ref_meth} \end{equation} where $y_{n+j}=y(t_0+(n+j)h)$, $f_{n+j}=f(t_0+(n+j)h,y(t_0+(n+j)h))$ and $h$ is the step size of the method.
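As a concrete illustration of how a method of the form (\ref{equ_ref_meth}) is used in practice, the following Python sketch advances the numerical solution of $y''=f(t,y)$ with an explicit symmetric method, i.e.\ one with $a_J=1$ and $b_J=0$ (the 14-step method considered below is of this type). The function name and the toy two-step test are our own illustrative choices and are not part of the paper.
\begin{verbatim}
import numpy as np

def lmm_advance(a, b, f, t0, y_start, h, n_steps):
    """Advance y'' = f(t, y) with the explicit multistep recurrence
    sum_j a_j y_{n+j} = h^2 sum_j b_j f_{n+j}, assuming a[J] = 1 and
    b[J] = 0.  y_start must contain the J starting values y_0 .. y_{J-1}."""
    J = len(a) - 1
    assert a[J] == 1 and b[J] == 0, "explicit form assumed"
    y = list(y_start)
    for n in range(n_steps):
        t_n = t0 + n * h
        rhs = h**2 * sum(b[j] * f(t_n + j * h, y[n + j]) for j in range(J))
        y.append(rhs - sum(a[j] * y[n + j] for j in range(J)))
    return np.array(y)

# Toy check: the 2-step Stoermer method on y'' = -y, y(0)=0, y'(0)=1.
a, b = [1.0, -2.0, 1.0], [0.0, 1.0, 0.0]
h = 0.01
ys = lmm_advance(a, b, lambda t, y: -y, 0.0, [0.0, np.sin(h)], h, 500)
print(abs(ys[-1] - np.sin(h * (len(ys) - 1))))   # small discrepancy
\end{verbatim}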
With the method (\ref{equ_ref_meth}), we associate the functional \begin{equation} L(h,a,b,y(t))=\sum_{j=0}^Ja_jy(t+j\cdot h)-h^2\sum_{j=0}^Jb_jy''(t+j\cdot h) \end{equation} where $a,b$ are the vectors of coefficients $a_j$ and $b_j$ respectively, and $y(t)$ is an arbitrary function. The algebraic order of the method (\ref{equ_ref_meth}) is $p$ if \begin{equation} L(h,a,b,y(t))=C_{p+2}h^{p+2}y^{(p+2)}(t)+O(h^{p+3}) \label{equ_ref_error} \end{equation} The coefficients $C_q$ are given by \begin{equation}\begin{array}{l} C_0=\sum_{j=0}^J a_j \nonumber \\ C_1=\sum_{j=0}^J j\cdot a_j \nonumber \\ C_q=\frac{1}{q!}\sum_{j=0}^Jj^q\cdot a_j -\frac{1}{(q-2)!}\sum_{j=0}^Jj^{q-2}b_j,\;q\geq 2 \end{array}\end{equation} The principal local truncation error (PLTE) is the leading term of (\ref{equ_ref_error}) \begin{equation} PLTE=C_{p+2}h^{p+2}y^{(p+2)}(t) \end{equation} The following assumptions will be considered in the rest of the paper: \begin{enumerate} \item $a_J=1$, since we can always divide the coefficients of (\ref{equ_ref_meth}) by $a_J$. \item $|a_0|+|b_0|\neq 0$, since otherwise we can replace $J$ with $J-1$. \item $\sum_{j=0}^J |b_j| \neq 0$, since otherwise the solution of (\ref{equ_ref_meth}) would be independent of (\ref{equ_ref_diff}). \item The method (\ref{equ_ref_meth}) is at least of order one. \item The method (\ref{equ_ref_meth}) is zero stable, which means that the roots of the polynomial \begin{equation} p(z)=\sum_{j=0}^Ja_jz^j \end{equation} all lie in the unit disc, and those that lie on the unit circle have multiplicity one. \item The method (\ref{equ_ref_meth}) is symmetric, which means that \begin{equation} a_j=a_{J-j},\;b_j=b_{J-j},\;j=0(1)J \end{equation} It is easily proved that both the order of the method and the step number $J$ are even numbers \cite{lambert_JIMA_18_189_76}. \end{enumerate} Consider now the test problem \begin{equation} y''(t)=-\omega ^2 y(t) \label{equ_ref_equ} \end{equation} where $\omega$ is a constant. The numerical solution of (\ref{equ_ref_equ}) obtained by applying method (\ref{equ_ref_meth}) is described by the difference equation \begin{equation} \sum_{j=1}^{J/2} A_j(s^2)(y_{n+j}+y_{n-j})+A_0(s^2)y_n=0 \end{equation} with \begin{equation} A_j(s^2)=a_{\frac{J}{2}-j}+s^2\cdot b_{\frac{J}{2}-j} \end{equation} and $s=\omega h$. The characteristic equation is then given by \begin{equation} \sum_{j=1}^{J/2} A_j(s^2)(z^j+z^{-j})+A_0(s^2)=0 \label{equ_char_equ} \end{equation} and the interval of periodicity $(0,s_0^2)$ is then defined such that for $s\in (0,s_0)$ the roots of (\ref{equ_char_equ}) are of the form \begin{equation} z_1=e^{i\lambda (s)},\;z_2=e^{-i\lambda (s)},\;|z_j|\leq 1,\;3\leq j\leq J \end{equation} where $\lambda (s)$ is a real function of $s$. The phase-lag $PL$ of the method (\ref{equ_ref_meth}) is then defined by \begin{equation} PL=s-\lambda (s) \end{equation} and is of order $q$ if \begin{equation} PL=c\cdot s^{q+1}+O(s^{q+3}) \end{equation} In general, the coefficients of the method (\ref{equ_ref_meth}) depend on some parameter $v$; thus the coefficients $A_j$ are functions of both $s^2$ and $v$. The following theorem was proved by Simos and Williams \cite{simos_CC_23_513_99}: For the symmetric method (\ref{equ_ref_meth}) the phase-lag is given by \begin{equation} PL(s,v)=\frac{2\sum_{j=1}^{J/2}A_j(s^2,v)\cdot \cos(j\cdot s) +A_0(s^2,v)}{2\sum_{j=1}^{J/2}j^2A_j(s^2,v)} \end{equation} We are now in a position to describe the new methodology.
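Before turning to the new methodology, we note that the phase-lag expression above is straightforward to evaluate numerically once the coefficients of a symmetric method are known. The Python sketch below is only illustrative (the simple two-step method used as a test is not one of the methods constructed in this paper); it builds the quantities $A_j(s^2)$ and returns the quoted ratio.
\begin{verbatim}
import numpy as np

def phase_lag(a, b, s):
    """Evaluate the phase-lag expression quoted above for a symmetric
    J-step method (J even) with coefficients a_0..a_J and b_0..b_J,
    where A_j(s^2) = a_{J/2-j} + s^2 * b_{J/2-j}."""
    J = len(a) - 1
    m = J // 2
    A = [a[m - j] + s**2 * b[m - j] for j in range(m + 1)]
    num = 2.0 * sum(A[j] * np.cos(j * s) for j in range(1, m + 1)) + A[0]
    den = 2.0 * sum(j**2 * A[j] for j in range(1, m + 1))
    return num / den

# Toy illustration with the classical 2-step Stoermer method; the printed
# values shrink roughly like s**4 / 24 as s decreases.
for s in (0.4, 0.2, 0.1):
    print(s, phase_lag([1.0, -2.0, 1.0], [0.0, 1.0, 0.0], s))
\end{verbatim}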
In order to efficiently integrate the Schr\"odinger equation, it is good practice to calculate the coefficients of the numerical method by forcing the phase lag to be zero at a specific frequency. But since the appropriate frequency is problem dependent and in general not known exactly, we may assume that there is an error in the frequency estimation. It would be of great importance to force the phase-lag to be insensitive to this error. Thus, beyond the vanishing of the phase-lag, we also force its first derivatives to be zero.
\section{Construction of the new methods}
\subsection{Classical Method} The family of new methods is based on the 14-step linear multistep method of Quinlan and Tremaine \cite{quinlan_AJ_100_1694_90}, which is of the form (\ref{equ_ref_meth}) with coefficients \ason \begin{equation} \begin{array}{l} a_0=1 \;\; a_1=-2 \;\; a_2=2 \;\; a_3=-1 \;\; a_4=0 \;\; a_5=0 \;\; a_6=0 \;\; a_7=0 \\ b_0=0\\ \displaystyle b_1=\frac{433489274083}{237758976000} \;\; b_2=-\frac{28417333297}{4953312000} \;\; b_3=\frac{930518896733}{39626496000}\\ \displaystyle b_4=-\frac{176930551859}{2971987200} \;\; b_5=\frac{7854755921}{65228800} \;\; b_6=-\frac{146031020287}{825552000}\\ \displaystyle b_7=\frac{577045151693}{2830464000} \end{array} \label{equ_base_meth} \end{equation} \asoff The PLTE of the method is given by \begin{equation} PLTE=\frac{152802083671 y^{(16)} h^{16}}{2853107712000}+O\left(h^{18}\right) \end{equation}
\subsection{New Methods using Phase Fitting} The methods that are constructed are named \textit{PF-Di}, where: \begin{itemize} \item \textit{PF-D0}: the phase lag function is zero at the frequency $v=\omega h$. \item \textit{PF-D1}: the phase lag function and its first derivative are zero at the frequency $v=\omega h$. \item \textit{PF-D2}: the phase lag function and its first and second derivatives are zero at the frequency $v=\omega h$. \item \textit{PF-D3}: the phase lag function and its first, second and third derivatives are zero at the frequency $v=\omega h$. \item \textit{PF-D4}: the phase lag function and its first, second, third and fourth derivatives are zero at the frequency $v=\omega h$. \item \textit{PF-D5}: the phase lag function and its first, second, third, fourth and fifth derivatives are zero at the frequency $v=\omega h$. \item \textit{PF-D6}: the phase lag function and its first, second, third, fourth, fifth and sixth derivatives are zero at the frequency $v=\omega h$. \end{itemize} The coefficients of the methods are given in the form: \begin{equation}\begin{array}{l} \displaystyle b^i_1= \frac{b_{1,num}^i}{b_{denom}^i}, \quad b^i_2=\frac{b_{2,num}^i}{b_{denom}^i}, \quad b^i_3= \frac{b_{3,num}^i}{b_{denom}^i}\\ \displaystyle b^i_4= \frac{b_{4,num}^i}{b_{denom}^i}, \quad b^i_5= \frac{b_{5,num}^i}{b_{denom}^i}, \quad b^i_6= \frac{b_{6,num}^i}{b_{denom}^i}, \quad b^i_7= \frac{b_{7,num}^i}{b_{denom}^i} \end{array}\end{equation} where the coefficients $b^i$ correspond to the method \textit{PF-Di}. Since for small values of $v$ the above formulae are subject to heavy cancellations, the Taylor expansions of the coefficients have been calculated as $b_T^i$. The exact formulae of all coefficients are given in the appendix.
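The role of the Taylor forms $b_T^i$ can already be seen on a much simpler phase-fitted coefficient. The sketch below uses the two-step method $y_{n+1}-2y_n+y_{n-1}=h^2\,b(v)\,f_n$, whose phase-fitted coefficient is $b(v)=2(1-\cos v)/v^2$; this toy coefficient is not one of the paper's coefficients, but its closed form suffers exactly the kind of cancellation for small $v$ that is described above, while the truncated series remains accurate.
\begin{verbatim}
import numpy as np

def b_closed(v):
    # Closed-form phase-fitted coefficient of the toy two-step method.
    return 2.0 * (1.0 - np.cos(v)) / v**2

def b_taylor(v):
    # Truncated Taylor expansion of the same coefficient, used for small v
    # in the same spirit as the series b^i_T tabulated in the appendix.
    return 1.0 - v**2 / 12.0 + v**4 / 360.0

for v in (1e-1, 1e-4, 1e-8):
    print(v, b_closed(v), b_taylor(v))
# For v ~ 1e-8 the difference 1 - cos(v) is lost to roundoff in double
# precision, so the closed form returns a meaningless value, while the
# truncated series stays accurate to machine precision.
\end{verbatim}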
The PLTEs of the methods are: \ason \begin{equation}\begin{array}{ll} \displaystyle PLTE^0 = & \displaystyle \frac{152802083671}{2853107712000} (y^{(14)} \omega ^2+y^{(16)}) h^{16}+O(h^{18}) \nonumber \\ \displaystyle PLTE^1 = & \displaystyle \frac{152802083671}{2853107712000} (y^{(12)} \omega ^4+2 y^{(14)} \omega ^2+y^{(16)}) h^{16}+O(h^{18}) \nonumber \\ \displaystyle PLTE^2 = & \displaystyle \frac{152802083671}{2853107712000} (y^{(10)} \omega ^6+3 y^{(12)} \omega ^4+\\ & \displaystyle 3 y^{(14)} \omega ^2+y^{(16)}) h^{16}+O(h^{18}) \nonumber \\ \displaystyle PLTE^3 = & \displaystyle \frac{152802083671}{2853107712000} (y^{(8)} \omega ^8+4 y^{(10)} \omega ^6+6 y^{(12)} \omega ^4+4 y^{(14)} \omega ^2\\ & \displaystyle +y^{(16)}) h^{16}+O(h^{18}) \nonumber \\ \displaystyle PLTE^4 = & \displaystyle \frac{152802083671}{2853107712000} (y^{(6)} \omega ^{10}+5 y^{(8)} \omega ^8+10 y^{(10)} \omega ^6+10 y^{(12)} \omega ^4\\ & \displaystyle +5 y^{(14)} \omega ^2+y^{(16)}) h^{16}+O(h^{18}) \nonumber \\ \displaystyle PLTE^5 = & \displaystyle \frac{152802083671}{2853107712000} (y^{(4)} \omega ^{12}+6 y^{(6)} \omega ^{10}+15 y^{(8)} \omega ^8+20 y^{(10)} \omega ^6\\ & \displaystyle +15 y^{(12)} \omega ^4+6 y^{(14)} \omega ^2+y^{(16)}) h^{16}+O(h^{18}) \nonumber \\ \displaystyle PLTE^6 = & \displaystyle \frac{152802083671}{2853107712000} (y^{(2)} \omega ^{14}+7 y^{(4)} \omega ^{12}+21 y^{(6)} \omega ^{10}+35 y^{(8)} \omega ^8\\ & \displaystyle +35 y^{(10)} \omega ^6+21 y^{(12)} \omega ^4+7 y^{(14)} \omega ^2+y^{(16)}) h^{16}+O(h^{18}) \nonumber \end{array}\end{equation} \asoff
\section{Stability Analysis} The stability of the new methods is studied by considering the test equation \begin{equation} \frac{d^2y(t)}{dt^2}=-\sigma ^2 y(t) \end{equation} and the linear multistep method (\ref{equ_ref_meth}) for the numerical solution. In the above equation $\sigma \neq \omega$ ($\omega$ is the frequency at which the phase-lag function and its derivatives vanish). By setting $s=\sigma h$ and $v=\omega h$ we get for the characteristic equation of the applied method \begin{equation} \sum_{j=1}^{J/2} A_j(s^2,v)(z^j+z^{-j})+A_0(s^2,v)=0 \end{equation} where \begin{equation} A_j(s^2,v)=a_{\frac{J}{2}-j}(v)+s^2\cdot b_{\frac{J}{2}-j}(v) \end{equation} The motivation of the above analysis is straightforward: Although the coefficients of the method (\ref{equ_ref_meth}) are designed in such a way that the phase-lag and its first derivatives vanish at the frequency $\omega$, the frequency $\omega$ itself is unknown and only an estimate can be made. Thus, if the correct frequency of the problem is $\sigma$, we have to check whether the method is stable, that is, whether the roots of the characteristic equation lie in the unit disk. For this reason we draw in the $s$--$v$ plane the areas in which the method is stable. Figure \ref{fig:1} shows the stability regions for the eight methods (the classical one, the phase fitted one, and those with first, second, third, fourth, fifth and sixth phase lag derivative elimination). Note here that the $s$-axis corresponds to the real frequency while the $v$-axis corresponds to the estimated frequency used to construct the parameters of the method.
\section{Numerical Results} The radial Schr\"odinger equation is given by: \begin{equation} y''(x)=\left(\frac{l(l+1)}{x^2}+V(x)-E\right)y(x) \label{def_schr_equ} \end{equation} where $\frac{l(l+1)}{x^2}$ is the centrifugal potential, $V(x)$ is the potential, $E$ is the energy and $W(x)=\frac{l(l+1)}{x^2}+V(x)$ is the effective potential.
It holds that $\lim_{x\rightarrow \infty}V(x)=0$ and therefore $\lim_{x\rightarrow \infty}W(x)=0$. We consider that $E>0$ and we divide the interval $[0,+\infty)$ into subintervals $[a_i,b_i]$ so that $W(x)$ can be considered constant inside each subinterval with value $\hat{W}_i$. The problem (\ref{def_schr_equ}) can now be expressed by the equations \begin{equation} y''_i=(\hat{W}_i-E)y_i \end{equation} whose solutions are \begin{equation} y_i(x)=A_ie^{\sqrt{\hat{W}_i-E}x}+B_ie^{-\sqrt{\hat{W}_i-E}x} \end{equation} with $A_i,B_i \in \mathbb{R}$. We will integrate problem (\ref{def_schr_equ}) with $l = 0$ on the interval $[0, 15]$ using the well known Woods-Saxon potential: \begin{equation} V(x)=\frac{u_0}{1+q}+\frac{u_1q}{(1+q)^2}, \;\; q=e^{\frac{x-x_0}{a}} \end{equation} where $u_0=-50$, $a=0.6$, $x_0=7$, $u_1=-\frac{u_0}{a}$ and with boundary condition $y(0)=0$. The potential $V(x)$ decays more quickly than $\frac{l(l+1)}{x^2}$, so for large $x$ (asymptotic region) the Schr\"odinger equation (\ref{def_schr_equ}) becomes \begin{equation} y''(x)=\left(\frac{l(l+1)}{x^2}-E\right)y(x) \end{equation} The last equation has two linearly independent solutions $kxj_l(kx)$ and $kxn_l(kx)$, where $j_l$ and $n_l$ are the spherical \textit{Bessel} and \textit{Neumann} functions and $k=\sqrt{E}$. When $x\to \infty$ the solution takes the asymptotic form \begin{equation}\begin{array}{l} y(x) \sim A k x j_l (k x) - B k x n_l (k x) \nonumber \\ \sim D[\sin(k x - \pi \frac{l}{2}) + \tan(\delta_l ) \cos (k x - \pi \frac{l}{2})], \end{array}\end{equation} where $\delta _l$ is called the \textit{scattering phase shift} and it is given by the following expression: \begin{equation} \tan(\delta _l)=\frac{y(x_i)S(x_{i+1})-y(x_{i+1})S(x_i)}{y(x_{i+1})C(x_i)-y(x_i)C(x_{i+1})} \end{equation} where $S(x)=kxj_l(kx)$, $C(x)=kxn_l(kx)$, and $x_i<x_{i+1}$ both belong to the asymptotic region. Given the energy, we approximate the phase shift, the accurate value of which is $\frac{\pi}{2}$ for the above problem. We will use three different values for the energy: i) $989.701916$, ii) $341.495874$ and iii) $163.215341$. As for the frequency $\omega$ we will use the suggestion of Ixaru and Rizea \cite{ixaru_CPC_38_3329_85}: \begin{equation} \omega=\left\{\begin{array}{ll}\sqrt{E-50}, & x\in [0,6.5] \\ \sqrt{E}, & x\in [6.5,15] \end{array} \right. \end{equation} The results are shown in Figures \ref{fig:2}, \ref{fig:3} and \ref{fig:4}. It is clear that the accuracy increases as the number of the eliminated derivatives of the phase lag function increases.
\section{Conclusions} We have presented a new family of 14-step symmetric multistep numerical methods with improved characteristics concerning the integration of the Schr\"odinger equation. The methods were constructed by adopting a new methodology which, in addition to the phase fitting at a predefined frequency, eliminates the first derivatives of the phase lag function at the same frequency. The result is that the phase lag function becomes less sensitive to the frequency near the predefined one. This behavior compensates for the fact that the exact frequency can only be estimated. Experimental results demonstrate this behavior by showing that the accuracy increases as the number of eliminated derivatives increases.
\begin{thebibliography}{150} \bibitem{lyche_NM_19_65_72} T. Lyche, Chebyshevian multistep methods for ordinary differential equations, Numer. Math.
19, 65-75 (1972) \bibitem{gautschi_NM_3_381_61} Gautschi, W.: Numerical integration of ordinary differential equations based on trigonometric polynomials. Numer. Math. 3, 381-397 (1961) \bibitem{quinlan_arxiv_astro_ph_9901136} Quinlan, G.: Resonances and instabilities in symmetric multistep methods. preprint arXiv astro-ph/9901136 (1999) \bibitem{brusa_IJNME_15_685_80} Brusa, L., Nigro, L.: A one-step method for direct integration of structural dynamic equations. Int. J. Num. Methods Engrg. 15, 685-699 (1980) \bibitem{ixaru_Book_EF_KAP_04} L.Gr. Ixaru, G. Vanden Berghe: Exponential Fitting. Kluwer Academic Publishers, Dordrecht/Boston/London (2004) \bibitem{quinlan_AJ_100_1694_90} Quinlan, D., Tremaine, S.: Symmetric multistep methods for the numerical integration of planetary orbits. The Astronomical Journal 100(5), 1694-1700 (1990) \bibitem{lambert_JIMA_18_189_76} Lambert, J., Watson, I.: Symmetric multistep methods for periodic initial value problems. J. Inst. Math. Appl. 18, 189-202 (1976) \bibitem{simos_CC_23_513_99} Simos, T., Williams, P.: On finite difference methods for the solution of the Schr\"odinger equation. Comput. Chem. 23, 513-554 (1999) \bibitem{ixaru_CPC_38_3329_85} Ixaru, L.G., Rizea, M.: Comparison of some four-step methods for the numerical solution of the Schr\"odinger equation. Computer Physics Communications 38(3), 329-337 (1985) \bibitem{ix78} L.Gr. Ixaru and M. Micu, {\it Topics in Theoretical Physics}. Central Institute of Physics, Bucharest, 1978. \bibitem{landau} L.D. Landau and E.M. Lifshitz: {\it Quantum Mechanics}. Pergamon, New York, 1965. \bibitem{prigogine} I. Prigogine, Stuart Rice (Eds): Advances in Chemical Physics Vol. 93: New Methods in Computational Quantum Mechanics, John Wiley \& Sons, 1997. \bibitem{hertz} G. Herzberg, {\it Spectra of Diatomic Molecules}, Van Nostrand, Toronto, 1950. \bibitem{simos00_r} T.E. Simos, Atomic Structure Computations in Chemical Modelling: Applications and Theory (Editor: A. Hinchliffe, UMIST), {\it The Royal Society of Chemistry} 38-142(2000). \bibitem{simos02_r} T.E. Simos, Numerical methods for 1D, 2D and 3D differential equations arising in chemical problems, {\em Chemical Modelling: Application and Theory}, The Royal Society of Chemistry, 2(2002), 170-270. \bibitem{simos90} T.E. Simos: {\it Numerical Solution of Ordinary Differential Equations with Periodical Solution}. Doctoral Dissertation, National Technical University of Athens, Greece, 1990 (in Greek). \bibitem{kosim01} A. Konguetsof and T.E. Simos, On the construction of exponentially-fitted methods for the numerical solution of the Schr\"odinger equation, {\it Journal of Computational Methods in Sciences and Engineering} {\bf 1} 143-165(2001). \bibitem{ra78} A.D. Raptis and A.C. Allison: Exponential-fitting methods for the numerical solution of the Schr\"odinger equation, \textit{Computer Physics Communications}, {\bf 14} 1-5(1978). \bibitem{ra84gr} A.D. Raptis, Exponential multistep methods for ordinary differential equations, {\it Bull. Greek Math. Soc.} {\bf 25} 113-126(1984). \bibitem{ix84} L.Gr. Ixaru, Numerical Methods for Differential Equations and Applications, Reidel, Dordrecht - Boston - Lancaster, 1984. \bibitem{ix80} L.Gr. Ixaru and M. Rizea, A Numerov-like scheme for the numerical solution of the Schr\"odinger equation in the deep continuum spectrum of energies. {\it Comput. Phys. Commun.~} {\bf 19} 23-27(1980). \bibitem{wilsim02} T. E. Simos, P. S.
Williams: A New Runge-Kutta-Nystrom Method with Phase-Lag of Order Infinity for the Numerical Solution of the Schr\"odinger Equation, {\it MATCH Commun. Math. Comput. Chem.} {\bf 45} 123-137(2002). \bibitem{simos03} T. E. Simos, Multiderivative Methods for the Numerical Solution of the Schr\"odinger Equation, {\it MATCH Commun. Math. Comput. Chem.} {\bf 45} 7-26(2004). \bibitem{ra83} A.D. Raptis, Exponentially-fitted solutions of the eigenvalue Shr\"odinger equation with automatic error control, {\it Computer Physics Communications}, {\bf 28} 427-431(1983) \bibitem{ra81} A.D. Raptis, On the numerical solution of the Schrodinger equation, {\it Computer Physics Communications}, {\bf 24} 1-4(1981) \bibitem{siex99} Zacharoula Kalogiratou and T.E. Simos, A P-stable exponentially-fitted method for the numerical integration of the Schr\"odinger equation, {\it Applied Mathematics and Computation}, {\bf 112} 99-112(2000). \bibitem{rapsim91} A.D. Raptis and T.E. Simos, A four-step phase-fitted method for the numerical integration of second order initial-value problem, {\it BIT}, {\bf 31} 160-168(1991). \bibitem{henrici} Peter Henrici, {\it Discrete variable methods in ordinary differential equations}, John Wiley \& Sons, 1962. \bibitem{chawla83} M.M. Chawla, Uncoditionally stable Noumerov-type methods for second order differential equations, {\it BIT}, {\bf 23} 541-542(1983). \bibitem{chawla84} M. M. Chawla and P. S. Rao, A Noumerov-type method with minimal phase-lag for the integration of second order periodic initial-value problems, {\it Journal of Computational and Applied Mathematics} {\bf 11(3)} 277-281(1984) \bibitem{simos1} Z.A. Anastassi, T.E. Simos, A family of exponentially-fitted Runge-Kutta methods with exponential order up to three for the numerical solution of the Schr\"odinger equation, {\it J. Math. Chem} {\bf 41 (1)} 79-100 (2007) \bibitem{simos2} T. Monovasilis, Z. Kalogiratou , T.E. Simos, Trigonometrically fitted and exponentially fitted symplectic methods for the numerical integration of the Schr\"odinger equation, {\it J. Math. Chem} {\bf 40 (3)} 257-267 (2006) \bibitem{simos3} G. Psihoyios, T.E. Simos, The numerical solution of the radial Schr\"odinger equation via a trigonometrically fitted family of seventh algebraic order Predictor-Corrector methods, {\it J. Math. Chem} {\bf 40 (3)} 269-293 (2006) \bibitem{simos4} T.E. Simos, A four-step exponentially fitted method for the numerical solution of the Schr\"odinger equation, {\it J. Math. Chem} {\bf 40 (3)} 305-318 (2006) \bibitem{simos5} T. Monovasilis, Z. Kalogiratou , T.E. Simos, Exponentially fitted symplectic methods for the numerical integration of the Schr\"odinger equation {\it J. Math. Chem} {\bf 37 (3)} 263-270 (2005) \bibitem{simos6} Z. Kalogiratou , T. Monovasilis, T.E. Simos, Numerical solution of the two-dimensional time independent Schr\"odinger equation with Numerov-type methods {\it J. Math. Chem} {\bf 37 (3)} 271-279 (2005) \bibitem{simos7} Z.A. Anastassi, T.E. Simos, Trigonometrically fitted Runge-Kutta methods for the numerical solution of the Schr\"odinger equation {\it J. Math. Chem} {\bf 37 (3)} 281-293 (2005) \bibitem{simos8} G. Psihoyios, T.E. Simos, Sixth algebraic order trigonometrically fitted predictor-corrector methods for the numerical solution of the radial Schr\"odinger equation, {\it J. Math. Chem} {\bf 37 (3)} 295-316 (2005) \bibitem{simos9} D.P. Sakas, T.E. Simos, A family of multiderivative methods for the numerical solution of the Schr\"odinger equation, {\it J. Math. 
Chem} {\bf 37 (3)} 317-331 (2005) \bibitem{simos10} T.E. Simos, Exponentially - fitted multiderivative methods for the numerical solution of the Schr\"odinger equation, {\it J. Math. Chem} {\bf 36 (1)} 13-27 (2004) \bibitem{simos11} K. Tselios, T.E. Simos, Symplectic methods of fifth order for the numerical solution of the radial Shrodinger equation, {\it J. Math. Chem} {\bf 35 (1)} 55-63 (2004) \bibitem{simos12} T.E. Simos, A family of trigonometrically-fitted symmetric methods for the efficient solution of the Schr\"odinger equation and related problems {\it J. Math. Chem} {\bf 34 (1-2)} 39-58 JUL 2003 \bibitem{simos13} K. Tselios, T.E. Simos, Symplectic methods for the numerical solution of the radial Shr\"odinger equation, {\it J. Math. Chem} {\bf 34 (1-2)} 83-94 (2003) \bibitem{simos14} J. Vigo-Aguiar J, T.E. Simos, Family of twelve steps exponential fitting symmetric multistep methods for the numerical solution of the Schr\"odinger equation, {\it J. Math. Chem} {\bf 32 (3)} 257-270 (2002) \bibitem{simos15} G. Avdelas, E. Kefalidis, T.E. Simos, New P-stable eighth algebraic order exponentially-fitted methods for the numerical integration of the Schr\"odinger equation, {\it J. Math. Chem} {\bf 31 (4)} 371-404 (2002) \bibitem{simos16} T.E. Simos, J. Vigo-Aguiar, Symmetric eighth algebraic order methods with minimal phase-lag for the numerical solution of the Schr\"odinger equation {\it J. Math. Chem} {\bf 31 (2)} 135-144 (2002) \bibitem{simos17} Z. Kalogiratou , T.E. Simos, Construction of trigonometrically and exponentially fitted Runge-Kutta-Nystrom methods for the numerical solution of the Schr\"odinger equation and related problems a method of 8th algebraic order, {\it J. Math. Chem} {\bf 31 (2)} 211-232 \bibitem{simos18} T.E. Simos, J. Vigo-Aguiar, A modified phase-fitted Runge-Kutta method for the numerical solution of the Schr\"odinger equation, {\it J. Math. Chem} {\bf 30 (1)} 121-131 (2001) \bibitem{simos19} G. Avdelas, A. Konguetsof, T.E. Simos, A generator and an optimized generator of high-order hybrid explicit methods for the numerical solution of the Schr\"odinger equation. Part 1. Development of the basic method, {\it J. Math. Chem} {\bf 29 (4)} 281-291 (2001) \bibitem{simos20} G. Avdelas, A. Konguetsof, T.E. Simos, A generator and an optimized generator of high-order hybrid explicit methods for the numerical solution of the Schr\"odinger equation. Part 2. Development of the generator; optimization of the generator and numerical results, {\it J. Math. Chem} {\bf 29 (4)} 293-305 (2001) \bibitem{simos21} J. Vigo-Aguiar, T.E. Simos, A family of P-stable eighth algebraic order methods with exponential fitting facilities, {\it J. Math. Chem} {\bf 29 (3)} 177-189 (2001) \bibitem{simos22} T.E. Simos, A new explicit Bessel and Neumann fitted eighth algebraic order method for the numerical solution of the Schr\"odinger equation {\it J. Math. Chem} {\bf 27 (4)} 343-356 (2000) \bibitem{simos23} G. Avdelas, T.E. Simos, Embedded eighth order methods for the numerical solution of the Schr\"odinger equation, {\it J. Math. Chem} {\bf 26 (4)} 327-341 1999, \bibitem{simos24} T.E. Simos, A family of P-stable exponentially-fitted methods for the numerical solution of the Schr\"odinger equation, {\it J. Math. Chem} {\bf 25 (1)} 65-84 (1999) \bibitem{simos25} T.E. Simos, Some embedded modified Runge-Kutta methods for the numerical solution of some specific Schr\"odinger equations, {\it J. Math. Chem} {\bf 24 (1-3)} 23-37 (1998) \bibitem{simos26} T.E. 
Simos, Eighth order methods with minimal phase-lag for accurate computations for the elastic scattering phase-shift problem, {\it J. Math. Chem} {\bf 21 (4)} 359-372 (1997) \bibitem{jnaiam1} P. Amodio, I. Gladwell and G. Romanazzi, Numerical Solution of General Bordered ABD Linear Systems by Cyclic Reduction, {\it JNAIAM J. Numer. Anal. Indust. Appl. Math} {\bf 1(1)} 5-12(2006) \bibitem{jnaiam2} S. D. Capper, J. R. Cash and D. R. Moore, Lobatto-Obrechkoff Formulae for 2nd Order Two-Point Boundary Value Problems, {\it JNAIAM J. Numer. Anal. Indust. Appl. Math} {\bf 1(1)} 13-25 (2006) \bibitem{jnaiam3} S. D. Capper and D. R. Moore, On High Order MIRK Schemes and Hermite-Birkhoff Interpolants, {\it JNAIAM J. Numer. Anal. Indust. Appl. Math} {\bf 1(1)} 27-47 (2006) \bibitem{jnaiam4} J. R. Cash, N. Sumarti, T. J. Abdulla and I. Vieira, The Derivation of Interpolants for Nonlinear Two-Point Boundary Value Problems, {\it JNAIAM J. Numer. Anal. Indust. Appl. Math} {\bf 1(1)} 49-58 (2006) \bibitem{jnaiam5} J. R. Cash and S. Girdlestone, Variable Step Runge-Kutta-Nyström Methods for the Numerical Solution of Reversible Systems, {\it JNAIAM J. Numer. Anal. Indust. Appl. Math} {\bf 1(1)} 59-80 (2006) \bibitem{jnaiam6} Jeff R. Cash and Francesca Mazzia, Hybrid Mesh Selection Algorithms Based on Conditioning for Two-Point Boundary Value Problems, {\it JNAIAM J. Numer. Anal. Indust. Appl. Math} {\bf 1(1)} 81-90 (2006) \bibitem{jnaiam7} Felice Iavernaro, Francesca Mazzia and Donato Trigiante, Stability and Conditioning in Numerical Analysis, {\it JNAIAM J. Numer. Anal. Indust. Appl. Math} {\bf 1(1)} 91-112 (2006) \bibitem{jnaiam8} Felice Iavernaro and Donato Trigiante, Discrete Conservative Vector Fields Induced by the Trapezoidal Method, {\it JNAIAM J. Numer. Anal. Indust. Appl. Math} {\bf 1(1)} 113-130 (2006) \bibitem{jnaiam9} Francesca Mazzia, Alessandra Sestini and Donato Trigiante, BS Linear Multistep Methods on Non-uniform Meshes, {\it JNAIAM J. Numer. Anal. Indust. Appl. Math} {\bf 1(1)} 131-144 (2006) \bibitem{jnaiam10} L.F. Shampine, P.H. Muir, H. Xu, A User-Friendly Fortran BVP Solver, {\it JNAIAM J. Numer. Anal. Indust. Appl. Math} {\bf 1(2)} 201-217 (2006) \bibitem{jnaiam11} G. Vanden Berghe and M. Van Daele, Exponentially- fitted Störmer/Verlet methods, {\it JNAIAM J. Numer. Anal. Indust. Appl. Math} {\bf 1(3)} 241-255 (2006) \bibitem{jnaiam12} L. Aceto, R. Pandolfi, D. Trigiante, Stability Analysis of Linear Multistep Methods via Polynomial Type Variation, {\it JNAIAM J. Numer. Anal. Indust. Appl. Math} {\bf 2(1-2)} 1-9 (2007) \bibitem{psih} G. Psihoyios, A Block Implicit Advanced Step-point (BIAS) Algorithm for Stiff Differential Systems, {\it Computing Letters} {\bf 2(1-2)} 51-58(2006) \bibitem{enright} W.H. Enright, On the use of 'arc length' and 'defect' for mesh selection for differential equations, {\it Computing Letters} {\bf 1(2)} 47-52(2005) \bibitem{simosnew1} T.E. Simos, P-stable Four-Step Exponentially-Fitted Method for the Numerical Integration of the Schr\"{o}dinger Equation, {\em Computing Letter} \textbf{1(1)} 37-45(2005). \bibitem{simos2007} T.E. Simos, Stabilization of a Four-Step Exponentially-Fitted Method and its Application to the Schr\"odinger Equation, {\em International Journal of Modern Physics C} \textbf{18(3)} 315-328(2007). \bibitem{wang2005} Zhongcheng Wang, P-stable linear symmetric multistep methods for periodic initial-value problems, {\em Computer Physics Communications} \textbf{171} 162–174(2005) \bibitem{simoscma93} T.E. 
Simos, A Runge-Kutta Fehlberg method with phase-lag of order infinity for initial value problems with oscillating solution, {\it Computers and Mathematics with Applications} {\bf 25} 95-101(1993). \bibitem{simoscma93b} T.E. Simos, Runge-Kutta interpolants with minimal phase-lag, {\it Computers and Mathematics with Applications} {\bf 26} 43-49(1993). \bibitem{simoscma93c} T.E. Simos, Runge-Kutta-Nystr\"om interpolants for the numerical integration of special second-order periodic initial-value problems, {\it Computers and Mathematics with Applications} {\bf 26} 7-15(1993). \bibitem{simoscma94} T.E. Simos and G.V. Mitsou, A family of four-step exponential fitted methods for the numerical integration of the radial Schr\"odinger equation, {\it Computers and Mathematics with Applications} {\bf 28} 41-50(1994). \bibitem{simoscma95} T.E. Simos and G. Mousadis, A two-step method for the numerical solution of the radial Schrödinger equation, {\it Computers and Mathematics with Applications} {\bf 29} 31-37(1995). \bibitem{simoscma96} G. Avdelas and T.E. Simos, Block Runge-Kutta methods for periodic initial-value problems, {\it Computers and Mathematics with Applications} {\bf 31} 69- 83(1996). \bibitem{simoscma96b} G. Avdelas and T.E. Simos, Embedded methods for the numerical solution of the Schr\"odinger equation, {\it Computers and Mathematics with Applications} {\bf 31} 85-102(1996). \bibitem{simoscma96c} G. Papakaliatakis and T.E. Simos, A new method for the numerical solution of fourth order BVP’s with oscillating solutions, {\it Computers and Mathematics with Applications} {\bf 32} 1-6(1996). \bibitem{simoscma97} T.E. Simos, An extended Numerov-type method for the numerical solution of the Schr\"odinger equation, {\it Computers and Mathematics with Applications} {\bf 33} 67-78(1997). \bibitem{simoscma98} T.E. Simos, A new hybrid imbedded variable-step procedure for the numerical integration of the Schr\"odinger equation, {\it Computers and Mathematics with Applications} {\bf 36} 51-63(1998). \bibitem{simoscma01} T.E. Simos, Bessel and Neumann Fitted Methods for the Numerical Solution of the Schr\"odinger equation, {\it Computers \& Mathematics with Applications} {\bf 42} 833-847(2001). \bibitem{simoscma03} A. Konguetsof and T.E. Simos, An exponentially-fitted and trigonometrically-fitted method for the numerical solution of periodic initial-value problems, {\it Computers and Mathematics with Applications} {\bf 45} 547-554(2003). \bibitem{simoscam05} Z.A. Anastassi and T.E. Simos, An optimized Runge-Kutta method for the solution of orbital problems, {\it Journal of Computational and Applied Mathematics} {\bf 175(1)} 1-9(2005) \bibitem{simoscam05a} G. Psihoyios and T.E. Simos, A fourth algebraic order trigonometrically fitted predictor-corrector scheme for IVPs with oscillating solutions, {\it Journal of Computational and Applied Mathematics} {\bf 175(1)} 137-147(2005) \bibitem{simoscam05b} D.P. Sakas and T.E. Simos, Multiderivative methods of eighth algrebraic order with minimal phase-lag for the numerical solution of the radial Schr\"odinger equation, Journal of Computational and Applied Mathematics {\bf 175(1)} 161-172(2005) \bibitem{simoscam05c} K. Tselios and T.E. Simos, Runge-Kutta methods with minimal dispersion and dissipation for problems arising from computational acoustics, {\it Journal of Computational and Applied Mathematics} {\bf 175(1)} 173-181(2005) \bibitem{simoscam03} Z. Kalogiratou and T.E. 
Simos, Newton-Cotes formulae for long-time integration, {\it Journal of Computational and Applied Mathematics} {\bf 158(1)} 75-82(2003) \bibitem{simoscam03a} Z. Kalogiratou, T. Monovasilis and T.E. Simos, Symplectic integrators for the numerical solution of the Schr\"odinger equation, {\it Journal of Computational and Applied Mathematics} {\bf 158(1)} 83-92(2003) \bibitem{simoscam03b} A. Konguetsof and T.E. Simos, A generator of hybrid symmetric four-step methods for the numerical solution of the Schr\"odinger equation, {\it Journal of Computational and Applied Mathematics} {\bf 158(1)} 93-106(2003) \bibitem{simoscam03c} G. Psihoyios and T.E. Simos, Trigonometrically fitted predictor-corrector methods for IVPs with oscillating solutions, {\it Journal of Computational and Applied Mathematics} {\bf 158(1)} 135-144(2003) \bibitem{simoscam02} Ch. Tsitouras and T.E. Simos, Optimized Runge-Kutta pairs for problems with oscillating solutions, {\it Journal of Computational and Applied Mathematics} {\bf 147(2)} 397-409(2002) \bibitem{simoscam99} T.E. Simos, An exponentially fitted eighth-order method for the numerical solution of the Schr\"odinger equation, {\it Journal of Computational and Applied Mathematics} {\bf 108(1-2)} 177-194(1999) \bibitem{simoscam98} T.E. Simos, An accurate finite difference method for the numerical solution of the Schr\"odinger equation, {\it Journal of Computational and Applied Mathematics} {\bf 91(1)} 47-61(1998) \bibitem{simoscam97} R.M. Thomas and T.E. Simos, A family of hybrid exponentially fitted predictor-corrector methods for the numerical integration of the radial Schr\"odinger equation, {\it Journal of Computational and Applied Mathematics} {\bf 87(2)} 215-226(1997) \bibitem{anastassi_p1} Z.A. Anastassi and T.E. Simos: Special Optimized Runge-Kutta methods for IVPs with Oscillating Solutions, International Journal of Modern Physics C, 15, 1-15 (2004) \bibitem{anastassi_p3} Z.A. Anastassi and T.E. Simos: A Dispersive-Fitted and Dissipative-Fitted Explicit Runge-Kutta method for the Numerical Solution of Orbital Problems, New Astronomy, 10, 31-37 (2004) \bibitem{anastassi_p6} Z.A. Anastassi and T.E. Simos: A Trigonometrically-Fitted Runge-Kutta Method for the Numerical Solution of Orbital Problems, New Astronomy, 10, 301-309 (2005) \bibitem{anastassi_p9} T.V. Triantafyllidis, Z.A. Anastassi and T.E. Simos: Two Optimized Runge-Kutta Methods for the Solution of the Schr?dinger Equation, MATCH Commun. Math. Comput. Chem., 60, 3 (2008) \bibitem{anastassi2} Z.A. Anastassi and T.E. Simos, Trigonometrically Fitted Fifth Order Runge-Kutta Methods for the Numerical Solution of the Schr\"{o}dinger Equation, Mathematical and Computer Modelling, 42 (7-8), 877-886 (2005) \bibitem{anastassi_simos_p8} Z.A. Anastassi and T.E. Simos: New Trigonometrically Fitted Six-Step Symmetric Methods for the Efficient Solution of the Schr\"odinger Equation, MATCH Commun. Math. Comput. Chem., 60, 3 (2008) \bibitem{panopoulos_match} G.A. Panopoulos, Z.A. Anastassi and T.E. Simos: Two New Optimized Eight-Step Symmetric Methods for the Efficient Solution of the Schr\"{o}dinger Equation and Related Problems, MATCH Commun. Math. Comput. Chem., 60, 3 (2008) \bibitem{anastassi_simos_p11} Z.A. Anastassi and T.E. Simos: A Six-Step P-stable Trigonometrically-Fitted Method for the Numerical Integration of the Radial Schr\"odinger Equation, MATCH Commun. Math. Comput. Chem., 60, 3 (2008) \bibitem{anas_p12} Z.A. Anastassi and T.E. 
Simos, A family of two-stage two-step methods for the numerical integration of the Schr\"odinger equation and related IVPs with oscillating solution, Journal of Mathematical Chemistry, Article in Press, Corrected Proof \bibitem{simoscam97a} T.E. Simos and P.S. Williams, A finite-difference method for the numerical solution of the Schr\"odinger equation, {\it Journal of Computational and Applied Mathematics} {\bf 79(2)} 189-205(1997) \bibitem{simoscam96} G. Avdelas and T.E. Simos, A generator of high-order embedded P-stable methods for the numerical solution of the Schr\"odinger equation, {\it Journal of Computational and Applied Mathematics} {\bf 72(2)} 345-358(1996) \bibitem{simoscam96a} R.M. Thomas, T.E. Simos and G.V. Mitsou, A family of Numerov-type exponentially fitted predictor-corrector methods for the numerical integration of the radial Schr\"odinger equation, {\it Journal of Computational and Applied Mathematics} {\bf 67(2)} 255-270(1996) \bibitem{simoscam95} T.E. Simos, A Family of 4-Step Exponentially Fitted Predictor-Corrector Methods for the Numerical-Integration of The Schr\"odinger-Equation, {\it Journal of Computational and Applied Mathematics} {\bf 58(3)} 337-344(1995) \bibitem{simoscam94} T.E. Simos, An Explicit 4-Step Phase-Fitted Method for the Numerical-Integration of 2nd-order Initial-Value Problems, {\it Journal of Computational and Applied Mathematics} {\bf 55(2)} 125-133(1994) \bibitem{simoscam94a} T.E. Simos, E. Dimas and A.B. Sideridis, A Runge-Kutta-Nystr\"om Method for the Numerical-Integration of Special 2nd-order Periodic Initial-Value Problems, {\it Journal of Computational and Applied Mathematics} {\bf 51(3)} 317-326(1994) \bibitem{simoscam92} A.B. Sideridis and T.E. Simos, A Low-Order Embedded Runge-Kutta Method for Periodic Initial-Value Problems, {\it Journal of Computational and Applied Mathematics} {\bf 44(2)} 235-244(1992) \bibitem{simoscam92a} T.E. Simos amd A.D. Raptis, A 4th-order Bessel Fitting Method for the Numerical-Solution of the Schr\"Odinger-Equation, {\it Journal of Computational and Applied Mathematics} {\bf 43(3)} 313-322(1992) \bibitem{simoscam92b} T.E. Simos, Explicit 2-Step Methods with Minimal Phase-Lag for the Numerical-Integration of Special 2nd-order Initial-Value Problems and their Application to the One-Dimensional Schr\"odinger-Equation, {\it Journal of Computational and Applied Mathematics} {\bf 39(1)} 89-94(1992) \bibitem{simoscam90} T.E. Simos, A 4-Step Method for the Numerical-Solution of the Schr\"odinger-Equation, {\it Journal of Computational and Applied Mathematics} {\bf 30(3)} 251-255(1990) \bibitem{simoscam90a} C.D. Papageorgiou, A.D. Raptis and T.E. Simos, A Method for Computing Phase-Shifts for Scattering, {\it Journal of Computational and Applied Mathematics} {\bf 29(1)} 61-67(1990) \bibitem{raptis82} A.D. Raptis, Two-Step Methods for the Numerical Solution of the Schr\"odinger Equation, {\it Computing} {\bf 28} 373-378(1982). \bibitem{simosijmpc96} T.E. Simos. A new Numerov-type method for computing eigenvalues and resonances of the radial Schr\"odinger equation, International Journal of Modern Physics C-Physics and Computers, {\bf 7(1)} 33-41(1996) \bibitem{simosijqc95} T.E. Simos, Predictor Corrector Phase-Fitted Methods for Y''=F(X,Y) and an Application to the Schr\"odinger-Equation, International Journal of Quantum Chemistry, {\bf 53(5)} 473-483(1995) \bibitem{simosijcm92} T.E. 
Simos, Two-step almost P-stable complete in phase methods for the numerical integration of second order periodic initial-value problems, {\it Inter. J. Comput. Math.} {\bf 46} 77-85(1992). \bibitem{jnaiam3_1} R. M. Corless, A. Shakoori, D.A. Aruliah, L. Gonzalez-Vega, Barycentric Hermite Interpolants for Event Location in Initial-Value Problems, {\it JNAIAM J. Numer. Anal. Indust. Appl. Math}, 3, 1-16 (2008) \bibitem{jnaiam3_2} M. Dewar, Embedding a General-Purpose Numerical Library in an Interactive Environment, {\it JNAIAM J. Numer. Anal. Indust. Appl. Math}, 3, 17-26 (2008) \bibitem{jnaiam3_3} J. Kierzenka and L.F. Shampine, A BVP Solver that Controls Residual and Error, {\it JNAIAM J. Numer. Anal. Indust. Appl. Math}, 3, 27-41 (2008) \bibitem{jnaiam3_4} R. Knapp, A Method of Lines Framework in Mathematica, {\it JNAIAM J. Numer. Anal. Indust. Appl. Math}, 3, 43-59 (2008) \bibitem{jnaiam3_5} N. S. Nedialkov and J. D. Pryce, Solving Differential Algebraic Equations by Taylor Series (III): the DAETS Code, {\it JNAIAM J. Numer. Anal. Indust. Appl. Math}, 3, 61-80 (2008) \bibitem{jnaiam3_6} R. L. Lipsman, J. E. Osborn, and J. M. Rosenberg, The SCHOL Project at the University of Maryland: Using Mathematical Software in the Teaching of Sophomore Differential Equations, {\it JNAIAM J. Numer. Anal. Indust. Appl. Math}, 3, 81-103 (2008) \bibitem{jnaiam3_7} M. Sofroniou and G. Spaletta, Extrapolation Methods in Mathematica, {\it JNAIAM J. Numer. Anal. Indust. Appl. Math}, 3, 105-121 (2008) \bibitem{jnaiam3_8} R. J. Spiteri and Thian-Peng Ter, pythNon: A PSE for the Numerical Solution of Nonlinear Algebraic Equations, {\it JNAIAM J. Numer. Anal. Indust. Appl. Math}, 3, 123-137 (2008) \bibitem{jnaiam3_9} S.P. Corwin, S. Thompson and S.M. White, Solving ODEs and DDEs with Impulses, {\it JNAIAM J. Numer. Anal. Indust. Appl. Math}, 3, 139-149 (2008) \bibitem{jnaiam3_10} W. Weckesser, VFGEN: A Code Generation Tool, {\it JNAIAM J. Numer. Anal. Indust. Appl. Math}, 3, 151-165 (2008) \bibitem{jnaiam3_11} A. Wittkopf, Automatic Code Generation and Optimization in Maple, {\it JNAIAM J. Numer. Anal. Indust. Appl. 
Math}, 3, 167-180 (2008) \end{thebibliography} \appendix Method \textit{PF-D0}: \begin{equation}\begin{array}{l} b_{1,num}^0=((18392342566 \cos (v)-11352051608 \cos (2 v)+\\ 4958070583 \cos (3 v)-1405810666 \cos (4 v)+\\ 234300323 \cos (5 v)) v^2)/7257600-\frac{5373508799 v^2}{3628800}-\\ 2 \cos (4 v)+4 \cos (5 v)-4 \cos (6 v)+2 \cos (7 v) \nonumber \\ \\ b_{2,num}^0=-((35142254976 \cos (v)-20245959411 \cos (2 v)\\ +7950775936 \cos (3 v)-1405906674 \cos (4 v)\\ +234300323 \cos (6 v)) v^2)/7257600+\frac{138116413 v^2}{48384}+\\ 24 \cos (4 v)-48 \cos (5 v)+48 \cos (6 v)-24 \cos (7 v) \nonumber \\ \\ b_{3,num}^0=((50246280942 \cos (v)-26679563229 \cos (2 v)\\ +8977155979 \cos (3 v)-702953337 \cos (5 v)\\ +702905333 \cos (6 v)) v^2)/3628800-\frac{415407179 v^2}{50400}-\\ 132 \cos (4 v)+264 \cos (5 v)-264 \cos (6 v)+132 \cos (7 v) \nonumber \\ \\ b_{4,num}^0=-((119523462784 \cos (v)-43206415175 \cos (2 v)\\ +17954311958 \cos (4 v)-7950775936 \cos (5 v)\\ +4958070583 \cos (6 v)) v^2)/7257600+\frac{36857631107 v^2}{3628800}+\\ 440 \cos (4 v)-880 \cos (5 v)+880 \cos (6 v)-440 \cos (7 v) \nonumber \\ \\ b_{5,num}^0=((113384696634 \cos (v)-43206415175 \cos (3 v)\\ +53359126458 \cos (4 v)-20245959411 \cos (5 v)\\ +11352051608 \cos (6 v)) v^2)/7257600-\frac{12520978019 v^2}{1209600}-\\ 990 \cos (4 v)+1980 \cos (5 v)-1980 \cos (6 v)+990 \cos (7 v) \nonumber \\ \\ b_{6,num}^0=-((56692348317 \cos (2 v)-59761731392 \cos (3 v)\\ +50246280942 \cos (4 v)-17571127488 \cos (5 v)\\ +9196171283 \cos (6 v)) v^2)/3628800+\frac{1197972677 v^2}{604800}+\\ 1584 \cos (4 v)-3168 \cos (5 v)+3168 \cos (6 v)-1584 \cos (7 v) \nonumber \\ \\ b_{7,num}^0=(v^2 (-7187836062 \cos (v)+37562934057 \cos (2 v)\\ -36857631107 \cos (3 v)+29909316888 \cos (4 v)\\ -10358730975 \cos (5 v)+5373508799 \cos (6 v)))/1814400-\\ 1848 (\cos (4 v)-2 \cos (5 v)+2 \cos (6 v)-\cos (7 v)) \nonumber \\ \\ b_{denom}^0=\left(-4096 v^2 \sin ^{12}\left(\frac{v}{2}\right)\right) \nonumber \end{array}\end{equation} \ason \begin{equation}\begin{array}{l} b^0_{T,1}=\frac{433489274083}{237758976000}-\frac{152802083671 v^2}{2853107712000}+\frac{1000430523577 v^4}{291016986624000}-\\ \frac{69882256253489 v^6}{1548210368839680000}+ \frac{257597135900761 v^8}{1532728265151283200000}-\frac{91527043218239 v^{10}}{3384264009454033305600}-\ldots\\ b^0_{T,2}=-\frac{28417333297}{4953312000}+\frac{152802083671 v^2}{237758976000}-\frac{1000430523577 v^4}{24251415552000}+\\ \frac{69882256253489 v^6}{129017530736640000}-\frac{257597135900761 v^8}{127727355429273600000}+\frac{91527043218239 v^{10}}{282022000787836108800}+\ldots\\ b^0_{T,3}=\frac{930518896733}{39626496000}-\frac{1680822920381 v^2}{475517952000}+\frac{11004735759347 v^4}{48502831104000}-\\ \frac{768704818788379 v^6}{258035061473280000}+\frac{257597135900761 v^8}{23223155532595200000}-\frac{91527043218239 v^{10}}{51276727415970201600}-\ldots\\ b^0_{T,4}=-\frac{176930551859}{2971987200}+\frac{1680822920381 v^2}{142655385600}-\frac{11004735759347 v^4}{14550849331200}+\\ \frac{768704818788379 v^6}{77410518441984000}-\frac{257597135900761 v^8}{6966946659778560000}+\frac{91527043218239 v^{10}}{15383018224791060480}+\ldots\\ b^0_{T,5}=\frac{7854755921}{65228800}-\frac{1680822920381 v^2}{63402393600}+\frac{11004735759347 v^4}{6467044147200}-\\ \frac{768704818788379 v^6}{34404674863104000}+\frac{257597135900761 v^8}{3096420737679360000}-\frac{91527043218239 v^{10}}{6836896988796026880}-\ldots\\ b^0_{T,6}=-\frac{146031020287}{825552000}+\frac{1680822920381 v^2}{39626496000}-\frac{11004735759347 v^4}{4041902592000}+\\ 
\frac{768704818788379 v^6}{21502921789440000}-\frac{257597135900761 v^8}{1935262961049600000}+\frac{91527043218239 v^{10}}{4273060617997516800}+\ldots\\ b^0_{T,7}=\frac{577045151693}{2830464000}-\frac{1680822920381 v^2}{33965568000}+\frac{11004735759347 v^4}{3464487936000}-\\ \frac{768704818788379 v^6}{18431075819520000}+\frac{257597135900761 v^8}{1658796823756800000}-\frac{91527043218239 v^{10}}{3662623386855014400}-\ldots \end{array}\end{equation} \asoff Method \textit{PF-D1}: \begin{equation}\begin{array}{l} b_{1,num}^1=(4 v \sin ^9(\frac{v}{2}) (29030400 (2 \cos (v)+2 \cos (2 v)+2 \cos (3 v)\\ +2 \cos (4 v)+2 \cos (6 v)+1) \sin ^3(\frac{v}{2})+v (3628800 (9 \cos (\frac{7 v}{2})\\ -19 \cos (\frac{9 v}{2})+2 (11 \cos (\frac{11 v}{2})-7 \cos (\frac{13 v}{2})+\cos (\frac{15 v}{2})))\\ -11 v^2 (65542714 \cos (\frac{v}{2})-133977068 \cos (\frac{3 v}{2})+127463860 \cos (\frac{5 v}{2})\\ -62185337 \cos (\frac{7 v}{2})+21299831 \cos (\frac{9 v}{2})))))/14175 \nonumber \\ b_{2,num}^1=(8 v \sin ^9(\frac{v}{2}) (v (11 v^2 (57295722 \cos (\frac{v}{2})-50530458 \cos (\frac{3 v}{2})\\ +72737235 \cos (\frac{5 v}{2})+6776053 \cos (\frac{7 v}{2})+1285617 \cos (\frac{9 v}{2})\\ +21299831 \cos (\frac{11 v}{2}))-1814400 (10 \cos (\frac{5 v}{2})+68 \cos (\frac{7 v}{2})\\ -156 \cos (\frac{9 v}{2})+187 \cos (\frac{11 v}{2})-119 \cos (\frac{13 v}{2})+9 \cos (\frac{15 v}{2})\\ +\cos (\frac{17 v}{2})))-29030400 (12 \cos (v)+12 \cos (2 v)+12 \cos (3 v)\\ +11 \cos (4 v)+2 \cos (5 v)+10 \cos (6 v)+\cos (7 v)+6) \sin ^3(\frac{v}{2})))/14175 \nonumber \\ b_{3,num}^1=(4 v \sin ^9(\frac{v}{2}) (58060800 (66 \cos (v)+66 \cos (2 v)+66 \cos (3 v)\\ +56 \cos (4 v)+20 \cos (5 v)+46 \cos (6 v)+10 \cos (7 v)+33) \sin ^3(\frac{v}{2})\\ +v (7257600 (50 \cos (\frac{5 v}{2})+97 \cos (\frac{7 v}{2})-267 \cos (\frac{9 v}{2})\\ +341 \cos (\frac{11 v}{2})-217 \cos (\frac{13 v}{2})-9 \cos (\frac{15 v}{2})+5 \cos (\frac{17 v}{2}))\\ -11 v^2 (418185576 \cos (\frac{v}{2})-101897295 \cos (\frac{3 v}{2})+429149785 \cos (\frac{5 v}{2})\\ +213355284 \cos (\frac{7 v}{2})+25712340 \cos (\frac{9 v}{2})\\ +21299831 (9 \cos (\frac{11 v}{2})+\cos (\frac{13 v}{2}))))))/14175 \nonumber \\ b_{4,num}^1=(8 v \sin ^9(\frac{v}{2}) (v (11 v^2 (469639178 \cos (\frac{v}{2})+311586932 \cos (\frac{3 v}{2})\\ +333470325 \cos (\frac{5 v}{2})+480049389 \cos (\frac{7 v}{2})+77311321 \cos (\frac{9 v}{2})\\ +260311901 \cos (\frac{11 v}{2})+63470954 \cos (\frac{13 v}{2}))-9072000 (90 \cos (\frac{5 v}{2})\\ +36 \cos (\frac{7 v}{2})-188 \cos (\frac{9 v}{2})+275 \cos (\frac{11 v}{2})-175 \cos (\frac{13 v}{2})\\ -47 \cos (\frac{15 v}{2})+9 \cos (\frac{17 v}{2})))-145152000 (44 \cos (v)+44 \cos (2 v)+44 \cos (3 v)\\ +35 \cos (4 v)+18 \cos (5 v)+26 \cos (6 v)+9 \cos (7 v)+22) \sin ^3(\frac{v}{2})))/14175 \nonumber \\ b_{5,num}^1=(4 v \sin ^9(\frac{v}{2}) (435456000 (66 \cos (v)+66 \cos (2 v)+66 \cos (3 v)\\ +50 \cos (4 v)+32 \cos (5 v)+34 \cos (6 v)+16 \cos (7 v)+33) \sin ^3(\frac{v}{2})\\ +v (54432000 (80 \cos (\frac{5 v}{2})-23 \cos (\frac{7 v}{2})-51 \cos (\frac{9 v}{2})\\ +110 \cos (\frac{11 v}{2})-70 \cos (\frac{13 v}{2})-54 \cos (\frac{15 v}{2})+8 \cos (\frac{17 v}{2}))\\ -11 v^2 (2105070006 \cos (\frac{v}{2})+1324106064 \cos (\frac{3 v}{2})+1778508400 \cos (\frac{5 v}{2})\\ +1717441153 \cos (\frac{7 v}{2})+754192017 \cos (\frac{9 v}{2})+920962652 \cos (\frac{11 v}{2})\\ +380999708 \cos (\frac{13 v}{2})))))/14175 \nonumber \\ b_{6,num}^1=(16 v \sin ^9(\frac{v}{2}) (v (11 v^2 (809642310 \cos (\frac{v}{2})+579296403 \cos (\frac{3 v}{2})\\ +714780380 \cos (\frac{5 
v}{2})+616166543 \cos (\frac{7 v}{2})+386753499 \cos (\frac{9 v}{2})\\ +310811926 \cos (\frac{11 v}{2})+175060939 \cos (\frac{13 v}{2}))-5443200 (350 \cos (\frac{5 v}{2})\\ -212 \cos (\frac{7 v}{2})+12 \cos (\frac{9 v}{2})+209 \cos (\frac{11 v}{2})-133 \cos (\frac{13 v}{2})\\ -261 \cos (\frac{15 v}{2})+35 \cos (\frac{17 v}{2})))-87091200 (132 \cos (v)\\ +132 \cos (2 v)+132 \cos (3 v)+97 \cos (4 v)+70 \cos (5 v)\\ +62 \cos (6 v)+35 \cos (7 v)+66) \sin ^3(\frac{v}{2})))/14175 \nonumber \\ b_{7,num}^1=-(8 v \sin ^9(\frac{v}{2}) (v (11 v^2 (1943141544 \cos (\frac{v}{2})+1212190059 \cos (\frac{3 v}{2})\\ +1835374595 \cos (\frac{5 v}{2})+1290288356 \cos (\frac{7 v}{2})+1000981284 \cos (\frac{9 v}{2})\\ +673851637 \cos (\frac{11 v}{2})+426700525 \cos (\frac{13 v}{2}))-152409600 (30 \cos (\frac{5 v}{2})\\ -21 \cos (\frac{7 v}{2})+7 \cos (\frac{9 v}{2})+11 \cos (\frac{11 v}{2})-7 \cos (\frac{13 v}{2})\\ -23 \cos (\frac{15 v}{2})+3 \cos (\frac{17 v}{2})))+304819200 (3 \sin (\frac{5 v}{2})-4 \sin (\frac{7 v}{2})\\ +2 \sin (\frac{11 v}{2})-4 \sin (\frac{15 v}{2})+3 \sin (\frac{17 v}{2}))))/14175 \nonumber \\ b_{denom}^1=\left(-4194304 v^4 \cos \left(\frac{v}{2}\right) \sin ^{21}\left(\frac{v}{2}\right)\right) \end{array}\end{equation} \ason \begin{equation}\begin{array}{l} b^1_{T,1}=\frac{433489274083}{237758976000}-\frac{152802083671 v^2}{1426553856000}+\frac{680989543811 v^4}{116406794649600}-\\ \frac{125177474703917 v^6}{2322315553259520000}+\frac{517885739552761 v^8}{306545653030256640000}-\frac{2572884198423151 v^{10}}{211516500590877081600000}-\ldots\\ b^1_{T,2}=-\frac{28417333297}{4953312000}+\frac{152802083671 v^2}{118879488000}-\frac{1000430523577 v^4}{8083805184000}+\\ \frac{161750007895703 v^6}{21502921789440000}-\frac{2419392089643157 v^8}{6386367771463680000}+\frac{69067938626578009 v^{10}}{5875458349746585600000}-\ldots\\ b^1_{T,3}=\frac{930518896733}{39626496000}-\frac{1680822920381 v^2}{237758976000}+\frac{851496508169 v^4}{923863449600}-\\ \frac{3109822683210143 v^6}{43005843578880000}+\frac{17171854137770701 v^8}{4644631106519040000}-\frac{1373640119936290727 v^{10}}{11750916699493171200000}+\ldots\\ b^1_{T,4}=-\frac{176930551859}{2971987200}+\frac{1680822920381 v^2}{71327692800}-\frac{7685041522471 v^4}{2078692761600}+\\ \frac{37302412323393157 v^6}{116115777662976000}- \frac{1150037153857349 v^8}{69669466597785600}+\frac{5553336881578048313 v^{10}}{10575825029543854080000}-\ldots\\ b^1_{T,5}=\frac{7854755921}{65228800}-\frac{1680822920381 v^2}{31701196800}+\frac{4465879941727 v^4}{479040307200}-\\ \frac{14651758435060069 v^6}{17202337431552000}+\frac{5432847035340293 v^8}{123856829507174400}-\frac{2192163846661534231 v^{10}}{1566788893265756160000}+\ldots\\ b^1_{T,6}=-\frac{146031020287}{825552000}+\frac{1680822920381 v^2}{19813248000}-\frac{855811097959 v^4}{53892034560}+\\ \frac{15982331031417479 v^6}{10751460894720000}-\frac{436210741712267 v^8}{5691949885440000}+ \frac{798931592780948369 v^{10}}{326414352763699200000}-\ldots\\ b^1_{T,7}=\frac{577045151693}{2830464000}-\frac{1680822920381 v^2}{16982784000}+\frac{130969300116257 v^4}{6928975872000}\\ -\frac{49277565690609847 v^6}{27646613729280000}+\frac{335110207212583 v^8}{3645707304960000}- \frac{7395015266709846197 v^{10}}{2518053578462822400000}+\ldots \end{array}\end{equation} \asoff Method \textit{PF-D2}: \begin{equation}\begin{array}{l} b_{1,num}^2=\frac{2048}{945} v^2 \sin ^{15}(\frac{v}{2}) (8 v^2 (11 (-4002729 \cos (v)+2078430 \cos (2 v)\\ -724279 \cos (3 v)+2346178) v^2+725760 (-62 \cos (v)+59 \cos (2 v)\\ -40 \cos (3 
v)+26 \cos (4 v)-8 \cos (5 v)+\cos (6 v)+35) \sin ^2(\frac{v}{2})) \cos ^3(\frac{v}{2})\\ +483840 \sin ^3(\frac{v}{2}) (v (30 \cos (v)+30 \cos (2 v)+30 \cos (3 v)+13 \cos (4 v)+\\ 18 \cos (5 v)+12 \cos (6 v)-5 \cos (7 v))+3 (5 v+\sin (4 v)+\sin (7 v)))) \nonumber \\ b_{2,num}^2=\frac{2048}{315} v^2 \sin ^{15}(\frac{v}{2}) (-8 v^2 (-9314063 v^2+572 (22949 v^2-\\ 120960) \cos (v)+44 (1391040-428431 v^2) \cos (2 v)+(9699668 v^2\\ -46287360) \cos (3 v)+(27699840-7967069 v^2) \cos (4 v)\\ -10644480 \cos (5 v)+1108800 \cos (6 v)+241920 \cos (7 v)-60480 \cos (8 v)\\ +35925120) \cos ^3(\frac{v}{2})-322560 \sin ^3(\frac{v}{2}) (2 v (90 \cos (v)+90 \cos (2 v)\\ +81 \cos (3 v)+50 \cos (4 v)+49 \cos (5 v)+30 \cos (6 v)-4 \cos (7 v)-2 \cos (8 v))\\ +3 (30 v+\sin (3 v)+4 \sin (4 v)+\sin (5 v)+\sin (6 v)+4 \sin (7 v)+\sin (8 v))))\\ b_{3,num}^2=\frac{4096}{315} v^2 \sin ^{15}(\frac{v}{2}) (10080 (8 v (1980 \cos (v)+1961 \cos (2 v)\\ +1680 \cos (3 v)+1226 \cos (4 v)+1015 \cos (5 v)+591 \cos (6 v)+50 \cos (7 v)\\ -52 \cos (8 v)-3 \cos (9 v)+990) \sin ^3(\frac{v}{2})-39 \cos (\frac{3 v}{2})+3 (13 \cos (\frac{5 v}{2})\\ +32 \cos (\frac{7 v}{2})-46 \cos (\frac{9 v}{2})+46 \cos (\frac{13 v}{2})-32 \cos (\frac{15 v}{2})-13 \cos (\frac{17 v}{2})\\ +13 \cos (\frac{19 v}{2})+\cos (\frac{21 v}{2})))-\cos (\frac{v}{2}) ((11 (6333473 \cos (2 v)\\ +4157054 \cos (3 v)+3619356 \cos (4 v)+2960054 \cos (5 v)\\ +724279 \cos (6 v)) v^2+4 (17018353 v^2+438480) \cos (v)\\ +20160 (-652 \cos (2 v)+445 \cos (3 v)-118 \cos (4 v)-675 \cos (5 v)\\ +939 \cos (6 v)+129 \cos (7 v)-104 \cos (8 v)+14 \cos (9 v)+\cos (10 v))) v^2\\ +12 (2409451 v^4-110880 v^2+2520)))\\ b_{4,num}^2 = \frac{2048}{945} v^2 \sin ^{15}(\frac{v}{2})(3 (432759107 v^4-7257600 v^2+483840) \cos(\frac{v}{2})\\ +362880 (17 \cos (\frac{3 v}{2})-31 \cos (\frac{5 v}{2})-16 \cos (\frac{7 v}{2})+37 \cos (\frac{9 v}{2})\\ -37 \cos (\frac{13 v}{2})+16 \cos (\frac{15 v}{2})+31 \cos (\frac{17 v}{2})-17 \cos (\frac{19 v}{2})-4 \cos(\frac{21 v}{2}))\\ +v (v (11 (103979586\cos (\frac{5 v}{2})+82091598 \cos (\frac{7 v}{2})+62181040 \cos (\frac{9 v}{2})\\ +36275904 \cos(\frac{11 v}{2})+724279 (15 \cos (\frac{13 v}{2})+\cos (\frac{15 v}{2})))v^2\\ +(1298904167 v^2-104025600) \cos (\frac{3 v}{2})+60480 (-18 \cos (\frac{5 v}{2})\\ -366 \cos (\frac{7 v}{2})-1325 \cos (\frac{9 v}{2})+957 \cos(\frac{11 v}{2})+2370 \cos (\frac{13 v}{2})\\ +718\cos (\frac{15 v}{2})-279 \cos (\frac{17 v}{2})+15 \cos (\frac{19 v}{2})+8 \cos (\frac{21 v}{2})))\\ -1935360 (1650 \cos (v)+1612 \cos (2 v)+1365\cos (3 v)\\ +1066 \cos (4 v)+819 \cos (5 v)+468 \cos (6 v)+100 \cos (7 v)\\ -34 \cos (8 v)-6 \cos (9 v)+825) \sin ^3(\frac{v}{2})))\\ b_{5,num}^2=\frac{2048}{315} v^2 \sin ^{15}(\frac{v}{2}) (-24 (40469957 v^4-1058400 v^2+\\ 70560) \cos (\frac{v}{2})+60480 (-44 \cos (\frac{3 v}{2})+117 \cos (\frac{5 v}{2})+37 \cos (\frac{7 v}{2} )\\ -109 \cos (\frac{9 v}{2})+109 \cos (\frac{13 v}{2})-37 \cos (\frac{15 v}{2})-117 \cos (\frac{17 v}{2})\\ +44 \cos (\frac{19 v}{2})+28 \cos (\frac{21 v}{2}))+v (161280 (14850 \cos (v)+14318 \cos (2 v)+\\ 12210 \cos (3 v)+9699 \cos (4 v)+7266 \cos (5 v)+4152 \cos (6 v)+\\ 1125 \cos (7 v)-176 \cos (8 v)-84 \cos (9 v)+7425) \sin ^3(\frac{v}{2})\\ +v (-11 (76636238 \cos (\frac{5 v}{2})+62120365 \cos (\frac{7 v}{2})+46335777 \cos (\frac{9 v}{2})\\ +26765870 \cos (\frac{11 v}{2})+9510034 \cos (\frac{13 v}{2})+1196642 \cos (\frac{15 v}{2})) v^2\\ +14 (4348800-69382489 v^2) \cos (\frac{3 v}{2})+20160 (288 \cos (\frac{5 v}{2})\\ +1506 \cos (\frac{7 v}{2})+1900 \cos (\frac{9 v}{2})-2112 
\cos (\frac{11 v}{2})-4320 \cos (\frac{13 v}{2})\\ -1913 \cos (\frac{15 v}{2})+339 \cos (\frac{17 v}{2})+60 \cos (\frac{19 v}{2})-28 \cos (\frac{21 v}{2}))))) \end{array}\end{equation} \begin{equation}\begin{array}{l} b_{6,num}^2=\frac{2048}{315} v^2 \sin ^{15}(\frac{v}{2}) (3 (521152357 v^4-16934400 v^2+\\ 1128960) \cos (\frac{v}{2})+241920 (7 \cos (\frac{3 v}{2})-33 \cos (\frac{5 v}{2})-20 \cos (\frac{7 v}{2})\\ +41 \cos (\frac{9 v}{2})-41 \cos (\frac{13 v}{2})+20 \cos (\frac{15 v}{2})+33 \cos (\frac{17 v}{2})-7 (\cos (\frac{19 v}{2})\\ +2 \cos (\frac{21 v}{2})))+v (v (11 (123210823 \cos (\frac{5 v}{2})+99307625 \cos (\frac{7 v}{2})\\ +73495191 \cos (\frac{9 v}{2})+43125241 \cos (\frac{11 v}{2})+15932285 \cos (\frac{13 v}{2})\\ +2736371 \cos (\frac{15 v}{2})) v^2+(1526016833 v^2-73382400) \cos (\frac{3 v}{2})\\ -40320 (774 \cos (\frac{5 v}{2})+1002 \cos (\frac{7 v}{2})+1333 \cos (\frac{9 v}{2})-1617 \cos (\frac{11 v}{2})\\ -3258 \cos (\frac{13 v}{2})-1442 \cos (\frac{15 v}{2})+51 \cos (\frac{17 v}{2})+105 \cos (\frac{19 v}{2})\\ -28 \cos (\frac{21 v}{2})))-1290240 (2970 \cos (v)+2837 \cos (2 v)+2445 \cos (3 v)\\ +1938 \cos (4 v)+1446 \cos (5 v)+831 \cos (6 v)+240 \cos (7 v)-14 \cos (8 v)\\ -21 \cos (9 v)+1485) \sin ^3(\frac{v}{2})))\\ b_{7,num}^2=-\frac{4096}{945} v^2 \sin ^{15}(\frac{v}{2}) (54 (50601001 v^4-1764000 v^2\\ +117600) \cos (\frac{v}{2})+1270080 (\cos (\frac{3 v}{2})-9 \cos (\frac{5 v}{2})-8 \cos (\frac{7 v}{2})\\ +14 \cos (\frac{9 v}{2})-14 \cos (\frac{13 v}{2})+8 \cos (\frac{15 v}{2})+9 \cos (\frac{17 v}{2})-\cos (\frac{19 v}{2})\\ -5 \cos (\frac{21 v}{2}))+v (v (11 (3 (71792647 \cos (\frac{5 v}{2})+57772764 \cos (\frac{7 v}{2})\\ +42931200 \cos (\frac{9 v}{2})+25158645 \cos (\frac{11 v}{2})+9410087 \cos (\frac{13 v}{2}))\\ +5313226 \cos (\frac{15 v}{2})) v^2+(2670318541 v^2-112190400) \cos (\frac{3 v}{2})\\ +423360 (-171 \cos (\frac{5 v}{2})-141 \cos (\frac{7 v}{2})-221 \cos (\frac{9 v}{2})+264 \cos (\frac{11 v}{2})\\ +540 \cos (\frac{13 v}{2})+229 \cos (\frac{15 v}{2})+6 \cos (\frac{17 v}{2})-21 \cos (\frac{19 v}{2})+\\ 5 \cos (\frac{21 v}{2})))-3386880 (1980 \cos (v)+1885 \cos (2 v)+1632 \cos (3 v)\\ +1290 \cos (4 v)+963 \cos (5 v)+555 \cos (6 v)+162 \cos (7 v)-4 \cos (8 v)\\ -15 \cos (9 v)+990) \sin ^3(\frac{v}{2})))\\ b_{denom}^2=\left(2147483648 v^6 \cos ^3\left(\frac{v}{2}\right) \sin ^{27}\left(\frac{v}{2}\right)\right) \end{array}\end{equation} \ason \begin{equation}\begin{array}{l} b^2_{T,1}=\frac{433489274083}{237758976000}-\frac{152802083671 v^2}{951035904000}+ \frac{1404086671901 v^4}{194011324416000}-\\ \frac{108627551857199 v^6}{1161157776629760000}+\frac{3113473234169 v^8}{1621934672117760000}-\frac{21678565330566029 v^{10}}{282022000787836108800000}-\ldots\\ b^2_{T,2}=-\frac{28417333297}{4953312000}+\frac{152802083671 v^2}{79252992000}-\frac{1000430523577 v^4}{4041902592000}+\\ \frac{3812117933243383 v^6}{193526296104960000}-\frac{131666706221101 v^8}{133049328572160000}+\frac{766613393985947587 v^{10}}{23501833398986342400000}-\ldots\\ b^2_{T,3}=\frac{930518896733}{39626496000}-\frac{1680822920381 v^2}{158505984000}+\frac{67397661839051 v^4}{32335220736000}-\\ \frac{47508096701122969 v^6}{193526296104960000}+\frac{31127602487128507 v^8}{1548210368839680000}-\frac{59333732949165745199 v^{10}}{47003666797972684800000}+\ldots\\ b^2_{T,4}=-\frac{176930551859}{2971987200}+\frac{1680822920381 v^2}{47551795200}-\frac{855811097959 v^4}{97005662208}+\\ \frac{149201016148079837 v^6}{116115777662976000}-\frac{407769624909121 v^8}{3225438268416000}+\frac{6653867251060213627
v^{10}}{742163159967989760000}-\ldots\\ b^2_{T,5}=\frac{7854755921}{65228800}-\frac{1680822920381 v^2}{21134131200}+\frac{19713857381587 v^4}{862272552960}-\\ \frac{10823009510563069 v^6}{2867056238592000}+\frac{83749133157903719 v^8}{206428049178624000}-\frac{189076914789983483663 v^{10}}{6267155573063024640000}+\ldots\\ b^2_{T,6}=-\frac{146031020287}{825552000}+\frac{1680822920381 v^2}{13208832000}-\frac{26590548293789 v^4}{673650432000}+\\ \frac{224945948304809533 v^6}{32254382684160000}-\frac{12256588145611 v^8}{15672683520000}+\frac{232561853289543390209 v^{10}}{3916972233164390400000}-\ldots\\ b^2_{T,7}=\frac{577045151693}{2830464000}-\frac{1680822920381 v^2}{11321856000}+\frac{108959828597563 v^4}{2309658624000}-\\ \frac{117725260678970569 v^6}{13823306864640000}+ \frac{35655584375317913 v^8}{36862151639040000}-\frac{248038978837339401007 v^{10}}{3357404771283763200000}+\ldots \end{array}\end{equation} \asoff Method \textit{PF-D3}: \begin{equation}\begin{array}{l} b_{1,num}^3=\frac{262144}{5} v^3 \cos (\frac{v}{2}) \sin ^{18}(\frac{v}{2}) (140734 \cos (\frac{v}{2}) v^5+12 (5357 v^4\\ -1680 v^2+480) \cos (\frac{3 v}{2}) v+(4 (21791 v^4+6480 v^2-3600) \cos (\frac{5 v}{2})\\ +(97229 v^4-27360 v^2+8640) \cos (\frac{7 v}{2})+(32989 v^4-7200 v^2\\ +10800) \cos (\frac{9 v}{2})+80 (-8 v (492 \cos (v)+492 \cos (2 v)+301 \cos (3 v)\\ +288 \cos (4 v)+215 \cos (5 v)-86 \cos (6 v)-60 \cos (7 v)+26 \cos (8 v)\\ +246) \sin ^3(\frac{v}{2})+33 (16 v^2-9) \cos (\frac{11 v}{2})+54 (3-2 v^2) \cos (\frac{13 v}{2})\\ +30 (3-4 v^2) \cos (\frac{15 v}{2})+9 (8 v^2-13) \cos (\frac{17 v}{2})+3 (9-4 v^2) \cos (\frac{19 v}{2}))) v\\ -61440 \cos ^2(\frac{v}{2}) (2 \cos (v)+2 \cos (2 v)+2 \cos (3 v)+2 \cos (4 v)\\ +2 \cos (6 v)+1) \sin ^5(\frac{v}{2})) \nonumber \\ b_{2,num}^3=-\frac{4194304}{5} v^3 \cos ^3(\frac{v}{2}) \sin ^{18}(\frac{v}{2}) (84282 \cos (\frac{v}{2} ) v^5+4 (20504 v^4\\ -4200 v^2+1125) \cos (\frac{3 v}{2}) v+(12 (4774 v^4+2160 v^2-975) \cos (\frac{5 v}{2})\\ +15 (4015 v^4-2112 v^2+552) \cos (\frac{7 v}{2})+(32989 v^4+4860 v^2\\ +6480) \cos (\frac{9 v}{2})+60 (99 (5 v^2-3) \cos (\frac{11 v}{2})+3 (63-71 v^2) \cos (\frac{13 v}{2})\\ +(60-11 v^2) \cos (\frac{15 v}{2})+2 (-4 v (492 \cos (v)+492 \cos (2 v)\\ +286 \cos (3 v)+310 \cos (4 v)+202 \cos (5 v)-95 \cos (6 v)-38 \cos (7 v)\\ +19 \cos (8 v)+246) \sin ^3(\frac{v}{2})+3 (5 v^2-17) \cos (\frac{17 v}{2})\\ -3 (v^2-4) \cos (\frac{19 v}{2})))) v-46080 \cos ^2 (\frac{v}{2}) (2 \cos (v)+2 \cos (2 v)\\ +2 \cos (3 v)+2 \cos (4 v)+2 \cos (6 v)+1) \sin ^5(\frac{v}{2})) \nonumber \\ b_{3,num}^3=\frac{1572864}{5} v^3 \cos (\frac{v}{2}) \sin ^{18}(\frac{v}{2}) (-61440 \cos ^2(\frac{v}{2}) (22 \cos (v)\\ +22 \cos (2 v)+21 \cos (3 v)+16 \cos (4 v)+13 \cos (5 v)+9 \cos (6 v)\\ +6 \cos (7 v)+\cos (8 v)+11) \sin ^5(\frac{v}{2})+2 v (604527 v^4-31000 v^2\\ +6600) \cos (\frac{v}{2})+v (1090199 v^4-5920 v^2-17520) \cos (\frac{3 v}{2})\\ +v ((951159 v^4-47360 v^2-7560) \cos (\frac{5 v}{2})+5 (148753 v^4\\ -10576 v^2+3480) \cos (\frac{7 v}{2})+(444741 v^4+48080 v^2-7200) \cos (\frac{9 v}{2} )\\ +120 (-132 \cos (\frac{11 v}{2})+60 \cos (\frac{13 v}{2})+218 \cos (\frac{15 v}{2})-36 \cos (\frac{17 v}{2})\\ -118 \cos (\frac{19 v}{2})+15 \cos (\frac{21 v}{2})+7 \cos (\frac{23 v}{2} ))+v (11 v (15863 v^2\\ +8960) \cos (\frac{11 v}{2})+v (32989 v^2+37280) \cos (\frac{13 v}{2})-\\ 40 (8 (10601 \cos (v)+9368 \cos (2 v)+7755 \cos (3 v)+5858 \cos (4 v)\\ +3103 \cos (5 v)+538 \cos (6 v)-457 \cos (7 v)-154 \cos (8 v)\\ +70 \cos (9 v)+14 \cos (10 v)+5412) \sin ^3(\frac{v}{2})+v 
(383 \cos (\frac{15 v}{2})\\ +103 \cos (\frac{17 v}{2})+4 (-26 \cos (\frac{19 v}{2})+2 \cos (\frac{21 v}{2})+\cos (\frac{23 v}{2}))))))) \nonumber \\ b_{4,num}^3=-\frac{4194304}{5} v^3 \cos ^3(\frac{v}{2}) \sin ^{18}(\frac{v}{2}) (-15360 \cos ^2(\frac{v}{2}) (110 \cos (v)\\ +110 \cos (2 v)+108 \cos (3 v)+78 \cos (4 v)+66 \cos (5 v)+44 \cos (6 v)\\ +32 \cos (7 v)+2 \cos (8 v)+55) \sin ^5(\frac{v}{2})+6 v (252263 v^4-15900 v^2\\ +3720) \cos (\frac{v}{2})+v (1374791 v^4+33000 v^2-30780) \cos (\frac{3 v}{2})\\ +v (5 (239129 v^4-18288 v^2-2052) \cos (\frac{5 v}{2})+15 (62139 v^4\\ -4720 v^2+2040) \cos (\frac{7 v}{2})+(553817 v^4+79080 v^2-12600) \cos (\frac{9 v}{2})\\ +180 (-143 \cos (\frac{11 v}{2})+59 \cos (\frac{13 v}{2})+237 \cos (\frac{15 v}{2})-75 \cos (\frac{17 v}{2})\\ -104 \cos (\frac{19 v}{2})+28 \cos (\frac{21 v}{2})+2 \cos (\frac{23 v}{2}))+v (165 v (1243 v^2\\ +760) \cos (\frac{11 v}{2})+v (32989 v^2+42360) \cos (\frac{13 v}{2})-20 (8 (26818 \cos (v)\\ +23302 \cos (2 v)+19472 \cos (3 v)+14923 \cos (4 v)+7646 \cos (5 v)\\ +934 \cos (6 v)-1360 \cos (7 v)-200 \cos (8 v)+194 \cos (9 v)\\ +11 \cos (10 v)+13530) \sin ^3(\frac{v}{2})+3 v (426 \cos (\frac{15 v}{2})+5 \cos (\frac{17 v}{2})\\ -77 \cos (\frac{19 v}{2})+15 \cos (\frac{21 v}{2})+ \cos (\frac{23 v}{2})))))) \end{array}\end{equation} \begin{equation}\begin{array}{l} b_{5,num}^3=\frac{262144}{5} v^3 \cos (\frac{v}{2}) \sin ^{18}(\frac{v}{2}) (-184320 \cos ^2(\frac{v}{2}) (330 \cos (v)\\ +326 \cos (2 v)+298 \cos (3 v)+250 \cos (4 v)+188 \cos (5 v)+142 \cos (6 v)\\ +80 \cos (7 v)+32 \cos (8 v)+4 \cos (9 v)+165) \sin ^5(\frac{v}{2})+480 v (109813 v^4\\ -4160 v^2+384) \cos (\frac{v}{2})+12 v (4062663 v^4-134080 v^2-19200) \cos (\frac{3 v}{2})\\ +v (12 (3490311 v^4-138560 v^2-16800) \cos (\frac{5 v}{2})+(32182799 v^4-\\ 992640 v^2+192960) \cos (\frac{7 v}{2})+(20304031 v^4+1640640 v^2-\\ 74160) \cos (\frac{9 v}{2})+720 (-627 \cos (\frac{11 v}{2})+532 \cos (\frac{13 v}{2})+788 \cos (\frac{15 v}{2})\\ -17 \cos (\frac{17 v}{2})-385 \cos (\frac{19 v}{2})-176 \cos (\frac{21 v}{2})+52 \cos (\frac{23 v}{2})\\ +12 \cos (\frac{25 v}{2}))+v (v (32989 (17 \cos (\frac{15 v}{2})+\cos (\frac{17 v}{2})) v^2+176 (54469 v^2\\ +16860) \cos (\frac{11 v}{2})+16 (192181 v^2+97440) \cos (\frac{13 v}{2})+480 (352 \cos (\frac{15 v}{2})\\ -232 \cos (\frac{17 v}{2})+22 \cos (\frac{19 v}{2})+68 \cos (\frac{21 v}{2})-11 \cos (\frac{23 v}{2})-3 \cos (\frac{25 v}{2})))\\ -1920 (77156 \cos (v)+69308 \cos (2 v)+57403 \cos (3 v)+42088 \cos (4 v)\\ +23663 \cos (5 v)+7258 \cos (6 v)-332 \cos (7 v)-962 \cos (8 v)\\ -52 \cos (9 v)+152 \cos (10 v)+22 \cos (11 v)+40106) \sin ^3(\frac{v}{2})))) \nonumber \\ b_{6,num}^3=-\frac{4194304}{5} v^3 \cos ^3(\frac{v}{2}) \sin ^{18}(\frac{v}{2}) (-92160 \cos ^2(\frac{v}{2}) (66 \cos (v)\\ +66 \cos (2 v)+61 \cos (3 v)+50 \cos (4 v)+37 \cos (5 v)+29 \cos (6 v)\\ +16 \cos (7 v)+5 \cos (8 v)+33) \sin ^5(\frac{v}{2})+6 v (889361 v^4-36900 v^2\\ +5880) \cos (\frac{v}{2})+v (4932719 v^4-121800 v^2-42840) \cos (\frac{3 v}{2})\\ +v (3 (1410343 v^4-62640 v^2-11880) \cos (\frac{5 v}{2})+48 (68101 v^4\\ -3465 v^2+1110) \cos (\frac{7 v}{2})+4 (502843 v^4+53175 v^2-2790) \cos (\frac{9 v}{2})\\ +3 (11 (26231 v^4+10980 v^2-2520) \cos (\frac{11 v}{2})+(78023 v^4+46140 v^2\\ +19560) \cos (\frac{13 v}{2})+2 ((4829 v^4-1870 v^2+13890) \cos (\frac{15 v}{2})\\ -10 (8 v (31262 \cos (v)+27954 \cos (2 v)+23122 \cos (3 v)+17249 \cos (4 v)\\ +9324 \cos (5 v)+2061 \cos (6 v)-650 \cos (7 v)-363 \cos (8 v)+74 \cos (9 v)\\ +55 \cos (10 v)+16236) \sin 
^3(\frac{v}{2})+3 (67 v^2+99) \cos (\frac{17 v}{2})\\ +21 (32-5 v^2) \cos (\frac{19 v}{2})+(84-31 v^2) \cos (\frac{21 v}{2})\\ +15 (v^2-6) \cos (\frac{23 v}{2})))))) \nonumber \\ b_{7,num}^3=\frac{524288}{5} v^3 \cos (\frac{v}{2}) \sin ^{18}(\frac{v}{2}) (-122880 \cos ^2(\frac{v}{2}) (462 \cos (v)\\ +452 \cos (2 v)+417 \cos (3 v)+344 \cos (4 v)+271 \cos (5 v)+191 \cos (6 v)\\ +118 \cos (7 v)+45 \cos (8 v)+10 \cos (9 v)+231) \sin ^5(\frac{v}{2})\\ +2 v (24403159 v^4-927600 v^2+75600) \cos (\frac{v}{2})+2 v (22599203 v^4-\\ 730560 v^2-111600) \cos (\frac{3 v}{2})+v (6 (6473137 v^4-267360 v^2\\ -17880) \cos (\frac{5 v}{2})+2 (14893043 v^4-320160 v^2+65880) \cos ( \frac{7 v}{2})\\ +6 (3171377 v^4+240960 v^2-28320) \cos (\frac{9 v}{2})+720 (-264 \cos (\frac{11 v}{2})\\ +302 \cos (\frac{13 v}{2})+664 \cos (\frac{15 v}{2})+50 \cos (\frac{17 v}{2})-348 \cos (\frac{19 v}{2})\\ -127 \cos (\frac{21 v}{2})+5 \cos (\frac{23 v}{2})+20 \cos (\frac{25 v}{2}))+v (v (66 (142321 v^2\\ +36320) \cos (\frac{11 v}{2})+2 (1641761 v^2+744960) \cos (\frac{13 v}{2})+(729971 v^2\\ +262800) \cos (\frac{15 v}{2})+(84227 v^2-61680) \cos (\frac{17 v}{2})+480 (24 \cos (\frac{19 v}{2})\\ +32 \cos (\frac{21 v}{2})+5 \cos (\frac{23 v}{2} )-5 \cos (\frac{25 v}{2})))-640 (214989 \cos (v)+\\ 192536 \cos (2 v)+159857 \cos (3 v)+116994 \cos (4 v)+66367 \cos (5 v)\\ +23162 \cos (6 v)+1179 \cos (7 v)-2150 \cos (8 v)-278 \cos (9 v)\\ +270 \cos (10 v)+110 \cos (11 v)+111232) \sin ^3(\frac{v}{2})))) \nonumber \\ b_{denom}^3=\left(824633720832 v^8 \cos ^6\left(\frac{v}{2}\right) \sin ^{30}\left(\frac{v}{2}\right)\right) \end{array}\end{equation} \ason \begin{equation}\begin{array}{l} b^3_{T,1}=\frac{433489274083}{237758976000}-\frac{152802083671 v^2}{713276928000}+\frac{2211398968549 v^4}{291016986624000}-\\ \frac{33578069009689 v^6}{145144722078720000}-\frac{144902264134913 v^8}{17516894458871808000}-\frac{18020995400748499 v^{10}}{14101100039391805440000}-\ldots\\ b^3_{T,2}=-\frac{28417333297}{4953312000}+\frac{152802083671 v^2}{59439744000}-\frac{1000430523577 v^4}{2425141555200}+\\ \frac{66666008116601 v^6}{1860829770240000}-\frac{11606680689206023 v^8}{6386367771463680000}+\frac{363627917613911087 v^{10}}{5875458349746585600000}-\ldots\\ b^3_{T,3}=\frac{930518896733}{39626496000}-\frac{1680822920381 v^2}{118879488000}+\frac{180183513998459 v^4}{48502831104000}-\\ \frac{6773330550886447 v^6}{12095393506560000}+\frac{8117004168919561 v^8}{142911726354432000}-\frac{9618739589821913801 v^{10}}{2350183339898634240000}+\ldots\\ b^3_{T,4}=-\frac{176930551859}{2971987200}+\frac{1680822920381 v^2}{35663846400}-\frac{117366928934503 v^4}{7275424665600}+\\ \frac{9440045489117267 v^6}{2902894441574400}-\frac{154456853448146527 v^8}{348347332988928000}+\frac{156768697509684951877 v^{10}}{3525275009847951360000}-\ldots\\ b^3_{T,5}=\frac{7854755921}{65228800}-\frac{1680822920381 v^2}{15850598400}+\frac{21053722246547 v^4}{497464934400}-\\ \frac{86689543640365 v^6}{8601168715776}+\frac{153981351646932977 v^8}{95274484236288000}- \frac{98146042038903700999 v^{10}}{522262964421918720000}+\ldots\\ b^3_{T,6}=-\frac{146031020287}{825552000}+\frac{1680822920381 v^2}{9906624000}-\frac{148538554003387 v^4}{2020951296000}+\\ \frac{77089257945806723 v^6}{4031797835520000}-\frac{9226172386459001 v^8}{2764661372928000}+ \frac{16273137531259548461 v^{10}}{39169722331643904000}-\ldots\\ b^3_{T,7}=\frac{577045151693}{2830464000}-\frac{1680822920381 v^2}{8491392000}+\frac{60974002854799 v^4}{692897587200}-\\ \frac{20335903756276117 
v^6}{863956679040000}+\frac{2799280124854146809 v^8}{663518729502720000}-\frac{449833739846395057357 v^{10}}{839351192820940800000}+\ldots \end{array}\end{equation} \asoff \inoff Method \textit{PF - D4}: $b_{1,num}^4=100663296 v^4 $ $\cos ^3(\frac{v}{2})$ $\sin ^{18}(\frac{v}{2})$ $ ((11858 v^6$ - $1512 v^4$ + $1005 v^2$ - $60) $ $\cos (\frac{v}{2})$ + $9 (1540 v^6$ + $280 v^4$ - $271 v^2$ + $20) $ $\cos (\frac{3 v}{2})$ + $60 ($ - $\cos (\frac{5 v}{2})$ - $6 $ $\cos (\frac{7 v}{2})$ + $8 $ $\cos (\frac{9 v}{2})$ - $8 $ $\cos (\frac{13 v}{2})$ + $6 $ $\cos (\frac{15 v}{2})$ + $\cos (\frac{17 v}{2})$ - $3 $ $\cos (\frac{19 v}{2})$ + $\cos (\frac{21 v}{2})$ $ )$ + $v ($ - $3072 $ $\cos ^2(\frac{v}{2})$ $ (18 $ $\cos (v)$ + $18 $ $\cos (2 v)$ + $18 $ $\cos (3 v)$ + $3 $ $\cos (4 v)$ + $14 $ $\cos (5 v)$ + $4 $ $\cos (6 v)$ - $7 $ $\cos (7 v)$ + $9) $ $\sin ^5(\frac{v}{2})$ - $16 v^2 (2244 $ $\cos (v)$ + $1419 $ $\cos (2 v)$ + $1480 $ $\cos (3 v)$ + $914 $ $\cos (4 v)$ - $961 $ $\cos (5 v)$ - $535 $ $\cos (6 v)$ + $428 $ $\cos (7 v)$ + $104 $ $\cos (8 v)$ - $77 $ $\cos (9 v)$ + $1122) $ $\sin ^3(\frac{v}{2})$ + $v (11044 v^4$ - $3816 v^2$ + $1401) $ $\cos (\frac{5 v}{2})$ + $v ((4675 v^4$ + $180 v^2$ + $2358) $ $\cos (\frac{7 v}{2})$ + $(803 v^4$ + $5028 v^2$ - $5376) $ $\cos (\frac{9 v}{2})$ + $3 (44 (18$ - $13 v^2) $ $\cos (\frac{11 v}{2})$ + $4 (283$ - $155 v^2) $ $\cos (\frac{13 v}{2})$ + $2 (212 v^2$ - $525) $ $\cos (\frac{15 v}{2})$ + $(40 v^2$ - $71) $ $\cos (\frac{17 v}{2})$ + $(285$ - $92 v^2) $ $\cos (\frac{19 v}{2})$ + $(20 v^2$ - $71) $ $\cos (\frac{21 v}{2})$ $ )))) $ $ b_{2,num}^4=$ - $100663296 v^4 $ $\cos ^3(\frac{v}{2})$ $\sin ^{18} (\frac{v}{2})$ $ (2 (77077 v^6$ - $672 v^4$ - $1074 v^2$ + $120 ) $ $\cos (\frac{v}{2})$ + $6 (23705 v^6$ - $1920 v^4$ - $22 v^2$ + $40) $ $\cos (\frac{3 v}{2})$ - $240 (4 $ $\cos ( \frac{5 v}{2})$ - $\cos (\frac{7 v}{2})$ - $2 $ $\cos (\frac{9 v}{2})$ + $2 $ $\cos (\frac{13 v}{2})$ + $\cos (\frac{15 v}{2})$ - $4 $ $\cos (\frac{17 v}{2})$ + $\cos (\frac{19 v}{2})$ + $2 $ $\cos (\frac{21 v}{2})$ - $\cos (\frac{23 v}{2})$ $ )$ + $v ($ - $6144 $ $\cos ^2(\frac{v}{2})$ $ (108 $ $\cos (v)$ + $108 $ $\cos (2 v)$ + $77 $ $\cos (3 v)$ + $70 $ $\cos (4 v)$ + $43 $ $\cos (5 v)$ + $21 $ $\cos (6 v)$ - $6 $ $\cos (7 v)$ - $13 $ $\cos (8 v)$ + $54) $ $\sin ^5(\frac{v}{2})$ - $32 v^2 (11668 $ $\cos (v)$ + $10196 $ $\cos (2 v)$ + $7776 $ $\cos (3 v)$ + $2663 $ $\cos (4 v)$ - $1296 $ $\cos (5 v)$ - $1641 $ $\cos (6 v)$ + $164 $ $\cos (7 v)$ + $661 $ $\cos (8 v)$ + $12 $ $\cos (9 v)$ - $107 $ $\cos (10 v)$ + $6732) $ $\sin ^3(\frac{v}{2})$ + $v (113707 v^4$ - $6972 v^2$ + $7080) $ $\cos (\frac{5 v}{2})$ + $v (67925 v^4$ + $10140 v^2$ - $8412) $ $\cos (\frac{7 v}{2})$ + $v (3 (8283 v^4$ + $4572 v^2$ - $1120) $ $\cos (\frac{9 v}{2})$ + $11 (365 v^4$ - $12 v^2$ + $432) $ $\cos (\frac{11 v}{2})$ + $12 ((742$ - $505 v^2) $ $\cos (\frac{13 v}{2})$ + $(57 v^2$ - $124) $ $\cos (\frac{15 v}{2})$ + $(185 v^2$ - $689) $ $\cos (\frac{17 v}{2})$ + $(143$ - $41 v^2) $ $\cos (\frac{19 v}{2})$ + $2 (86$ - $15 v^2) $ $\cos (\frac{21 v}{2})$ + $(10 v^2$ - $59) $ $\cos (\frac{23 v}{2})$ $ )))) $ $ b_{3,num}^4=201326592 v^4 $ $\cos ^3(\frac{v}{2})$ $\sin ^{18}(\frac{v}{2})$ $ ((411884 v^6$ - $10092 v^4$ - $5115 v^2$ + $660) $ $\cos (\frac{v}{2})$ + $3 (126445 v^6$ - $7800 v^4$ + $1598 v^2$ - $80) $ $\cos (\frac{3 v}{2})$ + $60 ($ - $14 $ $\cos (\frac{5 v}{2})$ - $\cos (\frac{7 v}{2})$ + $13 $ $\cos (\frac{9 v}{2})$ - $13 $ $\cos (\frac{13 v}{2})$ + $\cos (\frac{15 v}{2})$ + $14 $ 
$\cos (\frac{17 v}{2})$ + $\cos (\frac{19 v}{2})$ - $10 $ $\cos (\frac{21 v}{2})$ - $\cos (\frac{23 v}{2})$ + $3 $ $\cos (\frac{25 v}{2})$ $ )$ + $v ($ - $3072 $ $\cos ^2(\frac{v}{2})$ $ (594 $ $\cos (v)$ + $546 $ $\cos (2 v)$ + $458 $ $\cos (3 v)$ + $355 $ $\cos (4 v)$ + $244 $ $\cos (5 v)$ + $108 $ $\cos (6 v)$ - $3 $ $\cos (7 v)$ - $40 $ $\cos (8 v)$ - $18 $ $\cos (9 v)$ + $297) $ $\sin ^5(\frac{v}{2})$ + $2 v (148951 v^4$ - $3768 v^2$ + $1086) $ $\cos (\frac{5 v}{2})$ + $v (186340 v^4$ + $20100 v^2$ - $8697) $ $\cos (\frac{7 v}{2})$ + $v (33 (2648 v^4$ + $636 v^2$ - $187) $ $\cos (\frac{9 v}{2})$ + $132 (205 v^4$ + $29 v^2$ + $81) $ $\cos (\frac{11 v}{2})$ + $11 (365 v^4$ - $450 v^2$ + $1191) $ $\cos (\frac{13 v}{2})$ - $3 (731 $ $\cos (\frac{15 v}{2})$ + $3298 $ $\cos (\frac{17 v}{2})$ + $593 $ $\cos (\frac{19 v}{2})$ - $1028 $ $\cos (\frac{21 v}{2})$ - $149 $ $\cos (\frac{23 v}{2})$ + $147 $ $\cos (\frac{25 v}{2})$ $ )$ - $2 v (8 (64294 $ $\cos (v)$ + $54653 $ $\cos (2 v)$ + $38493 $ $\cos (3 v)$ + $15944 $ $\cos (4 v)$ - $1305 $ $\cos (5 v)$ - $4899 $ $\cos (6 v)$ - $676 $ $\cos (7 v)$ + $1690 $ $\cos (8 v)$ + $633 $ $\cos (9 v)$ - $230 $ $\cos (10 v)$ - $117 $ $\cos (11 v)$ + $34074) $ $\sin ^3(\frac{v}{2})$ + $3 v (171 $ $\cos (\frac{15 v}{2})$ - $365 $ $\cos (\frac{17 v}{2})$ - $73 $ $\cos (\frac{19 v}{2})$ + $10 (9 $ $\cos (\frac{21 v}{2} )$ + $\cos (\frac{23 v}{2})$ - $\cos (\frac{25 v}{2})$ $ )))))) $ $ b_{4,num}^4=$ - $100663296 v^4 $ $\cos ^3(\frac{v}{2})$ $\sin ^{18} (\frac{v}{2})$ $ (4 (674575 v^6$ - $23478 v^4$ - $4140 v^2$ + $720) $ $\cos (\frac{v}{2})$ + $4 (610445 v^6$ - $25050 v^4$ + $2328 v^2$ - $240) $ $\cos (\frac{3 v}{2})$ + $240 ($ - $12 $ $\cos (\frac{5 v}{2})$ - $9 $ $\cos (\frac{7 v}{2})$ + $19 $ $\cos (\frac{9 v}{2})$ - $19 $ $\cos (\frac{13 v}{2})$ + $9 $ $\cos (\frac{15 v}{2})$ + $11 $ $\cos (\frac{17 v}{2})$ + $\cos (\frac{19 v}{2})$ - $7 $ $\cos (\frac{21 v}{2})$ - $5 $ $\cos (\frac{23 v}{2})$ + $3 $ $\cos (\frac{25 v}{2})$ + $\cos (\frac{27 v}{2})$ $ )$ + $v ($ - $6144 $ $\cos ^2(\frac{v}{2})$ $ (1947 $ $\cos (v)$ + $1780 $ $\cos (2 v)$ + $1532 $ $\cos (3 v)$ + $1153 $ $\cos (4 v)$ + $812 $ $\cos (5 v)$ + $376 $ $\cos (6 v)$ + $35 $ $\cos (7 v)$ - $80 $ $\cos (8 v)$ - $64 $ $\cos (9 v)$ - $11 $ $\cos (10 v)$ + $990) $ $\sin ^5(\frac{v}{2} )$ + $v (1934185 v^4$ - $35376 v^2$ - $1440) $ $\cos (\frac{5 v}{2})$ + $v (1264615 v^4$ + $92640 v^2$ - $22932) $ $\cos (\frac{7 v}{2})$ + $v ((642895 v^4$ + $114648 v^2$ - $39588) $ $\cos (\frac{9 v}{2})$ + $11 (21955 v^4$ + $2712 v^2$ + $5508) $ $\cos (\frac{11 v}{2})$ + $2 ($ - $16 v (210376 $ $\cos (v)$ + $176605 $ $\cos (2 v)$ + $122822 $ $\cos (3 v)$ + $57558 $ $\cos (4 v)$ + $5448 $ $\cos (5 v)$ - $9243 $ $\cos (6 v)$ - $2316 $ $\cos (7 v)$ + $2721 $ $\cos (8 v)$ + $1892 $ $\cos (9 v)$ - $50 $ $\cos (10 v)$ - $362 $ $\cos (11 v)$ - $61 $ $\cos (12 v)$ + $109790) $ $\sin ^3(\frac{v}{2})$ + $(30745 v^4$ - $5910 v^2$ + $33852) $ $\cos (\frac{13 v}{2})$ + $(4015 v^4$ - $1242 v^2$ - $8928) $ $\cos (\frac{15 v}{2})$ + $6 (5 (87 v^2$ - $608) $ $\cos (\frac{17 v}{2})$ + $(261 v^2$ - $1220) $ $\cos (\frac{19 v}{2})$ + $5 (130$ - $17 v^2) $ $\cos (\frac{21 v}{2})$ + $(499$ - $75 v^2) $ $\cos (\frac{23 v}{2})$ + $3 (5 v^2$ - $39) $ $\cos (\frac{25 v}{2})$ + $(5 v^2$ - $41) $ $\cos (\frac{27 v}{2})$ $ ))))) $ $ b_{5,num}^4=100663296 v^4 $ $\cos ^3(\frac{v}{2})$ $\sin ^{18}(\frac{v}{2})$ $ (5 (1193060 v^6$ - $41904 v^4$ - $5997 v^2$ + $1020) $ $\cos (\frac{v}{2})$ + $60 ($ - $14 $ $\cos (\frac{3 v}{2})$ - $126 $ $\cos 
(\frac{5 v}{2})$ + $3 $ $\cos (\frac{7 v}{2})$ + $68 $ $\cos (\frac{9 v}{2})$ - $68 $ $\cos (\frac{13 v}{2})$ - $4 $ $\cos (\frac{15 v}{2})$ + $113 $ $\cos (\frac{17 v}{2})$ + $9 $ $\cos (\frac{19 v}{2})$ - $63 $ $\cos (\frac{21 v}{2})$ - $22 $ $\cos (\frac{23 v}{2})$ + $5 $ $\cos (\frac{25 v}{2})$ + $13 $ $\cos (\frac{27 v}{2})$ + $\cos (\frac{29 v}{2})$ $ )$ + $v ($ - $3072 $ $\cos ^2(\frac{v}{2})$ $ (8642 $ $\cos (v)$ + $8000 $ $\cos (2 v)$ + $6715 $ $\cos (3 v)$ + $5297 $ $\cos (4 v)$ + $3528 $ $\cos (5 v)$ + $1774 $ $\cos (6 v)$ + $335 $ $\cos (7 v)$ - $291 $ $\cos (8 v)$ - $234 $ $\cos (9 v)$ - $84 $ $\cos (10 v)$ - $5 $ $\cos (11 v)$ + $4438) $ $\sin ^5(\frac{v}{2})$ + $6 v (900240 v^4$ - $34950 v^2$ + $1097) $ $\cos (\frac{3 v}{2})$ + $2 v (2142800 v^4$ - $21390 v^2$ + $4761) $ $\cos (\frac{5 v}{2})$ + $v ((2856205 v^4$ + $170220 v^2$ - $81459) $ $\cos (\frac{7 v}{2})$ + $3 (511335 v^4$ + $68340 v^2$ - $6064) $ $\cos (\frac{9 v}{2})$ + $44 (14405 v^4$ + $1962 v^2$ + $2133) $ $\cos (\frac{11 v}{2})$ + $3 (37216 $ $\cos (\frac{13 v}{2})$ - $1456 $ $\cos (\frac{15 v}{2})$ - $26299 $ $\cos (\frac{17 v}{2})$ - $9051 $ $\cos (\frac{19 v}{2})$ + $3561 $ $\cos (\frac{21 v}{2})$ + $2870 $ $\cos (\frac{23 v}{2})$ + $257 $ $\cos (\frac{25 v}{2})$ - $503 $ $\cos (\frac{27 v}{2})$ - $35 $ $\cos (\frac{29 v}{2})$ $ )$ + $v (v (20 (9515 v^2$ - $318) $ $\cos (\frac{13 v}{2})$ + $9 (4235 v^2$ - $684) $ $\cos (\frac{15 v}{2})$ + $5 (803 v^2$ + $2052) $ $\cos (\frac{17 v}{2})$ + $12 (404 $ $\cos (\frac{19 v}{2})$ - $84 $ $\cos (\frac{21 v}{2})$ - $100 $ $\cos (\frac{23 v}{2})$ - $12 $ $\cos (\frac{25 v}{2})$ + $15 $ $\cos (\frac{27 v}{2})$ + $\cos (\frac{29 v}{2})$ $ ))$ - $16 (923815 $ $\cos (v)$ + $780435 $ $\cos (2 v)$ + $544332 $ $\cos (3 v)$ + $265090 $ $\cos (4 v)$ + $54870 $ $\cos (5 v)$ - $23058 $ $\cos (6 v)$ - $10352 $ $\cos (7 v)$ + $8726 $ $\cos (8 v)$ + $6750 $ $\cos (9 v)$ + $651 $ $\cos (10 v)$ - $1000 $ $\cos (11 v)$ - $444 $ $\cos (12 v)$ - $25 $ $\cos (13 v)$ + $488520) $ $\sin ^3(\frac{v}{2})$ $ )))) $ $ b_{6,num}^4=$ - $100663296 v^4 $ $\cos ^3(\frac{v}{2})$ $\sin ^{18} (\frac{v}{2})$ $ (4 (2363372 v^6$ - $84162 v^4$ - $11175 v^2$ + $1860) $ $\cos (\frac{v}{2})$ + $24 (355960 v^6$ - $12795 v^4$ + $476 v^2$ - $80) $ $\cos (\frac{3 v}{2})$ + $240 ($ - $39 $ $\cos (\frac{5 v}{2})$ - $3 $ $\cos (\frac{7 v}{2})$ + $23 $ $\cos (\frac{9 v}{2})$ - $23 $ $\cos (\frac{13 v}{2} )$ + $2 $ $\cos (\frac{15 v}{2})$ + $35 $ $\cos (\frac{17 v}{2})$ + $6 $ $\cos (\frac{19 v}{2})$ - $21 $ $\cos (\frac{21 v}{2})$ - $10 $ $\cos (\frac{23 v}{2})$ + $2 $ $\cos (\frac{25 v}{2})$ + $4 $ $\cos (\frac{27 v}{2})$ + $\cos (\frac{29 v}{2})$ $ )$ + $v ($ - $6144 $ $\cos ^2(\frac{v}{2})$ $ (6889 $ $\cos (v)$ + $6340 $ $\cos (2 v)$ + $5369 $ $\cos (3 v)$ + $4201 $ $\cos (4 v)$ + $2829 $ $\cos (5 v)$ + $1439 $ $\cos (6 v)$ + $331 $ $\cos (7 v)$ - $177 $ $\cos (8 v)$ - $180 $ $\cos (9 v)$ - $69 $ $\cos (10 v)$ - $10 $ $\cos (11 v)$ + $3530) $ $\sin ^5(\frac{v}{2})$ + $4 v (1700006 v^4$ - $14241 v^2$ - $1503) $ $\cos (\frac{5 v}{2})$ + $4 v (1147190 v^4$ + $60825 v^2$ - $25641) $ $\cos (\frac{7 v}{2})$ + $v (12 (210419 v^4$ + $24831 v^2$ - $1957) $ $\cos (\frac{9 v}{2})$ + $44 (24817 v^4$ + $3201 v^2$ + $3159) $ $\cos (\frac{11 v}{2})$ + $(356345 v^4$ + $7080 v^2$ + $156144) $ $\cos (\frac{13 v}{2})$ - $12 (262 $ $\cos (\frac{15 v}{2})$ + $8737 $ $\cos (\frac{17 v}{2})$ + $3825 $ $\cos (\frac{19 v}{2})$ - $2 (498 $ $\cos (\frac{21 v}{2})$ + $523 $ $\cos (\frac{23 v}{2})$ + $67 ( $ $\cos (\frac{25 v}{2})$ - $\cos 
(\frac{27 v}{2})$ $ ))$ + $35 $ $\cos (\frac{29 v}{2})$ $ )$ + $v (v ((82599 v^2$ - $6408) $ $\cos (\frac{15 v}{2})$ + $15 (803 v^2$ + $864 ) $ $\cos (\frac{17 v}{2})$ + $(803 v^2$ + $7584) $ $\cos (\frac{19 v}{2})$ - $12 (81 $ $\cos (\frac{21 v}{2})$ + $135 $ $\cos (\frac{23 v}{2})$ + $23 $ $\cos (\frac{25 v}{2})$ - $15 $ $\cos (\frac{27 v}{2})$ - $4 $ $\cos (\frac{29 v}{2})$ $ ))$ - $32 (731642 $ $\cos (v)$ + $616515 $ $\cos (2 v)$ + $431394 $ $\cos (3 v)$ + $217139 $ $\cos (4 v)$ + $54930 $ $\cos (5 v)$ - $9918 $ $\cos (6 v)$ - $7072 $ $\cos (7 v)$ + $5350 $ $\cos (8 v)$ + $4914 $ $\cos (9 v)$ + $747 $ $\cos (10 v)$ - $614 $ $\cos (11 v)$ - $339 $ $\cos (12 v)$ - $50 $ $\cos (13 v)$ + $386010) $ $\sin ^3(\frac{v}{2})$ $ )))) $ $ b_{7,num}^4=201326592 v^4 $ $\cos ^3(\frac{v}{2})$ $\sin ^{18}(\frac{v}{2})$ $ (44 (14828 v^4$ + $1779 v^2$ + $1971) $ $\cos (\frac{11 v}{2})$ $ v^2$ + $48 (491 v^2$ + $42) $ $\sin (\frac{v}{2})$ $ v$ + $2 (2652$ - $18397 v^2) $ $\sin (\frac{3 v}{2})$ $ v$ - $2 (46961 v^2$ + $1572) $ $\sin (\frac{5 v}{2})$ $ v$ - $2 (84467 v^2$ + $996) $ $\sin ( \frac{7 v}{2})$ $ v$ + $92 (132$ - $1315 v^2) $ $\sin (\frac{9 v}{2})$ $ v$ + $12 (6461 v^2$ - $1752) $ $\sin (\frac{11 v}{2})$ $ v$ + $12 (10789 v^2$ - $2244) $ $\sin (\frac{13 v}{2})$ $ v$ + $16 (2921 v^2$ + $987) $ $\sin (\frac{15 v}{2})$ $ v$ + $112 (219$ - $122 v^2) $ $\sin (\frac{17 v}{2})$ $ v$ + $16 (570$ - $1019 v^2) $ $\sin (\frac{19 v}{2})$ $ v$ + $8 (233 v^2$ - $1146) $ $\sin ( \frac{21 v}{2})$ $ v$ + $4 (1075 v^2$ - $2004) $ $\sin (\frac{23 v}{2})$ $ v$ + $10 (25 v^2$ + $12) $ $\sin (\frac{25 v}{2})$ $ v$ + $2 (372$ - $131 v^2) $ $\sin (\frac{27 v}{2})$ $ v$ + $30 (12$ - $5 v^2) $ $\sin (\frac{29 v}{2})$ $ v$ + $(5494852 v^6$ - $200688 v^4$ - $24198 v^2$ + $4200) $ $\cos (\frac{v}{2})$ + $(4966940 v^6$ - $164820 v^4$ + $6873 v^2$ - $1500) $ $\cos (\frac{3 v}{2})$ + $(3957316 v^6$ - $37644 v^4$ - $15639 v^2$ - $3900) $ $\cos (\frac{5 v}{2})$ + $(2678500 v^6$ + $135480 v^4$ - $37413 v^2$ - $2340) $ $\cos (\frac{7 v}{2})$ + $2 (742786 v^6$ + $87156 v^4$ - $15261 v^2$ + $2220) $ $\cos (\frac{9 v}{2})$ + $(217855 v^6$ + $7320 v^4$ + $91506 v^2$ - $4440) $ $\cos (\frac{13 v}{2})$ + $4 (13079 v^6$ - $318 v^4$ - $2646 v^2$ + $540) $ $\cos (\frac{15 v}{2})$ + $2 (4180 v^6$ + $2820 v^4$ - $25347 v^2$ + $1740) $ $\cos (\frac{17 v}{2})$ + $(737 v^6$ + $4776 v^4$ - $28968 v^2$ + $960) $ $\cos (\frac{19 v}{2})$ - $6 (54 v^4$ - $863 v^2$ + $380) $ $\cos (\frac{21 v}{2})$ - $24 (45 v^4$ - $347 v^2$ + $80) $ $\cos (\frac{23 v}{2})$ + $9 ($ - $8 v^4$ + $35 v^2$ + $60) $ $\cos (\frac{25 v}{2})$ + $3 (20 v^4$ - $197 v^2$ + $140) $ $\cos (\frac{27 v}{2})$ + $9 (4 v^4$ - $35 v^2$ + $20) $ $\cos (\frac{29 v}{2})$ $ ) $ $ b_{denom}^4= -\left(316659348799488 v^{10} \cos ^{10}\left(\frac{v}{2} \right)\sin ^{30}\left(\frac{v}{2}\right) \right) $ $ b^4_{T,1}=\frac{433489274083}{237758976000}$ - $\frac{152802083671 v^2}{570621542400}$ + $\frac{7762618237 v^4}{1119296102400}$ - $\frac{7881601960439 v^6}{14744860655616000}$ - $\frac{27284304529514897 v^8}{613091306060513280000}$ - $\frac{1799866965050155021 v^{10}}{282022000787836108800000}$ - $\ldots $ $ b^4_{T,2}=$ - $\frac{28417333297}{4953312000}$ + $\frac{152802083671 v^2}{47551795200}$ - $\frac{1000430523577 v^4}{1616761036800}$ + $\frac{604487352966331 v^6}{11058645491712000}$ - $\frac{75851624289432059 v^8}{25545471085854720000}$ + $\frac{646544241473169703 v^{10}}{7833944466328780800000}$ - $\ldots $ $ b^4_{T,3}=\frac{930518896733}{39626496000}$ - $\frac{1680822920381 
v^2}{95103590400}$ + $\frac{2349705253321 v^4}{404190259200}$ - $\frac{23296554826706981 v^6}{22117290983424000}$ + $\frac{58594320744987337 v^8}{488908537528320000}$ - $\frac{144079291878124208197 v^{10}}{15667888932657561600000}$ + $\ldots $ $ b^4_{T,4}=$ - $\frac{176930551859}{2971987200}$ + $\frac{1680822920381 v^2}{28531077120}$ - $\frac{74576374036553 v^4}{2910169866240}$ + $\frac{95021198062331 v^6}{14455745740800}$ - $\frac{1557322122991096859 v^8}{1393389331955712000}$ + $\frac{1918393406379510690887 v^{10}}{14101100039391805440000}$ - $\ldots $ $ b^4_{T,5}=\frac{7854755921}{65228800}$ - $\frac{1680822920381 v^2}{12680478720}$ + $\frac{7297045929049 v^4}{107784069120}$ - $\frac{20692039318485463 v^6}{982990710374400}$ + $\frac{5526609376838648143 v^8}{1238568295071744000}$ - $\frac{4320389579215898805647 v^{10}}{6267155573063024640000}$ + $\ldots $ $ b^4_{T,6}=$ - $\frac{146031020287}{825552000}$ + $\frac{1680822920381 v^2}{7925299200}$ - $\frac{1177252560689 v^4}{9980006400}$ + $\frac{74732313119187721 v^6}{1843107581952000}$ - $\frac{3727799369309648939 v^8}{387052592209920000}$ + $\frac{6574125730067577575911 v^{10}}{3916972233164390400000}$ - $\ldots $ $ b^4_{T,7}=\frac{577045151693}{2830464000}$ - $\frac{1680822920381 v^2}{6793113600}$ + $\frac{12244386604777 v^4}{86612198400}$ - $\frac{26404757298856247 v^6}{526602166272000}$ + $\frac{8187780819568609243 v^8}{663518729502720000}$ - $\frac{7493224716658621457999 v^{10}}{3357404771283763200000}$ + $\ldots $ Method \textit{PF - D5}: $ b_{1,num}^5=$ - $7247757312 v^5 $ $\cos ^6(\frac{v}{2})$ $\sin ^{15}(\frac{v}{2})$ $ ($ - $122880 \cos ^4(\frac{v}{2} ) (2 $ $\cos (v)$ + $2 $ $\cos (2 v)$ + $2 $ $\cos (3 v)$ + $2 $ $\cos (4 v)$ + $2 $ $\cos (6 v)$ + $1) $ $\sin ^7(\frac{v}{2})$ + $70 v (396 v^6$ + $72 v^4$ - $95 v^2$ + $20) $ $\cos (\frac{v}{2})$ + $30 v (616 v^6$ - $504 v^4$ + $335 v^2$ - $20) $ $\cos (\frac{3 v}{2})$ + $v (60 (3 v^2 (44 v^4$ + $52 v^2$ + $35)$ - $80) $ $\cos (\frac{5 v}{2})$ + $30 (66 v^6$ + $408 v^4$ - $979 v^2$ + $220) $ $\cos (\frac{7 v}{2})$ + $100 (6 \cos (\frac{9 v}{2} )$ - $88 $ $\cos (\frac{11 v}{2})$ + $60 $ $\cos (\frac{13 v}{2})$ + $33 $ $\cos (\frac{15 v}{2})$ - $51 $ $\cos (\frac{17 v}{2})$ + $6 $ $\cos (\frac{19 v}{2})$ + $12 $ $\cos (\frac{21 v}{2})$ - $4 $ $\cos (\frac{23 v}{2})$ $ )$ + $v ($ - $2560 $ $\cos ^2(\frac{v}{2})$ $ (84 $ $\cos (v)$ + $84 $ $\cos (2 v)$ - $13 $ $\cos (3 v)$ + $84 $ $\cos (4 v)$ + $\cos (5 v)$ - $115 $ $\cos (6 v)$ + $31 $ $\cos (8 v)$ + $42) $ $\sin ^5(\frac{v}{2})$ - $16 v^2 (641 $ $\cos (v)$ + $2520 $ $\cos (2 v)$ - $93 $ $\cos (3 v)$ - $5564 $ $\cos (4 v)$ + $13 $ $\cos (5 v)$ + $3882 $ $\cos (6 v)$ - $\cos (7 v)$ - $1292 $ $\cos (8 v)$ + $174 $ $\cos (10 v)$ + $1260) $ $\sin ^3(\frac{v}{2})$ + $5 v ((44 v^4$ - $2808 v^2$ + $3966 ) $ $\cos (\frac{9 v}{2})$ + $88 (44$ - $9 v^2) $ $\cos (\frac{11 v}{2})$ + $24 (73 v^2$ - $212) $ $\cos (\frac{13 v}{2})$ - $3 (24 v^2$ + $121) $ $\cos (\frac{15 v}{2})$ + $3 (735$ - $184 v^2) $ $\cos (\frac{17 v}{2})$ + $6 (20 v^2$ - $71) $ $\cos (\frac{19 v}{2})$ + $12 (6 v^2$ - $29) $ $\cos (\frac{21 v}{2})$ + $4 (29$ - $6 v^2) $ $\cos (\frac{23 v}{2})$ $ )))) $ $ b_{2,num}^5=2415919104 v^5 $ $\cos ^6(\frac{v}{2})$ $\sin ^{15} (\frac{v}{2})$ $ ($ - $737280 \cos ^4(\frac{v}{2} ) (12 $ $\cos (v)$ + $12 $ $\cos (2 v)$ + $12 $ $\cos (3 v)$ + $7 $ $\cos (4 v)$ + $10 $ $\cos (5 v)$ + $2 $ $\cos (6 v)$ + $5 $ $\cos (7 v)$ + $6) \sin ^7(\frac{v}{2} )$ + $180 v (4620 v^6$ - $672 v^4$ + $13 v^2$ + $120) $ $\cos (\frac{v}{2})$ + $45 v (14256 
v^6$ + $2344 v^4$ + $1987 v^2$ - $1300 ) $ $\cos (\frac{3 v}{2})$ + $v (45 (8184 v^6$ + $1608 v^4$ - $6581 v^2$ + $1420) $ $\cos (\frac{5 v}{2})$ + $45 (3256 v^6$ - $588 v^4$ + $4583 v^2$ - $420) \cos (\frac{7 v}{2} )$ - $300 (123 $ $\cos (\frac{9 v}{2})$ - $154 $ $\cos (\frac{11 v}{2})$ + $130 $ $\cos (\frac{13 v}{2})$ - $96 $ $\cos (\frac{15 v}{2})$ - $84 \cos (\frac{17 v}{2} )$ + $171 $ $\cos (\frac{19 v}{2})$ - $33 $ $\cos (\frac{21 v}{2})$ - $49 $ $\cos (\frac{23 v}{2})$ + $19 $ $\cos (\frac{25 v}{2})$ $ )$ + $v ($ - $7680 $ $\cos ^2(\frac{v}{2})$ $ (1008 $ $\cos (v)$ + $497 $ $\cos (2 v)$ + $814 $ $\cos (3 v)$ + $140 $ $\cos (4 v)$ - $191 $ $\cos (5 v)$ - $231 $ $\cos (6 v)$ - $364 $ $\cos (7 v)$ + $62 $ $\cos (8 v)$ + $137 $ $\cos (9 v)$ + $504) $ $\sin ^5(\frac{v}{2})$ - $16 v^2 (79446 $ $\cos (v)$ + $26700 $ $\cos (2 v)$ - $54673 $ $\cos (3 v)$ - $35424 $ $\cos (4 v)$ - $6282 $ $\cos (5 v)$ + $23632 $ $\cos (6 v)$ + $23514 $ $\cos (7 v)$ - $7782 $ $\cos (8 v)$ - $11000 $ $\cos (9 v)$ + $1044 $ $\cos (10 v)$ + $1755 $ $\cos (11 v)$ + $14510) $ $\sin ^3(\frac{v}{2})$ + $15 v (3 (792 v^4$ - $652 v^2$ + $1075 ) $ $\cos (\frac{9 v}{2})$ + $22 (12 v^4$ - $102 v^2$ - $47 ) $ $\cos (\frac{11 v}{2})$ + $2 (582 v^2$ + $1891) $ $\cos (\frac{13 v}{2})$ + $32 (73 v^2$ - $342) $ $\cos (\frac{15 v}{2})$ + $24 (43$ - $36 v^2) $ $\cos (\frac{17 v}{2})$ + $3 (2131$ - $300 v^2) $ $\cos (\frac{19 v}{2})$ + $(364 v^2$ - $1887) $ $\cos (\frac{21 v}{2})$ + $(132 v^2$ - $1151) $ $\cos (\frac{23 v}{2})$ + $(461$ - $60 v^2) $ $\cos (\frac{25 v}{2})$ $ )))) $ $ b_{3,num}^5=$ - $4831838208 v^5 $ $\cos ^6(\frac{v}{2})$ $\sin ^{15}(\frac{v}{2})$ $ ($ - $368640 \cos ^4(\frac{v}{2} ) (66 $ $\cos (v)$ + $66 $ $\cos (2 v)$ + $56 $ $\cos (3 v)$ + $56 $ $\cos (4 v)$ + $30 $ $\cos (5 v)$ + $36 $ $\cos (6 v)$ + $10 $ $\cos (7 v)$ + $10 $ $\cos (8 v)$ + $33) $ $\sin ^7(\frac{v}{2})$ + $120 v (17424 v^6$ - $765 v^4$ + $716 v^2$ - $155 ) $ $\cos (\frac{v}{2})$ + $45 v (37026 v^6$ + $92 v^4$ - $5159 v^2$ + $1100) $ $\cos (\frac{3 v}{2})$ + $v (15 (70422 v^6$ + $10188 v^4$ + $9929 v^2$ - $4220) $ $\cos (\frac{5 v}{2})$ + $300 (161 $ $\cos (\frac{7 v}{2})$ + $33 $ $\cos (\frac{9 v}{2})$ - $319 $ $\cos (\frac{11 v}{2})$ + $275 $ $\cos (\frac{13 v}{2})$ + $81 \cos (\frac{15 v}{2} )$ - $123 $ $\cos (\frac{17 v}{2})$ + $20 $ $\cos (\frac{19 v}{2})$ - $72 $ $\cos (\frac{21 v}{2})$ + $35 ( $ $\cos ( \frac{23 v}{2})$ + $\cos (\frac{25 v}{2})$ $ )$ - $18 $ $\cos (\frac{27 v}{2})$ $ )$ + $v ($ - $7680 $ $\cos ^2(\frac{v}{2})$ $ (2233 $ $\cos (v)$ + $2261 $ $\cos (2 v)$ + $1114 $ $\cos (3 v)$ + $831 $ $\cos (4 v)$ - $199 $ $\cos (5 v)$ - $879 $ $\cos (6 v)$ - $369 $ $\cos (7 v)$ - $102 $ $\cos (8 v)$ + $137 $ $\cos (9 v)$ + $121 $ $\cos (10 v)$ + $1386) $ $\sin ^5(\frac{v}{2})$ - $16 v^2 (106239 $ $\cos (v)$ + $46080 $ $\cos (2 v)$ - $45422 $ $\cos (3 v)$ - $97266 $ $\cos (4 v)$ - $7053 $ $\cos (5 v)$ + $48138 $ $\cos (6 v)$ + $23511 $ $\cos (7 v)$ - $2538 $ $\cos (8 v)$ - $10990 $ $\cos (9 v)$ - $5154 $ $\cos (10 v)$ + $1755 $ $\cos (11 v)$ + $1270 $ $\cos (12 v)$ + $93890) \sin ^3(\frac{v}{2} )$ + $15 v ((36 v^2 (968 v^2$ + $57)$ - $15475) $ $\cos (\frac{7 v}{2})$ + $(12672 v^4$ - $8132 v^2$ + $20199) $ $\cos (\frac{9 v}{2})$ + $11 (270 v^4$ - $138 v^2$ + $1105) $ $\cos (\frac{11 v}{2})$ + $(330 v^4$ + $3138 v^2$ - $18373) $ $\cos (\frac{13 v}{2})$ + $3 (284 v^2$ - $921) $ $\cos (\frac{15 v}{2})$ + $3 (453$ - $68 v^2) $ $\cos (\frac{17 v}{2})$ + $2 (1081$ - $267 v^2) $ $\cos (\frac{19 v}{2})$ + $6 (408$ - $41 v^2) $ $\cos (\frac{21 
v}{2})$ + $(228 v^2$ - $1609) $ $\cos (\frac{23 v}{2})$ + $5 (12 v^2$ - $131) $ $\cos (\frac{25 v}{2})$ + $4 (93$ - $10 v^2) $ $\cos (\frac{27 v}{2})$ $ )))) $ $ b_{4,num}^5=12079595520 v^5 $ $\cos ^6(\frac{v}{2})$ $\sin ^{15}(\frac{v}{2})$ $ ($ - $737280 \cos ^4(\frac{v}{2} ) (44 $ $\cos (v)$ + $42 $ $\cos (2 v)$ + $40 $ $\cos (3 v)$ + $31 $ $\cos (4 v)$ + $28 $ $\cos (5 v)$ + $16 $ $\cos (6 v)$ + $13 $ $\cos (7 v)$ + $4 $ $\cos (8 v)$ + $2 $ $\cos (9 v)$ + $22) $ $\sin ^7(\frac{v}{2})$ + $24 v ((4356 v^2 (25 v^2$ - $1 )$ - $1043) v^2$ + $530) $ $\cos (\frac{v}{2})$ + $3 v (716320 v^6$ + $20196 v^4$ + $11847 v^2$ - $11460) $ $\cos (\frac{3 v}{2})$ + $v (9 (11 (14520 v^4$ + $948 v^2$ - $2345) v^2$ + $4020) $ $\cos (\frac{5 v}{2})$ - $60 (101 $ $\cos (\frac{7 v}{2})$ + $501 $ $\cos (\frac{9 v}{2})$ - $220 $ $\cos (\frac{11 v}{2})$ + $104 \cos (\frac{13 v}{2} )$ - $630 $ $\cos (\frac{15 v}{2})$ - $82 $ $\cos (\frac{17 v}{2})$ + $649 $ $\cos (\frac{19 v}{2})$ - $27 $ $\cos (\frac{21 v}{2})$ - $91 $ $\cos (\frac{23 v}{2})$ - $67 $ $\cos (\frac{25 v}{2})$ - $30 \cos (\frac{27 v}{2} )$ + $34 $ $\cos (\frac{29 v}{2})$ $ )$ + $v ($ - $1536 $ $\cos ^2(\frac{v}{2})$ $ (16324 $ $\cos (v)$ + $12089 $ $\cos (2 v)$ + $9812 $ $\cos (3 v)$ + $3800 $ $\cos (4 v)$ - $867 $ $\cos (5 v)$ - $3291 $ $\cos (6 v)$ - $3184 $ $\cos (7 v)$ - $484 $ $\cos (8 v)$ + $561 $ $\cos (9 v)$ + $484 $ $\cos (10 v)$ + $214 $ $\cos (11 v)$ + $8102) $ $\sin ^5(\frac{v}{2})$ + $39 v (132 (150 v^4$ + $v^2)$ + $4003) $ $\cos (\frac{7 v}{2})$ + $v ($ - $16 v (160098 $ $\cos (v)$ + $48324 $ $\cos (2 v)$ - $56061 $ $\cos (3 v)$ - $67128 $ $\cos (4 v)$ - $17958 $ $\cos (5 v)$ + $33644 $ $\cos (6 v)$ + $30234 $ $\cos (7 v)$ - $534 $ $\cos (8 v)$ - $10044 $ $\cos (9 v)$ - $4320 $ $\cos (10 v)$ + $15 $ $\cos (11 v)$ + $1016 $ $\cos (12 v)$ + $396 $ $\cos (13 v)$ + $85598) $ $\sin ^3(\frac{v}{2})$ + $3 (44 (2490 v^2$ - $409) v^2$ + $45675) $ $\cos (\frac{9 v}{2})$ + $132 (810 v^4$ - $276 v^2$ + $509) $ $\cos (\frac{11 v}{2})$ + $3 (4 (1980 v^4$ + $1716 v^2$ - $4475 ) $ $\cos (\frac{13 v}{2})$ + $(880 v^4$ + $8816 v^2$ - $53214 ) $ $\cos (\frac{15 v}{2})$ + $2 (2861$ - $648 v^2) $ $\cos (\frac{17 v}{2})$ + $(21575$ - $2808 v^2) $ $\cos (\frac{19 v}{2})$ + $(1959$ - $344 v^2) $ $\cos (\frac{21 v}{2})$ + $(300 v^2$ - $2129) $ $\cos (\frac{23 v}{2})$ + $(300 v^2$ - $2501) $ $\cos (\frac{25 v}{2})$ + $2 (10 v^2$ - $177) $ $\cos (\frac{27 v}{2})$ + $2 (307$ - $30 v^2) $ $\cos (\frac{29 v}{2})$ $ ))))) $ $ b_{5,num}^5=$ - $12079595520 v^5 $ $\cos ^6(\frac{v}{2})$ $\sin ^{15}(\frac{v}{2})$ $ ($ - $368640 \cos ^4(\frac{v}{2} ) (2 $ $\cos (v)$ + $1)^2 (22 $ $\cos (v)$ + $22 $ $\cos (2 v)$ + $20 $ $\cos (3 v)$ + $18 $ $\cos (4 v)$ + $10 $ $\cos (5 v)$ + $12 $ $\cos (6 v)$ + $4 $ $\cos (7 v)$ + $2 $ $\cos (8 v)$ + $11) $ $\sin ^7(\frac{v}{2})$ + $6 v (v^2 (8580 (110 v^2$ - $3) v^2$ + $1993)$ - $460) \cos (\frac{v}{2} )$ + $18 v (11 v^2 (360 (66 v^4$ + $v^2)$ - $827 )$ - $20) $ $\cos (\frac{3 v}{2})$ + $v (6 ((13200 v^2 (41 v^2$ + $2)$ - $20807) v^2$ + $980 ) $ $\cos (\frac{5 v}{2})$ + $60 (117 $ $\cos (\frac{7 v}{2})$ - $459 $ $\cos (\frac{9 v}{2})$ - $440 $ $\cos (\frac{11 v}{2})$ + $640 $ $\cos (\frac{13 v}{2})$ + $783 $ $\cos (\frac{15 v}{2})$ - $61 \cos (\frac{17 v}{2} )$ - $658 $ $\cos (\frac{19 v}{2})$ - $336 $ $\cos (\frac{21 v}{2})$ + $217 $ $\cos (\frac{23 v}{2})$ + $169 $ $\cos (\frac{25 v}{2})$ + $18 $ $\cos (\frac{27 v}{2})$ - $20 $ $\cos (\frac{29 v}{2})$ - $16 $ $\cos (\frac{31 v}{2})$ $ )$ + $v ($ - $1536 $ $\cos ^2(\frac{v}{2})$ $ 
(34140 $ $\cos (v)$ + $28661 $ $\cos (2 v)$ + $19811 $ $\cos (3 v)$ + $9491 $ $\cos (4 v)$ - $979 $ $\cos (5 v)$ - $6567 $ $\cos (6 v)$ - $5641 $ $\cos (7 v)$ - $1793 $ $\cos (8 v)$ + $841 $ $\cos (9 v)$ + $1009 $ $\cos (10 v)$ + $428 $ $\cos (11 v)$ + $95 $ $\cos (12 v)$ + $18514) $ $\sin ^5(\frac{v}{2})$ + $9 v (220 (933 v^2$ + $10) v^2$ + $9447) $ $\cos (\frac{7 v}{2})$ + $3 v (484 (585 v^2$ - $64) v^2$ + $111477) \cos (\frac{9 v}{2} )$ + $v ($ - $16 v (304575 $ $\cos (v)$ + $111318 $ $\cos (2 v)$ - $82171 $ $\cos (3 v)$ - $124536 $ $\cos (4 v)$ - $30912 $ $\cos (5 v)$ + $53088 $ $\cos (6 v)$ + $50880 $ $\cos (7 v)$ + $6120 $ $\cos (8 v)$ - $15695 $ $\cos (9 v)$ - $9480 $ $\cos (10 v)$ - $669 $ $\cos (11 v)$ + $1476 $ $\cos (12 v)$ + $792 $ $\cos (13 v)$ + $162 $ $\cos (14 v)$ + $192432) $ $\sin ^3(\frac{v}{2})$ + $264 (1170 v^4$ - $204 v^2$ + $535) $ $\cos (\frac{11 v}{2})$ + $24 (3630 v^4$ + $1092 v^2$ - $7165) $ $\cos (\frac{13 v}{2})$ + $3 ((5940 v^4$ + $12544 v^2$ - $66585) $ $\cos (\frac{15 v}{2})$ + $(660 v^4$ + $816 v^2$ - $5245) $ $\cos (\frac{17 v}{2})$ + $2 (15655$ - $2208 v^2) $ $\cos (\frac{19 v}{2})$ + $4 (3141$ - $376 v^2 ) $ $\cos (\frac{21 v}{2})$ + $(624 v^2$ - $5363) $ $\cos (\frac{23 v}{2})$ + $(456 v^2$ - $4151) $ $\cos (\frac{25 v}{2})$ + $2 (32 v^2$ - $303) $ $\cos (\frac{27 v}{2})$ + $16 (28$ - $3 v^2) $ $\cos (\frac{29 v}{2})$ + $4 (65$ - $6 v^2) $ $\cos (\frac{31 v}{2})$ $ ))))) $ $ b_{6,num}^5=2415919104 v^5 $ $\cos ^6(\frac{v}{2})$ $\sin ^{15} (\frac{v}{2})$ $ ($ - $737280 \cos ^4(\frac{v}{2} ) (782 $ $\cos (v)$ + $756 $ $\cos (2 v)$ + $681 $ $\cos (3 v)$ + $596 $ $\cos (4 v)$ + $446 $ $\cos (5 v)$ + $346 $ $\cos (6 v)$ + $196 $ $\cos (7 v)$ + $111 $ $\cos (8 v)$ + $36 $ $\cos (9 v)$ + $10 $ $\cos (10 v)$ + $\cos (11 v)$ + $395) $ $\sin ^7(\frac{v}{2})$ + $30 v (1475232 v^6$ - $29700 v^4$ + $2389 v^2$ - $2620) $ $\cos (\frac{v}{2})$ + $15 v (2477376 v^6$ + $16060 v^4$ - $104631 v^2$ + $10740) $ $\cos (\frac{3 v}{2})$ + $v (45 (578688 v^6$ + $24420 v^4$ - $2109 v^2$ - $3460) $ $\cos (\frac{5 v}{2})$ + $90 (168256 v^6$ + $2904 v^4$ - $93 v^2$ + $1500) $ $\cos (\frac{7 v}{2})$ - $300 (366 \cos (\frac{9 v}{2} )$ + $1240 $ $\cos (\frac{11 v}{2})$ - $1456 $ $\cos (\frac{13 v}{2})$ - $1008 $ $\cos (\frac{15 v}{2})$ + $376 $ $\cos (\frac{17 v}{2})$ + $568 $ $\cos (\frac{19 v}{2})$ + $624 $ $\cos (\frac{21 v}{2})$ - $226 \cos (\frac{23 v}{2} )$ - $306 $ $\cos (\frac{25 v}{2})$ - $15 $ $\cos (\frac{27 v}{2})$ + $17 $ $\cos (\frac{29 v}{2})$ + $23 $ $\cos (\frac{31 v}{2})$ + $3 $ $\cos (\frac{33 v}{2})$ $ )$ + $v ($ - $7680 $ $\cos ^2(\frac{v}{2})$ $ (53563 $ $\cos (v)$ + $45974 $ $\cos (2 v)$ + $30514 $ $\cos (3 v)$ + $15658 $ $\cos (4 v)$ - $559 $ $\cos (5 v)$ - $9759 $ $\cos (6 v)$ - $7958 $ $\cos (7 v)$ - $3102 $ $\cos (8 v)$ + $869 $ $\cos (9 v)$ + $1533 $ $\cos (10 v)$ + $666 $ $\cos (11 v)$ + $190 $ $\cos (12 v)$ + $17 $ $\cos (13 v)$ + $29210) $ $\sin ^5(\frac{v}{2})$ - $16 v^2 (2248539 $ $\cos (v)$ + $884628 $ $\cos (2 v)$ - $465296 $ $\cos (3 v)$ - $896802 $ $\cos (4 v)$ - $222138 $ $\cos (5 v)$ + $364588 $ $\cos (6 v)$ + $334950 $ $\cos (7 v)$ + $63936 $ $\cos (8 v)$ - $93282 $ $\cos (9 v)$ - $73236 $ $\cos (10 v)$ - $9234 $ $\cos (11 v)$ + $9682 $ $\cos (12 v)$ + $5604 $ $\cos (13 v)$ + $1620 $ $\cos (14 v)$ + $137 $ $\cos (15 v)$ + $1505344) $ $\sin ^3(\frac{v}{2})$ + $30 v (88 (2754 v^2$ - $269) v^2$ + $86133) $ $\cos (\frac{9 v}{2})$ + $15 v (16 (11814 v^4$ - $1449 v^2$ + $5183) \cos (\frac{11 v}{2} )$ + $8 (7227 v^4$ + $1638 v^2$ - $13363) $ $\cos 
(\frac{13 v}{2})$ + $8 (1683 v^4$ + $1715 v^2$ - $9429) $ $\cos (\frac{15 v}{2})$ + $8 (297 v^4$ + $315 v^2$ - $1181) $ $\cos (\frac{17 v}{2})$ + $8 (33 v^4$ - $585 v^2$ + $4033) $ $\cos (\frac{19 v}{2})$ + $8 (2988$ - $365 v^2) $ $\cos (\frac{21 v}{2})$ + $10 (72 v^2$ - $611) $ $\cos (\frac{23 v}{2})$ + $18 (40 v^2$ - $379) $ $\cos (\frac{25 v}{2})$ + $(76 v^2$ - $681) $ $\cos (\frac{27 v}{2})$ + $(343$ - $36 v^2) $ $\cos (\frac{29 v}{2})$ + $(385$ - $36 v^2) $ $\cos (\frac{31 v}{2})$ + $(45$ - $4 v^2) $ $\cos (\frac{33 v}{2})$ $ )))) $ $ b_{7,num}^5=$ - $4831838208 v^5 $ $\cos ^6(\frac{v}{2})$ $\sin ^{15}(\frac{v}{2})$ $ ($ - $737280 \cos ^4(\frac{v}{2} ) (457 $ $\cos (v)$ + $436 $ $\cos (2 v)$ + $406 $ $\cos (3 v)$ + $331 $ $\cos (4 v)$ + $281 $ $\cos (5 v)$ + $181 $ $\cos (6 v)$ + $131 $ $\cos (7 v)$ + $56 $ $\cos (8 v)$ + $26 $ $\cos (9 v)$ + $5 $ $\cos (10 v)$ + $\cos (11 v)$ + $230) $ $\sin ^7(\frac{v}{2})$ + $30 v ((3960 v^2 (216 v^2$ - $5)$ - $8761) v^2$ + $1780 ) $ $\cos (\frac{v}{2})$ + $15 v (1435984 v^6$ + $22000 v^4$ - $3297 v^2$ - $10980) $ $\cos (\frac{3 v}{2})$ + $v (45 (337392 v^6$ + $11440 v^4$ - $27653 v^2$ + $4460) $ $\cos (\frac{5 v}{2})$ - $300 (219 $ $\cos (\frac{7 v}{2})$ + $603 $ $\cos (\frac{9 v}{2})$ - $300 $ $\cos (\frac{11 v}{2})$ + $28 $ $\cos (\frac{13 v}{2})$ - $756 \cos (\frac{15 v}{2} )$ - $148 $ $\cos (\frac{17 v}{2})$ + $776 $ $\cos (\frac{19 v}{2})$ + $96 $ $\cos (\frac{21 v}{2})$ - $83 $ $\cos (\frac{23 v}{2})$ - $115 $ $\cos (\frac{25 v}{2})$ - $63 $ $\cos (\frac{27 v}{2})$ + $31 \cos (\frac{29 v}{2} )$ + $7 $ $\cos (\frac{31 v}{2})$ + $3 \cos (\frac{33 v}{2} ))$ + $v ($ - $7680 $ $\cos ^2(\frac{v}{2})$ $ (32205 $ $\cos (v)$ + $25293 $ $\cos (2 v)$ + $19114 $ $\cos (3 v)$ + $8361 $ $\cos (4 v)$ - $241 $ $\cos (5 v)$ - $4841 $ $\cos (6 v)$ - $5127 $ $\cos (7 v)$ - $1558 $ $\cos (8 v)$ + $452 $ $\cos (9 v)$ + $766 $ $\cos (10 v)$ + $452 $ $\cos (11 v)$ + $95 $ $\cos (12 v)$ + $17 $ $\cos (13 v)$ + $16488) $ $\sin ^5(\frac{v}{2})$ + $45 v (1408 (141 v^4$ + $v^2)$ + $24571) $ $\cos (\frac{7 v}{2})$ + $15 v (352 (820 v^2$ - $49) v^2$ + $65325) $ $\cos (\frac{9 v}{2})$ + $v ($ - $16 v (1391199 $ $\cos (v)$ + $496638 $ $\cos (2 v)$ - $298296 $ $\cos (3 v)$ - $433962 $ $\cos (4 v)$ - $149583 $ $\cos (5 v)$ + $183078 $ $\cos (6 v)$ + $208215 $ $\cos (7 v)$ + $31836 $ $\cos (8 v)$ - $54047 $ $\cos (9 v)$ - $36606 $ $\cos (10 v)$ - $7569 $ $\cos (11 v)$ + $4842 $ $\cos (12 v)$ + $3624 $ $\cos (13 v)$ + $810 $ $\cos (14 v)$ + $137 $ $\cos (15 v)$ + $794124) $ $\sin ^3(\frac{v}{2})$ + $90 (18975 v^4$ - $2632 v^2$ + $4838) \cos (\frac{11 v}{2} )$ + $30 (18117 v^4$ + $2184 v^2$ - $10934) $ $\cos (\frac{13 v}{2})$ + $15 ($ - $2372 $ $\cos (\frac{17 v}{2})$ + $25024 $ $\cos (\frac{19 v}{2})$ + $6864 \cos (\frac{21 v}{2} )$ - $925 $ $\cos (\frac{23 v}{2})$ - $3401 $ $\cos (\frac{25 v}{2})$ - $1173 $ $\cos (\frac{27 v}{2})$ + $509 $ $\cos (\frac{29 v}{2})$ + $125 $ $\cos (\frac{31 v}{2})$ + $45 $ $\cos (\frac{33 v}{2})$ $ )$ + $30 ((36 (22 v^2$ + $7) $ $\cos (\frac{17 v}{2})$ + $9 (11 v^2$ - $168 ) $ $\cos (\frac{19 v}{2})$ + $(11 v^2$ - $488) $ $\cos (\frac{21 v}{2})$ + $48 $ $\cos (\frac{23 v}{2})$ + $192 $ $\cos (\frac{25 v}{2})$ + $56 \cos (\frac{27 v}{2} )$ - $2 (12 $ $\cos (\frac{29 v}{2})$ + $3 $ $\cos (\frac{31 v}{2})$ + $\cos (\frac{33 v}{2})$ $ ) ) v^2$ + $(4488 v^4$ + $5348 v^2$ - $33558) $ $\cos (\frac{15 v}{2})$ $ ))))) $ $ b_{denom}^5=\left(-151996487423754240 v^{12} \cos ^{15}\left(\frac{v}{2}\right)\sin ^{27}\left(\frac{v}{2}\right)\right) $ $ 
b^5_{T,1}=\frac{433489274083}{237758976000}$ - $\frac{152802083671v^2}{475517952000}$ + $\frac{1017850218043v^4}{194011324416000}$ - $\frac{355108221471443 v^6}{331759364751360000}$ - $ \frac{131687699860605701v^8}{1021818843434188800000}$ - $\frac{970130052388059581v^{10}}{47003666797972684800000}$ - $\ldots $ $ b^5_{T,2}=$ - $\frac{28417333297}{4953312000}$ + $\frac{152802083671v^2}{39626496000}$ - $\frac{1000430523577v^4}{1154829312000}$ + $\frac{2072463900685193v^6}{27646613729280000}$ - $\frac{4147730814505219v^8}{886995523814400000}$ + $\frac{25097056509899527v^{10}}{559567461880627200000}$ - $\ldots $ $ b^5_{T,3}=\frac{930518896733}{39626496000}$ - $\frac{1680822920381v^2}{79252992000}$ + $\frac{270959894639173v^4}{32335220736000}$ - $\frac{97479391651340473 v^6}{55293227458560000}$ + $ \frac{1103582448711358933v^8}{5160701229465600000}$ - $\frac{135427504564083230351v^{10}}{7833944466328780800000}$ + $\ldots $ $ b^5_{T,4}=$ - $\frac{176930551859}{2971987200}$ + $\frac{1680822920381v^2}{23775897600}$ - $\frac{180938567211709v^4}{4850283110400}$ + $\frac{192417404089068163 v^6}{16587968237568000}$ - $ \frac{13040661300795157v^8}{5582489310720000}$ + $\frac{753690800700831259867v^{10}}{2350183339898634240000}$ - $\ldots $ $ b^5_{T,5}=\frac{7854755921}{65228800}$ - $\frac{1680822920381v^2}{10567065600}$ + $\frac{60974002854799v^4}{615908966400}$ - $\frac{31108033258478857v^6}{819158925312000}$ + $\frac{20614799744422537499v^8}{2064280491786240000}$ - $\frac{283489566000723918761v^{10}}{149217989834833920000}$ + $\ldots $ $ b^5_{T,6}=$ - $\frac{146031020287}{825552000}$ + $\frac{1680822920381v^2}{6604416000}$ - $\frac{232891275659849v^4}{1347300864000}$ + $\frac{340402048152771923v^6}{4607768954880000}$ - $\frac{1791871329414738589v^8}{80635956710400000}$ + $\frac{3257163476890690029371v^{10}}{652828705527398400000}$ - $\ldots $ $ b^5_{T,7}=\frac{577045151693}{2830464000}$ - $\frac{1680822920381v^2}{5660928000}$ + $\frac{478770728431733v^4}{2309658624000}$ - $\frac{361861433042278873v^6}{3949516247040000}$ + $\frac{31765434645249520399v^8}{1105864549171200000}$ - $\frac{3797117763219719452879v^{10}}{559567461880627200000}$ + $\ldots $ Method \textit{PF - D6}: $ b_{1,num}^6=$ - $3478923509760 v^6 $ $\cos ^{10}(\frac{v}{2})$ $\sin ^{12}(\frac{v}{2})$ $ (v ($ - $12 (1459v^4$ - $970 v^2$ + $60) $ $\cos (v)$ + $4 (7379 v^4$ + $2805 v^2$ - $1890) $ $\cos (2 v)$ + $180 (31 $ $\cos (3 v)$ + $44 $ $\cos (4 v)$ - $78 $ $\cos (5 v)$ - $10 $ $\cos (6v)$ + $88 $ $\cos (7 v)$ - $9 $ $\cos (8 v)$ - $46 $ $\cos (9 v)$ + $4 $ $\cos (10 v)$ + $9 $ $\cos (11v)$ $ )$ + $v (v ((46112 v^2$ - $46950) $ $\cos (3 v)$ + $30 (764 $ $\cos (4 v)$ + $2470 $ $\cos (5 v)$ - $1370 $ $\cos (6 v)$ - $1908 $ $\cos (7 v)$ + $707 $ $\cos(8 v)$ + $726 $ $\cos (9 v)$ - $124 $ $\cos (10 v)$ - $111 $ $\cos (11 v)$ $ )$ + $v (2 v(31457280 v (7 $ $\cos (v)$ - $1) $ $\sin ^9(\frac{v}{2})$ $\cos^{11}(\frac{v}{2})$ - $33436 $ $\cos (4 v)$ - $24673 $ $\cos (5 v)$ + $23310 $ $\cos (6 v)$ + $15023 $ $\cos (7 v)$ - $7754 $ $\cos (8 v)$ - $4901 $ $\cos (9 v)$ + $1044 $ $\cos(10 v)$ + $669 $ $\cos (11 v)$ $ )$ + $19950 $ $\sin (v)$ - $5752 $ $\sin (2 v)$ - $58277 $ $\sin (3 v)$ + $59598 $ $\sin (4 v)$ + $77003 $ $\sin (5 v)$ - $58792 $ $\sin (6 v)$ - $51447 $ $\sin (7 v)$ + $23233 $ $\sin (8 v)$ + $17843 $ $\sin (9 v)$ - $3480 $ $\sin (10 v)$ - $2552 $ $\sin (11 v)$ $ ))$ - $400 (24 $ $\cos (v)$ + $24 $ $\cos (2 v)$ - $101 $ $\cos (3v)$ + $108 $ $\cos (4 v)$ - $67 $ $\cos (5 v)$ - $173 $ $\cos (6 v)$ + $48 $ $\cos (7 v)$ + $59 $ $\cos(8 
v)$ + $12) $ $\sin ^3(v)$ $ ))$ + $2 (6278 v^5$ - $5265 v^3$ + $1170v$ + $840 $ $\sin (2 v)$ - $210 $ $\sin (3 v)$ - $1260 $ $\sin (4 v)$ + $840 $ $\sin (5 v)$ + $840 $ $\sin (6 v)$ - $1260 $ $\sin (7 v)$ - $210 $ $\sin (8 v)$ + $840 $ $\sin (9 v)$ - $210 $ $\sin (11v)$ $ )) $ $ b_{2,num}^6=3478923509760 v^6 $ $\cos ^{10}(\frac{v}{2})$ $\sin^{12}(\frac{v}{2})$ $ (v (8 (41029 v^4$ - $6060v^2$ - $2340) $ $\cos (v)$ + $(216658 v^4$ - $231600 v^2$ + $29520) $ $\cos (2 v)$ + $720 (8 $ $\cos (3 v)$ - $73 $ $\cos (4 v)$ + $46 $ $\cos (5 v)$ + $20 $ $\cos (6v)$ - $26 $ $\cos (7 v)$ + $58 $ $\cos (8 v)$ - $8 $ $\cos (9 v)$ - $53 $ $\cos (10 v)$ + $6 $ $\cos (11v)$ + $13 $ $\cos (12 v)$ $ )$ + $v (v (4 (47520$ - $64489 v^2) $ $\cos (3 v)$ + $240 (889 $ $\cos (4 v)$ - $472 $ $\cos (5 v)$ + $142 $ $\cos (6 v)$ - $427 $ $\cos(7 v)$ - $682 $ $\cos (8 v)$ + $393 $ $\cos (9 v)$ + $383 $ $\cos (10 v)$ - $84 $ $\cos (11 v)$ - $71 $ $\cos (12 v)$ $ )$ + $v (2 v (15728640 v (42 $ $\cos (2 v)$ + $47) $ $\sin^9(\frac{v}{2})$ $\cos ^{11}(\frac{v}{2})$ - $53557 $ $\cos (4 v)$ - $23336 $ $\cos (5 v)$ - $10180 $ $\cos (6 v)$ + $62776 $ $\cos (7 v)$ + $31222 $ $\cos (8 v)$ - $28732 $ $\cos (9 v)$ - $15557 $ $\cos (10 v)$ + $4554 $ $\cos (11 v)$ + $2637 $ $\cos (12 v)$ $ )$ - $3 (4524 $ $\sin (v)$ + $89649 $ $\sin (2 v)$ - $109162 $ $\sin (3v)$ - $66107 $ $\sin (4 v)$ + $12508 $ $\sin (5 v)$ - $17082 $ $\sin (6 v)$ + $55388 $ $\sin (7v)$ + $44508 $ $\sin (8 v)$ - $32396 $ $\sin (9 v)$ - $22567 $ $\sin (10 v)$ + $5770 $ $\sin (11v)$ + $3929 $ $\sin (12 v)$ $ )))$ - $2400 (48 $ $\cos (v)$ - $83 $ $\cos (2v)$ + $134 $ $\cos (3 v)$ - $160 $ $\cos (4 v)$ - $76 $ $\cos (5 v)$ - $19 $ $\cos (6 v)$ - $100 $ $\cos(7 v)$ + $46 $ $\cos (8 v)$ + $54 $ $\cos (9 v)$ + $24) $ $\sin ^3(v)$ $ ))$ - $4(28927 v^5$ - $18240 v^3$ + $1080 v$ - $2520 $ $\sin (v)$ + $630 $ $\sin (2 v)$ + $1260 $ $\sin (3 v)$ - $1890 $ $\sin (4 v)$ + $1260 $ $\sin (5 v)$ + $1260 $ $\sin (6 v)$ - $1890 $ $\sin(7 v)$ + $1260 $ $\sin (8 v)$ + $630 $ $\sin (9 v)$ - $1890 $ $\sin (10 v)$ + $630 $ $\sin (12 v)$ $ )) $ $ b_{3,num}^6=$ - $3478923509760 v^6 $ $\cos ^{10}(\frac{v}{2})$ $\sin ^{12}(\frac{v}{2})$ $ (4 (221284 v^5$ - $37695v^3$ - $9090 v$ - $1575 $ $\sin (v)$ + $5670 $ $\sin (2 v)$ + $2520 $ $\sin (3 v)$ - $10080 $ $\sin(4 v)$ + $4095 $ $\sin (5 v)$ + $4095 $ $\sin (6 v)$ - $10080 $ $\sin (7 v)$ + $2520 $ $\sin (8v)$ + $4095 $ $\sin (9 v)$ - $1575 $ $\sin (10 v)$ + $2520 $ $\sin (11 v)$ - $1575 $ $\sin (13 v)$ $ )$ + $v ($ - $2 (29854 v^4$ + $91035 v^2$ - $24390) $ $\cos(v)$ - $180 (402 $ $\cos (2 v)$ + $154 $ $\cos (3 v)$ - $804 $ $\cos (4 v)$ + $973 $ $\cos (5v)$ - $115 $ $\cos (6 v)$ - $1308 $ $\cos (7 v)$ + $374 $ $\cos (8 v)$ + $251 $ $\cos (9 v)$ + $\cos(10 v)$ + $326 $ $\cos (11 v)$ - $60 $ $\cos (12 v)$ - $125 $ $\cos (13 v)$ $ )$ + $v (1200(159 $ $\cos (v)$ - $968 $ $\cos (2 v)$ + $1401 $ $\cos (3 v)$ - $459 $ $\cos (4 v)$ + $995 $ $\cos(5 v)$ + $1205 $ $\cos (6 v)$ - $223 $ $\cos (7 v)$ + $73 $ $\cos (8 v)$ - $44 (5 $ $\cos (9v)$ + $6)$ - $247 $ $\cos (10 v)$ $ ) $ $\sin ^3(v)$ + $4 v (70204 v^2$ + $98445) $ $\cos (2 v)$ + $v ((366842 v^2$ - $234180) $ $\cos (3 v)$ + $30 (9964 $ $\cos (4 v)$ + $32995 $ $\cos (5 v)$ - $25405 $ $\cos (6 v)$ - $22548 $ $\cos (7 v)$ + $4066 $ $\cos (8 v)$ + $105 $ $\cos (9 v)$ + $4791 $ $\cos (10 v)$ + $4542 $ $\cos (11 v)$ - $1516 $ $\cos(12 v)$ - $1219 $ $\cos (13 v)$ $ )$ + $v (2 v (15728640 v (294 $ $\cos(v)$ + $70 $ $\cos (3 v)$ - $1) $ $\sin ^9(\frac{v}{2})$ $\cos ^{11}( \frac{v}{2})$ - $517606 $ $\cos (4 v)$ 
- $235773 $ $\cos (5 v)$ + $227655 $ $\cos (6v)$ + $127788 $ $\cos (7 v)$ + $14656 $ $\cos (8 v)$ - $3946 $ $\cos (9 v)$ - $39071 $ $\cos (10v)$ - $18661 $ $\cos (11 v)$ + $8590 $ $\cos (12 v)$ + $4745 $ $\cos (13 v)$ $ )$ - $3(222705 $ $\sin (v)$ - $265046 $ $\sin (2 v)$ + $119729 $ $\sin (3 v)$ - $332466 $ $\sin (4v)$ - $319876 $ $\sin (5 v)$ + $278964 $ $\sin (6 v)$ + $170124 $ $\sin (7 v)$ - $3966 $ $\sin (8v)$ + $739 $ $\sin (9 v)$ - $48110 $ $\sin (10 v)$ - $30601 $ $\sin (11 v)$ + $12050 $ $\sin (12v)$ + $7780 $ $\sin (13 v)$ $ )))))) $ $ b_{4,num}^6=17394617548800 v^6 $ $\cos ^{10}(\frac{v}{2})$ $\sin ^{12}(\frac{v}{2})$ $ (v (8 (95239v^4$ + $132 v^2$ - $6660) $ $\cos (v)$ + $720 (37 $ $\cos (2 v)$ + $36 $ $\cos (3 v)$ - $129 $ $\cos (4 v)$ + $92 $ $\cos (5 v)$ + $40 $ $\cos (6 v)$ - $36 $ $\cos (7 v)$ + $110 $ $\cos (8v)$ - $30 $ $\cos (9 v)$ - $77 $ $\cos (10 v)$ + $8 $ $\cos (11 v)$ - $3 $ $\cos (12 v)$ + $4 $ $\cos(13 v)$ + $8 $ $\cos (14 v)$ $ )$ + $v ($ - $160 (3540 $ $\cos (v)$ - $4081 $ $\cos (2v)$ + $4258 $ $\cos (3 v)$ - $6420 $ $\cos (4 v)$ - $2416 $ $\cos (5 v)$ - $1709 $ $\cos (6v)$ - $3480 $ $\cos (7 v)$ + $1352 $ $\cos (8 v)$ + $946 $ $\cos (9 v)$ + $420 $ $\cos (10 v)$ + $452 $ $\cos (11 v)$ - $122) $ $\sin ^3(v)$ + $22 v (9223 v^2$ - $12168) $ $\cos (2v)$ + $v (4 (76560$ - $94681 v^2) $ $\cos (3 v)$ + $48 (7741 $ $\cos (4v)$ - $4350 $ $\cos (5 v)$ + $1490 $ $\cos (6 v)$ - $4837 $ $\cos (7 v)$ - $5698 $ $\cos (8v)$ + $2835 $ $\cos (9 v)$ + $2127 $ $\cos (10 v)$ + $178 $ $\cos (11 v)$ + $205 $ $\cos (12v)$ - $228 $ $\cos (13 v)$ - $176 $ $\cos (14 v)$ $ )$ + $v (2 v (22020096 v (70 $ $\cos (2 v)$ + $10 $ $\cos (4 v)$ + $63) $ $\sin ^9(\frac{v}{2})$ $\cos^{11}(\frac{v}{2})$ - $55381 $ $\cos (4 v)$ - $74360 $ $\cos (5 v)$ - $19668 $ $\cos (6 v)$ + $94024 $ $\cos (7 v)$ + $43706 $ $\cos (8 v)$ - $24996 $ $\cos (9 v)$ - $14833 $ $\cos (10 v)$ - $3270 $ $\cos (11 v)$ - $1099 $ $\cos (12 v)$ + $1808 $ $\cos (13 v)$ + $984 $ $\cos (14 v)$ $ )$ + $210232 $ $\sin (v)$ - $456433 $ $\sin (2 v)$ + $711254 $ $\sin (3v)$ + $322575 $ $\sin (4 v)$ - $9720 $ $\sin (5 v)$ + $108420 $ $\sin (6 v)$ - $311124 $ $\sin (7v)$ - $200610 $ $\sin (8 v)$ + $107672 $ $\sin (9 v)$ + $68535 $ $\sin (10 v)$ + $11650 $ $\sin(11 v)$ + $6143 $ $\sin (12 v)$ - $8148 $ $\sin (13 v)$ - $5090 $ $\sin (14 v)$ $ ) )))$ - $4 (18341 v^5$ + $1344 v^3$ - $2520 v$ - $3360 $ $\sin(v)$ + $210 $ $\sin (2 v)$ + $2100 $ $\sin (3 v)$ - $3150 $ $\sin (4 v)$ + $2520 $ $\sin (5v)$ + $2520 $ $\sin (6 v)$ - $3150 $ $\sin (7 v)$ + $2520 $ $\sin (8 v)$ + $210 $ $\sin (9v)$ - $3150 $ $\sin (10 v)$ + $420 $ $\sin (11 v)$ + $210 $ $\sin (12 v)$ + $420 $ $\sin (14 v)$ $ )) $ $ b_{5,num}^6=$ - $17394617548800 v^6 $ $\cos ^{10}(\frac{v}{2})$ $\sin ^{12}(\frac{v}{2})$ $ (6 (165878 v^5$ - $9859v^3$ - $9930 v$ - $630 $ $\sin (v)$ + $2520 $ $\sin (2 v)$ + $2310 $ $\sin (3 v)$ - $5670 $ $\sin (4v)$ + $1680 $ $\sin (5 v)$ + $1680 $ $\sin (6 v)$ - $5880 $ $\sin (7 v)$ + $2310 $ $\sin (8v)$ + $1680 $ $\sin (9 v)$ - $840 $ $\sin (10 v)$ + $2310 $ $\sin (11 v)$ - $210 $ $\sin (12v)$ - $840 $ $\sin (13 v)$ - $210 $ $\sin (15 v)$ $ )$ + $v (6 (5893v^4$ - $52145 v^2$ + $9930) $ $\cos (v)$ + $4 (99685 v^4$ + $104103 v^2$ - $13590 ) $ $\cos (2 v)$ + $180 ($ - $361 $ $\cos (3 v)$ + $787 $ $\cos (4 v)$ - $740 $ $\cos (5v)$ + $212 $ $\cos (6 v)$ + $1104 $ $\cos (7 v)$ - $377 $ $\cos (8 v)$ - $40 $ $\cos (9 v)$ - $66 $ $\cos (10 v)$ - $395 $ $\cos (11 v)$ + $65 $ $\cos (12 v)$ + $78 $ $\cos (13 v)$ + $12 $ $\cos (14v)$ + $23 $ $\cos (15 v)$ $ )$ + 
$v (v (2 (96427 v^2$ + $513) $ $\cos(3 v)$ + $(203778$ - $842996 v^2) $ $\cos (4 v)$ - $6 ($ - $131620 $ $\cos (5v)$ + $116572 $ $\cos (6 v)$ + $82296 $ $\cos (7 v)$ + $145 $ $\cos (8 v)$ + $16904 $ $\cos (9v)$ - $28534 $ $\cos (10 v)$ - $23389 $ $\cos (11 v)$ + $4087 $ $\cos (12 v)$ + $2910 $ $\cos (13v)$ + $1236 $ $\cos (14 v)$ + $925 $ $\cos (15 v)$ $ )$ + $v (2 v (44040192 v(140 $ $\cos ^3(v)$ + $3 $ $\cos (5 v)$ $ ) \sin ^9(\frac{v}{2} ) $ $\cos ^{11}(\frac{v}{2})$ - $147678 $ $\cos (5 v)$ + $123852 $ $\cos (6 v)$ + $76254 $ $\cos (7 v)$ + $47602 $ $\cos (8 v)$ + $14264 $ $\cos (9 v)$ - $35354 $ $\cos (10 v)$ - $18078 $ $\cos (11 v)$ + $3078 $ $\cos (12 v)$ + $2109 $ $\cos (13 v)$ + $1116 $ $\cos (14 v)$ + $603 $ $\cos (15 v)$ $ )$ - $3 (227030 $ $\sin (v)$ - $365012 $ $\sin (2v)$ + $74524 $ $\sin (3 v)$ - $328361 $ $\sin (4 v)$ - $248102 $ $\sin (5 v)$ + $215592 $ $\sin(6 v)$ + $107046 $ $\sin (7 v)$ + $40423 $ $\sin (8 v)$ + $25032 $ $\sin (9 v)$ - $49128 $ $\sin(10 v)$ - $29993 $ $\sin (11 v)$ + $5145 $ $\sin (12 v)$ + $3537 $ $\sin (13 v)$ + $1748 $ $\sin(14 v)$ + $1072 $ $\sin (15 v)$ $ )))$ - $240 ($ - $2688 $ $\cos (v)$ + $6303 $ $\cos(2 v)$ - $8687 $ $\cos (3 v)$ + $1939 $ $\cos (4 v)$ - $6433 $ $\cos (5 v)$ - $5887 $ $\cos (6v)$ + $371 $ $\cos (7 v)$ - $1323 $ $\cos (8 v)$ + $1397 $ $\cos (9 v)$ + $1271 $ $\cos (10v)$ + $200 $ $\cos (11 v)$ + $207 $ $\cos (12 v)$ + $2440) $ $\sin ^3(v)$ $ )) ) $ $ b_{6,num}^6=3478923509760 v^6 $ $\cos ^{10}(\frac{v}{2})$ $\sin^{12}(\frac{v}{2})$ $ (v (4 (2852497v^4$ + $170640 v^2$ - $207720) $ $\cos (v)$ + $(2064158 v^4$ - $2922480v^2$ + $223920) $ $\cos (2 v)$ + $720 (586 $ $\cos (3 v)$ - $1710 $ $\cos (4 v)$ + $1310 $ $\cos (5 v)$ + $560 $ $\cos (6 v)$ - $408 $ $\cos (7 v)$ + $1504 $ $\cos (8 v)$ - $484 $ $\cos (9v)$ - $912 $ $\cos (10 v)$ + $70 $ $\cos (11 v)$ - $178 $ $\cos (12 v)$ + $74 $ $\cos (13 v)$ + $121 $ $\cos (14 v)$ + $6 $ $\cos (15 v)$ + $11 $ $\cos (16 v)$ $ )$ + $v (v (8(485010$ - $519679 v^2) $ $\cos (3 v)$ + $240 (20686 $ $\cos (4 v)$ - $12039 $ $\cos (5 v)$ + $4270 $ $\cos (6 v)$ - $14162 $ $\cos (7 v)$ - $14606 $ $\cos (8 v)$ + $6470 $ $\cos (9 v)$ + $4182 $ $\cos (10 v)$ + $1427 $ $\cos (11 v)$ - $7 ($ - $178 $ $\cos (12 v)$ + $93 $ $\cos (13 v)$ + $69 $ $\cos (14 v)$ + $8 $ $\cos (15 v)$ $ )$ - $41 $ $\cos (16 v)$ $ )$ + $v (2 v(110100480 v (210 $ $\cos (2 v)$ + $42 $ $\cos (4 v)$ + $2 $ $\cos (6 v)$ + $175) $ $\sin ^9(\frac{v}{2})$ $ \cos ^{11}(\frac{v}{2} )$ - $585830 $ $\cos (4 v)$ - $1144400 $ $\cos (5 v)$ - $249340 $ $\cos (6 v)$ + $1098208 $ $\cos (7 v)$ + $519396 $ $\cos (8 v)$ - $196056 $ $\cos (9 v)$ - $133908 $ $\cos (10v)$ - $82040 $ $\cos (11 v)$ - $35842 $ $\cos (12 v)$ + $22516 $ $\cos (13 v)$ + $12929 $ $\cos(14 v)$ + $1894 $ $\cos (15 v)$ + $1019 $ $\cos (16 v)$ $ )$ + $3 (1368670 $ $\sin(v)$ - $1910495 $ $\sin (2 v)$ + $3490840 $ $\sin (3 v)$ + $1317326 $ $\sin (4 v)$ + $149620 $ $\sin (5 v)$ + $522816 $ $\sin (6 v)$ - $1381824 $ $\sin (7 v)$ - $805504 $ $\sin (8v)$ + $337776 $ $\sin (9 v)$ + $212736 $ $\sin (10 v)$ + $109796 $ $\sin (11 v)$ + $61986 $ $\sin(12 v)$ - $35824 $ $\sin (13 v)$ - $22679 $ $\sin (14 v)$ - $3050 $ $\sin (15 v)$ - $1849 $ $\sin(16 v)$ $ )))$ - $2400 (4366 $ $\cos (v)$ - $4746 $ $\cos (2 v)$ + $4156 $ $\cos(3 v)$ - $6806 $ $\cos (4 v)$ - $2270 $ $\cos (5 v)$ - $2207 $ $\cos (6 v)$ - $3446 $ $\cos (7v)$ + $1146 $ $\cos (8 v)$ + $572 $ $\cos (9 v)$ + $557 $ $\cos (10 v)$ + $544 $ $\cos (11 v)$ + $38 $ $\cos (12 v)$ + $38 $ $\cos (13 v)$ - $654) $ $\sin ^3(v)$ $ ))$ - $2(227863 
v^5$ + $369240 v^3$ - $105480 v$ - $75600 $ $\sin (v)$ - $6300 $ $\sin (2v)$ + $56700 $ $\sin (3 v)$ - $80640 $ $\sin (4 v)$ + $69300 $ $\sin (5 v)$ + $70560 $ $\sin (6v)$ - $80640 $ $\sin (7 v)$ + $70560 $ $\sin (8 v)$ - $5040 $ $\sin (9 v)$ - $80640 $ $\sin (10v)$ + $13860 $ $\sin (11 v)$ - $5040 $ $\sin (12 v)$ + $1260 $ $\sin (13 v)$ + $13860 $ $\sin (14v)$ + $1260 $ $\sin (16 v)$ $ )) $ $ b_{7,num}^6=$ - $3478923509760 v^6 $ $\cos ^{10}(\frac{v}{2})$ $\sin ^{12}(\frac{v}{2})$ $ (v ((468896v^4$ - $2845950 v^2$ + $504540) $ $\cos (v)$ + $(3794666 v^4$ + $3508410v^2$ - $424260) $ $\cos (2 v)$ + $180 ($ - $3359 $ $\cos (3 v)$ + $6514 $ $\cos (4v)$ - $5660 $ $\cos (5 v)$ + $1975 $ $\cos (6 v)$ + $8736 $ $\cos (7 v)$ - $3088 $ $\cos (8 v)$ + $88 $ $\cos (9 v)$ - $711 $ $\cos (10 v)$ - $3340 $ $\cos (11 v)$ + $526 $ $\cos (12 v)$ + $487 $ $\cos(13 v)$ + $133 $ $\cos (14 v)$ + $238 $ $\cos (15 v)$ + $4 $ $\cos (16 v)$ + $7 $ $\cos (17 v)$ $ )$ + $v(v (6 (218182 v^2$ + $75025) $ $\cos (3 v)$ - $30 ($ - $48938 $ $\cos (4 v)$ - $205188 $ $\cos (5 v)$ + $189209 $ $\cos (6 v)$ + $123872 $ $\cos (7 v)$ + $9360 $ $\cos (8 v)$ + $34024 $ $\cos (9 v)$ - $47753 $ $\cos (10 v)$ - $37460 $ $\cos (11 v)$ + $4522 $ $\cos (12 v)$ + $2997 $ $\cos (13 v)$ + $2499 $ $\cos (14 v)$ + $1846 $ $\cos (15 v)$ + $68 $ $\cos (16 v)$ + $49 $ $\cos (17 v)$ $ )$ + $v (2 v (31457280 v (49 (25 $ $\cos(v)$ + $9 $ $\cos (3 v)$ + $\cos (5 v)$ $ )$ + $\cos (7 v)$ $ ) \sin ^9(\frac{v}{2} ) $ $\cos ^{11}(\frac{v}{2})$ - $3244536 $ $\cos (4 v)$ - $1062160 $ $\cos (5 v)$ + $806275 $ $\cos (6 v)$ + $541016 $ $\cos (7 v)$ + $436792 $ $\cos (8v)$ + $145188 $ $\cos (9 v)$ - $269891 $ $\cos (10 v)$ - $141460 $ $\cos (11 v)$ + $11496 $ $\cos(12 v)$ + $10442 $ $\cos (13 v)$ + $10763 $ $\cos (14 v)$ + $5913 $ $\cos (15 v)$ + $274 $ $\cos(16 v)$ + $147 $ $\cos (17 v)$ $ )$ - $5461475 $ $\sin (v)$ + $9728100 $ $\sin (2v)$ - $1651927 $ $\sin (3 v)$ + $8272210 $ $\sin (4 v)$ + $5719296 $ $\sin (5 v)$ - $4891402 $ $\sin (6 v)$ - $2271864 $ $\sin (7 v)$ - $1253544 $ $\sin (8 v)$ - $729384 $ $\sin (9v)$ + $1169646 $ $\sin (10 v)$ + $708816 $ $\sin (11 v)$ - $69694 $ $\sin (12 v)$ - $53079 $ $\sin (13 v)$ - $51572 $ $\sin (14 v)$ - $31759 $ $\sin (15 v)$ - $1350 $ $\sin (16 v)$ - $812 $ $\sin (17 v)$ $ ))$ - $400 ($ - $17068 $ $\cos (v)$ + $33671 $ $\cos (2 v)$ - $46168 $ $\cos (3 v)$ + $9411 $ $\cos (4 v)$ - $34507 $ $\cos (5 v)$ - $29117 $ $\cos (6 v)$ + $423 $ $\cos(7 v)$ - $7921 $ $\cos (8 v)$ + $7213 $ $\cos (9 v)$ + $6135 $ $\cos (10 v)$ + $1367 $ $\cos (11v)$ + $1363 $ $\cos (12 v)$ + $36 $ $\cos (13 v)$ + $35 $ $\cos (14 v)$ + $14143) $ $\sin ^3(v)$ $ ))$ - $4 ($ - $2116147 v^5$ + $59850 v^3$ + $134820 v$ + $5775 $ $\sin(v)$ - $26775 $ $\sin (2 v)$ - $30345 $ $\sin (3 v)$ + $67200 $ $\sin (4 v)$ - $17640 $ $\sin (5v)$ - $17745 $ $\sin (6 v)$ + $70560 $ $\sin (7 v)$ - $30240 $ $\sin (8 v)$ - $17640 $ $\sin (9v)$ + $9135 $ $\sin (10 v)$ - $30240 $ $\sin (11 v)$ + $3360 $ $\sin (12 v)$ + $9135 $ $\sin (13v)$ + $105 $ $\sin (14 v)$ + $3360 $ $\sin (15 v)$ + $105 $ $\sin (17 v)$ $ )) $ $ b_{denom}^6=\left(52183852646400 v^{14} \sin ^{21}(v) \right) $ $ b^6_{T,1}=\frac{433489274083}{237758976000}$ - $\frac{152802083671v^2}{407586816000}$ + $\frac{42107584279v^4}{16629542092800}$ - $\frac{48644589686717v^6}{25519951134720000}$ - $\frac{8465930460350551v^8}{29194824098119680000}$ - $\frac{1588162811844063649v^{10}}{30216642941553868800000}$ - $\ldots $ $ b^6_{T,2}=$ - $\frac{28417333297}{4953312000}$ + $\frac{152802083671v^2}{33965568000}$ - 
$\frac{1000430523577v^4}{866121984000}$ + $\frac{1319911328641663v^6}{13823306864640000}$ - $\frac{633679429758461v^8}{86889357434880000}$ - $\frac{13749338388459469v^{10}}{91565584671375360000}$ - $\ldots $ $ b^6_{T,3}=\frac{930518896733}{39626496000}$ - $\frac{1680822920381v^2}{67931136000}$ + $\frac{2433446807381v^4}{213199257600}$ - $\frac{150750689506359931v^6}{55293227458560000}$ + $\frac{151232830491144629v^8}{442345819668480000}$ - $\frac{11391719790424784543v^{10}}{387392858225049600000}$ + $\ldots $ $ b^6_{T,4}=$ - $\frac{176930551859}{2971987200}$ + $\frac{1680822920381v^2}{20379340800}$ - $\frac{26590548293789v^4}{519673190400}$ + $\frac{154953352570753493v^6}{8293984118784000}$ - $\frac{143381346778111763v^8}{33175936475136000}$ + $\frac{1938525891219194555527v^{10}}{3021664294155386880000}$ - $\ldots $ $ b^6_{T,5}=\frac{7854755921}{65228800}$ - $\frac{1680822920381 v^2}{9057484800}$ + $ \frac{251688917686417 v^4}{1847726899200}$ - $\frac{8983100481771361v^6}{144557457408000}$ + $\frac{289598383359113v^8}{14860025856000}$ - $\frac{2936244786853000878251v^{10}}{671480954256752640000}$ + $\ldots $ $ b^6_{T,6}=$ - $\frac{146031020287}{825552000}$ + $\frac{1680822920381v^2}{5660928000}$ - $\frac{3438345456101v^4}{14435366400}$ + $\frac{280448198337422053v^6}{2303884477440000}$ - $\frac{408566151907529191v^8}{9215537909760000}$ + $\frac{10234561211810943225223v^{10}}{839351192820940800000}$ - $\ldots $ $ b^6_{T,7}=\frac{577045151693}{2830464000}$ - $\frac{1680822920381v^2}{4852224000}$ + $\frac{282860542755301v^4}{989853696000}$ - $\frac{45957876214170247v^6}{303808942080000}$ + $\frac{1822061164406572133v^8}{31596129976320000}$ - $\frac{1213351274004131872663v^{10}}{71944387956080640000}$ + $\ldots $ \inon \begin{figure} \caption{The stability region (s-v plane) of the classical Quinlan-Tremaine 14-step method and of the methods PF-D0,PF-D1, PF-D2, PF-D3, PF-D4, PF-D5 and PF-D6 (from left to right and from top to bottom)} \label{fig:1} \end{figure} \begin{figure} \caption{The accuracy (digits) of the new methods compared to the classical one for the Schr\"odinger equation (E=989)} \label{fig:2} \end{figure} \begin{figure} \caption{The accuracy (digits) of the new methods compared to the classical one for the Schr\"odinger equation (E=341)} \label{fig:3} \end{figure} \begin{figure} \caption{The accuracy (digits) of the new methods compared to the classical one for the Schr\"odinger equation (E=163)} \label{fig:4} \end{figure} \end{article} \end{document}
\begin{document} \title{Quantum Control via Geometry: An explicit example} \author{Mile Gu} \address {Department of Physics, University of Queensland, St Lucia, Queensland 4072, Australia.} \author{Andrew Doherty} \address {Department of Physics, University of Queensland, St Lucia, Queensland 4072, Australia.} \author{Michael A. Nielsen} \address {Department of Physics, University of Queensland, St Lucia, Queensland 4072, Australia.} \address{Perimeter Institute for Theoretical Physics, Waterloo, ON N2L 2Y5, Canada.} \date{\today} \begin{abstract}We explicitly compute the optimal cost for a class of example problems in geometric quantum control. These problems are defined by a Cartan decomposition of $su(2^n)$ into orthogonal subspaces $\mathfrak{l}$ and $\mathfrak{p}$ such that $[\mathfrak{l},\mathfrak{l}] \subseteq \mathfrak{l}, [\mathfrak{p},\mathfrak{l}] = \mathfrak{p}, [\mathfrak{p},\mathfrak{p}] \subseteq \mathfrak{l}$. Motion in the $\mathfrak{l}$ direction is assumed to have negligible cost, whereas motion in the $\mathfrak{p}$ direction does not. In the special case of two qubits, our results correspond to the minimal interaction cost of a given unitary. \end{abstract} \pacs{03.67.Dd, 42.50.Dv, 89.70.+c} \maketitle \section{Introduction} Characterizing the difficulty of synthesizing particular quantum interactions has generated considerable interest in recent years due to its practical applications in quantum computation. From the perspective of optimal control, it determines the optimal way to construct a desired quantum interaction with a limited set of tools \cite{Ernst90a,Glaser98a,Warren97a,Carlini05a,Carlini06a}. From the perspective of quantum circuits, it expresses the minimal number of basic gates required to build up a given algorithm \cite{Nielsen00a}. These perspectives result in different characterizations of complexity. In optimal control, a unitary is hard if it is costly to synthesize with the available interactions. In quantum circuits, a hard unitary requires a large number of the basic available gates. Recent work by Nielsen et al. shows that for certain control problems, both characterizations are polynomially equivalent\footnote{There are some technical caveats to this equivalence related to approximate versus exact implementation. See \cite{Nielsen06d} for details.} \cite{Nielsen06d}. This equivalence motivates the application of continuous geometrical methods to quantum circuits and, in the special case where the basic gates are single- and two-qubit unitaries, to quantum complexity \cite{Nielsen06b,Nielsen06c}. In this formulation, each unitary operator corresponds to a point in a particular Riemannian manifold. The metric is engineered such that the minimal distance between a unitary operator $U$ and the identity $I$ corresponds to the minimal cost of synthesizing $U$. This approach allows us to apply mathematical techniques cultivated over many decades to a significantly newer field. Prior work in quantum optimal control has mostly dealt with systems that evolve under a specific drift Hamiltonian (see, for example, \cite{Bennet02a,Khaneja01a,Childs03d,Boscain05a}). However, all entangling operations are equivalent modulo local interactions \cite{Dodd02a,Wocjan02c,Dur00b,Bennet02a,Jones99a}, and hence no particular operation should be favored in a model compliant with the spirit of quantum complexity. This motivates the treatment of interaction Hamiltonians as a physical resource, where they are all assigned equal cost.
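As a concrete illustration, the commutation relations quoted above can be checked directly for two qubits in the basis of tensor products of Pauli matrices; a minimal Python sketch (the explicit basis construction and the helper \texttt{in\_span} are purely illustrative choices) is:
\begin{verbatim}
import itertools
import numpy as np

# Pauli matrices and the two-qubit tensor-product basis
I2 = np.eye(2, dtype=complex)
X = np.array([[0, 1], [1, 0]], dtype=complex)
Y = np.array([[0, -1j], [1j, 0]], dtype=complex)
Z = np.array([[1, 0], [0, -1]], dtype=complex)
pauli = {'I': I2, 'X': X, 'Y': Y, 'Z': Z}

basis = {a + b: np.kron(pauli[a], pauli[b])
         for a, b in itertools.product('IXYZ', repeat=2) if a + b != 'II'}

# l = single-qubit directions, p = the remaining (two-body) directions
l = [M for name, M in basis.items() if 'I' in name]
p = [M for name, M in basis.items() if 'I' not in name]

def in_span(A, S):
    # project A onto the span of the basis matrices in S and compare
    proj = sum(np.trace(B.conj().T @ A) / np.trace(B.conj().T @ B) * B
               for B in S)
    return np.allclose(A, proj)

comm = lambda A, B: A @ B - B @ A
assert all(in_span(comm(a, b), l) for a in l for b in l)  # [l,l] in l
assert all(in_span(comm(a, b), p) for a in p for b in l)  # [p,l] in p
assert all(in_span(comm(a, b), l) for a in p for b in p)  # [p,p] in l
print("Cartan commutation relations verified for two qubits")
\end{verbatim}
The same check applies verbatim to any candidate splitting of $su(2^n)$ once the two lists of basis matrices are supplied.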
In this paper, we consider a class of quantum control problems where the space of Hamiltonians is divided into two orthogonal subspaces, such that the application of Hamiltonians in one subspace incurs negligible cost compared to the other. Provided these subspaces satisfy the conditions of a Cartan decomposition (see below), geometrical methods may be used to construct a general solution. While the general class of systems solved in this paper has not been analyzed in previous literature, it encompasses a number of previously studied systems. In the special case of a single qubit, our result provides an alternative characterization of single-qubit time-optimal control \cite{Khaneja01a}. In the case of $2$ qubits, our solution coincides with the interaction cost of a two-qubit unitary \cite{Vidal02c}, minimized over all possible drift Hamiltonians. \section{Background and Definitions} In this section, we introduce some of the necessary background and notation that will be used in the paper. We assume the reader is familiar with the basic notions of Riemannian geometry, Lie algebras and quantum circuits (e.g.~\cite{Lee97a,Georgi99a,Nielsen00a}). Consider an $n$-qubit system. The space of traceless Hamiltonians $H \in su(2^n)$ on this system forms a vector space under the trace inner product $\ip{A}{B} = \mathrm{tr}(AB)$. This space is spanned by the product operator basis $\prod_{k=1}^n \sigma_{j_k,k}$, where $\sigma_{j,k}$ denotes the action of applying the Pauli interaction $\sigma_{j} \in \{I,\sigma_x,\sigma_y,\sigma_z\}$ to the $k^{th}$ qubit. The quantum control problem is defined as follows. We wish to synthesize a given $n$-qubit unitary $U \in SU(2^n)$ by the application of some Hamiltonian $H(t) \in su(2^n)$. We define a cost function $C: SU(2^n) \times su(2^n) \rightarrow \mathbb{R}$ such that the application of $H$ for duration $dt$ on a unitary $U$ incurs cost $C(U, H) dt$. Formally, the system is governed by the Schr\"{o}dinger equation \begin{equation}\label{eqn:evo} \frac{dU}{dt} = - i H(t) U(t) \qquad U(0) = I \qquad U(T) = U. \end{equation} We aim to find the $H(t)$ on the interval $[0,T]$ such that the total cost $D(I,U) = \int_0^T C(U(t), H(t)) dt$ is minimized. In this paper, we analyze such problems using the geometrical approach \cite{Nielsen06d}. Each unitary $U \in SU(2^n)$ corresponds to a point in the Riemannian manifold $\mathcal{N} = SU(2^n)$, and each Hamiltonian describes a vector in $T\mathcal{N}$, the tangent space of $\mathcal{N}$. Distances on $\mathcal{N}$ are defined by $C: \mathcal{N} \times T\mathcal{N} \rightarrow \mathbb{R}$. The minimal cost $D(I,U)$ coincides with the minimal distance between $I$ and $U$. We focus on a class of quantum control problems that split the space of Hamiltonians $su(2^n)$ into two vector subspaces, $\mathfrak{l}$ and $\mathfrak{p}$. Hamiltonians in $\mathfrak{l}$ have negligible cost, while those in $\mathfrak{p}$ do not. In addition, $\mathfrak{l}$ and $\mathfrak{p}$ satisfy the set of commutation relations that define a Cartan decomposition \cite{Khaneja01b}: \begin{equation}\label{eqn:com_relation} [\mathfrak{l},\mathfrak{l}] \subseteq \mathfrak{l}, \qquad [\mathfrak{p},\mathfrak{l}] = \mathfrak{p}, \qquad [\mathfrak{p},\mathfrak{p}] \subseteq \mathfrak{l}. \end{equation} We refer to such problems as Cartan control problems. \begin{defn}[Cartan control problem] A Cartan control problem on an $n$-qubit system is defined as follows. Let $\mathfrak{l}$ and $\mathfrak{p}$ be subspaces of $su(2^n)$ that satisfy (\ref{eqn:com_relation}).
Define $P_\mathfrak{l}$ and $P_\mathfrak{p}$ as their respective projection operators. The application of a Hamiltonian $H$ for time $dt$ incurs cost $C(U,H) = \sqrt{\ip{H}{\mathcal{\tilde{G}}H}}dt$, where \begin{equation} \mathcal{\tilde{G}} = \epsilon P_\mathfrak{l} + P_\mathfrak{p}, \qquad \epsilon \ll 1. \end{equation} Given a unitary $U \in SU(2^n)$, we wish to find $H(t)$ on $[0,T]$ that minimizes $D(I,U) = \int_0^T C(U(t), H(t)) dt$ subject to (\ref{eqn:evo}). Alternatively, this problem can be regarded as computing the distance between $I$ and $U$ on the manifold $\mathcal{N} = SU(2^n)$ subject to the metric $C$. \end{defn} The $2$-qubit system, where we wish to synthesize $U \in SU(4)$ with the minimal amount of non-local interactions, is a special case of this problem. Here, $\mathfrak{l}$ is the vector space of single-qubit Hamiltonians and $\mathfrak{p}$ is the vector space of all directions orthogonal to $\mathfrak{l}$. The resulting Cartan control problem neglects the cost of all single-qubit interactions, and thus $D(I,U)$ is a measure of the minimal amount of interactions required to synthesize $U$. In fact, it coincides with the interaction cost of $U$ \cite{Vidal02c}, when minimized over all possible drift Hamiltonians. The physical interpretation of $n$-qubit Cartan control problems for $n > 2$ is not as transparent. Although there exists a decomposition such that all single-qubit interactions are contained in $\mathfrak{l}$, $\mathfrak{l}$ will invariably also contain interactions involving an unbounded number of qubits. Therefore, the condition $\epsilon \rightarrow 0$ implies that, in addition to local interactions, certain non-local interactions can also be applied at negligible cost. Although the solution for these cases does not have a direct physical application, other than providing a lower bound on complexity, it shows how geometrical methods are well adapted to solving a general class of problems that scale with $n$. \section{Solution to the Cartan Control Problem} In this section, we solve the Cartan control problem using geometrical methods \cite{Nielsen06d}. Before approaching the problem directly, we illustrate the intuition behind our approach by a simple example. Consider a cylindrical surface of unit radius $\mathcal{N} = \mathbb{R}\times[0,2\pi)$ parameterized by standard cylindrical coordinates $z$ and $\theta$, with the naturally induced metric $C_N(z, \theta, dz,d\theta) = \sqrt{dz^2 + d\theta^2}$. Suppose we wish to find the minimal distance between two points, $\mathbf{x} = (0,0)$ and $\mathbf{y} = (0,\pi/2)$. It is clear that geodesics between the two points are non-unique, since the surface wraps around itself. We circumvent this difficulty by introducing a second manifold $\mathcal{M} = \mathbb{R}^2$ with the standard Euclidean metric $C_M(p,q,dp,dq) = \sqrt{dp^2 + dq^2}$, together with a mapping $U: \mathcal{M} \rightarrow \mathcal{N}$ of the form $U(p,q) = (p, q\mod 2\pi)$. If we define $[\mathbf{x}]$ and $[\mathbf{y}]$ as the pre-images of $\mathbf{x}$ and $\mathbf{y}$ with respect to $U$, i.e.\ $[\mathbf{x}] = \{(0,2j\pi)\}$, $[\mathbf{y}] = \{(0,(2k+\tfrac{1}{2})\pi)\}$, $j,k \in \mathbb{Z}$, then the distance $d_N(\mathbf{x},\mathbf{y})$ on $\mathcal{N}$ coincides with the minimal distance between the sets $[\mathbf{x}]$ and $[\mathbf{y}]$ on $\mathcal{M}$, i.e.\ $\pi/2$. The following lemma states this more generally: \begin{lemma}\label{lem:distance_equiv}Let $\mathcal{M}$ and $\mathcal{N}$ be Riemannian manifolds with distance measures $C_M$ and $C_N$.
Denote the distance between two points on $\mathcal{M}$ and $\mathcal{N}$ by $d_M(\cdot,\cdot)$ and $d_N(\cdot,\cdot)$ respectively. Let $U: \mathcal{M}\rightarrow \mathcal{N}$ be a smooth map that preserves the distance, i.e.\ $C_M(q,\mathbf{v}) = C_N(U(q),U^*(\mathbf{v}))$, where $U^*$ is the pushforward of $U$. Define $[x] = \{\mathbf{p}: U(\mathbf{p}) = \mathbf{x}\}$, $[y] = \{\mathbf{q}: U(\mathbf{q}) = \mathbf{y}\}$ as the pre-images of $\mathbf{x},\mathbf{y} \in \mathcal{N}$ respectively. If $\mathbf{p} \in [x]$ and $\mathbf{q} \in [y]$, then \begin{align}\nonumber d_M\left([x],[y]\right) & \equiv \min_{\mathbf{q} \in [y]} d_M(\mathbf{p},\mathbf{q})\\& = \min_{\mathbf{p} \in [x]} d_M(\mathbf{p},\mathbf{q})= d_N(\mathbf{x},\mathbf{y}), \end{align} where $d_M(\mathbf{p},\mathbf{q})$ denotes the distance between $\mathbf{p}$ and $\mathbf{q}$. \end{lemma} \begin{figure} \caption{(Color online) If $[\mathbf{x}]$ and $[\mathbf{y}]$ are the pre-images of $\mathbf{x}$ and $\mathbf{y}$ under the distance-preserving map $U$, the distance $d_N(\mathbf{x},\mathbf{y})$ on $\mathcal{N}$ coincides with the minimal distance between the sets $[\mathbf{x}]$ and $[\mathbf{y}]$ on $\mathcal{M}$.} \label{fig:erptsqfit} \end{figure} \begin{proof} Suppose $d_M([x],[y]) = k$; then there exists a curve $\gamma \subset \mathcal{M}$ of length $k$ that connects some $\mathbf{p} \in [x]$ and $\mathbf{q} \in [y]$. Clearly its image $\Gamma(t) = U(\gamma(t))$ is a curve from $\mathbf{x}$ to $\mathbf{y}$ of length $k$ in $\mathcal{N}$. Thus $d_N(\mathbf{x},\mathbf{y}) \leq d_M\left([x],[y]\right)$. Now suppose $d_N(\mathbf{x},\mathbf{y}) = k$, so that there exists a curve $\Gamma(t) \in \mathcal{N}$, $\Gamma(0) = \mathbf{x}$, $\Gamma(1) = \mathbf{y}$, of length $k$. Given any $\mathbf{p} \in [x]$, we show that there exists a $\mathbf{q} \in [y]$ such that $d_M(\mathbf{p},\mathbf{q}) \leq k$ by constructing a curve $\gamma$ from $\mathbf{p}$ to $\mathbf{q}$. Let $l$ be a large integer and $t_j = j/l$, $j = 0, \ldots, l$, and set $\gamma(t_0) = \mathbf{p}$, \begin{align} \gamma(t) = \gamma\left(t_j\right) + \mathbf{v}_{t_j}(t - t_j) \qquad t_j \leq t < t_{j+1}, \end{align} where $\mathbf{v}_{t_j}$ satisfies \begin{equation} U^* \mathbf{v}_{t_j} = \frac{d\Gamma}{dt}(t_j). \end{equation} Clearly, in the limit $l \rightarrow \infty$: \begin{equation} \mathrm{Length}(\gamma(t)) = \lim_{l \rightarrow \infty} \sum_j C_M \left(\gamma\left(t_j\right), \mathbf{v}_{t_j} \right)(t_{j+1}-t_j) = k \end{equation} Hence $d_N(\mathbf{x},\mathbf{y}) \geq \min_{\mathbf{q} \in [y]} d_M(\mathbf{p},\mathbf{q})$. Combining the two results gives $\min_{\mathbf{q} \in [y]} d_M(\mathbf{p},\mathbf{q}) = d_N(\mathbf{x},\mathbf{y})$; by symmetry, $\min_{\mathbf{p} \in [x]} d_M(\mathbf{p},\mathbf{q}) = d_N(\mathbf{x},\mathbf{y})$ as well, which establishes the desired result. \end{proof} $\blacksquare$ To compute distances on $SU(2^n)$, we define a Euclidean manifold $\mathcal{M} = \mathbb{R}^{4^n -1}$. Denote its coordinates by $\mathbf{q} = (q_1,\ldots,q_{4^n-1})$ and tangent vectors by $\mathbf{v}$. We wish to find a suitable metric $C_M(\mathbf{q},\mathbf{v}) = \sqrt{\ip{\mathbf{v}}{G \mathbf{v}}}$, together with a distance preserving map $U$ such that Lemma \ref{lem:distance_equiv} is applicable. There are many possible choices, of which we ideally select one where $G$ has a simple form. The Cartan decomposition is one such candidate \cite{Khaneja01b}. Let $\mathfrak{z}$ be a maximally commuting subspace of $\mathfrak{p}$; then any unitary can be decomposed as \begin{equation}\label{eqn:decompfirst} U(H_1,H_2,H_3) = e^{-iH_1} e^{-i H_2} e^{-iH_3}, \end{equation} where $H_1,H_3 \in \mathfrak{l}$ and $H_2\in \mathfrak{z}$. The vector of matrices $(H_1, H_2, H_3)$ completely specifies $U$.
We view this as a point in Cartesian space, $(\mathbf{q_1},\mathbf{q_2}, \mathbf{q_3}) \in \mathcal{M}$, where $\mathbf{q_i}$ is the vectorization of $H_i$ with respect to some orthonormal basis $B_{j,i}$, i.e. $H_i = \sum_j q_{j,i} B_{j,i}$ and $q_{j,i} = \mathrm{Tr}[H_iB_{j,i}]$. (\ref{eqn:decompfirst}) then defines the desired coordinate map. The second step is to compute the metric $C_M$ on $\mathcal{M}$ such that $C_M(q,\mathbf{v}) = C_N(U(q),U^*(\mathbf{v}))$. The matrix $G$ can be represented by a $3 \times 3$ matrix of superoperators, $G_{i,j}$, such that a perturbation $(\Delta H_1,\Delta H_2, \Delta H_3)$ has length $\Delta \sqrt{\sum_{i,j} \ip{H_i}{G_{ij} H_j}}$. The properties of $G$ can be characterized: \begin{lemma}\label{lemma:local_metric}Let $\mathcal{M} = \mathbb{R}^{4^n-1}$ be a Riemannian manifold with metric $C_M(\Delta H_1, \Delta H_2, \Delta H_3) = \Delta \sqrt{\sum_{i,j} \ip{H_i}{G_{i,j}H_j}}$, and let $U: \mathcal{M} \rightarrow SU(2^n)$ be as defined by (\ref{eqn:decompfirst}). If $C_M(q,\mathbf{v}) = C_N(U(q),U^*(\mathbf{v}))$, then $G$ has the form \begin{equation} G = \left( \begin{array}{ccc} \epsilon \mathrm{BCH}^\dag_L \mathrm{BCH}_L & & \\ & \textbf{I} & \\ & & \epsilon \mathbf{A}(H_2) + \mathbf{B}(H_2) \end{array}\right). \end{equation} $\mathbf{A}(H_2)$ and $\mathbf{B}(H_2)$ are $H_2$-dependent operators that satisfy $\mathbf{A}(0) = I$, $\mathbf{B}(0) = 0$, $(I + \mathbf{A}(H_2)) > 0$ and $\mathbf{B}(H_2) > 0$; and $\mathrm{BCH}$ denotes the Baker-Campbell-Hausdorff operator, which satisfies $\exp \{-i(C + D)\} = \exp\{-i \mathrm{BCH}_C(D)\}\exp\{-i C\} + O(|D|^2)$. \end{lemma} \begin{proof} From the BCH equation, $e^{\Delta A} e^{\Delta B} = e^{\Delta A +\Delta B} + O(\Delta^2)$, thus $G_{ij} = 0$ for $i \neq j$. The remaining components can be computed by considering individual perturbations. The details are purely technical, and are left to the appendix. $\blacksquare$ \end{proof} To simplify the notation, we set $L = H_1$, $Z = H_2$ and $M = H_3$, so that a point on $\mathcal{M}$ is denoted by the 3-tuple $(L,Z,M)$. While the explicit forms of $\mathbf{A}$ and $\mathbf{B}$ are complex, the metric greatly simplifies when $\epsilon \rightarrow 0$. \begin{lemma}\label{prep:triangle}In the limit $\epsilon \rightarrow 0$, the cost of synthesizing a unitary $U$ is given by \begin{equation} D(I,U) = \min_{Z: U(L,Z,M) = U} |Z| \end{equation} where $|Z| = \sqrt{\mathrm{Tr}(Z^2)}$ is the Hilbert--Schmidt norm of $Z$ and $U$ is as defined in (\ref{eqn:decompfirst}). \end{lemma} \begin{figure} \caption{(Color online) The triangle with vertices $\mathbf{0}=(0,0,0)$, $\mathbf{p}=(L,0,M)$ and $\mathbf{q}=(L,Z,M)$ used to bound $d(\mathbf{0},\mathbf{q})$.} \label{fig:triangle} \end{figure} \begin{proof} Consider the triangle with vertices $\mathbf{0} = (0,0,0)$, $\mathbf{p} = (L,0,M)$ and $\mathbf{q} = (L,Z,M)$ (Fig.~\ref{fig:triangle}). Let $C$ be a constant such that the operator norm $|\mathrm{BCH}_L| < C$, and let $K = \max (C,1)$. The length of the straight line from the origin to $\mathbf{p}$ is bounded above by $\epsilon K \sqrt{|L|^2 + |M|^2}$, and thus so is $d(\mathbf{0},\mathbf{p})$, the distance from the origin to $\mathbf{p}$. Two triangle inequalities then bound $d(\mathbf{0},\mathbf{q})$ from above and below: \begin{align}\nonumber d(\mathbf{0},\mathbf{q}) \leq d(\mathbf{p},\mathbf{q}) + d(\mathbf{0},\mathbf{p}) & \leq |Z| + \epsilon K \sqrt{|L|^2 + |M|^2}\\ \nonumber d(\mathbf{0},\mathbf{q}) \geq d(\mathbf{p},\mathbf{q}) - d(\mathbf{0},\mathbf{p}) &\geq |Z| - \epsilon K \sqrt{|L|^2 + |M|^2} \end{align} In the limit $\epsilon \rightarrow 0$, $d(\mathbf{0},\mathbf{q}) = |Z|$.
Application of Lemma \ref{lem:distance_equiv} gives the required result. \end{proof} $\blacksquare$ We now have an explicit characterization of distances on the coordinate manifold $\mathcal{M}$. The final step is to determine the pre-image of a given unitary in $SU(2^n)$. We use a variation of the technique developed in \cite{Childs03d}. Let the Cartan decomposition of $U$ be as in (\ref{eqn:decompfirst}). The properties of Cartan decompositions allow us to choose a basis such that the matrix representation of $U$ can be expressed as \begin{equation}\label{eqn:matrix_decomp} U = A D B^T, \end{equation} where $A = e^{iL}$, $B^T = e^{iM}$ are orthogonal, and $D = e^{iZ}$ is diagonal \cite{Helgason01a}. $D^2$ is the diagonalisation of $U^TU$ and is hence unique up to permutation of its diagonal elements. We use this decomposition to find an explicit expression for $D(I,U)$: \begin{theorem}\label{thrm:endresult}Consider the $n$-qubit Cartan control problem. The minimal cost required to synthesize a unitary $U$ with Cartan decomposition $e^{iL}e^{iZ}e^{iM}$ is \begin{equation} D(I,U) = \sqrt{\min_{\mathbf{y} \in \mathcal{L}} |\mathbf{eig}(Z) - \mathbf{y}|^2} \end{equation} where $\mathcal{L}$ is the lattice defined by the set of points \begin{equation} \mathcal{L} = \{(m_1, m_2, \ldots, m_{2^n})\pi: \,\, \sum m_k = 0, \,\, m_k \in \mathbb{Z}\}. \end{equation} \end{theorem} \begin{proof}Since $D$ is diagonal, we can describe it by a vector $\mathbf{x} = (x_1,\ldots,x_{2^n})$ such that the diagonal elements of $D$ take the form $e^{i x_k}$. In addition, we know $D = e^{iZ}$ for some $Z = \sum_j z_j B_j \in \mathfrak{z}$, where $B_j$, $j = 1,\ldots, 2^n-1$, is an orthonormal basis for $\mathfrak{z}$. Let $\mathcal{A}$ be the mapping that takes the vector $\mathbf{x}$ to $\mathbf{z}$, the vector representation of $Z$ in the $B_j$ basis, i.e.\ \begin{equation} \mathbf{eig}\left\{[\mathcal{A}\mathbf{x}]^k B_k\right\} = \{x_1,x_2,\ldots, x_{2^n}\}. \end{equation} To see that $\mathcal{A}$ is an isometry, i.e., $|\mathcal{A}\mathbf{x}|^2 = |\mathbf{x}|^2$, we note that in our particular representation (\ref{eqn:matrix_decomp}), $Z$ is diagonal, and thus $\mathbf{eig}\left\{[\mathcal{A}\mathbf{x}]^k B_k\right\} = \mathbf{diag}\left\{[\mathcal{A}\mathbf{x}]^k B_k\right\}$. Define $\overrightarrow{B}_k = \mathbf{diag}(B_k)$ as the vector formed from the diagonal elements of $B_k$; then $[\mathcal{A}\mathbf{x}]^k \overrightarrow{B}_k = \mathbf{x}$. Let $\mathcal{B} = [\overrightarrow{B}_1,\overrightarrow{B}_2,\ldots,\overrightarrow{B}_{2^n-1}]$ be the matrix whose columns are the vectors $\overrightarrow{B}_k$; then the equation can be rewritten as $\mathcal{B} \mathcal{A} \mathbf{x} = \mathbf{x}$. Since $\mathcal{B}$ is orthonormal, $\mathcal{A} = \mathcal{B}^{-1}$ must be also, and hence preserves the Euclidean norm. So \begin{align}\nonumber D(I,U) & = \min \{|Z|: \, e^{iL}e^{iZ}e^{iM} = U\},\\ & = \min \{|\mathbf{x}|: \, e^{iL}e^{i[\mathcal{A}\mathbf{x}]^k B_k}e^{iM} = U\}. \end{align} Since permutations preserve the Euclidean norm, the only freedom in $x_k$ that we need to minimize over is addition of multiples of $\pi$. Thus, given one particular decomposition of a given unitary $U$, $\mathbf{eig}(Z)$ gives one possible $\mathbf{x}$. The set of all vectors (permutations excluded) that generate $U$ is given by $\{\textbf{x} + \mathbf{l}: \, \mathbf{l} \in \mathcal{L} \}$. The result follows.
$\blacksquare$ \end{proof} Given a unitary $U \in SU(2^n)$, the above theorem offers a systematic method to solve for the minimal cost $D(I,U)$ required to synthesize $U$. \section{The Single Qubit Control Problem} In this section, we illustrate our result by applying it to the special case of single-qubit optimal control. We wish to synthesize a particular spin-$\frac{1}{2}$ interaction $U \in SU(2)$. Application of magnetic fields in one particular direction (say $x$) incurs negligible cost, while fields in all orthogonal directions incur unit cost, i.e. $\mathfrak{l} = \mathrm{Span}(\sigma_x)$ and $\mathfrak{p} = \mathrm{Span}(\sigma_y,\sigma_z)$. This problem is a slight variation of the single-qubit time-optimal control problem solved in \cite{Khaneja01a}. More precisely, it corresponds to the case of a system that evolves under a constant magnetic field described by the Hamiltonian $H_d = \sqrt{2} \sigma_z$. We wish to synthesize a unitary $U$ in minimal time, given the ability to synthesize magnetic fields in the $x$ direction of arbitrary strength, or to reverse the direction of $H_d$. \begin{prep}Let $U \in SU(2)$. Suppose we are given one particular decomposition \begin{equation}\label{eqn:decomp} U = \exp(-i x \sigma_x) \exp(-i z \sigma_z) \exp(-i y \sigma_x), \end{equation} where $\sigma_x$, $\sigma_z$ denote standard Pauli matrices. Then $D(I,U) = \frac{1}{\sqrt{2}}\min_{m\in \mathbb{Z}}|z - 2 m\pi|$. In particular, $D(I,U) = \frac{|z|}{\sqrt{2}}$ for $z \in [-\pi,\pi]$. \end{prep} \begin{proof}Note that $z \sigma_z$ has eigenvalues $\pm \frac{z}{2}$, and $\mathcal{L} = \{ (m\pi,-m\pi):\, m \in \mathbb{Z}\}$. So $D(I,U) = \sqrt{2 \min_{m \in \mathbb{Z}} | z/2 - m \pi|^2 }$. The result follows directly. $\blacksquare$ \end{proof} In \cite{Khaneja01a}, a slightly different result, where $D(I,U) = \frac{|z|}{\sqrt{2}}$ for $z \in [0,2 \pi]$, is obtained. The deviation results from our extra assumption that the direction of $H_d$ can be reversed. The KGB result requires the unique decomposition such that $z \in [0,2 \pi]$, whereas our result applies to any decomposition that satisfies (\ref{eqn:decomp}). \section{Conclusion} The geometrical approach provides a useful alternative to more algebraic methods \cite{Bennet02a,Khaneja01a,Childs03d}. In this paper, we have demonstrated how we can use it to characterize the general Cartan control problem. In the single-qubit case, our result solves a slight variation of single-qubit time-optimal control, and provides a second perspective on \cite{Khaneja01a}. In the two-qubit case, it characterizes the minimal amount of non-local interactions required to synthesize a given interaction. The general $n$-qubit Cartan control problem that we have described does not have a direct physical application, because the class of Hamiltonians assumed to be `easy' to apply is too broad to be realistic. However, our results do show an instance where the geometric formalism can be applied to systems of arbitrary size. By reducing the complicated space of unitary operations to a Cartesian coordinate system with a suitable metric, we circumvent many of the technical difficulties of algebraically intensive methods. The geometrical method outlined can convert any quantum control problem into a minimization of distances between two sets in Cartesian space with a suitably defined metric. This allows analytical solutions in special cases, such as the Cartan control problem.
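For instance, the lattice minimization in Theorem \ref{thrm:endresult} reduces to a finite search once the eigenphases $\mathbf{eig}(Z)$ of a particular decomposition are known; a minimal Python sketch is given below (the function name and the finite search radius are our own illustrative choices, adequate when the phases are of order unity):
\begin{verbatim}
import itertools
import numpy as np

def cartan_cost(eig_Z, radius=2):
    # D(I,U) = sqrt( min_{y in L} |eig(Z) - y|^2 ), with
    # L = { pi*(m_1,...,m_N) : sum_k m_k = 0, m_k integer };
    # the lattice is scanned over a finite window of integers.
    eig_Z = np.asarray(eig_Z, dtype=float)
    N = len(eig_Z)
    best = np.inf
    for m in itertools.product(range(-radius, radius + 1), repeat=N):
        if sum(m) != 0:
            continue
        best = min(best, float(np.sum((eig_Z - np.pi * np.array(m)) ** 2)))
    return np.sqrt(best)

# Phases already in a fundamental cell are left unchanged, while adding
# a zero-sum multiple of pi to them does not change the cost:
z = np.array([0.3, -0.3])
print(cartan_cost(z))                                  # ~ 0.4243
print(cartan_cost(z + np.pi * np.array([1, -1])))      # same value
\end{verbatim}
For two qubits the phase vector has four components, and the same routine applies unchanged.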
Alternatively, there exist numerous numerical techniques that have been designed to solve for minimal distances on manifolds. Thus, the geometric formalism is a promising method, both for solving other problems in control theory, and for its applications in quantum complexity. {\bf Acknowledgments---} M.G., A.D. and M.A.N. acknowledge the support of the Australian Research Council (ARC). M.G. thanks Christian Weedbrook and Mark de Burgh for discussions. \begin{appendix} \section{Derivation of the Coordinate Metric $C_M$} In this section, we provide a detailed outline of how the metric $C_M$ in Lemma \ref{lemma:local_metric} is derived. We consider a point $(L,Z,M)$ on $\mathcal{M}$ and consider perturbations of each of $L$, $Z$ and $M$, which we denote by $P_L$, $P_Z$ and $P_M$ respectively. Firstly, \begin{align}\nonumber U(L + \Delta P_L, Z, M) & = e^{-i(L + \Delta P_L)} e^{-iZ} e^{-iM} \\ &= \exp [-i\Delta \mathrm{BCH}_L(P_L)] U. \end{align} Since $[L,P_L] \in \mathfrak{l}$, and hence $\mathrm{BCH}_L(P_L) \in \mathfrak{l}$, \begin{align} \ip{P_L}{G P_L} &= \epsilon\ip{P_L}{\mathrm{BCH}^\dag_L \mathrm{BCH}_L P_L}\qquad \\ \Rightarrow G_{11} & = \epsilon \mathrm{BCH}^\dag_L \mathrm{BCH}_L. \end{align} Similarly, other components of $G$ can be computed by perturbing $Z$ and $M$: \begin{align} U(L , Z + \Delta P_Z, M) &= e^{-iL} e^{-i(Z + \Delta P_Z)} e^{-iM} \\& = \exp \left[ -i \Delta e^{-iL} P_Z e^{iL} \right]U. \end{align} Noting that $e^{-iL} P_Z e^{iL} \in \mathfrak{p}$ since $[\mathfrak{l}, \mathfrak{p}] \subseteq \mathfrak{p}$, we have \begin{equation} \ip{P_{Z}}{G P_{Z}} = |P_Z|^2 \qquad \Rightarrow \qquad G_{22} = I. \end{equation} The final perturbation is slightly more complex: \begin{align}\nonumber U(L , Z , M + \Delta P_M) &= e^{-iL} e^{-iZ} e^{-i(M + \Delta P_M)}\\ \nonumber & = e^{-iL} e^{ -i \Delta e^{-iZ} \mathrm{BCH}_M(P_M) e^{iZ} } e^{-iZ} e^{-iM} \\ \nonumber & = e^{ -i \Delta e^{-iL} e^{-iZ} \mathrm{BCH}_M(P_M) e^{iZ} e^{iL} } U\\ & = e^{ -i \Delta \mathcal{J}_{L,Z,M}(P_M)} U. \end{align} Noting that $\mathrm{BCH}_M(P_M) \in \mathfrak{l}$, and that conjugation by $e^{-iZ}$ mixes $\mathfrak{l}$ and $\mathfrak{p}$ (since $[\mathfrak{p},\mathfrak{l}] = \mathfrak{p}$ and $[\mathfrak{p},\mathfrak{p}] \subseteq \mathfrak{l}$), we can write \begin{equation} e^{-iZ} \mathrm{BCH}_M(P_M) e^{iZ} = a(Z)Q_L(P_M) + b(Z)Q_P(P_M), \end{equation} where $Q_L \in \mathfrak{l}$ and $Q_P \in \mathfrak{p}$, $a^2 + b^2 = 1$ and $b(0) = 0$. The commutation relations $[\mathfrak{l},\mathfrak{l}] \subseteq \mathfrak{l}$, $[\mathfrak{l},\mathfrak{p}] \subseteq \mathfrak{p}$ then imply that $\mathcal{J}_{L,Z,M}$ takes the same form, i.e.\ \begin{equation} \mathcal{J}_{L,Z,M}(P_M) = a(Z)Q_L(P_M) + b(Z)Q_P(P_M). \end{equation} Thus \begin{equation} \ip{P_{M}}{G P_{M}} = \epsilon\, a^2(Z)\,|Q_L(P_M)|^2 + b^2(Z)\,|Q_P(P_M)|^2. \end{equation} In particular, \begin{equation} G_{33} = \epsilon \textbf{A}(Z) + \textbf{B}(Z) \end{equation} for some positive semi-definite operators $\textbf{A}$ and $\textbf{B}$ such that $\textbf{A}(0) = \textbf{I}$ and $\textbf{B}(0) = \textbf{0}$. Hence, in matrix representation, the global metric takes on a block diagonal form: \begin{equation} G = \left( \begin{array}{ccc} \epsilon \mathrm{BCH}^\dag_L \mathrm{BCH}_L & & \\ & \textbf{I} & \\ & & \epsilon \textbf{A}(Z) + \textbf{B}(Z) \end{array}\right) \end{equation} \end{appendix} \end{document}
\begin{document} \title[Extrema of dynamic pressure in flows with underlying currents and infinite depth] {Extrema of the dynamic pressure in an irrotational regular wave train with underlying currents and infinite depth} \author[Lili Fan]{Lili Fan} \address[Lili Fan]{College of Mathematics and Information Science, Henan Normal University, Xinxiang 453007, China} \email{[email protected]} \author[Hongjun Gao$^{\dag}$]{Hongjun Gao$^{\dag}$} \address[Hongjun Gao]{School of Mathematical Sciences, Institute of Mathematics, Nanjing Normal University, Nanjing 210023, China; \ Institute of Mathematics, Jilin University, Changchun 130012, China} \email{[email protected]\, (Corresponding author)} \author[Lei Mao]{Lei Mao} \address[Lei Mao]{School of Mathematical Sciences, Institute of Mathematics, Nanjing Normal University, Nanjing 210023, China} \begin{abstract}In this paper, we investigate the maximum and minimum of the dynamic pressure in a regular wave train with underlying currents and with infinite depth, respectively. The result is obtained using maximum principles in combination with some of the physical structure of the problem itself. \end{abstract} \date{} \maketitle \noindent {\sl Keywords\/}: Stokes wave, dynamic pressure, underlying current, deep water. \vskip 0.2cm \noindent {\sl AMS Subject Classification} (2010): 35Q35; 35J15. \\ \section{Introduction} \large This paper aims to study the dynamic pressure in an irrotational regular wave train with uniform underlying currents and with infinite depth, respectively. The dynamic pressure, encoding the fluid motion, is one of the two components of the total pressure beneath a surface water wave, the other being a hydrostatic part that counteracts the force of gravity and keeps the fluid in equilibrium. The study of the behaviour of the pressure is not only of theoretical interest but also has practical applications, such as the estimation of the force acting on maritime structures \cite{Cl1,Co5,ES,D2,K,Ol}. Significant investigations of the structure and behaviour of Stokes waves have been carried out by Constantin and his colleagues, including trajectories of particles \cite{Co2}, mean velocities \cite{Co4}, analyticity and symmetry of streamlines \cite{CoEs,CoEh}, pressure \cite{CS}, dynamic pressure \cite{Co1} and so on. As regards Stokes waves in a fluid domain of infinite depth, Henry has investigated the horizontal velocity \cite{D}, the trajectories of particles \cite{D3} and the pressure \cite{D1}. The study of irrotational regular wave trains with uniform underlying currents is due to Basu \cite{Ba}, who showed that when the speed of the current is greater than the wave speed, the nature of the flow field differs from what is expected when no underlying current is present \cite{Co2,Co,Co3}, while the pressure field is unaffected by the presence of underlying currents. There is also some other noteworthy work related to this area of nonlinear water waves \cite{G,Jo1,L1,L2,U,VO}. Inspired by the work of Constantin \cite{Co1}, we are concerned in this paper with the influence of uniform underlying currents and infinite depth on the extrema of the dynamic pressure.
Due to the presence of currents, taking for example the case where the current strength $k$ is greater than the wave speed $c$, we first eliminate the possibility that the relative mass flux satisfies $m\geq 0$ by employing the maximum principle and Hopf's maximum principle; a further application of Hopf's maximum principle to the stream function $\psi$ then enables us to conclude that the horizontal velocity $u$ is greater than the wave speed $c$, a fact which Basu obtained in \cite{Ba} by analyzing the non-dimensional system for the stream function, an equivalent formulation of the governing equations. Next, we follow the idea presented in \cite{Co1} to obtain the extrema of the dynamic pressure. As for the dynamic pressure in deep water, the main difficulty comes from the failure of the maximum principle in a fluid domain of infinite depth. Based on the result about the total pressure established by Henry \cite{D1}, we eliminate the possibility that the extrema are attained in the interior of the domain by assuming the contrary. The investigation of the dynamic pressure along the boundary of the fluid domain is relatively standard. In conclusion, we show that the presence of underlying currents and infinite depth makes no difference to the position of the extrema of the dynamic pressure, which attains its maximum and minimum at the wave crest and wave trough respectively. The remainder of this paper is organized as follows. In Section 2, we present the governing equations for irrotational regular wave trains with a uniform underlying current. In Section 3, we prove that the maximum and minimum of the dynamic pressure occur at the wave crest and the wave trough respectively, unaffected by the presence of uniform underlying currents and infinite depth. \section{The governing equations} \large The problems we consider are two-dimensional steady periodic irrotational gravity water waves in a homogeneous fluid with an underlying current of strength $k$, the $x$-axis being the direction of wave propagation and the $y$-axis pointing vertically upwards. The bottom is assumed to be flat, given by $y=-d$ with $d>0$ representing the mean water depth, and to be impermeable. Given $c>0$, assume that these two-dimensional periodic steady waves travel at speed $c$; that is, the space-time dependence of the free surface $y=\eta(x)$, of the pressure $P=P(x,y)$, and of the velocity field $(u,v)$ is through the variable $x-ct$ and is periodic with period $L>0$. Using the map $(x-ct,y) \mapsto (x,y)$, we move to a new reference frame travelling alongside the wave with constant speed $c$, in which the fluid flow is steady. Then the governing equations for the gravity waves are embodied by the following nonlinear free boundary problem \cite{Co}: \begin{align} (u-c)u_x+vu_y&=-\frac 1 \rho P_x,\quad\;\;\quad -d\leq y\leq \eta(x),\label{2.1}\\ (u-c)v_x+vv_y&=-\frac 1 \rho P_y-g,\!\quad -d\leq y\leq \eta(x),\label{2.2}\\ u_x+v_y&=0,\qquad\;\;\qquad\;-d\leq y\leq \eta(x),\label{2.3}\\ u_y&=v_x,\qquad \;\; \qquad -d\leq y\leq \eta(x),\label{2.4}\\ v&=0,\quad \; \quad \quad \quad \quad \textrm{on} \quad y=-d,\label{2.5}\\ v&=(u-c)\eta_x,\;\;\:\quad \textrm{on} \quad y=\eta(x),\label{2.6}\\ P&=P_{atm},\;\;\qquad\quad \textrm{on} \quad y=\eta(x),\label{2.7} \end{align} where $P_{atm}$ is the constant atmospheric pressure, $g$ is the (constant) acceleration of gravity and $\rho$ is the (constant) density.
A regular wave train is a smooth solution to the governing equations \eqref{2.1}-\eqref{2.7} which satisfies \cite{Co}: (i) $u$ and $v$ have a single crest and trough per period, (ii) $\eta$ is strictly monotone between successive crests and troughs, (iii) $\eta, u$ and $P$ are symmetric and $v$ is antisymmetric about the crest line. Without loss of generality, we may assume that the crest is located at $x=0$ and the trough at $x=L/2$. Using \eqref{2.3} we can define the stream function $\psi$ up to a constant by \begin{equation}\label{2.8} \psi_y=u-c,\quad\psi_x=-v, \end{equation} and we fix the constant by setting $\psi=0$ on $ y=\eta(x)$. We can integrate \eqref{2.8} to get $\psi=m$ on $y=-d$, where \begin{equation}\label{2.9} m= - \int_{-d}^{\eta(x)}(u(x,y)-c)dy \end{equation} is the relative mass flux, and by writing \begin{equation}\label{2.10} \psi(x,y)=m+\int_{-d}^{y}(u(x,s)-c)ds, \end{equation} we can see that $\psi$ is periodic (with period $L$) in the $x$ variable. The level sets of $\psi(x,y)$ are the streamlines of the fluid motion. Then the governing equations \eqref{2.1}-\eqref{2.7} are transformed to the equivalent system \begin{align} &\psi_{xx}+\psi_{yy}=0,\quad\;\;\quad \text{for}\; -d\leq y\leq \eta(x),\label{2.11}\\ &\psi=0,\quad\;\;\quad \text{on}\; y=\eta(x),\label{2.12}\\ &\psi=m,\quad\;\;\quad \text{on}\; y=-d,\label{2.13}\\ &\frac {\psi^2_x+\psi^2_y} {2g}+y+d=Q \quad \text{on}\; y=\eta(x),\label{2.14} \end{align} where the constant $Q$ is the total head, which is expressed by \begin{equation}\label{2.15} \frac {\psi^2_x+\psi^2_y} {2g}+y+d+\frac {P-P_{atm}} {\rho g}=Q \end{equation} throughout $\{(x,y): -d\leq y\leq \eta(x)\}$ by using the equations \eqref{2.1} and \eqref{2.2}. Then the total pressure can be recovered from \begin{equation}\label{2.16} P=P_{atm}+\rho g Q-\rho g (y+d)-\rho\frac {\psi^2_x+\psi^2_y} {2}. \end{equation} Hence the dynamic pressure we are concerned with in this paper, defined as the difference between the total pressure $P(x,y)$ and the hydrostatic pressure $(P_{atm}-\rho g y)$, is given by \begin{equation}\label{2.17} p=P(x,y)-(P_{atm}-\rho g y)=\rho g (Q-d)-\rho\frac {\psi^2_x+\psi^2_y} {2}. \end{equation} In the setting of periodic waves traveling at a constant speed at the surface of water in an irrotational flow over a flat bed, we have \begin{equation}\label{3.1} 0=\int_{-d}^{y_0}\int_0^{L}(u_y-v_x)dxdy=\int_0^Lu(x,y_0)dx-\int_0^Lu(x,-d)dx, \end{equation} for $y_0\in[-d,\eta(L/2)]$. Hence \begin{equation}\label{3.2} \int_0^Lu(x,y_0)dx=\int_0^Lu(x,-d)dx,\quad y_0\in[-d,\eta(L/2)], \end{equation} and the uniform underlying current defined by \begin{equation}\label{3.3} k=\frac 1 L \int_0^Lu(x,-d)dx \end{equation} is independent of $y$. Thus, by \eqref{2.8}, we can represent the underlying current $k$ as \begin{equation}\label{3.4} k=c+\frac 1 L \int_0^L\psi_y(x,-d)dx. \end{equation} \section{The dynamic pressure} \large In this section we will prove the main result of this paper.
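Before turning to the proof, we note that the elliptic equation \eqref{3.14} satisfied by $p$, which is derived below by a direct calculation, can also be checked symbolically for any harmonic stream function; a minimal Python (\texttt{sympy}) sketch, with a linear-wave-type test function chosen purely for illustration, is:
\begin{verbatim}
import sympy as sp

x, y = sp.symbols('x y', real=True)
rho, g, Q, d, k, c = sp.symbols('rho g Q d k c', positive=True)

# A sample harmonic stream function (chosen only for illustration;
# any harmonic psi works in the same way).
psi = -c*(y + d) + sp.sinh(k*(y + d))*sp.cos(k*x)/10
assert sp.diff(psi, x, 2) + sp.diff(psi, y, 2) == 0    # psi is harmonic

# Dynamic pressure as in (2.17)
sx, sy = sp.diff(psi, x), sp.diff(psi, y)
p = rho*g*(Q - d) - rho*(sx**2 + sy**2)/2
px, py = sp.diff(p, x), sp.diff(p, y)

# Verify p_xx + p_yy = -2(p_x^2 + p_y^2) / (rho (psi_x^2 + psi_y^2)),
# with the denominator cleared before expanding.
lhs = (sp.diff(p, x, 2) + sp.diff(p, y, 2)) * rho * (sx**2 + sy**2)
rhs = -2*(px**2 + py**2)
print(sp.expand(lhs - rhs))   # prints 0
\end{verbatim}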
\subsection{The dynamic pressure with underlying currents} \large \begin{theorem}\label{the1} In the presence of a uniform underlying current of strength $k\neq 0$, the dynamic pressure in an irrotational regular wave train (1) attains its maximum value and minimum value at the wave crest and trough respectively for $k\neq c$, (2) is identically zero for $k=c$. \end{theorem} \begin{proof} We analyze the cases $k>c$, $k=c$ and $k<c$ in turn. \noindent\emph{Case 1. $k>c$. } Denote $k-c=\bar{c}>0$; then we have \begin{equation}\label{3.5} \bar{c}=\frac 1 L \int_0^L\psi_y(x,-d)dx>0. \end{equation} We claim that \begin{equation}\label{3.6} u(x,y)>c, \quad\text{for} \;\; -d\leq y \leq \eta(x). \end{equation} By the maximum principle, the $L$-periodicity in the $x$ variable of the harmonic function $\psi(x,y)$ ensures that its maximum and minimum in the domain \begin{equation}\label{3.7} D=\{(x,y):-d\leq y\leq \eta(x)\}, \end{equation} will be attained on the boundary. This, combined with the assumption $\bar{c}>0$, forces \begin{equation}\label{3.8} m<0. \end{equation} Indeed: (a) if $m=0$, then $\psi$ is necessarily constant throughout the domain $D$, resulting in $\bar{c}= 0$ by its definition; (b) if $m>0$, then the maximum value of $\psi$ is attained on $y=-d$, and Hopf's maximum principle yields $\psi_y(x,-d)<0$, which leads to $\bar{c}<0$. This is a contradiction. For $m<0$, we get that $\psi$ attains its maximum and minimum on $y=\eta(x)$ and $y=-d$ respectively. A further application of Hopf's maximum principle leads to $\psi_y(x,\eta(x))>0$ and $\psi_y(x,-d)>0$. Thus $\psi_y>0$ on the boundary of $D$, and the maximum principle then ensures that $\psi_y>0$ throughout $D$, since $\psi_y$ is a harmonic function; the claim \eqref{3.6} then follows from the definition \eqref{2.8} of $\psi_y$. Due to the anti-symmetry and the $L$-periodicity in the $x$ variable of $v$, together with \eqref{2.5}, we have that $v=0$ along the lower and lateral boundaries of the domain \begin{equation}\label{3.9} D_+=\{(x,y):0<x<L/2, -d< y< \eta(x)\}. \end{equation} Along the upper boundary we have $\eta'(x)<0$, so the equation \eqref{2.6} and the claim \eqref{3.6} ensure that $v(x,\eta(x))\leq 0$ for $x\in[0,L/2]$. Since $v\leq 0$ on the whole boundary of $D_+$, we have from the maximum principle that $v<0$ throughout $D_+$. Then Hopf's maximum principle and the equations \eqref{2.3}-\eqref{2.4} yield \begin{align} &u_x(x,-d)=-v_y(x,-d)>0,\quad \text{for} \quad x\in(0,L/2),\label{3.10}\\ &u_y(0,y)=v_x(0,y)<0,\quad \text{for} \quad y\in(-d,\eta(0)),\label{3.11}\\ &u_y(L/2,y)=v_x(L/2,y)>0,\quad \text{for} \quad y\in(-d,\eta(L/2)).
Combining \eqref{3.6} with \eqref{2.17}, written in the form
\begin{equation}\label{3.13}
p=\rho g (Q-d)-\rho\frac {\psi^2_x+\psi^2_y} {2}=\rho g (Q-d)-\rho\frac {v^2+(u-c)^2} {2},
\end{equation}
the inequalities \eqref{3.10}-\eqref{3.12} ensure that $p$ is strictly decreasing as we descend vertically in the fluid below the crest, that it continues to decrease along the portion $\{(x,-d):0<x<L/2\}$ of the flat bed as $x$ increases, and that this strict decrease persists as we ascend vertically from the bed towards the surface below the trough. Furthermore, in view of \eqref{2.7} and \eqref{2.17} and the monotonicity of the wave profile between the crest and a successive trough, $p$ is also strictly decreasing as we descend from the crest towards the trough along the upper boundary of $D_+$. On the other hand, a direct calculation shows that
\begin{equation}\label{3.14}
p_{xx}+p_{yy}=-\frac{2(p_x^2+p_y^2)} {\rho (\psi_x^2+\psi_y^2)}=\alpha p_x+\beta p_y,\quad \text{in}\;D,
\end{equation}
with
\begin{equation}\label{3.15}
\alpha=-\frac {2p_x} {\rho (\psi_x^2+\psi_y^2)},\quad \beta=-\frac {2p_y} {\rho (\psi_x^2+\psi_y^2)},
\end{equation}
where $\psi_x^2+\psi_y^2>0$ by \eqref{3.6}. The maximum principle then ensures that the extrema of $p$ can only be attained on the boundary of $D_+$. In view of the behaviour of $p$ along the boundary of $D_+$ described above, we conclude the result for $k>c$.

\noindent\emph{Case 2: $k=c$.} In this case we have
\begin{equation}\label{3.18}
\bar{c}=k-c=\frac 1 L \int_0^L\psi_y(x,-d)\,dx=0.
\end{equation}
We claim that
\begin{equation}\label{3.19}
\psi(x,y)\equiv 0\quad \text{in}\quad D.
\end{equation}
Indeed, the harmonic function $\psi(x,y)$ attains its maximum and minimum on the boundary of $D$. If $m\neq 0$, then Hopf's maximum principle yields $\psi_y(x,-d)\neq 0$ for every $x$. On the other hand, by the mean value theorem for integrals, \eqref{3.18} implies that there must exist a point $(x_0,-d)$ with $\psi_y(x_0,-d)=0$, a contradiction. Thus $m=0$, which, combined with the equations \eqref{2.11}-\eqref{2.13} and the maximum principle, forces \eqref{3.19}. Hence
\begin{equation}\label{3.21}
\psi_y=0\quad \text{and}\quad \psi_x=0,
\end{equation}
which in turn lead to
\begin{equation}\label{3.22}
u=c\quad \text{and} \quad v=0.
\end{equation}
We thus get from \eqref{2.1} and \eqref{2.2} that
\begin{equation}\label{3.23}
P_x=0\quad \text{and} \quad P_y=-\rho g.
\end{equation}
Differentiating \eqref{2.7} with respect to $x$ gives
\begin{equation}\label{3.24}
P_x(x,\eta(x))+P_y(x,\eta(x))\,\eta'(x)=0,
\end{equation}
which, combined with \eqref{3.23}, leads to
\begin{equation}\label{3.25}
\eta(x)\equiv\eta_0.
\end{equation}
After a vertical shift we may assume that the flat surface is $\eta_0=0$. Hence \eqref{3.25}, \eqref{3.21} and \eqref{2.14} imply $Q=d$, so by \eqref{2.16} the total pressure reduces to the hydrostatic pressure
\begin{equation}\label{3.26}
P=P_{atm}-\rho g y,
\end{equation}
and the dynamic pressure is identically zero.
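Before treating the remaining case, we sketch, for the reader's convenience, the direct calculation behind \eqref{3.14}; it uses only \eqref{2.11} and \eqref{3.13}. Differentiating \eqref{3.13} and using the harmonicity of $\psi$ (so that $\psi_{yy}=-\psi_{xx}$, $\psi_{xxx}+\psi_{xyy}=0$ and $\psi_{xxy}+\psi_{yyy}=0$), one finds
\begin{equation*}
p_{xx}+p_{yy}=-2\rho\,\big(\psi_{xx}^2+\psi_{xy}^2\big),
\qquad
p_x^2+p_y^2=\rho^2\big(\psi_x^2+\psi_y^2\big)\big(\psi_{xx}^2+\psi_{xy}^2\big),
\end{equation*}
and eliminating $\psi_{xx}^2+\psi_{xy}^2$ between these two identities gives precisely the first equality in \eqref{3.14}.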
\noindent\emph{Case 3: $k<c$.} Writing $k-c=\bar{c}<0$, we have
\begin{equation}\label{3.16}
\bar{c}=\frac 1 L \int_0^L\psi_y(x,-d)\,dx<0.
\end{equation}
An argument similar to that of Case 1 leads to
\begin{equation}\label{3.17}
u(x,y)<c, \quad\text{for} \;\; -d\leq y \leq \eta(x).
\end{equation}
This case can therefore be treated in the same way as an irrotational regular wave train without any underlying current \cite{Co1}, and we conclude that the dynamic pressure attains its maximum value at the wave crest and its minimum value at the wave trough.

Combining Cases 1-3 completes the proof of Theorem \ref{the1}.
\end{proof}

\subsection{The dynamic pressure in deep water}
The governing equations for irrotational regular wave trains in deep water are the following \cite{D3, Jo1}:
\begin{align}
(u-c)u_x+vu_y&=-\frac 1 \rho P_x,\qquad\quad -\infty<y\leq \eta(x),\label{4.1}\\
(u-c)v_x+vv_y&=-\frac 1 \rho P_y-g,\;\quad\; -\infty<y\leq \eta(x),\label{4.2}\\
u_x+v_y&=0,\qquad\qquad\;\;\; -\infty<y\leq \eta(x),\label{4.3}\\
u_y&=v_x,\qquad\qquad\;\; -\infty<y\leq \eta(x),\label{4.4}\\
v&=(u-c)\eta_x,\qquad\, \text{on} \quad y=\eta(x),\label{4.5}\\
P&=P_{atm},\qquad\qquad \text{on} \quad y=\eta(x),\label{4.6}\\
(u,v)&\rightarrow (0,0),\qquad\quad\, \text{as}\;y\rightarrow -\infty, \;\; \text{uniformly for}\; x\in\mathbb{R}. \label{4.7}
\end{align}
Using the stream function $\psi$, we can reformulate the governing equations in the moving frame as follows:
\begin{align}
&\psi_{xx}+\psi_{yy}=0,\quad\;\;\quad \text{for}\; -\infty<y\leq \eta(x),\label{4.8}\\
&\psi=0,\quad\;\;\quad \text{on}\; y=\eta(x),\label{4.9}\\
&\frac {\psi^2_x+\psi^2_y} {2g}+y=E,\quad \text{on}\; y=\eta(x),\label{4.10}\\
&\nabla\psi\rightarrow (0,-c),\quad\;\text{as}\;y\rightarrow -\infty, \;\; \text{uniformly for}\; x\in\mathbb{R},\label{4.11}
\end{align}
where the constant $E$ is the total head, determined by the relation
\begin{equation}\label{4.12}
\frac {\psi^2_x+\psi^2_y} {2g}+y+\frac {P-P_{atm}} {\rho g}=E,
\end{equation}
valid throughout $\overline{D_\eta}=\{(x,y): -\infty <y\leq \eta(x)\}$; the left-hand side of \eqref{4.12} expresses Bernoulli's law. The total pressure can then be recovered from
\begin{equation}\label{4.13}
P=P_{atm}+\rho g E-\rho g y-\rho\frac {\psi^2_x+\psi^2_y} {2},
\end{equation}
and hence the dynamic pressure is given by
\begin{equation}\label{4.14}
p=P(x,y)-(P_{atm}-\rho g y)=\rho g E-\rho\frac {\psi^2_x+\psi^2_y} {2}.
\end{equation}
We assume that there is no underlying current, that is \cite{D3},
\begin{equation}\label{k}
k=\frac 1 L \int_0^Lu(x,y_0)\,dx=0,
\end{equation}
where $y_0$ is any fixed depth below the wave trough level. This implies that
\begin{equation}\label{u}
u<c \quad \text{throughout}\;\;\overline{D_\eta}.
\end{equation}
Indeed, let $\bar{y}<0$ be chosen so negative that $\psi_y<0$ uniformly in $x$ for $-\infty<y\leq \bar{y}$; the existence of such a $\bar{y}$ follows from \eqref{4.11}.
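To make this explicit (a brief remark, using that the wave speed satisfies $c>0$): by \eqref{4.11} we have $\psi_y\rightarrow -c<0$ uniformly in $x$ as $y\rightarrow -\infty$, so there exists $\bar{y}<0$ with
\begin{equation*}
\psi_y(x,y)\leq -\frac{c}{2}<0\quad \text{for all}\; x\in\mathbb{R}\;\text{and}\; y\leq \bar{y}.
\end{equation*}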
On the other hand, by the maximum principle, the $L$-periodicity in the $x$ variable of the harmonic function $\psi(x,y)$ ensures that its maximum and minimum over the domain $D_{\bar{y}}=\{(x,y):\bar{y}\leq y\leq \eta(x)\}$ are attained on the boundary. Moreover, Hopf's maximum principle and the fact that $\psi_y(x,\bar{y})<0$ show that the maximum value is attained on $y=\bar{y}$, so the minimum value must be attained all along the free surface $y=\eta(x)$. Hence $\psi_y<0$ on the boundary of $D_{\bar{y}}$ (on the free surface this follows from a further application of Hopf's maximum principle at the points where the minimum is attained), and applying the maximum principle to the harmonic function $\psi_y$ yields $\psi_y<0$ throughout $D_{\bar{y}}$. Consequently $\psi_y<0$ throughout $\overline{D_\eta}$, which ensures \eqref{u}.

The following lemma collects known results about deep-water Stokes waves that are crucial for our investigation of the dynamic pressure in deep water.
\begin{lemma}\label{lem1}\cite{D3,D1}
Let $D_\eta^+=\{(x,y):x\in(0,L/2),\ y\in(-\infty,\eta(x))\}$. Then the strict inequalities
\begin{equation}\label{4.15}
v(x,y)=-\psi_x(x,y)> 0\;\; \text{and}\;\; P_x<0,\quad \text{for}\;\; (x,y)\in D_\eta^+,
\end{equation}
hold, while $v=0$ and $P_x=0$ on the crest and trough lines.
\end{lemma}
We are now in a position to present the result on the dynamic pressure in deep water.
\begin{theorem}\label{the2}
In a deep-water flow without underlying current, the dynamic pressure in an irrotational regular wave train attains its maximum value at the wave crest and its minimum value at the wave trough.
\end{theorem}
\begin{proof}
From Lemma \ref{lem1} we know that $v>0$ throughout $D_\eta^+$. Hopf's maximum principle, applied to the harmonic function $v$, and the equations \eqref{4.3}-\eqref{4.4} then yield
\begin{align}
&u_y(0,y)=v_x(0,y)>0,\quad \text{for} \quad y\in(-\infty,\eta(0)),\label{4.16}\\
&u_y(L/2,y)=v_x(L/2,y)<0,\quad \text{for} \quad y\in(-\infty,\eta(L/2)).\label{4.17}
\end{align}
Combined with \eqref{4.14} and \eqref{u}, the inequalities \eqref{4.16}-\eqref{4.17} ensure that
\begin{equation}\label{4.18}
p=P(x,y)-(P_{atm}-\rho g y)=\rho g E-\rho\frac {v^2+(c-u)^2} {2}
\end{equation}
is strictly decreasing as we move downwards along the vertical half-line $\{0\}\times(-\infty,\eta(0)]$ below the crest, and is strictly decreasing along $\{L/2\}\times(-\infty,\eta(L/2)]$ as we move upwards towards the surface below the trough. Furthermore, in view of \eqref{4.6} and \eqref{4.14} and the monotonicity of the wave profile between the crest and a successive trough, $p$ is also strictly decreasing as we descend from the crest towards the trough along the upper boundary of $D_\eta^+$. On the other hand, \eqref{4.16}-\eqref{4.17} and \eqref{4.7} show that $u$ decreases strictly towards zero along $x=0$ as $y$ tends to minus infinity, whereas it increases strictly towards zero as $y \rightarrow -\infty$ along $x=L/2$, so that $u(0,y)>0$ for $y\in(-\infty, \eta(0)]$ and $u(L/2,y)<0$ for $y\in(-\infty, \eta(L/2)]$. By \eqref{4.18} this means that $p>\rho g E-\rho c^2/2$ along the crest line and $p<\rho g E-\rho c^2/2$ along the trough line, where $\rho g E-\rho c^2/2$ is the common limit of $p$ as $y\rightarrow -\infty$. Thus the maximum of $p$ along the boundary of $D_\eta^+$ can only be attained at $(0,\eta(0))$, and the minimum of $p$ along the boundary of $D_\eta^+$ can only be attained at $(L/2,\eta(L/2))$.
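Note that the monotonicity of $p$ along the free surface used above can also be seen directly: by the Bernoulli condition \eqref{4.10} we have $\rho\frac{\psi_x^2+\psi_y^2}{2}=\rho g\big(E-\eta(x)\big)$ on $y=\eta(x)$, so \eqref{4.14} gives
\begin{equation*}
p(x,\eta(x))=\rho g\,\eta(x),
\end{equation*}
exactly as in the finite-depth setting.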
Now we prove that the extrema of $p$ cannot occur in the interior of the domain $D_\eta^+$. Assume, to the contrary, that there is a point $(x_0,y_0)\in D_\eta^+$ at which $p$ attains an extremum. Then
\begin{align}
p_x&=-\rho \left(vv_x+(u-c)u_x\right)=0,\label{4.19}\\
p_y&=-\rho \left(vv_y+(u-c)u_y\right)=0,\label{4.20}
\end{align}
at $(x_0,y_0)$. Using the equations \eqref{4.3}-\eqref{4.4} and \eqref{u}, we can solve this system to obtain
\begin{equation}\label{4.21}
u_x(x_0,y_0)=u_y(x_0,y_0)=0;
\end{equation}
indeed, substituting $v_x=u_y$ and $v_y=-u_x$ turns \eqref{4.19}-\eqref{4.20} into a homogeneous linear system in $(u_x,u_y)$ whose determinant equals $(u-c)^2+v^2>0$ by \eqref{u}. Hence
\begin{equation}\label{4.22}
P_x(x_0,y_0)=0
\end{equation}
follows from \eqref{4.1}, which contradicts \eqref{4.15} in Lemma \ref{lem1}. Together with the preceding discussion of the behaviour of $p$ along the boundary of $D_\eta^+$, this completes the proof.
\end{proof}
\begin{remark}
Theorems \ref{the1} and \ref{the2} show that neither a uniform underlying current nor infinite depth changes the location of the extrema of the dynamic pressure.
\end{remark}

\noindent {\bf Acknowledgements.} The work of Gao is partially supported by the NSFC grant No. 11531006, the National Basic Research Program of China (973 Program) No. 2013CB834100, PAPD of Jiangsu Higher Education Institutions, and the Jiangsu Collaborative Innovation Center for Climate Change.

\begin{thebibliography}{99}
\bibitem{Ba}{\small \textsc{B. Basu}, Irrotational two-dimensional free-surface steady water flows over a flat bed with underlying currents, {\it Nonlinear Anal.: Real World Appl.}, {\bf 147} (2016) 110-124.}
\bibitem{Cl1}{\small \textsc{D. Clamond}, New exact relations for easy recovery of steady wave profiles from bottom pressure measurements, {\it J. Fluid Mech.}, {\bf 726} (2013) 547-558.}
\bibitem{Co2}{\small \textsc{A. Constantin}, The trajectories of particles in Stokes waves, {\it Invent. Math.}, {\bf 166} (2006) 523-535.}
\bibitem{Co}{\small \textsc{A. Constantin}, {\it Nonlinear Water Waves with Applications to Wave-Current Interactions and Tsunamis,} volume 81 of CBMS-NSF Conference Series in Applied Mathematics, SIAM, Philadelphia, (2011).}
\bibitem{Co4}{\small \textsc{A. Constantin}, Mean velocities in a Stokes wave, {\it Arch. Ration. Mech. Anal.}, {\bf 207} (2013) 907-917.}
\bibitem{Co5}{\small \textsc{A. Constantin}, Estimating wave heights from pressure data at the bed, {\it J. Fluid Mech.}, {\bf 743} (2014) R2.}
\bibitem{Co1}{\small \textsc{A. Constantin}, Extrema of the dynamic pressure in an irrotational regular wave train, {\it Phys. Fluids}, {\bf 28} (2016) 113604.}
\bibitem{Co3}{\small \textsc{A. Constantin}, Exact travelling periodic water waves for two-dimensional irrotational flows, in: Adrian Constantin (Ed.), Nonlinear Water Waves, Cetraro, Italy 2013, {\it Lecture Notes in Mathematics}, {\bf 2158} (2016) 1-82.}
\bibitem{CoEs}{\small \textsc{A. Constantin and J. Escher}, Analyticity of periodic traveling free surface water waves with vorticity, {\it Ann. Math.}, {\bf 173} (2011) 559-568.}
\bibitem{CoEh}{\small \textsc{A. Constantin, M. Ehrnstr\"{o}m and E. Wahl\'{e}n}, Symmetry of steady periodic gravity water waves with vorticity, {\it Duke Math. J.}, {\bf 140} (2007) 591-603.}
\bibitem{CS}{\small \textsc{A. Constantin and W. Strauss}, Pressure beneath a Stokes wave, {\it Comm. Pure Appl. Math.}, {\bf 63} (2010) 533-557.}
\bibitem{ES}{\small \textsc{J. Escher and T. Schlurmann}, On the recovery of the free surface from the pressure within periodic traveling water waves, {\it J. Nonlinear Math. Phys.}, {\bf 15} (2008) 50-57.}
\bibitem{G}{\small \textsc{F. Genoud}, Extrema of the dynamic pressure in a solitary wave, {\it arXiv:1612.07918v1}.}
\bibitem{D3}{\small \textsc{D. Henry}, The trajectories of particles in deep-water Stokes waves, {\it Int. Math. Res. Not.}, {\bf 2006} (2006) 1-13.}
\bibitem{D}{\small \textsc{D. Henry}, On the deep-water Stokes wave flow, {\it Int. Math. Res. Not.}, Art. ID rnn071, (2008) 7 pages.}
\bibitem{D1}{\small \textsc{D. Henry}, Pressure in a deep-water Stokes wave, {\it J. Math. Fluid Mech.}, {\bf 13} (2011) 251-257.}
\bibitem{D2}{\small \textsc{D. Henry}, On the pressure transfer function for solitary water waves with vorticity, {\it Math. Ann.}, {\bf 357} (2013) 23-30.}
\bibitem{Jo1}{\small \textsc{R. S. Johnson}, {\it A Modern Introduction to the Mathematical Theory of Water Waves,} Cambridge University Press, (1997).}
\bibitem{K}{\small \textsc{F. Kogelbauer}, Recovery of the wave profile for irrotational periodic water waves from pressure measurements, {\it Nonlinear Anal.: Real World Appl.}, {\bf 22} (2015) 219-224.}
\bibitem{L1}{\small \textsc{T. Lyons}, The pressure distribution in extreme Stokes waves, {\it Nonlinear Anal.: Real World Appl.}, {\bf 31} (2016) 77-87.}
\bibitem{L2}{\small \textsc{T. Lyons}, The pressure in a deep-water Stokes wave of greatest height, {\it J. Math. Fluid Mech.}, {\bf 18} (2016) 209-218.}
\bibitem{Ol}{\small \textsc{K. L. Oliveras, V. Vasan, B. Deconinck and D. Henderson}, Recovering the water-wave profile from pressure measurements, {\it SIAM J. Appl. Math.}, {\bf 72} (2012) 897-918.}
\bibitem{U}{\small \textsc{M. Umeyama}, Eulerian-Lagrangian analysis for particle velocities and trajectories in a pure wave motion using particle image velocimetry, {\it Phil. Trans. R. Soc. A}, {\bf 370} (2012) 1687-1702.}
\bibitem{VO}{\small \textsc{V. Vasan and K. L. Oliveras}, Pressure beneath a traveling wave with constant vorticity, {\it Discrete Contin. Dyn. Syst.}, {\bf 34} (2014) 3219-3239.}
\end{thebibliography}
\end{document}